// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
#include "status.h"
#include "defs.h"
#include "user.h"
#include "irdma.h"

/**
 * irdma_set_fragment - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void irdma_set_fragment(__le64 *wqe, u32 offset, struct irdma_sge *sge,
			       u8 valid)
{
	if (sge) {
		set_64bit_val(wqe, offset,
			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
		set_64bit_val(wqe, offset + 8,
			      FIELD_PREP(IRDMAQPSQ_VALID, valid) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->len) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->stag));
	} else {
		set_64bit_val(wqe, offset, 0);
		set_64bit_val(wqe, offset + 8,
			      FIELD_PREP(IRDMAQPSQ_VALID, valid));
	}
}

/**
 * irdma_set_fragment_gen_1 - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
				     struct irdma_sge *sge, u8 valid)
{
	if (sge) {
		set_64bit_val(wqe, offset,
			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
		set_64bit_val(wqe, offset + 8,
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->len) |
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->stag));
	} else {
		set_64bit_val(wqe, offset, 0);
		set_64bit_val(wqe, offset + 8, 0);
	}
}

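/*
 * Illustrative sketch (not driver code): each scatter/gather fragment
 * occupies 16 bytes of the WQE. The first 8 bytes carry the buffer address
 * (tag_off); the second 8 bytes carry length, stag and, on GEN_2 and later
 * hardware, the valid bit. A caller placing the second fragment of a WQE
 * would do, assuming a populated struct irdma_sge sge:
 *
 *	qp->wqe_ops.iw_set_fragment(wqe, 32, &sge, qp->swqe_polarity);
 *
 * Passing a NULL sge writes a zeroed fragment that still carries the valid
 * bit, which is how callers below mark an unused trailing fragment slot.
 */
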
/**
 * irdma_nop_1 - insert a NOP wqe
 * @qp: hw qp ptr
 */
static enum irdma_status_code irdma_nop_1(struct irdma_qp_uk *qp)
{
	u64 hdr;
	__le64 *wqe;
	u32 wqe_idx;
	bool signaled = false;

	if (!qp->sq_ring.head)
		return IRDMA_ERR_PARAM;

	wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	wqe = qp->sq_base[wqe_idx].elem;

	qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA;

	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	/* make sure WQE is written before valid bit is set */
	dma_wmb();

	set_64bit_val(wqe, 24, hdr);

	return 0;
}

/**
 * irdma_clr_wqes - clear next 128 sq entries
 * @qp: hw qp ptr
 * @qp_wqe_idx: wqe_idx
 */
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
{
	__le64 *wqe;
	u32 wqe_idx;

	if (!(qp_wqe_idx & 0x7F)) {
		wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
		wqe = qp->sq_base[wqe_idx].elem;
		if (wqe_idx)
			memset(wqe, qp->swqe_polarity ? 0 : 0xFF, 0x1000);
		else
			memset(wqe, qp->swqe_polarity ? 0xFF : 0, 0x1000);
	}
}

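/*
 * Note: irdma_clr_wqes() runs only when the posted index crosses a 128-WQE
 * boundary (qp_wqe_idx & 0x7F == 0). It then pre-fills the next 128
 * minimum-size WQEs (128 * 32 bytes = 0x1000) with the inverse of the
 * polarity they will eventually be written with, so a stale valid bit can
 * never be mistaken for new work; the fill value flips when the cleared
 * window wraps to index 0, matching the polarity toggle on ring wrap in
 * irdma_qp_get_next_send_wqe().
 */
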
/**
 * irdma_uk_qp_post_wr - ring doorbell
 * @qp: hw qp ptr
 */
void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
{
	u64 temp;
	u32 hw_sq_tail;
	u32 sw_sq_head;

	/* valid bit is written and loads completed before reading shadow */
	mb();

	/* read the doorbell shadow area */
	get_64bit_val(qp->shadow_area, 0, &temp);

	hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
	sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	if (sw_sq_head != qp->initial_ring.head) {
		if (qp->push_dropped) {
			writel(qp->qp_id, qp->wqe_alloc_db);
			qp->push_dropped = false;
		} else if (sw_sq_head != hw_sq_tail) {
			if (sw_sq_head > qp->initial_ring.head) {
				if (hw_sq_tail >= qp->initial_ring.head &&
				    hw_sq_tail < sw_sq_head)
					writel(qp->qp_id, qp->wqe_alloc_db);
			} else {
				if (hw_sq_tail >= qp->initial_ring.head ||
				    hw_sq_tail < sw_sq_head)
					writel(qp->qp_id, qp->wqe_alloc_db);
			}
		}
	}

	qp->initial_ring.head = qp->sq_ring.head;
}

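/*
 * Illustrative sketch of the doorbell decision above (not driver code):
 * with a ring of size 64, initial_ring.head = 60 and a new sw_sq_head of 4
 * (the ring wrapped), the wrapped branch rings the doorbell whenever the
 * hw_sq_tail reported in the shadow area lies in [60, 63] or [0, 3], i.e.
 * the hardware's fetch point is still inside the window of newly posted
 * WQEs. The non-wrapped branch is the same interval test without the wrap.
 */
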
/**
 * irdma_qp_ring_push_db - ring qp doorbell
 * @qp: hw qp ptr
 * @wqe_idx: wqe index
 */
static void irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx)
{
	set_32bit_val(qp->push_db, 0,
		      FIELD_PREP(IRDMA_WQEALLOC_WQE_DESC_INDEX, wqe_idx >> 3) | qp->qp_id);
	qp->initial_ring.head = qp->sq_ring.head;
	qp->push_mode = true;
	qp->push_dropped = false;
}

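/**
 * irdma_qp_push_wqe - copy a wqe to the push page and ring the push doorbell
 * @qp: hw qp ptr
 * @wqe: wqe to push
 * @quanta: number of wqe quanta
 * @wqe_idx: wqe index
 * @post_sq: flag to post sq
 */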
void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
		       u32 wqe_idx, bool post_sq)
{
	__le64 *push;

	if (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) !=
		    IRDMA_RING_CURRENT_TAIL(qp->sq_ring) &&
	    !qp->push_mode) {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	} else {
		push = (__le64 *)((uintptr_t)qp->push_wqe +
				  (wqe_idx & 0x7) * 0x20);
		memcpy(push, wqe, quanta * IRDMA_QP_WQE_MIN_SIZE);
		irdma_qp_ring_push_db(qp, wqe_idx);
	}
}

/**
 * irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 * @quanta: size of WR in quanta
 * @total_size: size of WR in bytes
 * @info: info on WR
 */
__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
				   u16 quanta, u32 total_size,
				   struct irdma_post_sq_info *info)
{
	__le64 *wqe;
	__le64 *wqe_0 = NULL;
	u32 nop_wqe_idx;
	u16 avail_quanta;
	u16 i;

	avail_quanta = qp->uk_attrs->max_hw_sq_chunk -
		       (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) %
		       qp->uk_attrs->max_hw_sq_chunk);
	if (quanta <= avail_quanta) {
		/* WR fits in current chunk */
		if (quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
			return NULL;
	} else {
		/* Need to pad with NOP */
		if (quanta + avail_quanta >
			IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
			return NULL;

		nop_wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
		for (i = 0; i < avail_quanta; i++) {
			irdma_nop_1(qp);
			IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);
		}
		if (qp->push_db && info->push_wqe)
			irdma_qp_push_wqe(qp, qp->sq_base[nop_wqe_idx].elem,
					  avail_quanta, nop_wqe_idx, true);
	}

	*wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	if (!*wqe_idx)
		qp->swqe_polarity = !qp->swqe_polarity;

	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, quanta);

	wqe = qp->sq_base[*wqe_idx].elem;
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && quanta == 1 &&
	    (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {
		wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;
		wqe_0[3] = cpu_to_le64(FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity ? 0 : 1));
	}
	qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
	qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
	qp->sq_wrtrk_array[*wqe_idx].quanta = quanta;

	return wqe;
}

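/*
 * Illustrative usage sketch (not driver code): every SQ verb below follows
 * the same pattern around this helper, once quanta and total_size have been
 * computed for the work request:
 *
 *	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
 *					 info);
 *	if (!wqe)
 *		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
 *	irdma_clr_wqes(qp, wqe_idx);
 *	... fill fragments / inline data ...
 *	dma_wmb();			// contents visible before valid bit
 *	set_64bit_val(wqe, 24, hdr);	// header with valid bit goes last
 */
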
/**
 * irdma_qp_get_next_recv_wqe - get next qp's rcv wqe
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 */
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
{
	__le64 *wqe;
	enum irdma_status_code ret_code;

	if (IRDMA_RING_FULL_ERR(qp->rq_ring))
		return NULL;

	IRDMA_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
	if (ret_code)
		return NULL;

	if (!*wqe_idx)
		qp->rwqe_polarity = !qp->rwqe_polarity;
	/* rq_wqe_size_multiplier is no of 32 byte quanta in one rq wqe */
	wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem;

	return wqe;
}

/**
 * irdma_uk_rdma_write - rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
					   struct irdma_post_sq_info *info,
					   bool post_sq)
{
	u64 hdr;
	__le64 *wqe;
	struct irdma_rdma_write *op_info;
	u32 i, wqe_idx;
	u32 total_size = 0, byte_off;
	enum irdma_status_code ret_code;
	u32 frag_cnt, addl_frag_cnt;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.rdma_write;
	if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
		return IRDMA_ERR_INVALID_FRAG_COUNT;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].len;

	read_fence |= info->read_fence;

	if (info->imm_data_valid)
		frag_cnt = op_info->num_lo_sges + 1;
	else
		frag_cnt = op_info->num_lo_sges;
	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

	irdma_clr_wqes(qp, wqe_idx);

	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));

	if (info->imm_data_valid) {
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
		i = 0;
	} else {
		qp->wqe_ops.iw_set_fragment(wqe, 0,
					    op_info->lo_sg_list,
					    qp->swqe_polarity);
		i = 1;
	}

	for (byte_off = 32; i < op_info->num_lo_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* even number of fragments: set valid bit in the unused trailing fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
	    frag_cnt) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

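/*
 * Note: when immediate data is present, the first 16-byte slot of the WQE
 * carries the immediate value and the sg list starts one fragment later;
 * that is why frag_cnt above is num_lo_sges + 1 when imm_data_valid is set,
 * and why the fragment copy loop starts at i = 0 or i = 1 accordingly. The
 * same pattern appears in irdma_uk_send() below.
 */
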
/**
 * irdma_uk_rdma_read - rdma read command
 * @qp: hw qp ptr
 * @info: post sq information
 * @inv_stag: flag for inv_stag
 * @post_sq: flag to post sq
 */
enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
					  struct irdma_post_sq_info *info,
					  bool inv_stag, bool post_sq)
{
	struct irdma_rdma_read *op_info;
	enum irdma_status_code ret_code;
	u32 i, byte_off, total_size = 0;
	bool local_fence = false;
	u32 addl_frag_cnt;
	__le64 *wqe;
	u32 wqe_idx;
	u16 quanta;
	u64 hdr;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.rdma_read;
	if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
		return IRDMA_ERR_INVALID_FRAG_COUNT;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].len;

	ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

	irdma_clr_wqes(qp, wqe_idx);

	addl_frag_cnt = op_info->num_lo_sges > 1 ?
			(op_info->num_lo_sges - 1) : 0;
	local_fence |= info->local_fence;

	qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->lo_sg_list,
				    qp->swqe_polarity);
	for (i = 1, byte_off = 32; i < op_info->num_lo_sges; ++i) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* even number of fragments: set valid bit in the unused trailing fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 &&
	    !(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}
	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE,
			 (inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_send - rdma send command
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp,
				     struct irdma_post_sq_info *info,
				     bool post_sq)
{
	__le64 *wqe;
	struct irdma_post_send *op_info;
	u64 hdr;
	u32 i, wqe_idx, total_size = 0, byte_off;
	enum irdma_status_code ret_code;
	u32 frag_cnt, addl_frag_cnt;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.send;
	if (qp->max_sq_frag_cnt < op_info->num_sges)
		return IRDMA_ERR_INVALID_FRAG_COUNT;

	for (i = 0; i < op_info->num_sges; i++)
		total_size += op_info->sg_list[i].len;

	if (info->imm_data_valid)
		frag_cnt = op_info->num_sges + 1;
	else
		frag_cnt = op_info->num_sges;
	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

	irdma_clr_wqes(qp, wqe_idx);

	read_fence |= info->read_fence;
	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
	if (info->imm_data_valid) {
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
		i = 0;
	} else {
		qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->sg_list,
					    qp->swqe_polarity);
		i = 1;
	}

	for (byte_off = 32; i < op_info->num_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* even number of fragments: set valid bit in the unused trailing fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
	    frag_cnt) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
	      FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
			 (info->imm_data_valid ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
	      FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_set_mw_bind_wqe_gen_1 - set mw bind wqe
 * @wqe: wqe for setting fragment
 * @op_info: info for setting bind wqe values
 */
static void irdma_set_mw_bind_wqe_gen_1(__le64 *wqe,
					struct irdma_bind_window *op_info)
{
	set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mw_stag) |
		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mr_stag));
	set_64bit_val(wqe, 16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data_gen_1 - Copy inline data to wqe
 * @dest: pointer to wqe
 * @src: pointer to inline data
 * @len: length of inline data to copy
 * @polarity: compatibility parameter
 */
static void irdma_copy_inline_data_gen_1(u8 *dest, u8 *src, u32 len,
					 u8 polarity)
{
	if (len <= 16) {
		memcpy(dest, src, len);
	} else {
		memcpy(dest, src, 16);
		src += 16;
		dest = dest + 32;
		memcpy(dest, src, len - 16);
	}
}

/**
 * irdma_inline_data_size_to_quanta_gen_1 - based on inline data, quanta
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static inline u16 irdma_inline_data_size_to_quanta_gen_1(u32 data_size)
{
	return data_size <= 16 ? IRDMA_QP_WQE_MIN_QUANTA : 2;
}

/**
 * irdma_set_mw_bind_wqe - set mw bind in wqe
 * @wqe: wqe for setting mw bind
 * @op_info: info for setting wqe values
 */
static void irdma_set_mw_bind_wqe(__le64 *wqe,
				  struct irdma_bind_window *op_info)
{
	set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mr_stag) |
		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mw_stag));
	set_64bit_val(wqe, 16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data - Copy inline data to wqe
 * @dest: pointer to wqe
 * @src: pointer to inline data
 * @len: length of inline data to copy
 * @polarity: polarity of wqe valid bit
 */
static void irdma_copy_inline_data(u8 *dest, u8 *src, u32 len, u8 polarity)
{
	u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
	u32 copy_size;

	dest += 8;
	if (len <= 8) {
		memcpy(dest, src, len);
		return;
	}

	*((u64 *)dest) = *((u64 *)src);
	len -= 8;
	src += 8;
	dest += 24; /* point to additional 32 byte quanta */

	while (len) {
		copy_size = len < 31 ? len : 31;
		memcpy(dest, src, copy_size);
		*(dest + 31) = inline_valid;
		len -= copy_size;
		dest += 32;
		src += copy_size;
	}
}

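/*
 * Inline data layout produced above: the first quantum holds up to 8 data
 * bytes at offset 8 (the WQE header lives at offset 24); every additional
 * 32-byte quantum holds up to 31 data bytes with the per-quantum valid bit
 * in its last byte. That gives the capacities used by
 * irdma_inline_data_size_to_quanta() below, 8 + 31 * (quanta - 1):
 * 8, 39, 70, 101, ... data bytes for 1, 2, 3, 4, ... quanta.
 */
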
/**
 * irdma_inline_data_size_to_quanta - based on inline data, quanta
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static u16 irdma_inline_data_size_to_quanta(u32 data_size)
{
	if (data_size <= 8)
		return IRDMA_QP_WQE_MIN_QUANTA;
	else if (data_size <= 39)
		return 2;
	else if (data_size <= 70)
		return 3;
	else if (data_size <= 101)
		return 4;
	else if (data_size <= 132)
		return 5;
	else if (data_size <= 163)
		return 6;
	else if (data_size <= 194)
		return 7;
	else
		return 8;
}

/**
 * irdma_uk_inline_rdma_write - inline rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
enum irdma_status_code
irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
			   bool post_sq)
{
	__le64 *wqe;
	struct irdma_inline_rdma_write *op_info;
	u64 hdr = 0;
	u32 wqe_idx;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.inline_rdma_write;

	if (op_info->len > qp->max_inline_data)
		return IRDMA_ERR_INVALID_INLINE_DATA_SIZE;

	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
					 info);
	if (!wqe)
		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

	irdma_clr_wqes(qp, wqe_idx);

	read_fence |= info->read_fence;
	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));

	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, op_info->len) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe ? 1 : 0) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	if (info->imm_data_valid)
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));

	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len,
					qp->swqe_polarity);
	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_inline_send - inline send operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp,
					    struct irdma_post_sq_info *info,
					    bool post_sq)
{
	__le64 *wqe;
	struct irdma_post_inline_send *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.inline_send;

	if (op_info->len > qp->max_inline_data)
		return IRDMA_ERR_INVALID_INLINE_DATA_SIZE;

	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
					 info);
	if (!wqe)
		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

	irdma_clr_wqes(qp, wqe_idx);

	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));

	read_fence |= info->read_fence;
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
	      FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, op_info->len) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
			 (info->imm_data_valid ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
	      FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	if (info->imm_data_valid)
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len,
					qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_stag_local_invalidate - stag invalidate operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
enum irdma_status_code
irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
			       struct irdma_post_sq_info *info, bool post_sq)
{
	__le64 *wqe;
	struct irdma_inv_local_stag *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool local_fence = false;
	struct irdma_sge sge = {};

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.inv_local_stag;
	local_fence = info->local_fence;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
					 0, info);
	if (!wqe)
		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

	irdma_clr_wqes(qp, wqe_idx);

	sge.stag = op_info->target_stag;
	qp->wqe_ops.iw_set_fragment(wqe, 0, &sge, 0);

	set_64bit_val(wqe, 16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,
				  post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_mw_bind - bind Memory Window
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
enum irdma_status_code irdma_uk_mw_bind(struct irdma_qp_uk *qp,
					struct irdma_post_sq_info *info,
					bool post_sq)
{
	__le64 *wqe;
	struct irdma_bind_window *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool local_fence = false;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.bind_window;
	local_fence |= info->local_fence;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
					 0, info);
	if (!wqe)
		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

	irdma_clr_wqes(qp, wqe_idx);

	qp->wqe_ops.iw_set_mw_bind_wqe(wqe, op_info);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_BIND_MW) |
	      FIELD_PREP(IRDMAQPSQ_STAGRIGHTS,
			 ((op_info->ena_reads << 2) | (op_info->ena_writes << 3))) |
	      FIELD_PREP(IRDMAQPSQ_VABASEDTO,
			 (op_info->addressing_type == IRDMA_ADDR_TYPE_VA_BASED ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_MEMWINDOWTYPE,
			 (op_info->mem_window_type_1 ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,
				  post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_post_receive - post receive wqe
 * @qp: hw qp ptr
 * @info: post rq information
 */
enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
					     struct irdma_post_rq_info *info)
{
	u32 total_size = 0, wqe_idx, i, byte_off;
	u32 addl_frag_cnt;
	__le64 *wqe;
	u64 hdr;

	if (qp->max_rq_frag_cnt < info->num_sges)
		return IRDMA_ERR_INVALID_FRAG_COUNT;

	for (i = 0; i < info->num_sges; i++)
		total_size += info->sg_list[i].len;

	wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
	if (!wqe)
		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

	qp->rq_wrid_array[wqe_idx] = info->wr_id;
	addl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0;
	qp->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list,
				    qp->rwqe_polarity);

	for (i = 1, byte_off = 32; i < info->num_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
					    qp->rwqe_polarity);
		byte_off += 16;
	}

	/* even number of fragments: set valid bit in the unused trailing fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
	    info->num_sges) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->rwqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	set_64bit_val(wqe, 16, 0);
	hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->rwqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	return 0;
}

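/*
 * Illustrative usage sketch (not driver code), assuming a populated
 * struct irdma_sge sge describing the receive buffer and a caller-chosen
 * my_wr_id cookie:
 *
 *	struct irdma_post_rq_info rq_info = {};
 *
 *	rq_info.wr_id = my_wr_id;
 *	rq_info.sg_list = &sge;
 *	rq_info.num_sges = 1;
 *	if (irdma_uk_post_receive(qp, &rq_info))
 *		;	// RQ full or too many fragments
 */
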
/**
 * irdma_uk_cq_resize - reset the cq buffer info
 * @cq: cq to resize
 * @cq_base: new cq buffer addr
 * @cq_size: number of cqes
 */
void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)
{
	cq->cq_base = cq_base;
	cq->cq_size = cq_size;
	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->polarity = 1;
}

/**
 * irdma_uk_cq_set_resized_cnt - record the count of the resized buffers
 * @cq: cq to resize
 * @cq_cnt: the count of the resized cq buffers
 */
void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se;
	u8 arm_next;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, 32, &temp_val);

	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
	sw_cq_sel += cq_cnt;

	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
	arm_next = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT, temp_val);

	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
		   FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

	set_64bit_val(cq->shadow_area, 32, temp_val);
}

/**
 * irdma_uk_cq_request_notification - cq notification request (door bell)
 * @cq: hw cq
 * @cq_notify: notification type
 */
void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
				      enum irdma_cmpl_notify cq_notify)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se = 0;
	u8 arm_next = 0;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, 32, &temp_val);
	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
	arm_seq_num++;
	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
	arm_next_se |= 1;
	if (cq_notify == IRDMA_CQ_COMPL_EVENT)
		arm_next = 1;
	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
		   FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

	set_64bit_val(cq->shadow_area, 32, temp_val);

	dma_wmb(); /* make sure shadow area is written before ringing doorbell */

	writel(cq->cq_id, cq->cqe_alloc_db);
}

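/*
 * Note: arming is tracked entirely in the CQ shadow area read back by
 * hardware: arm_seq_num increments on every request, arm_next_se is always
 * set (arm on the next solicited event), and arm_next is additionally set
 * for IRDMA_CQ_COMPL_EVENT so the next completion of any kind raises an
 * event.
 */
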
/**
 * irdma_uk_cq_poll_cmpl - get cq completion info
 * @cq: hw cq
 * @info: cq poll information returned
 */
enum irdma_status_code
irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
{
	u64 comp_ctx, qword0, qword2, qword3;
	__le64 *cqe;
	struct irdma_qp_uk *qp;
	struct irdma_ring *pring = NULL;
	u32 wqe_idx, q_type;
	enum irdma_status_code ret_code;
	bool move_cq_head = true;
	u8 polarity;
	bool ext_valid;
	__le64 *ext_cqe;

	if (cq->avoid_mem_cflct)
		cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
	else
		cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);

	get_64bit_val(cqe, 24, &qword3);
	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
	if (polarity != cq->polarity)
		return IRDMA_ERR_Q_EMPTY;

	/* Ensure CQE contents are read after valid bit is checked */
	dma_rmb();

	ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
	if (ext_valid) {
		u64 qword6, qword7;
		u32 peek_head;

		if (cq->avoid_mem_cflct) {
			ext_cqe = (__le64 *)((u8 *)cqe + 32);
			get_64bit_val(ext_cqe, 24, &qword7);
			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
		} else {
			peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
			ext_cqe = cq->cq_base[peek_head].buf;
			get_64bit_val(ext_cqe, 24, &qword7);
			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
			if (!peek_head)
				polarity ^= 1;
		}
		if (polarity != cq->polarity)
			return IRDMA_ERR_Q_EMPTY;

		/* Ensure ext CQE contents are read after ext valid bit is checked */
		dma_rmb();

		info->imm_valid = (bool)FIELD_GET(IRDMA_CQ_IMMVALID, qword7);
		if (info->imm_valid) {
			u64 qword4;

			get_64bit_val(ext_cqe, 0, &qword4);
			info->imm_data = (u32)FIELD_GET(IRDMA_CQ_IMMDATALOW32, qword4);
		}
		info->ud_smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
		info->ud_vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
		if (info->ud_smac_valid || info->ud_vlan_valid) {
			get_64bit_val(ext_cqe, 16, &qword6);
			if (info->ud_vlan_valid)
				info->ud_vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
			if (info->ud_smac_valid) {
				info->ud_smac[5] = qword6 & 0xFF;
				info->ud_smac[4] = (qword6 >> 8) & 0xFF;
				info->ud_smac[3] = (qword6 >> 16) & 0xFF;
				info->ud_smac[2] = (qword6 >> 24) & 0xFF;
				info->ud_smac[1] = (qword6 >> 32) & 0xFF;
				info->ud_smac[0] = (qword6 >> 40) & 0xFF;
			}
		}
	} else {
		info->imm_valid = false;
		info->ud_smac_valid = false;
		info->ud_vlan_valid = false;
	}

	q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
	info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
	info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3);
	info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
	if (info->error) {
		info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
		info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
		if (info->major_err == IRDMA_FLUSH_MAJOR_ERR) {
			info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
			/* Set the min error to standard flush error code for remaining cqes */
			if (info->minor_err != FLUSH_GENERAL_ERR) {
				qword3 &= ~IRDMA_CQ_MINERR;
				qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
				set_64bit_val(cqe, 24, qword3);
			}
		} else {
			info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
		}
	} else {
		info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
	}

	get_64bit_val(cqe, 0, &qword0);
	get_64bit_val(cqe, 16, &qword2);

	info->tcp_seq_num_rtt = (u32)FIELD_GET(IRDMACQ_TCPSEQNUMRTT, qword0);
	info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
	info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);

	get_64bit_val(cqe, 8, &comp_ctx);

	info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
	qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
	if (!qp || qp->destroy_pending) {
		ret_code = IRDMA_ERR_Q_DESTROYED;
		goto exit;
	}
	wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
	info->qp_handle = (irdma_qp_handle)(unsigned long)qp;

	if (q_type == IRDMA_CQE_QTYPE_RQ) {
		u32 array_idx;

		array_idx = wqe_idx / qp->rq_wqe_size_multiplier;

		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||
		    info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) {
			if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {
				ret_code = IRDMA_ERR_Q_EMPTY;
				goto exit;
			}

			info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
			array_idx = qp->rq_ring.tail;
		} else {
			info->wr_id = qp->rq_wrid_array[array_idx];
		}

		info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);

		if (info->imm_valid)
			info->op_type = IRDMA_OP_TYPE_REC_IMM;
		else
			info->op_type = IRDMA_OP_TYPE_REC;
		if (qword3 & IRDMACQ_STAG) {
			info->stag_invalid_set = true;
			info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
		} else {
			info->stag_invalid_set = false;
		}
		IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
			qp->rq_flush_seen = true;
			if (!IRDMA_RING_MORE_WORK(qp->rq_ring))
				qp->rq_flush_complete = true;
			else
				move_cq_head = false;
		}
		pring = &qp->rq_ring;
	} else { /* q_type is IRDMA_CQE_QTYPE_SQ */
		if (qp->first_sq_wq) {
			if (wqe_idx + 1 >= qp->conn_wqes)
				qp->first_sq_wq = false;

			if (wqe_idx < qp->conn_wqes && qp->sq_ring.head == qp->sq_ring.tail) {
				IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
				IRDMA_RING_MOVE_TAIL(cq->cq_ring);
				set_64bit_val(cq->shadow_area, 0,
					      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
				memset(info, 0,
				       sizeof(struct irdma_cq_poll_info));
				return irdma_uk_cq_poll_cmpl(cq, info);
			}
		}
		/* cease posting push mode on push drop */
		if (info->push_dropped) {
			qp->push_mode = false;
			qp->push_dropped = true;
		}
		if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
			info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
			if (!info->comp_status)
				info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
			info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
			IRDMA_RING_SET_TAIL(qp->sq_ring,
					    wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
		} else {
			if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
				ret_code = IRDMA_ERR_Q_EMPTY;
				goto exit;
			}

			do {
				__le64 *sw_wqe;
				u64 wqe_qword;
				u8 op_type;
				u32 tail;

				tail = qp->sq_ring.tail;
				sw_wqe = qp->sq_base[tail].elem;
				get_64bit_val(sw_wqe, 24,
					      &wqe_qword);
				op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
				info->op_type = op_type;
				IRDMA_RING_SET_TAIL(qp->sq_ring,
						    tail + qp->sq_wrtrk_array[tail].quanta);
				if (op_type != IRDMAQP_OP_NOP) {
					info->wr_id = qp->sq_wrtrk_array[tail].wrid;
					info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
					break;
				}
			} while (1);
			qp->sq_flush_seen = true;
			if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
				qp->sq_flush_complete = true;
		}
		pring = &qp->sq_ring;
	}

	ret_code = 0;

exit:
	if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED)
		if (pring && IRDMA_RING_MORE_WORK(*pring))
			move_cq_head = false;

	if (move_cq_head) {
		IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
		if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
			cq->polarity ^= 1;

		if (ext_valid && !cq->avoid_mem_cflct) {
			IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
			if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
				cq->polarity ^= 1;
		}

		IRDMA_RING_MOVE_TAIL(cq->cq_ring);
		if (!cq->avoid_mem_cflct && ext_valid)
			IRDMA_RING_MOVE_TAIL(cq->cq_ring);
		set_64bit_val(cq->shadow_area, 0,
			      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
	} else {
		qword3 &= ~IRDMA_CQ_WQEIDX;
		qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
		set_64bit_val(cqe, 24, qword3);
	}

	return ret_code;
}

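/*
 * Illustrative polling sketch (not driver code):
 *
 *	struct irdma_cq_poll_info cmpl;
 *	enum irdma_status_code ret;
 *
 *	do {
 *		ret = irdma_uk_cq_poll_cmpl(cq, &cmpl);
 *		if (!ret)
 *			;	// consume cmpl.wr_id, cmpl.op_type, ...
 *	} while (!ret);
 *	// IRDMA_ERR_Q_EMPTY here just means no completions are pending
 */
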
/**
 * irdma_qp_round_up - return wq depth rounded up to the next power of 2
 * @wqdepth: wq depth in quanta to round up
 */
static int irdma_qp_round_up(u32 wqdepth)
{
	int scount = 1;

	for (wqdepth--; scount <= 16; scount *= 2)
		wqdepth |= wqdepth >> scount;

	return ++wqdepth;
}

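/*
 * Note: this is the classic power-of-two round-up. After the decrement,
 * ORing in right-shifts of 1, 2, 4, 8 and 16 smears the highest set bit
 * across all lower bits of the 32-bit value, and the final increment lands
 * on the next power of two; e.g. 20 -> 19 (0b10011) -> 31 (0b11111) -> 32.
 */
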
/**
 * irdma_get_wqe_shift - get shift count for maximum wqe size
 * @uk_attrs: qp HW attributes
 * @sge: Maximum Scatter Gather Elements wqe
 * @inline_data: Maximum inline data size
 * @shift: Returns the shift needed based on sge
 *
 * Shift can be used to left shift the wqe size based on number of SGEs and
 * inline data size. For 1 SGE or inline data <= 8, shift = 0 (wqe size of 32
 * bytes). For 2 or 3 SGEs or inline data <= 39, shift = 1 (wqe size of 64
 * bytes). For 4-7 SGEs or inline data <= 101, shift = 2 (wqe size of 128
 * bytes). Otherwise shift = 3 (wqe size of 256 bytes).
 */
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
			 u32 inline_data, u8 *shift)
{
	*shift = 0;
	if (uk_attrs->hw_rev >= IRDMA_GEN_2) {
		if (sge > 1 || inline_data > 8) {
			if (sge < 4 && inline_data <= 39)
				*shift = 1;
			else if (sge < 8 && inline_data <= 101)
				*shift = 2;
			else
				*shift = 3;
		}
	} else if (sge > 1 || inline_data > 16) {
		*shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
	}
}

/*
 * irdma_get_sqdepth - get SQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @sq_size: SQ size
 * @shift: shift which determines size of WQE
 * @sqdepth: depth of SQ
 */
enum irdma_status_code irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs,
					 u32 sq_size, u8 shift, u32 *sqdepth)
{
	*sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD);

	if (*sqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
		*sqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
	else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
		return IRDMA_ERR_INVALID_SIZE;

	return 0;
}

/*
 * irdma_get_rqdepth - get RQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @rq_size: RQ size
 * @shift: shift which determines size of WQE
 * @rqdepth: depth of RQ
 */
enum irdma_status_code irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs,
					 u32 rq_size, u8 shift, u32 *rqdepth)
{
	*rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD);

	if (*rqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
		*rqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
	else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
		return IRDMA_ERR_INVALID_SIZE;

	return 0;
}

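/*
 * Worked example (actual IRDMA_SQ_RSVD/IRDMA_RQ_RSVD values live in defs.h):
 * for sq_size = 100 and shift = 1 the base depth is 200 quanta plus the
 * reserved slots, rounded up to the next power of two by
 * irdma_qp_round_up(), then clamped between IRDMA_QP_SW_MIN_WQSIZE << shift
 * and the device limit (max_hw_wq_quanta / max_hw_rq_quanta).
 */
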
static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
	.iw_copy_inline_data = irdma_copy_inline_data,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
	.iw_set_fragment = irdma_set_fragment,
	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe,
};

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = {
	.iw_copy_inline_data = irdma_copy_inline_data_gen_1,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1,
	.iw_set_fragment = irdma_set_fragment_gen_1,
	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe_gen_1,
};

/**
 * irdma_setup_connection_wqes - setup WQEs necessary to complete
 * connection.
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 */
static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
					struct irdma_qp_uk_init_info *info)
{
	u16 move_cnt = 1;

	if (!info->legacy_mode &&
	    (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE))
		move_cnt = 3;

	qp->conn_wqes = move_cnt;
	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
	IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
}

/**
 * irdma_uk_qp_init - initialize shared qp
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 *
 * Initializes the vars used in both user and kernel mode.
 * The size of the wqe depends on the maximum number of fragments
 * allowed. Then size of wqe * the number of wqes should be the
 * amount of memory allocated for sq and rq.
 */
enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp,
					struct irdma_qp_uk_init_info *info)
{
	enum irdma_status_code ret_code = 0;
	u32 sq_ring_size;
	u8 sqshift, rqshift;

	qp->uk_attrs = info->uk_attrs;
	if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
	    info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
		return IRDMA_ERR_INVALID_FRAG_COUNT;

	irdma_get_wqe_shift(qp->uk_attrs, info->max_rq_frag_cnt, 0, &rqshift);
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) {
		irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt,
				    info->max_inline_data, &sqshift);
		if (info->abi_ver > 4)
			rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
	} else {
		irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt + 1,
				    info->max_inline_data, &sqshift);
	}
	qp->qp_caps = info->qp_caps;
	qp->sq_base = info->sq;
	qp->rq_base = info->rq;
	qp->qp_type = info->type ? info->type : IRDMA_QP_TYPE_IWARP;
	qp->shadow_area = info->shadow_area;
	qp->sq_wrtrk_array = info->sq_wrtrk_array;

	qp->rq_wrid_array = info->rq_wrid_array;
	qp->wqe_alloc_db = info->wqe_alloc_db;
	qp->qp_id = info->qp_id;
	qp->sq_size = info->sq_size;
	qp->push_mode = false;
	qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
	sq_ring_size = qp->sq_size << sqshift;
	IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
	IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
	if (info->first_sq_wq) {
		irdma_setup_connection_wqes(qp, info);
		qp->swqe_polarity = 1;
		qp->first_sq_wq = true;
	} else {
		qp->swqe_polarity = 0;
	}
	qp->swqe_polarity_deferred = 1;
	qp->rwqe_polarity = 0;
	qp->rq_size = info->rq_size;
	qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
	qp->max_inline_data = info->max_inline_data;
	qp->rq_wqe_size = rqshift;
	IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
	qp->rq_wqe_size_multiplier = 1 << rqshift;
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
		qp->wqe_ops = iw_wqe_uk_ops_gen_1;
	else
		qp->wqe_ops = iw_wqe_uk_ops;
	return ret_code;
}

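/*
 * Note: the SQ ring is sized in quanta (sq_size << sqshift) while the RQ
 * ring is sized in WQEs, with rq_wqe_size_multiplier (1 << rqshift)
 * translating an RQ index into quanta; irdma_uk_cq_poll_cmpl() divides the
 * reported wqe_idx by the same multiplier to recover the rq_wrid_array
 * index.
 */
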
/**
 * irdma_uk_cq_init - initialize shared cq (user and kernel)
 * @cq: hw cq
 * @info: hw cq initialization info
 */
enum irdma_status_code irdma_uk_cq_init(struct irdma_cq_uk *cq,
					struct irdma_cq_uk_init_info *info)
{
	cq->cq_base = info->cq_base;
	cq->cq_id = info->cq_id;
	cq->cq_size = info->cq_size;
	cq->cqe_alloc_db = info->cqe_alloc_db;
	cq->cq_ack_db = info->cq_ack_db;
	cq->shadow_area = info->shadow_area;
	cq->avoid_mem_cflct = info->avoid_mem_cflct;
	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->polarity = 1;

	return 0;
}

/**
 * irdma_uk_clean_cq - clean cq entries
 * @q: completion context
 * @cq: cq to clean
 */
void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
{
	__le64 *cqe;
	u64 qword3, comp_ctx;
	u32 cq_head;
	u8 polarity, temp;

	cq_head = cq->cq_ring.head;
	temp = cq->polarity;
	do {
		if (cq->avoid_mem_cflct)
			cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;
		else
			cqe = cq->cq_base[cq_head].buf;
		get_64bit_val(cqe, 24, &qword3);
		polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);

		if (polarity != temp)
			break;

		get_64bit_val(cqe, 8, &comp_ctx);
		if ((void *)(unsigned long)comp_ctx == q)
			set_64bit_val(cqe, 8, 0);

		cq_head = (cq_head + 1) % cq->cq_ring.size;
		if (!cq_head)
			temp ^= 1;
	} while (true);
}

/**
 * irdma_nop - post a nop
 * @qp: hw qp ptr
 * @wr_id: work request id
 * @signaled: signaled for completion
 * @post_sq: ring doorbell
 */
enum irdma_status_code irdma_nop(struct irdma_qp_uk *qp, u64 wr_id,
				 bool signaled, bool post_sq)
{
	__le64 *wqe;
	u64 hdr;
	u32 wqe_idx;
	struct irdma_post_sq_info info = {};

	info.push_wqe = false;
	info.wr_id = wr_id;
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
					 0, &info);
	if (!wqe)
		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

	irdma_clr_wqes(qp, wqe_idx);

	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
 * @frag_cnt: number of fragments
 * @quanta: quanta for frag_cnt
 */
enum irdma_status_code irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*quanta = IRDMA_QP_WQE_MIN_QUANTA;
		break;
	case 2:
	case 3:
		*quanta = 2;
		break;
	case 4:
	case 5:
		*quanta = 3;
		break;
	case 6:
	case 7:
		*quanta = 4;
		break;
	case 8:
	case 9:
		*quanta = 5;
		break;
	case 10:
	case 11:
		*quanta = 6;
		break;
	case 12:
	case 13:
		*quanta = 7;
		break;
	case 14:
	case 15: /* when immediate data is present */
		*quanta = 8;
		break;
	default:
		return IRDMA_ERR_INVALID_FRAG_COUNT;
	}

	return 0;
}

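/*
 * Note: the table above is quanta = 1 + DIV_ROUND_UP(frag_cnt - 1, 2) for
 * frag_cnt >= 1: the first 32-byte quantum carries the header plus one
 * 16-byte fragment, and each additional quantum carries two more. The cap
 * of 15 fragments (8 quanta) covers the extra fragment consumed when
 * immediate data is present.
 */
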
/**
 * irdma_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
 * @frag_cnt: number of fragments
 * @wqe_size: size in bytes given frag_cnt
 */
enum irdma_status_code irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*wqe_size = 32;
		break;
	case 2:
	case 3:
		*wqe_size = 64;
		break;
	case 4:
	case 5:
	case 6:
	case 7:
		*wqe_size = 128;
		break;
	case 8:
	case 9:
	case 10:
	case 11:
	case 12:
	case 13:
	case 14:
		*wqe_size = 256;
		break;
	default:
		return IRDMA_ERR_INVALID_FRAG_COUNT;
	}

	return 0;
}