xref: /linux/drivers/infiniband/hw/irdma/uk.c (revision 55a42f78ffd386e01a5404419f8c5ded7db70a21)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2015 - 2021 Intel Corporation */
3 #include "osdep.h"
4 #include "defs.h"
5 #include "user.h"
6 #include "irdma.h"
7 
8 /**
9  * irdma_set_fragment - set fragment in wqe
10  * @wqe: wqe for setting fragment
11  * @offset: offset value
12  * @sge: sge length and stag
13  * @valid: wqe valid flag
14  */
15 static void irdma_set_fragment(__le64 *wqe, u32 offset, struct ib_sge *sge,
16 			       u8 valid)
17 {
18 	if (sge) {
19 		set_64bit_val(wqe, offset,
20 			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
21 		set_64bit_val(wqe, offset + 8,
22 			      FIELD_PREP(IRDMAQPSQ_VALID, valid) |
23 			      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->length) |
24 			      FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->lkey));
25 	} else {
26 		set_64bit_val(wqe, offset, 0);
27 		set_64bit_val(wqe, offset + 8,
28 			      FIELD_PREP(IRDMAQPSQ_VALID, valid));
29 	}
30 }
31 
32 /**
33  * irdma_set_fragment_gen_1 - set fragment in wqe
34  * @wqe: wqe for setting fragment
35  * @offset: offset value
36  * @sge: sge length and stag
37  * @valid: wqe valid flag
38  */
39 static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
40 				     struct ib_sge *sge, u8 valid)
41 {
42 	if (sge) {
43 		set_64bit_val(wqe, offset,
44 			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
45 		set_64bit_val(wqe, offset + 8,
46 			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->length) |
47 			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->lkey));
48 	} else {
49 		set_64bit_val(wqe, offset, 0);
50 		set_64bit_val(wqe, offset + 8, 0);
51 	}
52 }
53 
54 /**
55  * irdma_nop_1 - insert a NOP wqe
56  * @qp: hw qp ptr
57  */
58 static int irdma_nop_1(struct irdma_qp_uk *qp)
59 {
60 	u64 hdr;
61 	__le64 *wqe;
62 	u32 wqe_idx;
63 	bool signaled = false;
64 
65 	if (!qp->sq_ring.head)
66 		return -EINVAL;
67 
68 	wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
69 	wqe = qp->sq_base[wqe_idx].elem;
70 
71 	qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA;
72 
73 	set_64bit_val(wqe, 0, 0);
74 	set_64bit_val(wqe, 8, 0);
75 	set_64bit_val(wqe, 16, 0);
76 
77 	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
78 	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
79 	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
80 
81 	/* make sure WQE is written before valid bit is set */
82 	dma_wmb();
83 
84 	set_64bit_val(wqe, 24, hdr);
85 
86 	return 0;
87 }
88 
89 /**
90  * irdma_clr_wqes - clear next 128 sq entries
91  * @qp: hw qp ptr
92  * @qp_wqe_idx: index of the wqe being posted
93  */
94 void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
95 {
96 	struct irdma_qp_quanta *sq;
97 	u32 wqe_idx;
98 
99 	if (!(qp_wqe_idx & 0x7F)) {
100 		wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
101 		sq = qp->sq_base + wqe_idx;
102 		if (wqe_idx)
103 			memset(sq, qp->swqe_polarity ? 0 : 0xFF,
104 			       128 * sizeof(*sq));
105 		else
106 			memset(sq, qp->swqe_polarity ? 0xFF : 0,
107 			       128 * sizeof(*sq));
108 	}
109 }
110 
111 /**
112  * irdma_uk_qp_post_wr - ring doorbell
113  * @qp: hw qp ptr
114  */
115 void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
116 {
117 	u64 temp;
118 	u32 hw_sq_tail;
119 	u32 sw_sq_head;
120 
121 	/* valid bit is written and loads completed before reading shadow */
122 	mb();
123 
124 	/* read the doorbell shadow area */
125 	get_64bit_val(qp->shadow_area, 0, &temp);
126 
127 	hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
128 	sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
129 	if (sw_sq_head != qp->initial_ring.head) {
130 		if (sw_sq_head != hw_sq_tail) {
131 			if (sw_sq_head > qp->initial_ring.head) {
132 				if (hw_sq_tail >= qp->initial_ring.head &&
133 				    hw_sq_tail < sw_sq_head)
134 					writel(qp->qp_id, qp->wqe_alloc_db);
135 			} else {
136 				if (hw_sq_tail >= qp->initial_ring.head ||
137 				    hw_sq_tail < sw_sq_head)
138 					writel(qp->qp_id, qp->wqe_alloc_db);
139 			}
140 		}
141 	}
142 
143 	qp->initial_ring.head = qp->sq_ring.head;
144 }
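
/*
 * Illustrative sketch (not part of the driver): the nested checks above
 * amount to a wrap-aware range test -- ring the doorbell only when the HW
 * tail still lies in [initial_head, sw_head), i.e. HW has not yet caught
 * up with everything posted since the last doorbell. This is an assumed
 * equivalent rewrite for clarity, not upstream code.
 */
static bool irdma_example_db_needed(u32 hw_tail, u32 initial_head,
				    u32 sw_head)
{
	if (sw_head == initial_head || sw_head == hw_tail)
		return false;

	if (sw_head > initial_head)
		/* no wrap: tail must sit between old and new head */
		return hw_tail >= initial_head && hw_tail < sw_head;

	/* wrapped: the posted region spans the end of the ring */
	return hw_tail >= initial_head || hw_tail < sw_head;
}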
145 
146 /**
147  * irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
148  * @qp: hw qp ptr
149  * @wqe_idx: return wqe index
150  * @quanta: size of WR in quanta
151  * @total_size: size of WR in bytes
152  * @info: info on WR
153  */
154 __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
155 				   u16 quanta, u32 total_size,
156 				   struct irdma_post_sq_info *info)
157 {
158 	__le64 *wqe;
159 	__le64 *wqe_0 = NULL;
160 	u16 avail_quanta;
161 	u16 i;
162 
163 	avail_quanta = qp->uk_attrs->max_hw_sq_chunk -
164 		       (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) %
165 		       qp->uk_attrs->max_hw_sq_chunk);
166 	if (quanta <= avail_quanta) {
167 		/* WR fits in current chunk */
168 		if (quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
169 			return NULL;
170 	} else {
171 		/* Need to pad with NOP */
172 		if (quanta + avail_quanta >
173 			IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
174 			return NULL;
175 
176 		for (i = 0; i < avail_quanta; i++) {
177 			irdma_nop_1(qp);
178 			IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);
179 		}
180 	}
181 
182 	*wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
183 	if (!*wqe_idx)
184 		qp->swqe_polarity = !qp->swqe_polarity;
185 
186 	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, quanta);
187 
188 	wqe = qp->sq_base[*wqe_idx].elem;
189 	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && quanta == 1 &&
190 	    (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {
191 		wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;
192 		wqe_0[3] = cpu_to_le64(FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity ? 0 : 1));
193 	}
194 	qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
195 	qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
196 	qp->sq_wrtrk_array[*wqe_idx].quanta = quanta;
197 
198 	return wqe;
199 }
200 
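/**
 * irdma_srq_get_next_recv_wqe - get next srq's rcv wqe
 * @srq: hw srq ptr
 * @wqe_idx: return wqe index
 */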
201 __le64 *irdma_srq_get_next_recv_wqe(struct irdma_srq_uk *srq, u32 *wqe_idx)
202 {
203 	int ret_code;
204 	__le64 *wqe;
205 
206 	if (IRDMA_RING_FULL_ERR(srq->srq_ring))
207 		return NULL;
208 
209 	IRDMA_ATOMIC_RING_MOVE_HEAD(srq->srq_ring, *wqe_idx, ret_code);
210 	if (ret_code)
211 		return NULL;
212 
213 	if (!*wqe_idx)
214 		srq->srwqe_polarity = !srq->srwqe_polarity;
215 	/* wqe_size_multiplier is the number of 32 byte quanta in one srq wqe */
216 	wqe = srq->srq_base[*wqe_idx * (srq->wqe_size_multiplier)].elem;
217 
218 	return wqe;
219 }
220 
221 /**
222  * irdma_qp_get_next_recv_wqe - get next qp's rcv wqe
223  * @qp: hw qp ptr
224  * @wqe_idx: return wqe index
225  */
226 __le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
227 {
228 	__le64 *wqe;
229 	int ret_code;
230 
231 	if (IRDMA_RING_FULL_ERR(qp->rq_ring))
232 		return NULL;
233 
234 	IRDMA_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
235 	if (ret_code)
236 		return NULL;
237 
238 	if (!*wqe_idx)
239 		qp->rwqe_polarity = !qp->rwqe_polarity;
240 	/* rq_wqe_size_multiplier is the number of 32 byte quanta in one rq wqe */
241 	wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem;
242 
243 	return wqe;
244 }
245 
246 /**
247  * irdma_uk_rdma_write - rdma write operation
248  * @qp: hw qp ptr
249  * @info: post sq information
250  * @post_sq: flag to post sq
251  */
252 int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
253 			bool post_sq)
254 {
255 	u64 hdr;
256 	__le64 *wqe;
257 	struct irdma_rdma_write *op_info;
258 	u32 i, wqe_idx;
259 	u32 total_size = 0, byte_off;
260 	int ret_code;
261 	u32 frag_cnt, addl_frag_cnt;
262 	bool read_fence = false;
263 	u16 quanta;
264 
265 	op_info = &info->op.rdma_write;
266 	if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
267 		return -EINVAL;
268 
269 	for (i = 0; i < op_info->num_lo_sges; i++)
270 		total_size += op_info->lo_sg_list[i].length;
271 
272 	read_fence |= info->read_fence;
273 
274 	if (info->imm_data_valid)
275 		frag_cnt = op_info->num_lo_sges + 1;
276 	else
277 		frag_cnt = op_info->num_lo_sges;
278 	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
279 	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
280 	if (ret_code)
281 		return ret_code;
282 
283 	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
284 					 info);
285 	if (!wqe)
286 		return -ENOMEM;
287 
288 	irdma_clr_wqes(qp, wqe_idx);
289 
290 	set_64bit_val(wqe, 16,
291 		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
292 
293 	if (info->imm_data_valid) {
294 		set_64bit_val(wqe, 0,
295 			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
296 		i = 0;
297 	} else {
298 		qp->wqe_ops.iw_set_fragment(wqe, 0,
299 					    op_info->lo_sg_list,
300 					    qp->swqe_polarity);
301 		i = 1;
302 	}
303 
304 	for (byte_off = 32; i < op_info->num_lo_sges; i++) {
305 		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
306 					    &op_info->lo_sg_list[i],
307 					    qp->swqe_polarity);
308 		byte_off += 16;
309 	}
310 
311 	/* if frag_cnt is even, set the valid bit in the unused next fragment */
312 	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
313 	    frag_cnt) {
314 		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
315 					    qp->swqe_polarity);
316 		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
317 			++addl_frag_cnt;
318 	}
319 
320 	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
321 	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
322 	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
323 	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
324 	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
325 	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
326 	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
327 	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
328 	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
329 
330 	dma_wmb(); /* make sure WQE is populated before valid bit is set */
331 
332 	set_64bit_val(wqe, 24, hdr);
333 
334 	if (post_sq)
335 		irdma_uk_qp_post_wr(qp);
336 
337 	return 0;
338 }
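
/*
 * Usage sketch (illustrative, not from this file): a kernel caller fills
 * struct irdma_post_sq_info and hands it to irdma_uk_rdma_write(). The
 * field names follow the accesses above; irdma_example_post_write() and
 * the IRDMA_OP_TYPE_RDMA_WRITE opcode value are assumptions here.
 */
static int irdma_example_post_write(struct irdma_qp_uk *qp,
				    struct ib_sge *sge, u64 rem_va,
				    u32 rem_key)
{
	struct irdma_post_sq_info info = {};

	info.wr_id = 1;			/* echoed back at poll time */
	info.op_type = IRDMA_OP_TYPE_RDMA_WRITE; /* assumed opcode define */
	info.signaled = true;
	info.op.rdma_write.lo_sg_list = sge;
	info.op.rdma_write.num_lo_sges = 1;
	info.op.rdma_write.rem_addr.addr = rem_va;
	info.op.rdma_write.rem_addr.lkey = rem_key;

	return irdma_uk_rdma_write(qp, &info, true); /* post_sq rings db */
}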
339 
340 /**
341  * irdma_uk_atomic_fetch_add - atomic fetch and add operation
342  * @qp: hw qp ptr
343  * @info: post sq information
344  * @post_sq: flag to post sq
345  */
346 int irdma_uk_atomic_fetch_add(struct irdma_qp_uk *qp,
347 			      struct irdma_post_sq_info *info, bool post_sq)
348 {
349 	struct irdma_atomic_fetch_add *op_info;
350 	u32 total_size = 0;
351 	u16 quanta = 2;
352 	u32 wqe_idx;
353 	__le64 *wqe;
354 	u64 hdr;
355 
356 	op_info = &info->op.atomic_fetch_add;
357 	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
358 					 info);
359 	if (!wqe)
360 		return -ENOMEM;
361 
362 	set_64bit_val(wqe, 0, op_info->tagged_offset);
363 	set_64bit_val(wqe, 8,
364 		      FIELD_PREP(IRDMAQPSQ_STAG, op_info->stag));
365 	set_64bit_val(wqe, 16, op_info->remote_tagged_offset);
366 
367 	hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, 1) |
368 	      FIELD_PREP(IRDMAQPSQ_REMOTE_STAG, op_info->remote_stag) |
369 	      FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_ATOMIC_FETCH_ADD) |
370 	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
371 	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
372 	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
373 	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
374 
375 	set_64bit_val(wqe, 32, op_info->fetch_add_data_bytes);
376 	set_64bit_val(wqe, 40, 0);
377 	set_64bit_val(wqe, 48, 0);
378 	set_64bit_val(wqe, 56,
379 		      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity));
380 
381 	dma_wmb(); /* make sure WQE is populated before valid bit is set */
382 
383 	set_64bit_val(wqe, 24, hdr);
384 
385 	if (post_sq)
386 		irdma_uk_qp_post_wr(qp);
387 
388 	return 0;
389 }
390 
391 /**
392  * irdma_uk_atomic_compare_swap - atomic compare and swap operation
393  * @qp: hw qp ptr
394  * @info: post sq information
395  * @post_sq: flag to post sq
396  */
397 int irdma_uk_atomic_compare_swap(struct irdma_qp_uk *qp,
398 				 struct irdma_post_sq_info *info, bool post_sq)
399 {
400 	struct irdma_atomic_compare_swap *op_info;
401 	u32 total_size = 0;
402 	u16 quanta = 2;
403 	u32 wqe_idx;
404 	__le64 *wqe;
405 	u64 hdr;
406 
407 	op_info = &info->op.atomic_compare_swap;
408 	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
409 					 info);
410 	if (!wqe)
411 		return -ENOMEM;
412 
413 	set_64bit_val(wqe, 0, op_info->tagged_offset);
414 	set_64bit_val(wqe, 8,
415 		      FIELD_PREP(IRDMAQPSQ_STAG, op_info->stag));
416 	set_64bit_val(wqe, 16, op_info->remote_tagged_offset);
417 
418 	hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, 1) |
419 	      FIELD_PREP(IRDMAQPSQ_REMOTE_STAG, op_info->remote_stag) |
420 	      FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_ATOMIC_COMPARE_SWAP_ADD) |
421 	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
422 	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
423 	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
424 	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
425 
426 	set_64bit_val(wqe, 32, op_info->swap_data_bytes);
427 	set_64bit_val(wqe, 40, op_info->compare_data_bytes);
428 	set_64bit_val(wqe, 48, 0);
429 	set_64bit_val(wqe, 56,
430 		      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity));
431 
432 	dma_wmb(); /* make sure WQE is populated before valid bit is set */
433 
434 	set_64bit_val(wqe, 24, hdr);
435 
436 	if (post_sq)
437 		irdma_uk_qp_post_wr(qp);
438 
439 	return 0;
440 }
441 
442 /**
443  * irdma_uk_srq_post_receive - post a receive wqe to a shared rq
444  * @srq: shared rq ptr
445  * @info: post rq information
446  */
447 int irdma_uk_srq_post_receive(struct irdma_srq_uk *srq,
448 			      struct irdma_post_rq_info *info)
449 {
450 	u32 wqe_idx, i, byte_off;
451 	u32 addl_frag_cnt;
452 	__le64 *wqe;
453 	u64 hdr;
454 
455 	if (srq->max_srq_frag_cnt < info->num_sges)
456 		return -EINVAL;
457 
458 	wqe = irdma_srq_get_next_recv_wqe(srq, &wqe_idx);
459 	if (!wqe)
460 		return -ENOMEM;
461 
462 	addl_frag_cnt = info->num_sges > 1 ? info->num_sges - 1 : 0;
463 	srq->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list,
464 				     srq->srwqe_polarity);
465 
466 	for (i = 1, byte_off = 32; i < info->num_sges; i++) {
467 		srq->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
468 					     srq->srwqe_polarity);
469 		byte_off += 16;
470 	}
471 
472 	/* if num_sges is even, set the valid bit in the unused next fragment */
473 	if (srq->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
474 	    info->num_sges) {
475 		srq->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
476 					     srq->srwqe_polarity);
477 		if (srq->uk_attrs->hw_rev == IRDMA_GEN_2)
478 			++addl_frag_cnt;
479 	}
480 
481 	set_64bit_val(wqe, 16, (u64)info->wr_id);
482 	hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
483 	      FIELD_PREP(IRDMAQPSQ_VALID, srq->srwqe_polarity);
484 
485 	dma_wmb(); /* make sure WQE is populated before valid bit is set */
486 
487 	set_64bit_val(wqe, 24, hdr);
488 
489 	set_64bit_val(srq->shadow_area, 0, (wqe_idx + 1) % srq->srq_ring.size);
490 
491 	return 0;
492 }
493 
494 /**
495  * irdma_uk_rdma_read - rdma read command
496  * @qp: hw qp ptr
497  * @info: post sq information
498  * @inv_stag: flag for inv_stag
499  * @post_sq: flag to post sq
500  */
501 int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
502 		       bool inv_stag, bool post_sq)
503 {
504 	struct irdma_rdma_read *op_info;
505 	int ret_code;
506 	u32 i, byte_off, total_size = 0;
507 	bool local_fence = false;
508 	u32 addl_frag_cnt;
509 	__le64 *wqe;
510 	u32 wqe_idx;
511 	u16 quanta;
512 	u64 hdr;
513 
514 	op_info = &info->op.rdma_read;
515 	if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
516 		return -EINVAL;
517 
518 	for (i = 0; i < op_info->num_lo_sges; i++)
519 		total_size += op_info->lo_sg_list[i].length;
520 
521 	ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
522 	if (ret_code)
523 		return ret_code;
524 
525 	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
526 					 info);
527 	if (!wqe)
528 		return -ENOMEM;
529 
530 	irdma_clr_wqes(qp, wqe_idx);
531 
532 	addl_frag_cnt = op_info->num_lo_sges > 1 ?
533 			(op_info->num_lo_sges - 1) : 0;
534 	local_fence |= info->local_fence;
535 
536 	qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->lo_sg_list,
537 				    qp->swqe_polarity);
538 	for (i = 1, byte_off = 32; i < op_info->num_lo_sges; ++i) {
539 		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
540 					    &op_info->lo_sg_list[i],
541 					    qp->swqe_polarity);
542 		byte_off += 16;
543 	}
544 
545 	/* if num_lo_sges is even, set the valid bit in the unused next fragment */
546 	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 &&
547 	    !(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) {
548 		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
549 					    qp->swqe_polarity);
550 		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
551 			++addl_frag_cnt;
552 	}
553 	set_64bit_val(wqe, 16,
554 		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
555 	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
556 	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
557 	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
558 	      FIELD_PREP(IRDMAQPSQ_OPCODE,
559 			 (inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) |
560 	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
561 	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
562 	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
563 	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
564 
565 	dma_wmb(); /* make sure WQE is populated before valid bit is set */
566 
567 	set_64bit_val(wqe, 24, hdr);
568 
569 	if (post_sq)
570 		irdma_uk_qp_post_wr(qp);
571 
572 	return 0;
573 }
574 
575 /**
576  * irdma_uk_send - rdma send command
577  * @qp: hw qp ptr
578  * @info: post sq information
579  * @post_sq: flag to post sq
580  */
581 int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
582 		  bool post_sq)
583 {
584 	__le64 *wqe;
585 	struct irdma_post_send *op_info;
586 	u64 hdr;
587 	u32 i, wqe_idx, total_size = 0, byte_off;
588 	int ret_code;
589 	u32 frag_cnt, addl_frag_cnt;
590 	bool read_fence = false;
591 	u16 quanta;
592 
593 	op_info = &info->op.send;
594 	if (qp->max_sq_frag_cnt < op_info->num_sges)
595 		return -EINVAL;
596 
597 	for (i = 0; i < op_info->num_sges; i++)
598 		total_size += op_info->sg_list[i].length;
599 
600 	if (info->imm_data_valid)
601 		frag_cnt = op_info->num_sges + 1;
602 	else
603 		frag_cnt = op_info->num_sges;
604 	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
605 	if (ret_code)
606 		return ret_code;
607 
608 	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
609 					 info);
610 	if (!wqe)
611 		return -ENOMEM;
612 
613 	irdma_clr_wqes(qp, wqe_idx);
614 
615 	read_fence |= info->read_fence;
616 	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
617 	if (info->imm_data_valid) {
618 		set_64bit_val(wqe, 0,
619 			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
620 		i = 0;
621 	} else {
622 		qp->wqe_ops.iw_set_fragment(wqe, 0,
623 					    frag_cnt ? op_info->sg_list : NULL,
624 					    qp->swqe_polarity);
625 		i = 1;
626 	}
627 
628 	for (byte_off = 32; i < op_info->num_sges; i++) {
629 		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->sg_list[i],
630 					    qp->swqe_polarity);
631 		byte_off += 16;
632 	}
633 
634 	/* if frag_cnt is even, set the valid bit in the unused next fragment */
635 	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
636 	    frag_cnt) {
637 		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
638 					    qp->swqe_polarity);
639 		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
640 			++addl_frag_cnt;
641 	}
642 
643 	set_64bit_val(wqe, 16,
644 		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
645 		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
646 	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
647 	      FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
648 	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
649 			 (info->imm_data_valid ? 1 : 0)) |
650 	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
651 	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
652 	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
653 	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
654 	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
655 	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
656 	      FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
657 	      FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
658 	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
659 
660 	dma_wmb(); /* make sure WQE is populated before valid bit is set */
661 
662 	set_64bit_val(wqe, 24, hdr);
663 
664 	if (post_sq)
665 		irdma_uk_qp_post_wr(qp);
666 
667 	return 0;
668 }
669 
670 /**
671  * irdma_set_mw_bind_wqe_gen_1 - set mw bind wqe
672  * @wqe: wqe for setting fragment
673  * @op_info: info for setting bind wqe values
674  */
675 static void irdma_set_mw_bind_wqe_gen_1(__le64 *wqe,
676 					struct irdma_bind_window *op_info)
677 {
678 	set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
679 	set_64bit_val(wqe, 8,
680 		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mw_stag) |
681 		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mr_stag));
682 	set_64bit_val(wqe, 16, op_info->bind_len);
683 }
684 
685 /**
686  * irdma_copy_inline_data_gen_1 - Copy inline data to wqe
687  * @wqe: pointer to wqe
688  * @sge_list: table of pointers to inline data
689  * @num_sges: number of SGEs
690  * @polarity: compatibility parameter
691  */
692 static void irdma_copy_inline_data_gen_1(u8 *wqe, struct ib_sge *sge_list,
693 					 u32 num_sges, u8 polarity)
694 {
695 	u32 quanta_bytes_remaining = 16;
696 	int i;
697 
698 	for (i = 0; i < num_sges; i++) {
699 		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
700 		u32 sge_len = sge_list[i].length;
701 
702 		while (sge_len) {
703 			u32 bytes_copied;
704 
705 			bytes_copied = min(sge_len, quanta_bytes_remaining);
706 			memcpy(wqe, cur_sge, bytes_copied);
707 			wqe += bytes_copied;
708 			cur_sge += bytes_copied;
709 			quanta_bytes_remaining -= bytes_copied;
710 			sge_len -= bytes_copied;
711 
712 			if (!quanta_bytes_remaining) {
713 				/* Remaining inline bytes reside after hdr */
714 				wqe += 16;
715 				quanta_bytes_remaining = 32;
716 			}
717 		}
718 	}
719 }
720 
721 /**
722  * irdma_inline_data_size_to_quanta_gen_1 - calculate quanta from inline data size
723  * @data_size: data size for inline
724  *
725  * Gets the number of quanta needed for the given inline data size.
726  */
727 static inline u16 irdma_inline_data_size_to_quanta_gen_1(u32 data_size)
728 {
729 	return data_size <= 16 ? IRDMA_QP_WQE_MIN_QUANTA : 2;
730 }
731 
732 /**
733  * irdma_set_mw_bind_wqe - set mw bind in wqe
734  * @wqe: wqe for setting mw bind
735  * @op_info: info for setting wqe values
736  */
737 static void irdma_set_mw_bind_wqe(__le64 *wqe,
738 				  struct irdma_bind_window *op_info)
739 {
740 	set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
741 	set_64bit_val(wqe, 8,
742 		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mr_stag) |
743 		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mw_stag));
744 	set_64bit_val(wqe, 16, op_info->bind_len);
745 }
746 
747 /**
748  * irdma_copy_inline_data - Copy inline data to wqe
749  * @wqe: pointer to wqe
750  * @sge_list: table of pointers to inline data
751  * @num_sges: number of SGEs
752  * @polarity: polarity of wqe valid bit
753  */
754 static void irdma_copy_inline_data(u8 *wqe, struct ib_sge *sge_list,
755 				   u32 num_sges, u8 polarity)
756 {
757 	u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
758 	u32 quanta_bytes_remaining = 8;
759 	bool first_quanta = true;
760 	int i;
761 
762 	wqe += 8;
763 
764 	for (i = 0; i < num_sges; i++) {
765 		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
766 		u32 sge_len = sge_list[i].length;
767 
768 		while (sge_len) {
769 			u32 bytes_copied;
770 
771 			bytes_copied = min(sge_len, quanta_bytes_remaining);
772 			memcpy(wqe, cur_sge, bytes_copied);
773 			wqe += bytes_copied;
774 			cur_sge += bytes_copied;
775 			quanta_bytes_remaining -= bytes_copied;
776 			sge_len -= bytes_copied;
777 
778 			if (!quanta_bytes_remaining) {
779 				quanta_bytes_remaining = 31;
780 
781 				/* Remaining inline bytes reside after hdr */
782 				if (first_quanta) {
783 					first_quanta = false;
784 					wqe += 16;
785 				} else {
786 					*wqe = inline_valid;
787 					wqe++;
788 				}
789 			}
790 		}
791 	}
792 	if (!first_quanta && quanta_bytes_remaining < 31)
793 		*(wqe + quanta_bytes_remaining) = inline_valid;
794 }
795 
796 /**
797  * irdma_inline_data_size_to_quanta - calculate quanta from inline data size
798  * @data_size: data size for inline
799  *
800  * Gets the number of quanta needed for the given inline data size.
801  */
802 static u16 irdma_inline_data_size_to_quanta(u32 data_size)
803 {
804 	if (data_size <= 8)
805 		return IRDMA_QP_WQE_MIN_QUANTA;
806 	else if (data_size <= 39)
807 		return 2;
808 	else if (data_size <= 70)
809 		return 3;
810 	else if (data_size <= 101)
811 		return 4;
812 	else if (data_size <= 132)
813 		return 5;
814 	else if (data_size <= 163)
815 		return 6;
816 	else if (data_size <= 194)
817 		return 7;
818 	else
819 		return 8;
820 }
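
/*
 * Sketch of the layout behind the thresholds above (assumed equivalent,
 * not upstream code): per irdma_copy_inline_data(), quantum 0 carries 8
 * inline bytes (the rest holds rem_addr and the header) and each later
 * 32-byte quantum carries 31 data bytes plus one valid byte, so
 * capacity(q) = 8 + 31 * (q - 1), which reproduces 8, 39, 70, ... 225.
 */
static u16 irdma_example_inline_quanta(u32 data_size)
{
	if (data_size <= 8)
		return IRDMA_QP_WQE_MIN_QUANTA;
	if (data_size <= 39)
		return 2;

	return 2 + DIV_ROUND_UP(data_size - 39, 31);
}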
821 
822 /**
823  * irdma_uk_inline_rdma_write - inline rdma write operation
824  * @qp: hw qp ptr
825  * @info: post sq information
826  * @post_sq: flag to post sq
827  */
828 int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
829 			       struct irdma_post_sq_info *info, bool post_sq)
830 {
831 	__le64 *wqe;
832 	struct irdma_rdma_write *op_info;
833 	u64 hdr = 0;
834 	u32 wqe_idx;
835 	bool read_fence = false;
836 	u32 i, total_size = 0;
837 	u16 quanta;
838 
839 	op_info = &info->op.rdma_write;
840 
841 	if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges))
842 		return -EINVAL;
843 
844 	for (i = 0; i < op_info->num_lo_sges; i++)
845 		total_size += op_info->lo_sg_list[i].length;
846 
847 	if (unlikely(total_size > qp->max_inline_data))
848 		return -EINVAL;
849 
850 	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
851 	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
852 					 info);
853 	if (!wqe)
854 		return -ENOMEM;
855 
856 	irdma_clr_wqes(qp, wqe_idx);
857 
858 	read_fence |= info->read_fence;
859 	set_64bit_val(wqe, 16,
860 		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
861 
862 	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
863 	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
864 	      FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
865 	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
866 	      FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
867 	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
868 	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
869 	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
870 	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
871 	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
872 
873 	if (info->imm_data_valid)
874 		set_64bit_val(wqe, 0,
875 			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
876 
877 	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->lo_sg_list,
878 					op_info->num_lo_sges,
879 					qp->swqe_polarity);
880 	dma_wmb(); /* make sure WQE is populated before valid bit is set */
881 
882 	set_64bit_val(wqe, 24, hdr);
883 
884 	if (post_sq)
885 		irdma_uk_qp_post_wr(qp);
886 
887 	return 0;
888 }
889 
890 /**
891  * irdma_uk_inline_send - inline send operation
892  * @qp: hw qp ptr
893  * @info: post sq information
894  * @post_sq: flag to post sq
895  */
896 int irdma_uk_inline_send(struct irdma_qp_uk *qp,
897 			 struct irdma_post_sq_info *info, bool post_sq)
898 {
899 	__le64 *wqe;
900 	struct irdma_post_send *op_info;
901 	u64 hdr;
902 	u32 wqe_idx;
903 	bool read_fence = false;
904 	u32 i, total_size = 0;
905 	u16 quanta;
906 
907 	op_info = &info->op.send;
908 
909 	if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges))
910 		return -EINVAL;
911 
912 	for (i = 0; i < op_info->num_sges; i++)
913 		total_size += op_info->sg_list[i].length;
914 
915 	if (unlikely(total_size > qp->max_inline_data))
916 		return -EINVAL;
917 
918 	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
919 	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
920 					 info);
921 	if (!wqe)
922 		return -ENOMEM;
923 
924 	irdma_clr_wqes(qp, wqe_idx);
925 
926 	set_64bit_val(wqe, 16,
927 		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
928 		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
929 
930 	read_fence |= info->read_fence;
931 	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
932 	      FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
933 	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
934 	      FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
935 	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
936 			 (info->imm_data_valid ? 1 : 0)) |
937 	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
938 	      FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
939 	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
940 	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
941 	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
942 	      FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
943 	      FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
944 	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
945 
946 	if (info->imm_data_valid)
947 		set_64bit_val(wqe, 0,
948 			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
949 	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->sg_list,
950 					op_info->num_sges, qp->swqe_polarity);
951 
952 	dma_wmb(); /* make sure WQE is populated before valid bit is set */
953 
954 	set_64bit_val(wqe, 24, hdr);
955 
956 	if (post_sq)
957 		irdma_uk_qp_post_wr(qp);
958 
959 	return 0;
960 }
961 
962 /**
963  * irdma_uk_stag_local_invalidate - stag invalidate operation
964  * @qp: hw qp ptr
965  * @info: post sq information
966  * @post_sq: flag to post sq
967  */
968 int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
969 				   struct irdma_post_sq_info *info,
970 				   bool post_sq)
971 {
972 	__le64 *wqe;
973 	struct irdma_inv_local_stag *op_info;
974 	u64 hdr;
975 	u32 wqe_idx;
976 	bool local_fence = false;
977 	struct ib_sge sge = {};
978 
979 	op_info = &info->op.inv_local_stag;
980 	local_fence = info->local_fence;
981 
982 	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
983 					 0, info);
984 	if (!wqe)
985 		return -ENOMEM;
986 
987 	irdma_clr_wqes(qp, wqe_idx);
988 
989 	sge.lkey = op_info->target_stag;
990 	qp->wqe_ops.iw_set_fragment(wqe, 0, &sge, 0);
991 
992 	set_64bit_val(wqe, 16, 0);
993 
994 	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) |
995 	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
996 	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
997 	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
998 	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
999 
1000 	dma_wmb(); /* make sure WQE is populated before valid bit is set */
1001 
1002 	set_64bit_val(wqe, 24, hdr);
1003 
1004 	if (post_sq)
1005 		irdma_uk_qp_post_wr(qp);
1006 
1007 	return 0;
1008 }
1009 
1010 /**
1011  * irdma_uk_post_receive - post receive wqe
1012  * @qp: hw qp ptr
1013  * @info: post rq information
1014  */
1015 int irdma_uk_post_receive(struct irdma_qp_uk *qp,
1016 			  struct irdma_post_rq_info *info)
1017 {
1018 	u32 wqe_idx, i, byte_off;
1019 	u32 addl_frag_cnt;
1020 	__le64 *wqe;
1021 	u64 hdr;
1022 
1023 	if (qp->max_rq_frag_cnt < info->num_sges)
1024 		return -EINVAL;
1025 
1026 	wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
1027 	if (!wqe)
1028 		return -ENOMEM;
1029 
1030 	qp->rq_wrid_array[wqe_idx] = info->wr_id;
1031 	addl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0;
1032 	qp->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list,
1033 				    qp->rwqe_polarity);
1034 
1035 	for (i = 1, byte_off = 32; i < info->num_sges; i++) {
1036 		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
1037 					    qp->rwqe_polarity);
1038 		byte_off += 16;
1039 	}
1040 
1041 	/* if num_sges is even, set the valid bit in the unused next fragment */
1042 	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
1043 	    info->num_sges) {
1044 		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
1045 					    qp->rwqe_polarity);
1046 		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
1047 			++addl_frag_cnt;
1048 	}
1049 
1050 	set_64bit_val(wqe, 16, 0);
1051 	hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
1052 	      FIELD_PREP(IRDMAQPSQ_VALID, qp->rwqe_polarity);
1053 
1054 	dma_wmb(); /* make sure WQE is populated before valid bit is set */
1055 
1056 	set_64bit_val(wqe, 24, hdr);
1057 
1058 	return 0;
1059 }
1060 
1061 /**
1062  * irdma_uk_cq_resize - reset the cq buffer info
1063  * @cq: cq to resize
1064  * @cq_base: new cq buffer addr
1065  * @cq_size: number of cqes
1066  */
1067 void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)
1068 {
1069 	cq->cq_base = cq_base;
1070 	cq->cq_size = cq_size;
1071 	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
1072 	cq->polarity = 1;
1073 }
1074 
1075 /**
1076  * irdma_uk_cq_set_resized_cnt - record the count of the resized buffers
1077  * @cq: cq to resize
1078  * @cq_cnt: the count of the resized cq buffers
1079  */
1080 void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt)
1081 {
1082 	u64 temp_val;
1083 	u16 sw_cq_sel;
1084 	u8 arm_next_se;
1085 	u8 arm_next;
1086 	u8 arm_seq_num;
1087 
1088 	get_64bit_val(cq->shadow_area, 32, &temp_val);
1089 
1090 	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
1091 	sw_cq_sel += cq_cnt;
1092 
1093 	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
1094 	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
1095 	arm_next = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT, temp_val);
1096 
1097 	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
1098 		   FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
1099 		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
1100 		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);
1101 
1102 	set_64bit_val(cq->shadow_area, 32, temp_val);
1103 }
1104 
1105 /**
1106  * irdma_uk_cq_request_notification - cq notification request (doorbell)
1107  * @cq: hw cq
1108  * @cq_notify: notification type
1109  */
1110 void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
1111 				      enum irdma_cmpl_notify cq_notify)
1112 {
1113 	u64 temp_val;
1114 	u16 sw_cq_sel;
1115 	u8 arm_next_se = 0;
1116 	u8 arm_next = 0;
1117 	u8 arm_seq_num;
1118 
1119 	get_64bit_val(cq->shadow_area, 32, &temp_val);
1120 	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
1121 	arm_seq_num++;
1122 	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
1123 	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
1124 	arm_next_se |= 1;
1125 	if (cq_notify == IRDMA_CQ_COMPL_EVENT)
1126 		arm_next = 1;
1127 	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
1128 		   FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
1129 		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
1130 		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);
1131 
1132 	set_64bit_val(cq->shadow_area, 32, temp_val);
1133 
1134 	dma_wmb(); /* make sure shadow area is updated before ringing doorbell */
1135 
1136 	writel(cq->cq_id, cq->cqe_alloc_db);
1137 }
1138 
1139 /**
1140  * irdma_uk_cq_poll_cmpl - get cq completion info
1141  * @cq: hw cq
1142  * @info: cq poll information returned
1143  */
1144 int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
1145 			  struct irdma_cq_poll_info *info)
1146 {
1147 	u64 comp_ctx, qword0, qword2, qword3;
1148 	__le64 *cqe;
1149 	struct irdma_qp_uk *qp;
1150 	struct irdma_srq_uk *srq;
1151 	struct qp_err_code qp_err;
1152 	u8 is_srq;
1153 	struct irdma_ring *pring = NULL;
1154 	u32 wqe_idx;
1155 	int ret_code;
1156 	bool move_cq_head = true;
1157 	u8 polarity;
1158 	bool ext_valid;
1159 	__le64 *ext_cqe;
1160 
1161 	if (cq->avoid_mem_cflct)
1162 		cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
1163 	else
1164 		cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);
1165 
1166 	get_64bit_val(cqe, 24, &qword3);
1167 	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
1168 	if (polarity != cq->polarity)
1169 		return -ENOENT;
1170 
1171 	/* Ensure CQE contents are read after valid bit is checked */
1172 	dma_rmb();
1173 
1174 	ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
1175 	if (ext_valid) {
1176 		u64 qword6, qword7;
1177 		u32 peek_head;
1178 
1179 		if (cq->avoid_mem_cflct) {
1180 			ext_cqe = (__le64 *)((u8 *)cqe + 32);
1181 			get_64bit_val(ext_cqe, 24, &qword7);
1182 			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
1183 		} else {
1184 			peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
1185 			ext_cqe = cq->cq_base[peek_head].buf;
1186 			get_64bit_val(ext_cqe, 24, &qword7);
1187 			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
1188 			if (!peek_head)
1189 				polarity ^= 1;
1190 		}
1191 		if (polarity != cq->polarity)
1192 			return -ENOENT;
1193 
1194 		/* Ensure ext CQE contents are read after ext valid bit is checked */
1195 		dma_rmb();
1196 
1197 		info->imm_valid = (bool)FIELD_GET(IRDMA_CQ_IMMVALID, qword7);
1198 		if (info->imm_valid) {
1199 			u64 qword4;
1200 
1201 			get_64bit_val(ext_cqe, 0, &qword4);
1202 			info->imm_data = (u32)FIELD_GET(IRDMA_CQ_IMMDATALOW32, qword4);
1203 		}
1204 		info->ud_smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
1205 		info->ud_vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
1206 		if (info->ud_smac_valid || info->ud_vlan_valid) {
1207 			get_64bit_val(ext_cqe, 16, &qword6);
1208 			if (info->ud_vlan_valid)
1209 				info->ud_vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
1210 			if (info->ud_smac_valid) {
1211 				info->ud_smac[5] = qword6 & 0xFF;
1212 				info->ud_smac[4] = (qword6 >> 8) & 0xFF;
1213 				info->ud_smac[3] = (qword6 >> 16) & 0xFF;
1214 				info->ud_smac[2] = (qword6 >> 24) & 0xFF;
1215 				info->ud_smac[1] = (qword6 >> 32) & 0xFF;
1216 				info->ud_smac[0] = (qword6 >> 40) & 0xFF;
1217 			}
1218 		}
1219 	} else {
1220 		info->imm_valid = false;
1221 		info->ud_smac_valid = false;
1222 		info->ud_vlan_valid = false;
1223 	}
1224 
1225 	info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
1226 	is_srq = (u8)FIELD_GET(IRDMA_CQ_SRQ, qword3);
1227 	info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
1228 	info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
1229 	get_64bit_val(cqe, 8, &comp_ctx);
1230 	if (is_srq)
1231 		get_64bit_val(cqe, 40, (u64 *)&qp);
1232 	else
1233 		qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
1234 	if (info->error) {
1235 		info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
1236 		info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
1237 		switch (info->major_err) {
1238 		case IRDMA_SRQFLUSH_RSVD_MAJOR_ERR:
1239 			qp_err = irdma_ae_to_qp_err_code(info->minor_err);
1240 			info->minor_err = qp_err.flush_code;
1241 			fallthrough;
1242 		case IRDMA_FLUSH_MAJOR_ERR:
1243 			/* Set the minor error to the standard flush error code for remaining cqes */
1244 			if (info->minor_err != FLUSH_GENERAL_ERR) {
1245 				qword3 &= ~IRDMA_CQ_MINERR;
1246 				qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
1247 				set_64bit_val(cqe, 24, qword3);
1248 			}
1249 			info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
1250 			break;
1251 		default:
1252 #define IRDMA_CIE_SIGNATURE 0xE
1253 #define IRDMA_CQMAJERR_HIGH_NIBBLE GENMASK(15, 12)
1254 			if (info->q_type == IRDMA_CQE_QTYPE_SQ &&
1255 			    qp->qp_type == IRDMA_QP_TYPE_ROCE_UD &&
1256 			    FIELD_GET(IRDMA_CQMAJERR_HIGH_NIBBLE, info->major_err)
1257 			    == IRDMA_CIE_SIGNATURE) {
1258 				info->error = 0;
1259 				info->major_err = 0;
1260 				info->minor_err = 0;
1261 				info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
1262 			} else {
1263 				info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
1264 			}
1265 			break;
1266 		}
1267 	} else {
1268 		info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
1269 	}
1270 
1271 	get_64bit_val(cqe, 0, &qword0);
1272 	get_64bit_val(cqe, 16, &qword2);
1273 
1274 	info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
1275 	info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);
1276 
1277 	info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
1278 	/* qp was already resolved above (cqe offset 40 for srq, where
1279 	 * offset 8 carries the wr_id; comp_ctx otherwise), so it is not
1280 	 * reread here */
1281 	if (!qp || qp->destroy_pending) {
1282 		ret_code = -EFAULT;
1283 		goto exit;
1284 	}
1285 	wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
1286 	info->qp_handle = (irdma_qp_handle)(unsigned long)qp;
1287 	info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
1288 
1289 	if (info->q_type == IRDMA_CQE_QTYPE_RQ && is_srq) {
1290 		srq = qp->srq_uk;
1291 
1292 		get_64bit_val(cqe, 8, &info->wr_id);
1293 		info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
1294 
1295 		if (qword3 & IRDMACQ_STAG) {
1296 			info->stag_invalid_set = true;
1297 			info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG,
1298 							qword2);
1299 		} else {
1300 			info->stag_invalid_set = false;
1301 		}
1302 		IRDMA_RING_MOVE_TAIL(srq->srq_ring);
1303 		pring = &srq->srq_ring;
1304 	} else if (info->q_type == IRDMA_CQE_QTYPE_RQ && !is_srq) {
1305 		u32 array_idx;
1306 
1307 		array_idx = wqe_idx / qp->rq_wqe_size_multiplier;
1308 
1309 		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||
1310 		    info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) {
1311 			if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {
1312 				ret_code = -ENOENT;
1313 				goto exit;
1314 			}
1315 
1316 			info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
1317 			array_idx = qp->rq_ring.tail;
1318 		} else {
1319 			info->wr_id = qp->rq_wrid_array[array_idx];
1320 		}
1321 
1322 		info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
1323 
1324 		if (qword3 & IRDMACQ_STAG) {
1325 			info->stag_invalid_set = true;
1326 			info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
1327 		} else {
1328 			info->stag_invalid_set = false;
1329 		}
1330 		IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
1331 		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
1332 			qp->rq_flush_seen = true;
1333 			if (!IRDMA_RING_MORE_WORK(qp->rq_ring))
1334 				qp->rq_flush_complete = true;
1335 			else
1336 				move_cq_head = false;
1337 		}
1338 		pring = &qp->rq_ring;
1339 	} else { /* q_type is IRDMA_CQE_QTYPE_SQ */
1340 		if (qp->first_sq_wq) {
1341 			if (wqe_idx + 1 >= qp->conn_wqes)
1342 				qp->first_sq_wq = false;
1343 
1344 			if (wqe_idx < qp->conn_wqes && qp->sq_ring.head == qp->sq_ring.tail) {
1345 				IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
1346 				IRDMA_RING_MOVE_TAIL(cq->cq_ring);
1347 				set_64bit_val(cq->shadow_area, 0,
1348 					      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
1349 				memset(info, 0,
1350 				       sizeof(struct irdma_cq_poll_info));
1351 				return irdma_uk_cq_poll_cmpl(cq, info);
1352 			}
1353 		}
1354 		if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
1355 			info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
1356 			if (!info->comp_status)
1357 				info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
1358 			info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
1359 			IRDMA_RING_SET_TAIL(qp->sq_ring,
1360 					    wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
1361 		} else {
1362 			if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
1363 				ret_code = -ENOENT;
1364 				goto exit;
1365 			}
1366 
1367 			do {
1368 				__le64 *sw_wqe;
1369 				u64 wqe_qword;
1370 				u32 tail;
1371 
1372 				tail = qp->sq_ring.tail;
1373 				sw_wqe = qp->sq_base[tail].elem;
1374 				get_64bit_val(sw_wqe, 24,
1375 					      &wqe_qword);
1376 				info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE,
1377 							      wqe_qword);
1378 				IRDMA_RING_SET_TAIL(qp->sq_ring,
1379 						    tail + qp->sq_wrtrk_array[tail].quanta);
1380 				if (info->op_type != IRDMAQP_OP_NOP) {
1381 					info->wr_id = qp->sq_wrtrk_array[tail].wrid;
1382 					info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
1383 					break;
1384 				}
1385 			} while (1);
1386 			if (info->op_type == IRDMA_OP_TYPE_BIND_MW &&
1387 			    info->minor_err == FLUSH_PROT_ERR)
1388 				info->minor_err = FLUSH_MW_BIND_ERR;
1389 			qp->sq_flush_seen = true;
1390 			if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
1391 				qp->sq_flush_complete = true;
1392 		}
1393 		pring = &qp->sq_ring;
1394 	}
1395 
1396 	ret_code = 0;
1397 
1398 exit:
1399 	if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
1400 		/* Park the CQ head during a flush so SW can generate
1401 		 * additional CQEs for all unprocessed WQEs. For GEN3 and
1402 		 * beyond, FW generates/flushes these CQEs, so move to the
1403 		 * next CQE.
1404 		 */
1405 		if (pring && IRDMA_RING_MORE_WORK(*pring))
1406 			move_cq_head = qp->uk_attrs->hw_rev > IRDMA_GEN_2;
1407 	}
1408 
1409 	if (move_cq_head) {
1410 		IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
1411 		if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
1412 			cq->polarity ^= 1;
1413 
1414 		if (ext_valid && !cq->avoid_mem_cflct) {
1415 			IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
1416 			if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
1417 				cq->polarity ^= 1;
1418 		}
1419 
1420 		IRDMA_RING_MOVE_TAIL(cq->cq_ring);
1421 		if (!cq->avoid_mem_cflct && ext_valid)
1422 			IRDMA_RING_MOVE_TAIL(cq->cq_ring);
1423 		set_64bit_val(cq->shadow_area, 0,
1424 			      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
1425 	} else {
1426 		qword3 &= ~IRDMA_CQ_WQEIDX;
1427 		qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
1428 		set_64bit_val(cqe, 24, qword3);
1429 	}
1430 
1431 	return ret_code;
1432 }
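
/*
 * Usage sketch (illustrative, not from this file): callers typically
 * drain the CQ until -ENOENT reports "no more valid CQEs" and then
 * re-arm it. irdma_example_drain_cq() is an assumed helper name.
 */
static int irdma_example_drain_cq(struct irdma_cq_uk *cq)
{
	struct irdma_cq_poll_info info;
	int polled = 0;
	int ret;

	while (!(ret = irdma_uk_cq_poll_cmpl(cq, &info)))
		polled++;	/* consume info.wr_id, info.comp_status... */

	if (ret == -ENOENT)	/* CQ empty: ask for the next event */
		irdma_uk_cq_request_notification(cq, IRDMA_CQ_COMPL_EVENT);

	return polled;
}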
1433 
1434 /**
1435  * irdma_round_up_wq - round up qp wq depth to the next power of two
1436  * @wqdepth: wq depth in quanta to round up
1437  */
1438 static int irdma_round_up_wq(u32 wqdepth)
1439 {
1440 	int scount = 1;
1441 
1442 	for (wqdepth--; scount <= 16; scount *= 2)
1443 		wqdepth |= wqdepth >> scount;
1444 
1445 	return ++wqdepth;
1446 }
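
/*
 * The decrement/smear/increment above rounds up to the next power of two
 * (e.g. 300 -> 512, 512 -> 512). For wqdepth >= 1 it is equivalent to the
 * kernel helper below; shown only as an assumed cross-check.
 */
static int irdma_example_round_up_wq(u32 wqdepth)
{
	return roundup_pow_of_two(wqdepth);	/* from <linux/log2.h> */
}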
1447 
1448 /**
1449  * irdma_get_wqe_shift - get shift count for maximum wqe size
1450  * @uk_attrs: qp HW attributes
1451  * @sge: Maximum Scatter Gather Elements wqe
1452  * @inline_data: Maximum inline data size
1453  * @shift: Returns the shift needed based on sge
1454  *
1455  * Shift is used to left-shift the base wqe size based on the number of SGEs and inline data size.
1456  * For 1 SGE or inline data <= 8, shift = 0 (wqe size of 32
1457  * bytes). For 2 or 3 SGEs or inline data <= 39, shift = 1 (wqe
1458  * size of 64 bytes). For 4-7 SGEs or inline data <= 101,
1459  * shift = 2 (wqe size of 128 bytes). Otherwise shift = 3 (wqe
1460  * size of 256 bytes).
1461  */
1462 void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
1463 			 u32 inline_data, u8 *shift)
1464 {
1465 	*shift = 0;
1466 	if (uk_attrs->hw_rev >= IRDMA_GEN_2) {
1467 		if (sge > 1 || inline_data > 8) {
1468 			if (sge < 4 && inline_data <= 39)
1469 				*shift = 1;
1470 			else if (sge < 8 && inline_data <= 101)
1471 				*shift = 2;
1472 			else
1473 				*shift = 3;
1474 		}
1475 	} else if (sge > 1 || inline_data > 16) {
1476 		*shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
1477 	}
1478 }
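
/*
 * Worked example (illustrative): on GEN_2+ hardware, 4 SGEs with up to
 * 64 bytes of inline data fall through the sge < 4 test but satisfy
 * sge < 8 && inline_data <= 101, giving shift = 2, i.e. 32 << 2 =
 * 128-byte WQEs. irdma_example_shift() is an assumed helper.
 */
static u8 irdma_example_shift(struct irdma_uk_attrs *uk_attrs)
{
	u8 shift;

	irdma_get_wqe_shift(uk_attrs, 4, 64, &shift);
	return shift;	/* 2 on GEN_2 and later */
}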
1479 
1480 /*
1481  * irdma_get_sqdepth - get SQ depth (quanta)
1482  * @uk_attrs: qp HW attributes
1483  * @sq_size: SQ size
1484  * @shift: shift which determines size of WQE
1485  * @sqdepth: depth of SQ
1486  *
1487  */
1488 int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
1489 		      u32 *sqdepth)
1490 {
1491 	u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
1492 
1493 	*sqdepth = irdma_round_up_wq((sq_size << shift) + IRDMA_SQ_RSVD);
1494 
1495 	if (*sqdepth < min_size)
1496 		*sqdepth = min_size;
1497 	else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
1498 		return -EINVAL;
1499 
1500 	return 0;
1501 }
1502 
1503 /*
1504  * irdma_get_rqdepth - get RQ depth (quanta)
1505  * @uk_attrs: qp HW attributes
1506  * @rq_size: RQ size
1507  * @shift: shift which determines size of WQE
1508  * @rqdepth: depth of RQ
1509  */
1510 int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
1511 		      u32 *rqdepth)
1512 {
1513 	u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
1514 
1515 	*rqdepth = irdma_round_up_wq((rq_size << shift) + IRDMA_RQ_RSVD);
1516 
1517 	if (*rqdepth < min_size)
1518 		*rqdepth = min_size;
1519 	else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
1520 		return -EINVAL;
1521 
1522 	return 0;
1523 }
1524 
1525 /*
1526  * irdma_get_srqdepth - get SRQ depth (quanta)
1527  * @uk_attrs: qp HW attributes
1528  * @srq_size: SRQ size
1529  * @shift: shift which determines size of WQE
1530  * @srqdepth: depth of SRQ
1531  */
1532 int irdma_get_srqdepth(struct irdma_uk_attrs *uk_attrs, u32 srq_size, u8 shift,
1533 		       u32 *srqdepth)
1534 {
1535 	*srqdepth = irdma_round_up_wq((srq_size << shift) + IRDMA_RQ_RSVD);
1536 
1537 	if (*srqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
1538 		*srqdepth = uk_attrs->min_hw_wq_size << shift;
1539 	else if (*srqdepth > uk_attrs->max_hw_srq_quanta)
1540 		return -EINVAL;
1541 
1542 	return 0;
1543 }
1544 
1545 static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
1546 	.iw_copy_inline_data = irdma_copy_inline_data,
1547 	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
1548 	.iw_set_fragment = irdma_set_fragment,
1549 	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe,
1550 };
1551 
1552 static const struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = {
1553 	.iw_copy_inline_data = irdma_copy_inline_data_gen_1,
1554 	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1,
1555 	.iw_set_fragment = irdma_set_fragment_gen_1,
1556 	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe_gen_1,
1557 };
1558 
1559 /**
1560  * irdma_setup_connection_wqes - setup WQEs necessary to complete
1561  * connection.
1562  * @qp: hw qp (user and kernel)
1563  * @info: qp initialization info
1564  */
1565 static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
1566 					struct irdma_qp_uk_init_info *info)
1567 {
1568 	u16 move_cnt = 1;
1569 
1570 	if (!info->legacy_mode &&
1571 	    (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE))
1572 		move_cnt = 3;
1573 
1574 	qp->conn_wqes = move_cnt;
1575 	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
1576 	IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
1577 	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
1578 }
1579 
1580 /**
1581  * irdma_uk_srq_init - initialize shared qp
1582  * @srq: hw srq (user and kernel)
1583  * @info: srq initialization info
1584  *
1585  * Initializes the vars used in both user and kernel mode.
1586  * The size of the wqe depends on the number of max fragments
1587  * allowed; wqe size * number of wqes should equal the amount
1588  * of memory allocated for the srq.
1589  */
1590 int irdma_uk_srq_init(struct irdma_srq_uk *srq,
1591 		      struct irdma_srq_uk_init_info *info)
1592 {
1593 	u8 rqshift;
1594 
1595 	srq->uk_attrs = info->uk_attrs;
1596 	if (info->max_srq_frag_cnt > srq->uk_attrs->max_hw_wq_frags)
1597 		return -EINVAL;
1598 
1599 	irdma_get_wqe_shift(srq->uk_attrs, info->max_srq_frag_cnt, 0, &rqshift);
1600 	srq->srq_caps = info->srq_caps;
1601 	srq->srq_base = info->srq;
1602 	srq->shadow_area = info->shadow_area;
1603 	srq->srq_id = info->srq_id;
1604 	srq->srwqe_polarity = 0;
1605 	srq->srq_size = info->srq_size;
1606 	srq->wqe_size = rqshift;
1607 	srq->max_srq_frag_cnt = min(srq->uk_attrs->max_hw_wq_frags,
1608 				    ((u32)2 << rqshift) - 1);
1609 	IRDMA_RING_INIT(srq->srq_ring, srq->srq_size);
1610 	srq->wqe_size_multiplier = 1 << rqshift;
1611 	srq->wqe_ops = iw_wqe_uk_ops;
1612 
1613 	return 0;
1614 }
1615 
1616 /**
1617  * irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ
1618  * @ukinfo: qp initialization info
1619  * @sq_shift: Returns shift of SQ
1620  * @rq_shift: Returns shift of RQ
1621  */
1622 void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
1623 			    u8 *rq_shift)
1624 {
1625 	bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2;
1626 
1627 	irdma_get_wqe_shift(ukinfo->uk_attrs,
1628 			    imm_support ? ukinfo->max_sq_frag_cnt + 1 :
1629 					  ukinfo->max_sq_frag_cnt,
1630 			    ukinfo->max_inline_data, sq_shift);
1631 
1632 	irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
1633 			    rq_shift);
1634 
1635 	if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
1636 		if (ukinfo->abi_ver > 4)
1637 			*rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
1638 	}
1639 }
1640 
1641 /**
1642  * irdma_uk_calc_depth_shift_sq - calculate depth and shift for SQ size.
1643  * @ukinfo: qp initialization info
1644  * @sq_depth: Returns depth of SQ
1645  * @sq_shift: Returns shift of SQ
1646  */
1647 int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
1648 				 u32 *sq_depth, u8 *sq_shift)
1649 {
1650 	bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2;
1651 	int status;
1652 
1653 	irdma_get_wqe_shift(ukinfo->uk_attrs,
1654 			    imm_support ? ukinfo->max_sq_frag_cnt + 1 :
1655 			    ukinfo->max_sq_frag_cnt,
1656 			    ukinfo->max_inline_data, sq_shift);
1657 	status = irdma_get_sqdepth(ukinfo->uk_attrs, ukinfo->sq_size,
1658 				   *sq_shift, sq_depth);
1659 
1660 	return status;
1661 }
1662 
1663 /**
1664  * irdma_uk_calc_depth_shift_rq - calculate depth and shift for RQ size.
1665  * @ukinfo: qp initialization info
1666  * @rq_depth: Returns depth of RQ
1667  * @rq_shift: Returns shift of RQ
1668  */
1669 int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
1670 				 u32 *rq_depth, u8 *rq_shift)
1671 {
1672 	int status;
1673 
1674 	irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
1675 			    rq_shift);
1676 
1677 	if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
1678 		if (ukinfo->abi_ver > 4)
1679 			*rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
1680 	}
1681 
1682 	status = irdma_get_rqdepth(ukinfo->uk_attrs, ukinfo->rq_size,
1683 				   *rq_shift, rq_depth);
1684 
1685 	return status;
1686 }
1687 
1688 /**
1689  * irdma_uk_qp_init - initialize shared qp
1690  * @qp: hw qp (user and kernel)
1691  * @info: qp initialization info
1692  *
1693  * Initializes the vars used in both user and kernel mode.
1694  * The size of the wqe depends on the number of max fragments
1695  * allowed; wqe size * number of wqes should equal the amount
1696  * of memory allocated for the sq and rq.
1697  */
1698 int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
1699 {
1700 	int ret_code = 0;
1701 	u32 sq_ring_size;
1702 
1703 	qp->uk_attrs = info->uk_attrs;
1704 	if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
1705 	    info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
1706 		return -EINVAL;
1707 
1708 	qp->qp_caps = info->qp_caps;
1709 	qp->sq_base = info->sq;
1710 	qp->rq_base = info->rq;
1711 	qp->qp_type = info->type ? info->type : IRDMA_QP_TYPE_IWARP;
1712 	qp->shadow_area = info->shadow_area;
1713 	qp->sq_wrtrk_array = info->sq_wrtrk_array;
1714 
1715 	qp->rq_wrid_array = info->rq_wrid_array;
1716 	qp->wqe_alloc_db = info->wqe_alloc_db;
1717 	qp->qp_id = info->qp_id;
1718 	qp->sq_size = info->sq_size;
1719 	qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
1720 	sq_ring_size = qp->sq_size << info->sq_shift;
1721 	IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
1722 	IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
1723 	if (info->first_sq_wq) {
1724 		irdma_setup_connection_wqes(qp, info);
1725 		qp->swqe_polarity = 1;
1726 		qp->first_sq_wq = true;
1727 	} else {
1728 		qp->swqe_polarity = 0;
1729 	}
1730 	qp->swqe_polarity_deferred = 1;
1731 	qp->rwqe_polarity = 0;
1732 	qp->rq_size = info->rq_size;
1733 	qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
1734 	qp->max_inline_data = info->max_inline_data;
1735 	qp->rq_wqe_size = info->rq_shift;
1736 	IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
1737 	qp->rq_wqe_size_multiplier = 1 << info->rq_shift;
1738 	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
1739 		qp->wqe_ops = iw_wqe_uk_ops_gen_1;
1740 	else
1741 		qp->wqe_ops = iw_wqe_uk_ops;
1742 	qp->srq_uk = info->srq_uk;
1743 	return ret_code;
1744 }
1745 
1746 /**
1747  * irdma_uk_cq_init - initialize shared cq (user and kernel)
1748  * @cq: hw cq
1749  * @info: hw cq initialization info
1750  */
1751 void irdma_uk_cq_init(struct irdma_cq_uk *cq,
1752 		      struct irdma_cq_uk_init_info *info)
1753 {
1754 	cq->cq_base = info->cq_base;
1755 	cq->cq_id = info->cq_id;
1756 	cq->cq_size = info->cq_size;
1757 	cq->cqe_alloc_db = info->cqe_alloc_db;
1758 	cq->cq_ack_db = info->cq_ack_db;
1759 	cq->shadow_area = info->shadow_area;
1760 	cq->avoid_mem_cflct = info->avoid_mem_cflct;
1761 	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
1762 	cq->polarity = 1;
1763 }
1764 
1765 /**
1766  * irdma_uk_clean_cq - clean cq entries
1767  * @q: completion context
1768  * @cq: cq to clean
1769  */
1770 void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
1771 {
1772 	__le64 *cqe;
1773 	u64 qword3, comp_ctx;
1774 	u32 cq_head;
1775 	u8 polarity, temp;
1776 
1777 	cq_head = cq->cq_ring.head;
1778 	temp = cq->polarity;
1779 	do {
1780 		if (cq->avoid_mem_cflct)
1781 			cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;
1782 		else
1783 			cqe = cq->cq_base[cq_head].buf;
1784 		get_64bit_val(cqe, 24, &qword3);
1785 		polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
1786 
1787 		if (polarity != temp)
1788 			break;
1789 
1790 		/* Ensure CQE contents are read after valid bit is checked */
1791 		dma_rmb();
1792 
1793 		get_64bit_val(cqe, 8, &comp_ctx);
1794 		if ((void *)(unsigned long)comp_ctx == q)
1795 			set_64bit_val(cqe, 8, 0);
1796 
1797 		cq_head = (cq_head + 1) % cq->cq_ring.size;
1798 		if (!cq_head)
1799 			temp ^= 1;
1800 	} while (true);
1801 }
1802 
1803 /**
1804  * irdma_nop - post a nop
1805  * @qp: hw qp ptr
1806  * @wr_id: work request id
1807  * @signaled: signaled for completion
1808  * @post_sq: ring doorbell
1809  */
1810 int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq)
1811 {
1812 	__le64 *wqe;
1813 	u64 hdr;
1814 	u32 wqe_idx;
1815 	struct irdma_post_sq_info info = {};
1816 
1817 	info.wr_id = wr_id;
1818 	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
1819 					 0, &info);
1820 	if (!wqe)
1821 		return -ENOMEM;
1822 
1823 	irdma_clr_wqes(qp, wqe_idx);
1824 
1825 	set_64bit_val(wqe, 0, 0);
1826 	set_64bit_val(wqe, 8, 0);
1827 	set_64bit_val(wqe, 16, 0);
1828 
1829 	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
1830 	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
1831 	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
1832 
1833 	dma_wmb(); /* make sure WQE is populated before valid bit is set */
1834 
1835 	set_64bit_val(wqe, 24, hdr);
1836 	if (post_sq)
1837 		irdma_uk_qp_post_wr(qp);
1838 
1839 	return 0;
1840 }
1841 
1842 /**
1843  * irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
1844  * @frag_cnt: number of fragments
1845  * @quanta: quanta for frag_cnt
1846  */
1847 int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
1848 {
1849 	switch (frag_cnt) {
1850 	case 0:
1851 	case 1:
1852 		*quanta = IRDMA_QP_WQE_MIN_QUANTA;
1853 		break;
1854 	case 2:
1855 	case 3:
1856 		*quanta = 2;
1857 		break;
1858 	case 4:
1859 	case 5:
1860 		*quanta = 3;
1861 		break;
1862 	case 6:
1863 	case 7:
1864 		*quanta = 4;
1865 		break;
1866 	case 8:
1867 	case 9:
1868 		*quanta = 5;
1869 		break;
1870 	case 10:
1871 	case 11:
1872 		*quanta = 6;
1873 		break;
1874 	case 12:
1875 	case 13:
1876 		*quanta = 7;
1877 		break;
1878 	case 14:
1879 	case 15: /* when immediate data is present */
1880 		*quanta = 8;
1881 		break;
1882 	default:
1883 		return -EINVAL;
1884 	}
1885 
1886 	return 0;
1887 }
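
/*
 * Assumed-equivalent closed form of the switch above (not upstream
 * code): each 32-byte quantum past the first holds two 16-byte
 * fragments, so for the supported range 0-15 the mapping is simply
 * frag_cnt / 2 + 1 (with IRDMA_QP_WQE_MIN_QUANTA == 1 for 0-1 frags).
 */
static int irdma_example_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
{
	if (frag_cnt > 15)
		return -EINVAL;

	*quanta = frag_cnt / 2 + 1;

	return 0;
}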
1888 
1889 /**
1890  * irdma_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
1891  * @frag_cnt: number of fragments
1892  * @wqe_size: size in bytes given frag_cnt
1893  */
1894 int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
1895 {
1896 	switch (frag_cnt) {
1897 	case 0:
1898 	case 1:
1899 		*wqe_size = 32;
1900 		break;
1901 	case 2:
1902 	case 3:
1903 		*wqe_size = 64;
1904 		break;
1905 	case 4:
1906 	case 5:
1907 	case 6:
1908 	case 7:
1909 		*wqe_size = 128;
1910 		break;
1911 	case 8:
1912 	case 9:
1913 	case 10:
1914 	case 11:
1915 	case 12:
1916 	case 13:
1917 	case 14:
1918 		*wqe_size = 256;
1919 		break;
1920 	default:
1921 		return -EINVAL;
1922 	}
1923 
1924 	return 0;
1925 }
1926