xref: /linux/drivers/infiniband/hw/irdma/uk.c (revision 52f3d34c292b62ec151c6a487d267341d47eefa4)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2015 - 2021 Intel Corporation */
3 #include "osdep.h"
4 #include "defs.h"
5 #include "user.h"
6 #include "irdma.h"
7 
8 /**
9  * irdma_set_fragment - set fragment in wqe
10  * @wqe: wqe for setting fragment
11  * @offset: offset value
12  * @sge: sge length and stag
13  * @valid: wqe valid flag
14  */
15 static void irdma_set_fragment(__le64 *wqe, u32 offset, struct ib_sge *sge,
16 			       u8 valid)
17 {
18 	if (sge) {
19 		set_64bit_val(wqe, offset,
20 			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
21 		set_64bit_val(wqe, offset + 8,
22 			      FIELD_PREP(IRDMAQPSQ_VALID, valid) |
23 			      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->length) |
24 			      FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->lkey));
25 	} else {
26 		set_64bit_val(wqe, offset, 0);
27 		set_64bit_val(wqe, offset + 8,
28 			      FIELD_PREP(IRDMAQPSQ_VALID, valid));
29 	}
30 }
31 
32 /**
33  * irdma_set_fragment_gen_1 - set fragment in wqe
34  * @wqe: wqe for setting fragment
35  * @offset: offset value
36  * @sge: sge length and stag
37  * @valid: wqe valid flag
38  */
39 static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
40 				     struct ib_sge *sge, u8 valid)
41 {
42 	if (sge) {
43 		set_64bit_val(wqe, offset,
44 			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
45 		set_64bit_val(wqe, offset + 8,
46 			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->length) |
47 			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->lkey));
48 	} else {
49 		set_64bit_val(wqe, offset, 0);
50 		set_64bit_val(wqe, offset + 8, 0);
51 	}
52 }
53 
54 /**
55  * irdma_nop_1 - insert a NOP wqe
56  * @qp: hw qp ptr
57  */
58 static int irdma_nop_1(struct irdma_qp_uk *qp)
59 {
60 	u64 hdr;
61 	__le64 *wqe;
62 	u32 wqe_idx;
63 	bool signaled = false;
64 
65 	if (!qp->sq_ring.head)
66 		return -EINVAL;
67 
68 	wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
69 	wqe = qp->sq_base[wqe_idx].elem;
70 
71 	qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA;
72 
73 	set_64bit_val(wqe, 0, 0);
74 	set_64bit_val(wqe, 8, 0);
75 	set_64bit_val(wqe, 16, 0);
76 
77 	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
78 	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
79 	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
80 
81 	/* make sure WQE is written before valid bit is set */
82 	dma_wmb();
83 
84 	set_64bit_val(wqe, 24, hdr);
85 
86 	return 0;
87 }
88 
89 /**
90  * irdma_clr_wqes - clear next 128 sq entries
91  * @qp: hw qp ptr
92  * @qp_wqe_idx: index of the current wqe
93  */
94 void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
95 {
96 	struct irdma_qp_quanta *sq;
97 	u32 wqe_idx;
98 
99 	if (!(qp_wqe_idx & 0x7F)) {
100 		wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
101 		sq = qp->sq_base + wqe_idx;
102 		if (wqe_idx)
103 			memset(sq, qp->swqe_polarity ? 0 : 0xFF,
104 			       128 * sizeof(*sq));
105 		else
106 			memset(sq, qp->swqe_polarity ? 0xFF : 0,
107 			       128 * sizeof(*sq));
108 	}
109 }
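
/*
 * Illustrative note (not upstream documentation): the clear pattern is the
 * inverse of the polarity the cleared entries will later be consumed with,
 * so stale valid bits can never match the expected phase.  For example,
 * with a 256-entry SQ and swqe_polarity == 0:
 *
 *	irdma_clr_wqes(qp, 0);		clears entries 128..255 to 0xFF
 *					(same lap, polarity still 0)
 *	irdma_clr_wqes(qp, 128);	clears entries 0..127 to 0x00
 *					(next lap, polarity flips to 1)
 */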
110 
111 /**
112  * irdma_uk_qp_post_wr - ring doorbell
113  * @qp: hw qp ptr
114  */
115 void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
116 {
117 	writel(qp->qp_id, qp->wqe_alloc_db);
118 }
119 
120 /**
121  * irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
122  * @qp: hw qp ptr
123  * @wqe_idx: return wqe index
124  * @quanta: size of WR in quanta
125  * @total_size: size of WR in bytes
126  * @info: info on WR
127  */
128 __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
129 				   u16 quanta, u32 total_size,
130 				   struct irdma_post_sq_info *info)
131 {
132 	__le64 *wqe;
133 	__le64 *wqe_0 = NULL;
134 	u16 avail_quanta;
135 	u16 i;
136 
137 	avail_quanta = qp->uk_attrs->max_hw_sq_chunk -
138 		       (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) %
139 		       qp->uk_attrs->max_hw_sq_chunk);
140 	if (quanta <= avail_quanta) {
141 		/* WR fits in current chunk */
142 		if (quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
143 			return NULL;
144 	} else {
145 		/* Need to pad with NOP */
146 		if (quanta + avail_quanta >
147 			IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
148 			return NULL;
149 
150 		for (i = 0; i < avail_quanta; i++) {
151 			irdma_nop_1(qp);
152 			IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);
153 		}
154 	}
155 
156 	*wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
157 	if (!*wqe_idx)
158 		qp->swqe_polarity = !qp->swqe_polarity;
159 
160 	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, quanta);
161 
162 	wqe = qp->sq_base[*wqe_idx].elem;
163 	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && quanta == 1 &&
164 	    (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {
165 		wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;
166 		wqe_0[3] = cpu_to_le64(FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity ? 0 : 1));
167 	}
168 	qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
169 	qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
170 	qp->sq_wrtrk_array[*wqe_idx].quanta = quanta;
171 	qp->sq_wrtrk_array[*wqe_idx].signaled = info->signaled;
172 
173 	return wqe;
174 }
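
/*
 * Worked example (illustrative, assuming max_hw_sq_chunk == 4): with the SQ
 * head at quanta index 6, avail_quanta = 4 - (6 % 4) = 2.  A 2-quanta WR fits
 * in the current chunk, while a 3-quanta WR first consumes the 2 remaining
 * quanta with NOPs via irdma_nop_1() so that it starts on a fresh chunk
 * boundary at index 8.  In the padded case both the NOP quanta and the WR
 * quanta must be free in the ring, otherwise NULL is returned.
 */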
175 
176 __le64 *irdma_srq_get_next_recv_wqe(struct irdma_srq_uk *srq, u32 *wqe_idx)
177 {
178 	int ret_code;
179 	__le64 *wqe;
180 
181 	if (IRDMA_RING_FULL_ERR(srq->srq_ring))
182 		return NULL;
183 
184 	IRDMA_ATOMIC_RING_MOVE_HEAD(srq->srq_ring, *wqe_idx, ret_code);
185 	if (ret_code)
186 		return NULL;
187 
188 	if (!*wqe_idx)
189 		srq->srwqe_polarity = !srq->srwqe_polarity;
190 	/* wqe_size_multiplier is the number of 32-byte quanta in one srq wqe */
191 	wqe = srq->srq_base[*wqe_idx * (srq->wqe_size_multiplier)].elem;
192 
193 	return wqe;
194 }
195 
196 /**
197  * irdma_qp_get_next_recv_wqe - get next qp's rcv wqe
198  * @qp: hw qp ptr
199  * @wqe_idx: return wqe index
200  */
201 __le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
202 {
203 	__le64 *wqe;
204 	int ret_code;
205 
206 	if (IRDMA_RING_FULL_ERR(qp->rq_ring))
207 		return NULL;
208 
209 	IRDMA_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
210 	if (ret_code)
211 		return NULL;
212 
213 	if (!*wqe_idx)
214 		qp->rwqe_polarity = !qp->rwqe_polarity;
215 	/* rq_wqe_size_multiplier is the number of 32-byte quanta in one rq wqe */
216 	wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem;
217 
218 	return wqe;
219 }
220 
221 /**
222  * irdma_uk_rdma_write - rdma write operation
223  * @qp: hw qp ptr
224  * @info: post sq information
225  * @post_sq: flag to post sq
226  */
227 int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
228 			bool post_sq)
229 {
230 	u64 hdr;
231 	__le64 *wqe;
232 	struct irdma_rdma_write *op_info;
233 	u32 i, wqe_idx;
234 	u32 total_size = 0, byte_off;
235 	int ret_code;
236 	u32 frag_cnt, addl_frag_cnt;
237 	bool read_fence = false;
238 	u16 quanta;
239 
240 	op_info = &info->op.rdma_write;
241 	if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
242 		return -EINVAL;
243 
244 	for (i = 0; i < op_info->num_lo_sges; i++)
245 		total_size += op_info->lo_sg_list[i].length;
246 
247 	read_fence |= info->read_fence;
248 
249 	if (info->imm_data_valid)
250 		frag_cnt = op_info->num_lo_sges + 1;
251 	else
252 		frag_cnt = op_info->num_lo_sges;
253 	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
254 	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
255 	if (ret_code)
256 		return ret_code;
257 
258 	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
259 					 info);
260 	if (!wqe)
261 		return -ENOMEM;
262 
263 	irdma_clr_wqes(qp, wqe_idx);
264 
265 	set_64bit_val(wqe, 16,
266 		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
267 
268 	if (info->imm_data_valid) {
269 		set_64bit_val(wqe, 0,
270 			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
271 		i = 0;
272 	} else {
273 		qp->wqe_ops.iw_set_fragment(wqe, 0,
274 					    op_info->lo_sg_list,
275 					    qp->swqe_polarity);
276 		i = 1;
277 	}
278 
279 	for (byte_off = 32; i < op_info->num_lo_sges; i++) {
280 		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
281 					    &op_info->lo_sg_list[i],
282 					    qp->swqe_polarity);
283 		byte_off += 16;
284 	}
285 
286 	/* if the fragment count is even, set the valid bit in the next unused fragment */
287 	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
288 	    frag_cnt) {
289 		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
290 					    qp->swqe_polarity);
291 		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
292 			++addl_frag_cnt;
293 	}
294 
295 	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
296 	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
297 	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
298 	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
299 	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
300 	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
301 	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
302 	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
303 	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
304 
305 	dma_wmb(); /* make sure WQE is populated before valid bit is set */
306 
307 	set_64bit_val(wqe, 24, hdr);
308 
309 	if (post_sq)
310 		irdma_uk_qp_post_wr(qp);
311 
312 	return 0;
313 }
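
/*
 * Usage sketch (illustrative only; qp, buf_dma, buf_len, mr_lkey, raddr and
 * rkey are hypothetical caller-provided values, and the return value is
 * checked by the caller):
 *
 *	struct ib_sge sge = {
 *		.addr = buf_dma, .length = buf_len, .lkey = mr_lkey,
 *	};
 *	struct irdma_post_sq_info info = {
 *		.wr_id = 1, .op_type = IRDMA_OP_TYPE_RDMA_WRITE,
 *		.signaled = true,
 *	};
 *
 *	info.op.rdma_write.lo_sg_list = &sge;
 *	info.op.rdma_write.num_lo_sges = 1;
 *	info.op.rdma_write.rem_addr.addr = raddr;
 *	info.op.rdma_write.rem_addr.lkey = rkey;
 *	irdma_uk_rdma_write(qp, &info, true);
 */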
314 
315 /**
316  * irdma_uk_atomic_fetch_add - atomic fetch and add operation
317  * @qp: hw qp ptr
318  * @info: post sq information
319  * @post_sq: flag to post sq
320  */
321 int irdma_uk_atomic_fetch_add(struct irdma_qp_uk *qp,
322 			      struct irdma_post_sq_info *info, bool post_sq)
323 {
324 	struct irdma_atomic_fetch_add *op_info;
325 	u32 total_size = 0;
326 	u16 quanta = 2;
327 	u32 wqe_idx;
328 	__le64 *wqe;
329 	u64 hdr;
330 
331 	op_info = &info->op.atomic_fetch_add;
332 	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
333 					 info);
334 	if (!wqe)
335 		return -ENOMEM;
336 
337 	set_64bit_val(wqe, 0, op_info->tagged_offset);
338 	set_64bit_val(wqe, 8,
339 		      FIELD_PREP(IRDMAQPSQ_STAG, op_info->stag));
340 	set_64bit_val(wqe, 16, op_info->remote_tagged_offset);
341 
342 	hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, 1) |
343 	      FIELD_PREP(IRDMAQPSQ_REMOTE_STAG, op_info->remote_stag) |
344 	      FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_ATOMIC_FETCH_ADD) |
345 	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
346 	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
347 	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
348 	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
349 
350 	set_64bit_val(wqe, 32, op_info->fetch_add_data_bytes);
351 	set_64bit_val(wqe, 40, 0);
352 	set_64bit_val(wqe, 48, 0);
353 	set_64bit_val(wqe, 56,
354 		      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity));
355 
356 	dma_wmb(); /* make sure WQE is populated before valid bit is set */
357 
358 	set_64bit_val(wqe, 24, hdr);
359 
360 	if (post_sq)
361 		irdma_uk_qp_post_wr(qp);
362 
363 	return 0;
364 }
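
/*
 * Usage sketch (illustrative only; qp, local_to, local_stag, remote_to and
 * remote_stag are hypothetical caller-provided values).  The 64-bit add
 * operand is carried in fetch_add_data_bytes:
 *
 *	struct irdma_post_sq_info info = { .wr_id = 2, .signaled = true };
 *
 *	info.op.atomic_fetch_add.tagged_offset = local_to;
 *	info.op.atomic_fetch_add.stag = local_stag;
 *	info.op.atomic_fetch_add.remote_tagged_offset = remote_to;
 *	info.op.atomic_fetch_add.remote_stag = remote_stag;
 *	info.op.atomic_fetch_add.fetch_add_data_bytes = 1;
 *	irdma_uk_atomic_fetch_add(qp, &info, true);
 */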
365 
366 /**
367  * irdma_uk_atomic_compare_swap - atomic compare and swap operation
368  * @qp: hw qp ptr
369  * @info: post sq information
370  * @post_sq: flag to post sq
371  */
372 int irdma_uk_atomic_compare_swap(struct irdma_qp_uk *qp,
373 				 struct irdma_post_sq_info *info, bool post_sq)
374 {
375 	struct irdma_atomic_compare_swap *op_info;
376 	u32 total_size = 0;
377 	u16 quanta = 2;
378 	u32 wqe_idx;
379 	__le64 *wqe;
380 	u64 hdr;
381 
382 	op_info = &info->op.atomic_compare_swap;
383 	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
384 					 info);
385 	if (!wqe)
386 		return -ENOMEM;
387 
388 	set_64bit_val(wqe, 0, op_info->tagged_offset);
389 	set_64bit_val(wqe, 8,
390 		      FIELD_PREP(IRDMAQPSQ_STAG, op_info->stag));
391 	set_64bit_val(wqe, 16, op_info->remote_tagged_offset);
392 
393 	hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, 1) |
394 	      FIELD_PREP(IRDMAQPSQ_REMOTE_STAG, op_info->remote_stag) |
395 	      FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_ATOMIC_COMPARE_SWAP_ADD) |
396 	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
397 	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
398 	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
399 	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
400 
401 	set_64bit_val(wqe, 32, op_info->swap_data_bytes);
402 	set_64bit_val(wqe, 40, op_info->compare_data_bytes);
403 	set_64bit_val(wqe, 48, 0);
404 	set_64bit_val(wqe, 56,
405 		      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity));
406 
407 	dma_wmb(); /* make sure WQE is populated before valid bit is set */
408 
409 	set_64bit_val(wqe, 24, hdr);
410 
411 	if (post_sq)
412 		irdma_uk_qp_post_wr(qp);
413 
414 	return 0;
415 }
416 
417 /**
418  * irdma_uk_srq_post_receive - post a receive wqe to a shared rq
419  * @srq: shared rq ptr
420  * @info: post rq information
421  */
422 int irdma_uk_srq_post_receive(struct irdma_srq_uk *srq,
423 			      struct irdma_post_rq_info *info)
424 {
425 	u32 wqe_idx, i, byte_off;
426 	u32 addl_frag_cnt;
427 	__le64 *wqe;
428 	u64 hdr;
429 
430 	if (srq->max_srq_frag_cnt < info->num_sges)
431 		return -EINVAL;
432 
433 	wqe = irdma_srq_get_next_recv_wqe(srq, &wqe_idx);
434 	if (!wqe)
435 		return -ENOMEM;
436 
437 	addl_frag_cnt = info->num_sges > 1 ? info->num_sges - 1 : 0;
438 	srq->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list,
439 				     srq->srwqe_polarity);
440 
441 	for (i = 1, byte_off = 32; i < info->num_sges; i++) {
442 		srq->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
443 					     srq->srwqe_polarity);
444 		byte_off += 16;
445 	}
446 
447 	/* if the SGE count is even, set the valid bit in the next unused fragment */
448 	if (srq->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
449 	    info->num_sges) {
450 		srq->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
451 					     srq->srwqe_polarity);
452 		if (srq->uk_attrs->hw_rev == IRDMA_GEN_2)
453 			++addl_frag_cnt;
454 	}
455 
456 	set_64bit_val(wqe, 16, (u64)info->wr_id);
457 	hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
458 	      FIELD_PREP(IRDMAQPSQ_VALID, srq->srwqe_polarity);
459 
460 	dma_wmb(); /* make sure WQE is populated before valid bit is set */
461 
462 	set_64bit_val(wqe, 24, hdr);
463 
464 	set_64bit_val(srq->shadow_area, 0, (wqe_idx + 1) % srq->srq_ring.size);
465 
466 	return 0;
467 }
468 
469 /**
470  * irdma_uk_rdma_read - rdma read command
471  * @qp: hw qp ptr
472  * @info: post sq information
473  * @inv_stag: true to post a read with local stag invalidate
474  * @post_sq: flag to post sq
475  */
476 int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
477 		       bool inv_stag, bool post_sq)
478 {
479 	struct irdma_rdma_read *op_info;
480 	int ret_code;
481 	u32 i, byte_off, total_size = 0;
482 	bool local_fence = false;
483 	u32 addl_frag_cnt;
484 	__le64 *wqe;
485 	u32 wqe_idx;
486 	u16 quanta;
487 	u64 hdr;
488 
489 	op_info = &info->op.rdma_read;
490 	if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
491 		return -EINVAL;
492 
493 	for (i = 0; i < op_info->num_lo_sges; i++)
494 		total_size += op_info->lo_sg_list[i].length;
495 
496 	ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
497 	if (ret_code)
498 		return ret_code;
499 
500 	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
501 					 info);
502 	if (!wqe)
503 		return -ENOMEM;
504 
505 	irdma_clr_wqes(qp, wqe_idx);
506 
507 	addl_frag_cnt = op_info->num_lo_sges > 1 ?
508 			(op_info->num_lo_sges - 1) : 0;
509 	local_fence |= info->local_fence;
510 
511 	qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->lo_sg_list,
512 				    qp->swqe_polarity);
513 	for (i = 1, byte_off = 32; i < op_info->num_lo_sges; ++i) {
514 		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
515 					    &op_info->lo_sg_list[i],
516 					    qp->swqe_polarity);
517 		byte_off += 16;
518 	}
519 
520 	/* if the fragment count is even, set the valid bit in the next unused fragment */
521 	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 &&
522 	    !(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) {
523 		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
524 					    qp->swqe_polarity);
525 		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
526 			++addl_frag_cnt;
527 	}
528 	set_64bit_val(wqe, 16,
529 		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
530 	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
531 	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
532 	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
533 	      FIELD_PREP(IRDMAQPSQ_OPCODE,
534 			 (inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) |
535 	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
536 	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
537 	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
538 	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
539 
540 	dma_wmb(); /* make sure WQE is populated before valid bit is set */
541 
542 	set_64bit_val(wqe, 24, hdr);
543 
544 	if (post_sq)
545 		irdma_uk_qp_post_wr(qp);
546 
547 	return 0;
548 }
549 
550 /**
551  * irdma_uk_send - rdma send command
552  * @qp: hw qp ptr
553  * @info: post sq information
554  * @post_sq: flag to post sq
555  */
556 int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
557 		  bool post_sq)
558 {
559 	__le64 *wqe;
560 	struct irdma_post_send *op_info;
561 	u64 hdr;
562 	u32 i, wqe_idx, total_size = 0, byte_off;
563 	int ret_code;
564 	u32 frag_cnt, addl_frag_cnt;
565 	bool read_fence = false;
566 	u16 quanta;
567 
568 	op_info = &info->op.send;
569 	if (qp->max_sq_frag_cnt < op_info->num_sges)
570 		return -EINVAL;
571 
572 	for (i = 0; i < op_info->num_sges; i++)
573 		total_size += op_info->sg_list[i].length;
574 
575 	if (info->imm_data_valid)
576 		frag_cnt = op_info->num_sges + 1;
577 	else
578 		frag_cnt = op_info->num_sges;
579 	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
580 	if (ret_code)
581 		return ret_code;
582 
583 	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
584 					 info);
585 	if (!wqe)
586 		return -ENOMEM;
587 
588 	irdma_clr_wqes(qp, wqe_idx);
589 
590 	read_fence |= info->read_fence;
591 	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
592 	if (info->imm_data_valid) {
593 		set_64bit_val(wqe, 0,
594 			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
595 		i = 0;
596 	} else {
597 		qp->wqe_ops.iw_set_fragment(wqe, 0,
598 					    frag_cnt ? op_info->sg_list : NULL,
599 					    qp->swqe_polarity);
600 		i = 1;
601 	}
602 
603 	for (byte_off = 32; i < op_info->num_sges; i++) {
604 		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->sg_list[i],
605 					    qp->swqe_polarity);
606 		byte_off += 16;
607 	}
608 
609 	/* if the fragment count is even, set the valid bit in the next unused fragment */
610 	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
611 	    frag_cnt) {
612 		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
613 					    qp->swqe_polarity);
614 		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
615 			++addl_frag_cnt;
616 	}
617 
618 	set_64bit_val(wqe, 16,
619 		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
620 		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
621 	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
622 	      FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
623 	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
624 			 (info->imm_data_valid ? 1 : 0)) |
625 	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
626 	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
627 	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
628 	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
629 	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
630 	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
631 	      FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
632 	      FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
633 	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
634 
635 	dma_wmb(); /* make sure WQE is populated before valid bit is set */
636 
637 	set_64bit_val(wqe, 24, hdr);
638 
639 	if (post_sq)
640 		irdma_uk_qp_post_wr(qp);
641 
642 	return 0;
643 }
644 
645 /**
646  * irdma_set_mw_bind_wqe_gen_1 - set mw bind wqe
647  * @wqe: wqe for setting fragment
648  * @op_info: info for setting bind wqe values
649  */
650 static void irdma_set_mw_bind_wqe_gen_1(__le64 *wqe,
651 					struct irdma_bind_window *op_info)
652 {
653 	set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
654 	set_64bit_val(wqe, 8,
655 		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mw_stag) |
656 		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mr_stag));
657 	set_64bit_val(wqe, 16, op_info->bind_len);
658 }
659 
660 /**
661  * irdma_copy_inline_data_gen_1 - Copy inline data to wqe
662  * @wqe: pointer to wqe
663  * @sge_list: sge list referencing the inline data
664  * @num_sges: number of SGEs in sge_list
665  * @polarity: compatibility parameter
666  */
667 static void irdma_copy_inline_data_gen_1(u8 *wqe, struct ib_sge *sge_list,
668 					 u32 num_sges, u8 polarity)
669 {
670 	u32 quanta_bytes_remaining = 16;
671 	int i;
672 
673 	for (i = 0; i < num_sges; i++) {
674 		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
675 		u32 sge_len = sge_list[i].length;
676 
677 		while (sge_len) {
678 			u32 bytes_copied;
679 
680 			bytes_copied = min(sge_len, quanta_bytes_remaining);
681 			memcpy(wqe, cur_sge, bytes_copied);
682 			wqe += bytes_copied;
683 			cur_sge += bytes_copied;
684 			quanta_bytes_remaining -= bytes_copied;
685 			sge_len -= bytes_copied;
686 
687 			if (!quanta_bytes_remaining) {
688 				/* Remaining inline bytes reside after hdr */
689 				wqe += 16;
690 				quanta_bytes_remaining = 32;
691 			}
692 		}
693 	}
694 }
695 
696 /**
697  * irdma_inline_data_size_to_quanta_gen_1 - calculate quanta from inline data size
698  * @data_size: data size for inline
699  *
700  * Gets the quanta based on inline and immediate data.
701  */
702 static inline u16 irdma_inline_data_size_to_quanta_gen_1(u32 data_size)
703 {
704 	return data_size <= 16 ? IRDMA_QP_WQE_MIN_QUANTA : 2;
705 }
706 
707 /**
708  * irdma_set_mw_bind_wqe - set mw bind in wqe
709  * @wqe: wqe for setting mw bind
710  * @op_info: info for setting wqe values
711  */
712 static void irdma_set_mw_bind_wqe(__le64 *wqe,
713 				  struct irdma_bind_window *op_info)
714 {
715 	set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
716 	set_64bit_val(wqe, 8,
717 		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mr_stag) |
718 		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mw_stag));
719 	set_64bit_val(wqe, 16, op_info->bind_len);
720 }
721 
722 /**
723  * irdma_copy_inline_data - Copy inline data to wqe
724  * @wqe: pointer to wqe
725  * @sge_list: sge list referencing the inline data
726  * @num_sges: number of SGEs in sge_list
727  * @polarity: polarity of wqe valid bit
728  */
729 static void irdma_copy_inline_data(u8 *wqe, struct ib_sge *sge_list,
730 				   u32 num_sges, u8 polarity)
731 {
732 	u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
733 	u32 quanta_bytes_remaining = 8;
734 	bool first_quanta = true;
735 	int i;
736 
737 	wqe += 8;
738 
739 	for (i = 0; i < num_sges; i++) {
740 		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
741 		u32 sge_len = sge_list[i].length;
742 
743 		while (sge_len) {
744 			u32 bytes_copied;
745 
746 			bytes_copied = min(sge_len, quanta_bytes_remaining);
747 			memcpy(wqe, cur_sge, bytes_copied);
748 			wqe += bytes_copied;
749 			cur_sge += bytes_copied;
750 			quanta_bytes_remaining -= bytes_copied;
751 			sge_len -= bytes_copied;
752 
753 			if (!quanta_bytes_remaining) {
754 				quanta_bytes_remaining = 31;
755 
756 				/* Remaining inline bytes reside after hdr */
757 				if (first_quanta) {
758 					first_quanta = false;
759 					wqe += 16;
760 				} else {
761 					*wqe = inline_valid;
762 					wqe++;
763 				}
764 			}
765 		}
766 	}
767 	if (!first_quanta && quanta_bytes_remaining < 31)
768 		*(wqe + quanta_bytes_remaining) = inline_valid;
769 }
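
/*
 * Layout note (illustrative): inline data skips the first 8 bytes of quantum
 * 0 (qword 0 may carry immediate data), fills bytes 8..15, then skips the
 * 16 bytes holding qwords 2 and 3 of the WQE header.  Every additional
 * 32-byte quantum packs 31 data bytes and reserves its last byte for the
 * inline valid marker.  For example, a 45-byte payload uses 8 bytes in
 * quantum 0 plus 31 and 6 bytes in quanta 1 and 2, with the valid byte
 * written at the end of quanta 1 and 2.
 */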
770 
771 /**
772  * irdma_inline_data_size_to_quanta - calculate quanta from inline data size
773  * @data_size: data size for inline
774  *
775  * Gets the quanta based on inline and immediate data.
776  */
777 static u16 irdma_inline_data_size_to_quanta(u32 data_size)
778 {
779 	if (data_size <= 8)
780 		return IRDMA_QP_WQE_MIN_QUANTA;
781 	else if (data_size <= 39)
782 		return 2;
783 	else if (data_size <= 70)
784 		return 3;
785 	else if (data_size <= 101)
786 		return 4;
787 	else if (data_size <= 132)
788 		return 5;
789 	else if (data_size <= 163)
790 		return 6;
791 	else if (data_size <= 194)
792 		return 7;
793 	else
794 		return 8;
795 }
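
/*
 * Illustrative check: these thresholds follow from the copy layout above --
 * quantum 0 holds 8 inline bytes and every extra quantum holds 31, so the
 * capacity for N quanta is 8 + 31 * (N - 1), i.e. 8, 39, 70, 101, 132, 163,
 * 194 and 225 bytes for N = 1..8.
 */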
796 
797 /**
798  * irdma_uk_inline_rdma_write - inline rdma write operation
799  * @qp: hw qp ptr
800  * @info: post sq information
801  * @post_sq: flag to post sq
802  */
803 int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
804 			       struct irdma_post_sq_info *info, bool post_sq)
805 {
806 	__le64 *wqe;
807 	struct irdma_rdma_write *op_info;
808 	u64 hdr = 0;
809 	u32 wqe_idx;
810 	bool read_fence = false;
811 	u32 i, total_size = 0;
812 	u16 quanta;
813 
814 	op_info = &info->op.rdma_write;
815 
816 	if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges))
817 		return -EINVAL;
818 
819 	for (i = 0; i < op_info->num_lo_sges; i++)
820 		total_size += op_info->lo_sg_list[i].length;
821 
822 	if (unlikely(total_size > qp->max_inline_data))
823 		return -EINVAL;
824 
825 	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
826 	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
827 					 info);
828 	if (!wqe)
829 		return -ENOMEM;
830 
831 	irdma_clr_wqes(qp, wqe_idx);
832 
833 	read_fence |= info->read_fence;
834 	set_64bit_val(wqe, 16,
835 		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
836 
837 	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
838 	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
839 	      FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
840 	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
841 	      FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
842 	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
843 	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
844 	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
845 	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
846 	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
847 
848 	if (info->imm_data_valid)
849 		set_64bit_val(wqe, 0,
850 			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
851 
852 	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->lo_sg_list,
853 					op_info->num_lo_sges,
854 					qp->swqe_polarity);
855 	dma_wmb(); /* make sure WQE is populated before valid bit is set */
856 
857 	set_64bit_val(wqe, 24, hdr);
858 
859 	if (post_sq)
860 		irdma_uk_qp_post_wr(qp);
861 
862 	return 0;
863 }
864 
865 /**
866  * irdma_uk_inline_send - inline send operation
867  * @qp: hw qp ptr
868  * @info: post sq information
869  * @post_sq: flag to post sq
870  */
871 int irdma_uk_inline_send(struct irdma_qp_uk *qp,
872 			 struct irdma_post_sq_info *info, bool post_sq)
873 {
874 	__le64 *wqe;
875 	struct irdma_post_send *op_info;
876 	u64 hdr;
877 	u32 wqe_idx;
878 	bool read_fence = false;
879 	u32 i, total_size = 0;
880 	u16 quanta;
881 
882 	op_info = &info->op.send;
883 
884 	if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges))
885 		return -EINVAL;
886 
887 	for (i = 0; i < op_info->num_sges; i++)
888 		total_size += op_info->sg_list[i].length;
889 
890 	if (unlikely(total_size > qp->max_inline_data))
891 		return -EINVAL;
892 
893 	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
894 	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
895 					 info);
896 	if (!wqe)
897 		return -ENOMEM;
898 
899 	irdma_clr_wqes(qp, wqe_idx);
900 
901 	set_64bit_val(wqe, 16,
902 		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
903 		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
904 
905 	read_fence |= info->read_fence;
906 	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
907 	      FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
908 	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
909 	      FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
910 	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
911 			 (info->imm_data_valid ? 1 : 0)) |
912 	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
913 	      FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
914 	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
915 	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
916 	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
917 	      FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
918 	      FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
919 	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
920 
921 	if (info->imm_data_valid)
922 		set_64bit_val(wqe, 0,
923 			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
924 	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->sg_list,
925 					op_info->num_sges, qp->swqe_polarity);
926 
927 	dma_wmb(); /* make sure WQE is populated before valid bit is set */
928 
929 	set_64bit_val(wqe, 24, hdr);
930 
931 	if (post_sq)
932 		irdma_uk_qp_post_wr(qp);
933 
934 	return 0;
935 }
936 
937 /**
938  * irdma_uk_stag_local_invalidate - stag invalidate operation
939  * @qp: hw qp ptr
940  * @info: post sq information
941  * @post_sq: flag to post sq
942  */
943 int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
944 				   struct irdma_post_sq_info *info,
945 				   bool post_sq)
946 {
947 	__le64 *wqe;
948 	struct irdma_inv_local_stag *op_info;
949 	u64 hdr;
950 	u32 wqe_idx;
951 	bool local_fence = false;
952 	struct ib_sge sge = {};
953 
954 	op_info = &info->op.inv_local_stag;
955 	local_fence = info->local_fence;
956 
957 	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
958 					 0, info);
959 	if (!wqe)
960 		return -ENOMEM;
961 
962 	irdma_clr_wqes(qp, wqe_idx);
963 
964 	sge.lkey = op_info->target_stag;
965 	qp->wqe_ops.iw_set_fragment(wqe, 0, &sge, 0);
966 
967 	set_64bit_val(wqe, 16, 0);
968 
969 	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) |
970 	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
971 	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
972 	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
973 	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
974 
975 	dma_wmb(); /* make sure WQE is populated before valid bit is set */
976 
977 	set_64bit_val(wqe, 24, hdr);
978 
979 	if (post_sq)
980 		irdma_uk_qp_post_wr(qp);
981 
982 	return 0;
983 }
984 
985 /**
986  * irdma_uk_post_receive - post receive wqe
987  * @qp: hw qp ptr
988  * @info: post rq information
989  */
990 int irdma_uk_post_receive(struct irdma_qp_uk *qp,
991 			  struct irdma_post_rq_info *info)
992 {
993 	u32 wqe_idx, i, byte_off;
994 	u32 addl_frag_cnt;
995 	__le64 *wqe;
996 	u64 hdr;
997 
998 	if (qp->max_rq_frag_cnt < info->num_sges)
999 		return -EINVAL;
1000 
1001 	wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
1002 	if (!wqe)
1003 		return -ENOMEM;
1004 
1005 	qp->rq_wrid_array[wqe_idx] = info->wr_id;
1006 	addl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0;
1007 	qp->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list,
1008 				    qp->rwqe_polarity);
1009 
1010 	for (i = 1, byte_off = 32; i < info->num_sges; i++) {
1011 		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
1012 					    qp->rwqe_polarity);
1013 		byte_off += 16;
1014 	}
1015 
1016 	/* if the SGE count is even, set the valid bit in the next unused fragment */
1017 	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
1018 	    info->num_sges) {
1019 		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
1020 					    qp->rwqe_polarity);
1021 		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
1022 			++addl_frag_cnt;
1023 	}
1024 
1025 	set_64bit_val(wqe, 16, 0);
1026 	hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
1027 	      FIELD_PREP(IRDMAQPSQ_VALID, qp->rwqe_polarity);
1028 
1029 	dma_wmb(); /* make sure WQE is populated before valid bit is set */
1030 
1031 	set_64bit_val(wqe, 24, hdr);
1032 
1033 	return 0;
1034 }
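
/*
 * Usage sketch (illustrative only; qp, rbuf_dma, rbuf_len and mr_lkey are
 * hypothetical caller-provided values):
 *
 *	struct ib_sge sge = {
 *		.addr = rbuf_dma, .length = rbuf_len, .lkey = mr_lkey,
 *	};
 *	struct irdma_post_rq_info rq_info = {
 *		.wr_id = 7, .sg_list = &sge, .num_sges = 1,
 *	};
 *
 *	irdma_uk_post_receive(qp, &rq_info);
 */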
1035 
1036 /**
1037  * irdma_uk_cq_resize - reset the cq buffer info
1038  * @cq: cq to resize
1039  * @cq_base: new cq buffer addr
1040  * @cq_size: number of cqes
1041  */
1042 void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)
1043 {
1044 	cq->cq_base = cq_base;
1045 	cq->cq_size = cq_size;
1046 	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
1047 	cq->polarity = 1;
1048 }
1049 
1050 /**
1051  * irdma_uk_cq_set_resized_cnt - record the count of the resized buffers
1052  * @cq: cq to resize
1053  * @cq_cnt: the count of the resized cq buffers
1054  */
1055 void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt)
1056 {
1057 	u64 temp_val;
1058 	u16 sw_cq_sel;
1059 	u8 arm_next_se;
1060 	u8 arm_next;
1061 	u8 arm_seq_num;
1062 
1063 	get_64bit_val(cq->shadow_area, 32, &temp_val);
1064 
1065 	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
1066 	sw_cq_sel += cq_cnt;
1067 
1068 	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
1069 	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
1070 	arm_next = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT, temp_val);
1071 
1072 	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
1073 		   FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
1074 		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
1075 		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);
1076 
1077 	set_64bit_val(cq->shadow_area, 32, temp_val);
1078 }
1079 
1080 /**
1081  * irdma_uk_cq_request_notification - cq notification request (doorbell)
1082  * @cq: hw cq
1083  * @cq_notify: notification type
1084  */
1085 void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
1086 				      enum irdma_cmpl_notify cq_notify)
1087 {
1088 	u64 temp_val;
1089 	u16 sw_cq_sel;
1090 	u8 arm_next_se = 0;
1091 	u8 arm_next = 0;
1092 	u8 arm_seq_num;
1093 
1094 	get_64bit_val(cq->shadow_area, 32, &temp_val);
1095 	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
1096 	arm_seq_num++;
1097 	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
1098 	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
1099 	arm_next_se |= 1;
1100 	if (cq_notify == IRDMA_CQ_COMPL_EVENT)
1101 		arm_next = 1;
1102 	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
1103 		   FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
1104 		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
1105 		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);
1106 
1107 	set_64bit_val(cq->shadow_area, 32, temp_val);
1108 
1109 	writel(cq->cq_id, cq->cqe_alloc_db);
1110 }
1111 
1112 /**
1113  * irdma_uk_cq_empty - Check if CQ is empty
1114  * @cq: hw cq
1115  */
1116 bool irdma_uk_cq_empty(struct irdma_cq_uk *cq)
1117 {
1118 	__le64 *cqe;
1119 	u8 polarity;
1120 	u64 qword3;
1121 
1122 	if (cq->avoid_mem_cflct)
1123 		cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
1124 	else
1125 		cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);
1126 
1127 	get_64bit_val(cqe, 24, &qword3);
1128 	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
1129 
1130 	return polarity != cq->polarity;
1131 }
1132 
1133 /**
1134  * irdma_uk_cq_poll_cmpl - get cq completion info
1135  * @cq: hw cq
1136  * @info: cq poll information returned
1137  */
1138 int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
1139 			  struct irdma_cq_poll_info *info)
1140 {
1141 	u64 comp_ctx, qword0, qword2, qword3;
1142 	__le64 *cqe;
1143 	struct irdma_qp_uk *qp;
1144 	struct irdma_srq_uk *srq;
1145 	struct qp_err_code qp_err;
1146 	u8 is_srq;
1147 	struct irdma_ring *pring = NULL;
1148 	u32 wqe_idx;
1149 	int ret_code;
1150 	bool move_cq_head = true;
1151 	u8 polarity;
1152 	bool ext_valid;
1153 	__le64 *ext_cqe;
1154 
1155 	if (cq->avoid_mem_cflct)
1156 		cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
1157 	else
1158 		cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);
1159 
1160 	get_64bit_val(cqe, 24, &qword3);
1161 	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
1162 	if (polarity != cq->polarity)
1163 		return -ENOENT;
1164 
1165 	/* Ensure CQE contents are read after valid bit is checked */
1166 	dma_rmb();
1167 
1168 	ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
1169 	if (ext_valid) {
1170 		u64 qword6, qword7;
1171 		u32 peek_head;
1172 
1173 		if (cq->avoid_mem_cflct) {
1174 			ext_cqe = (__le64 *)((u8 *)cqe + 32);
1175 			get_64bit_val(ext_cqe, 24, &qword7);
1176 			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
1177 		} else {
1178 			peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
1179 			ext_cqe = cq->cq_base[peek_head].buf;
1180 			get_64bit_val(ext_cqe, 24, &qword7);
1181 			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
1182 			if (!peek_head)
1183 				polarity ^= 1;
1184 		}
1185 		if (polarity != cq->polarity)
1186 			return -ENOENT;
1187 
1188 		/* Ensure ext CQE contents are read after ext valid bit is checked */
1189 		dma_rmb();
1190 
1191 		info->imm_valid = (bool)FIELD_GET(IRDMA_CQ_IMMVALID, qword7);
1192 		if (info->imm_valid) {
1193 			u64 qword4;
1194 
1195 			get_64bit_val(ext_cqe, 0, &qword4);
1196 			info->imm_data = (u32)FIELD_GET(IRDMA_CQ_IMMDATALOW32, qword4);
1197 		}
1198 		info->ud_smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
1199 		info->ud_vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
1200 		if (info->ud_smac_valid || info->ud_vlan_valid) {
1201 			get_64bit_val(ext_cqe, 16, &qword6);
1202 			if (info->ud_vlan_valid)
1203 				info->ud_vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
1204 			if (info->ud_smac_valid) {
1205 				info->ud_smac[5] = qword6 & 0xFF;
1206 				info->ud_smac[4] = (qword6 >> 8) & 0xFF;
1207 				info->ud_smac[3] = (qword6 >> 16) & 0xFF;
1208 				info->ud_smac[2] = (qword6 >> 24) & 0xFF;
1209 				info->ud_smac[1] = (qword6 >> 32) & 0xFF;
1210 				info->ud_smac[0] = (qword6 >> 40) & 0xFF;
1211 			}
1212 		}
1213 	} else {
1214 		info->imm_valid = false;
1215 		info->ud_smac_valid = false;
1216 		info->ud_vlan_valid = false;
1217 	}
1218 
1219 	info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
1220 	is_srq = (u8)FIELD_GET(IRDMA_CQ_SRQ, qword3);
1221 	info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
1222 	info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
1223 	get_64bit_val(cqe, 8, &comp_ctx);
1224 	if (is_srq)
1225 		get_64bit_val(cqe, 40, (u64 *)&qp);
1226 	else
1227 		qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
1228 	if (info->error) {
1229 		info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
1230 		info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
1231 		switch (info->major_err) {
1232 		case IRDMA_SRQFLUSH_RSVD_MAJOR_ERR:
1233 			qp_err = irdma_ae_to_qp_err_code(info->minor_err);
1234 			info->minor_err = qp_err.flush_code;
1235 			fallthrough;
1236 		case IRDMA_FLUSH_MAJOR_ERR:
1237 			/* Set the minor error to the standard flush error code for remaining cqes */
1238 			if (info->minor_err != FLUSH_GENERAL_ERR) {
1239 				qword3 &= ~IRDMA_CQ_MINERR;
1240 				qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
1241 				set_64bit_val(cqe, 24, qword3);
1242 			}
1243 			info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
1244 			break;
1245 		default:
1246 #define IRDMA_CIE_SIGNATURE 0xE
1247 #define IRDMA_CQMAJERR_HIGH_NIBBLE GENMASK(15, 12)
1248 			if (info->q_type == IRDMA_CQE_QTYPE_SQ &&
1249 			    qp->qp_type == IRDMA_QP_TYPE_ROCE_UD &&
1250 			    FIELD_GET(IRDMA_CQMAJERR_HIGH_NIBBLE, info->major_err)
1251 			    == IRDMA_CIE_SIGNATURE) {
1252 				info->error = 0;
1253 				info->major_err = 0;
1254 				info->minor_err = 0;
1255 				info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
1256 			} else {
1257 				info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
1258 			}
1259 			break;
1260 		}
1261 	} else {
1262 		info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
1263 	}
1264 
1265 	get_64bit_val(cqe, 0, &qword0);
1266 	get_64bit_val(cqe, 16, &qword2);
1267 
1268 	info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
1269 	info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);
1270 
1271 	get_64bit_val(cqe, 8, &comp_ctx);
1272 
1273 	info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
1274 	qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
1275 	if (!qp || qp->destroy_pending) {
1276 		ret_code = -EFAULT;
1277 		goto exit;
1278 	}
1279 	wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
1280 	info->qp_handle = (irdma_qp_handle)(unsigned long)qp;
1281 	info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
1282 
1283 	if (info->q_type == IRDMA_CQE_QTYPE_RQ && is_srq) {
1284 		unsigned long flags;
1285 
1286 		srq = qp->srq_uk;
1287 
1288 		get_64bit_val(cqe, 8, &info->wr_id);
1289 		info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
1290 
1291 		if (qword3 & IRDMACQ_STAG) {
1292 			info->stag_invalid_set = true;
1293 			info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG,
1294 							qword2);
1295 		} else {
1296 			info->stag_invalid_set = false;
1297 		}
1298 		spin_lock_irqsave(srq->lock, flags);
1299 		IRDMA_RING_MOVE_TAIL(srq->srq_ring);
1300 		spin_unlock_irqrestore(srq->lock, flags);
1301 		pring = &srq->srq_ring;
1302 
1303 	} else if (info->q_type == IRDMA_CQE_QTYPE_RQ && !is_srq) {
1304 		u32 array_idx;
1305 
1306 		array_idx = wqe_idx / qp->rq_wqe_size_multiplier;
1307 
1308 		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||
1309 		    info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) {
1310 			if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {
1311 				ret_code = -ENOENT;
1312 				goto exit;
1313 			}
1314 
1315 			info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
1316 			array_idx = qp->rq_ring.tail;
1317 		} else {
1318 			info->wr_id = qp->rq_wrid_array[array_idx];
1319 		}
1320 
1321 		info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
1322 
1323 		if (qword3 & IRDMACQ_STAG) {
1324 			info->stag_invalid_set = true;
1325 			info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
1326 		} else {
1327 			info->stag_invalid_set = false;
1328 		}
1329 		IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
1330 		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
1331 			qp->rq_flush_seen = true;
1332 			if (!IRDMA_RING_MORE_WORK(qp->rq_ring))
1333 				qp->rq_flush_complete = true;
1334 			else
1335 				move_cq_head = false;
1336 		}
1337 		pring = &qp->rq_ring;
1338 	} else { /* q_type is IRDMA_CQE_QTYPE_SQ */
1339 		if (qp->first_sq_wq) {
1340 			if (wqe_idx + 1 >= qp->conn_wqes)
1341 				qp->first_sq_wq = false;
1342 
1343 			if (wqe_idx < qp->conn_wqes && qp->sq_ring.head == qp->sq_ring.tail) {
1344 				IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
1345 				IRDMA_RING_MOVE_TAIL(cq->cq_ring);
1346 				set_64bit_val(cq->shadow_area, 0,
1347 					      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
1348 				memset(info, 0,
1349 				       sizeof(struct irdma_cq_poll_info));
1350 				return irdma_uk_cq_poll_cmpl(cq, info);
1351 			}
1352 		}
1353 		if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
1354 			info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
1355 			if (!info->comp_status)
1356 				info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
1357 			if (!qp->sq_wrtrk_array[wqe_idx].signaled) {
1358 				ret_code = -EFAULT;
1359 				goto exit;
1360 			}
1361 			info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
1362 			IRDMA_RING_SET_TAIL(qp->sq_ring,
1363 					    wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
1364 		} else {
1365 			if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
1366 				ret_code = -ENOENT;
1367 				goto exit;
1368 			}
1369 
1370 			do {
1371 				__le64 *sw_wqe;
1372 				u64 wqe_qword;
1373 				u32 tail;
1374 
1375 				tail = qp->sq_ring.tail;
1376 				sw_wqe = qp->sq_base[tail].elem;
1377 				get_64bit_val(sw_wqe, 24,
1378 					      &wqe_qword);
1379 				info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE,
1380 							      wqe_qword);
1381 				IRDMA_RING_SET_TAIL(qp->sq_ring,
1382 						    tail + qp->sq_wrtrk_array[tail].quanta);
1383 				if (info->op_type != IRDMAQP_OP_NOP) {
1384 					info->wr_id = qp->sq_wrtrk_array[tail].wrid;
1385 					info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
1386 					break;
1387 				}
1388 			} while (1);
1389 			if (info->op_type == IRDMA_OP_TYPE_BIND_MW &&
1390 			    info->minor_err == FLUSH_PROT_ERR)
1391 				info->minor_err = FLUSH_MW_BIND_ERR;
1392 			qp->sq_flush_seen = true;
1393 			if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
1394 				qp->sq_flush_complete = true;
1395 		}
1396 		pring = &qp->sq_ring;
1397 	}
1398 
1399 	ret_code = 0;
1400 
1401 exit:
1402 	if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
1403 		/* Park CQ head during a flush to generate additional CQEs
1404 		 * from SW for all unprocessed WQEs. For GEN3 and beyond
1405 		 * FW will generate/flush these CQEs so move to the next CQE
1406 		 */
1407 		if (pring && IRDMA_RING_MORE_WORK(*pring))
1408 			move_cq_head = qp->uk_attrs->hw_rev > IRDMA_GEN_2;
1409 	}
1410 
1411 	if (move_cq_head) {
1412 		IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
1413 		if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
1414 			cq->polarity ^= 1;
1415 
1416 		if (ext_valid && !cq->avoid_mem_cflct) {
1417 			IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
1418 			if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
1419 				cq->polarity ^= 1;
1420 		}
1421 
1422 		IRDMA_RING_MOVE_TAIL(cq->cq_ring);
1423 		if (!cq->avoid_mem_cflct && ext_valid)
1424 			IRDMA_RING_MOVE_TAIL(cq->cq_ring);
1425 		if (IRDMA_RING_CURRENT_HEAD(cq->cq_ring) & 0x3F || irdma_uk_cq_empty(cq))
1426 			set_64bit_val(cq->shadow_area, 0,
1427 				      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
1428 	} else {
1429 		qword3 &= ~IRDMA_CQ_WQEIDX;
1430 		qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
1431 		set_64bit_val(cqe, 24, qword3);
1432 	}
1433 
1434 	return ret_code;
1435 }
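
/*
 * Usage sketch (illustrative; handle_completion() is a hypothetical
 * consumer): a caller typically drains the CQ until -ENOENT, which means no
 * valid CQE is present at the current polarity:
 *
 *	struct irdma_cq_poll_info cq_info;
 *
 *	while (!irdma_uk_cq_poll_cmpl(cq, &cq_info))
 *		handle_completion(&cq_info);
 */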
1436 
1437 /**
1438  * irdma_round_up_wq - round up qp wq depth to the next power of two
1439  * @wqdepth: wq depth in quanta to round up
1440  */
1441 static int irdma_round_up_wq(u32 wqdepth)
1442 {
1443 	int scount = 1;
1444 
1445 	for (wqdepth--; scount <= 16; scount *= 2)
1446 		wqdepth |= wqdepth >> scount;
1447 
1448 	return ++wqdepth;
1449 }
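
/*
 * Worked example (illustrative): the shift-or cascade smears the top set bit
 * of (wqdepth - 1) into every lower bit, so the final increment lands on the
 * next power of two.  For wqdepth = 200: 199 -> 0xFF -> 256; an exact power
 * of two such as 256 is returned unchanged.
 */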
1450 
1451 /**
1452  * irdma_get_wqe_shift - get shift count for maximum wqe size
1453  * @uk_attrs: qp HW attributes
1454  * @sge: maximum number of scatter-gather elements per wqe
1455  * @inline_data: Maximum inline data size
1456  * @shift: Returns the shift needed based on sge
1457  *
1458  * Shift is used to left-shift the base wqe size based on the number of
1459  * SGEs and the inline data size.
1460  * For 1 SGE or inline data <= 8, shift = 0 (wqe size of 32 bytes).
1461  * For 2 or 3 SGEs or inline data <= 39, shift = 1 (wqe size of 64 bytes).
1462  * For 4-7 SGEs or inline data <= 101, shift = 2 (wqe size of 128 bytes).
1463  * Otherwise, shift = 3 (wqe size of 256 bytes).
1464  */
1465 void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
1466 			 u32 inline_data, u8 *shift)
1467 {
1468 	*shift = 0;
1469 	if (uk_attrs->hw_rev >= IRDMA_GEN_2) {
1470 		if (sge > 1 || inline_data > 8) {
1471 			if (sge < 4 && inline_data <= 39)
1472 				*shift = 1;
1473 			else if (sge < 8 && inline_data <= 101)
1474 				*shift = 2;
1475 			else
1476 				*shift = 3;
1477 		}
1478 	} else if (sge > 1 || inline_data > 16) {
1479 		*shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
1480 	}
1481 }
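
/*
 * Worked example (illustrative, GEN_2 and later): sge = 2 with no inline data
 * gives shift = 1 (64-byte WQEs); sge = 1 with inline_data = 64 gives
 * shift = 2 (128-byte WQEs); sge = 8 or inline_data = 128 gives shift = 3
 * (256-byte WQEs).
 */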
1482 
1483 /*
1484  * irdma_get_sqdepth - get SQ depth (quanta)
1485  * @uk_attrs: qp HW attributes
1486  * @sq_size: SQ size
1487  * @shift: shift which determines size of WQE
1488  * @sqdepth: depth of SQ
1489  *
1490  */
1491 int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
1492 		      u32 *sqdepth)
1493 {
1494 	u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
1495 
1496 	*sqdepth = irdma_round_up_wq((sq_size << shift) + IRDMA_SQ_RSVD);
1497 
1498 	if (*sqdepth < min_size)
1499 		*sqdepth = min_size;
1500 	else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
1501 		return -EINVAL;
1502 
1503 	return 0;
1504 }
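
/*
 * Illustrative example: with sq_size = 100 and shift = 1, the requested depth
 * is 200 quanta plus the IRDMA_SQ_RSVD reserved slots, rounded up to the next
 * power of two by irdma_round_up_wq(), then clamped below by
 * min_hw_wq_size << shift and rejected if it exceeds max_hw_wq_quanta.
 */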
1505 
1506 /*
1507  * irdma_get_rqdepth - get RQ depth (quanta)
1508  * @uk_attrs: qp HW attributes
1509  * @rq_size: RQ size
1510  * @shift: shift which determines size of WQE
1511  * @rqdepth: depth of RQ
1512  */
1513 int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
1514 		      u32 *rqdepth)
1515 {
1516 	u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
1517 
1518 	*rqdepth = irdma_round_up_wq((rq_size << shift) + IRDMA_RQ_RSVD);
1519 
1520 	if (*rqdepth < min_size)
1521 		*rqdepth = min_size;
1522 	else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
1523 		return -EINVAL;
1524 
1525 	return 0;
1526 }
1527 
1528 /*
1529  * irdma_get_srqdepth - get SRQ depth (quanta)
1530  * @uk_attrs: qp HW attributes
1531  * @srq_size: SRQ size
1532  * @shift: shift which determines size of WQE
1533  * @srqdepth: depth of SRQ
1534  */
1535 int irdma_get_srqdepth(struct irdma_uk_attrs *uk_attrs, u32 srq_size, u8 shift,
1536 		       u32 *srqdepth)
1537 {
1538 	*srqdepth = irdma_round_up_wq((srq_size << shift) + IRDMA_RQ_RSVD);
1539 
1540 	if (*srqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
1541 		*srqdepth = uk_attrs->min_hw_wq_size << shift;
1542 	else if (*srqdepth > uk_attrs->max_hw_srq_quanta)
1543 		return -EINVAL;
1544 
1545 	return 0;
1546 }
1547 
1548 static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
1549 	.iw_copy_inline_data = irdma_copy_inline_data,
1550 	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
1551 	.iw_set_fragment = irdma_set_fragment,
1552 	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe,
1553 };
1554 
1555 static const struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = {
1556 	.iw_copy_inline_data = irdma_copy_inline_data_gen_1,
1557 	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1,
1558 	.iw_set_fragment = irdma_set_fragment_gen_1,
1559 	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe_gen_1,
1560 };
1561 
1562 /**
1563  * irdma_setup_connection_wqes - setup WQEs necessary to complete
1564  * connection.
1565  * @qp: hw qp (user and kernel)
1566  * @info: qp initialization info
1567  */
1568 static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
1569 					struct irdma_qp_uk_init_info *info)
1570 {
1571 	u16 move_cnt = 1;
1572 
1573 	if (!info->legacy_mode &&
1574 	    (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE))
1575 		move_cnt = 3;
1576 
1577 	qp->conn_wqes = move_cnt;
1578 	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
1579 	IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
1580 }
1581 
1582 /**
1583  * irdma_uk_srq_init - initialize shared receive queue
1584  * @srq: hw srq (user and kernel)
1585  * @info: srq initialization info
1586  *
1587  * Initializes the vars used in both user and kernel mode.
1588  * The size of the wqe depends on the number of max fragments
1589  * allowed. The size of the wqe times the number of wqes should
1590  * equal the amount of memory allocated for the srq.
1591  */
1592 int irdma_uk_srq_init(struct irdma_srq_uk *srq,
1593 		      struct irdma_srq_uk_init_info *info)
1594 {
1595 	u8 rqshift;
1596 
1597 	srq->uk_attrs = info->uk_attrs;
1598 	if (info->max_srq_frag_cnt > srq->uk_attrs->max_hw_wq_frags)
1599 		return -EINVAL;
1600 
1601 	irdma_get_wqe_shift(srq->uk_attrs, info->max_srq_frag_cnt, 0, &rqshift);
1602 	srq->srq_caps = info->srq_caps;
1603 	srq->srq_base = info->srq;
1604 	srq->shadow_area = info->shadow_area;
1605 	srq->srq_id = info->srq_id;
1606 	srq->srwqe_polarity = 0;
1607 	srq->srq_size = info->srq_size;
1608 	srq->wqe_size = rqshift;
1609 	srq->max_srq_frag_cnt = min(srq->uk_attrs->max_hw_wq_frags,
1610 				    ((u32)2 << rqshift) - 1);
1611 	IRDMA_RING_INIT(srq->srq_ring, srq->srq_size);
1612 	srq->wqe_size_multiplier = 1 << rqshift;
1613 	srq->wqe_ops = iw_wqe_uk_ops;
1614 
1615 	return 0;
1616 }
1617 
1618 /**
1619  * irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ
1620  * @ukinfo: qp initialization info
1621  * @sq_shift: Returns shift of SQ
1622  * @rq_shift: Returns shift of RQ
1623  */
1624 void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
1625 			    u8 *rq_shift)
1626 {
1627 	bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2;
1628 
1629 	irdma_get_wqe_shift(ukinfo->uk_attrs,
1630 			    imm_support ? ukinfo->max_sq_frag_cnt + 1 :
1631 					  ukinfo->max_sq_frag_cnt,
1632 			    ukinfo->max_inline_data, sq_shift);
1633 
1634 	irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
1635 			    rq_shift);
1636 
1637 	if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
1638 		if (ukinfo->abi_ver > 4)
1639 			*rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
1640 	}
1641 }
1642 
1643 /**
1644  * irdma_uk_calc_depth_shift_sq - calculate depth and shift for SQ size.
1645  * @ukinfo: qp initialization info
1646  * @sq_depth: Returns depth of SQ
1647  * @sq_shift: Returns shift of SQ
1648  */
1649 int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
1650 				 u32 *sq_depth, u8 *sq_shift)
1651 {
1652 	bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2;
1653 	int status;
1654 
1655 	irdma_get_wqe_shift(ukinfo->uk_attrs,
1656 			    imm_support ? ukinfo->max_sq_frag_cnt + 1 :
1657 			    ukinfo->max_sq_frag_cnt,
1658 			    ukinfo->max_inline_data, sq_shift);
1659 	status = irdma_get_sqdepth(ukinfo->uk_attrs, ukinfo->sq_size,
1660 				   *sq_shift, sq_depth);
1661 
1662 	return status;
1663 }
1664 
1665 /**
1666  * irdma_uk_calc_depth_shift_rq - calculate depth and shift for RQ size.
1667  * @ukinfo: qp initialization info
1668  * @rq_depth: Returns depth of RQ
1669  * @rq_shift: Returns shift of RQ
1670  */
1671 int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
1672 				 u32 *rq_depth, u8 *rq_shift)
1673 {
1674 	int status;
1675 
1676 	irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
1677 			    rq_shift);
1678 
1679 	if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
1680 		if (ukinfo->abi_ver > 4)
1681 			*rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
1682 	}
1683 
1684 	status = irdma_get_rqdepth(ukinfo->uk_attrs, ukinfo->rq_size,
1685 				   *rq_shift, rq_depth);
1686 
1687 	return status;
1688 }
1689 
1690 /**
1691  * irdma_uk_qp_init - initialize shared qp
1692  * @qp: hw qp (user and kernel)
1693  * @info: qp initialization info
1694  *
1695  * Initializes the vars used in both user and kernel mode.
1696  * The size of the wqe depends on the number of max fragments
1697  * allowed. The size of the wqe times the number of wqes should
1698  * equal the amount of memory allocated for the sq and rq.
1699  */
1700 int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
1701 {
1702 	int ret_code = 0;
1703 	u32 sq_ring_size;
1704 
1705 	qp->uk_attrs = info->uk_attrs;
1706 	if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
1707 	    info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
1708 		return -EINVAL;
1709 
1710 	qp->qp_caps = info->qp_caps;
1711 	qp->sq_base = info->sq;
1712 	qp->rq_base = info->rq;
1713 	qp->qp_type = info->type ? info->type : IRDMA_QP_TYPE_IWARP;
1714 	qp->shadow_area = info->shadow_area;
1715 	qp->sq_wrtrk_array = info->sq_wrtrk_array;
1716 
1717 	qp->rq_wrid_array = info->rq_wrid_array;
1718 	qp->wqe_alloc_db = info->wqe_alloc_db;
1719 	qp->qp_id = info->qp_id;
1720 	qp->sq_size = info->sq_size;
1721 	qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
1722 	sq_ring_size = qp->sq_size << info->sq_shift;
1723 	IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
1724 	if (info->first_sq_wq) {
1725 		irdma_setup_connection_wqes(qp, info);
1726 		qp->swqe_polarity = 1;
1727 		qp->first_sq_wq = true;
1728 	} else {
1729 		qp->swqe_polarity = 0;
1730 	}
1731 	qp->swqe_polarity_deferred = 1;
1732 	qp->rwqe_polarity = 0;
1733 	qp->rq_size = info->rq_size;
1734 	qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
1735 	qp->max_inline_data = info->max_inline_data;
1736 	qp->rq_wqe_size = info->rq_shift;
1737 	IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
1738 	qp->rq_wqe_size_multiplier = 1 << info->rq_shift;
1739 	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
1740 		qp->wqe_ops = iw_wqe_uk_ops_gen_1;
1741 	else
1742 		qp->wqe_ops = iw_wqe_uk_ops;
1743 	qp->srq_uk = info->srq_uk;
1744 	return ret_code;
1745 }
1746 
1747 /**
1748  * irdma_uk_cq_init - initialize shared cq (user and kernel)
1749  * @cq: hw cq
1750  * @info: hw cq initialization info
1751  */
1752 void irdma_uk_cq_init(struct irdma_cq_uk *cq,
1753 		      struct irdma_cq_uk_init_info *info)
1754 {
1755 	cq->cq_base = info->cq_base;
1756 	cq->cq_id = info->cq_id;
1757 	cq->cq_size = info->cq_size;
1758 	cq->cqe_alloc_db = info->cqe_alloc_db;
1759 	cq->cq_ack_db = info->cq_ack_db;
1760 	cq->shadow_area = info->shadow_area;
1761 	cq->avoid_mem_cflct = info->avoid_mem_cflct;
1762 	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
1763 	cq->polarity = 1;
1764 }
1765 
1766 /**
1767  * irdma_uk_clean_cq - clean cq entries
1768  * @q: completion context
1769  * @cq: cq to clean
1770  */
1771 void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
1772 {
1773 	__le64 *cqe;
1774 	u64 qword3, comp_ctx;
1775 	u32 cq_head;
1776 	u8 polarity, temp;
1777 
1778 	cq_head = cq->cq_ring.head;
1779 	temp = cq->polarity;
1780 	do {
1781 		if (cq->avoid_mem_cflct)
1782 			cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;
1783 		else
1784 			cqe = cq->cq_base[cq_head].buf;
1785 		get_64bit_val(cqe, 24, &qword3);
1786 		polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
1787 
1788 		if (polarity != temp)
1789 			break;
1790 
1791 		/* Ensure CQE contents are read after valid bit is checked */
1792 		dma_rmb();
1793 
1794 		get_64bit_val(cqe, 8, &comp_ctx);
1795 		if ((void *)(unsigned long)comp_ctx == q)
1796 			set_64bit_val(cqe, 8, 0);
1797 
1798 		cq_head = (cq_head + 1) % cq->cq_ring.size;
1799 		if (!cq_head)
1800 			temp ^= 1;
1801 	} while (true);
1802 }
1803 
1804 /**
1805  * irdma_nop - post a nop
1806  * @qp: hw qp ptr
1807  * @wr_id: work request id
1808  * @signaled: signaled for completion
1809  * @post_sq: ring doorbell
1810  */
1811 int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq)
1812 {
1813 	__le64 *wqe;
1814 	u64 hdr;
1815 	u32 wqe_idx;
1816 	struct irdma_post_sq_info info = {};
1817 
1818 	info.wr_id = wr_id;
1819 	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
1820 					 0, &info);
1821 	if (!wqe)
1822 		return -ENOMEM;
1823 
1824 	irdma_clr_wqes(qp, wqe_idx);
1825 
1826 	set_64bit_val(wqe, 0, 0);
1827 	set_64bit_val(wqe, 8, 0);
1828 	set_64bit_val(wqe, 16, 0);
1829 
1830 	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
1831 	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
1832 	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
1833 
1834 	dma_wmb(); /* make sure WQE is populated before valid bit is set */
1835 
1836 	set_64bit_val(wqe, 24, hdr);
1837 	if (post_sq)
1838 		irdma_uk_qp_post_wr(qp);
1839 
1840 	return 0;
1841 }
1842 
1843 /**
1844  * irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
1845  * @frag_cnt: number of fragments
1846  * @quanta: quanta for frag_cnt
1847  */
1848 int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
1849 {
1850 	switch (frag_cnt) {
1851 	case 0:
1852 	case 1:
1853 		*quanta = IRDMA_QP_WQE_MIN_QUANTA;
1854 		break;
1855 	case 2:
1856 	case 3:
1857 		*quanta = 2;
1858 		break;
1859 	case 4:
1860 	case 5:
1861 		*quanta = 3;
1862 		break;
1863 	case 6:
1864 	case 7:
1865 		*quanta = 4;
1866 		break;
1867 	case 8:
1868 	case 9:
1869 		*quanta = 5;
1870 		break;
1871 	case 10:
1872 	case 11:
1873 		*quanta = 6;
1874 		break;
1875 	case 12:
1876 	case 13:
1877 		*quanta = 7;
1878 		break;
1879 	case 14:
1880 	case 15: /* when immediate data is present */
1881 		*quanta = 8;
1882 		break;
1883 	default:
1884 		return -EINVAL;
1885 	}
1886 
1887 	return 0;
1888 }
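
/*
 * Illustrative note: the table encodes quanta = frag_cnt / 2 + 1 -- the first
 * 32-byte quantum holds one fragment plus the WQE header and each additional
 * quantum holds two 16-byte fragments, up to the 8-quanta maximum reached at
 * 15 fragments when immediate data occupies an extra fragment slot.
 */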
1889 
1890 /**
1891  * irdma_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
1892  * @frag_cnt: number of fragments
1893  * @wqe_size: size in bytes given frag_cnt
1894  */
1895 int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
1896 {
1897 	switch (frag_cnt) {
1898 	case 0:
1899 	case 1:
1900 		*wqe_size = 32;
1901 		break;
1902 	case 2:
1903 	case 3:
1904 		*wqe_size = 64;
1905 		break;
1906 	case 4:
1907 	case 5:
1908 	case 6:
1909 	case 7:
1910 		*wqe_size = 128;
1911 		break;
1912 	case 8:
1913 	case 9:
1914 	case 10:
1915 	case 11:
1916 	case 12:
1917 	case 13:
1918 	case 14:
1919 		*wqe_size = 256;
1920 		break;
1921 	default:
1922 		return -EINVAL;
1923 	}
1924 
1925 	return 0;
1926 }
1927