xref: /freebsd/sys/dev/irdma/irdma_uk.c (revision 5b5f7d0e77a9eee73eb5d596f43aef4e1a3674d8)
/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2015 - 2023 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "osdep.h"
#include "irdma_defs.h"
#include "irdma_user.h"
#include "irdma.h"

/**
 * irdma_set_fragment - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void
irdma_set_fragment(__le64 * wqe, u32 offset, struct ib_sge *sge,
		   u8 valid)
{
	if (sge) {
		set_64bit_val(wqe, offset,
			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
		set_64bit_val(wqe, offset + IRDMA_BYTE_8,
			      FIELD_PREP(IRDMAQPSQ_VALID, valid) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->length) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->lkey));
	} else {
		set_64bit_val(wqe, offset, 0);
		set_64bit_val(wqe, offset + IRDMA_BYTE_8,
			      FIELD_PREP(IRDMAQPSQ_VALID, valid));
	}
}

/**
 * irdma_set_fragment_gen_1 - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void
irdma_set_fragment_gen_1(__le64 * wqe, u32 offset,
			 struct ib_sge *sge, u8 valid)
{
	if (sge) {
		set_64bit_val(wqe, offset,
			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
		set_64bit_val(wqe, offset + IRDMA_BYTE_8,
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->length) |
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->lkey));
	} else {
		set_64bit_val(wqe, offset, 0);
		set_64bit_val(wqe, offset + IRDMA_BYTE_8, 0);
	}
}
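
/*
 * A fragment occupies 16 bytes of a WQE: 8 bytes for the buffer address
 * (IRDMAQPSQ_FRAG_TO) followed by 8 bytes holding the length and STag.
 * The GEN_2 layout also carries a per-fragment valid bit taken from the
 * current queue polarity; the GEN_1 layout has no such bit, so only the
 * WQE header validity applies there.
 */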

/**
 * irdma_nop_hdr - Format header section of noop WQE
 * @qp: hw qp ptr
 */
static inline u64
irdma_nop_hdr(struct irdma_qp_uk *qp)
{
	return FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, false) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
}

/**
 * irdma_nop_1 - insert a NOP wqe
 * @qp: hw qp ptr
 */
static int
irdma_nop_1(struct irdma_qp_uk *qp)
{
	__le64 *wqe;
	u32 wqe_idx;

	if (!qp->sq_ring.head)
		return -EINVAL;

	wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	wqe = qp->sq_base[wqe_idx].elem;

	qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA;

	set_64bit_val(wqe, IRDMA_BYTE_0, 0);
	set_64bit_val(wqe, IRDMA_BYTE_8, 0);
	set_64bit_val(wqe, IRDMA_BYTE_16, 0);

	/* make sure WQE is written before valid bit is set */
	irdma_wmb();

	set_64bit_val(wqe, IRDMA_BYTE_24, irdma_nop_hdr(qp));

	return 0;
}

/**
 * irdma_clr_wqes - clear next 128 sq entries
 * @qp: hw qp ptr
 * @qp_wqe_idx: wqe_idx
 */
void
irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
{
	__le64 *wqe;
	u32 wqe_idx;

	if (!(qp_wqe_idx & 0x7F)) {
		wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
		wqe = qp->sq_base[wqe_idx].elem;
		if (wqe_idx)
			memset(wqe, qp->swqe_polarity ? 0 : 0xFF, 0x1000);
		else
			memset(wqe, qp->swqe_polarity ? 0xFF : 0, 0x1000);
	}
}
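
/*
 * The clear above runs once per 128-WQE chunk and pre-fills the next chunk
 * (128 WQEs * 32 bytes = 0x1000 bytes) with the complement of the valid-bit
 * polarity those entries will eventually be written with: the opposite of
 * the current polarity within this lap of the ring, or of the flipped
 * polarity when the chunk wraps to index 0. Pre-filling this way keeps the
 * hardware from mistaking stale memory for valid WQEs.
 */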

/**
 * irdma_uk_qp_post_wr - ring doorbell
 * @qp: hw qp ptr
 */
void
irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
{
	u64 temp;
	u32 hw_sq_tail;
	u32 sw_sq_head;

	/* valid bit is written and loads completed before reading shadow */
	irdma_mb();

	/* read the doorbell shadow area */
	get_64bit_val(qp->shadow_area, IRDMA_BYTE_0, &temp);

	hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
	sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	if (sw_sq_head != qp->initial_ring.head) {
		if (qp->push_dropped) {
			db_wr32(qp->qp_id, qp->wqe_alloc_db);
			qp->push_dropped = false;
		} else if (sw_sq_head != hw_sq_tail) {
			if (sw_sq_head > qp->initial_ring.head) {
				if (hw_sq_tail >= qp->initial_ring.head &&
				    hw_sq_tail < sw_sq_head)
					db_wr32(qp->qp_id, qp->wqe_alloc_db);
			} else {
				if (hw_sq_tail >= qp->initial_ring.head ||
				    hw_sq_tail < sw_sq_head)
					db_wr32(qp->qp_id, qp->wqe_alloc_db);
			}
		}
	}

	qp->initial_ring.head = qp->sq_ring.head;
}
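
/*
 * The doorbell is rung only when the shadow-area tail reported by the
 * hardware lies inside [initial_ring.head, sw_sq_head), i.e. the hardware
 * stopped within the span of WQEs posted since the last doorbell and would
 * otherwise never see the new work. The two comparison branches are the
 * same interval test with and without ring wrap-around.
 */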

/**
 * irdma_qp_ring_push_db - ring qp doorbell
 * @qp: hw qp ptr
 * @wqe_idx: wqe index
 */
static void
irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx)
{
	set_32bit_val(qp->push_db, 0,
		      FIELD_PREP(IRDMA_WQEALLOC_WQE_DESC_INDEX, wqe_idx >> 3) | qp->qp_id);
	qp->initial_ring.head = qp->sq_ring.head;
	qp->push_mode = true;
	qp->push_dropped = false;
}

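/**
 * irdma_qp_push_wqe - copy wqe to push page and ring push doorbell
 * @qp: hw qp ptr
 * @wqe: wqe just written to the sq
 * @quanta: size of the wqe in 32 byte quanta
 * @wqe_idx: index of the wqe in the sq
 * @post_sq: flag to post sq; unused in this path
 *
 * Falls back to a regular doorbell while earlier WQEs are still
 * outstanding and push mode is not yet active.
 */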
void
irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 * wqe, u16 quanta,
		  u32 wqe_idx, bool post_sq)
{
	__le64 *push;

	if (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) !=
	    IRDMA_RING_CURRENT_TAIL(qp->sq_ring) &&
	    !qp->push_mode) {
		irdma_uk_qp_post_wr(qp);
	} else {
		push = (__le64 *) ((uintptr_t)qp->push_wqe +
				   (wqe_idx & 0x7) * 0x20);
		irdma_memcpy(push, wqe, quanta * IRDMA_QP_WQE_MIN_SIZE);
		irdma_qp_ring_push_db(qp, wqe_idx);
	}
}

/**
 * irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 * @quanta: (in/out) ptr to size of WR in quanta. Modified in case pad is needed
 * @total_size: size of WR in bytes
 * @info: info on WR
 */
__le64 *
irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
			   u16 *quanta, u32 total_size,
			   struct irdma_post_sq_info *info)
{
	__le64 *wqe;
	__le64 *wqe_0 = NULL;
	u32 nop_wqe_idx;
	u16 avail_quanta, wqe_quanta = *quanta;
	u16 i;

	avail_quanta = qp->uk_attrs->max_hw_sq_chunk -
	    (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) %
	     qp->uk_attrs->max_hw_sq_chunk);

	if (*quanta <= avail_quanta) {
		/* WR fits in current chunk */
		if (*quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
			return NULL;
	} else {
		/* Need to pad with NOP */
		if (*quanta + avail_quanta >
		    IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
			return NULL;

		nop_wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
		for (i = 0; i < avail_quanta; i++) {
			irdma_nop_1(qp);
			IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);
		}
		if (qp->push_db && info->push_wqe)
			irdma_qp_push_wqe(qp, qp->sq_base[nop_wqe_idx].elem,
					  avail_quanta, nop_wqe_idx, true);
	}

	*wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	if (!*wqe_idx)
		qp->swqe_polarity = !qp->swqe_polarity;

	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, *quanta);

	irdma_clr_wqes(qp, *wqe_idx);

	wqe = qp->sq_base[*wqe_idx].elem;
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && wqe_quanta == 1 &&
	    (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {
		wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;
		wqe_0[3] = cpu_to_le64(FIELD_PREP(IRDMAQPSQ_VALID,
						  qp->swqe_polarity ? 0 : 1));
	}
	qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
	qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
	qp->sq_wrtrk_array[*wqe_idx].quanta = wqe_quanta;
	qp->sq_wrtrk_array[*wqe_idx].signaled = info->signaled;

	return wqe;
}
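
/*
 * A WR is never split across a max_hw_sq_chunk boundary. For example, with
 * an 8-quanta chunk and the ring head at quantum 6 of the chunk, a 3-quanta
 * WR causes the 2 remaining quanta to be filled with NOPs before the WR is
 * placed at the start of the next chunk.
 */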

/**
 * irdma_qp_get_next_recv_wqe - get next qp's rcv wqe
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 */
__le64 *
irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
{
	__le64 *wqe;
	int ret_code;

	if (IRDMA_RING_FULL_ERR(qp->rq_ring))
		return NULL;

	IRDMA_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
	if (ret_code)
		return NULL;

	if (!*wqe_idx)
		qp->rwqe_polarity = !qp->rwqe_polarity;
	/* rq_wqe_size_multiplier is the number of 32-byte quanta in one rq wqe */
	wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem;

	return wqe;
}

/**
 * irdma_uk_rdma_write - rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		    bool post_sq)
{
	u64 hdr;
	__le64 *wqe;
	struct irdma_rdma_write *op_info;
	u32 i, wqe_idx;
	u32 total_size = 0, byte_off;
	int ret_code;
	u32 frag_cnt, addl_frag_cnt;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.rdma_write;
	if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
		return -EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].length;

	read_fence |= info->read_fence;

	if (info->imm_data_valid)
		frag_cnt = op_info->num_lo_sges + 1;
	else
		frag_cnt = op_info->num_lo_sges;
	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
	if (!wqe)
		return -ENOSPC;

	qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));

	if (info->imm_data_valid) {
		set_64bit_val(wqe, IRDMA_BYTE_0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
		i = 0;
	} else {
		qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0,
					    op_info->lo_sg_list,
					    qp->swqe_polarity);
		i = 1;
	}

	for (byte_off = IRDMA_BYTE_32; i < op_info->num_lo_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += IRDMA_BYTE_16;
	}

	/* if frag_cnt is even, set the valid bit in the next, unused fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
	    frag_cnt) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
	    FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	    FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
	    FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
	    FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	irdma_wmb();		/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_rdma_read - rdma read command
 * @qp: hw qp ptr
 * @info: post sq information
 * @inv_stag: flag for inv_stag
 * @post_sq: flag to post sq
 */
int
irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		   bool inv_stag, bool post_sq)
{
	struct irdma_rdma_read *op_info;
	int ret_code;
	u32 i, byte_off, total_size = 0;
	bool local_fence = false;
	bool ord_fence = false;
	u32 addl_frag_cnt;
	__le64 *wqe;
	u32 wqe_idx;
	u16 quanta;
	u64 hdr;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.rdma_read;
	if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
		return -EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].length;

	ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
	if (!wqe)
		return -ENOSPC;

	if (qp->rd_fence_rate && (qp->ord_cnt++ == qp->rd_fence_rate)) {
		ord_fence = true;
		qp->ord_cnt = 0;
	}

	qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
	addl_frag_cnt = op_info->num_lo_sges > 1 ?
	    (op_info->num_lo_sges - 1) : 0;
	local_fence |= info->local_fence;

	qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, op_info->lo_sg_list,
				    qp->swqe_polarity);
	for (i = 1, byte_off = IRDMA_BYTE_32; i < op_info->num_lo_sges; ++i) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += IRDMA_BYTE_16;
	}

	/* if num_lo_sges is even, set the valid bit in the next, unused fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 &&
	    !(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}
	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
	    FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	    FIELD_PREP(IRDMAQPSQ_OPCODE,
		       (inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE,
		       info->read_fence || ord_fence ? 1 : 0) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	irdma_wmb();		/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_send - rdma send command
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
	      bool post_sq)
{
	__le64 *wqe;
	struct irdma_post_send *op_info;
	u64 hdr;
	u32 i, wqe_idx, total_size = 0, byte_off;
	int ret_code;
	u32 frag_cnt, addl_frag_cnt;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.send;
	if (qp->max_sq_frag_cnt < op_info->num_sges)
		return -EINVAL;

	for (i = 0; i < op_info->num_sges; i++)
		total_size += op_info->sg_list[i].length;

	if (info->imm_data_valid)
		frag_cnt = op_info->num_sges + 1;
	else
		frag_cnt = op_info->num_sges;
	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
	if (!wqe)
		return -ENOSPC;

	read_fence |= info->read_fence;
	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
	if (info->imm_data_valid) {
		set_64bit_val(wqe, IRDMA_BYTE_0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
		i = 0;
	} else {
		qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0,
					    frag_cnt ? op_info->sg_list : NULL,
					    qp->swqe_polarity);
		i = 1;
	}

	for (byte_off = IRDMA_BYTE_32; i < op_info->num_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->sg_list[i],
					    qp->swqe_polarity);
		byte_off += IRDMA_BYTE_16;
	}

	/* if frag_cnt is even, set the valid bit in the next, unused fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
	    frag_cnt) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
	    FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
	    FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
		       (info->imm_data_valid ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	    FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
	    FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	irdma_wmb();		/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_copy_inline_data_gen_1 - Copy inline data to wqe
 * @wqe: pointer to wqe
 * @sge_list: table of pointers to inline data
 * @num_sges: number of SGEs
 * @polarity: compatibility parameter
 */
static void
irdma_copy_inline_data_gen_1(u8 *wqe, struct ib_sge *sge_list,
			     u32 num_sges, u8 polarity)
{
	u32 quanta_bytes_remaining = 16;
	u32 i;

	for (i = 0; i < num_sges; i++) {
		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
		u32 sge_len = sge_list[i].length;

		while (sge_len) {
			u32 bytes_copied;

			bytes_copied = min(sge_len, quanta_bytes_remaining);
			irdma_memcpy(wqe, cur_sge, bytes_copied);
			wqe += bytes_copied;
			cur_sge += bytes_copied;
			quanta_bytes_remaining -= bytes_copied;
			sge_len -= bytes_copied;

			if (!quanta_bytes_remaining) {
				/* Remaining inline bytes reside after hdr */
				wqe += 16;
				quanta_bytes_remaining = 32;
			}
		}
	}
}
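
/*
 * GEN_1 inline layout: the first quantum carries 16 data bytes ahead of the
 * 16-byte header section, after which each additional 32-byte quantum is
 * pure data, so capacities run 16, 48, 80, ... bytes.
 */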

/**
 * irdma_inline_data_size_to_quanta_gen_1 - based on inline data, quanta
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static inline u16
irdma_inline_data_size_to_quanta_gen_1(u32 data_size)
{
	return data_size <= 16 ? IRDMA_QP_WQE_MIN_QUANTA : 2;
}

/**
 * irdma_copy_inline_data - Copy inline data to wqe
 * @wqe: pointer to wqe
 * @sge_list: table of pointers to inline data
 * @num_sges: number of SGEs
 * @polarity: polarity of wqe valid bit
 */
static void
irdma_copy_inline_data(u8 *wqe, struct ib_sge *sge_list,
		       u32 num_sges, u8 polarity)
{
	u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
	u32 quanta_bytes_remaining = 8;
	u32 i;
	bool first_quanta = true;

	wqe += 8;

	for (i = 0; i < num_sges; i++) {
		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
		u32 sge_len = sge_list[i].length;

		while (sge_len) {
			u32 bytes_copied;

			bytes_copied = min(sge_len, quanta_bytes_remaining);
			irdma_memcpy(wqe, cur_sge, bytes_copied);
			wqe += bytes_copied;
			cur_sge += bytes_copied;
			quanta_bytes_remaining -= bytes_copied;
			sge_len -= bytes_copied;

			if (!quanta_bytes_remaining) {
				quanta_bytes_remaining = 31;

				/* Remaining inline bytes reside after hdr */
				if (first_quanta) {
					first_quanta = false;
					wqe += 16;
				} else {
					*wqe = inline_valid;
					wqe++;
				}
			}
		}
	}
	if (!first_quanta && quanta_bytes_remaining < 31)
		*(wqe + quanta_bytes_remaining) = inline_valid;
}
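
/*
 * GEN_2 inline layout: the first quantum holds 8 data bytes (bytes 8-15)
 * between the qword0 field and the 16-byte header section, and every
 * additional quantum holds 31 data bytes plus a trailing valid byte whose
 * polarity bit lets the hardware confirm the quantum was written. The
 * final fixup above stamps that valid byte when the last quantum is only
 * partially filled.
 */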

/**
 * irdma_inline_data_size_to_quanta - based on inline data, quanta
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static u16
irdma_inline_data_size_to_quanta(u32 data_size)
{
	if (data_size <= 8)
		return IRDMA_QP_WQE_MIN_QUANTA;
	else if (data_size <= 39)
		return 2;
	else if (data_size <= 70)
		return 3;
	else if (data_size <= 101)
		return 4;
	else if (data_size <= 132)
		return 5;
	else if (data_size <= 163)
		return 6;
	else if (data_size <= 194)
		return 7;
	else
		return 8;
}
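
/*
 * The thresholds follow directly from the GEN_2 inline layout: n quanta
 * hold 8 + 31 * (n - 1) bytes, giving the 8, 39, 70, 101, ... sequence.
 */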

/**
 * irdma_uk_inline_rdma_write - inline rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
			   struct irdma_post_sq_info *info, bool post_sq)
{
	__le64 *wqe;
	struct irdma_rdma_write *op_info;
	u64 hdr = 0;
	u32 wqe_idx;
	bool read_fence = false;
	u16 quanta;
	u32 i, total_size = 0;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.rdma_write;

	if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges))
		return -EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].length;

	if (unlikely(total_size > qp->max_inline_data))
		return -EINVAL;

	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
	if (!wqe)
		return -ENOSPC;

	qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
	read_fence |= info->read_fence;
	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));

	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
	    FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	    FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
	    FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
	    FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
	    FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe ? 1 : 0) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	if (info->imm_data_valid)
		set_64bit_val(wqe, IRDMA_BYTE_0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));

	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->lo_sg_list,
					op_info->num_lo_sges, qp->swqe_polarity);

	irdma_wmb();		/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_inline_send - inline send operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_inline_send(struct irdma_qp_uk *qp,
		     struct irdma_post_sq_info *info, bool post_sq)
{
	__le64 *wqe;
	struct irdma_post_send *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool read_fence = false;
	u16 quanta;
	u32 i, total_size = 0;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.send;

	if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges))
		return -EINVAL;

	for (i = 0; i < op_info->num_sges; i++)
		total_size += op_info->sg_list[i].length;

	if (unlikely(total_size > qp->max_inline_data))
		return -EINVAL;

	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
	if (!wqe)
		return -ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_16,
		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));

	read_fence |= info->read_fence;
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
	    FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
	    FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	    FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
	    FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
		       (info->imm_data_valid ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	    FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
	    FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	if (info->imm_data_valid)
		set_64bit_val(wqe, IRDMA_BYTE_0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->sg_list,
					op_info->num_sges, qp->swqe_polarity);

	irdma_wmb();		/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_stag_local_invalidate - stag invalidate operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
			       struct irdma_post_sq_info *info,
			       bool post_sq)
{
	__le64 *wqe;
	struct irdma_inv_local_stag *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool local_fence = false;
	struct ib_sge sge = {0};
	u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.inv_local_stag;
	local_fence = info->local_fence;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, 0, info);
	if (!wqe)
		return -ENOSPC;

	sge.lkey = op_info->target_stag;
	qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, &sge, 0);

	set_64bit_val(wqe, IRDMA_BYTE_16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) |
	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	    FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	irdma_wmb();		/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	if (info->push_wqe)
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	else if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_post_receive - post receive wqe
 * @qp: hw qp ptr
 * @info: post rq information
 */
int
irdma_uk_post_receive(struct irdma_qp_uk *qp,
		      struct irdma_post_rq_info *info)
{
	u32 wqe_idx, i, byte_off;
	u32 addl_frag_cnt;
	__le64 *wqe;
	u64 hdr;

	if (qp->max_rq_frag_cnt < info->num_sges)
		return -EINVAL;

	wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
	if (!wqe)
		return -ENOSPC;

	qp->rq_wrid_array[wqe_idx] = info->wr_id;
	addl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0;
	qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, info->sg_list,
				    qp->rwqe_polarity);

	for (i = 1, byte_off = IRDMA_BYTE_32; i < info->num_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
					    qp->rwqe_polarity);
		byte_off += IRDMA_BYTE_16;
	}

	/* if num_sges is even, set the valid bit in the next, unused fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
	    info->num_sges) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->rwqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	set_64bit_val(wqe, IRDMA_BYTE_16, 0);
	hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	    FIELD_PREP(IRDMAQPSQ_VALID, qp->rwqe_polarity);

	irdma_wmb();		/* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	return 0;
}

/**
 * irdma_uk_cq_resize - reset the cq buffer info
 * @cq: cq to resize
 * @cq_base: new cq buffer addr
 * @cq_size: number of cqes
 */
void
irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)
{
	cq->cq_base = cq_base;
	cq->cq_size = cq_size;
	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->polarity = 1;
}

/**
 * irdma_uk_cq_set_resized_cnt - record the count of the resized buffers
 * @cq: cq to resize
 * @cq_cnt: the count of the resized cq buffers
 */
void
irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se;
	u8 arm_next;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, IRDMA_BYTE_32, &temp_val);

	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
	sw_cq_sel += cq_cnt;

	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
	arm_next = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT, temp_val);

	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
	    FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
	    FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
	    FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

	set_64bit_val(cq->shadow_area, IRDMA_BYTE_32, temp_val);
}

/**
 * irdma_uk_cq_request_notification - cq notification request (door bell)
 * @cq: hw cq
 * @cq_notify: notification type
 */
void
irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
				 enum irdma_cmpl_notify cq_notify)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se = 0;
	u8 arm_next = 0;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, IRDMA_BYTE_32, &temp_val);
	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
	arm_seq_num++;
	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
	arm_next_se |= 1;
	if (cq_notify == IRDMA_CQ_COMPL_EVENT)
		arm_next = 1;
	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
	    FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
	    FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
	    FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

	set_64bit_val(cq->shadow_area, IRDMA_BYTE_32, temp_val);

	irdma_wmb();		/* make sure shadow area is updated before arming doorbell */

	db_wr32(cq->cq_id, cq->cqe_alloc_db);
}

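/**
 * irdma_check_rq_cqe - resync RQ completion index if a cqe was skipped
 * @qp: hw qp ptr
 * @array_idx: (in/out) rq_wrid_array index from the cqe; reset to the
 *	       expected index when a gap is detected
 *
 * Returns -1 on a mismatch so the caller can complete the expected WR
 * with an unknown status instead of the out-of-order one.
 */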
static int
irdma_check_rq_cqe(struct irdma_qp_uk *qp, u32 *array_idx)
{
	u32 exp_idx = (qp->last_rx_cmpl_idx + 1) % qp->rq_size;

	if (*array_idx != exp_idx) {
		*array_idx = exp_idx;
		qp->last_rx_cmpl_idx = exp_idx;

		return -1;
	}

	qp->last_rx_cmpl_idx = *array_idx;

	return 0;
}

/**
 * irdma_skip_duplicate_flush_cmpl - check last cmpl and update wqe if needed
 *
 * @ring: sq/rq ring
 * @flush_seen: whether a flush for this ring was already seen
 * @comp_status: completion status
 * @wqe_idx: new value of WQE index returned if there is more work on ring
 */
static inline int
irdma_skip_duplicate_flush_cmpl(struct irdma_ring ring, u8 flush_seen,
				enum irdma_cmpl_status comp_status,
				u32 *wqe_idx)
{
	if (flush_seen) {
		if (IRDMA_RING_MORE_WORK(ring))
			*wqe_idx = ring.tail;
		else
			return -ENOENT;
	}

	return 0;
}

/**
 * irdma_detect_unsignaled_cmpls - check if unsignaled cmpl is to be reported
 * @cq: hw cq
 * @qp: hw qp
 * @info: cq poll information collected
 * @wqe_idx: index of the WR in SQ ring
 */
static int
irdma_detect_unsignaled_cmpls(struct irdma_cq_uk *cq,
			      struct irdma_qp_uk *qp,
			      struct irdma_cq_poll_info *info,
			      u32 wqe_idx)
{
	u64 qword0, qword1, qword2, qword3;
	__le64 *cqe, *wqe;
	int i;
	u32 widx;

	if (qp->sq_wrtrk_array[wqe_idx].signaled == 0) {
		cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);
		irdma_pr_err("%p %d %d\n", cqe, cq->cq_ring.head, wqe_idx);
		for (i = -10; i <= 10; i++) {
			IRDMA_GET_CQ_ELEM_AT_OFFSET(cq, i + cq->cq_ring.size, cqe);
			get_64bit_val(cqe, IRDMA_BYTE_0, &qword0);
			get_64bit_val(cqe, IRDMA_BYTE_8, &qword1);
			get_64bit_val(cqe, IRDMA_BYTE_16, &qword2);
			get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
			widx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
			irdma_pr_err("%d %04x %p %016lx %016lx %016lx %016lx ",
				     i, widx, cqe, qword0, qword1, qword2, qword3);
			if ((u8)FIELD_GET(IRDMA_CQ_SQ, qword3)) {
				irdma_pr_err("%lx %x %x %x ",
					     qp->sq_wrtrk_array[widx].wrid, qp->sq_wrtrk_array[widx].wr_len,
					     qp->sq_wrtrk_array[widx].quanta, qp->sq_wrtrk_array[widx].signaled);
				wqe = qp->sq_base[widx].elem;
				get_64bit_val(wqe, IRDMA_BYTE_0, &qword0);
				get_64bit_val(wqe, IRDMA_BYTE_8, &qword1);
				get_64bit_val(wqe, IRDMA_BYTE_16, &qword2);
				get_64bit_val(wqe, IRDMA_BYTE_24, &qword3);

				irdma_pr_err("%016lx %016lx %016lx %016lx \n",
					     qword0, qword1, qword2, qword3);
			} else {
				irdma_pr_err("\n");
			}
		}
		return -ENOENT;
	}

	return 0;
}

/**
 * irdma_uk_cq_poll_cmpl - get cq completion info
 * @cq: hw cq
 * @info: cq poll information returned
 */
int
irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
		      struct irdma_cq_poll_info *info)
{
	u64 comp_ctx, qword0, qword2, qword3;
	__le64 *cqe;
	struct irdma_qp_uk *qp;
	struct irdma_ring *pring = NULL;
	u32 wqe_idx;
	int ret_code;
	bool move_cq_head = true;
	u8 polarity;
	bool ext_valid;
	__le64 *ext_cqe;

	if (cq->avoid_mem_cflct)
		cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
	else
		cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);

	get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
	if (polarity != cq->polarity)
		return -ENOENT;

	/* Ensure CQE contents are read after valid bit is checked */
	rmb();

	ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
	if (ext_valid) {
		u64 qword6, qword7;
		u32 peek_head;

		if (cq->avoid_mem_cflct) {
			ext_cqe = (__le64 *) ((u8 *)cqe + 32);
			get_64bit_val(ext_cqe, IRDMA_BYTE_24, &qword7);
			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
		} else {
			peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
			ext_cqe = cq->cq_base[peek_head].buf;
			get_64bit_val(ext_cqe, IRDMA_BYTE_24, &qword7);
			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
			if (!peek_head)
				polarity ^= 1;
		}
		if (polarity != cq->polarity)
			return -ENOENT;

		/* Ensure ext CQE contents are read after ext valid bit is checked */
		rmb();

		info->imm_valid = (bool)FIELD_GET(IRDMA_CQ_IMMVALID, qword7);
		if (info->imm_valid) {
			u64 qword4;

			get_64bit_val(ext_cqe, IRDMA_BYTE_0, &qword4);
			info->imm_data = (u32)FIELD_GET(IRDMA_CQ_IMMDATALOW32, qword4);
		}
		info->ud_smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
		info->ud_vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
		if (info->ud_smac_valid || info->ud_vlan_valid) {
			get_64bit_val(ext_cqe, IRDMA_BYTE_16, &qword6);
			if (info->ud_vlan_valid)
				info->ud_vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
			if (info->ud_smac_valid) {
				info->ud_smac[5] = qword6 & 0xFF;
				info->ud_smac[4] = (qword6 >> 8) & 0xFF;
				info->ud_smac[3] = (qword6 >> 16) & 0xFF;
				info->ud_smac[2] = (qword6 >> 24) & 0xFF;
				info->ud_smac[1] = (qword6 >> 32) & 0xFF;
				info->ud_smac[0] = (qword6 >> 40) & 0xFF;
			}
		}
	} else {
		info->imm_valid = false;
		info->ud_smac_valid = false;
		info->ud_vlan_valid = false;
	}

	info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
	info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
	info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3);
	info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
	get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
	qp = (struct irdma_qp_uk *)(irdma_uintptr) comp_ctx;
	if (info->error) {
		info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
		info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
		switch (info->major_err) {
		case IRDMA_FLUSH_MAJOR_ERR:
			/* Set the minor error to the standard flush error code for remaining cqes */
			if (info->minor_err != FLUSH_GENERAL_ERR) {
				qword3 &= ~IRDMA_CQ_MINERR;
				qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
				set_64bit_val(cqe, IRDMA_BYTE_24, qword3);
			}
			info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
			break;
		default:
			info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
			break;
		}
	} else {
		info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
	}

	get_64bit_val(cqe, IRDMA_BYTE_0, &qword0);
	get_64bit_val(cqe, IRDMA_BYTE_16, &qword2);

	info->stat.raw = (u32)FIELD_GET(IRDMACQ_TCPSQN_ROCEPSN_RTT_TS, qword0);
	info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
	info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);

	info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
	if (!qp || qp->destroy_pending) {
		ret_code = -EFAULT;
		goto exit;
	}
	wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
	info->qp_handle = (irdma_qp_handle) (irdma_uintptr) qp;
	info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);

	if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
		u32 array_idx;

		ret_code = irdma_skip_duplicate_flush_cmpl(qp->rq_ring,
							   qp->rq_flush_seen,
							   info->comp_status,
							   &wqe_idx);
		if (ret_code != 0)
			goto exit;

		array_idx = wqe_idx / qp->rq_wqe_size_multiplier;

		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||
		    info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) {
			if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {
				ret_code = -ENOENT;
				goto exit;
			}

			info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
			info->signaled = 1;
			array_idx = qp->rq_ring.tail;
		} else {
			info->wr_id = qp->rq_wrid_array[array_idx];
			info->signaled = 1;
			if (irdma_check_rq_cqe(qp, &array_idx)) {
				info->wr_id = qp->rq_wrid_array[array_idx];
				info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
				IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
				return 0;
			}
		}

		info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);

		if (qword3 & IRDMACQ_STAG) {
			info->stag_invalid_set = true;
			info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
		} else {
			info->stag_invalid_set = false;
		}
		IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
			qp->rq_flush_seen = true;
			if (!IRDMA_RING_MORE_WORK(qp->rq_ring))
				qp->rq_flush_complete = true;
			else
				move_cq_head = false;
		}
		pring = &qp->rq_ring;
	} else {		/* q_type is IRDMA_CQE_QTYPE_SQ */
		if (qp->first_sq_wq) {
			if (wqe_idx + 1 >= qp->conn_wqes)
				qp->first_sq_wq = false;

			if (wqe_idx < qp->conn_wqes && qp->sq_ring.head == qp->sq_ring.tail) {
				IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
				IRDMA_RING_MOVE_TAIL(cq->cq_ring);
				set_64bit_val(cq->shadow_area, IRDMA_BYTE_0,
					      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
				memset(info, 0, sizeof(*info));
				return irdma_uk_cq_poll_cmpl(cq, info);
			}
		}
		/* cease posting push mode on push drop */
		if (info->push_dropped) {
			qp->push_mode = false;
			qp->push_dropped = true;
		}
		ret_code = irdma_skip_duplicate_flush_cmpl(qp->sq_ring,
							   qp->sq_flush_seen,
							   info->comp_status,
							   &wqe_idx);
		if (ret_code != 0)
			goto exit;
		if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
			info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
			info->signaled = qp->sq_wrtrk_array[wqe_idx].signaled;
			if (!info->comp_status)
				info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
			ret_code = irdma_detect_unsignaled_cmpls(cq, qp, info, wqe_idx);
			if (ret_code != 0)
				goto exit;
			info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
			IRDMA_RING_SET_TAIL(qp->sq_ring,
					    wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
		} else {
			unsigned long flags;

			spin_lock_irqsave(qp->lock, flags);
			if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
				spin_unlock_irqrestore(qp->lock, flags);
				ret_code = -ENOENT;
				goto exit;
			}

			do {
				__le64 *sw_wqe;
				u64 wqe_qword;
				u32 tail;

				tail = qp->sq_ring.tail;
				sw_wqe = qp->sq_base[tail].elem;
				get_64bit_val(sw_wqe, IRDMA_BYTE_24,
					      &wqe_qword);
				info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE,
							      wqe_qword);
				IRDMA_RING_SET_TAIL(qp->sq_ring,
						    tail + qp->sq_wrtrk_array[tail].quanta);
				if (info->op_type != IRDMAQP_OP_NOP) {
					info->wr_id = qp->sq_wrtrk_array[tail].wrid;
					info->signaled = qp->sq_wrtrk_array[tail].signaled;
					info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
					break;
				}
			} while (1);

			if (info->op_type == IRDMA_OP_TYPE_BIND_MW &&
			    info->minor_err == FLUSH_PROT_ERR)
				info->minor_err = FLUSH_MW_BIND_ERR;
			qp->sq_flush_seen = true;
			if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
				qp->sq_flush_complete = true;
			spin_unlock_irqrestore(qp->lock, flags);
		}
		pring = &qp->sq_ring;
	}

	ret_code = 0;

exit:
	if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
		if (pring && IRDMA_RING_MORE_WORK(*pring))
			move_cq_head = false;
	}

	if (move_cq_head) {
		IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
		if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
			cq->polarity ^= 1;

		if (ext_valid && !cq->avoid_mem_cflct) {
			IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
			if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
				cq->polarity ^= 1;
		}

		IRDMA_RING_MOVE_TAIL(cq->cq_ring);
		if (!cq->avoid_mem_cflct && ext_valid)
			IRDMA_RING_MOVE_TAIL(cq->cq_ring);
		set_64bit_val(cq->shadow_area, IRDMA_BYTE_0,
			      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
	} else {
		qword3 &= ~IRDMA_CQ_WQEIDX;
		qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
		set_64bit_val(cqe, IRDMA_BYTE_24, qword3);
	}

	return ret_code;
}
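
/*
 * A typical consumer drains the CQ until -ENOENT and then re-arms it; a
 * minimal sketch (handle_completion() is a placeholder for the caller's
 * own processing, and locking is omitted):
 *
 *	struct irdma_cq_poll_info info;
 *
 *	while (!irdma_uk_cq_poll_cmpl(cq, &info))
 *		handle_completion(&info);
 *	irdma_uk_cq_request_notification(cq, IRDMA_CQ_COMPL_EVENT);
 */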

/**
 * irdma_round_up_wq - return round up qp wq depth
 * @wqdepth: wq depth in quanta to round up
 */
static int
irdma_round_up_wq(u32 wqdepth)
{
	int scount = 1;

	for (wqdepth--; scount <= 16; scount *= 2)
		wqdepth |= wqdepth >> scount;

	return ++wqdepth;
}
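
/*
 * The shift-and-or loop smears the highest set bit of (wqdepth - 1) into
 * every lower bit position, so the final increment yields the next power
 * of two greater than or equal to the original value: 300 becomes 512,
 * and an exact power of two such as 256 is returned unchanged.
 */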

/**
 * irdma_get_wqe_shift - get shift count for maximum wqe size
 * @uk_attrs: qp HW attributes
 * @sge: Maximum Scatter Gather Elements wqe
 * @inline_data: Maximum inline data size
 * @shift: Returns the shift needed based on sge
 *
 * Shift is used to left-shift the base wqe size based on the number of SGEs
 * and the inline data size. For 1 SGE or inline data <= 8, shift = 0 (wqe
 * size of 32 bytes). For 2 or 3 SGEs or inline data <= 39, shift = 1 (wqe
 * size of 64 bytes). For 4-7 SGEs or inline data <= 101, shift = 2 (wqe
 * size of 128 bytes). Otherwise, shift = 3 (wqe size of 256 bytes).
 */
void
irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
		    u32 inline_data, u8 *shift)
{
	*shift = 0;
	if (uk_attrs->hw_rev >= IRDMA_GEN_2) {
		if (sge > 1 || inline_data > 8) {
			if (sge < 4 && inline_data <= 39)
				*shift = 1;
			else if (sge < 8 && inline_data <= 101)
				*shift = 2;
			else
				*shift = 3;
		}
	} else if (sge > 1 || inline_data > 16) {
		*shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
	}
}

/**
 * irdma_get_sqdepth - get SQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @sq_size: SQ size
 * @shift: shift which determines size of WQE
 * @sqdepth: depth of SQ
 */
int
irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *sqdepth)
{
	u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;

	*sqdepth = irdma_round_up_wq((sq_size << shift) + IRDMA_SQ_RSVD);

	if (*sqdepth < min_size)
		*sqdepth = min_size;
	else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
		return -EINVAL;

	return 0;
}

/**
 * irdma_get_rqdepth - get RQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @rq_size: SRQ size
 * @shift: shift which determines size of WQE
 * @rqdepth: depth of RQ/SRQ
 */
int
irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, u32 *rqdepth)
{
	u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;

	*rqdepth = irdma_round_up_wq((rq_size << shift) + IRDMA_RQ_RSVD);

	if (*rqdepth < min_size)
		*rqdepth = min_size;
	else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
		return -EINVAL;

	return 0;
}

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
	.iw_copy_inline_data = irdma_copy_inline_data,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
	.iw_set_fragment = irdma_set_fragment,
};

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = {
	.iw_copy_inline_data = irdma_copy_inline_data_gen_1,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1,
	.iw_set_fragment = irdma_set_fragment_gen_1,
};

/**
 * irdma_setup_connection_wqes - setup WQEs necessary to complete
 * connection.
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 */
static void
irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
			    struct irdma_qp_uk_init_info *info)
{
	u16 move_cnt = 1;

	if (info->start_wqe_idx)
		move_cnt = info->start_wqe_idx;
	else if (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE)
		move_cnt = 3;
	qp->conn_wqes = move_cnt;
	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
	IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
}

/**
 * irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ
 * @ukinfo: qp initialization info
 * @sq_shift: Returns shift of SQ
 * @rq_shift: Returns shift of RQ
 */
void
irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
		       u8 *rq_shift)
{
	bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2 ? true : false;

	irdma_get_wqe_shift(ukinfo->uk_attrs,
			    imm_support ? ukinfo->max_sq_frag_cnt + 1 :
			    ukinfo->max_sq_frag_cnt,
			    ukinfo->max_inline_data, sq_shift);

	irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
			    rq_shift);

	if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
		if (ukinfo->abi_ver > 4)
			*rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
	}
}

/**
 * irdma_uk_calc_depth_shift_sq - calculate depth and shift for SQ size.
 * @ukinfo: qp initialization info
 * @sq_depth: Returns depth of SQ
 * @sq_shift: Returns shift of SQ
 */
int
irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
			     u32 *sq_depth, u8 *sq_shift)
{
	bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2 ? true : false;
	int status;

	irdma_get_wqe_shift(ukinfo->uk_attrs,
			    imm_support ? ukinfo->max_sq_frag_cnt + 1 :
			    ukinfo->max_sq_frag_cnt,
			    ukinfo->max_inline_data, sq_shift);
	status = irdma_get_sqdepth(ukinfo->uk_attrs, ukinfo->sq_size,
				   *sq_shift, sq_depth);

	return status;
}

/**
 * irdma_uk_calc_depth_shift_rq - calculate depth and shift for RQ size.
 * @ukinfo: qp initialization info
 * @rq_depth: Returns depth of RQ
 * @rq_shift: Returns shift of RQ
 */
int
irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
			     u32 *rq_depth, u8 *rq_shift)
{
	int status;

	irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
			    rq_shift);

	if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
		if (ukinfo->abi_ver > 4)
			*rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
	}

	status = irdma_get_rqdepth(ukinfo->uk_attrs, ukinfo->rq_size,
				   *rq_shift, rq_depth);

	return status;
}

/**
 * irdma_uk_qp_init - initialize shared qp
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 *
 * Initializes the vars used in both user and kernel mode. The size of
 * the wqe depends on the maximum number of fragments allowed, and the
 * size of the wqe times the number of wqes should equal the amount of
 * memory allocated for the sq and rq.
 */
int
irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
{
	int ret_code = 0;
	u32 sq_ring_size;

	qp->uk_attrs = info->uk_attrs;
	if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
	    info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
		return -EINVAL;

	qp->qp_caps = info->qp_caps;
	qp->sq_base = info->sq;
	qp->rq_base = info->rq;
	qp->qp_type = info->type ? info->type : IRDMA_QP_TYPE_IWARP;
	qp->shadow_area = info->shadow_area;
	qp->sq_wrtrk_array = info->sq_wrtrk_array;

	qp->rq_wrid_array = info->rq_wrid_array;
	qp->wqe_alloc_db = info->wqe_alloc_db;
	qp->last_rx_cmpl_idx = 0xffffffff;
	qp->rd_fence_rate = info->rd_fence_rate;
	qp->qp_id = info->qp_id;
	qp->sq_size = info->sq_size;
	qp->push_mode = false;
	qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
	sq_ring_size = qp->sq_size << info->sq_shift;
	IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
	IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
	if (info->first_sq_wq) {
		irdma_setup_connection_wqes(qp, info);
		qp->swqe_polarity = 1;
		qp->first_sq_wq = true;
	} else {
		qp->swqe_polarity = 0;
	}
	qp->swqe_polarity_deferred = 1;
	qp->rwqe_polarity = 0;
	qp->rq_size = info->rq_size;
	qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
	qp->max_inline_data = info->max_inline_data;
	qp->rq_wqe_size = info->rq_shift;
	IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
	qp->rq_wqe_size_multiplier = 1 << info->rq_shift;
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
		qp->wqe_ops = iw_wqe_uk_ops_gen_1;
	else
		qp->wqe_ops = iw_wqe_uk_ops;
	qp->start_wqe_idx = info->start_wqe_idx;

	return ret_code;
}

/**
 * irdma_uk_cq_init - initialize shared cq (user and kernel)
 * @cq: hw cq
 * @info: hw cq initialization info
 */
int
irdma_uk_cq_init(struct irdma_cq_uk *cq, struct irdma_cq_uk_init_info *info)
{
	cq->cq_base = info->cq_base;
	cq->cq_id = info->cq_id;
	cq->cq_size = info->cq_size;
	cq->cqe_alloc_db = info->cqe_alloc_db;
	cq->cq_ack_db = info->cq_ack_db;
	cq->shadow_area = info->shadow_area;
	cq->avoid_mem_cflct = info->avoid_mem_cflct;
	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->polarity = 1;

	return 0;
}

/**
 * irdma_uk_clean_cq - clean cq entries
 * @q: completion context
 * @cq: cq to clean
 */
int
irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
{
	__le64 *cqe;
	u64 qword3, comp_ctx;
	u32 cq_head;
	u8 polarity, temp;

	cq_head = cq->cq_ring.head;
	temp = cq->polarity;
	do {
		if (cq->avoid_mem_cflct)
			cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;
		else
			cqe = cq->cq_base[cq_head].buf;
		get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
		polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);

		if (polarity != temp)
			break;

		/* Ensure CQE contents are read after valid bit is checked */
		rmb();

		get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
		if ((void *)(irdma_uintptr) comp_ctx == q)
			set_64bit_val(cqe, IRDMA_BYTE_8, 0);

		cq_head = (cq_head + 1) % cq->cq_ring.size;
		if (!cq_head)
			temp ^= 1;
	} while (true);
	return 0;
}

/**
 * irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
 * @frag_cnt: number of fragments
 * @quanta: quanta for frag_cnt
 */
int
irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*quanta = IRDMA_QP_WQE_MIN_QUANTA;
		break;
	case 2:
	case 3:
		*quanta = 2;
		break;
	case 4:
	case 5:
		*quanta = 3;
		break;
	case 6:
	case 7:
		*quanta = 4;
		break;
	case 8:
	case 9:
		*quanta = 5;
		break;
	case 10:
	case 11:
		*quanta = 6;
		break;
	case 12:
	case 13:
		*quanta = 7;
		break;
	case 14:
	case 15:		/* when immediate data is present */
		*quanta = 8;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
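
/*
 * The mapping above is quanta = frag_cnt / 2 + 1: the first 32-byte quantum
 * pairs one 16-byte fragment with the 16-byte header section, and each
 * further quantum holds two fragments.
 */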

/**
 * irdma_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
 * @frag_cnt: number of fragments
 * @wqe_size: size in bytes given frag_cnt
 */
int
irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*wqe_size = 32;
		break;
	case 2:
	case 3:
		*wqe_size = 64;
		break;
	case 4:
	case 5:
	case 6:
	case 7:
		*wqe_size = 128;
		break;
	case 8:
	case 9:
	case 10:
	case 11:
	case 12:
	case 13:
	case 14:
		*wqe_size = 256;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}