// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
#include "defs.h"
#include "user.h"
#include "irdma.h"

/**
 * irdma_set_fragment - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void irdma_set_fragment(__le64 *wqe, u32 offset, struct ib_sge *sge,
			       u8 valid)
{
	if (sge) {
		set_64bit_val(wqe, offset,
			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
		set_64bit_val(wqe, offset + 8,
			      FIELD_PREP(IRDMAQPSQ_VALID, valid) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->length) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->lkey));
	} else {
		set_64bit_val(wqe, offset, 0);
		set_64bit_val(wqe, offset + 8,
			      FIELD_PREP(IRDMAQPSQ_VALID, valid));
	}
}

/**
 * irdma_set_fragment_gen_1 - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
				     struct ib_sge *sge, u8 valid)
{
	if (sge) {
		set_64bit_val(wqe, offset,
			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
		set_64bit_val(wqe, offset + 8,
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->length) |
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->lkey));
	} else {
		set_64bit_val(wqe, offset, 0);
		set_64bit_val(wqe, offset + 8, 0);
	}
}
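
/*
 * Note: unlike irdma_set_fragment() above, the GEN_1 fragment layout has no
 * per-fragment valid bit, so the @valid argument is unused in the body and a
 * NULL @sge simply zeroes both quadwords.
 */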

/**
 * irdma_nop_1 - insert a NOP wqe
 * @qp: hw qp ptr
 */
static int irdma_nop_1(struct irdma_qp_uk *qp)
{
	u64 hdr;
	__le64 *wqe;
	u32 wqe_idx;
	bool signaled = false;

	if (!qp->sq_ring.head)
		return -EINVAL;

	wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	wqe = qp->sq_base[wqe_idx].elem;

	qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA;

	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	/* make sure WQE is written before valid bit is set */
	dma_wmb();

	set_64bit_val(wqe, 24, hdr);

	return 0;
}

/**
 * irdma_clr_wqes - clear next 128 sq entries
 * @qp: hw qp ptr
 * @qp_wqe_idx: beginning wqe index
 *
 * Pre-clear the next 128-entry chunk to the inverse of the current valid
 * polarity so HW never mistakes a stale WQE for a newly posted one.
 */
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
{
	struct irdma_qp_quanta *sq;
	u32 wqe_idx;

	if (!(qp_wqe_idx & 0x7F)) {
		wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
		sq = qp->sq_base + wqe_idx;
		if (wqe_idx)
			memset(sq, qp->swqe_polarity ? 0 : 0xFF,
			       128 * sizeof(*sq));
		else
			memset(sq, qp->swqe_polarity ? 0xFF : 0,
			       128 * sizeof(*sq));
	}
}

/**
 * irdma_uk_qp_post_wr - ring doorbell
 * @qp: hw qp ptr
 */
void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
{
	dma_wmb();
	writel(qp->qp_id, qp->wqe_alloc_db);
}

/**
 * irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 * @quanta: size of WR in quanta
 * @total_size: size of WR in bytes
 * @info: info on WR
 */
__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
				   u16 quanta, u32 total_size,
				   struct irdma_post_sq_info *info)
{
	__le64 *wqe;
	__le64 *wqe_0 = NULL;
	u16 avail_quanta;
	u16 i;

	avail_quanta = qp->uk_attrs->max_hw_sq_chunk -
		       (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) %
		       qp->uk_attrs->max_hw_sq_chunk);
	if (quanta <= avail_quanta) {
		/* WR fits in current chunk */
		if (quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
			return NULL;
	} else {
		/* Need to pad with NOP */
		if (quanta + avail_quanta >
			IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
			return NULL;

		for (i = 0; i < avail_quanta; i++) {
			irdma_nop_1(qp);
			IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);
		}
	}

	*wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	if (!*wqe_idx)
		qp->swqe_polarity = !qp->swqe_polarity;

	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, quanta);

	wqe = qp->sq_base[*wqe_idx].elem;
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && quanta == 1 &&
	    (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {
		wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;
		wqe_0[3] = cpu_to_le64(FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity ? 0 : 1));
	}
	qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
	qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
	qp->sq_wrtrk_array[*wqe_idx].quanta = quanta;
	qp->sq_wrtrk_array[*wqe_idx].signaled = info->signaled;

	return wqe;
}
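
/*
 * Illustrative sketch (not part of the driver): the pattern every SQ verb in
 * this file follows when consuming a WQE -- reserve, pre-clear, fill, then
 * publish the valid bit last.  The NOP opcode keeps the example minimal;
 * real verbs derive the header from the request in @info.
 */
static int __maybe_unused irdma_example_post_nop(struct irdma_qp_uk *qp,
						 struct irdma_post_sq_info *info)
{
	u32 wqe_idx;
	__le64 *wqe;
	u64 hdr;

	/* Reserve quanta on the SQ ring; NULL means the ring is full. */
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
					 0, info);
	if (!wqe)
		return -ENOMEM;

	/* Pre-clear the next chunk so stale valid bits are never seen. */
	irdma_clr_wqes(qp, wqe_idx);

	/* Populate the WQE body (quadwords at offsets 0, 8 and 16). */
	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	/* Order body writes before the valid bit, then ring the doorbell. */
	dma_wmb();
	set_64bit_val(wqe, 24, hdr);
	irdma_uk_qp_post_wr(qp);

	return 0;
}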

__le64 *irdma_srq_get_next_recv_wqe(struct irdma_srq_uk *srq, u32 *wqe_idx)
{
	int ret_code;
	__le64 *wqe;

	if (IRDMA_RING_FULL_ERR(srq->srq_ring))
		return NULL;

	IRDMA_ATOMIC_RING_MOVE_HEAD(srq->srq_ring, *wqe_idx, ret_code);
	if (ret_code)
		return NULL;

	if (!*wqe_idx)
		srq->srwqe_polarity = !srq->srwqe_polarity;
	/* wqe_size_multiplier is the number of 32-byte quanta in one srq wqe */
	wqe = srq->srq_base[*wqe_idx * (srq->wqe_size_multiplier)].elem;

	return wqe;
}

/**
 * irdma_qp_get_next_recv_wqe - get next qp's rcv wqe
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 */
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
{
	__le64 *wqe;
	int ret_code;

	if (IRDMA_RING_FULL_ERR(qp->rq_ring))
		return NULL;

	IRDMA_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
	if (ret_code)
		return NULL;

	if (!*wqe_idx)
		qp->rwqe_polarity = !qp->rwqe_polarity;
	/* rq_wqe_size_multiplier is the number of 32-byte quanta in one rq wqe */
	wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem;

	return wqe;
}

/**
 * irdma_uk_rdma_write - rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
			bool post_sq)
{
	u64 hdr;
	__le64 *wqe;
	struct irdma_rdma_write *op_info;
	u32 i, wqe_idx;
	u32 total_size = 0, byte_off;
	int ret_code;
	u32 frag_cnt, addl_frag_cnt;
	bool read_fence = false;
	u16 quanta;

	op_info = &info->op.rdma_write;
	if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
		return -EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].length;

	read_fence |= info->read_fence;

	if (info->imm_data_valid)
		frag_cnt = op_info->num_lo_sges + 1;
	else
		frag_cnt = op_info->num_lo_sges;
	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));

	if (info->imm_data_valid) {
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
		i = 0;
	} else {
		qp->wqe_ops.iw_set_fragment(wqe, 0,
					    op_info->lo_sg_list,
					    qp->swqe_polarity);
		i = 1;
	}

	for (byte_off = 32; i < op_info->num_lo_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
	    frag_cnt) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}
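
/*
 * Illustrative sketch (not part of the driver): posting a one-fragment RDMA
 * write through irdma_uk_rdma_write().  The helper and the wr_id cookie are
 * hypothetical; field names come from the structures used above.
 */
static int __maybe_unused irdma_example_rdma_write(struct irdma_qp_uk *qp,
						   struct ib_sge *sge,
						   u64 rem_addr, u32 rem_key)
{
	struct irdma_post_sq_info info = {};

	info.wr_id = 1;				/* placeholder cookie */
	info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
	info.signaled = true;
	info.op.rdma_write.lo_sg_list = sge;
	info.op.rdma_write.num_lo_sges = 1;
	info.op.rdma_write.rem_addr.addr = rem_addr;
	info.op.rdma_write.rem_addr.lkey = rem_key;

	/* post_sq = true rings the doorbell immediately */
	return irdma_uk_rdma_write(qp, &info, true);
}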

/**
 * irdma_uk_atomic_fetch_add - atomic fetch and add operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_atomic_fetch_add(struct irdma_qp_uk *qp,
			      struct irdma_post_sq_info *info, bool post_sq)
{
	struct irdma_atomic_fetch_add *op_info;
	u32 total_size = 0;
	u16 quanta = 2;
	u32 wqe_idx;
	__le64 *wqe;
	u64 hdr;

	op_info = &info->op.atomic_fetch_add;
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	set_64bit_val(wqe, 0, op_info->tagged_offset);
	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMAQPSQ_STAG, op_info->stag));
	set_64bit_val(wqe, 16, op_info->remote_tagged_offset);

	hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, 1) |
	      FIELD_PREP(IRDMAQPSQ_REMOTE_STAG, op_info->remote_stag) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_ATOMIC_FETCH_ADD) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	set_64bit_val(wqe, 32, op_info->fetch_add_data_bytes);
	set_64bit_val(wqe, 40, 0);
	set_64bit_val(wqe, 48, 0);
	set_64bit_val(wqe, 56,
		      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity));

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_atomic_compare_swap - atomic compare and swap operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_atomic_compare_swap(struct irdma_qp_uk *qp,
				 struct irdma_post_sq_info *info, bool post_sq)
{
	struct irdma_atomic_compare_swap *op_info;
	u32 total_size = 0;
	u16 quanta = 2;
	u32 wqe_idx;
	__le64 *wqe;
	u64 hdr;

	op_info = &info->op.atomic_compare_swap;
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	set_64bit_val(wqe, 0, op_info->tagged_offset);
	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMAQPSQ_STAG, op_info->stag));
	set_64bit_val(wqe, 16, op_info->remote_tagged_offset);

	hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, 1) |
	      FIELD_PREP(IRDMAQPSQ_REMOTE_STAG, op_info->remote_stag) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_ATOMIC_COMPARE_SWAP_ADD) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	set_64bit_val(wqe, 32, op_info->swap_data_bytes);
	set_64bit_val(wqe, 40, op_info->compare_data_bytes);
	set_64bit_val(wqe, 48, 0);
	set_64bit_val(wqe, 56,
		      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity));

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_srq_post_receive - post a receive wqe to a shared rq
 * @srq: shared rq ptr
 * @info: post rq information
 */
int irdma_uk_srq_post_receive(struct irdma_srq_uk *srq,
			      struct irdma_post_rq_info *info)
{
	u32 wqe_idx, i, byte_off;
	u32 addl_frag_cnt;
	__le64 *wqe;
	u64 hdr;

	if (srq->max_srq_frag_cnt < info->num_sges)
		return -EINVAL;

	wqe = irdma_srq_get_next_recv_wqe(srq, &wqe_idx);
	if (!wqe)
		return -ENOMEM;

	addl_frag_cnt = info->num_sges > 1 ? info->num_sges - 1 : 0;
	srq->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list,
				     srq->srwqe_polarity);

	for (i = 1, byte_off = 32; i < info->num_sges; i++) {
		srq->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
					     srq->srwqe_polarity);
		byte_off += 16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (srq->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
	    info->num_sges) {
		srq->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					     srq->srwqe_polarity);
		if (srq->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	set_64bit_val(wqe, 16, (u64)info->wr_id);
	hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_VALID, srq->srwqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	set_64bit_val(srq->shadow_area, 0, (wqe_idx + 1) % srq->srq_ring.size);

	return 0;
}

/**
 * irdma_uk_rdma_read - rdma read command
 * @qp: hw qp ptr
 * @info: post sq information
 * @inv_stag: flag for inv_stag
 * @post_sq: flag to post sq
 */
int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		       bool inv_stag, bool post_sq)
{
	struct irdma_rdma_read *op_info;
	int ret_code;
	u32 i, byte_off, total_size = 0;
	bool local_fence = false;
	u32 addl_frag_cnt;
	__le64 *wqe;
	u32 wqe_idx;
	u16 quanta;
	u64 hdr;

	op_info = &info->op.rdma_read;
	if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
		return -EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].length;

	ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	addl_frag_cnt = op_info->num_lo_sges > 1 ?
			(op_info->num_lo_sges - 1) : 0;
	local_fence |= info->local_fence;

	qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->lo_sg_list,
				    qp->swqe_polarity);
	for (i = 1, byte_off = 32; i < op_info->num_lo_sges; ++i) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 &&
	    !(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}
	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE,
			 (inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_send - rdma send command
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		  bool post_sq)
{
	__le64 *wqe;
	struct irdma_post_send *op_info;
	u64 hdr;
	u32 i, wqe_idx, total_size = 0, byte_off;
	int ret_code;
	u32 frag_cnt, addl_frag_cnt;
	bool read_fence = false;
	u16 quanta;

	op_info = &info->op.send;
	if (qp->max_sq_frag_cnt < op_info->num_sges)
		return -EINVAL;

	for (i = 0; i < op_info->num_sges; i++)
		total_size += op_info->sg_list[i].length;

	if (info->imm_data_valid)
		frag_cnt = op_info->num_sges + 1;
	else
		frag_cnt = op_info->num_sges;
	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	read_fence |= info->read_fence;
	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
	if (info->imm_data_valid) {
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
		i = 0;
	} else {
		qp->wqe_ops.iw_set_fragment(wqe, 0,
					    frag_cnt ? op_info->sg_list : NULL,
					    qp->swqe_polarity);
		i = 1;
	}

	for (byte_off = 32; i < op_info->num_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
	    frag_cnt) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
	      FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
			 (info->imm_data_valid ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
	      FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_set_mw_bind_wqe_gen_1 - set mw bind wqe
 * @wqe: wqe for setting mw bind
 * @op_info: info for setting bind wqe values
 */
static void irdma_set_mw_bind_wqe_gen_1(__le64 *wqe,
					struct irdma_bind_window *op_info)
{
	set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mw_stag) |
		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mr_stag));
	set_64bit_val(wqe, 16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data_gen_1 - Copy inline data to wqe
 * @wqe: pointer to wqe
 * @sge_list: table of pointers to inline data
 * @num_sges: number of SGEs
 * @polarity: compatibility parameter
 */
static void irdma_copy_inline_data_gen_1(u8 *wqe, struct ib_sge *sge_list,
					 u32 num_sges, u8 polarity)
{
	u32 quanta_bytes_remaining = 16;
	int i;

	for (i = 0; i < num_sges; i++) {
		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
		u32 sge_len = sge_list[i].length;

		while (sge_len) {
			u32 bytes_copied;

			bytes_copied = min(sge_len, quanta_bytes_remaining);
			memcpy(wqe, cur_sge, bytes_copied);
			wqe += bytes_copied;
			cur_sge += bytes_copied;
			quanta_bytes_remaining -= bytes_copied;
			sge_len -= bytes_copied;

			if (!quanta_bytes_remaining) {
				/* Remaining inline bytes reside after hdr */
				wqe += 16;
				quanta_bytes_remaining = 32;
			}
		}
	}
}

/**
 * irdma_inline_data_size_to_quanta_gen_1 - calculate quanta from inline data size
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static inline u16 irdma_inline_data_size_to_quanta_gen_1(u32 data_size)
{
	return data_size <= 16 ? IRDMA_QP_WQE_MIN_QUANTA : 2;
}

/**
 * irdma_set_mw_bind_wqe - set mw bind in wqe
 * @wqe: wqe for setting mw bind
 * @op_info: info for setting wqe values
 */
static void irdma_set_mw_bind_wqe(__le64 *wqe,
				  struct irdma_bind_window *op_info)
{
	set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mr_stag) |
		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mw_stag));
	set_64bit_val(wqe, 16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data - Copy inline data to wqe
 * @wqe: pointer to wqe
 * @sge_list: table of pointers to inline data
 * @num_sges: number of SGE's
 * @polarity: polarity of wqe valid bit
 */
static void irdma_copy_inline_data(u8 *wqe, struct ib_sge *sge_list,
				   u32 num_sges, u8 polarity)
{
	u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
	u32 quanta_bytes_remaining = 8;
	bool first_quanta = true;
	int i;

	wqe += 8;

	for (i = 0; i < num_sges; i++) {
		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
		u32 sge_len = sge_list[i].length;

		while (sge_len) {
			u32 bytes_copied;

			bytes_copied = min(sge_len, quanta_bytes_remaining);
			memcpy(wqe, cur_sge, bytes_copied);
			wqe += bytes_copied;
			cur_sge += bytes_copied;
			quanta_bytes_remaining -= bytes_copied;
			sge_len -= bytes_copied;

			if (!quanta_bytes_remaining) {
				quanta_bytes_remaining = 31;

				/* Remaining inline bytes reside after hdr */
				if (first_quanta) {
					first_quanta = false;
					wqe += 16;
				} else {
					*wqe = inline_valid;
					wqe++;
				}
			}
		}
	}
	if (!first_quanta && quanta_bytes_remaining < 31)
		*(wqe + quanta_bytes_remaining) = inline_valid;
}
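
/*
 * Layout produced above: bytes 8-15 of the first 32-byte quantum hold the
 * first 8 inline bytes (bytes 0-7 are the immediate-data slot and bytes
 * 16-31 the remote-address and header quadwords), then every subsequent
 * quantum carries 31 data bytes with its final byte reserved for the
 * inline valid flag.
 */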

/**
 * irdma_inline_data_size_to_quanta - calculate quanta from inline data size
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static u16 irdma_inline_data_size_to_quanta(u32 data_size)
{
	if (data_size <= 8)
		return IRDMA_QP_WQE_MIN_QUANTA;
	else if (data_size <= 39)
		return 2;
	else if (data_size <= 70)
		return 3;
	else if (data_size <= 101)
		return 4;
	else if (data_size <= 132)
		return 5;
	else if (data_size <= 163)
		return 6;
	else if (data_size <= 194)
		return 7;
	else
		return 8;
}
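
/*
 * Illustrative sketch (not part of the driver): the ladder above is the
 * closed form 1 + ceil((data_size - 8) / 31), capped at 8 quanta -- the
 * first quantum carries 8 inline bytes and each additional quantum 31, as
 * described in the layout note after irdma_copy_inline_data().
 */
static u16 __maybe_unused irdma_example_quanta_closed_form(u32 data_size)
{
	if (data_size <= 8)
		return IRDMA_QP_WQE_MIN_QUANTA;

	return min_t(u16, 8, 1 + DIV_ROUND_UP(data_size - 8, 31));
}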

/**
 * irdma_uk_inline_rdma_write - inline rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
			       struct irdma_post_sq_info *info, bool post_sq)
{
	__le64 *wqe;
	struct irdma_rdma_write *op_info;
	u64 hdr = 0;
	u32 wqe_idx;
	bool read_fence = false;
	u32 i, total_size = 0;
	u16 quanta;

	op_info = &info->op.rdma_write;

	if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges))
		return -EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].length;

	if (unlikely(total_size > qp->max_inline_data))
		return -EINVAL;

	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	read_fence |= info->read_fence;
	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));

	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	if (info->imm_data_valid)
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));

	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->lo_sg_list,
					op_info->num_lo_sges,
					qp->swqe_polarity);
	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_inline_send - inline send operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_inline_send(struct irdma_qp_uk *qp,
			 struct irdma_post_sq_info *info, bool post_sq)
{
	__le64 *wqe;
	struct irdma_post_send *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool read_fence = false;
	u32 i, total_size = 0;
	u16 quanta;

	op_info = &info->op.send;

	if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges))
		return -EINVAL;

	for (i = 0; i < op_info->num_sges; i++)
		total_size += op_info->sg_list[i].length;

	if (unlikely(total_size > qp->max_inline_data))
		return -EINVAL;

	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));

	read_fence |= info->read_fence;
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
	      FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
			 (info->imm_data_valid ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
	      FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	if (info->imm_data_valid)
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->sg_list,
					op_info->num_sges, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_stag_local_invalidate - stag invalidate operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
				   struct irdma_post_sq_info *info,
				   bool post_sq)
{
	__le64 *wqe;
	struct irdma_inv_local_stag *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool local_fence = false;
	struct ib_sge sge = {};

	op_info = &info->op.inv_local_stag;
	local_fence = info->local_fence;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
					 0, info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	sge.lkey = op_info->target_stag;
	qp->wqe_ops.iw_set_fragment(wqe, 0, &sge, 0);

	set_64bit_val(wqe, 16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_post_receive - post receive wqe
 * @qp: hw qp ptr
 * @info: post rq information
 */
int irdma_uk_post_receive(struct irdma_qp_uk *qp,
			  struct irdma_post_rq_info *info)
{
	u32 wqe_idx, i, byte_off;
	u32 addl_frag_cnt;
	__le64 *wqe;
	u64 hdr;

	if (qp->max_rq_frag_cnt < info->num_sges)
		return -EINVAL;

	wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
	if (!wqe)
		return -ENOMEM;

	qp->rq_wrid_array[wqe_idx] = info->wr_id;
	addl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0;
	qp->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list,
				    qp->rwqe_polarity);

	for (i = 1, byte_off = 32; i < info->num_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
					    qp->rwqe_polarity);
		byte_off += 16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
	    info->num_sges) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->rwqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	set_64bit_val(wqe, 16, 0);
	hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->rwqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	return 0;
}

/**
 * irdma_uk_cq_resize - reset the cq buffer info
 * @cq: cq to resize
 * @cq_base: new cq buffer addr
 * @cq_size: number of cqes
 */
void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)
{
	cq->cq_base = cq_base;
	cq->cq_size = cq_size;
	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->polarity = 1;
}

/**
 * irdma_uk_cq_set_resized_cnt - record the count of the resized buffers
 * @cq: cq to resize
 * @cq_cnt: the count of the resized cq buffers
 */
void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se;
	u8 arm_next;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, 32, &temp_val);

	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
	sw_cq_sel += cq_cnt;

	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
	arm_next = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT, temp_val);

	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
		   FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

	set_64bit_val(cq->shadow_area, 32, temp_val);
}

/**
 * irdma_uk_cq_request_notification - cq notification request (doorbell)
 * @cq: hw cq
 * @cq_notify: notification type
 */
void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
				      enum irdma_cmpl_notify cq_notify)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se = 0;
	u8 arm_next = 0;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, 32, &temp_val);
	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
	arm_seq_num++;
	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
	arm_next_se |= 1;
	if (cq_notify == IRDMA_CQ_COMPL_EVENT)
		arm_next = 1;
	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
		   FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

	set_64bit_val(cq->shadow_area, 32, temp_val);

	dma_wmb(); /* make sure shadow area is updated before ringing doorbell */

	writel(cq->cq_id, cq->cqe_alloc_db);
}

/**
 * irdma_uk_cq_empty - Check if CQ is empty
 * @cq: hw cq
 */
bool irdma_uk_cq_empty(struct irdma_cq_uk *cq)
{
	__le64 *cqe;
	u8 polarity;
	u64 qword3;

	if (cq->avoid_mem_cflct)
		cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
	else
		cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);

	get_64bit_val(cqe, 24, &qword3);
	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);

	return polarity != cq->polarity;
}

/**
 * irdma_uk_cq_poll_cmpl - get cq completion info
 * @cq: hw cq
 * @info: cq poll information returned
 */
int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
			  struct irdma_cq_poll_info *info)
{
	u64 comp_ctx, qword0, qword2, qword3;
	__le64 *cqe;
	struct irdma_qp_uk *qp;
	struct irdma_srq_uk *srq;
	struct qp_err_code qp_err;
	u8 is_srq;
	struct irdma_ring *pring = NULL;
	u32 wqe_idx;
	int ret_code;
	bool move_cq_head = true;
	u8 polarity;
	bool ext_valid;
	__le64 *ext_cqe;

	if (cq->avoid_mem_cflct)
		cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
	else
		cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);

	get_64bit_val(cqe, 24, &qword3);
	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
	if (polarity != cq->polarity)
		return -ENOENT;

	/* Ensure CQE contents are read after valid bit is checked */
	dma_rmb();

	ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
	if (ext_valid) {
		u64 qword6, qword7;
		u32 peek_head;

		if (cq->avoid_mem_cflct) {
			ext_cqe = (__le64 *)((u8 *)cqe + 32);
			get_64bit_val(ext_cqe, 24, &qword7);
			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
		} else {
			peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
			ext_cqe = cq->cq_base[peek_head].buf;
			get_64bit_val(ext_cqe, 24, &qword7);
			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
			if (!peek_head)
				polarity ^= 1;
		}
		if (polarity != cq->polarity)
			return -ENOENT;

		/* Ensure ext CQE contents are read after ext valid bit is checked */
		dma_rmb();

		info->imm_valid = (bool)FIELD_GET(IRDMA_CQ_IMMVALID, qword7);
		if (info->imm_valid) {
			u64 qword4;

			get_64bit_val(ext_cqe, 0, &qword4);
			info->imm_data = (u32)FIELD_GET(IRDMA_CQ_IMMDATALOW32, qword4);
		}
		info->ud_smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
		info->ud_vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
		if (info->ud_smac_valid || info->ud_vlan_valid) {
			get_64bit_val(ext_cqe, 16, &qword6);
			if (info->ud_vlan_valid)
				info->ud_vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
			if (info->ud_smac_valid) {
				info->ud_smac[5] = qword6 & 0xFF;
				info->ud_smac[4] = (qword6 >> 8) & 0xFF;
				info->ud_smac[3] = (qword6 >> 16) & 0xFF;
				info->ud_smac[2] = (qword6 >> 24) & 0xFF;
				info->ud_smac[1] = (qword6 >> 32) & 0xFF;
				info->ud_smac[0] = (qword6 >> 40) & 0xFF;
			}
		}
	} else {
		info->imm_valid = false;
		info->ud_smac_valid = false;
		info->ud_vlan_valid = false;
	}

	info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
	is_srq = (u8)FIELD_GET(IRDMA_CQ_SRQ, qword3);
	info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
	info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
	get_64bit_val(cqe, 8, &comp_ctx);
	if (is_srq)
		get_64bit_val(cqe, 40, (u64 *)&qp);
	else
		qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
	if (info->error) {
		info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
		info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
		switch (info->major_err) {
		case IRDMA_SRQFLUSH_RSVD_MAJOR_ERR:
			qp_err = irdma_ae_to_qp_err_code(info->minor_err);
			info->minor_err = qp_err.flush_code;
			fallthrough;
		case IRDMA_FLUSH_MAJOR_ERR:
			/* Set the min error to standard flush error code for remaining cqes */
			if (info->minor_err != FLUSH_GENERAL_ERR) {
				qword3 &= ~IRDMA_CQ_MINERR;
				qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
				set_64bit_val(cqe, 24, qword3);
			}
			info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
			break;
		default:
#define IRDMA_CIE_SIGNATURE 0xE
#define IRDMA_CQMAJERR_HIGH_NIBBLE GENMASK(15, 12)
			if (info->q_type == IRDMA_CQE_QTYPE_SQ &&
			    qp->qp_type == IRDMA_QP_TYPE_ROCE_UD &&
			    FIELD_GET(IRDMA_CQMAJERR_HIGH_NIBBLE, info->major_err)
			    == IRDMA_CIE_SIGNATURE) {
				info->error = 0;
				info->major_err = 0;
				info->minor_err = 0;
				info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
			} else {
				info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
			}
			break;
		}
	} else {
		info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
	}

	get_64bit_val(cqe, 0, &qword0);
	get_64bit_val(cqe, 16, &qword2);

	info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
	info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);

	get_64bit_val(cqe, 8, &comp_ctx);

	info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
	qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
	if (!qp || qp->destroy_pending) {
		ret_code = -EFAULT;
		goto exit;
	}
	wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
	info->qp_handle = (irdma_qp_handle)(unsigned long)qp;
	info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);

	if (info->q_type == IRDMA_CQE_QTYPE_RQ && is_srq) {
		unsigned long flags;

		srq = qp->srq_uk;

		get_64bit_val(cqe, 8, &info->wr_id);
		info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);

		if (qword3 & IRDMACQ_STAG) {
			info->stag_invalid_set = true;
			info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG,
							qword2);
		} else {
			info->stag_invalid_set = false;
		}
		spin_lock_irqsave(srq->lock, flags);
		IRDMA_RING_MOVE_TAIL(srq->srq_ring);
		spin_unlock_irqrestore(srq->lock, flags);
		pring = &srq->srq_ring;

	} else if (info->q_type == IRDMA_CQE_QTYPE_RQ && !is_srq) {
		u32 array_idx;

		array_idx = wqe_idx / qp->rq_wqe_size_multiplier;

		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||
		    info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) {
			if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {
				ret_code = -ENOENT;
				goto exit;
			}

			info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
			array_idx = qp->rq_ring.tail;
		} else {
			info->wr_id = qp->rq_wrid_array[array_idx];
		}

		info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);

		if (qword3 & IRDMACQ_STAG) {
			info->stag_invalid_set = true;
			info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
		} else {
			info->stag_invalid_set = false;
		}
		IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
			qp->rq_flush_seen = true;
			if (!IRDMA_RING_MORE_WORK(qp->rq_ring))
				qp->rq_flush_complete = true;
			else
				move_cq_head = false;
		}
		pring = &qp->rq_ring;
	} else { /* q_type is IRDMA_CQE_QTYPE_SQ */
		if (qp->first_sq_wq) {
			if (wqe_idx + 1 >= qp->conn_wqes)
				qp->first_sq_wq = false;

			if (wqe_idx < qp->conn_wqes && qp->sq_ring.head == qp->sq_ring.tail) {
				IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
				IRDMA_RING_MOVE_TAIL(cq->cq_ring);
				set_64bit_val(cq->shadow_area, 0,
					      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
				memset(info, 0,
				       sizeof(struct irdma_cq_poll_info));
				return irdma_uk_cq_poll_cmpl(cq, info);
			}
		}
		if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
			info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
			if (!info->comp_status)
				info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
			if (!qp->sq_wrtrk_array[wqe_idx].signaled) {
				ret_code = -EFAULT;
				goto exit;
			}
			info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
			IRDMA_RING_SET_TAIL(qp->sq_ring,
					    wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
		} else {
			if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
				ret_code = -ENOENT;
				goto exit;
			}

			do {
				__le64 *sw_wqe;
				u64 wqe_qword;
				u32 tail;

				tail = qp->sq_ring.tail;
				sw_wqe = qp->sq_base[tail].elem;
				get_64bit_val(sw_wqe, 24,
					      &wqe_qword);
				info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE,
							      wqe_qword);
				IRDMA_RING_SET_TAIL(qp->sq_ring,
						    tail + qp->sq_wrtrk_array[tail].quanta);
				if (info->op_type != IRDMAQP_OP_NOP) {
					info->wr_id = qp->sq_wrtrk_array[tail].wrid;
					info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
					break;
				}
			} while (1);
			if (info->op_type == IRDMA_OP_TYPE_BIND_MW &&
			    info->minor_err == FLUSH_PROT_ERR)
				info->minor_err = FLUSH_MW_BIND_ERR;
			qp->sq_flush_seen = true;
			if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
				qp->sq_flush_complete = true;
		}
		pring = &qp->sq_ring;
	}

	ret_code = 0;

exit:
	if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
		if (pring && IRDMA_RING_MORE_WORK(*pring))
		/* Park CQ head during a flush to generate additional CQEs
		 * from SW for all unprocessed WQEs. For GEN3 and beyond
		 * FW will generate/flush these CQEs so move to the next CQE
		 */
			move_cq_head = qp->uk_attrs->hw_rev <= IRDMA_GEN_2 ?
						false : true;
	}

	if (move_cq_head) {
		IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
		if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
			cq->polarity ^= 1;

		if (ext_valid && !cq->avoid_mem_cflct) {
			IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
			if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
				cq->polarity ^= 1;
		}

		IRDMA_RING_MOVE_TAIL(cq->cq_ring);
		if (!cq->avoid_mem_cflct && ext_valid)
			IRDMA_RING_MOVE_TAIL(cq->cq_ring);
		if (IRDMA_RING_CURRENT_HEAD(cq->cq_ring) & 0x3F || irdma_uk_cq_empty(cq))
			set_64bit_val(cq->shadow_area, 0,
				      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
	} else {
		qword3 &= ~IRDMA_CQ_WQEIDX;
		qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
		set_64bit_val(cqe, 24, qword3);
	}

	return ret_code;
}
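
/*
 * Illustrative sketch (not part of the driver): a typical budget-bounded
 * poll loop over irdma_uk_cq_poll_cmpl().  -ENOENT means "no completion
 * available" and is not an error; the helper name is hypothetical.
 */
static int __maybe_unused irdma_example_drain_cq(struct irdma_cq_uk *cq,
						 int budget)
{
	struct irdma_cq_poll_info cmpl_info;
	int polled = 0;

	while (polled < budget) {
		int ret = irdma_uk_cq_poll_cmpl(cq, &cmpl_info);

		if (ret == -ENOENT)
			break;		/* CQ drained for now */
		if (ret)
			return ret;	/* e.g. -EFAULT for a destroyed QP */
		polled++;		/* consume cmpl_info.wr_id etc. here */
	}

	return polled;
}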

/**
 * irdma_round_up_wq - return round up qp wq depth
 * @wqdepth: wq depth in quanta to round up
 */
static int irdma_round_up_wq(u32 wqdepth)
{
	int scount = 1;

	for (wqdepth--; scount <= 16; scount *= 2)
		wqdepth |= wqdepth >> scount;

	return ++wqdepth;
}
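
/*
 * Illustrative note (not part of the driver): the bit-smearing loop above
 * rounds up to the next power of two, e.g. irdma_round_up_wq(200) == 256
 * and irdma_round_up_wq(256) == 256.  A hypothetical self-check against
 * the equivalent kernel helper, valid for wqdepth >= 1:
 */
static bool __maybe_unused irdma_example_check_round_up(u32 wqdepth)
{
	return irdma_round_up_wq(wqdepth) == roundup_pow_of_two(wqdepth);
}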

/**
 * irdma_get_wqe_shift - get shift count for maximum wqe size
 * @uk_attrs: qp HW attributes
 * @sge: Maximum Scatter Gather Elements wqe
 * @inline_data: Maximum inline data size
 * @shift: Returns the shift needed based on sge
 *
 * Shift can be used to left shift the wqe size based on number of SGEs and
 * inline data size. For 1 SGE or inline data <= 8, shift = 0 (wqe size of
 * 32 bytes). For 2 or 3 SGEs or inline data <= 39, shift = 1 (wqe size of
 * 64 bytes). For 4-7 SGEs or inline data <= 101, shift = 2 (wqe size of
 * 128 bytes). Otherwise, shift = 3 (wqe size of 256 bytes).
 */
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
			 u32 inline_data, u8 *shift)
{
	*shift = 0;
	if (uk_attrs->hw_rev >= IRDMA_GEN_2) {
		if (sge > 1 || inline_data > 8) {
			if (sge < 4 && inline_data <= 39)
				*shift = 1;
			else if (sge < 8 && inline_data <= 101)
				*shift = 2;
			else
				*shift = 3;
		}
	} else if (sge > 1 || inline_data > 16) {
		*shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
	}
}
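
/*
 * Worked examples for irdma_get_wqe_shift() on GEN_2+ hardware, derived
 * from the branches above:
 *
 *   sge = 1, inline_data =   8  ->  shift 0  (32-byte WQE)
 *   sge = 3, inline_data =  39  ->  shift 1  (64-byte WQE)
 *   sge = 7, inline_data = 101  ->  shift 2  (128-byte WQE)
 *   sge = 8, inline_data = 102  ->  shift 3  (256-byte WQE)
 */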

/*
 * irdma_get_sqdepth - get SQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @sq_size: SQ size
 * @shift: shift which determines size of WQE
 * @sqdepth: depth of SQ
 *
 */
int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
		      u32 *sqdepth)
{
	u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;

	*sqdepth = irdma_round_up_wq((sq_size << shift) + IRDMA_SQ_RSVD);

	if (*sqdepth < min_size)
		*sqdepth = min_size;
	else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
		return -EINVAL;

	return 0;
}
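
/*
 * Illustrative sketch (not part of the driver): sizing an SQ.  With
 * sq_size = 200 WRs and shift = 1 (two quanta per WQE), the raw demand is
 * 400 quanta plus IRDMA_SQ_RSVD, which irdma_round_up_wq() rounds to the
 * next power of two before the min/max clamps apply.
 */
static int __maybe_unused irdma_example_size_sq(struct irdma_uk_attrs *uk_attrs)
{
	u32 sqdepth;
	int ret;

	ret = irdma_get_sqdepth(uk_attrs, 200, 1, &sqdepth);
	if (ret)
		return ret;	/* demand exceeded max_hw_wq_quanta */

	/* sqdepth now holds the rounded, clamped depth in quanta */
	return 0;
}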

/*
 * irdma_get_rqdepth - get RQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @rq_size: RQ size
 * @shift: shift which determines size of WQE
 * @rqdepth: depth of RQ
 */
int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
		      u32 *rqdepth)
{
	u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;

	*rqdepth = irdma_round_up_wq((rq_size << shift) + IRDMA_RQ_RSVD);

	if (*rqdepth < min_size)
		*rqdepth = min_size;
	else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
		return -EINVAL;

	return 0;
}

/*
 * irdma_get_srqdepth - get SRQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @srq_size: SRQ size
 * @shift: shift which determines size of WQE
 * @srqdepth: depth of SRQ
 */
int irdma_get_srqdepth(struct irdma_uk_attrs *uk_attrs, u32 srq_size, u8 shift,
		       u32 *srqdepth)
{
	*srqdepth = irdma_round_up_wq((srq_size << shift) + IRDMA_RQ_RSVD);

	if (*srqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
		*srqdepth = uk_attrs->min_hw_wq_size << shift;
	else if (*srqdepth > uk_attrs->max_hw_srq_quanta)
		return -EINVAL;

	return 0;
}

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
	.iw_copy_inline_data = irdma_copy_inline_data,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
	.iw_set_fragment = irdma_set_fragment,
	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe,
};

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = {
	.iw_copy_inline_data = irdma_copy_inline_data_gen_1,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1,
	.iw_set_fragment = irdma_set_fragment_gen_1,
	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe_gen_1,
};

/**
 * irdma_setup_connection_wqes - setup WQEs necessary to complete
 * connection.
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 */
static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
					struct irdma_qp_uk_init_info *info)
{
	u16 move_cnt = 1;

	if (!info->legacy_mode &&
	    (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE))
		move_cnt = 3;

	qp->conn_wqes = move_cnt;
	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
	IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
}

/**
 * irdma_uk_srq_init - initialize shared rq
 * @srq: hw srq (user and kernel)
 * @info: srq initialization info
 *
 * Initializes the vars used in both user and kernel mode.
 * The size of the wqe depends on the number of max fragments
 * allowed. Then wqe size * the number of wqes should equal the
 * amount of memory allocated for the srq.
 */
irdma_uk_srq_init(struct irdma_srq_uk * srq,struct irdma_srq_uk_init_info * info)1596 int irdma_uk_srq_init(struct irdma_srq_uk *srq,
1597 		      struct irdma_srq_uk_init_info *info)
1598 {
1599 	u8 rqshift;
1600 
1601 	srq->uk_attrs = info->uk_attrs;
1602 	if (info->max_srq_frag_cnt > srq->uk_attrs->max_hw_wq_frags)
1603 		return -EINVAL;
1604 
1605 	irdma_get_wqe_shift(srq->uk_attrs, info->max_srq_frag_cnt, 0, &rqshift);
1606 	srq->srq_caps = info->srq_caps;
1607 	srq->srq_base = info->srq;
1608 	srq->shadow_area = info->shadow_area;
1609 	srq->srq_id = info->srq_id;
1610 	srq->srwqe_polarity = 0;
1611 	srq->srq_size = info->srq_size;
1612 	srq->wqe_size = rqshift;
1613 	srq->max_srq_frag_cnt = min(srq->uk_attrs->max_hw_wq_frags,
1614 				    ((u32)2 << rqshift) - 1);
1615 	IRDMA_RING_INIT(srq->srq_ring, srq->srq_size);
1616 	srq->wqe_size_multiplier = 1 << rqshift;
1617 	srq->wqe_ops = iw_wqe_uk_ops;
1618 
1619 	return 0;
1620 }
1621 
1622 /**
1623  * irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ
1624  * @ukinfo: qp initialization info
1625  * @sq_shift: Returns shift of SQ
1626  * @rq_shift: Returns shift of RQ
1627  */
irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info * ukinfo,u8 * sq_shift,u8 * rq_shift)1628 void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
1629 			    u8 *rq_shift)
1630 {
1631 	bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2;
1632 
1633 	irdma_get_wqe_shift(ukinfo->uk_attrs,
1634 			    imm_support ? ukinfo->max_sq_frag_cnt + 1 :
1635 					  ukinfo->max_sq_frag_cnt,
1636 			    ukinfo->max_inline_data, sq_shift);
1637 
1638 	irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
1639 			    rq_shift);
1640 
1641 	if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
1642 		if (ukinfo->abi_ver > 4)
1643 			*rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
1644 	}
1645 }
1646 
1647 /**
1648  * irdma_uk_calc_depth_shift_sq - calculate depth and shift for SQ size.
1649  * @ukinfo: qp initialization info
1650  * @sq_depth: Returns depth of SQ
1651  * @sq_shift: Returns shift of SQ
1652  */
irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info * ukinfo,u32 * sq_depth,u8 * sq_shift)1653 int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
1654 				 u32 *sq_depth, u8 *sq_shift)
1655 {
1656 	bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2;
1657 	int status;
1658 
1659 	irdma_get_wqe_shift(ukinfo->uk_attrs,
1660 			    imm_support ? ukinfo->max_sq_frag_cnt + 1 :
1661 			    ukinfo->max_sq_frag_cnt,
1662 			    ukinfo->max_inline_data, sq_shift);
1663 	status = irdma_get_sqdepth(ukinfo->uk_attrs, ukinfo->sq_size,
1664 				   *sq_shift, sq_depth);
1665 
1666 	return status;
1667 }
1668 
/**
 * irdma_uk_calc_depth_shift_rq - calculate depth and shift for RQ size.
 * @ukinfo: qp initialization info
 * @rq_depth: Returns depth of RQ
 * @rq_shift: Returns shift of RQ
 */
int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
				 u32 *rq_depth, u8 *rq_shift)
{
	int status;

	irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
			    rq_shift);

	if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
		if (ukinfo->abi_ver > 4)
			*rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
	}

	status = irdma_get_rqdepth(ukinfo->uk_attrs, ukinfo->rq_size,
				   *rq_shift, rq_depth);

	return status;
}

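/*
 * Illustrative sketch, not part of the driver: how a create-QP path
 * might combine the two helpers above.  It assumes the returned depths
 * are in 32-byte quanta, so the ring size in WQEs is depth >> shift.
 * The helper name is hypothetical; the fields are those of
 * struct irdma_qp_uk_init_info used in this file.
 */
static inline int irdma_example_size_qp(struct irdma_qp_uk_init_info *ukinfo)
{
	u32 sq_depth, rq_depth;
	u8 sq_shift, rq_shift;
	int err;

	err = irdma_uk_calc_depth_shift_sq(ukinfo, &sq_depth, &sq_shift);
	if (err)
		return err;

	err = irdma_uk_calc_depth_shift_rq(ukinfo, &rq_depth, &rq_shift);
	if (err)
		return err;

	/* feed the results back for irdma_uk_qp_init() below */
	ukinfo->sq_shift = sq_shift;
	ukinfo->rq_shift = rq_shift;
	ukinfo->sq_size = sq_depth >> sq_shift;
	ukinfo->rq_size = rq_depth >> rq_shift;

	return 0;
}
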
/**
 * irdma_uk_qp_init - initialize shared qp
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 *
 * Initializes the variables used in both user and kernel mode.
 * The WQE size depends on the maximum number of fragments
 * allowed, and WQE size * the number of WQEs must equal the
 * amount of memory allocated for the SQ and RQ.
 */
int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
{
	int ret_code = 0;
	u32 sq_ring_size;

	qp->uk_attrs = info->uk_attrs;
	if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
	    info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
		return -EINVAL;

	qp->qp_caps = info->qp_caps;
	qp->sq_base = info->sq;
	qp->rq_base = info->rq;
	qp->qp_type = info->type ? info->type : IRDMA_QP_TYPE_IWARP;
	qp->shadow_area = info->shadow_area;
	qp->sq_wrtrk_array = info->sq_wrtrk_array;

	qp->rq_wrid_array = info->rq_wrid_array;
	qp->wqe_alloc_db = info->wqe_alloc_db;
	qp->qp_id = info->qp_id;
	qp->sq_size = info->sq_size;
	qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
	sq_ring_size = qp->sq_size << info->sq_shift;
	IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
	if (info->first_sq_wq) {
		irdma_setup_connection_wqes(qp, info);
		qp->swqe_polarity = 1;
		qp->first_sq_wq = true;
	} else {
		qp->swqe_polarity = 0;
	}
	qp->swqe_polarity_deferred = 1;
	qp->rwqe_polarity = 0;
	qp->rq_size = info->rq_size;
	qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
	qp->max_inline_data = info->max_inline_data;
	qp->rq_wqe_size = info->rq_shift;
	IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
	qp->rq_wqe_size_multiplier = 1 << info->rq_shift;
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
		qp->wqe_ops = iw_wqe_uk_ops_gen_1;
	else
		qp->wqe_ops = iw_wqe_uk_ops;
	qp->srq_uk = info->srq_uk;
	return ret_code;
}

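/*
 * Note on polarity (an interpretation of the code above, not a claim
 * from the original sources): the SQ valid-bit polarity starts at 0 and
 * flips each time the ring wraps, which is how hardware distinguishes a
 * freshly written WQE from a stale one.  When first_sq_wq is set,
 * irdma_setup_connection_wqes() pre-posts connection-setup WQEs, so the
 * polarity used for subsequently posted WQEs starts at 1 instead.
 */
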
/**
 * irdma_uk_cq_init - initialize shared cq (user and kernel)
 * @cq: hw cq
 * @info: hw cq initialization info
 */
void irdma_uk_cq_init(struct irdma_cq_uk *cq,
		      struct irdma_cq_uk_init_info *info)
{
	cq->cq_base = info->cq_base;
	cq->cq_id = info->cq_id;
	cq->cq_size = info->cq_size;
	cq->cqe_alloc_db = info->cqe_alloc_db;
	cq->cq_ack_db = info->cq_ack_db;
	cq->shadow_area = info->shadow_area;
	cq->avoid_mem_cflct = info->avoid_mem_cflct;
	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->polarity = 1;
}

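/*
 * cq->polarity starts at 1 because hardware writes the first pass of
 * CQEs with the valid bit set; the expected polarity then toggles on
 * every ring wrap, mirroring the consumer-side logic in
 * irdma_uk_clean_cq() below.  When avoid_mem_cflct is set, the queue is
 * laid out as 64-byte struct irdma_extended_cqe entries rather than
 * 32-byte CQEs, so consumers must index cq_base accordingly.
 */
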
/**
 * irdma_uk_clean_cq - clean cq entries
 * @q: completion context
 * @cq: cq to clean
 */
void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
{
	__le64 *cqe;
	u64 qword3, comp_ctx;
	u32 cq_head;
	u8 polarity, temp;

	cq_head = cq->cq_ring.head;
	temp = cq->polarity;
	do {
		if (cq->avoid_mem_cflct)
			cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;
		else
			cqe = cq->cq_base[cq_head].buf;
		get_64bit_val(cqe, 24, &qword3);
		polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);

		if (polarity != temp)
			break;

		/* Ensure CQE contents are read after valid bit is checked */
		dma_rmb();

		get_64bit_val(cqe, 8, &comp_ctx);
		if ((void *)(unsigned long)comp_ctx == q)
			set_64bit_val(cqe, 8, 0);

		cq_head = (cq_head + 1) % cq->cq_ring.size;
		if (!cq_head)
			temp ^= 1;
	} while (true);
}

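/*
 * Zeroing the completion context at offset 8 detaches still-pending
 * CQEs from the queue being destroyed: a later poll that reads a
 * comp_ctx of 0 will not dereference the freed object.  Only entries
 * whose valid bit matches the current polarity are examined, and the
 * expected polarity flips each time cq_head wraps back to 0.
 */
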
/**
 * irdma_nop - post a nop
 * @qp: hw qp ptr
 * @wr_id: work request id
 * @signaled: signaled for completion
 * @post_sq: ring doorbell
 */
int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq)
{
	__le64 *wqe;
	u64 hdr;
	u32 wqe_idx;
	struct irdma_post_sq_info info = {};

	info.wr_id = wr_id;
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
					 0, &info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

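/*
 * Illustrative sketch, not part of the driver: posting a signaled NOP
 * and ringing the doorbell in one call, e.g. to force a completion on
 * an otherwise idle SQ.  The wrapper name and wr_id value are
 * hypothetical.
 */
static inline int irdma_example_post_signaled_nop(struct irdma_qp_uk *qp)
{
	return irdma_nop(qp, 0 /* wr_id */, true /* signaled */,
			 true /* post_sq */);
}
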
/**
 * irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
 * @frag_cnt: number of fragments
 * @quanta: quanta for frag_cnt
 */
int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*quanta = IRDMA_QP_WQE_MIN_QUANTA;
		break;
	case 2:
	case 3:
		*quanta = 2;
		break;
	case 4:
	case 5:
		*quanta = 3;
		break;
	case 6:
	case 7:
		*quanta = 4;
		break;
	case 8:
	case 9:
		*quanta = 5;
		break;
	case 10:
	case 11:
		*quanta = 6;
		break;
	case 12:
	case 13:
		*quanta = 7;
		break;
	case 14:
	case 15: /* when immediate data is present */
		*quanta = 8;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

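/*
 * The switch above is the closed form quanta = frag_cnt / 2 + 1 for
 * 0 <= frag_cnt <= 15: the first quantum holds the WQE header plus up
 * to one fragment, and each additional quantum holds two more.
 */
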
/**
 * irdma_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
 * @frag_cnt: number of fragments
 * @wqe_size: size in bytes given frag_cnt
 */
int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*wqe_size = 32;
		break;
	case 2:
	case 3:
		*wqe_size = 64;
		break;
	case 4:
	case 5:
	case 6:
	case 7:
		*wqe_size = 128;
		break;
	case 8:
	case 9:
	case 10:
	case 11:
	case 12:
	case 13:
	case 14:
		*wqe_size = 256;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
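
/*
 * The RQ WQE size must be a power of two, so the switch above is
 * equivalent to wqe_size = 32 * roundup_pow_of_two(frag_cnt / 2 + 1)
 * for 0 <= frag_cnt <= 14; larger fragment counts are rejected.
 */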