/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2015 - 2023 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "osdep.h"
#include "irdma_defs.h"
#include "irdma_user.h"
#include "irdma.h"

/**
 * irdma_set_fragment - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void
irdma_set_fragment(__le64 * wqe, u32 offset, struct ibv_sge *sge,
                   u8 valid)
{
        if (sge) {
                set_64bit_val(wqe, offset,
                              FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
                set_64bit_val(wqe, offset + IRDMA_BYTE_8,
                              FIELD_PREP(IRDMAQPSQ_VALID, valid) |
                              FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->length) |
                              FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->lkey));
        } else {
                set_64bit_val(wqe, offset, 0);
                set_64bit_val(wqe, offset + IRDMA_BYTE_8,
                              FIELD_PREP(IRDMAQPSQ_VALID, valid));
        }
}

/**
 * irdma_set_fragment_gen_1 - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void
irdma_set_fragment_gen_1(__le64 * wqe, u32 offset,
                         struct ibv_sge *sge, u8 valid)
{
        if (sge) {
                set_64bit_val(wqe, offset,
                              FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
                set_64bit_val(wqe, offset + IRDMA_BYTE_8,
                              FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->length) |
                              FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->lkey));
        } else {
                set_64bit_val(wqe, offset, 0);
                set_64bit_val(wqe, offset + IRDMA_BYTE_8, 0);
        }
}

/**
 * irdma_nop_hdr - Format header section of noop WQE
 * @qp: hw qp ptr
 */
static inline u64 irdma_nop_hdr(struct irdma_qp_uk *qp)
{
        return FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
               FIELD_PREP(IRDMAQPSQ_SIGCOMPL, false) |
               FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
}

/**
 * irdma_nop_1 - insert a NOP wqe
 * @qp: hw qp ptr
 */
static int
irdma_nop_1(struct irdma_qp_uk *qp)
{
        __le64 *wqe;
        u32 wqe_idx;

        if (!qp->sq_ring.head)
                return EINVAL;

        wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
        wqe = qp->sq_base[wqe_idx].elem;

        qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA;

        set_64bit_val(wqe, IRDMA_BYTE_0, 0);
        set_64bit_val(wqe, IRDMA_BYTE_8, 0);
        set_64bit_val(wqe, IRDMA_BYTE_16, 0);

        /* make sure WQE is written before valid bit is set */
        udma_to_device_barrier();

        set_64bit_val(wqe, IRDMA_BYTE_24, irdma_nop_hdr(qp));

        return 0;
}

/**
 * irdma_clr_wqes - clear next 128 sq entries
 * @qp: hw qp ptr
 * @qp_wqe_idx: wqe_idx
 */
void
irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
{
        __le64 *wqe;
        u32 wqe_idx;

        if (!(qp_wqe_idx & 0x7F)) {
                wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
                wqe = qp->sq_base[wqe_idx].elem;
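                /*
                 * Fill the 128-WQE (4KB) chunk with the inverse of
                 * the polarity it will be used with, so stale valid
                 * bits are never seen by HW; a wrap to index 0 flips
                 * the polarity.
                 */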
                if (wqe_idx)
                        memset(wqe, qp->swqe_polarity ? 0 : 0xFF, 0x1000);
                else
                        memset(wqe, qp->swqe_polarity ? 0xFF : 0, 0x1000);
        }
}

/**
 * irdma_uk_qp_post_wr - ring doorbell
 * @qp: hw qp ptr
 */
void
irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
{
        u64 temp;
        u32 hw_sq_tail;
        u32 sw_sq_head;

        /* valid bit is written and loads completed before reading shadow */
        atomic_thread_fence(memory_order_seq_cst);

        /* read the doorbell shadow area */
        get_64bit_val(qp->shadow_area, IRDMA_BYTE_0, &temp);

        hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
        sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
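        /*
         * Ring only if HW has not already consumed up to the new
         * head; the two branches below handle the ring wrap case.
         */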
        if (sw_sq_head != qp->initial_ring.head) {
                if (qp->push_dropped) {
                        db_wr32(qp->qp_id, qp->wqe_alloc_db);
                        qp->push_dropped = false;
                } else if (sw_sq_head != hw_sq_tail) {
                        if (sw_sq_head > qp->initial_ring.head) {
                                if (hw_sq_tail >= qp->initial_ring.head &&
                                    hw_sq_tail < sw_sq_head)
                                        db_wr32(qp->qp_id, qp->wqe_alloc_db);
                        } else {
                                if (hw_sq_tail >= qp->initial_ring.head ||
                                    hw_sq_tail < sw_sq_head)
                                        db_wr32(qp->qp_id, qp->wqe_alloc_db);
                        }
                }
        }

        qp->initial_ring.head = qp->sq_ring.head;
}

/**
 * irdma_qp_ring_push_db - ring qp doorbell
 * @qp: hw qp ptr
 * @wqe_idx: wqe index
 */
static void
irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx)
{
        set_32bit_val(qp->push_db, 0,
                      FIELD_PREP(IRDMA_WQEALLOC_WQE_DESC_INDEX, wqe_idx >> 3) | qp->qp_id);
        qp->initial_ring.head = qp->sq_ring.head;
        qp->push_mode = true;
        qp->push_dropped = false;
}

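/**
 * irdma_qp_push_wqe - copy wqe to the push page or ring the sq doorbell
 * @qp: hw qp ptr
 * @wqe: wqe to push
 * @quanta: size of wqe in quanta
 * @wqe_idx: wqe index
 * @post_sq: flag to post sq
 */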
void
irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 * wqe, u16 quanta,
                  u32 wqe_idx, bool post_sq)
{
        __le64 *push;

        if (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) !=
            IRDMA_RING_CURRENT_TAIL(qp->sq_ring) &&
            !qp->push_mode) {
                irdma_uk_qp_post_wr(qp);
        } else {
                push = (__le64 *) ((uintptr_t)qp->push_wqe +
                                   (wqe_idx & 0x7) * 0x20);
                irdma_memcpy(push, wqe, quanta * IRDMA_QP_WQE_MIN_SIZE);
                irdma_qp_ring_push_db(qp, wqe_idx);
        }
}

/**
 * irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 * @quanta: (in/out) ptr to size of WR in quanta. Modified in case pad is needed
 * @total_size: size of WR in bytes
 * @info: info on WR
 */
__le64 *
irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
                           u16 *quanta, u32 total_size,
                           struct irdma_post_sq_info *info)
{
        __le64 *wqe;
        __le64 *wqe_0 = NULL;
        u32 nop_wqe_idx;
        u16 avail_quanta, wqe_quanta = *quanta;
        u16 i;

        avail_quanta = qp->uk_attrs->max_hw_sq_chunk -
            (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) %
             qp->uk_attrs->max_hw_sq_chunk);

        if (*quanta <= avail_quanta) {
                /* WR fits in current chunk */
                if (*quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
                        return NULL;
        } else {
                /* Need to pad with NOP */
                if (*quanta + avail_quanta >
                    IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
                        return NULL;

                nop_wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
                for (i = 0; i < avail_quanta; i++) {
                        irdma_nop_1(qp);
                        IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);
                }
                if (qp->push_db && info->push_wqe)
                        irdma_qp_push_wqe(qp, qp->sq_base[nop_wqe_idx].elem,
                                          avail_quanta, nop_wqe_idx, true);
        }

        *wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
        if (!*wqe_idx)
                qp->swqe_polarity = !qp->swqe_polarity;

        IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, *quanta);

        irdma_clr_wqes(qp, *wqe_idx);

        wqe = qp->sq_base[*wqe_idx].elem;
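        /*
         * GEN_1 checks valid bits per 64 byte WQE pair; for a
         * single-quanta WQE the valid bit of the following quantum
         * is pre-set with inverted polarity, presumably so stale
         * data there is never parsed as a valid WQE.
         */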
        if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && wqe_quanta == 1 &&
            (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {
                wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;
                wqe_0[3] = htole64(FIELD_PREP(IRDMAQPSQ_VALID,
                                              qp->swqe_polarity ? 0 : 1));
        }
        qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
        qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
        qp->sq_wrtrk_array[*wqe_idx].quanta = wqe_quanta;
        qp->sq_wrtrk_array[*wqe_idx].signaled = info->signaled;

        return wqe;
}

/**
 * irdma_qp_get_next_recv_wqe - get next qp's rcv wqe
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 */
__le64 *
irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
{
        __le64 *wqe;
        int ret_code;

        if (IRDMA_RING_FULL_ERR(qp->rq_ring))
                return NULL;

        IRDMA_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
        if (ret_code)
                return NULL;

        if (!*wqe_idx)
                qp->rwqe_polarity = !qp->rwqe_polarity;
        /* rq_wqe_size_multiplier is the number of 32 byte quanta in one rq wqe */
        wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem;

        return wqe;
}

/**
 * irdma_uk_rdma_write - rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
                    bool post_sq)
{
        u64 hdr;
        __le64 *wqe;
        struct irdma_rdma_write *op_info;
        u32 i, wqe_idx;
        u32 total_size = 0, byte_off;
        int ret_code;
        u32 frag_cnt, addl_frag_cnt;
        bool read_fence = false;
        u16 quanta;

        info->push_wqe = qp->push_db ? true : false;

        op_info = &info->op.rdma_write;
        if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
                return EINVAL;

        for (i = 0; i < op_info->num_lo_sges; i++)
                total_size += op_info->lo_sg_list[i].length;

        read_fence |= info->read_fence;

        if (info->imm_data_valid)
                frag_cnt = op_info->num_lo_sges + 1;
        else
                frag_cnt = op_info->num_lo_sges;
        addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
        ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
        if (ret_code)
                return ret_code;

        wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
        if (!wqe)
                return ENOSPC;

        qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
        set_64bit_val(wqe, IRDMA_BYTE_16,
                      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));

        if (info->imm_data_valid) {
                set_64bit_val(wqe, IRDMA_BYTE_0,
                              FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
                i = 0;
        } else {
                qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0,
                                            op_info->lo_sg_list,
                                            qp->swqe_polarity);
                i = 1;
        }

        for (byte_off = IRDMA_BYTE_32; i < op_info->num_lo_sges; i++) {
                qp->wqe_ops.iw_set_fragment(wqe, byte_off,
                                            &op_info->lo_sg_list[i],
                                            qp->swqe_polarity);
                byte_off += 16;
        }

        /* if frag_cnt is even, set the valid bit in the next, unused fragment */
        if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
            frag_cnt) {
                qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
                                            qp->swqe_polarity);
                if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
                        ++addl_frag_cnt;
        }

        hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
            FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
            FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
            FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
            FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
            FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
            FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
            FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
            FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
            FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

        udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
        if (info->push_wqe)
                irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
        else if (post_sq)
                irdma_uk_qp_post_wr(qp);

        return 0;
}

/**
 * irdma_uk_rdma_read - rdma read command
 * @qp: hw qp ptr
 * @info: post sq information
 * @inv_stag: flag for inv_stag
 * @post_sq: flag to post sq
 */
int
irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
                   bool inv_stag, bool post_sq)
{
        struct irdma_rdma_read *op_info;
        int ret_code;
        u32 i, byte_off, total_size = 0;
        bool local_fence = false;
        bool ord_fence = false;
        u32 addl_frag_cnt;
        __le64 *wqe;
        u32 wqe_idx;
        u16 quanta;
        u64 hdr;

        info->push_wqe = qp->push_db ? true : false;

        op_info = &info->op.rdma_read;
        if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
                return EINVAL;

        for (i = 0; i < op_info->num_lo_sges; i++)
                total_size += op_info->lo_sg_list[i].length;

        ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
        if (ret_code)
                return ret_code;

        wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
        if (!wqe)
                return ENOSPC;

        if (qp->rd_fence_rate && (qp->ord_cnt++ == qp->rd_fence_rate)) {
                ord_fence = true;
                qp->ord_cnt = 0;
        }

        qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
        addl_frag_cnt = op_info->num_lo_sges > 1 ?
            (op_info->num_lo_sges - 1) : 0;
        local_fence |= info->local_fence;

        qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, op_info->lo_sg_list,
                                    qp->swqe_polarity);
        for (i = 1, byte_off = IRDMA_BYTE_32; i < op_info->num_lo_sges; ++i) {
                qp->wqe_ops.iw_set_fragment(wqe, byte_off,
                                            &op_info->lo_sg_list[i],
                                            qp->swqe_polarity);
                byte_off += IRDMA_BYTE_16;
        }

        /* if num_lo_sges is even, set the valid bit in the next, unused fragment */
        if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 &&
            !(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) {
                qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
                                            qp->swqe_polarity);
                if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
                        ++addl_frag_cnt;
        }
        set_64bit_val(wqe, IRDMA_BYTE_16,
                      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
        hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
            FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
            FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
            FIELD_PREP(IRDMAQPSQ_OPCODE,
                       (inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) |
            FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
            FIELD_PREP(IRDMAQPSQ_READFENCE,
                       info->read_fence || ord_fence ? 1 : 0) |
            FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
            FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
            FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

        udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
        if (info->push_wqe)
                irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
        else if (post_sq)
                irdma_uk_qp_post_wr(qp);

        return 0;
}

/**
 * irdma_uk_send - rdma send command
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
              bool post_sq)
{
        __le64 *wqe;
        struct irdma_post_send *op_info;
        u64 hdr;
        u32 i, wqe_idx, total_size = 0, byte_off;
        int ret_code;
        u32 frag_cnt, addl_frag_cnt;
        bool read_fence = false;
        u16 quanta;

        info->push_wqe = qp->push_db ? true : false;

        op_info = &info->op.send;
        if (qp->max_sq_frag_cnt < op_info->num_sges)
                return EINVAL;

        for (i = 0; i < op_info->num_sges; i++)
                total_size += op_info->sg_list[i].length;

        if (info->imm_data_valid)
                frag_cnt = op_info->num_sges + 1;
        else
                frag_cnt = op_info->num_sges;
        ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
        if (ret_code)
                return ret_code;

        wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
        if (!wqe)
                return ENOSPC;

        read_fence |= info->read_fence;
        addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
        if (info->imm_data_valid) {
                set_64bit_val(wqe, IRDMA_BYTE_0,
                              FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
                i = 0;
        } else {
                qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0,
                                            frag_cnt ? op_info->sg_list : NULL,
                                            qp->swqe_polarity);
                i = 1;
        }

        for (byte_off = IRDMA_BYTE_32; i < op_info->num_sges; i++) {
                qp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->sg_list[i],
                                            qp->swqe_polarity);
                byte_off += IRDMA_BYTE_16;
        }

        /* if frag_cnt is even, set the valid bit in the next, unused fragment */
        if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
            frag_cnt) {
                qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
                                            qp->swqe_polarity);
                if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
                        ++addl_frag_cnt;
        }

        set_64bit_val(wqe, IRDMA_BYTE_16,
                      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
                      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
        hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
            FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
            FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
                       (info->imm_data_valid ? 1 : 0)) |
            FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
            FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
            FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
            FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
            FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
            FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
            FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
            FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
            FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
            FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

        udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
        if (info->push_wqe)
                irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
        else if (post_sq)
                irdma_uk_qp_post_wr(qp);

        return 0;
}

/**
 * irdma_set_mw_bind_wqe_gen_1 - set mw bind wqe
 * @wqe: wqe for setting fragment
 * @op_info: info for setting bind wqe values
 */
static void
irdma_set_mw_bind_wqe_gen_1(__le64 * wqe,
                            struct irdma_bind_window *op_info)
{
        set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)op_info->va);
        set_64bit_val(wqe, IRDMA_BYTE_8,
                      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mw_stag) |
                      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mr_stag));
        set_64bit_val(wqe, IRDMA_BYTE_16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data_gen_1 - Copy inline data to wqe
 * @wqe: pointer to wqe
 * @sge_list: table of pointers to inline data
 * @num_sges: number of SGEs
 * @polarity: compatibility parameter
 */
static void
irdma_copy_inline_data_gen_1(u8 *wqe, struct ibv_sge *sge_list,
                             u32 num_sges, u8 polarity)
{
        u32 quanta_bytes_remaining = 16;
        u32 i;

        for (i = 0; i < num_sges; i++) {
                u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
                u32 sge_len = sge_list[i].length;

                while (sge_len) {
                        u32 bytes_copied;

                        bytes_copied = min(sge_len, quanta_bytes_remaining);
                        irdma_memcpy(wqe, cur_sge, bytes_copied);
                        wqe += bytes_copied;
                        cur_sge += bytes_copied;
                        quanta_bytes_remaining -= bytes_copied;
                        sge_len -= bytes_copied;

                        if (!quanta_bytes_remaining) {
                                /* Remaining inline bytes reside after hdr */
                                wqe += 16;
                                quanta_bytes_remaining = 32;
                        }
                }
        }
}

/**
 * irdma_inline_data_size_to_quanta_gen_1 - based on inline data, quanta
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static inline u16 irdma_inline_data_size_to_quanta_gen_1(u32 data_size)
{
        return data_size <= 16 ? IRDMA_QP_WQE_MIN_QUANTA : 2;
}

/**
 * irdma_set_mw_bind_wqe - set mw bind in wqe
 * @wqe: wqe for setting mw bind
 * @op_info: info for setting wqe values
 */
static void
irdma_set_mw_bind_wqe(__le64 * wqe,
                      struct irdma_bind_window *op_info)
{
        set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)op_info->va);
        set_64bit_val(wqe, IRDMA_BYTE_8,
                      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mr_stag) |
                      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mw_stag));
        set_64bit_val(wqe, IRDMA_BYTE_16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data - Copy inline data to wqe
 * @wqe: pointer to wqe
 * @sge_list: table of pointers to inline data
 * @num_sges: number of SGE's
 * @polarity: polarity of wqe valid bit
 */
static void
irdma_copy_inline_data(u8 *wqe, struct ibv_sge *sge_list,
                       u32 num_sges, u8 polarity)
{
        u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
        u32 quanta_bytes_remaining = 8;
        u32 i;
        bool first_quanta = true;

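        /* the first quantum holds 8 inline bytes at offset 8; the rest of it carries wqe fields and the hdr */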
        wqe += 8;

        for (i = 0; i < num_sges; i++) {
                u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
                u32 sge_len = sge_list[i].length;

                while (sge_len) {
                        u32 bytes_copied;

                        bytes_copied = min(sge_len, quanta_bytes_remaining);
                        irdma_memcpy(wqe, cur_sge, bytes_copied);
                        wqe += bytes_copied;
                        cur_sge += bytes_copied;
                        quanta_bytes_remaining -= bytes_copied;
                        sge_len -= bytes_copied;

                        if (!quanta_bytes_remaining) {
                                quanta_bytes_remaining = 31;

                                /* Remaining inline bytes reside after hdr */
                                if (first_quanta) {
                                        first_quanta = false;
                                        wqe += 16;
                                } else {
                                        *wqe = inline_valid;
                                        wqe++;
                                }
                        }
                }
        }
        if (!first_quanta && quanta_bytes_remaining < 31)
                *(wqe + quanta_bytes_remaining) = inline_valid;
}

/**
 * irdma_inline_data_size_to_quanta - based on inline data, quanta
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static u16 irdma_inline_data_size_to_quanta(u32 data_size)
{
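        /*
         * The first quantum carries 8 inline bytes beside the wqe
         * header; each additional quantum carries 31 data bytes plus
         * an inline valid byte (see irdma_copy_inline_data()).
         */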
        if (data_size <= 8)
                return IRDMA_QP_WQE_MIN_QUANTA;
        else if (data_size <= 39)
                return 2;
        else if (data_size <= 70)
                return 3;
        else if (data_size <= 101)
                return 4;
        else if (data_size <= 132)
                return 5;
        else if (data_size <= 163)
                return 6;
        else if (data_size <= 194)
                return 7;
        else
                return 8;
}

/**
 * irdma_uk_inline_rdma_write - inline rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
                           struct irdma_post_sq_info *info, bool post_sq)
{
        __le64 *wqe;
        struct irdma_rdma_write *op_info;
        u64 hdr = 0;
        u32 wqe_idx;
        bool read_fence = false;
        u16 quanta;
        u32 i, total_size = 0;

        info->push_wqe = qp->push_db ? true : false;
        op_info = &info->op.rdma_write;

        if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges))
                return EINVAL;

        for (i = 0; i < op_info->num_lo_sges; i++)
                total_size += op_info->lo_sg_list[i].length;

        if (unlikely(total_size > qp->max_inline_data))
                return EINVAL;

        quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
        wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
        if (!wqe)
                return ENOSPC;

        qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
        read_fence |= info->read_fence;
        set_64bit_val(wqe, IRDMA_BYTE_16,
                      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));

        hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
            FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
            FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
            FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
            FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
            FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
            FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe ? 1 : 0) |
            FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
            FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
            FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
            FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

        if (info->imm_data_valid)
                set_64bit_val(wqe, IRDMA_BYTE_0,
                              FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));

        qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->lo_sg_list,
                                        op_info->num_lo_sges, qp->swqe_polarity);

        udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

        if (info->push_wqe)
                irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
        else if (post_sq)
                irdma_uk_qp_post_wr(qp);

        return 0;
}

/**
 * irdma_uk_inline_send - inline send operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_inline_send(struct irdma_qp_uk *qp,
                     struct irdma_post_sq_info *info, bool post_sq)
{
        __le64 *wqe;
        struct irdma_post_send *op_info;
        u64 hdr;
        u32 wqe_idx;
        bool read_fence = false;
        u16 quanta;
        u32 i, total_size = 0;

        info->push_wqe = qp->push_db ? true : false;
        op_info = &info->op.send;

        if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges))
                return EINVAL;

        for (i = 0; i < op_info->num_sges; i++)
                total_size += op_info->sg_list[i].length;

        if (unlikely(total_size > qp->max_inline_data))
                return EINVAL;

        quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
        wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
        if (!wqe)
                return ENOSPC;

        set_64bit_val(wqe, IRDMA_BYTE_16,
                      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
                      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));

        read_fence |= info->read_fence;
        hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
            FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
            FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
            FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
            FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
                       (info->imm_data_valid ? 1 : 0)) |
            FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
            FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
            FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
            FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
            FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
            FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
            FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
            FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
            FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

        if (info->imm_data_valid)
                set_64bit_val(wqe, IRDMA_BYTE_0,
                              FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
        qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->sg_list,
                                        op_info->num_sges, qp->swqe_polarity);

        udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

        if (info->push_wqe)
                irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
        else if (post_sq)
                irdma_uk_qp_post_wr(qp);

        return 0;
}

/**
 * irdma_uk_stag_local_invalidate - stag invalidate operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
                               struct irdma_post_sq_info *info,
                               bool post_sq)
{
        __le64 *wqe;
        struct irdma_inv_local_stag *op_info;
        u64 hdr;
        u32 wqe_idx;
        bool local_fence = false;
        struct ibv_sge sge = {0};
        u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;

        info->push_wqe = qp->push_db ? true : false;
        op_info = &info->op.inv_local_stag;
        local_fence = info->local_fence;

        wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, 0, info);
        if (!wqe)
                return ENOSPC;

        sge.lkey = op_info->target_stag;
        qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, &sge, 0);

        set_64bit_val(wqe, IRDMA_BYTE_16, 0);

        hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) |
            FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
            FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
            FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
            FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
            FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

        udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

        if (info->push_wqe)
                irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
        else if (post_sq)
                irdma_uk_qp_post_wr(qp);

        return 0;
}

/**
 * irdma_uk_mw_bind - bind Memory Window
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int
irdma_uk_mw_bind(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
                 bool post_sq)
{
        __le64 *wqe;
        struct irdma_bind_window *op_info;
        u64 hdr;
        u32 wqe_idx;
        bool local_fence;
        u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;

        info->push_wqe = qp->push_db ? true : false;
        op_info = &info->op.bind_window;
        local_fence = info->local_fence;

        wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, 0, info);
        if (!wqe)
                return ENOSPC;

        qp->wqe_ops.iw_set_mw_bind_wqe(wqe, op_info);

        hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_BIND_MW) |
            FIELD_PREP(IRDMAQPSQ_STAGRIGHTS,
                       ((op_info->ena_reads << 2) | (op_info->ena_writes << 3))) |
            FIELD_PREP(IRDMAQPSQ_VABASEDTO,
                       (op_info->addressing_type == IRDMA_ADDR_TYPE_VA_BASED ? 1 : 0)) |
            FIELD_PREP(IRDMAQPSQ_MEMWINDOWTYPE,
                       (op_info->mem_window_type_1 ? 1 : 0)) |
            FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
            FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
            FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
            FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
            FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

        udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

        if (info->push_wqe)
                irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
        else if (post_sq)
                irdma_uk_qp_post_wr(qp);

        return 0;
}

/**
 * irdma_uk_post_receive - post receive wqe
 * @qp: hw qp ptr
 * @info: post rq information
 */
int
irdma_uk_post_receive(struct irdma_qp_uk *qp,
                      struct irdma_post_rq_info *info)
{
        u32 wqe_idx, i, byte_off;
        u32 addl_frag_cnt;
        __le64 *wqe;
        u64 hdr;

        if (qp->max_rq_frag_cnt < info->num_sges)
                return EINVAL;

        wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
        if (!wqe)
                return ENOSPC;

        qp->rq_wrid_array[wqe_idx] = info->wr_id;
        addl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0;
        qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, info->sg_list,
                                    qp->rwqe_polarity);

        for (i = 1, byte_off = IRDMA_BYTE_32; i < info->num_sges; i++) {
                qp->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
                                            qp->rwqe_polarity);
                byte_off += 16;
        }

        /* if num_sges is even, set the valid bit in the next, unused fragment */
        if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
            info->num_sges) {
                qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
                                            qp->rwqe_polarity);
                if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
                        ++addl_frag_cnt;
        }

        set_64bit_val(wqe, IRDMA_BYTE_16, 0);
        hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
            FIELD_PREP(IRDMAQPSQ_VALID, qp->rwqe_polarity);

        udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

        return 0;
}

/**
 * irdma_uk_cq_resize - reset the cq buffer info
 * @cq: cq to resize
 * @cq_base: new cq buffer addr
 * @cq_size: number of cqes
 */
void
irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)
{
        cq->cq_base = cq_base;
        cq->cq_size = cq_size;
        IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
        cq->polarity = 1;
}

/**
 * irdma_uk_cq_set_resized_cnt - record the count of the resized buffers
 * @cq: cq to resize
 * @cq_cnt: the count of the resized cq buffers
 */
void
irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt)
{
        u64 temp_val;
        u16 sw_cq_sel;
        u8 arm_next_se;
        u8 arm_next;
        u8 arm_seq_num;

        get_64bit_val(cq->shadow_area, 32, &temp_val);

        sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
        sw_cq_sel += cq_cnt;

        arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
        arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
        arm_next = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT, temp_val);

        temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
            FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
            FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
            FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

        set_64bit_val(cq->shadow_area, 32, temp_val);
}

/**
 * irdma_uk_cq_request_notification - cq notification request (door bell)
 * @cq: hw cq
 * @cq_notify: notification type
 */
void
irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
                                 enum irdma_cmpl_notify cq_notify)
{
        u64 temp_val;
        u16 sw_cq_sel;
        u8 arm_next_se = 0;
        u8 arm_next = 0;
        u8 arm_seq_num;

        get_64bit_val(cq->shadow_area, IRDMA_BYTE_32, &temp_val);
        arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
        arm_seq_num++;
        sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
        arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
        arm_next_se |= 1;
        if (cq_notify == IRDMA_CQ_COMPL_EVENT)
                arm_next = 1;
        temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
            FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
            FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
            FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

        set_64bit_val(cq->shadow_area, IRDMA_BYTE_32, temp_val);

        udma_to_device_barrier(); /* make sure the shadow area is written before ringing the doorbell */

        db_wr32(cq->cq_id, cq->cqe_alloc_db);
}

static void
irdma_copy_quanta(__le64 * dst, __le64 * src, u32 offset, bool flip,
                  bool barrier)
{
        __le64 val;

        get_64bit_val(src, offset, &val);
        set_64bit_val(dst, offset, val);

        get_64bit_val(src, offset + 8, &val);
        if (flip)
                val ^= IRDMAQPSQ_VALID;
        set_64bit_val(dst, offset + 8, val);

        get_64bit_val(src, offset + 24, &val);
        if (flip)
                val ^= IRDMAQPSQ_VALID;
        if (barrier)
                udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */
        set_64bit_val(dst, offset + 24, val);
}

static void
irdma_copy_wqe(__le64 * dst, __le64 * src, u8 wqe_quanta,
               bool flip_polarity)
{
        u32 offset;

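        /*
         * Copy the trailing quanta first; the first quantum holds
         * the valid bit and is copied last, behind a barrier.
         */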
        offset = 32;
        while (--wqe_quanta) {
                irdma_copy_quanta(dst, src, offset, flip_polarity, false);
                offset += 32;
        }

        irdma_copy_quanta(dst, src, 0, flip_polarity, true);
}

static void
irdma_repost_rq_wqes(struct irdma_qp_uk *qp, u32 start_idx,
                     u32 end_idx)
{
        __le64 *dst_wqe, *src_wqe;
        u32 wqe_idx = 0;
        u8 wqe_quanta = qp->rq_wqe_size_multiplier;
        bool flip_polarity;
        u64 val;

        libirdma_debug("reposting_wqes: from start_idx=%d to end_idx = %d\n", start_idx, end_idx);
        if (pthread_spin_lock(qp->lock))
                return;
        while (start_idx != end_idx) {
                IRDMA_RING_SET_TAIL(qp->rq_ring, start_idx + 1);
                src_wqe = qp->rq_base[start_idx * qp->rq_wqe_size_multiplier].elem;
                dst_wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);

                /* Check to see if polarity has changed */
                get_64bit_val(src_wqe, 24, &val);
                if (FIELD_GET(IRDMAQPSQ_VALID, val) != qp->rwqe_polarity)
                        flip_polarity = true;
                else
                        flip_polarity = false;

                qp->rq_wrid_array[wqe_idx] = qp->rq_wrid_array[start_idx];
                irdma_copy_wqe(dst_wqe, src_wqe, wqe_quanta, flip_polarity);

                start_idx = (start_idx + 1) % qp->rq_size;
        }

        pthread_spin_unlock(qp->lock);
}

static int
irdma_check_rq_cqe(struct irdma_qp_uk *qp, u32 *array_idx)
{
        u32 exp_idx = (qp->last_rx_cmpl_idx + 1) % qp->rq_size;

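        /*
         * An out-of-order completion is either absorbed by reposting
         * the skipped WQEs (relaxed RQ ordering) or reported back at
         * the expected index with an unknown completion status.
         */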
        if (*array_idx != exp_idx) {
                if (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RELAX_RQ_ORDER) {
                        irdma_repost_rq_wqes(qp, exp_idx, *array_idx);
                        qp->last_rx_cmpl_idx = *array_idx;

                        return 0;
                }

                *array_idx = exp_idx;
                qp->last_rx_cmpl_idx = exp_idx;

                return -1;
        }

        qp->last_rx_cmpl_idx = *array_idx;

        return 0;
}

/**
 * irdma_skip_duplicate_flush_cmpl - check last cmpl and update wqe if needed
 *
 * @ring: sq/rq ring
 * @flush_seen: information if flush for specific ring was already seen
 * @comp_status: completion status
 * @wqe_idx: new value of WQE index returned if there is more work on ring
 */
static inline int
irdma_skip_duplicate_flush_cmpl(struct irdma_ring ring, u8 flush_seen,
                                enum irdma_cmpl_status comp_status,
                                u32 *wqe_idx)
{
        if (flush_seen) {
                if (IRDMA_RING_MORE_WORK(ring))
                        *wqe_idx = ring.tail;
                else
                        return ENOENT;
        }

        return 0;
}

/**
 * irdma_uk_cq_poll_cmpl - get cq completion info
 * @cq: hw cq
 * @info: cq poll information returned
 */
int
irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
                      struct irdma_cq_poll_info *info)
{
        u64 comp_ctx, qword0, qword2, qword3;
        __le64 *cqe;
        struct irdma_qp_uk *qp;
        struct irdma_ring *pring = NULL;
        u32 wqe_idx;
        int ret_code;
        bool move_cq_head = true;
        u8 polarity;
        bool ext_valid;
        __le64 *ext_cqe;

        if (cq->avoid_mem_cflct)
                cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
        else
                cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);

        get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
        polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
        if (polarity != cq->polarity)
                return ENOENT;

        /* Ensure CQE contents are read after valid bit is checked */
        udma_from_device_barrier();

        ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
        if (ext_valid) {
                u64 qword6, qword7;
                u32 peek_head;

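                /*
                 * With avoid_mem_cflct each CQE is an extended, 64
                 * byte entry, so the extended part directly follows
                 * the base CQE; otherwise it occupies the next 32
                 * byte CQ entry and wraps with the ring polarity.
                 */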
                if (cq->avoid_mem_cflct) {
                        ext_cqe = (__le64 *) ((u8 *)cqe + 32);
                        get_64bit_val(ext_cqe, IRDMA_BYTE_24, &qword7);
                        polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
                } else {
                        peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
                        ext_cqe = cq->cq_base[peek_head].buf;
                        get_64bit_val(ext_cqe, IRDMA_BYTE_24, &qword7);
                        polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
                        if (!peek_head)
                                polarity ^= 1;
                }
                if (polarity != cq->polarity)
                        return ENOENT;

                /* Ensure ext CQE contents are read after ext valid bit is checked */
                udma_from_device_barrier();

                info->imm_valid = (bool)FIELD_GET(IRDMA_CQ_IMMVALID, qword7);
                if (info->imm_valid) {
                        u64 qword4;

                        get_64bit_val(ext_cqe, IRDMA_BYTE_0, &qword4);
                        info->imm_data = (u32)FIELD_GET(IRDMA_CQ_IMMDATALOW32, qword4);
                }
                info->ud_smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
                info->ud_vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
                if (info->ud_smac_valid || info->ud_vlan_valid) {
                        get_64bit_val(ext_cqe, IRDMA_BYTE_16, &qword6);
                        if (info->ud_vlan_valid)
                                info->ud_vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
                        if (info->ud_smac_valid) {
                                info->ud_smac[5] = qword6 & 0xFF;
                                info->ud_smac[4] = (qword6 >> 8) & 0xFF;
                                info->ud_smac[3] = (qword6 >> 16) & 0xFF;
                                info->ud_smac[2] = (qword6 >> 24) & 0xFF;
                                info->ud_smac[1] = (qword6 >> 32) & 0xFF;
                                info->ud_smac[0] = (qword6 >> 40) & 0xFF;
                        }
                }
        } else {
                info->imm_valid = false;
                info->ud_smac_valid = false;
                info->ud_vlan_valid = false;
        }

        info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
        info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
        info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3);
        info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
        get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
        qp = (struct irdma_qp_uk *)(irdma_uintptr) comp_ctx;
        if (info->error) {
                info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
                info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
                switch (info->major_err) {
                case IRDMA_FLUSH_MAJOR_ERR:
                        /* Set the minor error to the standard flush error code for the remaining cqes */
                        if (info->minor_err != FLUSH_GENERAL_ERR) {
                                qword3 &= ~IRDMA_CQ_MINERR;
                                qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
                                set_64bit_val(cqe, IRDMA_BYTE_24, qword3);
                        }
                        info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
                        break;
                default:
                        info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
                        break;
                }
        } else {
                info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
        }

        get_64bit_val(cqe, IRDMA_BYTE_0, &qword0);
        get_64bit_val(cqe, IRDMA_BYTE_16, &qword2);

        info->stat.raw = (u32)FIELD_GET(IRDMACQ_TCPSQN_ROCEPSN_RTT_TS, qword0);
        info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
        info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);

        info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
        if (!qp || qp->destroy_pending) {
                ret_code = EFAULT;
                goto exit;
        }
        wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
        info->qp_handle = (irdma_qp_handle) (irdma_uintptr) qp;
        info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);

        if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
                u32 array_idx;

                ret_code = irdma_skip_duplicate_flush_cmpl(qp->rq_ring,
                                                           qp->rq_flush_seen,
                                                           info->comp_status,
                                                           &wqe_idx);
                if (ret_code != 0)
                        goto exit;

                array_idx = wqe_idx / qp->rq_wqe_size_multiplier;

                if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||
                    info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) {
                        if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {
                                ret_code = ENOENT;
                                goto exit;
                        }

                        info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
                        info->signaled = 1;
                        array_idx = qp->rq_ring.tail;
                } else {
                        info->wr_id = qp->rq_wrid_array[array_idx];
                        info->signaled = 1;
                        if (irdma_check_rq_cqe(qp, &array_idx)) {
                                info->wr_id = qp->rq_wrid_array[array_idx];
                                info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
                                IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
                                return 0;
                        }
                }

                info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);

                if (qword3 & IRDMACQ_STAG) {
                        info->stag_invalid_set = true;
                        info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
                } else {
                        info->stag_invalid_set = false;
                }
                IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
                if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
                        qp->rq_flush_seen = true;
                        if (!IRDMA_RING_MORE_WORK(qp->rq_ring))
                                qp->rq_flush_complete = true;
                        else
                                move_cq_head = false;
                }
                pring = &qp->rq_ring;
        } else { /* q_type is IRDMA_CQE_QTYPE_SQ */
                if (qp->first_sq_wq) {
                        if (wqe_idx + 1 >= qp->conn_wqes)
                                qp->first_sq_wq = false;

                        if (wqe_idx < qp->conn_wqes && qp->sq_ring.head == qp->sq_ring.tail) {
                                IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
                                IRDMA_RING_MOVE_TAIL(cq->cq_ring);
                                set_64bit_val(cq->shadow_area, IRDMA_BYTE_0,
                                              IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
                                memset(info, 0, sizeof(*info));
                                return irdma_uk_cq_poll_cmpl(cq, info);
                        }
                }
                /* cease posting push mode on push drop */
                if (info->push_dropped) {
                        qp->push_mode = false;
                        qp->push_dropped = true;
                }
                ret_code = irdma_skip_duplicate_flush_cmpl(qp->sq_ring,
                                                           qp->sq_flush_seen,
                                                           info->comp_status,
                                                           &wqe_idx);
                if (ret_code != 0)
                        goto exit;
                if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
                        info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
                        info->signaled = qp->sq_wrtrk_array[wqe_idx].signaled;
                        if (!info->comp_status)
                                info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
                        info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
                        IRDMA_RING_SET_TAIL(qp->sq_ring,
                                            wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
                } else {
                        if (pthread_spin_lock(qp->lock)) {
                                ret_code = ENOENT;
                                goto exit;
                        }
                        if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
                                pthread_spin_unlock(qp->lock);
                                ret_code = ENOENT;
                                goto exit;
                        }

                        do {
                                __le64 *sw_wqe;
                                u64 wqe_qword;
                                u32 tail;

                                tail = qp->sq_ring.tail;
                                sw_wqe = qp->sq_base[tail].elem;
                                get_64bit_val(sw_wqe, IRDMA_BYTE_24,
                                              &wqe_qword);
                                info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE,
                                                              wqe_qword);
                                IRDMA_RING_SET_TAIL(qp->sq_ring,
                                                    tail + qp->sq_wrtrk_array[tail].quanta);
                                if (info->op_type != IRDMAQP_OP_NOP) {
                                        info->wr_id = qp->sq_wrtrk_array[tail].wrid;
                                        info->signaled = qp->sq_wrtrk_array[tail].signaled;
                                        info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
                                        break;
                                }
                        } while (1);

                        if (info->op_type == IRDMA_OP_TYPE_BIND_MW &&
                            info->minor_err == FLUSH_PROT_ERR)
                                info->minor_err = FLUSH_MW_BIND_ERR;
                        qp->sq_flush_seen = true;
                        if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
                                qp->sq_flush_complete = true;
                        pthread_spin_unlock(qp->lock);
                }
                pring = &qp->sq_ring;
        }

        ret_code = 0;

exit:
        if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
                if (pring && IRDMA_RING_MORE_WORK(*pring))
                        move_cq_head = false;
        }
        if (move_cq_head) {
                IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
                if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
                        cq->polarity ^= 1;

                if (ext_valid && !cq->avoid_mem_cflct) {
                        IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
                        if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
                                cq->polarity ^= 1;
                }

                IRDMA_RING_MOVE_TAIL(cq->cq_ring);
                if (!cq->avoid_mem_cflct && ext_valid)
                        IRDMA_RING_MOVE_TAIL(cq->cq_ring);
                set_64bit_val(cq->shadow_area, IRDMA_BYTE_0,
                              IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
        } else {
                qword3 &= ~IRDMA_CQ_WQEIDX;
                qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
                set_64bit_val(cqe, IRDMA_BYTE_24, qword3);
        }

        return ret_code;
}

/**
 * irdma_round_up_wq - return round up qp wq depth
 * @wqdepth: wq depth in quanta to round up
 */
static int
irdma_round_up_wq(u32 wqdepth)
{
        int scount = 1;

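        /* smear the top set bit downward, then add one: rounds up to the next power of 2 */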
        for (wqdepth--; scount <= 16; scount *= 2)
                wqdepth |= wqdepth >> scount;

        return ++wqdepth;
}

/**
 * irdma_get_wqe_shift - get shift count for maximum wqe size
 * @uk_attrs: qp HW attributes
 * @sge: maximum number of Scatter Gather Elements per wqe
 * @inline_data: maximum inline data size
 * @shift: returns the shift needed based on sge
 *
 * Shift can be used to left shift the wqe size based on number of SGEs and
 * inline data size. For 1 SGE or inline data <= 8, shift = 0 (wqe size of 32
 * bytes). For 2 or 3 SGEs or inline data <= 39, shift = 1 (wqe size of 64
 * bytes). For 4-7 SGEs or inline data <= 101, shift = 2 (wqe size of 128
 * bytes). Otherwise, shift = 3 (wqe size of 256 bytes).
 */
void
irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
                    u32 inline_data, u8 *shift)
{
        *shift = 0;
        if (uk_attrs->hw_rev >= IRDMA_GEN_2) {
                if (sge > 1 || inline_data > 8) {
                        if (sge < 4 && inline_data <= 39)
                                *shift = 1;
                        else if (sge < 8 && inline_data <= 101)
                                *shift = 2;
                        else
                                *shift = 3;
                }
        } else if (sge > 1 || inline_data > 16) {
                *shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
        }
}

/**
 * irdma_get_sqdepth - get SQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @sq_size: SQ size
 * @shift: shift which determines size of WQE
 * @sqdepth: depth of SQ
 */
int
irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *sqdepth)
{
        u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;

        *sqdepth = irdma_round_up_wq((sq_size << shift) + IRDMA_SQ_RSVD);

        if (*sqdepth < min_size)
                *sqdepth = min_size;
        else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
                return EINVAL;

        return 0;
}

/**
 * irdma_get_rqdepth - get RQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @rq_size: RQ/SRQ size
 * @shift: shift which determines size of WQE
 * @rqdepth: depth of RQ/SRQ
 */
int
irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, u32 *rqdepth)
{
        u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;

        *rqdepth = irdma_round_up_wq((rq_size << shift) + IRDMA_RQ_RSVD);

        if (*rqdepth < min_size)
                *rqdepth = min_size;
        else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
                return EINVAL;

        return 0;
}

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
        .iw_copy_inline_data = irdma_copy_inline_data,
        .iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
        .iw_set_fragment = irdma_set_fragment,
        .iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe,
};

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = {
        .iw_copy_inline_data = irdma_copy_inline_data_gen_1,
        .iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1,
        .iw_set_fragment = irdma_set_fragment_gen_1,
        .iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe_gen_1,
};

/**
 * irdma_setup_connection_wqes - setup WQEs necessary to complete
 * connection.
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 */
static void
irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
                            struct irdma_qp_uk_init_info *info)
{
        u16 move_cnt = 1;

        if (info->start_wqe_idx)
                move_cnt = info->start_wqe_idx;
        else if (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE)
                move_cnt = 3;
        qp->conn_wqes = move_cnt;
        IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
        IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
        IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
}

/**
 * irdma_uk_calc_depth_shift_sq - calculate depth and shift for SQ size.
 * @ukinfo: qp initialization info
 * @sq_depth: Returns depth of SQ
 * @sq_shift: Returns shift of SQ
 */
int
irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
                             u32 *sq_depth, u8 *sq_shift)
{
        bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2 ? true : false;
        int status;

        irdma_get_wqe_shift(ukinfo->uk_attrs,
                            imm_support ? ukinfo->max_sq_frag_cnt + 1 :
                            ukinfo->max_sq_frag_cnt,
                            ukinfo->max_inline_data, sq_shift);
        status = irdma_get_sqdepth(ukinfo->uk_attrs, ukinfo->sq_size,
                                   *sq_shift, sq_depth);

        return status;
}

/**
 * irdma_uk_calc_depth_shift_rq - calculate depth and shift for RQ size.
 * @ukinfo: qp initialization info
 * @rq_depth: Returns depth of RQ
 * @rq_shift: Returns shift of RQ
 */
int
irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
                             u32 *rq_depth, u8 *rq_shift)
{
        int status;

        irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
                            rq_shift);

        if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
                if (ukinfo->abi_ver > 4)
                        *rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
        }

        status = irdma_get_rqdepth(ukinfo->uk_attrs, ukinfo->rq_size,
                                   *rq_shift, rq_depth);

        return status;
}

/**
 * irdma_uk_qp_init - initialize shared qp
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 *
 * Initializes the vars used in both user and kernel mode.
 * The size of the wqe depends on the number of max fragments
 * allowed. Then the size of the wqe * the number of wqes should
 * equal the amount of memory allocated for the sq and rq.
 */
int
irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
{
        int ret_code = 0;
        u32 sq_ring_size;

        qp->uk_attrs = info->uk_attrs;
        if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
            info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
                return EINVAL;

        qp->qp_caps = info->qp_caps;
        qp->sq_base = info->sq;
        qp->rq_base = info->rq;
        qp->qp_type = info->type ? info->type : IRDMA_QP_TYPE_IWARP;
        qp->shadow_area = info->shadow_area;
        qp->sq_wrtrk_array = info->sq_wrtrk_array;

        qp->rq_wrid_array = info->rq_wrid_array;
        qp->wqe_alloc_db = info->wqe_alloc_db;
        qp->last_rx_cmpl_idx = 0xffffffff;
        qp->rd_fence_rate = info->rd_fence_rate;
        qp->qp_id = info->qp_id;
        qp->sq_size = info->sq_size;
        qp->push_mode = false;
        qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
        sq_ring_size = qp->sq_size << info->sq_shift;
        IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
        IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
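        /* initial_ring tracks the sq head last made visible to HW (see irdma_uk_qp_post_wr()) */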
        if (info->first_sq_wq) {
                irdma_setup_connection_wqes(qp, info);
                qp->swqe_polarity = 1;
                qp->first_sq_wq = true;
        } else {
                qp->swqe_polarity = 0;
        }
        qp->swqe_polarity_deferred = 1;
        qp->rwqe_polarity = 0;
        qp->rq_size = info->rq_size;
        qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
        qp->max_inline_data = info->max_inline_data;
        qp->rq_wqe_size = info->rq_shift;
        IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
        qp->rq_wqe_size_multiplier = 1 << info->rq_shift;
        if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
                qp->wqe_ops = iw_wqe_uk_ops_gen_1;
        else
                qp->wqe_ops = iw_wqe_uk_ops;
        qp->start_wqe_idx = info->start_wqe_idx;

        return ret_code;
}

/**
 * irdma_uk_cq_init - initialize shared cq (user and kernel)
 * @cq: hw cq
 * @info: hw cq initialization info
 */
int
irdma_uk_cq_init(struct irdma_cq_uk *cq, struct irdma_cq_uk_init_info *info)
{
        cq->cq_base = info->cq_base;
        cq->cq_id = info->cq_id;
        cq->cq_size = info->cq_size;
        cq->cqe_alloc_db = info->cqe_alloc_db;
        cq->cq_ack_db = info->cq_ack_db;
        cq->shadow_area = info->shadow_area;
        cq->avoid_mem_cflct = info->avoid_mem_cflct;
        IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
        cq->polarity = 1;

        return 0;
}

/**
 * irdma_uk_clean_cq - clean cq entries
 * @q: completion context
 * @cq: cq to clean
 */
int
irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
{
        __le64 *cqe;
        u64 qword3, comp_ctx;
        u32 cq_head;
        u8 polarity, temp;

        cq_head = cq->cq_ring.head;
        temp = cq->polarity;
        do {
                if (cq->avoid_mem_cflct)
                        cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;
                else
                        cqe = cq->cq_base[cq_head].buf;
                get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
                polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);

                if (polarity != temp)
                        break;

                /* Ensure CQE contents are read after valid bit is checked */
                udma_from_device_barrier();

                get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
                if ((void *)(irdma_uintptr) comp_ctx == q)
                        set_64bit_val(cqe, IRDMA_BYTE_8, 0);

                cq_head = (cq_head + 1) % cq->cq_ring.size;
                if (!cq_head)
                        temp ^= 1;
        } while (true);
        return 0;
}

/**
 * irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
 * @frag_cnt: number of fragments
 * @quanta: quanta for frag_cnt
 */
int
irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
{
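        /*
         * The first quantum holds one fragment beside the wqe header;
         * each additional quantum holds two 16 byte fragments.
         */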
        switch (frag_cnt) {
        case 0:
        case 1:
                *quanta = IRDMA_QP_WQE_MIN_QUANTA;
                break;
        case 2:
        case 3:
                *quanta = 2;
                break;
        case 4:
        case 5:
                *quanta = 3;
                break;
        case 6:
        case 7:
                *quanta = 4;
                break;
        case 8:
        case 9:
                *quanta = 5;
                break;
        case 10:
        case 11:
                *quanta = 6;
                break;
        case 12:
        case 13:
                *quanta = 7;
                break;
        case 14:
        case 15: /* when immediate data is present */
                *quanta = 8;
                break;
        default:
                return EINVAL;
        }

        return 0;
}

/**
 * irdma_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
 * @frag_cnt: number of fragments
 * @wqe_size: size in bytes given frag_cnt
 */
int
irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
{
        switch (frag_cnt) {
        case 0:
        case 1:
                *wqe_size = 32;
                break;
        case 2:
        case 3:
                *wqe_size = 64;
                break;
        case 4:
        case 5:
        case 6:
        case 7:
                *wqe_size = 128;
                break;
        case 8:
        case 9:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
                *wqe_size = 256;
                break;
        default:
                return EINVAL;
        }

        return 0;
}