1 /*-
2 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
3 *
4 * Copyright (c) 2015 - 2023 Intel Corporation
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenFabrics.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35 #include "osdep.h"
36 #include "irdma_hmc.h"
37 #include "irdma_defs.h"
38 #include "irdma_type.h"
39 #include "irdma_protos.h"
40 #include "irdma_puda.h"
41 #include "irdma_ws.h"
42
43 static void
44 irdma_ieq_receive(struct irdma_sc_vsi *vsi,
45 struct irdma_puda_buf *buf);
46 static void irdma_ieq_tx_compl(struct irdma_sc_vsi *vsi, void *sqwrid);
47 static void
48 irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp,
49 struct irdma_puda_buf *buf, u32 wqe_idx);
50
51 /**
52 * irdma_puda_get_listbuf - get buffer from puda list
53 * @list: list to use for buffers (ILQ or IEQ)
54 */
55 static struct irdma_puda_buf *
56 irdma_puda_get_listbuf(struct list_head *list)
57 {
58 struct irdma_puda_buf *buf = NULL;
59
60 if (!list_empty(list)) {
61 buf = (struct irdma_puda_buf *)(list)->next;
62 list_del((struct list_head *)&buf->list);
63 }
64
65 return buf;
66 }
67
68 /**
69 * irdma_puda_get_bufpool - return buffer from resource
70 * @rsrc: resource to use for buffer
71 */
72 struct irdma_puda_buf *
73 irdma_puda_get_bufpool(struct irdma_puda_rsrc *rsrc)
74 {
75 struct irdma_puda_buf *buf = NULL;
76 struct list_head *list = &rsrc->bufpool;
77 unsigned long flags;
78
79 spin_lock_irqsave(&rsrc->bufpool_lock, flags);
80 buf = irdma_puda_get_listbuf(list);
81 if (buf) {
82 rsrc->avail_buf_count--;
83 buf->vsi = rsrc->vsi;
84 } else {
85 rsrc->stats_buf_alloc_fail++;
86 }
87 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
88
89 return buf;
90 }
91
92 /**
93 * irdma_puda_ret_bufpool - return buffer to rsrc list
94 * @rsrc: resource to use for buffer
95 * @buf: buffer to return to resource
96 */
97 void
98 irdma_puda_ret_bufpool(struct irdma_puda_rsrc *rsrc,
99 struct irdma_puda_buf *buf)
100 {
101 unsigned long flags;
102
103 buf->do_lpb = false;
104 spin_lock_irqsave(&rsrc->bufpool_lock, flags);
105 list_add(&buf->list, &rsrc->bufpool);
106 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
107 rsrc->avail_buf_count++;
108 }
109
110 /**
111 * irdma_puda_post_recvbuf - set wqe for rcv buffer
112 * @rsrc: resource ptr
113 * @wqe_idx: wqe index to use
114 * @buf: puda buffer for rcv q
115 * @initial: flag if during init time
116 */
117 static void
118 irdma_puda_post_recvbuf(struct irdma_puda_rsrc *rsrc, u32 wqe_idx,
119 struct irdma_puda_buf *buf, bool initial)
120 {
121 __le64 *wqe;
122 struct irdma_sc_qp *qp = &rsrc->qp;
123 u64 offset24 = 0;
124
125 /* Synch buffer for use by device */
126 dma_sync_single_for_device(hw_to_dev(rsrc->dev->hw), buf->mem.pa, buf->mem.size, DMA_BIDIRECTIONAL);
127 qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;
128 wqe = qp->qp_uk.rq_base[wqe_idx].elem;
129 if (!initial)
130 get_64bit_val(wqe, IRDMA_BYTE_24, &offset24);
131
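/*
 * The RQ WQE valid bit alternates each time this slot is reposted;
 * on the initial post the WQE is zeroed, so the bit gets set.
 */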
132 offset24 = (offset24) ? 0 : FIELD_PREP(IRDMAQPSQ_VALID, 1);
133
134 set_64bit_val(wqe, IRDMA_BYTE_16, 0);
135 set_64bit_val(wqe, 0, buf->mem.pa);
136 if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
137 set_64bit_val(wqe, IRDMA_BYTE_8,
138 FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, buf->mem.size));
139 } else {
140 set_64bit_val(wqe, IRDMA_BYTE_8,
141 FIELD_PREP(IRDMAQPSQ_FRAG_LEN, buf->mem.size) |
142 offset24);
143 }
144 irdma_wmb(); /* make sure WQE is written before valid bit is set */
145
146 set_64bit_val(wqe, IRDMA_BYTE_24, offset24);
147 }
148
149 /**
150 * irdma_puda_replenish_rq - post rcv buffers
151 * @rsrc: resource to use for buffer
152 * @initial: flag if during init time
153 */
154 static int
155 irdma_puda_replenish_rq(struct irdma_puda_rsrc *rsrc, bool initial)
156 {
157 u32 i;
158 u32 invalid_cnt = rsrc->rxq_invalid_cnt;
159 struct irdma_puda_buf *buf = NULL;
160
161 for (i = 0; i < invalid_cnt; i++) {
162 buf = irdma_puda_get_bufpool(rsrc);
163 if (!buf)
164 return -ENOBUFS;
165 irdma_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf, initial);
166 rsrc->rx_wqe_idx = ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size);
167 rsrc->rxq_invalid_cnt--;
168 }
169
170 return 0;
171 }
172
173 /**
174 * irdma_puda_alloc_buf - allocate mem for buffer
175 * @dev: iwarp device
176 * @len: length of buffer
177 */
178 static struct irdma_puda_buf *
179 irdma_puda_alloc_buf(struct irdma_sc_dev *dev,
180 u32 len)
181 {
182 struct irdma_puda_buf *buf;
183 struct irdma_virt_mem buf_mem;
184
185 buf_mem.size = sizeof(*buf);
186 buf_mem.va = kzalloc(buf_mem.size, GFP_KERNEL);
187 if (!buf_mem.va)
188 return NULL;
189
190 buf = buf_mem.va;
191 buf->mem.size = len;
192 buf->mem.va = kzalloc(buf->mem.size, GFP_KERNEL);
193 if (!buf->mem.va)
194 goto free_virt;
195 buf->mem.pa = dma_map_single(hw_to_dev(dev->hw), buf->mem.va, buf->mem.size, DMA_BIDIRECTIONAL);
196 if (dma_mapping_error(hw_to_dev(dev->hw), buf->mem.pa)) {
197 kfree(buf->mem.va);
198 goto free_virt;
199 }
200
201 buf->buf_mem.va = buf_mem.va;
202 buf->buf_mem.size = buf_mem.size;
203
204 return buf;
205
206 free_virt:
207 kfree(buf_mem.va);
208 return NULL;
209 }
210
211 /**
212 * irdma_puda_dele_buf - delete buffer back to system
213 * @dev: iwarp device
214 * @buf: buffer to free
215 */
216 static void
217 irdma_puda_dele_buf(struct irdma_sc_dev *dev,
218 struct irdma_puda_buf *buf)
219 {
220 if (!buf->virtdma) {
221 irdma_free_dma_mem(dev->hw, &buf->mem);
222 kfree(buf->buf_mem.va);
223 }
224 }
225
226 /**
227 * irdma_puda_get_next_send_wqe - return next wqe for processing
228 * @qp: puda qp for wqe
229 * @wqe_idx: wqe index for caller
230 */
231 static __le64 *irdma_puda_get_next_send_wqe(struct irdma_qp_uk *qp,
232 u32 *wqe_idx) {
233 int ret_code = 0;
234
235 *wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
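/* a wqe_idx of 0 means the SQ ring just wrapped, so flip the polarity used for WQE valid bits */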
236 if (!*wqe_idx)
237 qp->swqe_polarity = !qp->swqe_polarity;
238 IRDMA_RING_MOVE_HEAD(qp->sq_ring, ret_code);
239 if (ret_code)
240 return NULL;
241
242 return qp->sq_base[*wqe_idx].elem;
243 }
244
245 /**
246 * irdma_puda_poll_info - poll cq for completion
247 * @cq: cq for poll
248 * @info: info return for successful completion
249 */
250 static int
251 irdma_puda_poll_info(struct irdma_sc_cq *cq,
252 struct irdma_puda_cmpl_info *info)
253 {
254 struct irdma_cq_uk *cq_uk = &cq->cq_uk;
255 u64 qword0, qword2, qword3, qword6;
256 __le64 *cqe;
257 __le64 *ext_cqe = NULL;
258 u64 qword7 = 0;
259 u64 comp_ctx;
260 bool valid_bit;
261 bool ext_valid = 0;
262 u32 major_err, minor_err;
263 u32 peek_head;
264 bool error;
265 u8 polarity;
266
267 cqe = IRDMA_GET_CURRENT_CQ_ELEM(&cq->cq_uk);
268 get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
269 valid_bit = (bool)FIELD_GET(IRDMA_CQ_VALID, qword3);
270 if (valid_bit != cq_uk->polarity)
271 return -ENOENT;
272
273 /* Ensure CQE contents are read after valid bit is checked */
274 rmb();
275
276 if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
277 ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
278
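/*
 * GEN_2 UD completions can be followed by an extended CQE carrying VLAN
 * and source MAC information. Peek at the next ring entry and check its
 * valid bit against the ring polarity (the expected bit flips when the
 * peek wraps to index 0) before consuming it.
 */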
279 if (ext_valid) {
280 peek_head = (cq_uk->cq_ring.head + 1) % cq_uk->cq_ring.size;
281 ext_cqe = cq_uk->cq_base[peek_head].buf;
282 get_64bit_val(ext_cqe, IRDMA_BYTE_24, &qword7);
283 polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
284 if (!peek_head)
285 polarity ^= 1;
286 if (polarity != cq_uk->polarity)
287 return -ENOENT;
288
289 /* Ensure ext CQE contents are read after ext valid bit is checked */
290 rmb();
291
292 IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
293 if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
294 cq_uk->polarity = !cq_uk->polarity;
295 /* update cq tail in cq shadow memory also */
296 IRDMA_RING_MOVE_TAIL(cq_uk->cq_ring);
297 }
298
299 irdma_debug_buf(cq->dev, IRDMA_DEBUG_PUDA, "PUDA CQE", cqe, 32);
300 if (ext_valid)
301 irdma_debug_buf(cq->dev, IRDMA_DEBUG_PUDA, "PUDA EXT-CQE",
302 ext_cqe, 32);
303
304 error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
305 if (error) {
306 irdma_debug(cq->dev, IRDMA_DEBUG_PUDA, "receive error\n");
307 major_err = (u32)(FIELD_GET(IRDMA_CQ_MAJERR, qword3));
308 minor_err = (u32)(FIELD_GET(IRDMA_CQ_MINERR, qword3));
309 info->compl_error = major_err << 16 | minor_err;
310 return -EIO;
311 }
312
313 get_64bit_val(cqe, IRDMA_BYTE_0, &qword0);
314 get_64bit_val(cqe, IRDMA_BYTE_16, &qword2);
315
316 info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
317 info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
318 if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
319 info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
320
321 get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
322 info->qp = (struct irdma_qp_uk *)(irdma_uintptr) comp_ctx;
323 info->wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
324
325 if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
326 if (ext_valid) {
327 info->vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
328 if (info->vlan_valid) {
329 get_64bit_val(ext_cqe, IRDMA_BYTE_16, &qword6);
330 info->vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
331 }
332 info->smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
333 if (info->smac_valid) {
334 get_64bit_val(ext_cqe, IRDMA_BYTE_16, &qword6);
335 info->smac[0] = (u8)((qword6 >> 40) & 0xFF);
336 info->smac[1] = (u8)((qword6 >> 32) & 0xFF);
337 info->smac[2] = (u8)((qword6 >> 24) & 0xFF);
338 info->smac[3] = (u8)((qword6 >> 16) & 0xFF);
339 info->smac[4] = (u8)((qword6 >> 8) & 0xFF);
340 info->smac[5] = (u8)(qword6 & 0xFF);
341 }
342 }
343
344 if (cq->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
345 info->vlan_valid = (bool)FIELD_GET(IRDMA_VLAN_TAG_VALID, qword3);
346 info->l4proto = (u8)FIELD_GET(IRDMA_UDA_L4PROTO, qword2);
347 info->l3proto = (u8)FIELD_GET(IRDMA_UDA_L3PROTO, qword2);
348 }
349
350 info->payload_len = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
351 }
352
353 return 0;
354 }
355
356 /**
357 * irdma_puda_poll_cmpl - processes completion for cq
358 * @dev: iwarp device
359 * @cq: cq getting interrupt
360 * @compl_err: return any completion err
361 */
362 int
363 irdma_puda_poll_cmpl(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq,
364 u32 *compl_err)
365 {
366 struct irdma_qp_uk *qp;
367 struct irdma_cq_uk *cq_uk = &cq->cq_uk;
368 struct irdma_puda_cmpl_info info = {0};
369 int ret = 0;
370 struct irdma_puda_buf *buf;
371 struct irdma_puda_rsrc *rsrc;
372 u8 cq_type = cq->cq_type;
373 unsigned long flags;
374
375 if (cq_type == IRDMA_CQ_TYPE_ILQ || cq_type == IRDMA_CQ_TYPE_IEQ) {
376 rsrc = (cq_type == IRDMA_CQ_TYPE_ILQ) ? cq->vsi->ilq :
377 cq->vsi->ieq;
378 } else {
379 irdma_debug(dev, IRDMA_DEBUG_PUDA, "qp_type error\n");
380 return -EFAULT;
381 }
382
383 ret = irdma_puda_poll_info(cq, &info);
384 *compl_err = info.compl_error;
385 if (ret == -ENOENT)
386 return ret;
387 if (ret)
388 goto done;
389
390 qp = info.qp;
391 if (!qp || !rsrc) {
392 ret = -EFAULT;
393 goto done;
394 }
395
396 if (qp->qp_id != rsrc->qp_id) {
397 ret = -EFAULT;
398 goto done;
399 }
400
401 if (info.q_type == IRDMA_CQE_QTYPE_RQ) {
402 buf = (struct irdma_puda_buf *)(uintptr_t)
403 qp->rq_wrid_array[info.wqe_idx];
404
405 /* reusing so synch the buffer for CPU use */
406 dma_sync_single_for_cpu(hw_to_dev(dev->hw), buf->mem.pa, buf->mem.size, DMA_BIDIRECTIONAL);
407 /* Get all the tcpip information in the buf header */
408 ret = irdma_puda_get_tcpip_info(&info, buf);
409 if (ret) {
410 rsrc->stats_rcvd_pkt_err++;
411 if (cq_type == IRDMA_CQ_TYPE_ILQ) {
412 irdma_ilq_putback_rcvbuf(&rsrc->qp, buf,
413 info.wqe_idx);
414 } else {
415 irdma_puda_ret_bufpool(rsrc, buf);
416 irdma_puda_replenish_rq(rsrc, false);
417 }
418 goto done;
419 }
420
421 rsrc->stats_pkt_rcvd++;
422 rsrc->compl_rxwqe_idx = info.wqe_idx;
423 irdma_debug(dev, IRDMA_DEBUG_PUDA, "RQ completion\n");
424 rsrc->receive(rsrc->vsi, buf);
425 if (cq_type == IRDMA_CQ_TYPE_ILQ)
426 irdma_ilq_putback_rcvbuf(&rsrc->qp, buf, info.wqe_idx);
427 else
428 irdma_puda_replenish_rq(rsrc, false);
429
430 } else {
431 irdma_debug(dev, IRDMA_DEBUG_PUDA, "SQ completion\n");
432 buf = (struct irdma_puda_buf *)(uintptr_t)
433 qp->sq_wrtrk_array[info.wqe_idx].wrid;
434
435 /* reusing so synch the buffer for CPU use */
436 dma_sync_single_for_cpu(hw_to_dev(dev->hw), buf->mem.pa, buf->mem.size, DMA_BIDIRECTIONAL);
437 IRDMA_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
438 rsrc->xmit_complete(rsrc->vsi, buf);
439 spin_lock_irqsave(&rsrc->bufpool_lock, flags);
440 rsrc->tx_wqe_avail_cnt++;
441 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
442 if (!list_empty(&rsrc->txpend))
443 irdma_puda_send_buf(rsrc, NULL);
444 }
445
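/*
 * Consume the CQE regardless of completion status: advance the CQ head
 * (flipping polarity on wrap), move the tail, and publish the new head
 * in the CQ shadow area so the entry can be reused.
 */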
446 done:
447 IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
448 if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
449 cq_uk->polarity = !cq_uk->polarity;
450 /* update cq tail in cq shadow memory also */
451 IRDMA_RING_MOVE_TAIL(cq_uk->cq_ring);
452 set_64bit_val(cq_uk->shadow_area, IRDMA_BYTE_0,
453 IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring));
454
455 return ret;
456 }
457
458 /**
459 * irdma_puda_send - complete send wqe for transmit
460 * @qp: puda qp for send
461 * @info: buffer information for transmit
462 */
463 int
464 irdma_puda_send(struct irdma_sc_qp *qp, struct irdma_puda_send_info *info)
465 {
466 __le64 *wqe;
467 u32 iplen, l4len;
468 u64 hdr[2];
469 u32 wqe_idx;
470 u8 iipt;
471
472 /* number of 32-bit words in the TCP header */
473 l4len = info->tcplen >> 2;
474 if (info->ipv4) {
475 iipt = 3;
476 iplen = 5;
477 } else {
478 iipt = 1;
479 iplen = 10;
480 }
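/*
 * iplen and l4len are in 32-bit words: 5 words for a 20-byte IPv4 header,
 * 10 words for a 40-byte IPv6 header; iipt encodes the IP header type
 * expected by the hardware.
 */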
481
482 wqe = irdma_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);
483 if (!wqe)
484 return -ENOSPC;
485
486 qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch;
487 /* Third line of WQE descriptor */
488 /* maclen is in words */
489
490 if (qp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
491 hdr[0] = 0; /* Dest_QPN and Dest_QKey only for UD */
492 hdr[1] = FIELD_PREP(IRDMA_UDA_QPSQ_OPCODE, IRDMA_OP_TYPE_SEND) |
493 FIELD_PREP(IRDMA_UDA_QPSQ_L4LEN, l4len) |
494 FIELD_PREP(IRDMAQPSQ_AHID, info->ah_id) |
495 FIELD_PREP(IRDMA_UDA_QPSQ_SIGCOMPL, 1) |
496 FIELD_PREP(IRDMA_UDA_QPSQ_VALID,
497 qp->qp_uk.swqe_polarity);
498
499 /* Fourth line of WQE descriptor */
500
501 set_64bit_val(wqe, IRDMA_BYTE_0, info->paddr);
502 set_64bit_val(wqe, IRDMA_BYTE_8,
503 FIELD_PREP(IRDMAQPSQ_FRAG_LEN, info->len) |
504 FIELD_PREP(IRDMA_UDA_QPSQ_VALID, qp->qp_uk.swqe_polarity));
505 } else {
506 hdr[0] = FIELD_PREP(IRDMA_UDA_QPSQ_MACLEN, info->maclen >> 1) |
507 FIELD_PREP(IRDMA_UDA_QPSQ_IPLEN, iplen) |
508 FIELD_PREP(IRDMA_UDA_QPSQ_L4T, 1) |
509 FIELD_PREP(IRDMA_UDA_QPSQ_IIPT, iipt) |
510 FIELD_PREP(IRDMA_GEN1_UDA_QPSQ_L4LEN, l4len);
511
512 hdr[1] = FIELD_PREP(IRDMA_UDA_QPSQ_OPCODE, IRDMA_OP_TYPE_SEND) |
513 FIELD_PREP(IRDMA_UDA_QPSQ_SIGCOMPL, 1) |
514 FIELD_PREP(IRDMA_UDA_QPSQ_DOLOOPBACK, info->do_lpb) |
515 FIELD_PREP(IRDMA_UDA_QPSQ_VALID, qp->qp_uk.swqe_polarity);
516
517 /* Fourth line of WQE descriptor */
518
519 set_64bit_val(wqe, IRDMA_BYTE_0, info->paddr);
520 set_64bit_val(wqe, IRDMA_BYTE_8,
521 FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, info->len));
522 }
523
524 set_64bit_val(wqe, IRDMA_BYTE_16, hdr[0]);
525 irdma_wmb(); /* make sure WQE is written before valid bit is set */
526
527 set_64bit_val(wqe, IRDMA_BYTE_24, hdr[1]);
528
529 irdma_debug_buf(qp->dev, IRDMA_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
530 irdma_uk_qp_post_wr(&qp->qp_uk);
531 return 0;
532 }
533
534 /**
535 * irdma_puda_send_buf - transmit puda buffer
536 * @rsrc: resource to use for buffer
537 * @buf: puda buffer to transmit
538 */
539 void
540 irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,
541 struct irdma_puda_buf *buf)
542 {
543 struct irdma_puda_send_info info;
544 int ret = 0;
545 unsigned long flags;
546
547 spin_lock_irqsave(&rsrc->bufpool_lock, flags);
548 /*
549 * if no WQE is available, or this is a new buffer (not from a completion) and buffers are already pending, queue it to preserve ordering
550 */
551 if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
552 list_add_tail(&buf->list, &rsrc->txpend);
553 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
554 rsrc->stats_sent_pkt_q++;
555 if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
556 irdma_debug(rsrc->dev, IRDMA_DEBUG_PUDA,
557 "adding to txpend\n");
558 return;
559 }
560 rsrc->tx_wqe_avail_cnt--;
561 /*
562 * if we are coming from a completion and there are pending buffers, get one from the pending list
563 */
564 if (!buf) {
565 buf = irdma_puda_get_listbuf(&rsrc->txpend);
566 if (!buf)
567 goto done;
568 }
569
570 info.scratch = buf;
571 info.paddr = buf->mem.pa;
572 info.len = buf->totallen;
573 info.tcplen = buf->tcphlen;
574 info.ipv4 = buf->ipv4;
575
576 if (rsrc->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
577 info.ah_id = buf->ah_id;
578 } else {
579 info.maclen = buf->maclen;
580 info.do_lpb = buf->do_lpb;
581 }
582
583 /* Synch buffer for use by device */
584 dma_sync_single_for_cpu(hw_to_dev(rsrc->dev->hw), buf->mem.pa, buf->mem.size, DMA_BIDIRECTIONAL);
585 ret = irdma_puda_send(&rsrc->qp, &info);
586 if (ret) {
587 rsrc->tx_wqe_avail_cnt++;
588 rsrc->stats_sent_pkt_q++;
589 list_add(&buf->list, &rsrc->txpend);
590 if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
591 irdma_debug(rsrc->dev, IRDMA_DEBUG_PUDA,
592 "adding to puda_send\n");
593 } else {
594 rsrc->stats_pkt_sent++;
595 }
596 done:
597 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
598 }
599
600 /**
601 * irdma_puda_qp_setctx - during init, set qp's context
602 * @rsrc: qp's resource
603 */
604 static void
605 irdma_puda_qp_setctx(struct irdma_puda_rsrc *rsrc)
606 {
607 struct irdma_sc_qp *qp = &rsrc->qp;
608 __le64 *qp_ctx = qp->hw_host_ctx;
609
610 set_64bit_val(qp_ctx, IRDMA_BYTE_8, qp->sq_pa);
611 set_64bit_val(qp_ctx, IRDMA_BYTE_16, qp->rq_pa);
612 set_64bit_val(qp_ctx, IRDMA_BYTE_24,
613 FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
614 FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size));
615 set_64bit_val(qp_ctx, IRDMA_BYTE_48,
616 FIELD_PREP(IRDMAQPC_SNDMSS, rsrc->buf_size));
617 set_64bit_val(qp_ctx, IRDMA_BYTE_56, 0);
618 if (qp->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
619 set_64bit_val(qp_ctx, IRDMA_BYTE_64, 1);
620 set_64bit_val(qp_ctx, IRDMA_BYTE_136,
621 FIELD_PREP(IRDMAQPC_TXCQNUM, rsrc->cq_id) |
622 FIELD_PREP(IRDMAQPC_RXCQNUM, rsrc->cq_id));
623 set_64bit_val(qp_ctx, IRDMA_BYTE_144,
624 FIELD_PREP(IRDMAQPC_STAT_INDEX, rsrc->stats_idx));
625 set_64bit_val(qp_ctx, IRDMA_BYTE_160,
626 FIELD_PREP(IRDMAQPC_PRIVEN, 1) |
627 FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, rsrc->stats_idx_valid));
628 set_64bit_val(qp_ctx, IRDMA_BYTE_168,
629 FIELD_PREP(IRDMAQPC_QPCOMPCTX, (uintptr_t)qp));
630 set_64bit_val(qp_ctx, IRDMA_BYTE_176,
631 FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
632 FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
633 FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
634
635 irdma_debug_buf(rsrc->dev, IRDMA_DEBUG_PUDA, "PUDA QP CONTEXT", qp_ctx,
636 IRDMA_QP_CTX_SIZE);
637 }
638
639 /**
640 * irdma_puda_qp_wqe - setup wqe for qp create
641 * @dev: Device
642 * @qp: Resource qp
643 */
644 static int
645 irdma_puda_qp_wqe(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
646 {
647 struct irdma_sc_cqp *cqp;
648 __le64 *wqe;
649 u64 hdr;
650 struct irdma_ccq_cqe_info compl_info;
651 int status = 0;
652
653 cqp = dev->cqp;
654 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);
655 if (!wqe)
656 return -ENOSPC;
657
658 set_64bit_val(wqe, IRDMA_BYTE_16, qp->hw_host_ctx_pa);
659 set_64bit_val(wqe, IRDMA_BYTE_40, qp->shadow_area_pa);
660
661 hdr = qp->qp_uk.qp_id |
662 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) |
663 FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, IRDMA_QP_TYPE_UDA) |
664 FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, 1) |
665 FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, 2) |
666 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
667 irdma_wmb(); /* make sure WQE is written before valid bit is set */
668
669 set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
670
671 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_PUDA, "PUDA QP CREATE", wqe, 40);
672 irdma_sc_cqp_post_sq(cqp);
673 status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_QP,
674 &compl_info);
675
676 return status;
677 }
678
679 /**
680 * irdma_puda_qp_create - create qp for resource
681 * @rsrc: resource to use for buffer
682 */
683 static int
684 irdma_puda_qp_create(struct irdma_puda_rsrc *rsrc)
685 {
686 struct irdma_sc_qp *qp = &rsrc->qp;
687 struct irdma_qp_uk *ukqp = &qp->qp_uk;
688 int ret = 0;
689 u32 sq_size, rq_size;
690 struct irdma_dma_mem *mem;
691
692 sq_size = rsrc->sq_size * IRDMA_QP_WQE_MIN_SIZE;
693 rq_size = rsrc->rq_size * IRDMA_QP_WQE_MIN_SIZE;
694 rsrc->qpmem.size = (sq_size + rq_size + (IRDMA_SHADOW_AREA_SIZE << 3) +
695 IRDMA_QP_CTX_SIZE);
696 rsrc->qpmem.va = irdma_allocate_dma_mem(rsrc->dev->hw, &rsrc->qpmem,
697 rsrc->qpmem.size, IRDMA_HW_PAGE_SIZE);
698 if (!rsrc->qpmem.va)
699 return -ENOMEM;
700
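/*
 * The single DMA allocation is carved up in order: SQ ring, RQ ring,
 * shadow area, then the QP host context programmed by the CQP.
 */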
701 mem = &rsrc->qpmem;
702 memset(mem->va, 0, rsrc->qpmem.size);
703 qp->hw_sq_size = irdma_get_encoded_wqe_size(rsrc->sq_size, IRDMA_QUEUE_TYPE_SQ_RQ);
704 qp->hw_rq_size = irdma_get_encoded_wqe_size(rsrc->rq_size, IRDMA_QUEUE_TYPE_SQ_RQ);
705 qp->pd = &rsrc->sc_pd;
706 qp->qp_uk.qp_type = IRDMA_QP_TYPE_UDA;
707 qp->dev = rsrc->dev;
708 qp->qp_uk.back_qp = rsrc;
709 qp->sq_pa = mem->pa;
710 qp->rq_pa = qp->sq_pa + sq_size;
711 qp->vsi = rsrc->vsi;
712 ukqp->sq_base = mem->va;
713 ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
714 ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
715 ukqp->uk_attrs = &qp->dev->hw_attrs.uk_attrs;
716 qp->shadow_area_pa = qp->rq_pa + rq_size;
717 qp->hw_host_ctx = ukqp->shadow_area + IRDMA_SHADOW_AREA_SIZE;
718 qp->hw_host_ctx_pa = qp->shadow_area_pa + (IRDMA_SHADOW_AREA_SIZE << 3);
719 qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
720 ukqp->qp_id = rsrc->qp_id;
721 ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;
722 ukqp->rq_wrid_array = rsrc->rq_wrid_array;
723 ukqp->sq_size = rsrc->sq_size;
724 ukqp->rq_size = rsrc->rq_size;
725
726 IRDMA_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
727 IRDMA_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
728 IRDMA_RING_INIT(ukqp->rq_ring, ukqp->rq_size);
729 ukqp->wqe_alloc_db = qp->pd->dev->wqe_alloc_db;
730
731 ret = rsrc->dev->ws_add(qp->vsi, qp->user_pri);
732 if (ret) {
733 irdma_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
734 return ret;
735 }
736
737 irdma_qp_add_qos(qp);
738 irdma_puda_qp_setctx(rsrc);
739
740 if (rsrc->dev->ceq_valid)
741 ret = irdma_cqp_qp_create_cmd(rsrc->dev, qp);
742 else
743 ret = irdma_puda_qp_wqe(rsrc->dev, qp);
744 if (ret) {
745 irdma_qp_rem_qos(qp);
746 rsrc->dev->ws_remove(qp->vsi, qp->user_pri);
747 irdma_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
748 }
749
750 return ret;
751 }
752
753 /**
754 * irdma_puda_cq_wqe - setup wqe for CQ create
755 * @dev: Device
756 * @cq: resource for cq
757 */
758 static int
759 irdma_puda_cq_wqe(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
760 {
761 __le64 *wqe;
762 struct irdma_sc_cqp *cqp;
763 u64 hdr;
764 struct irdma_ccq_cqe_info compl_info;
765 int status = 0;
766
767 cqp = dev->cqp;
768 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);
769 if (!wqe)
770 return -ENOSPC;
771
772 set_64bit_val(wqe, IRDMA_BYTE_0, cq->cq_uk.cq_size);
773 set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(cq, 1));
774 set_64bit_val(wqe, IRDMA_BYTE_16,
775 FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold));
776 set_64bit_val(wqe, IRDMA_BYTE_32, cq->cq_pa);
777 set_64bit_val(wqe, IRDMA_BYTE_40, cq->shadow_area_pa);
778 set_64bit_val(wqe, IRDMA_BYTE_56,
779 FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
780 FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
781
782 hdr = cq->cq_uk.cq_id |
783 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) |
784 FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, 1) |
785 FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, 1) |
786 FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, 1) |
787 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
788 irdma_wmb(); /* make sure WQE is written before valid bit is set */
789
790 set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
791
792 irdma_debug_buf(dev, IRDMA_DEBUG_PUDA, "PUDA CREATE CQ", wqe,
793 IRDMA_CQP_WQE_SIZE * 8);
794 irdma_sc_cqp_post_sq(dev->cqp);
795 status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_CQ,
796 &compl_info);
797 if (!status) {
798 struct irdma_sc_ceq *ceq = dev->ceq[0];
799
800 if (ceq && ceq->reg_cq)
801 status = irdma_sc_add_cq_ctx(ceq, cq);
802 }
803
804 return status;
805 }
806
807 /**
808 * irdma_puda_cq_create - create cq for resource
809 * @rsrc: resource for which cq to create
810 */
811 static int
812 irdma_puda_cq_create(struct irdma_puda_rsrc *rsrc)
813 {
814 struct irdma_sc_dev *dev = rsrc->dev;
815 struct irdma_sc_cq *cq = &rsrc->cq;
816 int ret = 0;
817 u32 cqsize;
818 struct irdma_dma_mem *mem;
819 struct irdma_cq_init_info info = {0};
820 struct irdma_cq_uk_init_info *init_info = &info.cq_uk_init_info;
821
822 cq->vsi = rsrc->vsi;
823 cqsize = rsrc->cq_size * (sizeof(struct irdma_cqe));
824 rsrc->cqmem.size = cqsize + sizeof(struct irdma_cq_shadow_area);
825 rsrc->cqmem.va = irdma_allocate_dma_mem(dev->hw, &rsrc->cqmem,
826 rsrc->cqmem.size,
827 IRDMA_CQ0_ALIGNMENT);
828 if (!rsrc->cqmem.va)
829 return -ENOMEM;
830
831 mem = &rsrc->cqmem;
832 info.dev = dev;
833 info.type = (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ) ?
834 IRDMA_CQ_TYPE_ILQ : IRDMA_CQ_TYPE_IEQ;
835 info.shadow_read_threshold = rsrc->cq_size >> 2;
836 info.cq_base_pa = mem->pa;
837 info.shadow_area_pa = mem->pa + cqsize;
838 init_info->cq_base = mem->va;
839 init_info->shadow_area = (__le64 *) ((u8 *)mem->va + cqsize);
840 init_info->cq_size = rsrc->cq_size;
841 init_info->cq_id = rsrc->cq_id;
842 info.ceqe_mask = true;
843 info.ceq_id_valid = true;
844 info.vsi = rsrc->vsi;
845
846 ret = irdma_sc_cq_init(cq, &info);
847 if (ret)
848 goto error;
849
850 if (rsrc->dev->ceq_valid)
851 ret = irdma_cqp_cq_create_cmd(dev, cq);
852 else
853 ret = irdma_puda_cq_wqe(dev, cq);
854 error:
855 if (ret)
856 irdma_free_dma_mem(dev->hw, &rsrc->cqmem);
857
858 return ret;
859 }
860
861 /**
862 * irdma_puda_free_qp - free qp for resource
863 * @rsrc: resource for which qp to free
864 */
865 static void
866 irdma_puda_free_qp(struct irdma_puda_rsrc *rsrc)
867 {
868 int ret;
869 struct irdma_ccq_cqe_info compl_info;
870 struct irdma_sc_dev *dev = rsrc->dev;
871
872 if (rsrc->dev->ceq_valid) {
873 irdma_cqp_qp_destroy_cmd(dev, &rsrc->qp);
874 rsrc->dev->ws_remove(rsrc->qp.vsi, rsrc->qp.user_pri);
875 return;
876 }
877
878 ret = irdma_sc_qp_destroy(&rsrc->qp, 0, false, true, true);
879 if (ret)
880 irdma_debug(dev, IRDMA_DEBUG_PUDA,
881 "error puda qp destroy wqe, status = %d\n", ret);
882 if (!ret) {
883 ret = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_DESTROY_QP,
884 &compl_info);
885 if (ret)
886 irdma_debug(dev, IRDMA_DEBUG_PUDA,
887 "error puda qp destroy failed, status = %d\n",
888 ret);
889 }
890 rsrc->dev->ws_remove(rsrc->qp.vsi, rsrc->qp.user_pri);
891 }
892
893 /**
894 * irdma_puda_free_cq - free cq for resource
895 * @rsrc: resource for which cq to free
896 */
897 static void
898 irdma_puda_free_cq(struct irdma_puda_rsrc *rsrc)
899 {
900 int ret;
901 struct irdma_ccq_cqe_info compl_info;
902 struct irdma_sc_dev *dev = rsrc->dev;
903
904 if (rsrc->dev->ceq_valid) {
905 irdma_cqp_cq_destroy_cmd(dev, &rsrc->cq);
906 return;
907 }
908
909 ret = irdma_sc_cq_destroy(&rsrc->cq, 0, true);
910 if (ret)
911 irdma_debug(dev, IRDMA_DEBUG_PUDA, "error ieq cq destroy\n");
912 if (!ret) {
913 ret = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_DESTROY_CQ,
914 &compl_info);
915 if (ret)
916 irdma_debug(dev, IRDMA_DEBUG_PUDA,
917 "error ieq cq destroy done\n");
918 }
919 }
920
921 /**
922 * irdma_puda_dele_rsrc - delete all resources during close
923 * @vsi: VSI structure of device
924 * @type: type of resource to delete
925 * @reset: true if reset chip
926 */
927 void
928 irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,
929 bool reset)
930 {
931 struct irdma_sc_dev *dev = vsi->dev;
932 struct irdma_puda_rsrc *rsrc;
933 struct irdma_puda_buf *buf = NULL;
934 struct irdma_puda_buf *nextbuf = NULL;
935 struct irdma_virt_mem *vmem;
936 struct irdma_sc_ceq *ceq;
937
938 ceq = vsi->dev->ceq[0];
939
940 switch (type) {
941 case IRDMA_PUDA_RSRC_TYPE_ILQ:
942 rsrc = vsi->ilq;
943 vmem = &vsi->ilq_mem;
944 vsi->ilq = NULL;
945 if (ceq && ceq->reg_cq)
946 irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
947 break;
948 case IRDMA_PUDA_RSRC_TYPE_IEQ:
949 rsrc = vsi->ieq;
950 vmem = &vsi->ieq_mem;
951 vsi->ieq = NULL;
952 if (ceq && ceq->reg_cq)
953 irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
954 break;
955 default:
956 irdma_debug(dev, IRDMA_DEBUG_PUDA,
957 "error resource type = 0x%x\n", type);
958 return;
959 }
960
961 spin_lock_destroy(&rsrc->bufpool_lock);
962 switch (rsrc->cmpl) {
963 case PUDA_HASH_CRC_COMPLETE:
964 irdma_free_hash_desc(rsrc->hash_desc);
965 /* fallthrough */
966 case PUDA_QP_CREATED:
967 irdma_qp_rem_qos(&rsrc->qp);
968
969 if (!reset)
970 irdma_puda_free_qp(rsrc);
971
972 irdma_free_dma_mem(dev->hw, &rsrc->qpmem);
973 /* fallthrough */
974 case PUDA_CQ_CREATED:
975 if (!reset)
976 irdma_puda_free_cq(rsrc);
977
978 irdma_free_dma_mem(dev->hw, &rsrc->cqmem);
979 break;
980 default:
981 irdma_debug(rsrc->dev, IRDMA_DEBUG_PUDA,
982 "error no resources\n");
983 break;
984 }
985 /* Free all allocated puda buffers for both tx and rx */
986 buf = rsrc->alloclist;
987 while (buf) {
988 nextbuf = buf->next;
989 irdma_puda_dele_buf(dev, buf);
990 buf = nextbuf;
991 rsrc->alloc_buf_count--;
992 }
993
994 kfree(vmem->va);
995 }
996
997 /**
998 * irdma_puda_allocbufs - allocate buffers for resource
999 * @rsrc: resource for buffer allocation
1000 * @count: number of buffers to create
1001 */
1002 static int
1003 irdma_puda_allocbufs(struct irdma_puda_rsrc *rsrc, u32 count)
1004 {
1005 u32 i;
1006 struct irdma_puda_buf *buf;
1007 struct irdma_puda_buf *nextbuf;
1008 struct irdma_virt_mem buf_mem;
1009 struct irdma_dma_mem *dma_mem;
1010 bool virtdma = false;
1011 unsigned long flags;
1012
1013 buf_mem.size = count * sizeof(*buf);
1014 buf_mem.va = kzalloc(buf_mem.size, GFP_KERNEL);
1015 if (!buf_mem.va) {
1016 irdma_debug(rsrc->dev, IRDMA_DEBUG_PUDA,
1017 "error virt_mem for buf\n");
1018 rsrc->stats_buf_alloc_fail++;
1019 goto trysmall;
1020 }
1021
1022 /*
1023 * Allocate one large DMA chunk and store its DMA attributes in the first puda buffer; this is needed when the chunk is freed
1024 */
1025 buf = (struct irdma_puda_buf *)buf_mem.va;
1026 buf->mem.va = irdma_allocate_dma_mem(rsrc->dev->hw, &buf->mem,
1027 rsrc->buf_size * count, 1);
1028 if (!buf->mem.va) {
1029 irdma_debug(rsrc->dev, IRDMA_DEBUG_PUDA,
1030 "error dma_mem for buf\n");
1031 kfree(buf_mem.va);
1032 rsrc->stats_buf_alloc_fail++;
1033 goto trysmall;
1034 }
1035
1036 /*
1037 * dma_mem points to start of the large DMA chunk
1038 */
1039 dma_mem = &buf->mem;
1040
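/*
 * Only this first buffer keeps virtdma == false, so on teardown
 * irdma_puda_dele_buf() releases the shared DMA chunk and the buffer
 * array exactly once; the remaining buffers only reference slices of it.
 */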
1041 spin_lock_irqsave(&rsrc->bufpool_lock, flags);
1042 for (i = 0; i < count; i++) {
1043 buf = ((struct irdma_puda_buf *)buf_mem.va) + i;
1044
1045 buf->mem.va = (char *)dma_mem->va + (i * rsrc->buf_size);
1046 buf->mem.pa = dma_mem->pa + (i * rsrc->buf_size);
1047 buf->mem.size = rsrc->buf_size;
1048 buf->virtdma = virtdma;
1049 virtdma = true;
1050
1051 buf->buf_mem.va = buf_mem.va;
1052 buf->buf_mem.size = buf_mem.size;
1053
1054 list_add(&buf->list, &rsrc->bufpool);
1055 rsrc->alloc_buf_count++;
1056 if (!rsrc->alloclist) {
1057 rsrc->alloclist = buf;
1058 } else {
1059 nextbuf = rsrc->alloclist;
1060 rsrc->alloclist = buf;
1061 buf->next = nextbuf;
1062 }
1063 }
1064 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
1065
1066 rsrc->avail_buf_count = rsrc->alloc_buf_count;
1067 return 0;
1068 trysmall:
1069 for (i = 0; i < count; i++) {
1070 buf = irdma_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
1071 if (!buf) {
1072 rsrc->stats_buf_alloc_fail++;
1073 return -ENOMEM;
1074 }
1075 irdma_puda_ret_bufpool(rsrc, buf);
1076 rsrc->alloc_buf_count++;
1077 if (!rsrc->alloclist) {
1078 rsrc->alloclist = buf;
1079 } else {
1080 nextbuf = rsrc->alloclist;
1081 rsrc->alloclist = buf;
1082 buf->next = nextbuf;
1083 }
1084 }
1085
1086 rsrc->avail_buf_count = rsrc->alloc_buf_count;
1087
1088 return 0;
1089 }
1090
1091 /**
1092 * irdma_puda_create_rsrc - create resource (ilq or ieq)
1093 * @vsi: sc VSI struct
1094 * @info: resource information
1095 */
1096 int
1097 irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
1098 struct irdma_puda_rsrc_info *info)
1099 {
1100 struct irdma_sc_dev *dev = vsi->dev;
1101 int ret = 0;
1102 struct irdma_puda_rsrc *rsrc;
1103 u32 pudasize;
1104 u32 sqwridsize, rqwridsize;
1105 struct irdma_virt_mem *vmem;
1106
1107 info->count = 1;
1108 pudasize = sizeof(*rsrc);
1109 sqwridsize = info->sq_size * sizeof(struct irdma_sq_uk_wr_trk_info);
1110 rqwridsize = info->rq_size * 8;
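/* each RQ work request id is stored as a u64, hence 8 bytes per RQ entry */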
1111 switch (info->type) {
1112 case IRDMA_PUDA_RSRC_TYPE_ILQ:
1113 vmem = &vsi->ilq_mem;
1114 break;
1115 case IRDMA_PUDA_RSRC_TYPE_IEQ:
1116 vmem = &vsi->ieq_mem;
1117 break;
1118 default:
1119 return -EOPNOTSUPP;
1120 }
1121 vmem->size = pudasize + sqwridsize + rqwridsize;
1122 vmem->va = kzalloc(vmem->size, GFP_KERNEL);
1123 if (!vmem->va)
1124 return -ENOMEM;
1125
1126 rsrc = vmem->va;
1127 spin_lock_init(&rsrc->bufpool_lock);
1128 switch (info->type) {
1129 case IRDMA_PUDA_RSRC_TYPE_ILQ:
1130 vsi->ilq = vmem->va;
1131 vsi->ilq_count = info->count;
1132 rsrc->receive = info->receive;
1133 rsrc->xmit_complete = info->xmit_complete;
1134 break;
1135 case IRDMA_PUDA_RSRC_TYPE_IEQ:
1136 vsi->ieq_count = info->count;
1137 vsi->ieq = vmem->va;
1138 rsrc->receive = irdma_ieq_receive;
1139 rsrc->xmit_complete = irdma_ieq_tx_compl;
1140 break;
1141 default:
1142 return -EOPNOTSUPP;
1143 }
1144
1145 rsrc->type = info->type;
1146 rsrc->sq_wrtrk_array = (struct irdma_sq_uk_wr_trk_info *)
1147 ((u8 *)vmem->va + pudasize);
1148 rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
1149 /* Initialize all ieq lists */
1150 INIT_LIST_HEAD(&rsrc->bufpool);
1151 INIT_LIST_HEAD(&rsrc->txpend);
1152
1153 rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
1154 irdma_sc_pd_init(dev, &rsrc->sc_pd, info->pd_id, info->abi_ver);
1155 rsrc->qp_id = info->qp_id;
1156 rsrc->cq_id = info->cq_id;
1157 rsrc->sq_size = info->sq_size;
1158 rsrc->rq_size = info->rq_size;
1159 rsrc->cq_size = info->rq_size + info->sq_size;
1160 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1161 if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
1162 rsrc->cq_size += info->rq_size;
1163 }
1164 rsrc->buf_size = info->buf_size;
1165 rsrc->dev = dev;
1166 rsrc->vsi = vsi;
1167 rsrc->stats_idx = info->stats_idx;
1168 rsrc->stats_idx_valid = info->stats_idx_valid;
1169
1170 ret = irdma_puda_cq_create(rsrc);
1171 if (!ret) {
1172 rsrc->cmpl = PUDA_CQ_CREATED;
1173 ret = irdma_puda_qp_create(rsrc);
1174 }
1175 if (ret) {
1176 irdma_debug(dev, IRDMA_DEBUG_PUDA,
1177 "error qp_create type=%d, status=%d\n", rsrc->type,
1178 ret);
1179 goto error;
1180 }
1181 rsrc->cmpl = PUDA_QP_CREATED;
1182
1183 ret = irdma_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
1184 if (ret) {
1185 irdma_debug(dev, IRDMA_DEBUG_PUDA, "error alloc_buf\n");
1186 goto error;
1187 }
1188
1189 rsrc->rxq_invalid_cnt = info->rq_size;
1190 ret = irdma_puda_replenish_rq(rsrc, true);
1191 if (ret)
1192 goto error;
1193
1194 if (info->type == IRDMA_PUDA_RSRC_TYPE_IEQ) {
1195 if (!irdma_init_hash_desc(&rsrc->hash_desc)) {
1196 rsrc->check_crc = true;
1197 rsrc->cmpl = PUDA_HASH_CRC_COMPLETE;
1198 ret = 0;
1199 }
1200 }
1201
1202 irdma_sc_ccq_arm(&rsrc->cq);
1203 return ret;
1204
1205 error:
1206 irdma_puda_dele_rsrc(vsi, info->type, false);
1207
1208 return ret;
1209 }
1210
1211 /**
1212 * irdma_ilq_putback_rcvbuf - ilq buffer to put back on rq
1213 * @qp: ilq's qp resource
1214 * @buf: puda buffer for rcv q
1215 * @wqe_idx: wqe index of completed rcvbuf
1216 */
1217 static void
1218 irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp,
1219 struct irdma_puda_buf *buf, u32 wqe_idx)
1220 {
1221 __le64 *wqe;
1222 u64 offset8, offset24;
1223
1224 /* Synch buffer for use by device */
1225 dma_sync_single_for_device(hw_to_dev(qp->dev->hw), buf->mem.pa, buf->mem.size, DMA_BIDIRECTIONAL);
1226 wqe = qp->qp_uk.rq_base[wqe_idx].elem;
1227 get_64bit_val(wqe, IRDMA_BYTE_24, &offset24);
1228 if (qp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1229 get_64bit_val(wqe, IRDMA_BYTE_8, &offset8);
1230 if (offset24)
1231 offset8 &= ~FIELD_PREP(IRDMAQPSQ_VALID, 1);
1232 else
1233 offset8 |= FIELD_PREP(IRDMAQPSQ_VALID, 1);
1234 set_64bit_val(wqe, IRDMA_BYTE_8, offset8);
1235 irdma_wmb(); /* make sure WQE is written before valid bit is set */
1236 }
1237 if (offset24)
1238 offset24 = 0;
1239 else
1240 offset24 = FIELD_PREP(IRDMAQPSQ_VALID, 1);
1241
1242 set_64bit_val(wqe, IRDMA_BYTE_24, offset24);
1243 }
1244
1245 /**
1246 * irdma_ieq_get_fpdu_len - get length of fpdu with or without marker
1247 * @pfpdu: pointer to fpdu
1248 * @datap: pointer to data in the buffer
1249 * @rcv_seq: seqnum of the data buffer
1250 */
1251 static u16 irdma_ieq_get_fpdu_len(struct irdma_pfpdu *pfpdu, u8 *datap,
1252 u32 rcv_seq) {
1253 u32 marker_seq, end_seq, blk_start;
1254 u8 marker_len = pfpdu->marker_len;
1255 u16 total_len = 0;
1256 u16 fpdu_len;
1257
1258 blk_start = (pfpdu->rcv_start_seq - rcv_seq) & (IRDMA_MRK_BLK_SZ - 1);
1259 if (!blk_start) {
1260 total_len = marker_len;
1261 marker_seq = rcv_seq + IRDMA_MRK_BLK_SZ;
1262 if (marker_len && *(u32 *)datap)
1263 return 0;
1264 } else {
1265 marker_seq = rcv_seq + blk_start;
1266 }
1267
1268 datap += total_len;
1269 fpdu_len = IRDMA_NTOHS(*(__be16 *) datap);
1270 fpdu_len += IRDMA_IEQ_MPA_FRAMING;
1271 fpdu_len = (fpdu_len + 3) & 0xfffc;
1272
1273 if (fpdu_len > pfpdu->max_fpdu_data)
1274 return 0;
1275
1276 total_len += fpdu_len;
1277 end_seq = rcv_seq + total_len;
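/*
 * Account for MPA markers that land inside this fpdu: a marker of
 * marker_len bytes appears every IRDMA_MRK_BLK_SZ bytes of TCP
 * sequence space and adds to the on-the-wire length.
 */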
1278 while ((int)(marker_seq - end_seq) < 0) {
1279 total_len += marker_len;
1280 end_seq += marker_len;
1281 marker_seq += IRDMA_MRK_BLK_SZ;
1282 }
1283
1284 return total_len;
1285 }
1286
1287 /**
1288 * irdma_ieq_copy_to_txbuf - copy data from rcv buf to tx buf
1289 * @buf: rcv buffer with partial
1290 * @txbuf: tx buffer for sending back
1291 * @buf_offset: rcv buffer offset to copy from
1292 * @txbuf_offset: offset in tx buf to copy to
1293 * @len: length of data to copy
1294 */
1295 static void
1296 irdma_ieq_copy_to_txbuf(struct irdma_puda_buf *buf,
1297 struct irdma_puda_buf *txbuf,
1298 u16 buf_offset, u32 txbuf_offset, u32 len)
1299 {
1300 void *mem1 = (u8 *)buf->mem.va + buf_offset;
1301 void *mem2 = (u8 *)txbuf->mem.va + txbuf_offset;
1302
1303 irdma_memcpy(mem2, mem1, len);
1304 }
1305
1306 /**
1307 * irdma_ieq_setup_tx_buf - setup tx buffer for partial handling
1308 * @buf: receive buffer with partial
1309 * @txbuf: buffer to prepare
1310 */
1311 static void
1312 irdma_ieq_setup_tx_buf(struct irdma_puda_buf *buf,
1313 struct irdma_puda_buf *txbuf)
1314 {
1315 txbuf->tcphlen = buf->tcphlen;
1316 txbuf->ipv4 = buf->ipv4;
1317
1318 if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1319 txbuf->hdrlen = txbuf->tcphlen;
1320 irdma_ieq_copy_to_txbuf(buf, txbuf, IRDMA_TCP_OFFSET, 0,
1321 txbuf->hdrlen);
1322 } else {
1323 txbuf->maclen = buf->maclen;
1324 txbuf->hdrlen = buf->hdrlen;
1325 irdma_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen);
1326 }
1327 }
1328
1329 /**
1330 * irdma_ieq_check_first_buf - check if rcv buffer's seq is in range
1331 * @buf: receive exception buffer
1332 * @fps: first partial sequence number
1333 */
1334 static void
1335 irdma_ieq_check_first_buf(struct irdma_puda_buf *buf, u32 fps)
1336 {
1337 u32 offset;
1338
1339 if (buf->seqnum < fps) {
1340 offset = fps - buf->seqnum;
1341 if (offset > buf->datalen)
1342 return;
1343 buf->data += offset;
1344 buf->datalen -= (u16)offset;
1345 buf->seqnum = fps;
1346 }
1347 }
1348
1349 /**
1350 * irdma_ieq_compl_pfpdu - write txbuf with full fpdu
1351 * @ieq: ieq resource
1352 * @rxlist: ieq's received buffer list
1353 * @pbufl: temporary list for buffers for fpdu
1354 * @txbuf: tx buffer for fpdu
1355 * @fpdu_len: total length of fpdu
1356 */
1357 static void
1358 irdma_ieq_compl_pfpdu(struct irdma_puda_rsrc *ieq,
1359 struct list_head *rxlist,
1360 struct list_head *pbufl,
1361 struct irdma_puda_buf *txbuf, u16 fpdu_len)
1362 {
1363 struct irdma_puda_buf *buf;
1364 u32 nextseqnum;
1365 u16 txoffset, bufoffset;
1366
1367 buf = irdma_puda_get_listbuf(pbufl);
1368 if (!buf)
1369 return;
1370
1371 nextseqnum = buf->seqnum + fpdu_len;
1372 irdma_ieq_setup_tx_buf(buf, txbuf);
1373 if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1374 txoffset = txbuf->hdrlen;
1375 txbuf->totallen = txbuf->hdrlen + fpdu_len;
1376 txbuf->data = (u8 *)txbuf->mem.va + txoffset;
1377 } else {
1378 txoffset = buf->hdrlen;
1379 txbuf->totallen = buf->hdrlen + fpdu_len;
1380 txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen;
1381 }
1382 bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
1383
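/*
 * Assemble exactly one fpdu in the tx buffer, copying from successive
 * rx buffers until fpdu_len bytes are gathered; fully consumed rx
 * buffers are returned to the pool.
 */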
1384 do {
1385 if (buf->datalen >= fpdu_len) {
1386 /* copied full fpdu */
1387 irdma_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset,
1388 fpdu_len);
1389 buf->datalen -= fpdu_len;
1390 buf->data += fpdu_len;
1391 buf->seqnum = nextseqnum;
1392 break;
1393 }
1394 /* copy partial fpdu */
1395 irdma_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset,
1396 buf->datalen);
1397 txoffset += buf->datalen;
1398 fpdu_len -= buf->datalen;
1399 irdma_puda_ret_bufpool(ieq, buf);
1400 buf = irdma_puda_get_listbuf(pbufl);
1401 if (!buf)
1402 return;
1403
1404 bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
1405 } while (1);
1406
1407 /* last buffer on the list */
1408 if (buf->datalen)
1409 list_add(&buf->list, rxlist);
1410 else
1411 irdma_puda_ret_bufpool(ieq, buf);
1412 }
1413
1414 /**
1415 * irdma_ieq_create_pbufl - create buffer list for single fpdu
1416 * @pfpdu: pointer to fpdu
1417 * @rxlist: resource list for receive ieq buffers
1418 * @pbufl: temp. list for buffers for fpdu
1419 * @buf: first receive buffer
1420 * @fpdu_len: total length of fpdu
1421 */
1422 static int
1423 irdma_ieq_create_pbufl(struct irdma_pfpdu *pfpdu,
1424 struct list_head *rxlist,
1425 struct list_head *pbufl,
1426 struct irdma_puda_buf *buf, u16 fpdu_len)
1427 {
1428 int status = 0;
1429 struct irdma_puda_buf *nextbuf;
1430 u32 nextseqnum;
1431 u16 plen = fpdu_len - buf->datalen;
1432 bool done = false;
1433
1434 nextseqnum = buf->seqnum + buf->datalen;
1435 do {
1436 nextbuf = irdma_puda_get_listbuf(rxlist);
1437 if (!nextbuf) {
1438 status = -ENOBUFS;
1439 break;
1440 }
1441 list_add_tail(&nextbuf->list, pbufl);
1442 if (nextbuf->seqnum != nextseqnum) {
1443 pfpdu->bad_seq_num++;
1444 status = -ERANGE;
1445 break;
1446 }
1447 if (nextbuf->datalen >= plen) {
1448 done = true;
1449 } else {
1450 plen -= nextbuf->datalen;
1451 nextseqnum = nextbuf->seqnum + nextbuf->datalen;
1452 }
1453
1454 } while (!done);
1455
1456 return status;
1457 }
1458
1459 /**
1460 * irdma_ieq_handle_partial - process partial fpdu buffer
1461 * @ieq: ieq resource
1462 * @pfpdu: partial management per user qp
1463 * @buf: receive buffer
1464 * @fpdu_len: fpdu len in the buffer
1465 */
1466 static int
1467 irdma_ieq_handle_partial(struct irdma_puda_rsrc *ieq,
1468 struct irdma_pfpdu *pfpdu,
1469 struct irdma_puda_buf *buf, u16 fpdu_len)
1470 {
1471 int status = 0;
1472 u8 *crcptr;
1473 u32 mpacrc;
1474 u32 seqnum = buf->seqnum;
1475 struct list_head pbufl; /* partial buffer list */
1476 struct irdma_puda_buf *txbuf = NULL;
1477 struct list_head *rxlist = &pfpdu->rxlist;
1478
1479 ieq->partials_handled++;
1480
1481 INIT_LIST_HEAD(&pbufl);
1482 list_add(&buf->list, &pbufl);
1483
1484 status = irdma_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
1485 if (status)
1486 goto error;
1487
1488 txbuf = irdma_puda_get_bufpool(ieq);
1489 if (!txbuf) {
1490 pfpdu->no_tx_bufs++;
1491 status = -ENOBUFS;
1492 goto error;
1493 }
1494
1495 irdma_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len);
1496 irdma_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum);
1497
1498 crcptr = txbuf->data + fpdu_len - 4;
1499 mpacrc = *(u32 *)crcptr;
1500 if (ieq->check_crc) {
1501 status = irdma_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,
1502 (fpdu_len - 4), mpacrc);
1503 if (status) {
1504 irdma_debug(ieq->dev, IRDMA_DEBUG_IEQ,
1505 "error bad crc\n");
1506 pfpdu->mpa_crc_err = true;
1507 goto error;
1508 }
1509 }
1510
1511 irdma_debug_buf(ieq->dev, IRDMA_DEBUG_IEQ, "IEQ TX BUFFER",
1512 txbuf->mem.va, txbuf->totallen);
1513 if (ieq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
1514 txbuf->ah_id = pfpdu->ah->ah_info.ah_idx;
1515 txbuf->do_lpb = true;
1516 irdma_puda_send_buf(ieq, txbuf);
1517 pfpdu->rcv_nxt = seqnum + fpdu_len;
1518 return status;
1519
1520 error:
1521 while (!list_empty(&pbufl)) {
1522 buf = (struct irdma_puda_buf *)(&pbufl)->prev;
1523 list_move(&buf->list, rxlist);
1524 }
1525 if (txbuf)
1526 irdma_puda_ret_bufpool(ieq, txbuf);
1527
1528 return status;
1529 }
1530
1531 /**
1532 * irdma_ieq_process_buf - process buffer rcvd for ieq
1533 * @ieq: ieq resource
1534 * @pfpdu: partial management per user qp
1535 * @buf: receive buffer
1536 */
1537 static int
1538 irdma_ieq_process_buf(struct irdma_puda_rsrc *ieq,
1539 struct irdma_pfpdu *pfpdu,
1540 struct irdma_puda_buf *buf)
1541 {
1542 u16 fpdu_len = 0;
1543 u16 datalen = buf->datalen;
1544 u8 *datap = buf->data;
1545 u8 *crcptr;
1546 u16 ioffset = 0;
1547 u32 mpacrc;
1548 u32 seqnum = buf->seqnum;
1549 u16 len = 0;
1550 u16 full = 0;
1551 bool partial = false;
1552 struct irdma_puda_buf *txbuf;
1553 struct list_head *rxlist = &pfpdu->rxlist;
1554 int ret = 0;
1555
1556 ioffset = (u16)(buf->data - (u8 *)buf->mem.va);
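/*
 * Peel complete fpdus off the receive buffer: 'full' counts whole fpdus
 * copied out, 'partial' flags a trailing fragment that needs more data.
 */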
1557 while (datalen) {
1558 fpdu_len = irdma_ieq_get_fpdu_len(pfpdu, datap, buf->seqnum);
1559 if (!fpdu_len) {
1560 irdma_debug(ieq->dev, IRDMA_DEBUG_IEQ,
1561 "error bad fpdu len\n");
1562 list_add(&buf->list, rxlist);
1563 pfpdu->mpa_crc_err = true;
1564 return -EINVAL;
1565 }
1566
1567 if (datalen < fpdu_len) {
1568 partial = true;
1569 break;
1570 }
1571 crcptr = datap + fpdu_len - 4;
1572 mpacrc = *(u32 *)crcptr;
1573 if (ieq->check_crc)
1574 ret = irdma_ieq_check_mpacrc(ieq->hash_desc, datap,
1575 fpdu_len - 4, mpacrc);
1576 if (ret) {
1577 list_add(&buf->list, rxlist);
1578 irdma_debug(ieq->dev, IRDMA_DEBUG_ERR,
1579 "IRDMA_ERR_MPA_CRC\n");
1580 pfpdu->mpa_crc_err = true;
1581 return ret;
1582 }
1583 full++;
1584 pfpdu->fpdu_processed++;
1585 ieq->fpdu_processed++;
1586 datap += fpdu_len;
1587 len += fpdu_len;
1588 datalen -= fpdu_len;
1589 }
1590 if (full) {
1591 /* copy full fpdus into the txbuf and send them out */
1592 txbuf = irdma_puda_get_bufpool(ieq);
1593 if (!txbuf) {
1594 pfpdu->no_tx_bufs++;
1595 list_add(&buf->list, rxlist);
1596 return -ENOBUFS;
1597 }
1598 /* modify txbuf's buffer header */
1599 irdma_ieq_setup_tx_buf(buf, txbuf);
1600 /* copy full fpdu's to new buffer */
1601 if (ieq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1602 irdma_ieq_copy_to_txbuf(buf, txbuf, ioffset,
1603 txbuf->hdrlen, len);
1604 txbuf->totallen = txbuf->hdrlen + len;
1605 txbuf->ah_id = pfpdu->ah->ah_info.ah_idx;
1606 } else {
1607 irdma_ieq_copy_to_txbuf(buf, txbuf, ioffset,
1608 buf->hdrlen, len);
1609 txbuf->totallen = buf->hdrlen + len;
1610 }
1611 irdma_ieq_update_tcpip_info(txbuf, len, buf->seqnum);
1612 irdma_debug_buf(ieq->dev, IRDMA_DEBUG_IEQ, "IEQ TX BUFFER",
1613 txbuf->mem.va, txbuf->totallen);
1614 txbuf->do_lpb = true;
1615 irdma_puda_send_buf(ieq, txbuf);
1616
1617 if (!datalen) {
1618 pfpdu->rcv_nxt = buf->seqnum + len;
1619 irdma_puda_ret_bufpool(ieq, buf);
1620 return 0;
1621 }
1622 buf->data = datap;
1623 buf->seqnum = seqnum + len;
1624 buf->datalen = datalen;
1625 pfpdu->rcv_nxt = buf->seqnum;
1626 }
1627 if (partial)
1628 return irdma_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len);
1629
1630 return 0;
1631 }
1632
1633 /**
1634 * irdma_ieq_process_fpdus - process fpdu buffers on the qp's rxlist
1635 * @qp: qp with pending partial fpdus
1636 * @ieq: ieq resource
1637 */
1638 void
1639 irdma_ieq_process_fpdus(struct irdma_sc_qp *qp,
1640 struct irdma_puda_rsrc *ieq)
1641 {
1642 struct irdma_pfpdu *pfpdu = &qp->pfpdu;
1643 struct list_head *rxlist = &pfpdu->rxlist;
1644 struct irdma_puda_buf *buf;
1645 int status;
1646
1647 do {
1648 if (list_empty(rxlist))
1649 break;
1650 buf = irdma_puda_get_listbuf(rxlist);
1651 if (!buf) {
1652 irdma_debug(ieq->dev, IRDMA_DEBUG_IEQ,
1653 "error no buf\n");
1654 break;
1655 }
1656 if (buf->seqnum != pfpdu->rcv_nxt) {
1657 /* This could be an out-of-order or missing packet */
1658 pfpdu->out_of_order++;
1659 list_add(&buf->list, rxlist);
1660 break;
1661 }
1662 /* keep processing buffers from the head of the list */
1663 status = irdma_ieq_process_buf(ieq, pfpdu, buf);
1664 if (status && pfpdu->mpa_crc_err) {
1665 while (!list_empty(rxlist)) {
1666 buf = irdma_puda_get_listbuf(rxlist);
1667 irdma_puda_ret_bufpool(ieq, buf);
1668 pfpdu->crc_err++;
1669 ieq->crc_err++;
1670 }
1671 /* create CQP for AE */
1672 irdma_ieq_mpa_crc_ae(ieq->dev, qp);
1673 }
1674 } while (!status);
1675 }
1676
1677 /**
1678 * irdma_ieq_create_ah - create an address handle for IEQ
1679 * @qp: qp pointer
1680 * @buf: buf received on IEQ used to create AH
1681 */
1682 static int
1683 irdma_ieq_create_ah(struct irdma_sc_qp *qp, struct irdma_puda_buf *buf)
1684 {
1685 struct irdma_ah_info ah_info = {0};
1686
1687 qp->pfpdu.ah_buf = buf;
1688 irdma_puda_ieq_get_ah_info(qp, &ah_info);
1689 return irdma_puda_create_ah(qp->vsi->dev, &ah_info, false,
1690 IRDMA_PUDA_RSRC_TYPE_IEQ, qp,
1691 &qp->pfpdu.ah);
1692 }
1693
1694 /**
1695 * irdma_ieq_handle_exception - handle qp's exception
1696 * @ieq: ieq resource
1697 * @qp: qp receiving exception
1698 * @buf: receive buffer
1699 */
1700 static void
1701 irdma_ieq_handle_exception(struct irdma_puda_rsrc *ieq,
1702 struct irdma_sc_qp *qp,
1703 struct irdma_puda_buf *buf)
1704 {
1705 struct irdma_pfpdu *pfpdu = &qp->pfpdu;
1706 u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
1707 u32 rcv_wnd = hw_host_ctx[23];
1708
1709 /* first partial seq # in q2 */
1710 u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET);
1711 struct list_head *rxlist = &pfpdu->rxlist;
1712 struct list_head *plist;
1713 struct irdma_puda_buf *tmpbuf = NULL;
1714 unsigned long flags = 0;
1715 u8 hw_rev = qp->dev->hw_attrs.uk_attrs.hw_rev;
1716
1717 irdma_debug_buf(ieq->dev, IRDMA_DEBUG_IEQ, "IEQ RX BUFFER", buf->mem.va,
1718 buf->totallen);
1719
1720 spin_lock_irqsave(&pfpdu->lock, flags);
1721 pfpdu->total_ieq_bufs++;
1722 if (pfpdu->mpa_crc_err) {
1723 pfpdu->crc_err++;
1724 goto error;
1725 }
1726 if (pfpdu->mode && fps != pfpdu->fps) {
1727 /* clean up qp as it is new partial sequence */
1728 irdma_ieq_cleanup_qp(ieq, qp);
1729 irdma_debug(ieq->dev, IRDMA_DEBUG_IEQ,
1730 "restarting new partial\n");
1731 pfpdu->mode = false;
1732 }
1733
1734 if (!pfpdu->mode) {
1735 irdma_debug_buf(ieq->dev, IRDMA_DEBUG_IEQ, "Q2 BUFFER",
1736 (u64 *)qp->q2_buf, 128);
1737 /* First_Partial_Sequence_Number check */
1738 pfpdu->rcv_nxt = fps;
1739 pfpdu->fps = fps;
1740 pfpdu->mode = true;
1741 pfpdu->max_fpdu_data = (buf->ipv4) ?
1742 (ieq->vsi->mtu - IRDMA_MTU_TO_MSS_IPV4) :
1743 (ieq->vsi->mtu - IRDMA_MTU_TO_MSS_IPV6);
1744 pfpdu->pmode_count++;
1745 ieq->pmode_count++;
1746 INIT_LIST_HEAD(rxlist);
1747 irdma_ieq_check_first_buf(buf, fps);
1748 }
1749
1750 if (!(rcv_wnd >= (buf->seqnum - pfpdu->rcv_nxt))) {
1751 pfpdu->bad_seq_num++;
1752 ieq->bad_seq_num++;
1753 goto error;
1754 }
1755
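/*
 * Queue the buffer on rxlist in TCP sequence order; a duplicate
 * sequence number is returned to the pool via the error path.
 */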
1756 if (!list_empty(rxlist)) {
1757 tmpbuf = (struct irdma_puda_buf *)(rxlist)->next;
1758 while ((struct list_head *)tmpbuf != rxlist) {
1759 if (buf->seqnum == tmpbuf->seqnum)
1760 goto error;
1761 if ((int)(buf->seqnum - tmpbuf->seqnum) < 0)
1762 break;
1763 plist = &tmpbuf->list;
1764 tmpbuf = (struct irdma_puda_buf *)(plist)->next;
1765 }
1766 /* Insert buf before tmpbuf */
1767 list_add_tail(&buf->list, &tmpbuf->list);
1768 } else {
1769 list_add_tail(&buf->list, rxlist);
1770 }
1771 pfpdu->nextseqnum = buf->seqnum + buf->datalen;
1772 pfpdu->lastrcv_buf = buf;
1773 if (hw_rev >= IRDMA_GEN_2 && !pfpdu->ah) {
1774 irdma_ieq_create_ah(qp, buf);
1775 if (!pfpdu->ah)
1776 goto error;
1777 goto exit;
1778 }
1779 if (hw_rev == IRDMA_GEN_1)
1780 irdma_ieq_process_fpdus(qp, ieq);
1781 else if (pfpdu->ah && pfpdu->ah->ah_info.ah_valid)
1782 irdma_ieq_process_fpdus(qp, ieq);
1783 exit:
1784 spin_unlock_irqrestore(&pfpdu->lock, flags);
1785
1786 return;
1787
1788 error:
1789 irdma_puda_ret_bufpool(ieq, buf);
1790 spin_unlock_irqrestore(&pfpdu->lock, flags);
1791 }
1792
1793 /**
1794 * irdma_ieq_receive - received exception buffer
1795 * @vsi: VSI of device
1796 * @buf: exception buffer received
1797 */
1798 static void
1799 irdma_ieq_receive(struct irdma_sc_vsi *vsi,
1800 struct irdma_puda_buf *buf)
1801 {
1802 struct irdma_puda_rsrc *ieq = vsi->ieq;
1803 struct irdma_sc_qp *qp = NULL;
1804 u32 wqe_idx = ieq->compl_rxwqe_idx;
1805
1806 qp = irdma_ieq_get_qp(vsi->dev, buf);
1807 if (!qp) {
1808 ieq->stats_bad_qp_id++;
1809 irdma_puda_ret_bufpool(ieq, buf);
1810 } else {
1811 irdma_ieq_handle_exception(ieq, qp, buf);
1812 }
1813 /*
1814 * ieq->rx_wqe_idx tells irdma_puda_replenish_rq() at which wqe_idx to start replenishing the rq
1815 */
1816 if (!ieq->rxq_invalid_cnt)
1817 ieq->rx_wqe_idx = wqe_idx;
1818 ieq->rxq_invalid_cnt++;
1819 }
1820
1821 /**
1822 * irdma_ieq_tx_compl - put back after sending completed exception buffer
1823 * @vsi: sc VSI struct
1824 * @sqwrid: pointer to puda buffer
1825 */
1826 static void
1827 irdma_ieq_tx_compl(struct irdma_sc_vsi *vsi, void *sqwrid)
1828 {
1829 struct irdma_puda_rsrc *ieq = vsi->ieq;
1830 struct irdma_puda_buf *buf = sqwrid;
1831
1832 irdma_puda_ret_bufpool(ieq, buf);
1833 }
1834
1835 /**
1836 * irdma_ieq_cleanup_qp - qp is being destroyed
1837 * @ieq: ieq resource
1838 * @qp: qp whose pending fpdu buffers are released
1839 */
1840 void
1841 irdma_ieq_cleanup_qp(struct irdma_puda_rsrc *ieq, struct irdma_sc_qp *qp)
1842 {
1843 struct irdma_puda_buf *buf;
1844 struct irdma_pfpdu *pfpdu = &qp->pfpdu;
1845 struct list_head *rxlist = &pfpdu->rxlist;
1846
1847 if (qp->pfpdu.ah) {
1848 irdma_puda_free_ah(ieq->dev, qp->pfpdu.ah);
1849 qp->pfpdu.ah = NULL;
1850 qp->pfpdu.ah_buf = NULL;
1851 }
1852
1853 if (!pfpdu->mode)
1854 return;
1855
1856 while (!list_empty(rxlist)) {
1857 buf = irdma_puda_get_listbuf(rxlist);
1858 irdma_puda_ret_bufpool(ieq, buf);
1859 }
1860 }
1861