/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2015 - 2021 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*$FreeBSD$*/

#include "osdep.h"
#include "irdma_hmc.h"
#include "irdma_defs.h"
#include "irdma_type.h"
#include "irdma_protos.h"
#include "irdma_puda.h"
#include "irdma_ws.h"

static void
irdma_ieq_receive(struct irdma_sc_vsi *vsi,
		  struct irdma_puda_buf *buf);
static void irdma_ieq_tx_compl(struct irdma_sc_vsi *vsi, void *sqwrid);
static void
irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp,
			 struct irdma_puda_buf *buf, u32 wqe_idx);
/**
 * irdma_puda_get_listbuf - get buffer from puda list
 * @list: list to use for buffers (ILQ or IEQ)
 */
static struct irdma_puda_buf *
irdma_puda_get_listbuf(struct list_head *list)
{
	struct irdma_puda_buf *buf = NULL;

	if (!list_empty(list)) {
		buf = (struct irdma_puda_buf *)(list)->next;
		list_del((struct list_head *)&buf->list);
	}

	return buf;
}

/**
 * irdma_puda_get_bufpool - return buffer from resource
 * @rsrc: resource to use for buffer
 */
struct irdma_puda_buf *
irdma_puda_get_bufpool(struct irdma_puda_rsrc *rsrc)
{
	struct irdma_puda_buf *buf = NULL;
	struct list_head *list = &rsrc->bufpool;
	unsigned long flags;

	spin_lock_irqsave(&rsrc->bufpool_lock, flags);
	buf = irdma_puda_get_listbuf(list);
	if (buf) {
		rsrc->avail_buf_count--;
		buf->vsi = rsrc->vsi;
	} else {
		rsrc->stats_buf_alloc_fail++;
	}
	spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);

	return buf;
}

/**
 * irdma_puda_ret_bufpool - return buffer to rsrc list
 * @rsrc: resource to use for buffer
 * @buf: buffer to return to resource
 */
void
irdma_puda_ret_bufpool(struct irdma_puda_rsrc *rsrc,
		       struct irdma_puda_buf *buf)
{
	unsigned long flags;

	buf->do_lpb = false;
	spin_lock_irqsave(&rsrc->bufpool_lock, flags);
	list_add(&buf->list, &rsrc->bufpool);
	spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
	rsrc->avail_buf_count++;
}

/**
 * irdma_puda_post_recvbuf - set wqe for rcv buffer
 * @rsrc: resource ptr
 * @wqe_idx: wqe index to use
 * @buf: puda buffer for rcv q
 * @initial: flag if during init time
 */
static void
irdma_puda_post_recvbuf(struct irdma_puda_rsrc *rsrc, u32 wqe_idx,
			struct irdma_puda_buf *buf, bool initial)
{
	__le64 *wqe;
	struct irdma_sc_qp *qp = &rsrc->qp;
	u64 offset24 = 0;

	/* Synch buffer for use by device */
	dma_sync_single_for_device(hw_to_dev(rsrc->dev->hw), buf->mem.pa, buf->mem.size, DMA_BIDIRECTIONAL);
	qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;
	wqe = qp->qp_uk.rq_base[wqe_idx].elem;
	if (!initial)
		get_64bit_val(wqe, IRDMA_BYTE_24, &offset24);

	offset24 = (offset24) ? 0 : LS_64(1, IRDMAQPSQ_VALID);

	set_64bit_val(wqe, IRDMA_BYTE_16, 0);
	set_64bit_val(wqe, 0, buf->mem.pa);
	if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
		set_64bit_val(wqe, IRDMA_BYTE_8,
			      LS_64(buf->mem.size, IRDMAQPSQ_GEN1_FRAG_LEN));
	} else {
		set_64bit_val(wqe, IRDMA_BYTE_8,
			      LS_64(buf->mem.size,
				    IRDMAQPSQ_FRAG_LEN) | (offset24 & IRDMAQPSQ_VALID_M));
	}
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, offset24);
}

/**
 * irdma_puda_replenish_rq - post rcv buffers
 * @rsrc: resource to use for buffer
 * @initial: flag if during init time
 */
static int
irdma_puda_replenish_rq(struct irdma_puda_rsrc *rsrc, bool initial)
{
	u32 i;
	u32 invalid_cnt = rsrc->rxq_invalid_cnt;
	struct irdma_puda_buf *buf = NULL;

	for (i = 0; i < invalid_cnt; i++) {
		buf = irdma_puda_get_bufpool(rsrc);
		if (!buf)
			return -ENOBUFS;
		irdma_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf, initial);
		rsrc->rx_wqe_idx = ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size);
		rsrc->rxq_invalid_cnt--;
	}

	return 0;
}

/**
 * irdma_puda_alloc_buf - allocate mem for buffer
 * @dev: iwarp device
 * @len: length of buffer
 */
static struct irdma_puda_buf *
irdma_puda_alloc_buf(struct irdma_sc_dev *dev,
		     u32 len)
{
	struct irdma_puda_buf *buf;
	struct irdma_virt_mem buf_mem;

	buf_mem.size = sizeof(struct irdma_puda_buf);
	buf_mem.va = kzalloc(buf_mem.size, GFP_ATOMIC);
	if (!buf_mem.va)
		return NULL;

	buf = buf_mem.va;
	buf->mem.size = len;
	buf->mem.va = kzalloc(buf->mem.size, GFP_KERNEL);
	if (!buf->mem.va)
		goto free_virt;
	buf->mem.pa = dma_map_single(hw_to_dev(dev->hw), buf->mem.va, buf->mem.size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(hw_to_dev(dev->hw), buf->mem.pa)) {
		kfree(buf->mem.va);
		goto free_virt;
	}

	buf->buf_mem.va = buf_mem.va;
	buf->buf_mem.size = buf_mem.size;

	return buf;

free_virt:
	kfree(buf_mem.va);
	return NULL;
}

/**
 * irdma_puda_dele_buf - delete buffer back to system
 * @dev: iwarp device
 * @buf: buffer to free
 */
static void
irdma_puda_dele_buf(struct irdma_sc_dev *dev,
		    struct irdma_puda_buf *buf)
{
	if (!buf->virtdma) {
		irdma_free_dma_mem(dev->hw, &buf->mem);
		kfree(buf->buf_mem.va);
	}
}

/**
 * irdma_puda_get_next_send_wqe - return next wqe for processing
 * @qp: puda qp for wqe
 * @wqe_idx: wqe index for caller
 */
static __le64 * irdma_puda_get_next_send_wqe(struct irdma_qp_uk *qp,
					     u32 *wqe_idx){
	__le64 *wqe = NULL;
	int ret_code = 0;

	*wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	if (!*wqe_idx)
		qp->swqe_polarity = !qp->swqe_polarity;
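	/*
	 * A head index of 0 means the SQ ring has just wrapped, which is why
	 * the WQE polarity flips above; MOVE_HEAD sets ret_code when the ring
	 * is full, in which case no WQE is handed out.
	 */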
	IRDMA_RING_MOVE_HEAD(qp->sq_ring, ret_code);
	if (ret_code)
		return wqe;

	wqe = qp->sq_base[*wqe_idx].elem;

	return wqe;
}

/**
 * irdma_puda_poll_info - poll cq for completion
 * @cq: cq for poll
 * @info: info return for successful completion
 */
static int
irdma_puda_poll_info(struct irdma_sc_cq *cq,
		     struct irdma_puda_cmpl_info *info)
{
	struct irdma_cq_uk *cq_uk = &cq->cq_uk;
	u64 qword0, qword2, qword3, qword6;
	__le64 *cqe;
	__le64 *ext_cqe = NULL;
	u64 qword7 = 0;
	u64 comp_ctx;
	bool valid_bit;
	bool ext_valid = 0;
	u32 major_err, minor_err;
	u32 peek_head;
	bool error;
	u8 polarity;

	cqe = IRDMA_GET_CURRENT_CQ_ELEM(&cq->cq_uk);
	get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
	valid_bit = (bool)RS_64(qword3, IRDMA_CQ_VALID);
	if (valid_bit != cq_uk->polarity)
		return -ENOENT;

	if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
		ext_valid = (bool)RS_64(qword3, IRDMA_CQ_EXTCQE);

	if (ext_valid) {
		peek_head = (cq_uk->cq_ring.head + 1) % cq_uk->cq_ring.size;
		ext_cqe = cq_uk->cq_base[peek_head].buf;
		get_64bit_val(ext_cqe, IRDMA_BYTE_24, &qword7);
		polarity = (u8)RS_64(qword7, IRDMA_CQ_VALID);
		if (!peek_head)
			polarity ^= 1;
		if (polarity != cq_uk->polarity)
			return -ENOENT;

		IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
		if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
			cq_uk->polarity = !cq_uk->polarity;
		/* update cq tail in cq shadow memory also */
		IRDMA_RING_MOVE_TAIL(cq_uk->cq_ring);
	}

	irdma_debug_buf(cq->dev, IRDMA_DEBUG_PUDA, "PUDA CQE", cqe, 32);
	if (ext_valid)
		irdma_debug_buf(cq->dev, IRDMA_DEBUG_PUDA, "PUDA EXT-CQE",
				ext_cqe, 32);

	error = (bool)RS_64(qword3, IRDMA_CQ_ERROR);
	if (error) {
		irdma_debug(cq->dev, IRDMA_DEBUG_PUDA, "receive error\n");
		major_err = (u32)(RS_64(qword3, IRDMA_CQ_MAJERR));
		minor_err = (u32)(RS_64(qword3, IRDMA_CQ_MINERR));
		info->compl_error = major_err << 16 | minor_err;
		return -EIO;
	}

	get_64bit_val(cqe, IRDMA_BYTE_0, &qword0);
	get_64bit_val(cqe, IRDMA_BYTE_16, &qword2);

	info->q_type = (u8)RS_64(qword3, IRDMA_CQ_SQ);
	info->qp_id = (u32)RS_64(qword2, IRDMACQ_QPID);
	if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
		info->ipv4 = (bool)RS_64(qword3, IRDMACQ_IPV4);

	get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
	info->qp = (struct irdma_qp_uk *)(irdma_uintptr) comp_ctx;
	info->wqe_idx = (u32)RS_64(qword3, IRDMA_CQ_WQEIDX);

	if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
		if (ext_valid) {
			info->vlan_valid = (bool)RS_64(qword7, IRDMA_CQ_UDVLANVALID);
			if (info->vlan_valid) {
				get_64bit_val(ext_cqe, IRDMA_BYTE_16, &qword6);
				info->vlan = (u16)RS_64(qword6, IRDMA_CQ_UDVLAN);
			}
			info->smac_valid = (bool)RS_64(qword7, IRDMA_CQ_UDSMACVALID);
			if (info->smac_valid) {
				get_64bit_val(ext_cqe, IRDMA_BYTE_16, &qword6);
				info->smac[0] = (u8)((qword6 >> 40) & 0xFF);
				info->smac[1] = (u8)((qword6 >> 32) & 0xFF);
				info->smac[2] = (u8)((qword6 >> 24) & 0xFF);
				info->smac[3] = (u8)((qword6 >> 16) & 0xFF);
				info->smac[4] = (u8)((qword6 >> 8) & 0xFF);
				info->smac[5] = (u8)(qword6 & 0xFF);
			}
		}

		if (cq->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
			info->vlan_valid = (bool)RS_64(qword3, IRDMA_VLAN_TAG_VALID);
			info->l4proto = (u8)RS_64(qword2, IRDMA_UDA_L4PROTO);
			info->l3proto = (u8)RS_64(qword2, IRDMA_UDA_L3PROTO);
		}
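
		/* payload length is reported by the HW only for RQ completions */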
		info->payload_len = (u32)RS_64(qword0, IRDMACQ_PAYLDLEN);
	}

	return 0;
}

/**
 * irdma_puda_poll_cmpl - processes completion for cq
 * @dev: iwarp device
 * @cq: cq getting interrupt
 * @compl_err: return any completion err
 */
int
irdma_puda_poll_cmpl(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq,
		     u32 *compl_err)
{
	struct irdma_qp_uk *qp;
	struct irdma_cq_uk *cq_uk = &cq->cq_uk;
	struct irdma_puda_cmpl_info info = {0};
	int ret = 0;
	struct irdma_puda_buf *buf;
	struct irdma_puda_rsrc *rsrc;
	u8 cq_type = cq->cq_type;
	unsigned long flags;

	if (cq_type == IRDMA_CQ_TYPE_ILQ || cq_type == IRDMA_CQ_TYPE_IEQ) {
		rsrc = (cq_type == IRDMA_CQ_TYPE_ILQ) ? cq->vsi->ilq :
		    cq->vsi->ieq;
	} else {
		irdma_debug(dev, IRDMA_DEBUG_PUDA, "qp_type error\n");
		return -EFAULT;
	}

	ret = irdma_puda_poll_info(cq, &info);
	*compl_err = info.compl_error;
	if (ret == -ENOENT)
		return ret;
	if (ret)
		goto done;

	qp = info.qp;
	if (!qp || !rsrc) {
		ret = -EFAULT;
		goto done;
	}

	if (qp->qp_id != rsrc->qp_id) {
		ret = -EFAULT;
		goto done;
	}

	if (info.q_type == IRDMA_CQE_QTYPE_RQ) {
		buf = (struct irdma_puda_buf *)(uintptr_t)
		    qp->rq_wrid_array[info.wqe_idx];

		/* reusing so synch the buffer for CPU use */
		dma_sync_single_for_cpu(hw_to_dev(dev->hw), buf->mem.pa, buf->mem.size, DMA_BIDIRECTIONAL);
		/* Get all the tcpip information in the buf header */
		ret = irdma_puda_get_tcpip_info(&info, buf);
		if (ret) {
			rsrc->stats_rcvd_pkt_err++;
			if (cq_type == IRDMA_CQ_TYPE_ILQ) {
				irdma_ilq_putback_rcvbuf(&rsrc->qp, buf,
							 info.wqe_idx);
			} else {
				irdma_puda_ret_bufpool(rsrc, buf);
				irdma_puda_replenish_rq(rsrc, false);
			}
			goto done;
		}

		rsrc->stats_pkt_rcvd++;
		rsrc->compl_rxwqe_idx = info.wqe_idx;
		irdma_debug(dev, IRDMA_DEBUG_PUDA, "RQ completion\n");
		rsrc->receive(rsrc->vsi, buf);
		if (cq_type == IRDMA_CQ_TYPE_ILQ)
			irdma_ilq_putback_rcvbuf(&rsrc->qp, buf, info.wqe_idx);
		else
			irdma_puda_replenish_rq(rsrc, false);

	} else {
		irdma_debug(dev, IRDMA_DEBUG_PUDA, "SQ completion\n");
		buf = (struct irdma_puda_buf *)(uintptr_t)
		    qp->sq_wrtrk_array[info.wqe_idx].wrid;

		/* reusing so synch the buffer for CPU use */
		dma_sync_single_for_cpu(hw_to_dev(dev->hw), buf->mem.pa, buf->mem.size, DMA_BIDIRECTIONAL);
		IRDMA_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
		rsrc->xmit_complete(rsrc->vsi, buf);
		spin_lock_irqsave(&rsrc->bufpool_lock, flags);
		rsrc->tx_wqe_avail_cnt++;
		spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
		if (!list_empty(&rsrc->txpend))
			irdma_puda_send_buf(rsrc, NULL);
	}

done:
	IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
	if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
		cq_uk->polarity = !cq_uk->polarity;
	/* update cq tail in cq shadow memory also */
	IRDMA_RING_MOVE_TAIL(cq_uk->cq_ring);
	set_64bit_val(cq_uk->shadow_area, IRDMA_BYTE_0,
		      IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring));

	return ret;
}

/**
 * irdma_puda_send - complete send wqe for transmit
 * @qp: puda qp for send
 * @info: buffer information for transmit
 */
int
irdma_puda_send(struct irdma_sc_qp *qp, struct irdma_puda_send_info *info)
{
	__le64 *wqe;
	u32 iplen, l4len;
	u64 hdr[2];
	u32 wqe_idx;
	u8 iipt;

	/* number of 32 bits DWORDS in header */
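	/*
	 * l4len is the TCP header length in 32-bit words; iplen is 5 words
	 * (20 bytes) for IPv4 and 10 words (40 bytes) for IPv6.
	 */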
	l4len = info->tcplen >> 2;
	if (info->ipv4) {
		iipt = 3;
		iplen = 5;
	} else {
		iipt = 1;
		iplen = 10;
	}

	wqe = irdma_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);
	if (!wqe)
		return -ENOSPC;

	qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch;
	/* Third line of WQE descriptor */
	/* maclen is in words */

	if (qp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
		hdr[0] = 0;	/* Dest_QPN and Dest_QKey only for UD */
		hdr[1] = LS_64(IRDMA_OP_TYPE_SEND, IRDMA_UDA_QPSQ_OPCODE) |
		    LS_64(l4len, IRDMA_UDA_QPSQ_L4LEN) |
		    LS_64(info->ah_id, IRDMAQPSQ_AHID) |
		    LS_64(1, IRDMA_UDA_QPSQ_SIGCOMPL) |
		    LS_64(qp->qp_uk.swqe_polarity, IRDMA_UDA_QPSQ_VALID);

		/* Fourth line of WQE descriptor */

		set_64bit_val(wqe, IRDMA_BYTE_0, info->paddr);
		set_64bit_val(wqe, IRDMA_BYTE_8,
			      LS_64(info->len, IRDMAQPSQ_FRAG_LEN) |
			      LS_64(qp->qp_uk.swqe_polarity, IRDMA_UDA_QPSQ_VALID));
	} else {
		hdr[0] = LS_64((info->maclen >> 1), IRDMA_UDA_QPSQ_MACLEN) |
		    LS_64(iplen, IRDMA_UDA_QPSQ_IPLEN) |
		    LS_64(1, IRDMA_UDA_QPSQ_L4T) |
		    LS_64(iipt, IRDMA_UDA_QPSQ_IIPT) |
		    LS_64(l4len, IRDMA_GEN1_UDA_QPSQ_L4LEN);

		hdr[1] = LS_64(IRDMA_OP_TYPE_SEND, IRDMA_UDA_QPSQ_OPCODE) |
		    LS_64(1, IRDMA_UDA_QPSQ_SIGCOMPL) |
		    LS_64(info->do_lpb, IRDMA_UDA_QPSQ_DOLOOPBACK) |
		    LS_64(qp->qp_uk.swqe_polarity, IRDMA_UDA_QPSQ_VALID);

		/* Fourth line of WQE descriptor */

		set_64bit_val(wqe, IRDMA_BYTE_0, info->paddr);
		set_64bit_val(wqe, IRDMA_BYTE_8,
			      LS_64(info->len, IRDMAQPSQ_GEN1_FRAG_LEN));
	}

	set_64bit_val(wqe, IRDMA_BYTE_16, hdr[0]);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr[1]);

	irdma_debug_buf(qp->dev, IRDMA_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
	irdma_uk_qp_post_wr(&qp->qp_uk);
	return 0;
}

/**
 * irdma_puda_send_buf - transmit puda buffer
 * @rsrc: resource to use for buffer
 * @buf: puda buffer to transmit
 */
void
irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,
		    struct irdma_puda_buf *buf)
{
	struct irdma_puda_send_info info;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&rsrc->bufpool_lock, flags);
	/*
	 * if no wqe available or not from a completion and we have pending
	 * buffers, we must queue the new buffer
	 */
	if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
		list_add_tail(&buf->list, &rsrc->txpend);
		spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
		rsrc->stats_sent_pkt_q++;
		if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
			irdma_debug(rsrc->dev, IRDMA_DEBUG_PUDA,
				    "adding to txpend\n");
		return;
	}
	rsrc->tx_wqe_avail_cnt--;
	/*
	 * if we are coming from a completion and have pending buffers then
	 * get one from the pending list
	 */
	if (!buf) {
		buf = irdma_puda_get_listbuf(&rsrc->txpend);
		if (!buf)
			goto done;
	}

	info.scratch = buf;
	info.paddr = buf->mem.pa;
	info.len = buf->totallen;
	info.tcplen = buf->tcphlen;
	info.ipv4 = buf->ipv4;

	if (rsrc->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
		info.ah_id = buf->ah_id;
	} else {
		info.maclen = buf->maclen;
		info.do_lpb = buf->do_lpb;
	}

	/* Synch buffer for use by device */
	dma_sync_single_for_cpu(hw_to_dev(rsrc->dev->hw), buf->mem.pa, buf->mem.size, DMA_BIDIRECTIONAL);
	ret = irdma_puda_send(&rsrc->qp, &info);
	if (ret) {
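		/* send failed: give back the SQ WQE credit and requeue the buffer */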
		rsrc->tx_wqe_avail_cnt++;
		rsrc->stats_sent_pkt_q++;
		list_add(&buf->list, &rsrc->txpend);
		if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
			irdma_debug(rsrc->dev, IRDMA_DEBUG_PUDA,
				    "adding to puda_send\n");
	} else {
		rsrc->stats_pkt_sent++;
	}
done:
	spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
}

/**
 * irdma_puda_qp_setctx - during init, set qp's context
 * @rsrc: qp's resource
 */
static void
irdma_puda_qp_setctx(struct irdma_puda_rsrc *rsrc)
{
	struct irdma_sc_qp *qp = &rsrc->qp;
	__le64 *qp_ctx = qp->hw_host_ctx;

	set_64bit_val(qp_ctx, IRDMA_BYTE_8, qp->sq_pa);
	set_64bit_val(qp_ctx, IRDMA_BYTE_16, qp->rq_pa);
	set_64bit_val(qp_ctx, IRDMA_BYTE_24,
		      LS_64(qp->hw_rq_size, IRDMAQPC_RQSIZE) |
		      LS_64(qp->hw_sq_size, IRDMAQPC_SQSIZE));
	set_64bit_val(qp_ctx, IRDMA_BYTE_48,
		      LS_64(rsrc->buf_size, IRDMAQPC_SNDMSS));
	set_64bit_val(qp_ctx, IRDMA_BYTE_56, 0);
	if (qp->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
		set_64bit_val(qp_ctx, IRDMA_BYTE_64, 1);
	set_64bit_val(qp_ctx, IRDMA_BYTE_136,
		      LS_64(rsrc->cq_id, IRDMAQPC_TXCQNUM) |
		      LS_64(rsrc->cq_id, IRDMAQPC_RXCQNUM));
	set_64bit_val(qp_ctx, IRDMA_BYTE_144,
		      LS_64(rsrc->stats_idx, IRDMAQPC_STAT_INDEX));
	set_64bit_val(qp_ctx, IRDMA_BYTE_160,
		      LS_64(1, IRDMAQPC_PRIVEN) |
		      LS_64(rsrc->stats_idx_valid, IRDMAQPC_USESTATSINSTANCE));
	set_64bit_val(qp_ctx, IRDMA_BYTE_168,
		      LS_64((uintptr_t)qp, IRDMAQPC_QPCOMPCTX));
	set_64bit_val(qp_ctx, IRDMA_BYTE_176,
		      LS_64(qp->sq_tph_val, IRDMAQPC_SQTPHVAL) |
		      LS_64(qp->rq_tph_val, IRDMAQPC_RQTPHVAL) |
		      LS_64(qp->qs_handle, IRDMAQPC_QSHANDLE));

	irdma_debug_buf(rsrc->dev, IRDMA_DEBUG_PUDA, "PUDA QP CONTEXT", qp_ctx,
			IRDMA_QP_CTX_SIZE);
}

/**
 * irdma_puda_qp_wqe - setup wqe for qp create
 * @dev: Device
 * @qp: Resource qp
 */
static int
irdma_puda_qp_wqe(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
{
	struct irdma_sc_cqp *cqp;
	__le64 *wqe;
	u64 hdr;
	struct irdma_ccq_cqe_info compl_info;
	int status = 0;

	cqp = dev->cqp;
	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);
	if (!wqe)
		return -ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_16, qp->hw_host_ctx_pa);
	set_64bit_val(wqe, IRDMA_BYTE_40, qp->shadow_area_pa);

	hdr = qp->qp_uk.qp_id |
	    LS_64(IRDMA_CQP_OP_CREATE_QP, IRDMA_CQPSQ_OPCODE) |
	    LS_64(IRDMA_QP_TYPE_UDA, IRDMA_CQPSQ_QP_QPTYPE) |
	    LS_64(1, IRDMA_CQPSQ_QP_CQNUMVALID) |
	    LS_64(2, IRDMA_CQPSQ_QP_NEXTIWSTATE) |
	    LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_PUDA, "PUDA QP CREATE", wqe, 40);
	irdma_sc_cqp_post_sq(cqp);
	status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_QP,
					       &compl_info);

	return status;
}

/**
 * irdma_puda_qp_create - create qp for resource
 * @rsrc: resource to use for buffer
 */
static int
irdma_puda_qp_create(struct irdma_puda_rsrc *rsrc)
{
	struct irdma_sc_qp *qp = &rsrc->qp;
	struct irdma_qp_uk *ukqp = &qp->qp_uk;
	int ret = 0;
	u32 sq_size, rq_size;
	struct irdma_dma_mem *mem;

	sq_size = rsrc->sq_size * IRDMA_QP_WQE_MIN_SIZE;
	rq_size = rsrc->rq_size * IRDMA_QP_WQE_MIN_SIZE;
	rsrc->qpmem.size = (sq_size + rq_size + (IRDMA_SHADOW_AREA_SIZE << 3) +
			    IRDMA_QP_CTX_SIZE);
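	/*
	 * One contiguous DMA region laid out as SQ WQEs, RQ WQEs, shadow
	 * area, then QP context (see the pa/va arithmetic below).
	 */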
	rsrc->qpmem.va = irdma_allocate_dma_mem(rsrc->dev->hw, &rsrc->qpmem,
						rsrc->qpmem.size, IRDMA_HW_PAGE_SIZE);
	if (!rsrc->qpmem.va)
		return -ENOMEM;

	mem = &rsrc->qpmem;
	memset(mem->va, 0, rsrc->qpmem.size);
	qp->hw_sq_size = irdma_get_encoded_wqe_size(rsrc->sq_size, IRDMA_QUEUE_TYPE_SQ_RQ);
	qp->hw_rq_size = irdma_get_encoded_wqe_size(rsrc->rq_size, IRDMA_QUEUE_TYPE_SQ_RQ);
	qp->pd = &rsrc->sc_pd;
	qp->qp_uk.qp_type = IRDMA_QP_TYPE_UDA;
	qp->dev = rsrc->dev;
	qp->qp_uk.back_qp = rsrc;
	qp->sq_pa = mem->pa;
	qp->rq_pa = qp->sq_pa + sq_size;
	qp->vsi = rsrc->vsi;
	ukqp->sq_base = mem->va;
	ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
	ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
	ukqp->uk_attrs = &qp->dev->hw_attrs.uk_attrs;
	qp->shadow_area_pa = qp->rq_pa + rq_size;
	qp->hw_host_ctx = ukqp->shadow_area + IRDMA_SHADOW_AREA_SIZE;
	qp->hw_host_ctx_pa = qp->shadow_area_pa + (IRDMA_SHADOW_AREA_SIZE << 3);
	qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
	ukqp->qp_id = rsrc->qp_id;
	ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;
	ukqp->rq_wrid_array = rsrc->rq_wrid_array;
	ukqp->sq_size = rsrc->sq_size;
	ukqp->rq_size = rsrc->rq_size;

	IRDMA_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
	IRDMA_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
	IRDMA_RING_INIT(ukqp->rq_ring, ukqp->rq_size);
	ukqp->wqe_alloc_db = qp->pd->dev->wqe_alloc_db;

	ret = rsrc->dev->ws_add(qp->vsi, qp->user_pri);
	if (ret) {
		irdma_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
		return ret;
	}

	irdma_qp_add_qos(qp);
	irdma_puda_qp_setctx(rsrc);

	if (rsrc->dev->ceq_valid)
		ret = irdma_cqp_qp_create_cmd(rsrc->dev, qp);
	else
		ret = irdma_puda_qp_wqe(rsrc->dev, qp);
	if (ret) {
		irdma_qp_rem_qos(qp);
		rsrc->dev->ws_remove(qp->vsi, qp->user_pri);
		irdma_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
	}

	return ret;
}

/**
 * irdma_puda_cq_wqe - setup wqe for CQ create
 * @dev: Device
 * @cq: resource for cq
 */
static int
irdma_puda_cq_wqe(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
{
	__le64 *wqe;
	struct irdma_sc_cqp *cqp;
	u64 hdr;
	struct irdma_ccq_cqe_info compl_info;
	int status = 0;

	cqp = dev->cqp;
	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);
	if (!wqe)
		return -ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_0, cq->cq_uk.cq_size);
	set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(cq, 1));
	set_64bit_val(wqe, IRDMA_BYTE_16,
		      LS_64(cq->shadow_read_threshold,
			    IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
	set_64bit_val(wqe, IRDMA_BYTE_32, cq->cq_pa);
	set_64bit_val(wqe, IRDMA_BYTE_40, cq->shadow_area_pa);
	set_64bit_val(wqe, IRDMA_BYTE_56,
		      LS_64(cq->tph_val, IRDMA_CQPSQ_TPHVAL) |
		      LS_64(cq->vsi->vsi_idx, IRDMA_CQPSQ_VSIIDX));

	hdr = cq->cq_uk.cq_id |
	    LS_64(IRDMA_CQP_OP_CREATE_CQ, IRDMA_CQPSQ_OPCODE) |
	    LS_64(1, IRDMA_CQPSQ_CQ_CHKOVERFLOW) |
	    LS_64(1, IRDMA_CQPSQ_CQ_ENCEQEMASK) |
	    LS_64(1, IRDMA_CQPSQ_CQ_CEQIDVALID) |
	    LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(dev, IRDMA_DEBUG_PUDA, "PUDA CREATE CQ", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	irdma_sc_cqp_post_sq(dev->cqp);
	status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_CQ,
					       &compl_info);
	if (!status) {
		struct irdma_sc_ceq *ceq = dev->ceq[0];
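
		/*
		 * CEQs that track their registered CQs (reg_cq) need the
		 * newly created CQ added to their context list.
		 */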
		if (ceq && ceq->reg_cq)
			status = irdma_sc_add_cq_ctx(ceq, cq);
	}

	return status;
}

/**
 * irdma_puda_cq_create - create cq for resource
 * @rsrc: resource for which cq to create
 */
static int
irdma_puda_cq_create(struct irdma_puda_rsrc *rsrc)
{
	struct irdma_sc_dev *dev = rsrc->dev;
	struct irdma_sc_cq *cq = &rsrc->cq;
	int ret = 0;
	u32 cqsize;
	struct irdma_dma_mem *mem;
	struct irdma_cq_init_info info = {0};
	struct irdma_cq_uk_init_info *init_info = &info.cq_uk_init_info;

	cq->vsi = rsrc->vsi;
	cqsize = rsrc->cq_size * (sizeof(struct irdma_cqe));
	rsrc->cqmem.size = cqsize + sizeof(struct irdma_cq_shadow_area);
	rsrc->cqmem.va = irdma_allocate_dma_mem(dev->hw, &rsrc->cqmem,
						rsrc->cqmem.size,
						IRDMA_CQ0_ALIGNMENT);
	if (!rsrc->cqmem.va)
		return -ENOMEM;

	mem = &rsrc->cqmem;
	info.dev = dev;
	info.type = (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ) ?
	    IRDMA_CQ_TYPE_ILQ : IRDMA_CQ_TYPE_IEQ;
	info.shadow_read_threshold = rsrc->cq_size >> 2;
	info.cq_base_pa = mem->pa;
	info.shadow_area_pa = mem->pa + cqsize;
	init_info->cq_base = mem->va;
	init_info->shadow_area = (__le64 *) ((u8 *)mem->va + cqsize);
	init_info->cq_size = rsrc->cq_size;
	init_info->cq_id = rsrc->cq_id;
	info.ceqe_mask = true;
	info.ceq_id_valid = true;
	info.vsi = rsrc->vsi;

	ret = irdma_sc_cq_init(cq, &info);
	if (ret)
		goto error;

	if (rsrc->dev->ceq_valid)
		ret = irdma_cqp_cq_create_cmd(dev, cq);
	else
		ret = irdma_puda_cq_wqe(dev, cq);
error:
	if (ret)
		irdma_free_dma_mem(dev->hw, &rsrc->cqmem);

	return ret;
}

/**
 * irdma_puda_free_qp - free qp for resource
 * @rsrc: resource for which qp to free
 */
static void
irdma_puda_free_qp(struct irdma_puda_rsrc *rsrc)
{
	int ret;
	struct irdma_ccq_cqe_info compl_info;
	struct irdma_sc_dev *dev = rsrc->dev;

	if (rsrc->dev->ceq_valid) {
		irdma_cqp_qp_destroy_cmd(dev, &rsrc->qp);
		rsrc->dev->ws_remove(rsrc->qp.vsi, rsrc->qp.user_pri);
		return;
	}

	ret = irdma_sc_qp_destroy(&rsrc->qp, 0, false, true, true);
	if (ret)
		irdma_debug(dev, IRDMA_DEBUG_PUDA,
			    "error puda qp destroy wqe, status = %d\n", ret);
	if (!ret) {
		ret = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_DESTROY_QP,
						    &compl_info);
		if (ret)
			irdma_debug(dev, IRDMA_DEBUG_PUDA,
				    "error puda qp destroy failed, status = %d\n",
				    ret);
	}
	rsrc->dev->ws_remove(rsrc->qp.vsi, rsrc->qp.user_pri);
}

/**
 * irdma_puda_free_cq - free cq for resource
 * @rsrc: resource for which cq to free
 */
static void
irdma_puda_free_cq(struct irdma_puda_rsrc *rsrc)
{
	int ret;
	struct irdma_ccq_cqe_info compl_info;
	struct irdma_sc_dev *dev = rsrc->dev;

	if (rsrc->dev->ceq_valid) {
		irdma_cqp_cq_destroy_cmd(dev, &rsrc->cq);
		return;
	}

	ret = irdma_sc_cq_destroy(&rsrc->cq, 0, true);
	if (ret)
		irdma_debug(dev, IRDMA_DEBUG_PUDA, "error ieq cq destroy\n");
	if (!ret) {
		ret = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_DESTROY_CQ,
						    &compl_info);
		if (ret)
			irdma_debug(dev, IRDMA_DEBUG_PUDA,
				    "error ieq qp destroy done\n");
	}
}

/**
 * irdma_puda_dele_rsrc - delete all resources during close
 * @vsi: VSI structure of device
 * @type: type of resource to delete
 * @reset: true if reset chip
 */
void
irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,
		     bool reset)
{
	struct irdma_sc_dev *dev = vsi->dev;
	struct irdma_puda_rsrc *rsrc;
	struct irdma_puda_buf *buf = NULL;
	struct irdma_puda_buf *nextbuf = NULL;
	struct irdma_virt_mem *vmem;
	struct irdma_sc_ceq *ceq;

	ceq = vsi->dev->ceq[0];
	switch (type) {
	case IRDMA_PUDA_RSRC_TYPE_ILQ:
		rsrc = vsi->ilq;
		vmem = &vsi->ilq_mem;
		vsi->ilq = NULL;
		if (ceq && ceq->reg_cq)
			irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
		break;
	case IRDMA_PUDA_RSRC_TYPE_IEQ:
		rsrc = vsi->ieq;
		vmem = &vsi->ieq_mem;
		vsi->ieq = NULL;
		if (ceq && ceq->reg_cq)
			irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
		break;
	default:
		irdma_debug(dev, IRDMA_DEBUG_PUDA,
			    "error resource type = 0x%x\n", type);
		return;
	}

	spin_lock_destroy(&rsrc->bufpool_lock);
	switch (rsrc->cmpl) {
	case PUDA_HASH_CRC_COMPLETE:
		irdma_free_hash_desc(rsrc->hash_desc);
		/* fallthrough */
	case PUDA_QP_CREATED:
		irdma_qp_rem_qos(&rsrc->qp);

		if (!(reset || dev->no_cqp))
			irdma_puda_free_qp(rsrc);

		irdma_free_dma_mem(dev->hw, &rsrc->qpmem);
		/* fallthrough */
	case PUDA_CQ_CREATED:
		if (!(reset || dev->no_cqp))
			irdma_puda_free_cq(rsrc);

		irdma_free_dma_mem(dev->hw, &rsrc->cqmem);
		break;
	default:
		irdma_debug(rsrc->dev, IRDMA_DEBUG_PUDA,
			    "error no resources\n");
		break;
	}
	/* Free all allocated puda buffers for both tx and rx */
	buf = rsrc->alloclist;
	while (buf) {
		nextbuf = buf->next;
		irdma_puda_dele_buf(dev, buf);
		buf = nextbuf;
		rsrc->alloc_buf_count--;
	}

	kfree(vmem->va);
}

/**
 * irdma_puda_allocbufs - allocate buffers for resource
 * @rsrc: resource for buffer allocation
 * @count: number of buffers to create
 */
static int
irdma_puda_allocbufs(struct irdma_puda_rsrc *rsrc, u32 count)
{
	u32 i;
	struct irdma_puda_buf *buf;
	struct irdma_puda_buf *nextbuf;
	struct irdma_virt_mem buf_mem;
	struct irdma_dma_mem *dma_mem;
	bool virtdma = false;
	unsigned long flags;

	buf_mem.size = count * sizeof(struct irdma_puda_buf);
	buf_mem.va = kzalloc(buf_mem.size, GFP_ATOMIC);
	if (!buf_mem.va) {
		irdma_debug(rsrc->dev, IRDMA_DEBUG_PUDA,
			    "error virt_mem for buf\n");
		rsrc->stats_buf_alloc_fail++;
		goto trysmall;
	}

	/*
	 * Allocate the large dma chunk and set up the dma attributes in the
	 * first puda buffer. This is required during free.
	 */
	buf = (struct irdma_puda_buf *)buf_mem.va;
	buf->mem.va = irdma_allocate_dma_mem(rsrc->dev->hw, &buf->mem,
					     rsrc->buf_size * count, 1);
	if (!buf->mem.va) {
		irdma_debug(rsrc->dev, IRDMA_DEBUG_PUDA,
			    "error dma_mem for buf\n");
		kfree(buf_mem.va);
		rsrc->stats_buf_alloc_fail++;
		goto trysmall;
	}

	/*
	 * dma_mem points to start of the large DMA chunk
	 */
	dma_mem = &buf->mem;

	spin_lock_irqsave(&rsrc->bufpool_lock, flags);
	for (i = 0; i < count; i++) {
		buf = ((struct irdma_puda_buf *)buf_mem.va) + i;

		buf->mem.va = (char *)dma_mem->va + (i * rsrc->buf_size);
		buf->mem.pa = dma_mem->pa + (i * rsrc->buf_size);
		buf->mem.size = rsrc->buf_size;
		buf->virtdma = virtdma;
		virtdma = true;

		buf->buf_mem.va = buf_mem.va;
		buf->buf_mem.size = buf_mem.size;

		list_add(&buf->list, &rsrc->bufpool);
		rsrc->alloc_buf_count++;
		if (!rsrc->alloclist) {
			rsrc->alloclist = buf;
		} else {
			nextbuf = rsrc->alloclist;
			rsrc->alloclist = buf;
			buf->next = nextbuf;
		}
	}
	spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);

	rsrc->avail_buf_count = rsrc->alloc_buf_count;
	return 0;
trysmall:
	for (i = 0; i < count; i++) {
		buf = irdma_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
		if (!buf) {
			rsrc->stats_buf_alloc_fail++;
			return -ENOMEM;
		}
		irdma_puda_ret_bufpool(rsrc, buf);
		rsrc->alloc_buf_count++;
		if (!rsrc->alloclist) {
			rsrc->alloclist = buf;
		} else {
			nextbuf = rsrc->alloclist;
			rsrc->alloclist = buf;
			buf->next = nextbuf;
		}
	}

	rsrc->avail_buf_count = rsrc->alloc_buf_count;

	return 0;
}

/**
 * irdma_puda_create_rsrc - create resource (ilq or ieq)
 * @vsi: sc VSI struct
 * @info: resource information
 */
int
irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
		       struct irdma_puda_rsrc_info *info)
{
	struct irdma_sc_dev *dev = vsi->dev;
	int ret = 0;
	struct irdma_puda_rsrc *rsrc;
	u32 pudasize;
	u32 sqwridsize, rqwridsize;
	struct irdma_virt_mem *vmem;

	info->count = 1;
	pudasize = sizeof(struct irdma_puda_rsrc);
	sqwridsize = info->sq_size * sizeof(struct irdma_sq_uk_wr_trk_info);
	rqwridsize = info->rq_size * 8;
	switch (info->type) {
	case IRDMA_PUDA_RSRC_TYPE_ILQ:
		vmem = &vsi->ilq_mem;
		break;
	case IRDMA_PUDA_RSRC_TYPE_IEQ:
		vmem = &vsi->ieq_mem;
		break;
	default:
		return -EOPNOTSUPP;
	}
	vmem->size = pudasize + sqwridsize + rqwridsize;
	vmem->va = kzalloc(vmem->size, GFP_ATOMIC);
	if (!vmem->va)
		return -ENOMEM;

	rsrc = vmem->va;
	spin_lock_init(&rsrc->bufpool_lock);
	switch (info->type) {
	case IRDMA_PUDA_RSRC_TYPE_ILQ:
		vsi->ilq = vmem->va;
		vsi->ilq_count = info->count;
		rsrc->receive = info->receive;
		rsrc->xmit_complete = info->xmit_complete;
		break;
	case IRDMA_PUDA_RSRC_TYPE_IEQ:
		vsi->ieq_count = info->count;
		vsi->ieq = vmem->va;
		rsrc->receive = irdma_ieq_receive;
		rsrc->xmit_complete = irdma_ieq_tx_compl;
		break;
	default:
		return -EOPNOTSUPP;
	}

	rsrc->type = info->type;
	rsrc->sq_wrtrk_array = (struct irdma_sq_uk_wr_trk_info *)
	    ((u8 *)vmem->va + pudasize);
	rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
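	/*
	 * vmem is a single allocation holding the rsrc struct, then the SQ
	 * wrid tracking array, then the RQ wrid array (see vmem->size above).
	 */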
	/* Initialize all ieq lists */
	INIT_LIST_HEAD(&rsrc->bufpool);
	INIT_LIST_HEAD(&rsrc->txpend);

	rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
	irdma_sc_pd_init(dev, &rsrc->sc_pd, info->pd_id, info->abi_ver);
	rsrc->qp_id = info->qp_id;
	rsrc->cq_id = info->cq_id;
	rsrc->sq_size = info->sq_size;
	rsrc->rq_size = info->rq_size;
	rsrc->cq_size = info->rq_size + info->sq_size;
	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
		if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
			rsrc->cq_size += info->rq_size;
	}
	rsrc->buf_size = info->buf_size;
	rsrc->dev = dev;
	rsrc->vsi = vsi;
	rsrc->stats_idx = info->stats_idx;
	rsrc->stats_idx_valid = info->stats_idx_valid;

	ret = irdma_puda_cq_create(rsrc);
	if (!ret) {
		rsrc->cmpl = PUDA_CQ_CREATED;
		ret = irdma_puda_qp_create(rsrc);
	}
	if (ret) {
		irdma_debug(dev, IRDMA_DEBUG_PUDA,
			    "error qp_create type=%d, status=%d\n", rsrc->type,
			    ret);
		goto error;
	}
	rsrc->cmpl = PUDA_QP_CREATED;

	ret = irdma_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
	if (ret) {
		irdma_debug(dev, IRDMA_DEBUG_PUDA, "error alloc_buf\n");
		goto error;
	}

	rsrc->rxq_invalid_cnt = info->rq_size;
	ret = irdma_puda_replenish_rq(rsrc, true);
	if (ret)
		goto error;

	if (info->type == IRDMA_PUDA_RSRC_TYPE_IEQ) {
		if (!irdma_init_hash_desc(&rsrc->hash_desc)) {
			rsrc->check_crc = true;
			rsrc->cmpl = PUDA_HASH_CRC_COMPLETE;
			ret = 0;
		}
	}

	irdma_sc_ccq_arm(&rsrc->cq);
	return ret;

error:
	irdma_puda_dele_rsrc(vsi, info->type, false);

	return ret;
}

/**
 * irdma_ilq_putback_rcvbuf - ilq buffer to put back on rq
 * @qp: ilq's qp resource
 * @buf: puda buffer for rcv q
 * @wqe_idx: wqe index of completed rcvbuf
 */
static void
irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp,
			 struct irdma_puda_buf *buf, u32 wqe_idx)
{
	__le64 *wqe;
	u64 offset8, offset24;

	/* Synch buffer for use by device */
	dma_sync_single_for_device(hw_to_dev(qp->dev->hw), buf->mem.pa, buf->mem.size, DMA_BIDIRECTIONAL);
	wqe = qp->qp_uk.rq_base[wqe_idx].elem;
	get_64bit_val(wqe, IRDMA_BYTE_24, &offset24);
	if (qp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
		get_64bit_val(wqe, IRDMA_BYTE_8, &offset8);
		if (offset24)
			offset8 &= ~LS_64(1, IRDMAQPSQ_VALID);
		else
			offset8 |= LS_64(1, IRDMAQPSQ_VALID);
		set_64bit_val(wqe, IRDMA_BYTE_8, offset8);
		irdma_wmb();	/* make sure WQE is written before valid bit is set */
	}
	if (offset24)
		offset24 = 0;
	else
		offset24 = LS_64(1, IRDMAQPSQ_VALID);

	set_64bit_val(wqe, IRDMA_BYTE_24, offset24);
}

/**
 * irdma_ieq_get_fpdu_len - get length of fpdu with or without marker
 * @pfpdu: pointer to fpdu
 * @datap: pointer to data in the buffer
 * @rcv_seq: seqnum of the data buffer
 */
static u16 irdma_ieq_get_fpdu_len(struct irdma_pfpdu *pfpdu, u8 *datap,
				  u32 rcv_seq){
	u32 marker_seq, end_seq, blk_start;
	u8 marker_len = pfpdu->marker_len;
	u16 total_len = 0;
	u16 fpdu_len;

	blk_start = (pfpdu->rcv_start_seq - rcv_seq) & (IRDMA_MRK_BLK_SZ - 1);
	if (!blk_start) {
		total_len = marker_len;
		marker_seq = rcv_seq + IRDMA_MRK_BLK_SZ;
		if (marker_len && *(u32 *)datap)
			return 0;
	} else {
		marker_seq = rcv_seq + blk_start;
	}
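
	/*
	 * Read the 2-byte MPA length just past any marker, add the MPA
	 * framing bytes and round up to a 4-byte boundary; markers that fall
	 * inside the FPDU are accounted for in the loop below.
	 */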
	datap += total_len;
	fpdu_len = IRDMA_NTOHS(*(__be16 *) datap);
	fpdu_len += IRDMA_IEQ_MPA_FRAMING;
	fpdu_len = (fpdu_len + 3) & 0xfffc;

	if (fpdu_len > pfpdu->max_fpdu_data)
		return 0;

	total_len += fpdu_len;
	end_seq = rcv_seq + total_len;
	while ((int)(marker_seq - end_seq) < 0) {
		total_len += marker_len;
		end_seq += marker_len;
		marker_seq += IRDMA_MRK_BLK_SZ;
	}

	return total_len;
}

/**
 * irdma_ieq_copy_to_txbuf - copy data from rcv buf to tx buf
 * @buf: rcv buffer with partial
 * @txbuf: tx buffer for sending back
 * @buf_offset: rcv buffer offset to copy from
 * @txbuf_offset: at offset in tx buf to copy
 * @len: length of data to copy
 */
static void
irdma_ieq_copy_to_txbuf(struct irdma_puda_buf *buf,
			struct irdma_puda_buf *txbuf,
			u16 buf_offset, u32 txbuf_offset, u32 len)
{
	void *mem1 = (u8 *)buf->mem.va + buf_offset;
	void *mem2 = (u8 *)txbuf->mem.va + txbuf_offset;

	irdma_memcpy(mem2, mem1, len);
}

/**
 * irdma_ieq_setup_tx_buf - setup tx buffer for partial handling
 * @buf: receive buffer with partial
 * @txbuf: buffer to prepare
 */
static void
irdma_ieq_setup_tx_buf(struct irdma_puda_buf *buf,
		       struct irdma_puda_buf *txbuf)
{
	txbuf->tcphlen = buf->tcphlen;
	txbuf->ipv4 = buf->ipv4;

	if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
		txbuf->hdrlen = txbuf->tcphlen;
		irdma_ieq_copy_to_txbuf(buf, txbuf, IRDMA_TCP_OFFSET, 0,
					txbuf->hdrlen);
	} else {
		txbuf->maclen = buf->maclen;
		txbuf->hdrlen = buf->hdrlen;
		irdma_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen);
	}
}

/**
 * irdma_ieq_check_first_buf - check if rcv buffer's seq is in range
 * @buf: receive exception buffer
 * @fps: first partial sequence number
 */
static void
irdma_ieq_check_first_buf(struct irdma_puda_buf *buf, u32 fps)
{
	u32 offset;

	if (buf->seqnum < fps) {
		offset = fps - buf->seqnum;
		if (offset > buf->datalen)
			return;
		buf->data += offset;
		buf->datalen -= (u16)offset;
		buf->seqnum = fps;
	}
}

/**
 * irdma_ieq_compl_pfpdu - write txbuf with full fpdu
 * @ieq: ieq resource
 * @rxlist: ieq's received buffer list
 * @pbufl: temporary list for buffers for fpdu
 * @txbuf: tx buffer for fpdu
 * @fpdu_len: total length of fpdu
 */
static void
irdma_ieq_compl_pfpdu(struct irdma_puda_rsrc *ieq,
		      struct list_head *rxlist,
		      struct list_head *pbufl,
		      struct irdma_puda_buf *txbuf, u16 fpdu_len)
{
	struct irdma_puda_buf *buf;
	u32 nextseqnum;
	u16 txoffset, bufoffset;

	buf = irdma_puda_get_listbuf(pbufl);
	if (!buf)
		return;

	nextseqnum = buf->seqnum + fpdu_len;
	irdma_ieq_setup_tx_buf(buf, txbuf);
	if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
		txoffset = txbuf->hdrlen;
		txbuf->totallen = txbuf->hdrlen + fpdu_len;
		txbuf->data = (u8 *)txbuf->mem.va + txoffset;
	} else {
		txoffset = buf->hdrlen;
		txbuf->totallen = buf->hdrlen + fpdu_len;
		txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen;
	}
	bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);

	do {
		if (buf->datalen >= fpdu_len) {
			/* copied full fpdu */
			irdma_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset,
						fpdu_len);
			buf->datalen -= fpdu_len;
			buf->data += fpdu_len;
			buf->seqnum = nextseqnum;
			break;
		}
		/* copy partial fpdu */
		irdma_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset,
					buf->datalen);
		txoffset += buf->datalen;
		fpdu_len -= buf->datalen;
		irdma_puda_ret_bufpool(ieq, buf);
		buf = irdma_puda_get_listbuf(pbufl);
		if (!buf)
			return;

		bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
	} while (1);

	/* last buffer on the list */
	if (buf->datalen)
		list_add(&buf->list, rxlist);
	else
		irdma_puda_ret_bufpool(ieq, buf);
}

/**
 * irdma_ieq_create_pbufl - create buffer list for single fpdu
 * @pfpdu: pointer to fpdu
 * @rxlist: resource list for receive ieq buffers
 * @pbufl: temp. list for buffers for fpdu
 * @buf: first receive buffer
 * @fpdu_len: total length of fpdu
 */
static int
irdma_ieq_create_pbufl(struct irdma_pfpdu *pfpdu,
		       struct list_head *rxlist,
		       struct list_head *pbufl,
		       struct irdma_puda_buf *buf, u16 fpdu_len)
{
	int status = 0;
	struct irdma_puda_buf *nextbuf;
	u32 nextseqnum;
	u16 plen = fpdu_len - buf->datalen;
	bool done = false;

	nextseqnum = buf->seqnum + buf->datalen;
	do {
		nextbuf = irdma_puda_get_listbuf(rxlist);
		if (!nextbuf) {
			status = -ENOBUFS;
			break;
		}
		list_add_tail(&nextbuf->list, pbufl);
		if (nextbuf->seqnum != nextseqnum) {
			pfpdu->bad_seq_num++;
			status = -ERANGE;
			break;
		}
		if (nextbuf->datalen >= plen) {
			done = true;
		} else {
			plen -= nextbuf->datalen;
			nextseqnum = nextbuf->seqnum + nextbuf->datalen;
		}

	} while (!done);

	return status;
}

/**
 * irdma_ieq_handle_partial - process partial fpdu buffer
 * @ieq: ieq resource
 * @pfpdu: partial management per user qp
 * @buf: receive buffer
 * @fpdu_len: fpdu len in the buffer
 */
static int
irdma_ieq_handle_partial(struct irdma_puda_rsrc *ieq,
			 struct irdma_pfpdu *pfpdu,
			 struct irdma_puda_buf *buf, u16 fpdu_len)
{
	int status = 0;
	u8 *crcptr;
	u32 mpacrc;
	u32 seqnum = buf->seqnum;
	struct list_head pbufl;	/* partial buffer list */
	struct irdma_puda_buf *txbuf = NULL;
	struct list_head *rxlist = &pfpdu->rxlist;

	ieq->partials_handled++;

	INIT_LIST_HEAD(&pbufl);
	list_add(&buf->list, &pbufl);

	status = irdma_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
	if (status)
		goto error;

	txbuf = irdma_puda_get_bufpool(ieq);
	if (!txbuf) {
		pfpdu->no_tx_bufs++;
		status = -ENOBUFS;
		goto error;
	}

	irdma_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len);
	irdma_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum);

	crcptr = txbuf->data + fpdu_len - 4;
	mpacrc = *(u32 *)crcptr;
	if (ieq->check_crc) {
		status = irdma_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,
						(fpdu_len - 4), mpacrc);
		if (status) {
			irdma_debug(ieq->dev, IRDMA_DEBUG_IEQ,
				    "error bad crc\n");
			pfpdu->mpa_crc_err = true;
			goto error;
		}
	}

	irdma_debug_buf(ieq->dev, IRDMA_DEBUG_IEQ, "IEQ TX BUFFER",
			txbuf->mem.va, txbuf->totallen);
	if (ieq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
		txbuf->ah_id = pfpdu->ah->ah_info.ah_idx;
	txbuf->do_lpb = true;
	irdma_puda_send_buf(ieq, txbuf);
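	/* advance the expected sequence number past the FPDU just sent back */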
	pfpdu->rcv_nxt = seqnum + fpdu_len;
	return status;

error:
	while (!list_empty(&pbufl)) {
		buf = (struct irdma_puda_buf *)(&pbufl)->prev;
		list_del(&buf->list);
		list_add(&buf->list, rxlist);
	}
	if (txbuf)
		irdma_puda_ret_bufpool(ieq, txbuf);

	return status;
}

/**
 * irdma_ieq_process_buf - process buffer rcvd for ieq
 * @ieq: ieq resource
 * @pfpdu: partial management per user qp
 * @buf: receive buffer
 */
static int
irdma_ieq_process_buf(struct irdma_puda_rsrc *ieq,
		      struct irdma_pfpdu *pfpdu,
		      struct irdma_puda_buf *buf)
{
	u16 fpdu_len = 0;
	u16 datalen = buf->datalen;
	u8 *datap = buf->data;
	u8 *crcptr;
	u16 ioffset = 0;
	u32 mpacrc;
	u32 seqnum = buf->seqnum;
	u16 len = 0;
	u16 full = 0;
	bool partial = false;
	struct irdma_puda_buf *txbuf;
	struct list_head *rxlist = &pfpdu->rxlist;
	int ret = 0;

	ioffset = (u16)(buf->data - (u8 *)buf->mem.va);
	while (datalen) {
		fpdu_len = irdma_ieq_get_fpdu_len(pfpdu, datap, buf->seqnum);
		if (!fpdu_len) {
			irdma_debug(ieq->dev, IRDMA_DEBUG_IEQ,
				    "error bad fpdu len\n");
			list_add(&buf->list, rxlist);
			pfpdu->mpa_crc_err = true;
			return -EINVAL;
		}

		if (datalen < fpdu_len) {
			partial = true;
			break;
		}
		crcptr = datap + fpdu_len - 4;
		mpacrc = *(u32 *)crcptr;
		if (ieq->check_crc)
			ret = irdma_ieq_check_mpacrc(ieq->hash_desc, datap,
						     fpdu_len - 4, mpacrc);
		if (ret) {
			list_add(&buf->list, rxlist);
			irdma_debug(ieq->dev, IRDMA_DEBUG_ERR,
				    "IRDMA_ERR_MPA_CRC\n");
			pfpdu->mpa_crc_err = true;
			return ret;
		}
		full++;
		pfpdu->fpdu_processed++;
		ieq->fpdu_processed++;
		datap += fpdu_len;
		len += fpdu_len;
		datalen -= fpdu_len;
	}
	if (full) {
		/* copy full pdu's in the txbuf and send them out */
		txbuf = irdma_puda_get_bufpool(ieq);
		if (!txbuf) {
			pfpdu->no_tx_bufs++;
			list_add(&buf->list, rxlist);
			return -ENOBUFS;
		}
		/* modify txbuf's buffer header */
		irdma_ieq_setup_tx_buf(buf, txbuf);
		/* copy full fpdu's to new buffer */
		if (ieq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
			irdma_ieq_copy_to_txbuf(buf, txbuf, ioffset,
						txbuf->hdrlen, len);
			txbuf->totallen = txbuf->hdrlen + len;
			txbuf->ah_id = pfpdu->ah->ah_info.ah_idx;
		} else {
			irdma_ieq_copy_to_txbuf(buf, txbuf, ioffset,
						buf->hdrlen, len);
			txbuf->totallen = buf->hdrlen + len;
		}
		irdma_ieq_update_tcpip_info(txbuf, len, buf->seqnum);
		irdma_debug_buf(ieq->dev, IRDMA_DEBUG_IEQ, "IEQ TX BUFFER",
				txbuf->mem.va, txbuf->totallen);
		txbuf->do_lpb = true;
		irdma_puda_send_buf(ieq, txbuf);

		if (!datalen) {
			pfpdu->rcv_nxt = buf->seqnum + len;
			irdma_puda_ret_bufpool(ieq, buf);
			return 0;
		}
		buf->data = datap;
		buf->seqnum = seqnum + len;
		buf->datalen = datalen;
		pfpdu->rcv_nxt = buf->seqnum;
	}
	if (partial)
		return irdma_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len);

	return 0;
}

/**
 * irdma_ieq_process_fpdus - process fpdu's buffers on its list
 * @qp: qp for which partial fpdus
 * @ieq: ieq resource
 */
void
irdma_ieq_process_fpdus(struct irdma_sc_qp *qp,
			struct irdma_puda_rsrc *ieq)
{
	struct irdma_pfpdu *pfpdu = &qp->pfpdu;
	struct list_head *rxlist = &pfpdu->rxlist;
	struct irdma_puda_buf *buf;
	int status;

	do {
		if (list_empty(rxlist))
			break;
		buf = irdma_puda_get_listbuf(rxlist);
		if (!buf) {
			irdma_debug(ieq->dev, IRDMA_DEBUG_IEQ,
				    "error no buf\n");
			break;
		}
		if (buf->seqnum != pfpdu->rcv_nxt) {
			/* This could be out of order or missing packet */
			pfpdu->out_of_order++;
			list_add(&buf->list, rxlist);
			break;
		}
		/* keep processing buffers from the head of the list */
		status = irdma_ieq_process_buf(ieq, pfpdu, buf);
		if (status && pfpdu->mpa_crc_err) {
			while (!list_empty(rxlist)) {
				buf = irdma_puda_get_listbuf(rxlist);
				irdma_puda_ret_bufpool(ieq, buf);
				pfpdu->crc_err++;
				ieq->crc_err++;
			}
			/* create CQP for AE */
			irdma_ieq_mpa_crc_ae(ieq->dev, qp);
		}
	} while (!status);
}

/**
 * irdma_ieq_create_ah - create an address handle for IEQ
 * @qp: qp pointer
 * @buf: buf received on IEQ used to create AH
 */
static int
irdma_ieq_create_ah(struct irdma_sc_qp *qp, struct irdma_puda_buf *buf)
{
	struct irdma_ah_info ah_info = {0};

	qp->pfpdu.ah_buf = buf;
	irdma_puda_ieq_get_ah_info(qp, &ah_info);
	return irdma_puda_create_ah(qp->vsi->dev, &ah_info, false,
				    IRDMA_PUDA_RSRC_TYPE_IEQ, qp,
				    &qp->pfpdu.ah);
}

/**
 * irdma_ieq_handle_exception - handle qp's exception
 * @ieq: ieq resource
 * @qp: qp receiving exception
 * @buf: receive buffer
 */
static void
irdma_ieq_handle_exception(struct irdma_puda_rsrc *ieq,
			   struct irdma_sc_qp *qp,
			   struct irdma_puda_buf *buf)
{
	struct irdma_pfpdu *pfpdu = &qp->pfpdu;
	u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
	u32 rcv_wnd = hw_host_ctx[23];
	/* first partial seq # in q2 */
	u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET);
	struct list_head *rxlist = &pfpdu->rxlist;
	struct list_head *plist;
	struct irdma_puda_buf *tmpbuf = NULL;
	unsigned long flags = 0;
	u8 hw_rev = qp->dev->hw_attrs.uk_attrs.hw_rev;

	irdma_debug_buf(ieq->dev, IRDMA_DEBUG_IEQ, "IEQ RX BUFFER", buf->mem.va,
			buf->totallen);

	spin_lock_irqsave(&pfpdu->lock, flags);
	pfpdu->total_ieq_bufs++;
	if (pfpdu->mpa_crc_err) {
		pfpdu->crc_err++;
		goto error;
	}
	if (pfpdu->mode && fps != pfpdu->fps) {
		/* clean up qp as it is new partial sequence */
		irdma_ieq_cleanup_qp(ieq, qp);
		irdma_debug(ieq->dev, IRDMA_DEBUG_IEQ,
			    "restarting new partial\n");
		pfpdu->mode = false;
	}

	if (!pfpdu->mode) {
		irdma_debug_buf(ieq->dev, IRDMA_DEBUG_IEQ, "Q2 BUFFER",
				(u64 *)qp->q2_buf, 128);
		/* First_Partial_Sequence_Number check */
		pfpdu->rcv_nxt = fps;
		pfpdu->fps = fps;
		pfpdu->mode = true;
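		/* largest FPDU data size is the TCP MSS derived from the VSI MTU */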
		pfpdu->max_fpdu_data = (buf->ipv4) ?
		    (ieq->vsi->mtu - IRDMA_MTU_TO_MSS_IPV4) :
		    (ieq->vsi->mtu - IRDMA_MTU_TO_MSS_IPV6);
		pfpdu->pmode_count++;
		ieq->pmode_count++;
		INIT_LIST_HEAD(rxlist);
		irdma_ieq_check_first_buf(buf, fps);
	}

	if (!(rcv_wnd >= (buf->seqnum - pfpdu->rcv_nxt))) {
		pfpdu->bad_seq_num++;
		ieq->bad_seq_num++;
		goto error;
	}

	if (!list_empty(rxlist)) {
		tmpbuf = (struct irdma_puda_buf *)(rxlist)->next;
		while ((struct list_head *)tmpbuf != rxlist) {
			if (buf->seqnum == tmpbuf->seqnum)
				goto error;
			if ((int)(buf->seqnum - tmpbuf->seqnum) < 0)
				break;
			plist = &tmpbuf->list;
			tmpbuf = (struct irdma_puda_buf *)(plist)->next;
		}
		/* Insert buf before tmpbuf */
		list_add_tail(&buf->list, &tmpbuf->list);
	} else {
		list_add_tail(&buf->list, rxlist);
	}
	pfpdu->nextseqnum = buf->seqnum + buf->datalen;
	pfpdu->lastrcv_buf = buf;
	if (hw_rev >= IRDMA_GEN_2 && !pfpdu->ah) {
		irdma_ieq_create_ah(qp, buf);
		if (!pfpdu->ah)
			goto error;
		goto exit;
	}
	if (hw_rev == IRDMA_GEN_1)
		irdma_ieq_process_fpdus(qp, ieq);
	else if (pfpdu->ah && pfpdu->ah->ah_info.ah_valid)
		irdma_ieq_process_fpdus(qp, ieq);
exit:
	spin_unlock_irqrestore(&pfpdu->lock, flags);

	return;

error:
	irdma_puda_ret_bufpool(ieq, buf);
	spin_unlock_irqrestore(&pfpdu->lock, flags);
}

/**
 * irdma_ieq_receive - received exception buffer
 * @vsi: VSI of device
 * @buf: exception buffer received
 */
static void
irdma_ieq_receive(struct irdma_sc_vsi *vsi,
		  struct irdma_puda_buf *buf)
{
	struct irdma_puda_rsrc *ieq = vsi->ieq;
	struct irdma_sc_qp *qp = NULL;
	u32 wqe_idx = ieq->compl_rxwqe_idx;

	qp = irdma_ieq_get_qp(vsi->dev, buf);
	if (!qp) {
		ieq->stats_bad_qp_id++;
		irdma_puda_ret_bufpool(ieq, buf);
	} else {
		irdma_ieq_handle_exception(ieq, qp, buf);
	}
	/*
	 * ieq->rx_wqe_idx tells irdma_puda_replenish_rq() at which wqe_idx to
	 * start replenishing the rq
	 */
	if (!ieq->rxq_invalid_cnt)
		ieq->rx_wqe_idx = wqe_idx;
	ieq->rxq_invalid_cnt++;
}

/**
 * irdma_ieq_tx_compl - put back after sending completed exception buffer
 * @vsi: sc VSI struct
 * @sqwrid: pointer to puda buffer
 */
static void
irdma_ieq_tx_compl(struct irdma_sc_vsi *vsi, void *sqwrid)
{
	struct irdma_puda_rsrc *ieq = vsi->ieq;
	struct irdma_puda_buf *buf = sqwrid;

	irdma_puda_ret_bufpool(ieq, buf);
}

/**
 * irdma_ieq_cleanup_qp - qp is being destroyed
 * @ieq: ieq resource
 * @qp: qp whose pending fpdu buffers are freed
 */
void
irdma_ieq_cleanup_qp(struct irdma_puda_rsrc *ieq, struct irdma_sc_qp *qp)
{
	struct irdma_puda_buf *buf;
	struct irdma_pfpdu *pfpdu = &qp->pfpdu;
	struct list_head *rxlist = &pfpdu->rxlist;

	if (qp->pfpdu.ah) {
		irdma_puda_free_ah(ieq->dev, qp->pfpdu.ah);
		qp->pfpdu.ah = NULL;
		qp->pfpdu.ah_buf = NULL;
	}

	if (!pfpdu->mode)
		return;

	while (!list_empty(rxlist)) {
		buf = irdma_puda_get_listbuf(rxlist);
		irdma_puda_ret_bufpool(ieq, buf);
	}
}