1 /* 2 * Copyright(c) 2015, 2016 Intel Corporation. 3 * 4 * This file is provided under a dual BSD/GPLv2 license. When using or 5 * redistributing this file, you may do so under either license. 6 * 7 * GPL LICENSE SUMMARY 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of version 2 of the GNU General Public License as 11 * published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * General Public License for more details. 17 * 18 * BSD LICENSE 19 * 20 * Redistribution and use in source and binary forms, with or without 21 * modification, are permitted provided that the following conditions 22 * are met: 23 * 24 * - Redistributions of source code must retain the above copyright 25 * notice, this list of conditions and the following disclaimer. 26 * - Redistributions in binary form must reproduce the above copyright 27 * notice, this list of conditions and the following disclaimer in 28 * the documentation and/or other materials provided with the 29 * distribution. 30 * - Neither the name of Intel Corporation nor the names of its 31 * contributors may be used to endorse or promote products derived 32 * from this software without specific prior written permission. 33 * 34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 37 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 45 * 46 */ 47 48 #include "hfi.h" 49 #include "verbs_txreq.h" 50 #include "qp.h" 51 52 /* cut down ridiculously long IB macro names */ 53 #define OP(x) IB_OPCODE_UC_##x 54 55 /* only opcode mask for adaptive pio */ 56 const u32 uc_only_opcode = 57 BIT(OP(SEND_ONLY) & 0x1f) | 58 BIT(OP(SEND_ONLY_WITH_IMMEDIATE & 0x1f)) | 59 BIT(OP(RDMA_WRITE_ONLY & 0x1f)) | 60 BIT(OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE & 0x1f)); 61 62 /** 63 * hfi1_make_uc_req - construct a request packet (SEND, RDMA write) 64 * @qp: a pointer to the QP 65 * 66 * Assume s_lock is held. 67 * 68 * Return 1 if constructed; otherwise, return 0. 69 */ 70 int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) 71 { 72 struct hfi1_qp_priv *priv = qp->priv; 73 struct hfi1_other_headers *ohdr; 74 struct rvt_swqe *wqe; 75 u32 hwords = 5; 76 u32 bth0 = 0; 77 u32 len; 78 u32 pmtu = qp->pmtu; 79 int middle = 0; 80 81 ps->s_txreq = get_txreq(ps->dev, qp); 82 if (IS_ERR(ps->s_txreq)) 83 goto bail_no_tx; 84 85 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { 86 if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) 87 goto bail; 88 /* We are in the error state, flush the work request. */ 89 smp_read_barrier_depends(); /* see post_one_send() */ 90 if (qp->s_last == ACCESS_ONCE(qp->s_head)) 91 goto bail; 92 /* If DMAs are in progress, we can't flush immediately. 
*/ 93 if (iowait_sdma_pending(&priv->s_iowait)) { 94 qp->s_flags |= RVT_S_WAIT_DMA; 95 goto bail; 96 } 97 clear_ahg(qp); 98 wqe = rvt_get_swqe_ptr(qp, qp->s_last); 99 hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); 100 goto done_free_tx; 101 } 102 103 ohdr = &ps->s_txreq->phdr.hdr.u.oth; 104 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) 105 ohdr = &ps->s_txreq->phdr.hdr.u.l.oth; 106 107 /* Get the next send request. */ 108 wqe = rvt_get_swqe_ptr(qp, qp->s_cur); 109 qp->s_wqe = NULL; 110 switch (qp->s_state) { 111 default: 112 if (!(ib_rvt_state_ops[qp->state] & 113 RVT_PROCESS_NEXT_SEND_OK)) 114 goto bail; 115 /* Check if send work queue is empty. */ 116 smp_read_barrier_depends(); /* see post_one_send() */ 117 if (qp->s_cur == ACCESS_ONCE(qp->s_head)) { 118 clear_ahg(qp); 119 goto bail; 120 } 121 /* 122 * Local operations are processed immediately 123 * after all prior requests have completed. 124 */ 125 if (wqe->wr.opcode == IB_WR_REG_MR || 126 wqe->wr.opcode == IB_WR_LOCAL_INV) { 127 int local_ops = 0; 128 int err = 0; 129 130 if (qp->s_last != qp->s_cur) 131 goto bail; 132 if (++qp->s_cur == qp->s_size) 133 qp->s_cur = 0; 134 if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) { 135 err = rvt_invalidate_rkey( 136 qp, wqe->wr.ex.invalidate_rkey); 137 local_ops = 1; 138 } 139 hfi1_send_complete(qp, wqe, err ? IB_WC_LOC_PROT_ERR 140 : IB_WC_SUCCESS); 141 if (local_ops) 142 atomic_dec(&qp->local_ops_pending); 143 qp->s_hdrwords = 0; 144 goto done_free_tx; 145 } 146 /* 147 * Start a new request. 
148 */ 149 qp->s_psn = wqe->psn; 150 qp->s_sge.sge = wqe->sg_list[0]; 151 qp->s_sge.sg_list = wqe->sg_list + 1; 152 qp->s_sge.num_sge = wqe->wr.num_sge; 153 qp->s_sge.total_len = wqe->length; 154 len = wqe->length; 155 qp->s_len = len; 156 switch (wqe->wr.opcode) { 157 case IB_WR_SEND: 158 case IB_WR_SEND_WITH_IMM: 159 if (len > pmtu) { 160 qp->s_state = OP(SEND_FIRST); 161 len = pmtu; 162 break; 163 } 164 if (wqe->wr.opcode == IB_WR_SEND) { 165 qp->s_state = OP(SEND_ONLY); 166 } else { 167 qp->s_state = 168 OP(SEND_ONLY_WITH_IMMEDIATE); 169 /* Immediate data comes after the BTH */ 170 ohdr->u.imm_data = wqe->wr.ex.imm_data; 171 hwords += 1; 172 } 173 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 174 bth0 |= IB_BTH_SOLICITED; 175 qp->s_wqe = wqe; 176 if (++qp->s_cur >= qp->s_size) 177 qp->s_cur = 0; 178 break; 179 180 case IB_WR_RDMA_WRITE: 181 case IB_WR_RDMA_WRITE_WITH_IMM: 182 ohdr->u.rc.reth.vaddr = 183 cpu_to_be64(wqe->rdma_wr.remote_addr); 184 ohdr->u.rc.reth.rkey = 185 cpu_to_be32(wqe->rdma_wr.rkey); 186 ohdr->u.rc.reth.length = cpu_to_be32(len); 187 hwords += sizeof(struct ib_reth) / 4; 188 if (len > pmtu) { 189 qp->s_state = OP(RDMA_WRITE_FIRST); 190 len = pmtu; 191 break; 192 } 193 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) { 194 qp->s_state = OP(RDMA_WRITE_ONLY); 195 } else { 196 qp->s_state = 197 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE); 198 /* Immediate data comes after the RETH */ 199 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data; 200 hwords += 1; 201 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 202 bth0 |= IB_BTH_SOLICITED; 203 } 204 qp->s_wqe = wqe; 205 if (++qp->s_cur >= qp->s_size) 206 qp->s_cur = 0; 207 break; 208 209 default: 210 goto bail; 211 } 212 break; 213 214 case OP(SEND_FIRST): 215 qp->s_state = OP(SEND_MIDDLE); 216 /* FALLTHROUGH */ 217 case OP(SEND_MIDDLE): 218 len = qp->s_len; 219 if (len > pmtu) { 220 len = pmtu; 221 middle = HFI1_CAP_IS_KSET(SDMA_AHG); 222 break; 223 } 224 if (wqe->wr.opcode == IB_WR_SEND) { 225 qp->s_state = OP(SEND_LAST); 226 } 
else { 227 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE); 228 /* Immediate data comes after the BTH */ 229 ohdr->u.imm_data = wqe->wr.ex.imm_data; 230 hwords += 1; 231 } 232 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 233 bth0 |= IB_BTH_SOLICITED; 234 qp->s_wqe = wqe; 235 if (++qp->s_cur >= qp->s_size) 236 qp->s_cur = 0; 237 break; 238 239 case OP(RDMA_WRITE_FIRST): 240 qp->s_state = OP(RDMA_WRITE_MIDDLE); 241 /* FALLTHROUGH */ 242 case OP(RDMA_WRITE_MIDDLE): 243 len = qp->s_len; 244 if (len > pmtu) { 245 len = pmtu; 246 middle = HFI1_CAP_IS_KSET(SDMA_AHG); 247 break; 248 } 249 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) { 250 qp->s_state = OP(RDMA_WRITE_LAST); 251 } else { 252 qp->s_state = 253 OP(RDMA_WRITE_LAST_WITH_IMMEDIATE); 254 /* Immediate data comes after the BTH */ 255 ohdr->u.imm_data = wqe->wr.ex.imm_data; 256 hwords += 1; 257 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 258 bth0 |= IB_BTH_SOLICITED; 259 } 260 qp->s_wqe = wqe; 261 if (++qp->s_cur >= qp->s_size) 262 qp->s_cur = 0; 263 break; 264 } 265 qp->s_len -= len; 266 qp->s_hdrwords = hwords; 267 ps->s_txreq->sde = priv->s_sde; 268 qp->s_cur_sge = &qp->s_sge; 269 qp->s_cur_size = len; 270 hfi1_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), 271 mask_psn(qp->s_psn++), middle, ps); 272 /* pbc */ 273 ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2; 274 return 1; 275 276 done_free_tx: 277 hfi1_put_txreq(ps->s_txreq); 278 ps->s_txreq = NULL; 279 return 1; 280 281 bail: 282 hfi1_put_txreq(ps->s_txreq); 283 284 bail_no_tx: 285 ps->s_txreq = NULL; 286 qp->s_flags &= ~RVT_S_BUSY; 287 qp->s_hdrwords = 0; 288 return 0; 289 } 290 291 /** 292 * hfi1_uc_rcv - handle an incoming UC packet 293 * @ibp: the port the packet came in on 294 * @hdr: the header of the packet 295 * @rcv_flags: flags relevant to rcv processing 296 * @data: the packet data 297 * @tlen: the length of the packet 298 * @qp: the QP for this packet. 299 * 300 * This is called from qp_rcv() to process an incoming UC packet 301 * for the given QP. 
 * Called at interrupt level.
 */
void hfi1_uc_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
	struct hfi1_ib_header *hdr = packet->hdr;
	u32 rcv_flags = packet->rcv_flags;
	void *data = packet->ebuf;
	u32 tlen = packet->tlen;
	struct rvt_qp *qp = packet->qp;
	struct hfi1_other_headers *ohdr = packet->ohdr;
	u32 bth0, opcode;
	u32 hdrsize = packet->hlen;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	struct ib_reth *reth;
	int has_grh = rcv_flags & HFI1_HAS_GRH;
	int ret;

	bth0 = be32_to_cpu(ohdr->bth[0]);
	if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, bth0))
		return;

	process_ecn(qp, packet, true);

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode = (bth0 >> 24) & 0xff;

	/* Compare the PSN versus the expected PSN. */
	if (unlikely(cmp_psn(psn, qp->r_psn) != 0)) {
		/*
		 * Handle a sequence error.
		 * Silently drop any current message and resync to the
		 * new PSN; only "first"/"only" opcodes may restart a
		 * message here.
		 */
		qp->r_psn = psn;
inv:
		/*
		 * A SEND in progress uses the cached clone in
		 * s_rdma_read_sge; just mark it for rewind.  Anything
		 * else holds real MR references that must be released.
		 */
		if (qp->r_state == OP(SEND_FIRST) ||
		    qp->r_state == OP(SEND_MIDDLE)) {
			set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
			qp->r_sge.num_sge = 0;
		} else {
			rvt_put_ss(&qp->r_sge);
		}
		qp->r_state = OP(SEND_LAST);
		switch (opcode) {
		case OP(SEND_FIRST):
		case OP(SEND_ONLY):
		case OP(SEND_ONLY_WITH_IMMEDIATE):
			goto send_first;

		case OP(RDMA_WRITE_FIRST):
		case OP(RDMA_WRITE_ONLY):
		case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
			goto rdma_first;

		default:
			goto drop;
		}
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	default:
		if (opcode == OP(SEND_FIRST) ||
		    opcode == OP(SEND_ONLY) ||
		    opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_FIRST) ||
		    opcode == OP(RDMA_WRITE_ONLY) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			break;
		goto inv;
	}

	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
		qp_comm_est(qp);

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
send_first:
		if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) {
			/* Reuse the SGE saved by a prior rewound message. */
			qp->r_sge = qp->s_rdma_read_sge;
		} else {
			ret = hfi1_rvt_get_rwqe(qp, 0);
			if (ret < 0)
				goto op_err;
			if (!ret)
				goto drop;
			/*
			 * qp->s_rdma_read_sge will be the owner
			 * of the mr references.
			 */
			qp->s_rdma_read_sge = qp->r_sge;
		}
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
			goto send_last_imm;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto rewind;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto rewind;
		hfi1_copy_sge(&qp->r_sge, data, pmtu, 0, 0);
		break;

	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;
	case OP(SEND_LAST):
no_immediate_data:
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto rewind;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto rewind;
		wc.opcode = IB_WC_RECV;
		hfi1_copy_sge(&qp->r_sge, data, tlen, 0, 0);
		/* Release the MR references owned by the cached clone. */
		rvt_put_ss(&qp->s_rdma_read_sge);
last_imm:
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = qp->remote_ah_attr.dlid;
		/*
		 * It seems that IB mandates the presence of an SL in a
		 * work completion only for the UD transport (see section
		 * 11.4.2 of IBTA Vol. 1).
		 *
		 * However, the way the SL is chosen below is consistent
		 * with the way that IB/qib works and is trying to avoid
		 * introducing incompatibilities.
		 *
		 * See also OPA Vol. 1, section 9.7.6, and table 9-17.
		 */
		wc.sl = qp->remote_ah_attr.sl;
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
			     (ohdr->bth[0] &
			      cpu_to_be32(IB_BTH_SOLICITED)) != 0);
		break;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
rdma_first:
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_WRITE))) {
			goto drop;
		}
		reth = &ohdr->u.rc.reth;
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey */
			ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
					 vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto drop;
			qp->r_sge.num_sge = 1;
		} else {
			/* Zero-length write: empty SGE, nothing to copy. */
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_ONLY)) {
			goto rdma_last;
		} else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) {
			wc.ex.imm_data = ohdr->u.rc.imm_data;
			goto rdma_last_imm;
		}
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto drop;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto drop;
		hfi1_copy_sge(&qp->r_sge, data, pmtu, 1, 0);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		wc.ex.imm_data = ohdr->u.imm_data;
rdma_last_imm:
		wc.wc_flags = IB_WC_WITH_IMM;

		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto drop;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
			goto drop;
		/*
		 * With immediate data a RWQE is consumed; if a rewound
		 * SGE is pending, release its references instead of
		 * fetching a new RWQE.
		 */
		if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) {
			rvt_put_ss(&qp->s_rdma_read_sge);
		} else {
			ret = hfi1_rvt_get_rwqe(qp, 1);
			if (ret < 0)
				goto op_err;
			if (!ret)
				goto drop;
		}
		wc.byte_len = qp->r_len;
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		hfi1_copy_sge(&qp->r_sge, data, tlen, 1, 0);
		rvt_put_ss(&qp->r_sge);
		goto last_imm;

	case OP(RDMA_WRITE_LAST):
rdma_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto drop;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
			goto drop;
		hfi1_copy_sge(&qp->r_sge, data, tlen, 1, 0);
		rvt_put_ss(&qp->r_sge);
		break;

	default:
		/* Drop packet for unknown opcodes. */
		goto drop;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	return;

rewind:
	/* Keep the posted RWQE for the next message attempt. */
	set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
	qp->r_sge.num_sge = 0;
drop:
	ibp->rvp.n_pkt_drops++;
	return;

op_err:
	hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
}