/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/io.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

#include "hfi.h"
#include "qp.h"
#include "verbs_txreq.h"
#include "trace.h"

/* cut down ridiculously long IB macro names */
#define OP(x) RC_OP(x)

static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
		       u32 psn, u32 pmtu)
{
	u32 len;

	len = delta_psn(psn, wqe->psn) * pmtu;
	ss->sge = wqe->sg_list[0];
	ss->sg_list = wqe->sg_list + 1;
	ss->num_sge = wqe->wr.num_sge;
	ss->total_len = wqe->length;
	rvt_skip_sge(ss, len, false);
	return wqe->length - len;
}

/**
 * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @dev: the device for this QP
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @ps: the xmit packet state
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note that we are in the responder's side of the QP context.
 * Note the QP s_lock must be held.
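 * This is called from hfi1_make_rc_req() when RVT_S_RESP_PENDING is set.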
 */
static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
		       struct ib_other_headers *ohdr,
		       struct hfi1_pkt_state *ps)
{
	struct rvt_ack_entry *e;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;
	int middle = 0;
	u32 pmtu = qp->pmtu;
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	/* Don't send an ACK if we aren't supposed to. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto bail;

	if (priv->hdr_type == HFI1_PKT_TYPE_9B)
		/* header size in 32-bit words LRH+BTH = (8+12)/4. */
		hwords = 5;
	else
		/* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
		hwords = 7;

	switch (qp->s_ack_state) {
	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		/* FALLTHROUGH */
	case OP(ATOMIC_ACKNOWLEDGE):
		/*
		 * We can increment the tail pointer now that the last
		 * response has been sent instead of only being
		 * constructed.
		 */
		if (++qp->s_tail_ack_queue > HFI1_MAX_RDMA_ATOMIC)
			qp->s_tail_ack_queue = 0;
		/* FALLTHROUGH */
	case OP(SEND_ONLY):
	case OP(ACKNOWLEDGE):
		/* Check for no next entry in the queue. */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
			if (qp->s_flags & RVT_S_ACK_PENDING)
				goto normal;
			goto bail;
		}

		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST)) {
			/*
			 * If an RDMA read response is being resent and
			 * we haven't seen the duplicate request yet,
			 * then stop sending the remaining responses the
			 * responder has seen until the requester re-sends it.
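			 * (A NULL rdma_sge.mr with a nonzero length means the
			 * MR reference was already dropped when the last
			 * response went out.)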
			 */
			len = e->rdma_sge.sge_length;
			if (len && !e->rdma_sge.mr) {
				qp->s_tail_ack_queue = qp->r_head_ack_queue;
				goto bail;
			}
			/* Copy SGE state in case we need to resend */
			ps->s_txreq->mr = e->rdma_sge.mr;
			if (ps->s_txreq->mr)
				rvt_get_mr(ps->s_txreq->mr);
			qp->s_ack_rdma_sge.sge = e->rdma_sge;
			qp->s_ack_rdma_sge.num_sge = 1;
			ps->s_txreq->ss = &qp->s_ack_rdma_sge;
			if (len > pmtu) {
				len = pmtu;
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
			} else {
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
				e->sent = 1;
			}
			ohdr->u.aeth = rvt_compute_aeth(qp);
			hwords++;
			qp->s_ack_rdma_psn = e->psn;
			bth2 = mask_psn(qp->s_ack_rdma_psn++);
		} else {
			/* COMPARE_SWAP or FETCH_ADD */
			ps->s_txreq->ss = NULL;
			len = 0;
			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
			ohdr->u.at.aeth = rvt_compute_aeth(qp);
			ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
			hwords += sizeof(ohdr->u.at) / sizeof(u32);
			bth2 = mask_psn(e->psn);
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		ps->s_txreq->ss = &qp->s_ack_rdma_sge;
		ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr;
		if (ps->s_txreq->mr)
			rvt_get_mr(ps->s_txreq->mr);
		len = qp->s_ack_rdma_sge.sge.sge_length;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
		} else {
			ohdr->u.aeth = rvt_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
			e = &qp->s_ack_queue[qp->s_tail_ack_queue];
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		bth2 = mask_psn(qp->s_ack_rdma_psn++);
		break;

	default:
normal:
		/*
		 * Send a regular ACK.
		 * Set the s_ack_state so we wait until after sending
		 * the ACK before setting s_ack_state to ACKNOWLEDGE
		 * (see above).
		 */
		qp->s_ack_state = OP(SEND_ONLY);
		qp->s_flags &= ~RVT_S_ACK_PENDING;
		ps->s_txreq->ss = NULL;
		if (qp->s_nak_state)
			ohdr->u.aeth =
				cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
					    (qp->s_nak_state <<
					     IB_AETH_CREDIT_SHIFT));
		else
			ohdr->u.aeth = rvt_compute_aeth(qp);
		hwords++;
		len = 0;
		bth0 = OP(ACKNOWLEDGE) << 24;
		bth2 = mask_psn(qp->s_ack_psn);
	}
	qp->s_rdma_ack_cnt++;
	qp->s_hdrwords = hwords;
	ps->s_txreq->sde = priv->s_sde;
	ps->s_txreq->s_cur_size = len;
	hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle, ps);
	/* pbc */
	ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
	return 1;

bail:
	qp->s_ack_state = OP(ACKNOWLEDGE);
	/*
	 * Ensure s_rdma_ack_cnt changes are committed prior to resetting
	 * RVT_S_RESP_PENDING
	 */
	smp_wmb();
	qp->s_flags &= ~(RVT_S_RESP_PENDING
				| RVT_S_ACK_PENDING
				| RVT_S_AHG_VALID);
	return 0;
}

/**
 * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 * @ps: the xmit packet state
 *
 * Assumes s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
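 * Note that pending responses are sent first; see make_rc_ack().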
 */
int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_other_headers *ohdr;
	struct rvt_sge_state *ss;
	struct rvt_swqe *wqe;
	u32 hwords;
	u32 len;
	u32 bth0 = 0;
	u32 bth2;
	u32 pmtu = qp->pmtu;
	char newreq;
	int middle = 0;
	int delta;

	lockdep_assert_held(&qp->s_lock);
	ps->s_txreq = get_txreq(ps->dev, qp);
	if (IS_ERR(ps->s_txreq))
		goto bail_no_tx;

	if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
		/* header size in 32-bit words LRH+BTH = (8+12)/4. */
		hwords = 5;
		if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
			ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth;
		else
			ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
	} else {
		/* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
		hwords = 7;
		if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
		    (hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))))
			ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth;
		else
			ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth;
	}

	/* Sending responses has higher priority than sending requests. */
	if ((qp->s_flags & RVT_S_RESP_PENDING) &&
	    make_rc_ack(dev, qp, ohdr, ps))
		return 1;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		clear_ahg(qp);
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		hfi1_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
				   IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
		/* will get called again */
		goto done_free_tx;
	}

	if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
		goto bail;

	if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
		if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
			qp->s_flags |= RVT_S_WAIT_PSN;
			goto bail;
		}
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
	}

	/* Send a request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	switch (qp->s_state) {
	default:
		if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		newreq = 0;
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			if (qp->s_tail == READ_ONCE(qp->s_head)) {
				clear_ahg(qp);
				goto bail;
			}
			/*
			 * If a fence is requested, wait for previous
			 * RDMA read and atomic operations to finish.
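			 * (qp->s_num_rd_atomic counts the RDMA read and atomic
			 * requests that are still outstanding.)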
			 */
			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
			    qp->s_num_rd_atomic) {
				qp->s_flags |= RVT_S_WAIT_FENCE;
				goto bail;
			}
			/*
			 * Local operations are processed immediately
			 * after all prior requests have completed
			 */
			if (wqe->wr.opcode == IB_WR_REG_MR ||
			    wqe->wr.opcode == IB_WR_LOCAL_INV) {
				int local_ops = 0;
				int err = 0;

				if (qp->s_last != qp->s_cur)
					goto bail;
				if (++qp->s_cur == qp->s_size)
					qp->s_cur = 0;
				if (++qp->s_tail == qp->s_size)
					qp->s_tail = 0;
				if (!(wqe->wr.send_flags &
				      RVT_SEND_COMPLETION_ONLY)) {
					err = rvt_invalidate_rkey(
						qp,
						wqe->wr.ex.invalidate_rkey);
					local_ops = 1;
				}
				hfi1_send_complete(qp, wqe,
						   err ? IB_WC_LOC_PROT_ERR
						       : IB_WC_SUCCESS);
				if (local_ops)
					atomic_dec(&qp->local_ops_pending);
				qp->s_hdrwords = 0;
				goto done_free_tx;
			}

			newreq = 1;
			qp->s_psn = wqe->psn;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 * it.
		 */
		len = wqe->length;
		ss = &qp->s_sge;
		bth2 = mask_psn(qp->s_psn);
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
		case IB_WR_SEND_WITH_INV:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND) {
				qp->s_state = OP(SEND_ONLY);
			} else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			} else {
				qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE);
				/* Invalidate rkey comes after the BTH */
				ohdr->u.ieth = cpu_to_be32(
						wqe->wr.ex.invalidate_rkey);
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			goto no_flow_control;
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
no_flow_control:
			put_ib_reth_vaddr(
				wqe->rdma_wr.remote_addr,
				&ohdr->u.rc.reth);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / sizeof(u32);
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
				qp->s_state = OP(RDMA_WRITE_ONLY);
			} else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_READ:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
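			 * (qp->s_max_rd_atomic caps the number of outstanding
			 * RDMA read/atomic requests on this QP.)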
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			put_ib_reth_vaddr(
				wqe->rdma_wr.remote_addr,
				&ohdr->u.rc.reth);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
				qp->s_state = OP(COMPARE_SWAP);
				put_ib_ateth_swap(wqe->atomic_wr.swap,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(wqe->atomic_wr.compare_add,
						     &ohdr->u.atomic_eth);
			} else {
				qp->s_state = OP(FETCH_ADD);
				put_ib_ateth_swap(wqe->atomic_wr.compare_add,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
			}
			put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
					   &ohdr->u.atomic_eth);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->atomic_wr.rkey);
			hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		qp->s_len = wqe->length;
		if (newreq) {
			qp->s_tail++;
			if (qp->s_tail >= qp->s_size)
				qp->s_tail = 0;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			qp->s_psn = wqe->lpsn + 1;
		else
			qp->s_psn++;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
		 * thread to indicate a SEND needs to be restarted from an
		 * earlier PSN without interfering with the sending thread.
		 * See hfi1_restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		bth2 = mask_psn(qp->s_psn++);
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND) {
			qp->s_state = OP(SEND_LAST);
		} else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		} else {
			qp->s_state = OP(SEND_LAST_WITH_INVALIDATE);
			/* invalidate data comes after the BTH */
			ohdr->u.ieth = cpu_to_be32(wqe->wr.ex.invalidate_rkey);
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
		 * thread to indicate an RDMA write needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See hfi1_restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = mask_psn(qp->s_psn++);
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
			qp->s_state = OP(RDMA_WRITE_LAST);
		} else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
		 * thread to indicate an RDMA read needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See hfi1_restart_rc().
		 */
		len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
		put_ib_reth_vaddr(
			wqe->rdma_wr.remote_addr + len,
			&ohdr->u.rc.reth);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->rdma_wr.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
		bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK;
		qp->s_psn = wqe->lpsn + 1;
		ss = NULL;
		len = 0;
		qp->s_cur++;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;
	}
	qp->s_sending_hpsn = bth2;
	delta = delta_psn(bth2, wqe->psn);
	if (delta && delta % HFI1_PSN_CREDIT == 0)
		bth2 |= IB_BTH_REQ_ACK;
	if (qp->s_flags & RVT_S_SEND_ONE) {
		qp->s_flags &= ~RVT_S_SEND_ONE;
		qp->s_flags |= RVT_S_WAIT_ACK;
		bth2 |= IB_BTH_REQ_ACK;
	}
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	ps->s_txreq->sde = priv->s_sde;
	ps->s_txreq->ss = ss;
	ps->s_txreq->s_cur_size = len;
	hfi1_make_ruc_header(
		qp,
		ohdr,
		bth0 | (qp->s_state << 24),
		bth2,
		middle,
		ps);
	/* pbc */
	ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
	return 1;

done_free_tx:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;
	return 1;

bail:
	hfi1_put_txreq(ps->s_txreq);

bail_no_tx:
	ps->s_txreq = NULL;
	qp->s_flags &= ~RVT_S_BUSY;
	qp->s_hdrwords = 0;
	return 0;
}

static inline void hfi1_make_bth_aeth(struct rvt_qp *qp,
				      struct ib_other_headers *ohdr,
				      u32 bth0, u32 bth1)
{
	if (qp->r_nak_state)
		ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
					    (qp->r_nak_state <<
					     IB_AETH_CREDIT_SHIFT));
	else
		ohdr->u.aeth = rvt_compute_aeth(qp);

	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(bth1 | qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn));
}

static inline void hfi1_queue_rc_ack(struct rvt_qp *qp, bool is_fecn)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto unlock;
	this_cpu_inc(*ibp->rvp.rc_qacks);
	qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
	qp->s_nak_state = qp->r_nak_state;
	qp->s_ack_psn = qp->r_ack_psn;
	if (is_fecn)
		qp->s_flags |= RVT_S_ECN;

	/* Schedule the send tasklet. */
	hfi1_schedule_send(qp);
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
}

static inline void hfi1_make_rc_ack_9B(struct rvt_qp *qp,
				       struct hfi1_opa_header *opa_hdr,
				       u8 sc5, bool is_fecn,
				       u64 *pbc_flags, u32 *hwords,
				       u32 *nwords)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct ib_header *hdr = &opa_hdr->ibh;
	struct ib_other_headers *ohdr;
	u16 lrh0 = HFI1_LRH_BTH;
	u16 pkey;
	u32 bth0, bth1;

	opa_hdr->hdr_type = HFI1_PKT_TYPE_9B;
	ohdr = &hdr->u.oth;
	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */
	*hwords = 6;

	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
		*hwords += hfi1_make_grh(ibp, &hdr->u.l.grh,
					 rdma_ah_read_grh(&qp->remote_ah_attr),
					 *hwords - 2, SIZE_OF_CRC);
		ohdr = &hdr->u.l.oth;
		lrh0 = HFI1_LRH_GRH;
	}
	/* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
	*pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT);

	/* read pkey_index w/o lock (it's atomic) */
	pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);

	lrh0 |= (sc5 & IB_SC_MASK) << IB_SC_SHIFT |
		(rdma_ah_get_sl(&qp->remote_ah_attr) & IB_SL_MASK) <<
			IB_SL_SHIFT;

	hfi1_make_ib_hdr(hdr, lrh0, *hwords + SIZE_OF_CRC,
			 opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B),
			 ppd->lid | rdma_ah_get_path_bits(&qp->remote_ah_attr));

	bth0 = pkey | (OP(ACKNOWLEDGE) << 24);
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	bth1 = (!!is_fecn) << IB_BECN_SHIFT;
	hfi1_make_bth_aeth(qp, ohdr, bth0, bth1);
}

static inline void hfi1_make_rc_ack_16B(struct rvt_qp *qp,
					struct hfi1_opa_header *opa_hdr,
					u8 sc5, bool is_fecn,
					u64 *pbc_flags, u32 *hwords,
					u32 *nwords)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_16b_header *hdr = &opa_hdr->opah;
	struct ib_other_headers *ohdr;
	u32 bth0, bth1 = 0;
	u16 len, pkey;
	u8 becn = !!is_fecn;
	u8 l4 = OPA_16B_L4_IB_LOCAL;
	u8 extra_bytes;

	opa_hdr->hdr_type = HFI1_PKT_TYPE_16B;
	ohdr = &hdr->u.oth;
	/* header size in 32-bit words 16B LRH+BTH+AETH = (16+12+4)/4 */
	*hwords = 8;
	extra_bytes = hfi1_get_16b_padding(*hwords << 2, 0);
	*nwords = SIZE_OF_CRC + ((extra_bytes + SIZE_OF_LT) >> 2);

	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
	    hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) {
		*hwords += hfi1_make_grh(ibp, &hdr->u.l.grh,
					 rdma_ah_read_grh(&qp->remote_ah_attr),
					 *hwords - 4, *nwords);
		ohdr = &hdr->u.l.oth;
		l4 = OPA_16B_L4_IB_GLOBAL;
	}
	*pbc_flags |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;

	/* read pkey_index w/o lock (it's atomic) */
	pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);

	/* Convert dwords to flits */
	len = (*hwords + *nwords) >> 1;

	hfi1_make_16b_hdr(hdr, ppd->lid |
			  (rdma_ah_get_path_bits(&qp->remote_ah_attr) &
			   ((1 << ppd->lmc) - 1)),
			  opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
				      16B), len, pkey, becn, 0, l4, sc5);

	bth0 = pkey | (OP(ACKNOWLEDGE) << 24);
	bth0 |= extra_bytes << 20;
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth1 = OPA_BTH_MIG_REQ;
	hfi1_make_bth_aeth(qp, ohdr, bth0, bth1);
}

typedef void (*hfi1_make_rc_ack)(struct rvt_qp *qp,
				 struct hfi1_opa_header *opa_hdr,
				 u8 sc5, bool is_fecn,
				 u64 *pbc_flags, u32 *hwords,
				 u32 *nwords);

/* We support only two types - 9B and 16B for now */
static const hfi1_make_rc_ack hfi1_make_rc_ack_tbl[2] = {
	[HFI1_PKT_TYPE_9B] = &hfi1_make_rc_ack_9B,
	[HFI1_PKT_TYPE_16B] = &hfi1_make_rc_ack_16B
};

/**
 * hfi1_send_rc_ack - Construct an ACK packet and send it
 * @rcd: the receive context
 * @qp: a pointer to the QP
 * @is_fecn: whether a FECN was received (a BECN is returned in the ACK)
 *
 * This is called from hfi1_rc_rcv() and handle_receive_interrupt().
 * Note that RDMA reads and atomics are handled in the
 * send side QP state and send engine.
 */
void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd,
		      struct rvt_qp *qp, bool is_fecn)
{
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
	u64 pbc, pbc_flags = 0;
	u32 hwords = 0;
	u32 nwords = 0;
	u32 plen;
	struct pio_buf *pbuf;
	struct hfi1_opa_header opa_hdr;

	/* clear the defer count */
	qp->r_adefered = 0;

	/* Don't send ACK or NAK if an RDMA read or atomic is pending. */
	if (qp->s_flags & RVT_S_RESP_PENDING) {
		hfi1_queue_rc_ack(qp, is_fecn);
		return;
	}

	/* Ensure s_rdma_ack_cnt changes are committed */
	if (qp->s_rdma_ack_cnt) {
		hfi1_queue_rc_ack(qp, is_fecn);
		return;
	}

	/* Don't try to send ACKs if the link isn't ACTIVE */
	if (driver_lstate(ppd) != IB_PORT_ACTIVE)
		return;

	/* Make the appropriate header */
	hfi1_make_rc_ack_tbl[priv->hdr_type](qp, &opa_hdr, sc5, is_fecn,
					     &pbc_flags, &hwords, &nwords);

	plen = 2 /* PBC */ + hwords + nwords;
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps,
			 sc_to_vlt(ppd->dd, sc5), plen);
	pbuf = sc_buffer_alloc(rcd->sc, plen, NULL, NULL);
	if (!pbuf) {
		/*
		 * We have no room to send at the moment. Pass
		 * responsibility for sending the ACK to the send engine
		 * so that when enough buffer space becomes available,
		 * the ACK is sent ahead of other outgoing packets.
		 */
		hfi1_queue_rc_ack(qp, is_fecn);
		return;
	}
	trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
			       &opa_hdr, ib_is_sc5(sc5));

	/* write the pbc and data */
	ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
				 (priv->hdr_type == HFI1_PKT_TYPE_9B ?
				  (void *)&opa_hdr.ibh :
				  (void *)&opa_hdr.opah), hwords);
	return;
}

/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from hfi1_rc_rcv() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct rvt_qp *qp, u32 psn)
{
	u32 n = qp->s_acked;
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
	u32 opcode;

	lockdep_assert_held(&qp->s_lock);
	qp->s_cur = n;

	/*
	 * If we are starting the request from the beginning,
	 * let the normal send code handle initialization.
	 */
	if (cmp_psn(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
		goto done;
	}

	/*
	 * Find the work request opcode corresponding to the given PSN.
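	 * The scan starts at s_acked and stops when it reaches s_tail.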
	 */
	opcode = wqe->wr.opcode;
	for (;;) {
		int diff;

		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
		wqe = rvt_get_swqe_ptr(qp, n);
		diff = cmp_psn(psn, wqe->psn);
		if (diff < 0)
			break;
		qp->s_cur = n;
		/*
		 * If we are starting the request from the beginning,
		 * let the normal send code handle initialization.
		 */
		if (diff == 0) {
			qp->s_state = OP(SEND_LAST);
			goto done;
		}
		opcode = wqe->wr.opcode;
	}

	/*
	 * Set the state to restart in the middle of a request.
	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
	 * See hfi1_make_rc_req().
	 */
	switch (opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
		break;

	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
		break;

	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		break;

	default:
		/*
		 * This case shouldn't happen since it's only
		 * one PSN per req.
		 */
		qp->s_state = OP(SEND_LAST);
	}
done:
	qp->s_psn = psn;
	/*
	 * Set RVT_S_WAIT_PSN as rc_complete() may start the timer
	 * asynchronously before the send engine can get scheduled.
	 * Doing it in hfi1_make_rc_req() is too late.
	 */
	if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
	    (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
		qp->s_flags |= RVT_S_WAIT_PSN;
	qp->s_flags &= ~RVT_S_AHG_VALID;
}

/*
 * Back up requester to resend the last un-ACKed request.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 */
void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	struct hfi1_ibport *ibp;

	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_lock);
	if (qp->s_retry == 0) {
		if (qp->s_mig_state == IB_MIG_ARMED) {
			hfi1_migrate_qp(qp);
			qp->s_retry = qp->s_retry_cnt;
		} else if (qp->s_last == qp->s_acked) {
			hfi1_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
			rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			return;
		} else { /* need to handle delayed completion */
			return;
		}
	} else {
		qp->s_retry--;
	}

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_RDMA_READ)
		ibp->rvp.n_rc_resends++;
	else
		ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);

	qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
			 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
			 RVT_S_WAIT_ACK);
	if (wait)
		qp->s_flags |= RVT_S_SEND_ONE;
	reset_psn(qp, psn);
}

/*
 * Set qp->s_sending_psn to the next PSN after the given one.
 * This would be psn+1 except when RDMA reads are present.
 */
static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
{
	struct rvt_swqe *wqe;
	u32 n = qp->s_last;

	lockdep_assert_held(&qp->s_lock);
	/* Find the work request corresponding to the given PSN. */
	for (;;) {
		wqe = rvt_get_swqe_ptr(qp, n);
		if (cmp_psn(psn, wqe->lpsn) <= 0) {
			if (wqe->wr.opcode == IB_WR_RDMA_READ)
				qp->s_sending_psn = wqe->lpsn + 1;
			else
				qp->s_sending_psn = psn + 1;
			break;
		}
		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
	}
}

/*
 * This should be called with the QP s_lock held and interrupts disabled.
 */
void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
{
	struct ib_other_headers *ohdr;
	struct hfi1_qp_priv *priv = qp->priv;
	struct rvt_swqe *wqe;
	struct ib_header *hdr = NULL;
	struct hfi1_16b_header *hdr_16b = NULL;
	u32 opcode;
	u32 psn;

	lockdep_assert_held(&qp->s_lock);
	if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
		return;

	/* Find out where the BTH is */
	if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
		hdr = &opah->ibh;
		if (ib_get_lnh(hdr) == HFI1_LRH_BTH)
			ohdr = &hdr->u.oth;
		else
			ohdr = &hdr->u.l.oth;
	} else {
		u8 l4;

		hdr_16b = &opah->opah;
		l4 = hfi1_16B_get_l4(hdr_16b);
		if (l4 == OPA_16B_L4_IB_LOCAL)
			ohdr = &hdr_16b->u.oth;
		else
			ohdr = &hdr_16b->u.l.oth;
	}

	opcode = ib_bth_get_opcode(ohdr);
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		WARN_ON(!qp->s_rdma_ack_cnt);
		qp->s_rdma_ack_cnt--;
		return;
	}

	psn = ib_bth_get_psn(ohdr);
	reset_sending_psn(qp, psn);

	/*
	 * Start timer after a packet requesting an ACK has been sent and
	 * there are still requests that haven't been acked.
	 */
	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
	    !(qp->s_flags &
	      (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
	    (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		rvt_add_retry_timer(qp);

	while (qp->s_last != qp->s_acked) {
		u32 s_last;

		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
		    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
			break;
		s_last = qp->s_last;
		trace_hfi1_qp_send_completion(qp, wqe, s_last);
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		rvt_put_swqe(wqe);
		rvt_qp_swqe_complete(qp,
				     wqe,
				     ib_hfi1_wc_opcode[wqe->wr.opcode],
				     IB_WC_SUCCESS);
	}
	/*
	 * If we were waiting for sends to complete before re-sending,
	 * and they are now complete, restart sending.
	 */
	trace_hfi1_sendcomplete(qp, psn);
	if (qp->s_flags & RVT_S_WAIT_PSN &&
	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		qp->s_flags &= ~RVT_S_WAIT_PSN;
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
		hfi1_schedule_send(qp);
	}
}

static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
{
	qp->s_last_psn = psn;
}

/*
 * Generate a SWQE completion.
 * This is similar to hfi1_send_complete but has to check to be sure
 * that the SGEs are not being referenced if the SWQE is being resent.
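 * Returns the SWQE at the updated qp->s_acked.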
 */
static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
					 struct rvt_swqe *wqe,
					 struct hfi1_ibport *ibp)
{
	lockdep_assert_held(&qp->s_lock);
	/*
	 * Don't decrement refcount and don't generate a
	 * completion if the SWQE is being resent until the send
	 * is finished.
	 */
	if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		u32 s_last;

		rvt_put_swqe(wqe);
		s_last = qp->s_last;
		trace_hfi1_qp_send_completion(qp, wqe, s_last);
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		rvt_qp_swqe_complete(qp,
				     wqe,
				     ib_hfi1_wc_opcode[wqe->wr.opcode],
				     IB_WC_SUCCESS);
	} else {
		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

		this_cpu_inc(*ibp->rvp.rc_delayed_comp);
		/*
		 * If send progress not running attempt to progress
		 * SDMA queue.
		 */
		if (ppd->dd->flags & HFI1_HAS_SEND_DMA) {
			struct sdma_engine *engine;
			u8 sl = rdma_ah_get_sl(&qp->remote_ah_attr);
			u8 sc5;

			/* For now use sc to find engine */
			sc5 = ibp->sl_to_sc[sl];
			engine = qp_to_sdma_engine(qp, sc5);
			sdma_engine_progress_schedule(engine);
		}
	}

	qp->s_retry = qp->s_retry_cnt;
	update_last_psn(qp, wqe->lpsn);

	/*
	 * If we are completing a request which is in the process of
	 * being resent, we can stop re-sending it since we know the
	 * responder has already seen it.
	 */
	if (qp->s_acked == qp->s_cur) {
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		qp->s_acked = qp->s_cur;
		wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
		if (qp->s_acked != qp->s_tail) {
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = wqe->psn;
		}
	} else {
		if (++qp->s_acked >= qp->s_size)
			qp->s_acked = 0;
		if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
			qp->s_draining = 0;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	}
	return wqe;
}

/**
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @aeth: the AETH from the ACK packet
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the request that resulted in the ACK
 * @val: the atomic result carried in an atomic ACK
 * @rcd: the receive context
 *
 * This is called from rc_rcv_resp() to process an incoming RC ACK
 * for the given QP.
 * May be called at interrupt level, with the QP s_lock held.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
 */
static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
		     u64 val, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_ibport *ibp;
	enum ib_wc_status status;
	struct rvt_swqe *wqe;
	int ret = 0;
	u32 ack_psn;
	int diff;

	lockdep_assert_held(&qp->s_lock);
	/*
	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
	 * requests and implicitly NAK RDMA read and atomic requests issued
	 * before the NAK'ed request. The MSN won't include the NAK'ed
	 * request but will include the ACK'ed request(s).
	 */
	ack_psn = psn;
	if (aeth >> IB_AETH_NAK_SHIFT)
		ack_psn--;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	ibp = rcd_to_iport(rcd);

	/*
	 * The MSN might be for a later WQE than the PSN indicates so
	 * only complete WQEs that the PSN finishes.
	 */
	while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) {
		/*
		 * RDMA_READ_RESPONSE_ONLY is a special case since
		 * we want to generate completion events for everything
		 * before the RDMA read, copy the data, then generate
		 * the completion for the read.
		 */
		if (wqe->wr.opcode == IB_WR_RDMA_READ &&
		    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
		    diff == 0) {
			ret = 1;
			goto bail_stop;
		}
		/*
		 * If this request is an RDMA read or atomic, and the ACK is
		 * for a later operation, this ACK NAKs the RDMA read or
		 * atomic. In other words, only an RDMA_READ_LAST or ONLY
		 * can ACK an RDMA read and likewise for atomic ops. Note
		 * that the NAK case can only happen if relaxed ordering is
		 * used and requests are sent after an RDMA read or atomic
		 * is sent but before the response is received.
		 */
		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
		     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
		     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
			/* Retry this request. */
			if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
				qp->r_flags |= RVT_R_RDMAR_SEQ;
				hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
				if (list_empty(&qp->rspwait)) {
					qp->r_flags |= RVT_R_RSP_SEND;
					rvt_get_qp(qp);
					list_add_tail(&qp->rspwait,
						      &rcd->qp_wait_list);
				}
			}
			/*
			 * No need to process the ACK/NAK since we are
			 * restarting an earlier request.
			 */
			goto bail_stop;
		}
		if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			u64 *vaddr = wqe->sg_list[0].vaddr;
			*vaddr = val;
		}
		if (qp->s_num_rd_atomic &&
		    (wqe->wr.opcode == IB_WR_RDMA_READ ||
		     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
			qp->s_num_rd_atomic--;
			/* Restart sending task if fence is complete */
			if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
			    !qp->s_num_rd_atomic) {
				qp->s_flags &= ~(RVT_S_WAIT_FENCE |
						 RVT_S_WAIT_ACK);
				hfi1_schedule_send(qp);
			} else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
				qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
						 RVT_S_WAIT_ACK);
				hfi1_schedule_send(qp);
			}
		}
		wqe = do_rc_completion(qp, wqe, ibp);
		if (qp->s_acked == qp->s_tail)
			break;
	}

	switch (aeth >> IB_AETH_NAK_SHIFT) {
	case 0:         /* ACK */
		this_cpu_inc(*ibp->rvp.rc_acks);
		if (qp->s_acked != qp->s_tail) {
			/*
			 * We are expecting more ACKs so
			 * mod the retry timer.
			 */
			rvt_mod_retry_timer(qp);
			/*
			 * We can stop re-sending the earlier packets and
			 * continue with the next packet the receiver wants.
			 */
			if (cmp_psn(qp->s_psn, psn) <= 0)
				reset_psn(qp, psn + 1);
		} else {
			/* No more acks - kill all timers */
			rvt_stop_rc_timers(qp);
			if (cmp_psn(qp->s_psn, psn) <= 0) {
				qp->s_state = OP(SEND_LAST);
				qp->s_psn = psn + 1;
			}
		}
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}
		rvt_get_credit(qp, aeth);
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		qp->s_retry = qp->s_retry_cnt;
		update_last_psn(qp, psn);
		return 1;

	case 1:         /* RNR NAK */
		ibp->rvp.n_rnr_naks++;
		if (qp->s_acked == qp->s_tail)
			goto bail_stop;
		if (qp->s_flags & RVT_S_WAIT_RNR)
			goto bail_stop;
		if (qp->s_rnr_retry == 0) {
			status = IB_WC_RNR_RETRY_EXC_ERR;
			goto class_b;
		}
		if (qp->s_rnr_retry_cnt < 7)
			qp->s_rnr_retry--;

		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);

		ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);

		reset_psn(qp, psn);

		qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
		rvt_stop_rc_timers(qp);
		rvt_add_rnr_timer(qp, aeth);
		return 0;

	case 3:         /* NAK */
		if (qp->s_acked == qp->s_tail)
			goto bail_stop;
		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);
		switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
			IB_AETH_CREDIT_MASK) {
		case 0: /* PSN sequence error */
			ibp->rvp.n_seq_naks++;
			/*
			 * Back up to the responder's expected PSN.
			 * Note that we might get a NAK in the middle of an
			 * RDMA READ response which terminates the RDMA
			 * READ.
			 */
			hfi1_restart_rc(qp, psn, 0);
			hfi1_schedule_send(qp);
			break;

		case 1: /* Invalid Request */
			status = IB_WC_REM_INV_REQ_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 2: /* Remote Access Error */
			status = IB_WC_REM_ACCESS_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 3: /* Remote Operation Error */
			status = IB_WC_REM_OP_ERR;
			ibp->rvp.n_other_naks++;
class_b:
			if (qp->s_last == qp->s_acked) {
				hfi1_send_complete(qp, wqe, status);
				rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			}
			break;

		default:
			/* Ignore other reserved NAK error codes */
			goto reserved;
		}
		qp->s_retry = qp->s_retry_cnt;
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		goto bail_stop;

	default:                /* 2: reserved */
reserved:
		/* Ignore reserved NAK codes. */
		goto bail_stop;
	}
	/* cannot be reached  */
bail_stop:
	rvt_stop_rc_timers(qp);
	return ret;
}

/*
 * We have seen an out of sequence RDMA read middle or last packet.
 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
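 * The RDMA read or atomic is then restarted from qp->s_last_psn + 1.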
 */
static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
			 struct hfi1_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;

	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from retry timer */
	rvt_stop_rc_timers(qp);

	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);

	while (cmp_psn(psn, wqe->lpsn) > 0) {
		if (wqe->wr.opcode == IB_WR_RDMA_READ ||
		    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			break;
		wqe = do_rc_completion(qp, wqe, ibp);
	}

	ibp->rvp.n_rdma_seq++;
	qp->r_flags |= RVT_R_RDMAR_SEQ;
	hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_SEND;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}

/**
 * rc_rcv_resp - process an incoming RC response packet
 * @packet: data packet information
 *
 * This is called from hfi1_rc_rcv() to process an incoming RC response
 * packet for the given QP.
 * Called at interrupt level.
 */
static void rc_rcv_resp(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	void *data = packet->payload;
	u32 tlen = packet->tlen;
	struct rvt_qp *qp = packet->qp;
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct ib_other_headers *ohdr = packet->ohdr;
	struct rvt_swqe *wqe;
	enum ib_wc_status status;
	unsigned long flags;
	int diff;
	u64 val;
	u32 aeth;
	u32 psn = ib_bth_get_psn(packet->ohdr);
	u32 pmtu = qp->pmtu;
	u16 hdrsize = packet->hlen;
	u8 opcode = packet->opcode;
	u8 pad = packet->pad;
	u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);

	spin_lock_irqsave(&qp->s_lock, flags);
	trace_hfi1_ack(qp, psn);

	/* Ignore invalid responses. */
	if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0)
		goto ack_done;

	/* Ignore duplicate responses. */
	diff = cmp_psn(psn, qp->s_last_psn);
	if (unlikely(diff <= 0)) {
		/* Update credits for "ghost" ACKs */
		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
			aeth = be32_to_cpu(ohdr->u.aeth);
			if ((aeth >> IB_AETH_NAK_SHIFT) == 0)
				rvt_get_credit(qp, aeth);
		}
		goto ack_done;
	}

	/*
	 * Skip everything other than the PSN we expect, if we are waiting
	 * for a reply to a restarted RDMA read or atomic op.
	 */
	if (qp->r_flags & RVT_R_RDMAR_SEQ) {
		if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
			goto ack_done;
		qp->r_flags &= ~RVT_R_RDMAR_SEQ;
	}

	if (unlikely(qp->s_acked == qp->s_tail))
		goto ack_done;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	status = IB_WC_SUCCESS;

	switch (opcode) {
	case OP(ACKNOWLEDGE):
	case OP(ATOMIC_ACKNOWLEDGE):
	case OP(RDMA_READ_RESPONSE_FIRST):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (opcode == OP(ATOMIC_ACKNOWLEDGE))
			val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
		else
			val = 0;
		if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
			goto ack_done;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
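		 * restart_sge() skips the part of the SGE list that was
		 * already copied.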
		 */
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_middle;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* no AETH, no ACK */
		if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
read_middle:
		if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
			goto ack_len_err;
		if (unlikely(pmtu >= qp->s_rdma_read_len))
			goto ack_len_err;

		/*
		 * We got a response so update the timeout.
		 * 4.096 usec. * (1 << qp->timeout)
		 */
		rvt_mod_retry_timer(qp);
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}

		if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
			qp->s_retry = qp->s_retry_cnt;

		/*
		 * Update the RDMA receive state but do the copy w/o
		 * holding the locks and blocking interrupts.
		 */
		qp->s_rdma_read_len -= pmtu;
		update_last_psn(qp, psn);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		hfi1_copy_sge(&qp->s_rdma_read_sge, data, pmtu, false, false);
		goto bail;

	case OP(RDMA_READ_RESPONSE_ONLY):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
			goto ack_done;
		/*
		 * Check that the data size is >= 0 && <= pmtu.
		 * Remember to account for ICRC (4).
		 */
		if (unlikely(tlen < (hdrsize + extra_bytes)))
			goto ack_len_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_last;

	case OP(RDMA_READ_RESPONSE_LAST):
		/* ACKs READ req. */
		if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/*
		 * Check that the data size is >= 1 && <= pmtu.
		 * Remember to account for ICRC (4).
		 */
		if (unlikely(tlen <= (hdrsize + extra_bytes)))
			goto ack_len_err;
read_last:
		tlen -= hdrsize + extra_bytes;
		if (unlikely(tlen != qp->s_rdma_read_len))
			goto ack_len_err;
		aeth = be32_to_cpu(ohdr->u.aeth);
		hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, false, false);
		WARN_ON(qp->s_rdma_read_sge.num_sge);
		(void)do_rc_ack(qp, aeth, psn,
				OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
		goto ack_done;
	}

ack_op_err:
	status = IB_WC_LOC_QP_OP_ERR;
	goto ack_err;

ack_seq_err:
	rdma_seq_err(qp, ibp, psn, rcd);
	goto ack_done;

ack_len_err:
	status = IB_WC_LOC_LEN_ERR;
ack_err:
	if (qp->s_last == qp->s_acked) {
		hfi1_send_complete(qp, wqe, status);
		rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	}
ack_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
	return;
}

static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
				  struct rvt_qp *qp)
{
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_NAK;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}

static inline void rc_cancel_ack(struct rvt_qp *qp)
{
	qp->r_adefered = 0;
	if (list_empty(&qp->rspwait))
		return;
	list_del_init(&qp->rspwait);
	qp->r_flags &= ~RVT_R_RSP_NAK;
	rvt_put_qp(qp);
}

/**
 * rc_rcv_error - process an incoming duplicate or error RC packet
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 * @rcd: the receive context
 *
 * This is called from hfi1_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP.
 * Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 to
 * schedule a response to be sent.
 */
static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
				 struct rvt_qp *qp, u32 opcode, u32 psn,
				 int diff, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	struct rvt_ack_entry *e;
	unsigned long flags;
	u8 i, prev;
	int old_req;

	trace_hfi1_rcv_error(qp, psn);
	if (diff > 0) {
		/*
		 * Packet sequence error.
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if we already sent one.
		 */
		if (!qp->r_nak_state) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			rc_defered_ack(rcd, qp);
		}
		goto done;
	}

	/*
	 * Handle a duplicate request. Don't re-execute SEND, RDMA
	 * write or atomic op. Don't NAK errors, just silently drop
	 * the duplicate request. Note that r_sge, r_len, and
	 * r_rcv_len may be in use so don't modify them.
	 *
	 * We are supposed to ACK the earliest duplicate PSN but we
	 * can coalesce an outstanding duplicate ACK. We have to
	 * send the earliest so that RDMA reads can be restarted at
	 * the requester's expected PSN.
	 *
	 * First, find where this duplicate PSN falls within the
	 * ACKs previously sent.
	 * old_req is true if there is an older response that is scheduled
	 * to be sent before sending this one.
	 */
	e = NULL;
	old_req = 1;
	ibp->rvp.n_rc_dupreq++;

	spin_lock_irqsave(&qp->s_lock, flags);

	for (i = qp->r_head_ack_queue; ; i = prev) {
		if (i == qp->s_tail_ack_queue)
			old_req = 0;
		if (i)
			prev = i - 1;
		else
			prev = HFI1_MAX_RDMA_ATOMIC;
		if (prev == qp->r_head_ack_queue) {
			e = NULL;
			break;
		}
		e = &qp->s_ack_queue[prev];
		if (!e->opcode) {
			e = NULL;
			break;
		}
		if (cmp_psn(psn, e->psn) >= 0) {
			if (prev == qp->s_tail_ack_queue &&
			    cmp_psn(psn, e->lpsn) <= 0)
				old_req = 0;
			break;
		}
	}
	switch (opcode) {
	case OP(RDMA_READ_REQUEST): {
		struct ib_reth *reth;
		u32 offset;
		u32 len;

		/*
		 * If we didn't find the RDMA read request in the ack queue,
		 * we can ignore this request.
		 */
		if (!e || e->opcode != OP(RDMA_READ_REQUEST))
			goto unlock_done;
		/* RETH comes after BTH */
		reth = &ohdr->u.rc.reth;
		/*
		 * Address range must be a subset of the original
		 * request and start on pmtu boundaries.
		 * We reuse the old ack_queue slot since the requester
		 * should not back up and request an earlier PSN for the
		 * same request.
		 */
		offset = delta_psn(psn, e->psn) * qp->pmtu;
		len = be32_to_cpu(reth->length);
		if (unlikely(offset + len != e->rdma_sge.sge_length))
			goto unlock_done;
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		if (len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
					 IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto unlock_done;
		} else {
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->psn = psn;
		if (old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		/*
		 * If we didn't find the atomic request in the ack queue
		 * or the send engine is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != (u8)opcode || old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	default:
		/*
		 * Ignore this operation if it doesn't request an ACK
		 * or an earlier RDMA read or atomic is going to be resent.
		 */
		if (!(psn & IB_BTH_REQ_ACK) || old_req)
			goto unlock_done;
		/*
		 * Resend the most recent ACK if this request is
		 * after all the previous RDMA reads and atomics.
		 */
		if (i == qp->r_head_ack_queue) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->r_psn - 1;
			goto send_ack;
		}

		/*
		 * Resend the RDMA read or atomic op which
		 * ACKs this duplicate request.
	switch (opcode) {
	case OP(RDMA_READ_REQUEST): {
		struct ib_reth *reth;
		u32 offset;
		u32 len;

		/*
		 * If we didn't find the RDMA read request in the ack queue,
		 * we can ignore this request.
		 */
		if (!e || e->opcode != OP(RDMA_READ_REQUEST))
			goto unlock_done;
		/* RETH comes after BTH */
		reth = &ohdr->u.rc.reth;
		/*
		 * Address range must be a subset of the original
		 * request and start on pmtu boundaries.
		 * We reuse the old ack_queue slot since the requester
		 * should not back up and request an earlier PSN for the
		 * same request.
		 */
		offset = delta_psn(psn, e->psn) * qp->pmtu;
		len = be32_to_cpu(reth->length);
		if (unlikely(offset + len != e->rdma_sge.sge_length))
			goto unlock_done;
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		if (len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
					 IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto unlock_done;
		} else {
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->psn = psn;
		if (old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		/*
		 * If we didn't find the atomic request in the ack queue
		 * or the send engine is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != (u8)opcode || old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	default:
		/*
		 * Ignore this operation if it doesn't request an ACK
		 * or an earlier RDMA read or atomic is going to be resent.
		 */
		if (!(psn & IB_BTH_REQ_ACK) || old_req)
			goto unlock_done;
		/*
		 * Resend the most recent ACK if this request is
		 * after all the previous RDMA reads and atomics.
		 */
		if (i == qp->r_head_ack_queue) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->r_psn - 1;
			goto send_ack;
		}

		/*
		 * Resend the RDMA read or atomic op which
		 * ACKs this duplicate request.
		 */
		qp->s_tail_ack_queue = i;
		break;
	}
	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags |= RVT_S_RESP_PENDING;
	qp->r_nak_state = 0;
	hfi1_schedule_send(qp);

unlock_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return 1;

send_ack:
	return 0;
}

static inline void update_ack_queue(struct rvt_qp *qp, unsigned n)
{
	unsigned next;

	next = n + 1;
	if (next > HFI1_MAX_RDMA_ATOMIC)
		next = 0;
	qp->s_tail_ack_queue = next;
	qp->s_ack_state = OP(ACKNOWLEDGE);
}

static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
			  u32 lqpn, u32 rqpn, u8 svc_type)
{
	struct opa_hfi1_cong_log_event_internal *cc_event;
	unsigned long flags;

	if (sl >= OPA_MAX_SLS)
		return;

	spin_lock_irqsave(&ppd->cc_log_lock, flags);

	ppd->threshold_cong_event_map[sl / 8] |= 1 << (sl % 8);
	ppd->threshold_event_counter++;

	cc_event = &ppd->cc_events[ppd->cc_log_idx++];
	if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS)
		ppd->cc_log_idx = 0;
	cc_event->lqpn = lqpn & RVT_QPN_MASK;
	cc_event->rqpn = rqpn & RVT_QPN_MASK;
	cc_event->sl = sl;
	cc_event->svc_type = svc_type;
	cc_event->rlid = rlid;
	/* keep timestamp in units of 1.024 usec */
	cc_event->timestamp = ktime_get_ns() / 1024;

	spin_unlock_irqrestore(&ppd->cc_log_lock, flags);
}
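
/*
 * Congestion control response to a BECN.
 *
 * For the marked service level the CCTI is raised by the configured
 * increase (clamped at ccti_limit), the inter-packet gap is updated via
 * set_link_ipg(), and the per-SL decrement timer is armed if it is not
 * already running.  Once the CCTI reaches the configured trigger
 * threshold (when non-zero), the event is also recorded in the
 * congestion log.  Note that ccti_timer and the log timestamp are kept
 * in units of 1.024 usec; e.g. a ccti_timer of 100 arms the hrtimer for
 * 100 * 1024 = 102400 ns.
 */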
void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn,
		  u32 rqpn, u8 svc_type)
{
	struct cca_timer *cca_timer;
	u16 ccti, ccti_incr, ccti_timer, ccti_limit;
	u8 trigger_threshold;
	struct cc_state *cc_state;
	unsigned long flags;

	if (sl >= OPA_MAX_SLS)
		return;

	cc_state = get_cc_state(ppd);

	if (!cc_state)
		return;

	/*
	 * 1) increase CCTI (for this SL)
	 * 2) select IPG (i.e., call set_link_ipg())
	 * 3) start timer
	 */
	ccti_limit = cc_state->cct.ccti_limit;
	ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
	trigger_threshold =
		cc_state->cong_setting.entries[sl].trigger_threshold;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	cca_timer = &ppd->cca_timer[sl];
	if (cca_timer->ccti < ccti_limit) {
		if (cca_timer->ccti + ccti_incr <= ccti_limit)
			cca_timer->ccti += ccti_incr;
		else
			cca_timer->ccti = ccti_limit;
		set_link_ipg(ppd);
	}

	ccti = cca_timer->ccti;

	if (!hrtimer_active(&cca_timer->hrtimer)) {
		/* ccti_timer is in units of 1.024 usec */
		unsigned long nsec = 1024 * ccti_timer;

		hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec),
			      HRTIMER_MODE_REL);
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);

	if ((trigger_threshold != 0) && (ccti >= trigger_threshold))
		log_cca_event(ppd, sl, rlid, lqpn, rqpn, svc_type);
}

/**
 * hfi1_rc_rcv - process an incoming RC packet
 * @packet: data packet information
 *
 * This is called from qp_rcv() to process an incoming RC packet
 * for the given QP.
 * May be called at interrupt level.
 */
void hfi1_rc_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	void *data = packet->payload;
	u32 tlen = packet->tlen;
	struct rvt_qp *qp = packet->qp;
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	struct ib_other_headers *ohdr = packet->ohdr;
	u32 bth0 = be32_to_cpu(ohdr->bth[0]);
	u32 opcode = packet->opcode;
	u32 hdrsize = packet->hlen;
	u32 psn = ib_bth_get_psn(packet->ohdr);
	u32 pad = packet->pad;
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	int diff;
	struct ib_reth *reth;
	unsigned long flags;
	int ret;
	bool is_fecn = false;
	bool copy_last = false;
	u32 rkey;
	u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);

	lockdep_assert_held(&qp->r_lock);

	if (hfi1_ruc_check_hdr(ibp, packet))
		return;

	is_fecn = process_ecn(qp, packet, false);

	/*
	 * Process responses (ACKs) before anything else.  Note that the
	 * packet sequence number will be for something in the send work
	 * queue rather than the expected receive packet sequence number.
	 * In other words, this QP is the requester.
	 */
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		rc_rcv_resp(packet);
		if (is_fecn)
			goto send_ack;
		return;
	}
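
	/*
	 * PSNs are compared modulo 2^24, so delta_psn() yields a signed
	 * result: zero means this is the packet we expect, a positive
	 * value means one or more packets were lost (sequence NAK), and
	 * a negative value means this is a duplicate of something already
	 * received.  Anything non-zero is handed to rc_rcv_error().
	 */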
	/* Compute 24 bits worth of difference. */
	diff = delta_psn(psn, qp->r_psn);
	if (unlikely(diff)) {
		if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
			return;
		goto send_ack;
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(SEND_LAST_WITH_INVALIDATE))
			break;
		goto nack_inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	default:
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(SEND_LAST_WITH_INVALIDATE) ||
		    opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			goto nack_inv;
		/*
		 * Note that it is up to the requester to not send a new
		 * RDMA read or atomic operation before receiving an ACK
		 * for the previous operation.
		 */
		break;
	}

	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
		rvt_comm_est(qp);

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
	case OP(RDMA_WRITE_MIDDLE):
send_middle:
		/*
		 * Check for an invalid payload length (a middle packet must
		 * carry exactly one PMTU) and for overflow of the posted
		 * RWQE length.  There is no padding for 9B packets, but 16B
		 * packets come in with some padding since the CRC and LT
		 * bytes are always added and need to be flit aligned.
		 */
		if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
			goto nack_inv;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto nack_inv;
		hfi1_copy_sge(&qp->r_sge, data, pmtu, true, false);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		/* consume RWQE */
		ret = hfi1_rvt_get_rwqe(qp, 1);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		goto send_last_imm;

	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
	case OP(SEND_ONLY_WITH_INVALIDATE):
		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		if (opcode == OP(SEND_ONLY_WITH_INVALIDATE))
			goto send_last_inv;
		/* FALLTHROUGH -- for SEND_ONLY_WITH_IMMEDIATE */
	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;
	case OP(SEND_LAST_WITH_INVALIDATE):
send_last_inv:
		rkey = be32_to_cpu(ohdr->u.ieth);
		if (rvt_invalidate_rkey(qp, rkey))
			goto no_immediate_data;
		wc.ex.invalidate_rkey = rkey;
		wc.wc_flags = IB_WC_WITH_INVALIDATE;
		goto send_last;
	case OP(RDMA_WRITE_LAST):
		copy_last = rvt_is_user_qp(qp);
		/* fall through */
	case OP(SEND_LAST):
no_immediate_data:
		wc.wc_flags = 0;
		wc.ex.imm_data = 0;
send_last:
		/* Check for invalid length. */
		/* LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + extra_bytes)))
			goto nack_inv;
		/* Don't count the CRC (and padding and LT byte for 16B). */
		tlen -= (hdrsize + extra_bytes);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto nack_inv;
		hfi1_copy_sge(&qp->r_sge, data, tlen, true, copy_last);
		rvt_put_ss(&qp->r_sge);
		qp->r_msn++;
		if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
			break;
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		else
			wc.opcode = IB_WC_RECV;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
		/*
		 * It seems that IB mandates the presence of an SL in a
		 * work completion only for the UD transport (see section
		 * 11.4.2 of IBTA Vol. 1).
		 *
		 * However, the way the SL is chosen below is consistent
		 * with the way that IB/qib works and is trying to avoid
		 * introducing incompatibilities.
		 *
		 * See also OPA Vol. 1, section 9.7.6, and table 9-17.
		 */
		wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
			     (bth0 & IB_BTH_SOLICITED) != 0);
		break;
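
	/*
	 * RDMA WRITE first/only packets carry a RETH; the rkey check in
	 * rvt_rkey_ok() programs r_sge to cover the remote buffer, and
	 * the payload is then placed by the send_middle/send_last paths
	 * above.  Of these cases, only WRITE_ONLY_WITH_IMMEDIATE consumes
	 * an RWQE, since the immediate data generates a receive
	 * completion.
	 */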
	case OP(RDMA_WRITE_ONLY):
		copy_last = rvt_is_user_qp(qp);
		/* fall through */
	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto nack_inv;
		/* consume RWQE */
		reth = &ohdr->u.rc.reth;
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
					 rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto nack_acc;
			qp->r_sge.num_sge = 1;
		} else {
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_FIRST))
			goto send_middle;
		else if (opcode == OP(RDMA_WRITE_ONLY))
			goto no_immediate_data;
		ret = hfi1_rvt_get_rwqe(qp, 1);
		if (ret < 0)
			goto nack_op_err;
		if (!ret) {
			/* peer will send again */
			rvt_put_ss(&qp->r_sge);
			goto rnr_nak;
		}
		wc.ex.imm_data = ohdr->u.rc.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;

	case OP(RDMA_READ_REQUEST): {
		struct rvt_ack_entry *e;
		u32 len;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		/* s_ack_queue is size HFI1_MAX_RDMA_ATOMIC+1 so use > not >= */
		if (next > HFI1_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		reth = &ohdr->u.rc.reth;
		len = be32_to_cpu(reth->length);
		if (len) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
					 rkey, IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto nack_acc_unlck;
			/*
			 * Update the next expected PSN.  We add 1 later
			 * below, so only add the remainder here.
			 */
			qp->r_psn += rvt_div_mtu(qp, len - 1);
		} else {
			e->rdma_sge.mr = NULL;
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = qp->r_psn;
		/*
		 * We need to increment the MSN here instead of when we
		 * finish sending the result since a duplicate request would
		 * increment it more than once.
		 */
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send engine. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		hfi1_schedule_send(qp);

		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (is_fecn)
			goto send_ack;
		return;
	}
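
	/*
	 * Atomic requests are executed in place on the target buffer:
	 * the address must be naturally aligned for a u64, the rkey must
	 * grant IB_ACCESS_REMOTE_ATOMIC, and the original value is kept
	 * in the ack queue entry so that the ATOMIC_ACKNOWLEDGE response
	 * (and any retransmission of it) returns the same data.
	 */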
	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		struct ib_atomic_eth *ateth;
		struct rvt_ack_entry *e;
		u64 vaddr;
		atomic64_t *maddr;
		u64 sdata;
		u32 rkey;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		if (next > HFI1_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		ateth = &ohdr->u.atomic_eth;
		vaddr = get_ib_ateth_vaddr(ateth);
		if (unlikely(vaddr & (sizeof(u64) - 1)))
			goto nack_inv_unlck;
		rkey = be32_to_cpu(ateth->rkey);
		/* Check rkey & NAK */
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  vaddr, rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc_unlck;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
		sdata = get_ib_ateth_swap(ateth);
		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
			(u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				     get_ib_ateth_compare(ateth),
				     sdata);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = psn;
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send engine. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		hfi1_schedule_send(qp);

		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (is_fecn)
			goto send_ack;
		return;
	}

	default:
		/* NAK unknown opcodes. */
		goto nack_inv;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	qp->r_ack_psn = psn;
	qp->r_nak_state = 0;
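
	/*
	 * ACK coalescing: when the requester asked for an ACK, it is
	 * normally deferred (r_adefered counts how many are pending) so
	 * that a single ACK can cover several packets.  The deferral is
	 * skipped and an ACK sent right away when packet->numpkt is zero,
	 * when HFI1_PSN_CREDIT deferrals have already accumulated, or
	 * when the packet carried a FECN.
	 */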
	/* Send an ACK if requested or required. */
	if (psn & IB_BTH_REQ_ACK) {
		if (packet->numpkt == 0) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		if (qp->r_adefered >= HFI1_PSN_CREDIT) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		if (unlikely(is_fecn)) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		qp->r_adefered++;
		rc_defered_ack(rcd, qp);
	}
	return;

rnr_nak:
	qp->r_nak_state = qp->r_min_rnr_timer | IB_RNR_NAK;
	qp->r_ack_psn = qp->r_psn;
	/* Queue RNR NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_op_err:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_inv_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_inv:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_INVALID_REQUEST;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_acc_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_acc:
	rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
	qp->r_ack_psn = qp->r_psn;
send_ack:
	hfi1_send_rc_ack(rcd, qp, is_fecn);
}

void hfi1_rc_hdrerr(
	struct hfi1_ctxtdata *rcd,
	struct hfi1_packet *packet,
	struct rvt_qp *qp)
{
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	int diff;
	u32 opcode;
	u32 psn;

	if (hfi1_ruc_check_hdr(ibp, packet))
		return;

	psn = ib_bth_get_psn(packet->ohdr);
	opcode = ib_bth_get_opcode(packet->ohdr);

	/* Only deal with RDMA Writes for now */
	if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
		diff = delta_psn(psn, qp->r_psn);
		if (!qp->r_nak_state && diff >= 0) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			rc_defered_ack(rcd, qp);
		} /* Out of sequence NAK */
	} /* QP Request NAKs */
}