1 /* 2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. 3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 4 * 5 * This software is available to you under a choice of one of two 6 * licenses. You may choose to be licensed under the terms of the GNU 7 * General Public License (GPL) Version 2, available from the file 8 * COPYING in the main directory of this source tree, or the 9 * OpenIB.org BSD license below: 10 * 11 * Redistribution and use in source and binary forms, with or 12 * without modification, are permitted provided that the following 13 * conditions are met: 14 * 15 * - Redistributions of source code must retain the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer. 18 * 19 * - Redistributions in binary form must reproduce the above 20 * copyright notice, this list of conditions and the following 21 * disclaimer in the documentation and/or other materials 22 * provided with the distribution. 23 * 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 * SOFTWARE. 32 */ 33 34 #include <rdma/ib_smi.h> 35 #include <rdma/ib_verbs.h> 36 37 #include "qib.h" 38 #include "qib_mad.h" 39 40 /** 41 * qib_ud_loopback - handle send on loopback QPs 42 * @sqp: the sending QP 43 * @swqe: the send work request 44 * 45 * This is called from qib_make_ud_req() to forward a WQE addressed 46 * to the same HCA. 47 * Note that the receive interrupt handler may be calling qib_ud_rcv() 48 * while this is being called. 49 */ 50 static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) 51 { 52 struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num); 53 struct qib_pportdata *ppd = ppd_from_ibp(ibp); 54 struct qib_devdata *dd = ppd->dd; 55 struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; 56 struct rvt_qp *qp; 57 struct ib_ah_attr *ah_attr; 58 unsigned long flags; 59 struct rvt_sge_state ssge; 60 struct rvt_sge *sge; 61 struct ib_wc wc; 62 u32 length; 63 enum ib_qp_type sqptype, dqptype; 64 65 rcu_read_lock(); 66 qp = rvt_lookup_qpn(rdi, &ibp->rvp, swqe->ud_wr.remote_qpn); 67 if (!qp) { 68 ibp->rvp.n_pkt_drops++; 69 rcu_read_unlock(); 70 return; 71 } 72 73 sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ? 74 IB_QPT_UD : sqp->ibqp.qp_type; 75 dqptype = qp->ibqp.qp_type == IB_QPT_GSI ? 76 IB_QPT_UD : qp->ibqp.qp_type; 77 78 if (dqptype != sqptype || 79 !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) { 80 ibp->rvp.n_pkt_drops++; 81 goto drop; 82 } 83 84 ah_attr = &ibah_to_rvtah(swqe->ud_wr.ah)->attr; 85 ppd = ppd_from_ibp(ibp); 86 87 if (qp->ibqp.qp_num > 1) { 88 u16 pkey1; 89 u16 pkey2; 90 u16 lid; 91 92 pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index); 93 pkey2 = qib_get_pkey(ibp, qp->s_pkey_index); 94 if (unlikely(!qib_pkey_ok(pkey1, pkey2))) { 95 lid = ppd->lid | (ah_attr->src_path_bits & 96 ((1 << ppd->lmc) - 1)); 97 qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, pkey1, 98 ah_attr->sl, 99 sqp->ibqp.qp_num, qp->ibqp.qp_num, 100 cpu_to_be16(lid), 101 cpu_to_be16(ah_attr->dlid)); 102 goto drop; 103 } 104 } 105 106 /* 107 * Check that the qkey matches (except for QP0, see 9.6.1.4.1). 108 * Qkeys with the high order bit set mean use the 109 * qkey from the QP context instead of the WR (see 10.2.5). 110 */ 111 if (qp->ibqp.qp_num) { 112 u32 qkey; 113 114 qkey = (int)swqe->ud_wr.remote_qkey < 0 ? 115 sqp->qkey : swqe->ud_wr.remote_qkey; 116 if (unlikely(qkey != qp->qkey)) { 117 u16 lid; 118 119 lid = ppd->lid | (ah_attr->src_path_bits & 120 ((1 << ppd->lmc) - 1)); 121 qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey, 122 ah_attr->sl, 123 sqp->ibqp.qp_num, qp->ibqp.qp_num, 124 cpu_to_be16(lid), 125 cpu_to_be16(ah_attr->dlid)); 126 goto drop; 127 } 128 } 129 130 /* 131 * A GRH is expected to precede the data even if not 132 * present on the wire. 133 */ 134 length = swqe->length; 135 memset(&wc, 0, sizeof(wc)); 136 wc.byte_len = length + sizeof(struct ib_grh); 137 138 if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) { 139 wc.wc_flags = IB_WC_WITH_IMM; 140 wc.ex.imm_data = swqe->wr.ex.imm_data; 141 } 142 143 spin_lock_irqsave(&qp->r_lock, flags); 144 145 /* 146 * Get the next work request entry to find where to put the data. 147 */ 148 if (qp->r_flags & RVT_R_REUSE_SGE) 149 qp->r_flags &= ~RVT_R_REUSE_SGE; 150 else { 151 int ret; 152 153 ret = qib_get_rwqe(qp, 0); 154 if (ret < 0) { 155 qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); 156 goto bail_unlock; 157 } 158 if (!ret) { 159 if (qp->ibqp.qp_num == 0) 160 ibp->rvp.n_vl15_dropped++; 161 goto bail_unlock; 162 } 163 } 164 /* Silently drop packets which are too big. */ 165 if (unlikely(wc.byte_len > qp->r_len)) { 166 qp->r_flags |= RVT_R_REUSE_SGE; 167 ibp->rvp.n_pkt_drops++; 168 goto bail_unlock; 169 } 170 171 if (ah_attr->ah_flags & IB_AH_GRH) { 172 qib_copy_sge(&qp->r_sge, &ah_attr->grh, 173 sizeof(struct ib_grh), 1); 174 wc.wc_flags |= IB_WC_GRH; 175 } else 176 qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1); 177 ssge.sg_list = swqe->sg_list + 1; 178 ssge.sge = *swqe->sg_list; 179 ssge.num_sge = swqe->wr.num_sge; 180 sge = &ssge.sge; 181 while (length) { 182 u32 len = sge->length; 183 184 if (len > length) 185 len = length; 186 if (len > sge->sge_length) 187 len = sge->sge_length; 188 BUG_ON(len == 0); 189 qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1); 190 sge->vaddr += len; 191 sge->length -= len; 192 sge->sge_length -= len; 193 if (sge->sge_length == 0) { 194 if (--ssge.num_sge) 195 *sge = *ssge.sg_list++; 196 } else if (sge->length == 0 && sge->mr->lkey) { 197 if (++sge->n >= RVT_SEGSZ) { 198 if (++sge->m >= sge->mr->mapsz) 199 break; 200 sge->n = 0; 201 } 202 sge->vaddr = 203 sge->mr->map[sge->m]->segs[sge->n].vaddr; 204 sge->length = 205 sge->mr->map[sge->m]->segs[sge->n].length; 206 } 207 length -= len; 208 } 209 rvt_put_ss(&qp->r_sge); 210 if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) 211 goto bail_unlock; 212 wc.wr_id = qp->r_wr_id; 213 wc.status = IB_WC_SUCCESS; 214 wc.opcode = IB_WC_RECV; 215 wc.qp = &qp->ibqp; 216 wc.src_qp = sqp->ibqp.qp_num; 217 wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ? 218 swqe->ud_wr.pkey_index : 0; 219 wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1)); 220 wc.sl = ah_attr->sl; 221 wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1); 222 wc.port_num = qp->port_num; 223 /* Signal completion event if the solicited bit is set. */ 224 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 225 swqe->wr.send_flags & IB_SEND_SOLICITED); 226 ibp->rvp.n_loop_pkts++; 227 bail_unlock: 228 spin_unlock_irqrestore(&qp->r_lock, flags); 229 drop: 230 rcu_read_unlock(); 231 } 232 233 /** 234 * qib_make_ud_req - construct a UD request packet 235 * @qp: the QP 236 * 237 * Assumes the s_lock is held. 238 * 239 * Return 1 if constructed; otherwise, return 0. 240 */ 241 int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags) 242 { 243 struct qib_qp_priv *priv = qp->priv; 244 struct qib_other_headers *ohdr; 245 struct ib_ah_attr *ah_attr; 246 struct qib_pportdata *ppd; 247 struct qib_ibport *ibp; 248 struct rvt_swqe *wqe; 249 u32 nwords; 250 u32 extra_bytes; 251 u32 bth0; 252 u16 lrh0; 253 u16 lid; 254 int ret = 0; 255 int next_cur; 256 257 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) { 258 if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) 259 goto bail; 260 /* We are in the error state, flush the work request. */ 261 smp_read_barrier_depends(); /* see post_one_send */ 262 if (qp->s_last == ACCESS_ONCE(qp->s_head)) 263 goto bail; 264 /* If DMAs are in progress, we can't flush immediately. */ 265 if (atomic_read(&priv->s_dma_busy)) { 266 qp->s_flags |= RVT_S_WAIT_DMA; 267 goto bail; 268 } 269 wqe = rvt_get_swqe_ptr(qp, qp->s_last); 270 qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); 271 goto done; 272 } 273 274 /* see post_one_send() */ 275 smp_read_barrier_depends(); 276 if (qp->s_cur == ACCESS_ONCE(qp->s_head)) 277 goto bail; 278 279 wqe = rvt_get_swqe_ptr(qp, qp->s_cur); 280 next_cur = qp->s_cur + 1; 281 if (next_cur >= qp->s_size) 282 next_cur = 0; 283 284 /* Construct the header. */ 285 ibp = to_iport(qp->ibqp.device, qp->port_num); 286 ppd = ppd_from_ibp(ibp); 287 ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr; 288 if (ah_attr->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) { 289 if (ah_attr->dlid != be16_to_cpu(IB_LID_PERMISSIVE)) 290 this_cpu_inc(ibp->pmastats->n_multicast_xmit); 291 else 292 this_cpu_inc(ibp->pmastats->n_unicast_xmit); 293 } else { 294 this_cpu_inc(ibp->pmastats->n_unicast_xmit); 295 lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1); 296 if (unlikely(lid == ppd->lid)) { 297 unsigned long tflags = *flags; 298 /* 299 * If DMAs are in progress, we can't generate 300 * a completion for the loopback packet since 301 * it would be out of order. 302 * XXX Instead of waiting, we could queue a 303 * zero length descriptor so we get a callback. 304 */ 305 if (atomic_read(&priv->s_dma_busy)) { 306 qp->s_flags |= RVT_S_WAIT_DMA; 307 goto bail; 308 } 309 qp->s_cur = next_cur; 310 spin_unlock_irqrestore(&qp->s_lock, tflags); 311 qib_ud_loopback(qp, wqe); 312 spin_lock_irqsave(&qp->s_lock, tflags); 313 *flags = tflags; 314 qib_send_complete(qp, wqe, IB_WC_SUCCESS); 315 goto done; 316 } 317 } 318 319 qp->s_cur = next_cur; 320 extra_bytes = -wqe->length & 3; 321 nwords = (wqe->length + extra_bytes) >> 2; 322 323 /* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */ 324 qp->s_hdrwords = 7; 325 qp->s_cur_size = wqe->length; 326 qp->s_cur_sge = &qp->s_sge; 327 qp->s_srate = ah_attr->static_rate; 328 qp->s_wqe = wqe; 329 qp->s_sge.sge = wqe->sg_list[0]; 330 qp->s_sge.sg_list = wqe->sg_list + 1; 331 qp->s_sge.num_sge = wqe->wr.num_sge; 332 qp->s_sge.total_len = wqe->length; 333 334 if (ah_attr->ah_flags & IB_AH_GRH) { 335 /* Header size in 32-bit words. */ 336 qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh, 337 &ah_attr->grh, 338 qp->s_hdrwords, nwords); 339 lrh0 = QIB_LRH_GRH; 340 ohdr = &priv->s_hdr->u.l.oth; 341 /* 342 * Don't worry about sending to locally attached multicast 343 * QPs. It is unspecified by the spec. what happens. 344 */ 345 } else { 346 /* Header size in 32-bit words. */ 347 lrh0 = QIB_LRH_BTH; 348 ohdr = &priv->s_hdr->u.oth; 349 } 350 if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) { 351 qp->s_hdrwords++; 352 ohdr->u.ud.imm_data = wqe->wr.ex.imm_data; 353 bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24; 354 } else 355 bth0 = IB_OPCODE_UD_SEND_ONLY << 24; 356 lrh0 |= ah_attr->sl << 4; 357 if (qp->ibqp.qp_type == IB_QPT_SMI) 358 lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */ 359 else 360 lrh0 |= ibp->sl_to_vl[ah_attr->sl] << 12; 361 priv->s_hdr->lrh[0] = cpu_to_be16(lrh0); 362 priv->s_hdr->lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */ 363 priv->s_hdr->lrh[2] = 364 cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC); 365 lid = ppd->lid; 366 if (lid) { 367 lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1); 368 priv->s_hdr->lrh[3] = cpu_to_be16(lid); 369 } else 370 priv->s_hdr->lrh[3] = IB_LID_PERMISSIVE; 371 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 372 bth0 |= IB_BTH_SOLICITED; 373 bth0 |= extra_bytes << 20; 374 bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY : 375 qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ? 376 wqe->ud_wr.pkey_index : qp->s_pkey_index); 377 ohdr->bth[0] = cpu_to_be32(bth0); 378 /* 379 * Use the multicast QP if the destination LID is a multicast LID. 380 */ 381 ohdr->bth[1] = ah_attr->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) && 382 ah_attr->dlid != be16_to_cpu(IB_LID_PERMISSIVE) ? 383 cpu_to_be32(QIB_MULTICAST_QPN) : 384 cpu_to_be32(wqe->ud_wr.remote_qpn); 385 ohdr->bth[2] = cpu_to_be32(wqe->psn & QIB_PSN_MASK); 386 /* 387 * Qkeys with the high order bit set mean use the 388 * qkey from the QP context instead of the WR (see 10.2.5). 389 */ 390 ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ? 391 qp->qkey : wqe->ud_wr.remote_qkey); 392 ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num); 393 394 done: 395 return 1; 396 bail: 397 qp->s_flags &= ~RVT_S_BUSY; 398 return ret; 399 } 400 401 static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey) 402 { 403 struct qib_pportdata *ppd = ppd_from_ibp(ibp); 404 struct qib_devdata *dd = ppd->dd; 405 unsigned ctxt = ppd->hw_pidx; 406 unsigned i; 407 408 pkey &= 0x7fff; /* remove limited/full membership bit */ 409 410 for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i) 411 if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey) 412 return i; 413 414 /* 415 * Should not get here, this means hardware failed to validate pkeys. 416 * Punt and return index 0. 417 */ 418 return 0; 419 } 420 421 /** 422 * qib_ud_rcv - receive an incoming UD packet 423 * @ibp: the port the packet came in on 424 * @hdr: the packet header 425 * @has_grh: true if the packet has a GRH 426 * @data: the packet data 427 * @tlen: the packet length 428 * @qp: the QP the packet came on 429 * 430 * This is called from qib_qp_rcv() to process an incoming UD packet 431 * for the given QP. 432 * Called at interrupt level. 433 */ 434 void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, 435 int has_grh, void *data, u32 tlen, struct rvt_qp *qp) 436 { 437 struct qib_other_headers *ohdr; 438 int opcode; 439 u32 hdrsize; 440 u32 pad; 441 struct ib_wc wc; 442 u32 qkey; 443 u32 src_qp; 444 u16 dlid; 445 446 /* Check for GRH */ 447 if (!has_grh) { 448 ohdr = &hdr->u.oth; 449 hdrsize = 8 + 12 + 8; /* LRH + BTH + DETH */ 450 } else { 451 ohdr = &hdr->u.l.oth; 452 hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */ 453 } 454 qkey = be32_to_cpu(ohdr->u.ud.deth[0]); 455 src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK; 456 457 /* 458 * Get the number of bytes the message was padded by 459 * and drop incomplete packets. 460 */ 461 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; 462 if (unlikely(tlen < (hdrsize + pad + 4))) 463 goto drop; 464 465 tlen -= hdrsize + pad + 4; 466 467 /* 468 * Check that the permissive LID is only used on QP0 469 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1). 470 */ 471 if (qp->ibqp.qp_num) { 472 if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE || 473 hdr->lrh[3] == IB_LID_PERMISSIVE)) 474 goto drop; 475 if (qp->ibqp.qp_num > 1) { 476 u16 pkey1, pkey2; 477 478 pkey1 = be32_to_cpu(ohdr->bth[0]); 479 pkey2 = qib_get_pkey(ibp, qp->s_pkey_index); 480 if (unlikely(!qib_pkey_ok(pkey1, pkey2))) { 481 qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, 482 pkey1, 483 (be16_to_cpu(hdr->lrh[0]) >> 4) & 484 0xF, 485 src_qp, qp->ibqp.qp_num, 486 hdr->lrh[3], hdr->lrh[1]); 487 return; 488 } 489 } 490 if (unlikely(qkey != qp->qkey)) { 491 qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey, 492 (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF, 493 src_qp, qp->ibqp.qp_num, 494 hdr->lrh[3], hdr->lrh[1]); 495 return; 496 } 497 /* Drop invalid MAD packets (see 13.5.3.1). */ 498 if (unlikely(qp->ibqp.qp_num == 1 && 499 (tlen != 256 || 500 (be16_to_cpu(hdr->lrh[0]) >> 12) == 15))) 501 goto drop; 502 } else { 503 struct ib_smp *smp; 504 505 /* Drop invalid MAD packets (see 13.5.3.1). */ 506 if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15) 507 goto drop; 508 smp = (struct ib_smp *) data; 509 if ((hdr->lrh[1] == IB_LID_PERMISSIVE || 510 hdr->lrh[3] == IB_LID_PERMISSIVE) && 511 smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) 512 goto drop; 513 } 514 515 /* 516 * The opcode is in the low byte when its in network order 517 * (top byte when in host order). 518 */ 519 opcode = be32_to_cpu(ohdr->bth[0]) >> 24; 520 if (qp->ibqp.qp_num > 1 && 521 opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { 522 wc.ex.imm_data = ohdr->u.ud.imm_data; 523 wc.wc_flags = IB_WC_WITH_IMM; 524 tlen -= sizeof(u32); 525 } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { 526 wc.ex.imm_data = 0; 527 wc.wc_flags = 0; 528 } else 529 goto drop; 530 531 /* 532 * A GRH is expected to precede the data even if not 533 * present on the wire. 534 */ 535 wc.byte_len = tlen + sizeof(struct ib_grh); 536 537 /* 538 * Get the next work request entry to find where to put the data. 539 */ 540 if (qp->r_flags & RVT_R_REUSE_SGE) 541 qp->r_flags &= ~RVT_R_REUSE_SGE; 542 else { 543 int ret; 544 545 ret = qib_get_rwqe(qp, 0); 546 if (ret < 0) { 547 qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); 548 return; 549 } 550 if (!ret) { 551 if (qp->ibqp.qp_num == 0) 552 ibp->rvp.n_vl15_dropped++; 553 return; 554 } 555 } 556 /* Silently drop packets which are too big. */ 557 if (unlikely(wc.byte_len > qp->r_len)) { 558 qp->r_flags |= RVT_R_REUSE_SGE; 559 goto drop; 560 } 561 if (has_grh) { 562 qib_copy_sge(&qp->r_sge, &hdr->u.l.grh, 563 sizeof(struct ib_grh), 1); 564 wc.wc_flags |= IB_WC_GRH; 565 } else 566 qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1); 567 qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1); 568 rvt_put_ss(&qp->r_sge); 569 if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) 570 return; 571 wc.wr_id = qp->r_wr_id; 572 wc.status = IB_WC_SUCCESS; 573 wc.opcode = IB_WC_RECV; 574 wc.vendor_err = 0; 575 wc.qp = &qp->ibqp; 576 wc.src_qp = src_qp; 577 wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ? 578 qib_lookup_pkey(ibp, be32_to_cpu(ohdr->bth[0])) : 0; 579 wc.slid = be16_to_cpu(hdr->lrh[3]); 580 wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF; 581 dlid = be16_to_cpu(hdr->lrh[1]); 582 /* 583 * Save the LMC lower bits if the destination LID is a unicast LID. 584 */ 585 wc.dlid_path_bits = dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) ? 0 : 586 dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1); 587 wc.port_num = qp->port_num; 588 /* Signal completion event if the solicited bit is set. */ 589 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 590 (ohdr->bth[0] & 591 cpu_to_be32(IB_BTH_SOLICITED)) != 0); 592 return; 593 594 drop: 595 ibp->rvp.n_pkt_drops++; 596 } 597