/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>
#include <rdma/ib_verbs.h>

#include "hfi.h"
#include "qp.h"
#include "trace.h"
#include "verbs_txreq.h"

unsigned int hfi1_qp_table_size = 256;
module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

static void flush_tx_list(struct rvt_qp *qp);
static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *stx,
	unsigned seq);
static void iowait_wakeup(struct iowait *wait, int reason);
static void iowait_sdma_drained(struct iowait *wait);
static void qp_pio_drain(struct rvt_qp *qp);

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}
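/*
 * Illustrative note (added for clarity, not normative): the AETH credit
 * field carries a 5 bit code rather than a raw RWQE count.  credit_table[]
 * below expands each code into a credit count (e.g. code 0x9 -> 24, code
 * 0x1E -> 32768), and hfi1_compute_aeth() performs the reverse mapping by
 * binary searching the table for the largest code whose value does not
 * exceed the number of RWQEs currently available.
 */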
/*
 * Convert the AETH credit code into the number of credits.
 */
static const u16 credit_table[31] = {
	0,	/* 0 */
	1,	/* 1 */
	2,	/* 2 */
	3,	/* 3 */
	4,	/* 4 */
	6,	/* 5 */
	8,	/* 6 */
	12,	/* 7 */
	16,	/* 8 */
	24,	/* 9 */
	32,	/* A */
	48,	/* B */
	64,	/* C */
	96,	/* D */
	128,	/* E */
	192,	/* F */
	256,	/* 10 */
	384,	/* 11 */
	512,	/* 12 */
	768,	/* 13 */
	1024,	/* 14 */
	1536,	/* 15 */
	2048,	/* 16 */
	3072,	/* 17 */
	4096,	/* 18 */
	6144,	/* 19 */
	8192,	/* 1A */
	12288,	/* 1B */
	16384,	/* 1C */
	24576,	/* 1D */
	32768	/* 1E */
};

const struct rvt_operation_params hfi1_post_parms[RVT_OPERATION_MAX] = {
[IB_WR_RDMA_WRITE] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_RDMA_READ] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC,
},

[IB_WR_ATOMIC_CMP_AND_SWP] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_ATOMIC_FETCH_AND_ADD] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_RDMA_WRITE_WITH_IMM] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND_WITH_IMM] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_REG_MR] = {
	.length = sizeof(struct ib_reg_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_LOCAL,
},

[IB_WR_LOCAL_INV] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_LOCAL,
},

[IB_WR_SEND_WITH_INV] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_RC),
},

};

static void flush_tx_list(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	while (!list_empty(&priv->s_iowait.tx_head)) {
		struct sdma_txreq *tx;

		tx = list_first_entry(
			&priv->s_iowait.tx_head,
			struct sdma_txreq,
			list);
		list_del_init(&tx->list);
		hfi1_put_txreq(
			container_of(tx, struct verbs_txreq, txreq));
	}
}

static void flush_iowait(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	unsigned long flags;
	seqlock_t *lock = priv->s_iowait.lock;

	if (!lock)
		return;
	write_seqlock_irqsave(lock, flags);
	if (!list_empty(&priv->s_iowait.list)) {
		list_del_init(&priv->s_iowait.list);
		priv->s_iowait.lock = NULL;
		rvt_put_qp(qp);
	}
	write_sequnlock_irqrestore(lock, flags);
}

static inline int opa_mtu_enum_to_int(int mtu)
{
	switch (mtu) {
	case OPA_MTU_8192:
		return 8192;
	case OPA_MTU_10240:
		return 10240;
	default:
		return -1;
	}
}
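/*
 * Illustrative note: opa_mtu_enum_to_int() above only knows the two
 * OPA-specific MTU enum values (8K and 10K).  verbs_mtu_enum_to_int()
 * below first clamps OPA_MTU_10240 down to OPA_MTU_8192 and then falls
 * back to ib_mtu_enum_to_int() for the standard IB sizes (256..4096
 * bytes), so a PathRecord MTU enum always resolves to a usable byte count.
 */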
/**
 * This function is what we would push to the core layer if we wanted to be a
 * "first class citizen".  Instead we hide this here and rely on Verbs ULPs
 * to blindly pass the MTU enum value from the PathRecord to us.
 */
static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
	int val;

	/* Constraining 10KB packets to 8KB packets */
	if (mtu == (enum ib_mtu)OPA_MTU_10240)
		mtu = OPA_MTU_8192;
	val = opa_mtu_enum_to_int((int)mtu);
	if (val > 0)
		return val;
	return ib_mtu_enum_to_int(mtu);
}

int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
			 int attr_mask, struct ib_udata *udata)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_ibdev *dev = to_idev(ibqp->device);
	struct hfi1_devdata *dd = dd_from_dev(dev);
	u8 sc;

	if (attr_mask & IB_QP_AV) {
		sc = ah_to_sc(ibqp->device, &attr->ah_attr);
		if (sc == 0xf)
			return -EINVAL;

		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			return -EINVAL;

		if (!qp_to_send_context(qp, sc))
			return -EINVAL;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr);
		if (sc == 0xf)
			return -EINVAL;

		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			return -EINVAL;

		if (!qp_to_send_context(qp, sc))
			return -EINVAL;
	}

	return 0;
}

void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_qp_priv *priv = qp->priv;

	if (attr_mask & IB_QP_AV) {
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
		priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE &&
	    attr->path_mig_state == IB_MIG_MIGRATED &&
	    qp->s_mig_state == IB_MIG_ARMED) {
		qp->s_flags |= RVT_S_AHG_CLEAR;
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
		priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
	}
}

/**
 * hfi1_check_send_wqe - validate wqe
 * @qp: The qp
 * @wqe: The built wqe
 *
 * Validate the wqe.  This is called after the wqe has been
 * set up but prior to inserting it into the ring.
 *
 * Returns a non-negative value on success (positive when
 * wqe->length <= piothreshold), or -EINVAL on failure.
 */
int hfi1_check_send_wqe(struct rvt_qp *qp,
			struct rvt_swqe *wqe)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct rvt_ah *ah;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
		if (wqe->length > 0x80000000U)
			return -EINVAL;
		break;
	case IB_QPT_SMI:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		break;
	case IB_QPT_GSI:
	case IB_QPT_UD:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		if (ibp->sl_to_sc[ah->attr.sl] == 0xf)
			return -EINVAL;
	default:
		break;
	}
	return wqe->length <= piothreshold;
}
/**
 * hfi1_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 hfi1_compute_aeth(struct rvt_qp *qp)
{
	u32 aeth = qp->r_msn & HFI1_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= HFI1_AETH_CREDIT_INVAL << HFI1_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct rvt_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * There is a small chance that the pair of reads are
		 * not atomic, which is OK, since the fuzziness is
		 * resolved as further ACKs go out.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits) {
				max = x;
			} else {
				if (min == x)
					break;
				min = x;
			}
		}
		aeth |= x << HFI1_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}

/**
 * _hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress w/o regard to the s_flags.
 *
 * It is only used in the post send, which doesn't hold
 * the s_lock.
 */
void _hfi1_schedule_send(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp =
		to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
			priv->s_sde ?
			priv->s_sde->cpu :
			cpumask_first(cpumask_of_node(dd->node)));
}

static void qp_pio_drain(struct rvt_qp *qp)
{
	struct hfi1_ibdev *dev;
	struct hfi1_qp_priv *priv = qp->priv;

	if (!priv->s_sendcontext)
		return;
	dev = to_idev(qp->ibqp.device);
	while (iowait_pio_pending(&priv->s_iowait)) {
		write_seqlock_irq(&dev->iowait_lock);
		hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 1);
		write_sequnlock_irq(&dev->iowait_lock);
		iowait_pio_drain(&priv->s_iowait);
		write_seqlock_irq(&dev->iowait_lock);
		hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 0);
		write_sequnlock_irq(&dev->iowait_lock);
	}
}

/**
 * hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress; the caller should hold
 * the s_lock.
 */
void hfi1_schedule_send(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	if (hfi1_send_ok(qp))
		_hfi1_schedule_send(qp);
}
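/*
 * Worked example for hfi1_get_credit() below (illustrative values only):
 * if an incoming AETH carries credit code 0x9, credit_table[0x9] is 24,
 * so the new limit becomes (AETH MSN + 24) & HFI1_MSN_MASK.  s_lsn only
 * advances when cmp_msn() says the new value is ahead, and a code of
 * HFI1_AETH_CREDIT_INVAL switches the QP to unlimited credit instead.
 */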
/**
 * hfi1_get_credit - handle credit in aeth
 * @qp: the qp
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void hfi1_get_credit(struct rvt_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> HFI1_AETH_CREDIT_SHIFT) & HFI1_AETH_CREDIT_MASK;

	lockdep_assert_held(&qp->s_lock);
	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == HFI1_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				hfi1_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & HFI1_MSN_MASK;
		if (cmp_msn(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				hfi1_schedule_send(qp);
			}
		}
	}
}

void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & flag) {
		qp->s_flags &= ~flag;
		trace_hfi1_qpwakeup(qp, flag);
		hfi1_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	/* Notify hfi1_destroy_qp() if it is waiting. */
	rvt_put_qp(qp);
}

static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *stx,
	unsigned seq)
{
	struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
	struct rvt_qp *qp;
	struct hfi1_qp_priv *priv;
	unsigned long flags;
	int ret = 0;
	struct hfi1_ibdev *dev;

	qp = tx->qp;
	priv = qp->priv;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		/*
		 * If we couldn't queue the DMA request, save the info
		 * and try again later rather than destroying the
		 * buffer and undoing the side effects of the copy.
		 */
		/* Make a common routine? */
		dev = &sde->dd->verbs_dev;
		list_add_tail(&stx->list, &wait->tx_head);
		write_seqlock(&dev->iowait_lock);
		if (sdma_progress(sde, seq, stx))
			goto eagain;
		if (list_empty(&priv->s_iowait.list)) {
			struct hfi1_ibport *ibp =
				to_iport(qp->ibqp.device, qp->port_num);

			ibp->rvp.n_dmawait++;
			qp->s_flags |= RVT_S_WAIT_DMA_DESC;
			list_add_tail(&priv->s_iowait.list, &sde->dmawait);
			priv->s_iowait.lock = &dev->iowait_lock;
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
			rvt_get_qp(qp);
		}
		write_sequnlock(&dev->iowait_lock);
		qp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ret = -EBUSY;
	} else {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		hfi1_put_txreq(tx);
	}
	return ret;
eagain:
	write_sequnlock(&dev->iowait_lock);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	list_del_init(&stx->list);
	return -EAGAIN;
}

static void iowait_wakeup(struct iowait *wait, int reason)
{
	struct rvt_qp *qp = iowait_to_qp(wait);

	WARN_ON(reason != SDMA_AVAIL_REASON);
	hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC);
}

static void iowait_sdma_drained(struct iowait *wait)
{
	struct rvt_qp *qp = iowait_to_qp(wait);
	unsigned long flags;

	/*
	 * This happens when the send engine notes
	 * a QP in the error state and cannot
	 * do the flush work until that QP's
	 * sdma work has finished.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & RVT_S_WAIT_DMA) {
		qp->s_flags &= ~RVT_S_WAIT_DMA;
		hfi1_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
}
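/*
 * Note: iowait_sleep(), iowait_wakeup() and iowait_sdma_drained() above are
 * the per-QP iowait callbacks; they are installed on priv->s_iowait by
 * qp_priv_alloc() further down via iowait_init().
 */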
/**
 * qp_to_sdma_engine - map a qp to a send engine
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send engine for the qp or NULL for SMI type qp.
 */
struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct sdma_engine *sde;

	if (!(dd->flags & HFI1_HAS_SEND_DMA))
		return NULL;
	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		return NULL;
	default:
		break;
	}
	sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5);
	return sde;
}

/*
 * qp_to_send_context - map a qp to a send context
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send context for the qp
 */
struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		/* SMA packets to VL15 */
		return dd->vld[15].sc;
	default:
		break;
	}

	return pio_select_send_context_sc(dd, qp->ibqp.qp_num >> dd->qos_shift,
					  sc5);
}
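/*
 * Illustrative note: SMI (QP0) traffic never uses SDMA and is always sent
 * on the VL15 send context, which is why both mapping routines above
 * special-case IB_QPT_SMI.  All other QP types are spread across engines
 * and contexts based on qp_num >> qos_shift together with the SC.
 */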
struct qp_iter {
	struct hfi1_ibdev *dev;
	struct rvt_qp *qp;
	int specials;
	int n;
};

struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev)
{
	struct qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	iter->specials = dev->rdi.ibdev.phys_port_cnt * 2;

	return iter;
}

int qp_iter_next(struct qp_iter *iter)
{
	struct hfi1_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;

	/*
	 * The approach is to consider the special qps
	 * as additional table entries before the
	 * real hash table.  Since the qp code sets
	 * the qp->next hash link to NULL, this works just fine.
	 *
	 * iter->specials is 2 * # ports
	 *
	 * n = 0..iter->specials are the special qp indices
	 *
	 * n = iter->specials..dev->rdi.qp_dev->qp_table_size+iter->specials are
	 * the potential hash bucket entries
	 *
	 */
	for (; n < dev->rdi.qp_dev->qp_table_size + iter->specials; n++) {
		if (pqp) {
			qp = rcu_dereference(pqp->next);
		} else {
			if (n < iter->specials) {
				struct hfi1_pportdata *ppd;
				struct hfi1_ibport *ibp;
				int pidx;

				pidx = n % dev->rdi.ibdev.phys_port_cnt;
				ppd = &dd_from_dev(dev)->pport[pidx];
				ibp = &ppd->ibport_data;

				if (!(n & 1))
					qp = rcu_dereference(ibp->rvp.qp[0]);
				else
					qp = rcu_dereference(ibp->rvp.qp[1]);
			} else {
				qp = rcu_dereference(
					dev->rdi.qp_dev->qp_table[
						(n - iter->specials)]);
			}
		}
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

static int qp_idle(struct rvt_qp *qp)
{
	return
		qp->s_last == qp->s_acked &&
		qp->s_acked == qp->s_cur &&
		qp->s_cur == qp->s_tail &&
		qp->s_tail == qp->s_head;
}
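/*
 * qp_iter_init()/qp_iter_next()/qp_iter_print() implement the iterator
 * behind the driver's seq_file based QP dump (presumably the debugfs QP
 * statistics file); each call to qp_iter_print() emits one line per QP in
 * the format built below.
 */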
void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct hfi1_qp_priv *priv = qp->priv;
	struct sdma_engine *sde;
	struct send_context *send_context;

	sde = qp_to_sdma_engine(qp, priv->s_sc);
	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	send_context = qp_to_send_context(qp, priv->s_sc);
	seq_printf(s,
		   "N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d\n",
		   iter->n,
		   qp_idle(qp) ? "I" : "B",
		   qp->ibqp.qp_num,
		   atomic_read(&qp->refcount),
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe ? wqe->wr.opcode : 0,
		   qp->s_hdrwords,
		   qp->s_flags,
		   iowait_sdma_pending(&priv->s_iowait),
		   iowait_pio_pending(&priv->s_iowait),
		   !list_empty(&priv->s_iowait.list),
		   qp->timeout,
		   wqe ? wqe->ssn : 0,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->s_avail,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid,
		   qp->remote_ah_attr.sl,
		   qp->pmtu,
		   qp->s_retry,
		   qp->s_retry_cnt,
		   qp->s_rnr_retry_cnt,
		   sde,
		   sde ? sde->this_idx : 0,
		   send_context,
		   send_context ? send_context->sw_index : 0,
		   ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->head,
		   ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->tail,
		   qp->pid);
}

void qp_comm_est(struct rvt_qp *qp)
{
	qp->r_flags |= RVT_R_COMM_EST;
	if (qp->ibqp.event_handler) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_COMM_EST;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}

void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		    gfp_t gfp)
{
	struct hfi1_qp_priv *priv;

	priv = kzalloc_node(sizeof(*priv), gfp, rdi->dparms.node);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->owner = qp;

	priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), gfp,
				   rdi->dparms.node);
	if (!priv->s_ahg) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	iowait_init(
		&priv->s_iowait,
		1,
		_hfi1_do_send,
		iowait_sleep,
		iowait_wakeup,
		iowait_sdma_drained);
	setup_timer(&priv->s_rnr_timer, hfi1_rc_rnr_retry, (unsigned long)qp);
	qp->s_timer.function = hfi1_rc_timeout;
	return priv;
}

void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	kfree(priv->s_ahg);
	kfree(priv);
}

unsigned free_all_qps(struct rvt_dev_info *rdi)
{
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	int n;
	unsigned qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}

	return qp_inuse;
}

void flush_qp_waiters(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	flush_iowait(qp);
	hfi1_stop_rc_timers(qp);
}

void stop_send_queue(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	cancel_work_sync(&priv->s_iowait.iowork);
	hfi1_del_timers_sync(qp);
}

void quiesce_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	iowait_sdma_drain(&priv->s_iowait);
	qp_pio_drain(qp);
	flush_tx_list(qp);
}

void notify_qp_reset(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	priv->r_adefered = 0;
	clear_ahg(qp);
}
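/*
 * Note: qp_priv_alloc(), qp_priv_free(), free_all_qps(), flush_qp_waiters(),
 * stop_send_queue(), quiesce_qp() and notify_qp_reset() above are driver
 * hooks invoked by rdmavt during QP creation, reset and teardown; they are
 * assumed to be registered in the rvt driver_f table when the device
 * registers with rdmavt (outside this file).
 */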
/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void hfi1_migrate_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = qp->alt_ah_attr.port_num;
	qp->s_pkey_index = qp->s_alt_pkey_index;
	qp->s_flags |= RVT_S_AHG_CLEAR;
	priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
	priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}

int mtu_to_path_mtu(u32 mtu)
{
	return mtu_to_enum(mtu, OPA_MTU_8192);
}

u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
	u32 mtu;
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	struct hfi1_ibport *ibp;
	u8 sc, vl;

	ibp = &dd->pport[qp->port_num - 1].ibport_data;
	sc = ibp->sl_to_sc[qp->remote_ah_attr.sl];
	vl = sc_to_vlt(dd, sc);

	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, pmtu);
	if (vl < PER_VL_SEND_CONTEXTS)
		mtu = min_t(u32, mtu, dd->vld[vl].mtu);
	return mtu;
}

int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		       struct ib_qp_attr *attr)
{
	int mtu, pidx = qp->port_num - 1;
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);

	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, attr->path_mtu);
	if (mtu == -1)
		return -1; /* values less than 0 are error */

	if (mtu > dd->pport[pidx].ibmtu)
		return mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
	else
		return attr->path_mtu;
}
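/*
 * Illustrative note on the two MTU helpers above: a requested path MTU of
 * OPA_MTU_10240 is first clamped to 8192 bytes by verbs_mtu_enum_to_int(),
 * and mtu_from_qp() then further limits the result to the MTU configured
 * for the VL that the QP's SC maps to, so the effective path MTU never
 * exceeds what the selected VL can carry.
 */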
void notify_error_qp(struct rvt_qp *qp)
{
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct hfi1_qp_priv *priv = qp->priv;

	write_seqlock(&dev->iowait_lock);
	if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & RVT_S_BUSY)) {
		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
		list_del_init(&priv->s_iowait.list);
		priv->s_iowait.lock = NULL;
		rvt_put_qp(qp);
	}
	write_sequnlock(&dev->iowait_lock);

	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		flush_tx_list(qp);
	}
}

/**
 * hfi1_error_port_qps - put a port's RC/UC qps into error state
 * @ibp: the ibport.
 * @sl: the service level.
 *
 * This function places all RC/UC qps with a given service level into error
 * state.  It is generally called to force upper layer apps to abandon stale
 * qps after an sl->sc mapping change.
 */
void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl)
{
	struct rvt_qp *qp = NULL;
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_ibdev *dev = &ppd->dd->verbs_dev;
	int n;
	int lastwqe;
	struct ib_event ev;

	rcu_read_lock();

	/* Deal only with RC/UC qps that use the given SL. */
	for (n = 0; n < dev->rdi.qp_dev->qp_table_size; n++) {
		for (qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next)) {
			if (qp->port_num == ppd->port &&
			    (qp->ibqp.qp_type == IB_QPT_UC ||
			     qp->ibqp.qp_type == IB_QPT_RC) &&
			    qp->remote_ah_attr.sl == sl &&
			    (ib_rvt_state_ops[qp->state] &
			     RVT_POST_SEND_OK)) {
				spin_lock_irq(&qp->r_lock);
				spin_lock(&qp->s_hlock);
				spin_lock(&qp->s_lock);
				lastwqe = rvt_error_qp(qp,
						       IB_WC_WR_FLUSH_ERR);
				spin_unlock(&qp->s_lock);
				spin_unlock(&qp->s_hlock);
				spin_unlock_irq(&qp->r_lock);
				if (lastwqe) {
					ev.device = qp->ibqp.device;
					ev.element.qp = &qp->ibqp;
					ev.event =
						IB_EVENT_QP_LAST_WQE_REACHED;
					qp->ibqp.event_handler(&ev,
						qp->ibqp.qp_context);
				}
			}
		}
	}

	rcu_read_unlock();
}