/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
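
/*
 * This file implements the hfi1 queue pair support found below: the
 * per-operation posting parameters, QP modify hooks, iowait/SDMA
 * callbacks, and the QP allocation, teardown, MTU, and seq_file
 * reporting helpers used by the rdmavt layer.
 */
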
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>
#include <rdma/ib_verbs.h>

#include "hfi.h"
#include "qp.h"
#include "trace.h"
#include "verbs_txreq.h"

unsigned int hfi1_qp_table_size = 256;
module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

static void flush_tx_list(struct rvt_qp *qp);
static int iowait_sleep(
        struct sdma_engine *sde,
        struct iowait *wait,
        struct sdma_txreq *stx,
        unsigned int seq,
        bool pkts_sent);
static void iowait_wakeup(struct iowait *wait, int reason);
static void iowait_sdma_drained(struct iowait *wait);
static void qp_pio_drain(struct rvt_qp *qp);

const struct rvt_operation_params hfi1_post_parms[RVT_OPERATION_MAX] = {
[IB_WR_RDMA_WRITE] = {
        .length = sizeof(struct ib_rdma_wr),
        .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_RDMA_READ] = {
        .length = sizeof(struct ib_rdma_wr),
        .qpt_support = BIT(IB_QPT_RC),
        .flags = RVT_OPERATION_ATOMIC,
},

[IB_WR_ATOMIC_CMP_AND_SWP] = {
        .length = sizeof(struct ib_atomic_wr),
        .qpt_support = BIT(IB_QPT_RC),
        .flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_ATOMIC_FETCH_AND_ADD] = {
        .length = sizeof(struct ib_atomic_wr),
        .qpt_support = BIT(IB_QPT_RC),
        .flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_RDMA_WRITE_WITH_IMM] = {
        .length = sizeof(struct ib_rdma_wr),
        .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND] = {
        .length = sizeof(struct ib_send_wr),
        .qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
                       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND_WITH_IMM] = {
        .length = sizeof(struct ib_send_wr),
        .qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
                       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_REG_MR] = {
        .length = sizeof(struct ib_reg_wr),
        .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
        .flags = RVT_OPERATION_LOCAL,
},

[IB_WR_LOCAL_INV] = {
        .length = sizeof(struct ib_send_wr),
        .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
        .flags = RVT_OPERATION_LOCAL,
},

[IB_WR_SEND_WITH_INV] = {
        .length = sizeof(struct ib_send_wr),
        .qpt_support = BIT(IB_QPT_RC),
},

};

static void flush_tx_list(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;

        while (!list_empty(&priv->s_iowait.tx_head)) {
                struct sdma_txreq *tx;

                tx = list_first_entry(
                        &priv->s_iowait.tx_head,
                        struct sdma_txreq,
                        list);
                list_del_init(&tx->list);
                hfi1_put_txreq(
                        container_of(tx, struct verbs_txreq, txreq));
        }
}

static void flush_iowait(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;
        unsigned long flags;
        seqlock_t *lock = priv->s_iowait.lock;

        if (!lock)
                return;
        write_seqlock_irqsave(lock, flags);
        if (!list_empty(&priv->s_iowait.list)) {
                list_del_init(&priv->s_iowait.list);
                priv->s_iowait.lock = NULL;
                rvt_put_qp(qp);
        }
        write_sequnlock_irqrestore(lock, flags);
}
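
/*
 * opa_mtu_enum_to_int - convert an OPA MTU enum to a byte count
 *
 * Only the OPA extensions (8K and 10K) are handled here; any other
 * value returns -1 so the caller can fall back to ib_mtu_enum_to_int().
 */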
static inline int opa_mtu_enum_to_int(int mtu)
{
        switch (mtu) {
        case OPA_MTU_8192:  return 8192;
        case OPA_MTU_10240: return 10240;
        default:            return -1;
        }
}

/**
 * verbs_mtu_enum_to_int - convert an MTU enum to a byte count
 *
 * This function is what we would push to the core layer if we wanted to be a
 * "first class citizen".  Instead we hide this here and rely on Verbs ULPs
 * to blindly pass the MTU enum value from the PathRecord to us.
 */
static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
        int val;

        /* Constraining 10KB packets to 8KB packets */
        if (mtu == (enum ib_mtu)OPA_MTU_10240)
                mtu = OPA_MTU_8192;
        val = opa_mtu_enum_to_int((int)mtu);
        if (val > 0)
                return val;
        return ib_mtu_enum_to_int(mtu);
}

int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
                         int attr_mask, struct ib_udata *udata)
{
        struct ib_qp *ibqp = &qp->ibqp;
        struct hfi1_ibdev *dev = to_idev(ibqp->device);
        struct hfi1_devdata *dd = dd_from_dev(dev);
        u8 sc;

        if (attr_mask & IB_QP_AV) {
                sc = ah_to_sc(ibqp->device, &attr->ah_attr);
                if (sc == 0xf)
                        return -EINVAL;

                if (!qp_to_sdma_engine(qp, sc) &&
                    dd->flags & HFI1_HAS_SEND_DMA)
                        return -EINVAL;

                if (!qp_to_send_context(qp, sc))
                        return -EINVAL;
        }

        if (attr_mask & IB_QP_ALT_PATH) {
                sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr);
                if (sc == 0xf)
                        return -EINVAL;

                if (!qp_to_sdma_engine(qp, sc) &&
                    dd->flags & HFI1_HAS_SEND_DMA)
                        return -EINVAL;

                if (!qp_to_send_context(qp, sc))
                        return -EINVAL;
        }

        return 0;
}

/*
 * qp_set_16b - Set the hdr_type based on whether the slid or the
 * dlid in the connection is extended.  Only applicable for RC and UC
 * QPs.  UD QPs determine this on the fly from the ah in the wqe.
 */
static inline void qp_set_16b(struct rvt_qp *qp)
{
        struct hfi1_pportdata *ppd;
        struct hfi1_ibport *ibp;
        struct hfi1_qp_priv *priv = qp->priv;

        /* Update ah_attr to account for extended LIDs */
        hfi1_update_ah_attr(qp->ibqp.device, &qp->remote_ah_attr);

        /* Create 32 bit LIDs */
        hfi1_make_opa_lid(&qp->remote_ah_attr);

        if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH))
                return;

        ibp = to_iport(qp->ibqp.device, qp->port_num);
        ppd = ppd_from_ibp(ibp);
        priv->hdr_type = hfi1_get_hdr_type(ppd->lid, &qp->remote_ah_attr);
}
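
/*
 * hfi1_modify_qp - update the driver-private QP state after a modify_qp
 *
 * Refresh the cached service class, SDMA engine, and send context when
 * the address vector changes or a path migration completes.
 */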
void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
                    int attr_mask, struct ib_udata *udata)
{
        struct ib_qp *ibqp = &qp->ibqp;
        struct hfi1_qp_priv *priv = qp->priv;

        if (attr_mask & IB_QP_AV) {
                priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
                priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
                priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
                qp_set_16b(qp);
        }

        if (attr_mask & IB_QP_PATH_MIG_STATE &&
            attr->path_mig_state == IB_MIG_MIGRATED &&
            qp->s_mig_state == IB_MIG_ARMED) {
                qp->s_flags |= RVT_S_AHG_CLEAR;
                priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
                priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
                priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
                qp_set_16b(qp);
        }
}

/**
 * hfi1_check_send_wqe - validate wqe
 * @qp: the QP
 * @wqe: the built wqe
 *
 * Validate the wqe.  This is called prior to inserting the wqe into
 * the ring, but after the wqe has been set up.
 *
 * Returns -EINVAL on failure, otherwise 0 or 1; a non-zero return
 * indicates that wqe->length is within the PIO threshold.
 */
int hfi1_check_send_wqe(struct rvt_qp *qp,
                        struct rvt_swqe *wqe)
{
        struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        struct rvt_ah *ah;

        switch (qp->ibqp.qp_type) {
        case IB_QPT_RC:
        case IB_QPT_UC:
                if (wqe->length > 0x80000000U)
                        return -EINVAL;
                break;
        case IB_QPT_SMI:
                ah = ibah_to_rvtah(wqe->ud_wr.ah);
                if (wqe->length > (1 << ah->log_pmtu))
                        return -EINVAL;
                break;
        case IB_QPT_GSI:
        case IB_QPT_UD:
                ah = ibah_to_rvtah(wqe->ud_wr.ah);
                if (wqe->length > (1 << ah->log_pmtu))
                        return -EINVAL;
                if (ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)] == 0xf)
                        return -EINVAL;
                /* fall through */
        default:
                break;
        }
        return wqe->length <= piothreshold;
}

/**
 * _hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress w/o regard to the s_flags.
 *
 * It is only used in the post send, which doesn't hold
 * the s_lock.
 */
void _hfi1_schedule_send(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;
        struct hfi1_ibport *ibp =
                to_iport(qp->ibqp.device, qp->port_num);
        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
        struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

        iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
                        priv->s_sde ?
                        priv->s_sde->cpu :
                        cpumask_first(cpumask_of_node(dd->node)));
}

static void qp_pio_drain(struct rvt_qp *qp)
{
        struct hfi1_ibdev *dev;
        struct hfi1_qp_priv *priv = qp->priv;

        if (!priv->s_sendcontext)
                return;
        dev = to_idev(qp->ibqp.device);
        while (iowait_pio_pending(&priv->s_iowait)) {
                write_seqlock_irq(&dev->iowait_lock);
                hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 1);
                write_sequnlock_irq(&dev->iowait_lock);
                iowait_pio_drain(&priv->s_iowait);
                write_seqlock_irq(&dev->iowait_lock);
                hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 0);
                write_sequnlock_irq(&dev->iowait_lock);
        }
}

/**
 * hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress; the caller should hold
 * the s_lock.
 */
void hfi1_schedule_send(struct rvt_qp *qp)
{
        lockdep_assert_held(&qp->s_lock);
        if (hfi1_send_ok(qp))
                _hfi1_schedule_send(qp);
}

void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
{
        unsigned long flags;

        spin_lock_irqsave(&qp->s_lock, flags);
        if (qp->s_flags & flag) {
                qp->s_flags &= ~flag;
                trace_hfi1_qpwakeup(qp, flag);
                hfi1_schedule_send(qp);
        }
        spin_unlock_irqrestore(&qp->s_lock, flags);
        /* Notify hfi1_destroy_qp() if it is waiting. */
        rvt_put_qp(qp);
}
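
/*
 * iowait_sleep - handle an SDMA descriptor shortage for a QP's txreq
 *
 * The txreq is saved on the iowait tx_head list and the QP is queued on
 * the engine's dmawait list (returning -EBUSY), unless the engine has
 * since made progress, in which case -EAGAIN asks the caller to retry.
 */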
static int iowait_sleep(
        struct sdma_engine *sde,
        struct iowait *wait,
        struct sdma_txreq *stx,
        uint seq,
        bool pkts_sent)
{
        struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
        struct rvt_qp *qp;
        struct hfi1_qp_priv *priv;
        unsigned long flags;
        int ret = 0;
        struct hfi1_ibdev *dev;

        qp = tx->qp;
        priv = qp->priv;

        spin_lock_irqsave(&qp->s_lock, flags);
        if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
                /*
                 * If we couldn't queue the DMA request, save the info
                 * and try again later rather than destroying the
                 * buffer and undoing the side effects of the copy.
                 */
                /* Make a common routine? */
                dev = &sde->dd->verbs_dev;
                list_add_tail(&stx->list, &wait->tx_head);
                write_seqlock(&dev->iowait_lock);
                if (sdma_progress(sde, seq, stx))
                        goto eagain;
                if (list_empty(&priv->s_iowait.list)) {
                        struct hfi1_ibport *ibp =
                                to_iport(qp->ibqp.device, qp->port_num);

                        ibp->rvp.n_dmawait++;
                        qp->s_flags |= RVT_S_WAIT_DMA_DESC;
                        iowait_queue(pkts_sent, &priv->s_iowait,
                                     &sde->dmawait);
                        priv->s_iowait.lock = &dev->iowait_lock;
                        trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
                        rvt_get_qp(qp);
                }
                write_sequnlock(&dev->iowait_lock);
                qp->s_flags &= ~RVT_S_BUSY;
                spin_unlock_irqrestore(&qp->s_lock, flags);
                ret = -EBUSY;
        } else {
                spin_unlock_irqrestore(&qp->s_lock, flags);
                hfi1_put_txreq(tx);
        }
        return ret;
eagain:
        write_sequnlock(&dev->iowait_lock);
        spin_unlock_irqrestore(&qp->s_lock, flags);
        list_del_init(&stx->list);
        return -EAGAIN;
}

static void iowait_wakeup(struct iowait *wait, int reason)
{
        struct rvt_qp *qp = iowait_to_qp(wait);

        WARN_ON(reason != SDMA_AVAIL_REASON);
        hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC);
}

static void iowait_sdma_drained(struct iowait *wait)
{
        struct rvt_qp *qp = iowait_to_qp(wait);
        unsigned long flags;

        /*
         * This happens when the send engine notes
         * a QP in the error state and cannot
         * do the flush work until that QP's
         * sdma work has finished.
         */
        spin_lock_irqsave(&qp->s_lock, flags);
        if (qp->s_flags & RVT_S_WAIT_DMA) {
                qp->s_flags &= ~RVT_S_WAIT_DMA;
                hfi1_schedule_send(qp);
        }
        spin_unlock_irqrestore(&qp->s_lock, flags);
}

/**
 * qp_to_sdma_engine - map a qp to a send engine
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send engine for the qp, or NULL for SMI type qp or when send DMA
 * is not available.
 */
struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5)
{
        struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
        struct sdma_engine *sde;

        if (!(dd->flags & HFI1_HAS_SEND_DMA))
                return NULL;
        switch (qp->ibqp.qp_type) {
        case IB_QPT_SMI:
                return NULL;
        default:
                break;
        }
        sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5);
        return sde;
}

/*
 * qp_to_send_context - map a qp to a send context
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send context for the qp
 */
struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5)
{
        struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

        switch (qp->ibqp.qp_type) {
        case IB_QPT_SMI:
                /* SMA packets to VL15 */
                return dd->vld[15].sc;
        default:
                break;
        }

        return pio_select_send_context_sc(dd, qp->ibqp.qp_num >> dd->qos_shift,
                                          sc5);
}

static const char * const qp_type_str[] = {
        "SMI", "GSI", "RC", "UC", "UD",
};
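
/*
 * qp_idle - return true when all of the send queue indices coincide,
 * i.e. no send work is outstanding on the QP.
 */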
static int qp_idle(struct rvt_qp *qp)
{
        return
                qp->s_last == qp->s_acked &&
                qp->s_acked == qp->s_cur &&
                qp->s_cur == qp->s_tail &&
                qp->s_tail == qp->s_head;
}

/**
 * qp_iter_print - print the qp information to seq_file
 * @s: the seq_file to emit the qp information on
 * @iter: the iterator for the qp hash list
 */
void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
{
        struct rvt_swqe *wqe;
        struct rvt_qp *qp = iter->qp;
        struct hfi1_qp_priv *priv = qp->priv;
        struct sdma_engine *sde;
        struct send_context *send_context;
        struct rvt_ack_entry *e = NULL;
        struct rvt_srq *srq = qp->ibqp.srq ?
                ibsrq_to_rvtsrq(qp->ibqp.srq) : NULL;

        sde = qp_to_sdma_engine(qp, priv->s_sc);
        wqe = rvt_get_swqe_ptr(qp, qp->s_last);
        send_context = qp_to_send_context(qp, priv->s_sc);
        if (qp->s_ack_queue)
                e = &qp->s_ack_queue[qp->s_tail_ack_queue];
        seq_printf(s,
                   "N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x S(%u %u %u %u %u %u %u) R(%u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d OS %x %x E %x %x %x RNR %d %s %d\n",
                   iter->n,
                   qp_idle(qp) ? "I" : "B",
                   qp->ibqp.qp_num,
                   atomic_read(&qp->refcount),
                   qp_type_str[qp->ibqp.qp_type],
                   qp->state,
                   wqe ? wqe->wr.opcode : 0,
                   qp->s_hdrwords,
                   qp->s_flags,
                   iowait_sdma_pending(&priv->s_iowait),
                   iowait_pio_pending(&priv->s_iowait),
                   !list_empty(&priv->s_iowait.list),
                   qp->timeout,
                   wqe ? wqe->ssn : 0,
                   qp->s_lsn,
                   qp->s_last_psn,
                   qp->s_psn, qp->s_next_psn,
                   qp->s_sending_psn, qp->s_sending_hpsn,
                   qp->r_psn,
                   qp->s_last, qp->s_acked, qp->s_cur,
                   qp->s_tail, qp->s_head, qp->s_size,
                   qp->s_avail,
                   /* ack_queue ring pointers, size */
                   qp->s_tail_ack_queue, qp->r_head_ack_queue,
                   rvt_max_atomic(&to_idev(qp->ibqp.device)->rdi),
                   /* remote QP info */
                   qp->remote_qpn,
                   rdma_ah_get_dlid(&qp->remote_ah_attr),
                   rdma_ah_get_sl(&qp->remote_ah_attr),
                   qp->pmtu,
                   qp->s_retry,
                   qp->s_retry_cnt,
                   qp->s_rnr_retry_cnt,
                   qp->s_rnr_retry,
                   sde,
                   sde ? sde->this_idx : 0,
                   send_context,
                   send_context ? send_context->sw_index : 0,
                   ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->head,
                   ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->tail,
                   qp->pid,
                   qp->s_state,
                   qp->s_ack_state,
                   /* ack queue information */
                   e ? e->opcode : 0,
                   e ? e->psn : 0,
                   e ? e->lpsn : 0,
                   qp->r_min_rnr_timer,
                   srq ? "SRQ" : "RQ",
                   srq ? srq->rq.size : qp->r_rq.size
                );
}
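
/*
 * qp_priv_alloc - allocate the hfi1-private portion of a QP (the iowait
 * state and the s_ahg header info), placed on the device's NUMA node.
 */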
void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv;

        priv = kzalloc_node(sizeof(*priv), GFP_KERNEL, rdi->dparms.node);
        if (!priv)
                return ERR_PTR(-ENOMEM);

        priv->owner = qp;

        priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), GFP_KERNEL,
                                   rdi->dparms.node);
        if (!priv->s_ahg) {
                kfree(priv);
                return ERR_PTR(-ENOMEM);
        }
        iowait_init(
                &priv->s_iowait,
                1,
                _hfi1_do_send,
                iowait_sleep,
                iowait_wakeup,
                iowait_sdma_drained);
        return priv;
}

void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;

        kfree(priv->s_ahg);
        kfree(priv);
}

unsigned free_all_qps(struct rvt_dev_info *rdi)
{
        struct hfi1_ibdev *verbs_dev = container_of(rdi,
                                                    struct hfi1_ibdev,
                                                    rdi);
        struct hfi1_devdata *dd = container_of(verbs_dev,
                                               struct hfi1_devdata,
                                               verbs_dev);
        int n;
        unsigned qp_inuse = 0;

        for (n = 0; n < dd->num_pports; n++) {
                struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;

                rcu_read_lock();
                if (rcu_dereference(ibp->rvp.qp[0]))
                        qp_inuse++;
                if (rcu_dereference(ibp->rvp.qp[1]))
                        qp_inuse++;
                rcu_read_unlock();
        }

        return qp_inuse;
}

void flush_qp_waiters(struct rvt_qp *qp)
{
        lockdep_assert_held(&qp->s_lock);
        flush_iowait(qp);
}

void stop_send_queue(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;

        cancel_work_sync(&priv->s_iowait.iowork);
}

void quiesce_qp(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;

        iowait_sdma_drain(&priv->s_iowait);
        qp_pio_drain(qp);
        flush_tx_list(qp);
}

void notify_qp_reset(struct rvt_qp *qp)
{
        qp->r_adefered = 0;
        clear_ahg(qp);
}

/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void hfi1_migrate_qp(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;
        struct ib_event ev;

        qp->s_mig_state = IB_MIG_MIGRATED;
        qp->remote_ah_attr = qp->alt_ah_attr;
        qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
        qp->s_pkey_index = qp->s_alt_pkey_index;
        qp->s_flags |= RVT_S_AHG_CLEAR;
        priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
        priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
        qp_set_16b(qp);

        ev.device = qp->ibqp.device;
        ev.element.qp = &qp->ibqp;
        ev.event = IB_EVENT_PATH_MIG;
        qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}
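
/*
 * MTU helpers: translate between byte counts and IB/OPA MTU enums, and
 * clamp the result to what the port and per-VL configuration support.
 */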
int mtu_to_path_mtu(u32 mtu)
{
        return mtu_to_enum(mtu, OPA_MTU_8192);
}

u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
        u32 mtu;
        struct hfi1_ibdev *verbs_dev = container_of(rdi,
                                                    struct hfi1_ibdev,
                                                    rdi);
        struct hfi1_devdata *dd = container_of(verbs_dev,
                                               struct hfi1_devdata,
                                               verbs_dev);
        struct hfi1_ibport *ibp;
        u8 sc, vl;

        ibp = &dd->pport[qp->port_num - 1].ibport_data;
        sc = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
        vl = sc_to_vlt(dd, sc);

        mtu = verbs_mtu_enum_to_int(qp->ibqp.device, pmtu);
        if (vl < PER_VL_SEND_CONTEXTS)
                mtu = min_t(u32, mtu, dd->vld[vl].mtu);
        return mtu;
}

int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                       struct ib_qp_attr *attr)
{
        int mtu, pidx = qp->port_num - 1;
        struct hfi1_ibdev *verbs_dev = container_of(rdi,
                                                    struct hfi1_ibdev,
                                                    rdi);
        struct hfi1_devdata *dd = container_of(verbs_dev,
                                               struct hfi1_devdata,
                                               verbs_dev);

        mtu = verbs_mtu_enum_to_int(qp->ibqp.device, attr->path_mtu);
        if (mtu == -1)
                return -1; /* values less than 0 are error */

        if (mtu > dd->pport[pidx].ibmtu)
                return mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
        else
                return attr->path_mtu;
}
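
/*
 * notify_error_qp - clean up a QP that has moved to the error state.
 * Unless the send engine still owns the QP (RVT_S_BUSY), pull it off
 * any iowait list and drop the resources a stalled send may hold (the
 * txreq list and any cached RDMA MR reference).
 */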
void notify_error_qp(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;
        seqlock_t *lock = priv->s_iowait.lock;

        if (lock) {
                write_seqlock(lock);
                if (!list_empty(&priv->s_iowait.list) &&
                    !(qp->s_flags & RVT_S_BUSY)) {
                        qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
                        list_del_init(&priv->s_iowait.list);
                        priv->s_iowait.lock = NULL;
                        rvt_put_qp(qp);
                }
                write_sequnlock(lock);
        }

        if (!(qp->s_flags & RVT_S_BUSY)) {
                qp->s_hdrwords = 0;
                if (qp->s_rdma_mr) {
                        rvt_put_mr(qp->s_rdma_mr);
                        qp->s_rdma_mr = NULL;
                }
                flush_tx_list(qp);
        }
}

/**
 * hfi1_qp_iter_cb - callback for iterator
 * @qp: the qp
 * @v: the sl in low bits of v
 *
 * This is called by the qp iterator to work
 * on an individual qp.
 */
static void hfi1_qp_iter_cb(struct rvt_qp *qp, u64 v)
{
        int lastwqe;
        struct ib_event ev;
        struct hfi1_ibport *ibp =
                to_iport(qp->ibqp.device, qp->port_num);
        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
        u8 sl = (u8)v;

        if (qp->port_num != ppd->port ||
            (qp->ibqp.qp_type != IB_QPT_UC &&
             qp->ibqp.qp_type != IB_QPT_RC) ||
            rdma_ah_get_sl(&qp->remote_ah_attr) != sl ||
            !(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
                return;

        spin_lock_irq(&qp->r_lock);
        spin_lock(&qp->s_hlock);
        spin_lock(&qp->s_lock);
        lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
        spin_unlock(&qp->s_lock);
        spin_unlock(&qp->s_hlock);
        spin_unlock_irq(&qp->r_lock);
        if (lastwqe) {
                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
}

/**
 * hfi1_error_port_qps - put a port's RC/UC qps into error state
 * @ibp: the ibport.
 * @sl: the service level.
 *
 * This function places all RC/UC qps with a given service level into error
 * state.  It is generally called to force upper layer apps to abandon stale
 * qps after an sl->sc mapping change.
 */
void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl)
{
        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
        struct hfi1_ibdev *dev = &ppd->dd->verbs_dev;

        rvt_qp_iter(&dev->rdi, sl, hfi1_qp_iter_cb);
}