Lines matching the full-word identifier "qp". Each entry shows the source line number, the matching line, and the enclosing function. The fragments appear to come from the QP management code of the Linux kernel hfi1 InfiniBand driver (drivers/infiniband/hw/hfi1/qp.c).

16 #include "qp.h"
22 MODULE_PARM_DESC(qp_table_size, "QP table size");
24 static void flush_tx_list(struct rvt_qp *qp);
33 static void qp_pio_drain(struct rvt_qp *qp);
122 static void flush_tx_list(struct rvt_qp *qp) in flush_tx_list() argument
124 struct hfi1_qp_priv *priv = qp->priv; in flush_tx_list()
130 static void flush_iowait(struct rvt_qp *qp) in flush_iowait() argument
132 struct hfi1_qp_priv *priv = qp->priv; in flush_iowait()
142 rvt_put_qp(qp); in flush_iowait()
160 int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr, in hfi1_check_modify_qp() argument
163 struct ib_qp *ibqp = &qp->ibqp; in hfi1_check_modify_qp()
173 if (!qp_to_sdma_engine(qp, sc) && in hfi1_check_modify_qp()
177 if (!qp_to_send_context(qp, sc)) in hfi1_check_modify_qp()
186 if (!qp_to_sdma_engine(qp, sc) && in hfi1_check_modify_qp()
190 if (!qp_to_send_context(qp, sc)) in hfi1_check_modify_qp()
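The hfi1_check_modify_qp() matches above show the driver rejecting an address-vector (or alternate-path) change whose service class cannot be mapped to both an SDMA engine and a PIO send context. A minimal sketch of that validation; only the two qp_to_*() checks come from the matches, while the HFI1_HAS_SEND_DMA condition and the -EINVAL return value are assumptions:

/* Sketch: vet the SC derived from a proposed AH before accepting it. */
static int check_av_sketch(struct rvt_qp *qp, struct rdma_ah_attr *ah_attr)
{
        struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
        u8 sc = ah_to_sc(qp->ibqp.device, ah_attr);

        if (!qp_to_sdma_engine(qp, sc) &&
            dd->flags & HFI1_HAS_SEND_DMA)      /* DMA-capable device must get an engine */
                return -EINVAL;
        if (!qp_to_send_context(qp, sc))        /* a PIO send context is always required */
                return -EINVAL;
        return 0;
}
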
202 static inline void qp_set_16b(struct rvt_qp *qp) in qp_set_16b() argument
206 struct hfi1_qp_priv *priv = qp->priv; in qp_set_16b()
209 hfi1_update_ah_attr(qp->ibqp.device, &qp->remote_ah_attr); in qp_set_16b()
212 hfi1_make_opa_lid(&qp->remote_ah_attr); in qp_set_16b()
214 if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) in qp_set_16b()
217 ibp = to_iport(qp->ibqp.device, qp->port_num); in qp_set_16b()
219 priv->hdr_type = hfi1_get_hdr_type(ppd->lid, &qp->remote_ah_attr); in qp_set_16b()
222 void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr, in hfi1_modify_qp() argument
225 struct ib_qp *ibqp = &qp->ibqp; in hfi1_modify_qp()
226 struct hfi1_qp_priv *priv = qp->priv; in hfi1_modify_qp()
229 priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr); in hfi1_modify_qp()
230 priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc); in hfi1_modify_qp()
231 priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc); in hfi1_modify_qp()
232 qp_set_16b(qp); in hfi1_modify_qp()
237 qp->s_mig_state == IB_MIG_ARMED) { in hfi1_modify_qp()
238 qp->s_flags |= HFI1_S_AHG_CLEAR; in hfi1_modify_qp()
239 priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr); in hfi1_modify_qp()
240 priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc); in hfi1_modify_qp()
241 priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc); in hfi1_modify_qp()
242 qp_set_16b(qp); in hfi1_modify_qp()
245 opfn_qp_init(qp, attr, attr_mask); in hfi1_modify_qp()
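Taken together, the hfi1_modify_qp() matches suggest that whenever the remote address vector changes, or a path migration is armed, the driver re-derives the per-QP send resources: the service class from the AH, the SDMA engine, the PIO send context, and the OPA 16B header state via qp_set_16b(). A sketch of that re-derivation, using only calls and field names visible in the listing (the wrapper function itself is hypothetical):

/* Sketch: refresh per-QP send state after qp->remote_ah_attr changed. */
static void refresh_send_state_sketch(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;

        priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
        priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);        /* NULL for SMI QPs */
        priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
        qp_set_16b(qp);                                         /* refresh OPA 16B header info */
}
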
250 * @qp: The qp
263 int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send) in hfi1_setup_wqe() argument
265 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); in hfi1_setup_wqe()
270 switch (qp->ibqp.qp_type) { in hfi1_setup_wqe()
272 hfi1_setup_tid_rdma_wqe(qp, wqe); in hfi1_setup_wqe()
277 if (wqe->length > qp->pmtu) in hfi1_setup_wqe()
316 * @qp: the QP
318 * This schedules qp progress w/o regard to the s_flags.
323 bool _hfi1_schedule_send(struct rvt_qp *qp) in _hfi1_schedule_send() argument
325 struct hfi1_qp_priv *priv = qp->priv; in _hfi1_schedule_send()
327 to_iport(qp->ibqp.device, qp->port_num); in _hfi1_schedule_send()
340 static void qp_pio_drain(struct rvt_qp *qp) in qp_pio_drain() argument
342 struct hfi1_qp_priv *priv = qp->priv; in qp_pio_drain()
359 * @qp: the QP
361 * This schedules qp progress and caller should hold
366 bool hfi1_schedule_send(struct rvt_qp *qp) in hfi1_schedule_send() argument
368 lockdep_assert_held(&qp->s_lock); in hfi1_schedule_send()
369 if (hfi1_send_ok(qp)) { in hfi1_schedule_send()
370 _hfi1_schedule_send(qp); in hfi1_schedule_send()
373 if (qp->s_flags & HFI1_S_ANY_WAIT_IO) in hfi1_schedule_send()
374 iowait_set_flag(&((struct hfi1_qp_priv *)qp->priv)->s_iowait, in hfi1_schedule_send()
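From these fragments, hfi1_schedule_send() must be called with qp->s_lock held; it only queues the send engine when hfi1_send_ok() allows it, and otherwise records that a wakeup is still owed on the iowait structure. A hedged reconstruction of that control flow (the IOWAIT_PENDING_IB flag name is an assumption, the rest comes from the matched lines):

bool hfi1_schedule_send_sketch(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;

        lockdep_assert_held(&qp->s_lock);
        if (hfi1_send_ok(qp))
                return _hfi1_schedule_send(qp);         /* queue the verbs send engine */
        if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
                iowait_set_flag(&priv->s_iowait,        /* remember a wakeup is still pending */
                                IOWAIT_PENDING_IB);
        return false;
}
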
379 static void hfi1_qp_schedule(struct rvt_qp *qp) in hfi1_qp_schedule() argument
381 struct hfi1_qp_priv *priv = qp->priv; in hfi1_qp_schedule()
385 ret = hfi1_schedule_send(qp); in hfi1_qp_schedule()
390 ret = hfi1_schedule_tid_send(qp); in hfi1_qp_schedule()
396 void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag) in hfi1_qp_wakeup() argument
400 spin_lock_irqsave(&qp->s_lock, flags); in hfi1_qp_wakeup()
401 if (qp->s_flags & flag) { in hfi1_qp_wakeup()
402 qp->s_flags &= ~flag; in hfi1_qp_wakeup()
403 trace_hfi1_qpwakeup(qp, flag); in hfi1_qp_wakeup()
404 hfi1_qp_schedule(qp); in hfi1_qp_wakeup()
406 spin_unlock_irqrestore(&qp->s_lock, flags); in hfi1_qp_wakeup()
408 rvt_put_qp(qp); in hfi1_qp_wakeup()
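The hfi1_qp_wakeup() fragments show the usual "test-and-clear under the lock, then drop the reference" pattern for resuming a QP that was parked on some resource. Roughly, as reconstructed from the lines above:

void hfi1_qp_wakeup_sketch(struct rvt_qp *qp, u32 flag)
{
        unsigned long flags;

        spin_lock_irqsave(&qp->s_lock, flags);
        if (qp->s_flags & flag) {
                qp->s_flags &= ~flag;                   /* clear the wait reason */
                trace_hfi1_qpwakeup(qp, flag);
                hfi1_qp_schedule(qp);                   /* kick IB and/or TID RDMA progress */
        }
        spin_unlock_irqrestore(&qp->s_lock, flags);
        rvt_put_qp(qp);                                 /* drop the ref taken when the QP slept */
}
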
411 void hfi1_qp_unbusy(struct rvt_qp *qp, struct iowait_work *wait) in hfi1_qp_unbusy() argument
413 struct hfi1_qp_priv *priv = qp->priv; in hfi1_qp_unbusy()
416 qp->s_flags &= ~RVT_S_BUSY; in hfi1_qp_unbusy()
420 * avoid a race condition when the qp wakes up before in hfi1_qp_unbusy()
444 struct rvt_qp *qp; in iowait_sleep() local
449 qp = tx->qp; in iowait_sleep()
450 priv = qp->priv; in iowait_sleep()
452 spin_lock_irqsave(&qp->s_lock, flags); in iowait_sleep()
453 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { in iowait_sleep()
466 to_iport(qp->ibqp.device, qp->port_num); in iowait_sleep()
469 qp->s_flags |= RVT_S_WAIT_DMA_DESC; in iowait_sleep()
474 trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC); in iowait_sleep()
475 rvt_get_qp(qp); in iowait_sleep()
478 hfi1_qp_unbusy(qp, wait); in iowait_sleep()
479 spin_unlock_irqrestore(&qp->s_lock, flags); in iowait_sleep()
482 spin_unlock_irqrestore(&qp->s_lock, flags); in iowait_sleep()
488 spin_unlock_irqrestore(&qp->s_lock, flags); in iowait_sleep()
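iowait_sleep() is the SDMA back-pressure path: if the QP is still in a state where sends may progress, it marks the QP as waiting for DMA descriptors, takes a reference, and clears RVT_S_BUSY via hfi1_qp_unbusy(). A condensed sketch of that path only; the real callback takes the SDMA engine, txreq, sequence number, etc., and the wait-list insertion and return values below are assumptions rather than matched lines:

static int sdma_backpressure_sketch(struct rvt_qp *qp, struct iowait_work *wait)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&qp->s_lock, flags);
        if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
                /* Assumption: the QP is queued on the engine's wait list here. */
                qp->s_flags |= RVT_S_WAIT_DMA_DESC;     /* remember why progress stopped */
                trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
                rvt_get_qp(qp);                         /* dropped again in hfi1_qp_wakeup() */
                hfi1_qp_unbusy(qp, wait);
                ret = -EBUSY;                           /* assumption: ask SDMA to retry later */
        } else {
                ret = -EINVAL;                          /* assumption: QP no longer sendable */
        }
        spin_unlock_irqrestore(&qp->s_lock, flags);
        return ret;
}
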
495 struct rvt_qp *qp = iowait_to_qp(wait); in iowait_wakeup() local
498 hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC); in iowait_wakeup()
503 struct rvt_qp *qp = iowait_to_qp(wait); in iowait_sdma_drained() local
508 * a QP in the error state and cannot in iowait_sdma_drained()
509 * do the flush work until that QP's in iowait_sdma_drained()
512 spin_lock_irqsave(&qp->s_lock, flags); in iowait_sdma_drained()
513 if (qp->s_flags & RVT_S_WAIT_DMA) { in iowait_sdma_drained()
514 qp->s_flags &= ~RVT_S_WAIT_DMA; in iowait_sdma_drained()
515 hfi1_schedule_send(qp); in iowait_sdma_drained()
517 spin_unlock_irqrestore(&qp->s_lock, flags); in iowait_sdma_drained()
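iowait_sdma_drained() resolves the comment just above it: a QP waiting for its own SDMA traffic to drain (RVT_S_WAIT_DMA) cannot do its flush work until the engine reports the drain, at which point the wait bit is cleared and the send engine is rescheduled. The core of it, reconstructed from the matched lines:

static void sdma_drained_sketch(struct iowait *wait)
{
        struct rvt_qp *qp = iowait_to_qp(wait);
        unsigned long flags;

        spin_lock_irqsave(&qp->s_lock, flags);
        if (qp->s_flags & RVT_S_WAIT_DMA) {
                qp->s_flags &= ~RVT_S_WAIT_DMA;         /* drain complete */
                hfi1_schedule_send(qp);                 /* flush work can run now */
        }
        spin_unlock_irqrestore(&qp->s_lock, flags);
}
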
522 struct rvt_qp *qp = iowait_to_qp(w); in hfi1_init_priority() local
523 struct hfi1_qp_priv *priv = qp->priv; in hfi1_init_priority()
525 if (qp->s_flags & RVT_S_ACK_PENDING) in hfi1_init_priority()
532 * qp_to_sdma_engine - map a qp to a send engine
533 * @qp: the QP
537 * A send engine for the qp or NULL for SMI type qp.
539 struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5) in qp_to_sdma_engine() argument
541 struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); in qp_to_sdma_engine()
546 switch (qp->ibqp.qp_type) { in qp_to_sdma_engine()
552 sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5); in qp_to_sdma_engine()
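qp_to_sdma_engine() shows how a QP is spread across the SDMA engines: SMI QPs get no engine (NULL, per the kernel-doc above), and everything else is selected by the QP number with the QoS bits shifted out, plus the service class, through sdma_select_engine_sc(). A sketch of the selection, assuming the non-SMI default case shown above:

struct sdma_engine *qp_to_sdma_engine_sketch(struct rvt_qp *qp, u8 sc5)
{
        struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

        if (qp->ibqp.qp_type == IB_QPT_SMI)
                return NULL;                            /* SMI never uses SDMA */

        /* Shift out the QoS bits of the QP number, then pick by service class. */
        return sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5);
}
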
557 * qp_to_send_context - map a qp to a send context
558 * @qp: the QP
562 * A send context for the qp
564 struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5) in qp_to_send_context() argument
566 struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); in qp_to_send_context()
568 switch (qp->ibqp.qp_type) { in qp_to_send_context()
576 return pio_select_send_context_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, in qp_to_send_context()
584 static int qp_idle(struct rvt_qp *qp) in qp_idle() argument
587 qp->s_last == qp->s_acked && in qp_idle()
588 qp->s_acked == qp->s_cur && in qp_idle()
589 qp->s_cur == qp->s_tail && in qp_idle()
590 qp->s_tail == qp->s_head; in qp_idle()
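qp_idle() is just a comparison chain over the send-queue indices: a QP is idle when every producer/consumer index has caught up. The full condition, assembled from the four matched lines:

static int qp_idle_sketch(struct rvt_qp *qp)
{
        /* Idle when nothing is queued, in flight, or awaiting an ACK. */
        return qp->s_last == qp->s_acked &&
               qp->s_acked == qp->s_cur &&
               qp->s_cur == qp->s_tail &&
               qp->s_tail == qp->s_head;
}
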
594 * qp_iter_print - print the qp information to seq_file
595 * @s: the seq_file to emit the qp information on
596 * @iter: the iterator for the qp hash list
601 struct rvt_qp *qp = iter->qp; in qp_iter_print() local
602 struct hfi1_qp_priv *priv = qp->priv; in qp_iter_print()
606 struct rvt_srq *srq = qp->ibqp.srq ? in qp_iter_print()
607 ibsrq_to_rvtsrq(qp->ibqp.srq) : NULL; in qp_iter_print()
609 sde = qp_to_sdma_engine(qp, priv->s_sc); in qp_iter_print()
610 wqe = rvt_get_swqe_ptr(qp, qp->s_last); in qp_iter_print()
611 send_context = qp_to_send_context(qp, priv->s_sc); in qp_iter_print()
612 if (qp->s_ack_queue) in qp_iter_print()
613 e = &qp->s_ack_queue[qp->s_tail_ack_queue]; in qp_iter_print()
615 …"N %d %s QP %x R %u %s %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x S(%u %u %u %u %u %… in qp_iter_print()
617 qp_idle(qp) ? "I" : "B", in qp_iter_print()
618 qp->ibqp.qp_num, in qp_iter_print()
619 atomic_read(&qp->refcount), in qp_iter_print()
620 qp_type_str[qp->ibqp.qp_type], in qp_iter_print()
621 qp->state, in qp_iter_print()
623 qp->s_flags, in qp_iter_print()
627 qp->timeout, in qp_iter_print()
629 qp->s_lsn, in qp_iter_print()
630 qp->s_last_psn, in qp_iter_print()
631 qp->s_psn, qp->s_next_psn, in qp_iter_print()
632 qp->s_sending_psn, qp->s_sending_hpsn, in qp_iter_print()
633 qp->r_psn, in qp_iter_print()
634 qp->s_last, qp->s_acked, qp->s_cur, in qp_iter_print()
635 qp->s_tail, qp->s_head, qp->s_size, in qp_iter_print()
636 qp->s_avail, in qp_iter_print()
638 qp->s_tail_ack_queue, qp->r_head_ack_queue, in qp_iter_print()
639 rvt_max_atomic(&to_idev(qp->ibqp.device)->rdi), in qp_iter_print()
640 /* remote QP info */ in qp_iter_print()
641 qp->remote_qpn, in qp_iter_print()
642 rdma_ah_get_dlid(&qp->remote_ah_attr), in qp_iter_print()
643 rdma_ah_get_sl(&qp->remote_ah_attr), in qp_iter_print()
644 qp->pmtu, in qp_iter_print()
645 qp->s_retry, in qp_iter_print()
646 qp->s_retry_cnt, in qp_iter_print()
647 qp->s_rnr_retry_cnt, in qp_iter_print()
648 qp->s_rnr_retry, in qp_iter_print()
653 ib_cq_head(qp->ibqp.send_cq), in qp_iter_print()
654 ib_cq_tail(qp->ibqp.send_cq), in qp_iter_print()
655 qp->pid, in qp_iter_print()
656 qp->s_state, in qp_iter_print()
657 qp->s_ack_state, in qp_iter_print()
662 qp->r_min_rnr_timer, in qp_iter_print()
664 srq ? srq->rq.size : qp->r_rq.size in qp_iter_print()
668 void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp) in qp_priv_alloc() argument
676 priv->owner = qp; in qp_priv_alloc()
698 void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp) in qp_priv_free() argument
700 struct hfi1_qp_priv *priv = qp->priv; in qp_priv_free()
702 hfi1_qp_priv_tid_free(rdi, qp); in qp_priv_free()
722 if (rcu_dereference(ibp->rvp.qp[0])) in free_all_qps()
724 if (rcu_dereference(ibp->rvp.qp[1])) in free_all_qps()
732 void flush_qp_waiters(struct rvt_qp *qp) in flush_qp_waiters() argument
734 lockdep_assert_held(&qp->s_lock); in flush_qp_waiters()
735 flush_iowait(qp); in flush_qp_waiters()
736 hfi1_tid_rdma_flush_wait(qp); in flush_qp_waiters()
739 void stop_send_queue(struct rvt_qp *qp) in stop_send_queue() argument
741 struct hfi1_qp_priv *priv = qp->priv; in stop_send_queue()
745 rvt_put_qp(qp); in stop_send_queue()
748 void quiesce_qp(struct rvt_qp *qp) in quiesce_qp() argument
750 struct hfi1_qp_priv *priv = qp->priv; in quiesce_qp()
752 hfi1_del_tid_reap_timer(qp); in quiesce_qp()
753 hfi1_del_tid_retry_timer(qp); in quiesce_qp()
755 qp_pio_drain(qp); in quiesce_qp()
756 flush_tx_list(qp); in quiesce_qp()
759 void notify_qp_reset(struct rvt_qp *qp) in notify_qp_reset() argument
761 hfi1_qp_kern_exp_rcv_clear_all(qp); in notify_qp_reset()
762 qp->r_adefered = 0; in notify_qp_reset()
763 clear_ahg(qp); in notify_qp_reset()
766 if (qp->ibqp.qp_type == IB_QPT_RC) in notify_qp_reset()
767 opfn_conn_error(qp); in notify_qp_reset()
772 * The QP s_lock should be held and interrupts disabled.
774 void hfi1_migrate_qp(struct rvt_qp *qp) in hfi1_migrate_qp() argument
776 struct hfi1_qp_priv *priv = qp->priv; in hfi1_migrate_qp()
779 qp->s_mig_state = IB_MIG_MIGRATED; in hfi1_migrate_qp()
780 qp->remote_ah_attr = qp->alt_ah_attr; in hfi1_migrate_qp()
781 qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr); in hfi1_migrate_qp()
782 qp->s_pkey_index = qp->s_alt_pkey_index; in hfi1_migrate_qp()
783 qp->s_flags |= HFI1_S_AHG_CLEAR; in hfi1_migrate_qp()
784 priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr); in hfi1_migrate_qp()
785 priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc); in hfi1_migrate_qp()
786 qp_set_16b(qp); in hfi1_migrate_qp()
788 ev.device = qp->ibqp.device; in hfi1_migrate_qp()
789 ev.element.qp = &qp->ibqp; in hfi1_migrate_qp()
791 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); in hfi1_migrate_qp()
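hfi1_migrate_qp() performs the path-migration switch-over: the alternate address handle becomes the primary, the derived send-side state is recomputed, and a path-migration event is delivered to the consumer. A sketch built from the matched lines; the IB_EVENT_PATH_MIG assignment between the two ev lines is an assumption:

void hfi1_migrate_qp_sketch(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;
        struct ib_event ev;

        qp->s_mig_state = IB_MIG_MIGRATED;
        qp->remote_ah_attr = qp->alt_ah_attr;           /* alternate path becomes primary */
        qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
        qp->s_pkey_index = qp->s_alt_pkey_index;
        qp->s_flags |= HFI1_S_AHG_CLEAR;                /* force header rebuild */
        priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
        priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
        qp_set_16b(qp);

        ev.device = qp->ibqp.device;
        ev.element.qp = &qp->ibqp;
        ev.event = IB_EVENT_PATH_MIG;                   /* assumption: not among the matches */
        qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}
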
799 u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu) in mtu_from_qp() argument
811 ibp = &dd->pport[qp->port_num - 1].ibport_data; in mtu_from_qp()
812 sc = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)]; in mtu_from_qp()
815 mtu = verbs_mtu_enum_to_int(qp->ibqp.device, pmtu); in mtu_from_qp()
821 int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp, in get_pmtu_from_attr() argument
824 int mtu, pidx = qp->port_num - 1; in get_pmtu_from_attr()
831 mtu = verbs_mtu_enum_to_int(qp->ibqp.device, attr->path_mtu); in get_pmtu_from_attr()
841 void notify_error_qp(struct rvt_qp *qp) in notify_error_qp() argument
843 struct hfi1_qp_priv *priv = qp->priv; in notify_error_qp()
849 !(qp->s_flags & RVT_S_BUSY) && in notify_error_qp()
851 qp->s_flags &= ~HFI1_S_ANY_WAIT_IO; in notify_error_qp()
856 rvt_put_qp(qp); in notify_error_qp()
861 if (!(qp->s_flags & RVT_S_BUSY) && !(priv->s_flags & RVT_S_BUSY)) { in notify_error_qp()
862 qp->s_hdrwords = 0; in notify_error_qp()
863 if (qp->s_rdma_mr) { in notify_error_qp()
864 rvt_put_mr(qp->s_rdma_mr); in notify_error_qp()
865 qp->s_rdma_mr = NULL; in notify_error_qp()
867 flush_tx_list(qp); in notify_error_qp()
873 * @qp: the qp
877 * on an individual qp.
879 static void hfi1_qp_iter_cb(struct rvt_qp *qp, u64 v) in hfi1_qp_iter_cb() argument
884 to_iport(qp->ibqp.device, qp->port_num); in hfi1_qp_iter_cb()
888 if (qp->port_num != ppd->port || in hfi1_qp_iter_cb()
889 (qp->ibqp.qp_type != IB_QPT_UC && in hfi1_qp_iter_cb()
890 qp->ibqp.qp_type != IB_QPT_RC) || in hfi1_qp_iter_cb()
891 rdma_ah_get_sl(&qp->remote_ah_attr) != sl || in hfi1_qp_iter_cb()
892 !(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK)) in hfi1_qp_iter_cb()
895 spin_lock_irq(&qp->r_lock); in hfi1_qp_iter_cb()
896 spin_lock(&qp->s_hlock); in hfi1_qp_iter_cb()
897 spin_lock(&qp->s_lock); in hfi1_qp_iter_cb()
898 lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); in hfi1_qp_iter_cb()
899 spin_unlock(&qp->s_lock); in hfi1_qp_iter_cb()
900 spin_unlock(&qp->s_hlock); in hfi1_qp_iter_cb()
901 spin_unlock_irq(&qp->r_lock); in hfi1_qp_iter_cb()
903 ev.device = qp->ibqp.device; in hfi1_qp_iter_cb()
904 ev.element.qp = &qp->ibqp; in hfi1_qp_iter_cb()
906 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); in hfi1_qp_iter_cb()
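hfi1_qp_iter_cb() is the per-QP callback used to force QPs on a given port and SL into the error state: QPs on other ports, non-UC/RC QPs, other SLs, and QPs that can no longer post sends are skipped; the rest are errored under the r_lock -> s_hlock -> s_lock ordering, and an IB_EVENT_QP_LAST_WQE_REACHED event is raised when the last WQE was reached. A sketch of that filter-and-error pattern; the SL extraction from v and the last-WQE event handling are assumptions based on the surrounding fragments:

static void qp_iter_cb_sketch(struct rvt_qp *qp, u64 v)
{
        int lastwqe;
        struct ib_event ev;
        u8 sl = (u8)v;                                  /* assumption: SL carried in v */
        struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

        /* Only RC/UC QPs on this port and SL that can still post sends. */
        if (qp->port_num != ppd->port ||
            (qp->ibqp.qp_type != IB_QPT_UC && qp->ibqp.qp_type != IB_QPT_RC) ||
            rdma_ah_get_sl(&qp->remote_ah_attr) != sl ||
            !(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
                return;

        spin_lock_irq(&qp->r_lock);                     /* lock order: r_lock, s_hlock, s_lock */
        spin_lock(&qp->s_hlock);
        spin_lock(&qp->s_lock);
        lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
        spin_unlock(&qp->s_lock);
        spin_unlock(&qp->s_hlock);
        spin_unlock_irq(&qp->r_lock);

        if (lastwqe) {                                  /* assumption: only signal on last WQE */
                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
}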