/linux/drivers/infiniband/sw/rxe/
rxe_mcast.c
    31  static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
    37  ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
    56  static int rxe_mcast_del(struct rxe_dev *rxe, union ib_gid *mgid)
    62  ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
    83  struct rb_root *tree = &mcg->rxe->mcg_tree;
   112  rb_erase(&mcg->node, &mcg->rxe->mcg_tree);
   123  static struct rxe_mcg *__rxe_lookup_mcg(struct rxe_dev *rxe,
   126  struct rb_root *tree = &rxe->mcg_tree;
   161  struct rxe_mcg *rxe_lookup_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
   165  spin_lock_bh(&rxe->mcg_lock);
   [all …]
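
The hits above outline rxe's multicast bookkeeping: groups live in an rb-tree (rxe->mcg_tree) keyed by MGID, and rxe_lookup_mcg() walks it under rxe->mcg_lock. Below is a minimal userspace analogue of that lock-plus-keyed-tree lookup, using a plain binary search tree and a pthread mutex in place of the kernel rb-tree and spin_lock_bh(); struct mcg_node and mcg_lookup are illustrative names, not kernel API.

    #include <pthread.h>
    #include <string.h>

    struct mcg_node {
        unsigned char mgid[16];          /* search key, like the MGID */
        struct mcg_node *left, *right;
    };

    static pthread_mutex_t mcg_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Walk the tree under the lock; returns NULL if the group is absent. */
    static struct mcg_node *mcg_lookup(struct mcg_node *root,
                                       const unsigned char *mgid)
    {
        struct mcg_node *node;

        pthread_mutex_lock(&mcg_lock);   /* rxe uses spin_lock_bh() here */
        node = root;
        while (node) {
            int cmp = memcmp(mgid, node->mgid, sizeof(node->mgid));

            if (cmp == 0)
                break;                   /* found the group */
            node = cmp < 0 ? node->left : node->right;
        }
        pthread_mutex_unlock(&mcg_lock);
        return node;
    }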
|
rxe_mmap.c
    20  struct rxe_dev *rxe = to_rdev(ip->context->device);
    22  spin_lock_bh(&rxe->pending_lock);
    27  spin_unlock_bh(&rxe->pending_lock);
    64  struct rxe_dev *rxe = to_rdev(context->device);
    75  spin_lock_bh(&rxe->pending_lock);
    76  list_for_each_entry_safe(ip, pp, &rxe->pending_mmaps, pending_mmaps) {
    82  rxe_dbg_dev(rxe, "mmap region is larger than the object!\n");
    83  spin_unlock_bh(&rxe->pending_lock);
    90  rxe_dbg_dev(rxe, "unable to find pending mmap info\n");
    91  spin_unlock_bh(&rxe->pending_lock);
   [all …]
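
rxe_mmap() above walks rxe->pending_mmaps under pending_lock with list_for_each_entry_safe(), the iteration variant that tolerates unlinking the entry currently being visited. The sketch below shows the underlying idiom on a plain singly linked list: capture the next pointer before the current node may be freed. remove_matching and struct pending are made-up names for the demo; the kernel version is an intrusive doubly linked list.

    #include <stdlib.h>

    struct pending {
        unsigned long key;
        struct pending *next;
    };

    /* Unlink and free every node matching @key. Saving @next before the
     * node can be freed is what makes traversal-with-removal safe. */
    static void remove_matching(struct pending **head, unsigned long key)
    {
        struct pending **link = head, *node, *next;

        for (node = *head; node; node = next) {
            next = node->next;           /* capture before freeing */
            if (node->key == key) {
                *link = next;            /* unlink current node */
                free(node);
            } else {
                link = &node->next;
            }
        }
    }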
|
rxe_recv.c
    13  static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
    75  static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
    78  struct rxe_port *port = &rxe->port;
   100  static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
   137  struct rxe_dev *rxe = pkt->rxe;
   138  struct rxe_port *port = &rxe->port;
   153  qp = rxe_pool_get_index(&rxe->qp_pool, index);
   157  err = check_type_state(rxe, pkt, qp);
   161  err = check_addr(rxe, pkt, qp);
   165  err = check_keys(rxe, pkt, qpn, qp);
   [all …]
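
hdr_check() above resolves the destination QP from rxe->qp_pool, then runs check_type_state(), check_addr() and check_keys() in sequence, bailing out on the first failure. A self-contained sketch of that early-exit validation chain follows; check_a/check_b/check_c and struct pkt are stand-ins, not kernel names.

    #include <errno.h>

    struct pkt { int type_ok; int addr_ok; int keys_ok; };

    static int check_a(const struct pkt *p) { return p->type_ok ? 0 : -EINVAL; }
    static int check_b(const struct pkt *p) { return p->addr_ok ? 0 : -EINVAL; }
    static int check_c(const struct pkt *p) { return p->keys_ok ? 0 : -EINVAL; }

    /* Run the validators in order; the first failure aborts the chain. */
    static int hdr_check_sketch(const struct pkt *p)
    {
        int err;

        err = check_a(p);
        if (err)
            goto err_out;
        err = check_b(p);
        if (err)
            goto err_out;
        err = check_c(p);
        if (err)
            goto err_out;
        return 0;

    err_out:
        return err;                      /* caller drops the packet */
    }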
|
rxe_net.c
   185  struct rxe_dev *rxe;
   192  rxe = rxe_get_dev_from_net(ndev);
   193  if (!rxe && is_vlan_dev(ndev))
   194  rxe = rxe_get_dev_from_net(vlan_dev_real_dev(ndev));
   195  if (!rxe)
   199  ib_device_put(&rxe->ib_dev);
   204  pkt->rxe = rxe;
   448  if (WARN_ON(!ib_device_try_get(&pkt->rxe->ib_dev))) {
   466  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
   485  rxe_counter_inc(rxe, RXE_CNT_SEND_ERR);
   [all …]
|
rxe_srq.c
    11  int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init)
    15  if (attr->max_wr > rxe->attr.max_srq_wr) {
    16  rxe_dbg_dev(rxe, "max_wr(%d) > max_srq_wr(%d)\n",
    17  attr->max_wr, rxe->attr.max_srq_wr);
    22  rxe_dbg_dev(rxe, "max_wr(%d) <= 0\n", attr->max_wr);
    29  if (attr->max_sge > rxe->attr.max_srq_sge) {
    30  rxe_dbg_dev(rxe, "max_sge(%d) > max_srq_sge(%d)\n",
    31  attr->max_sge, rxe->attr.max_srq_sge);
    44  int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
    65  q = rxe_queue_init(rxe, &srq->rq.max_wr, wqe_size,
   [all …]
|
rxe_qp.c
    66  static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
    69  if (cap->max_send_wr > rxe->attr.max_qp_wr) {
    70  rxe_dbg_dev(rxe, "invalid send wr = %u > %d\n",
    71  cap->max_send_wr, rxe->attr.max_qp_wr);
    75  if (cap->max_send_sge > rxe->attr.max_send_sge) {
    76  rxe_dbg_dev(rxe, "invalid send sge = %u > %d\n",
    77  cap->max_send_sge, rxe->attr.max_send_sge);
    82  if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
    83  rxe_dbg_dev(rxe, "invalid recv wr = %u > %d\n",
    84  cap->max_recv_wr, rxe->attr.max_qp_wr);
   [all …]
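
rxe_qp_chk_cap() above, like rxe_srq_chk_init() and rxe_cq_chk_attr() in the neighbouring entries, follows one validation shape: compare each requested capability against the corresponding device attribute, log the offending pair, and return -EINVAL. A minimal standalone version of that pattern; struct dev_caps and chk_cap are illustrative, with fprintf standing in for rxe_dbg_dev.

    #include <errno.h>
    #include <stdio.h>

    struct dev_caps {
        int max_qp_wr;                   /* device ceiling for work requests */
        int max_send_sge;                /* device ceiling for send SGEs */
    };

    static int chk_cap(const struct dev_caps *caps,
                       int max_send_wr, int max_send_sge)
    {
        if (max_send_wr > caps->max_qp_wr) {
            fprintf(stderr, "invalid send wr = %d > %d\n",
                    max_send_wr, caps->max_qp_wr);
            return -EINVAL;
        }
        if (max_send_sge > caps->max_send_sge) {
            fprintf(stderr, "invalid send sge = %d > %d\n",
                    max_send_sge, caps->max_send_sge);
            return -EINVAL;
        }
        return 0;
    }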
|
rxe_cq.c
    11  int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
    17  rxe_dbg_dev(rxe, "cqe(%d) <= 0\n", cqe);
    21  if (cqe > rxe->attr.max_cqe) {
    22  rxe_dbg_dev(rxe, "cqe(%d) > max_cqe(%d)\n",
    23  cqe, rxe->attr.max_cqe);
    42  int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
    50  cq->queue = rxe_queue_init(rxe, &cqe,
    53  rxe_dbg_dev(rxe, "unable to create cq\n");
    57  err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
|
rxe_queue.c
    12  int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
    20  ip = rxe_create_mmap_info(rxe, buf_size, udata, buf);
    31  spin_lock_bh(&rxe->pending_lock);
    32  list_add(&ip->pending_mmaps, &rxe->pending_mmaps);
    33  spin_unlock_bh(&rxe->pending_lock);
    55  struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
    70  q->rxe = rxe;
   158  new_q = rxe_queue_init(q->rxe, &num_elem, elem_size, q->type);
   162  err = do_mmap_info(new_q->rxe, outbuf, udata, new_q->buf,
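
rxe_queue_init() takes int *num_elem, and the call sites above pass &cqe, &srq->rq.max_wr or &num_elem, so the constructor can adjust the requested depth and report back the value actually used. A sketch of that out-parameter API shape follows; the power-of-two rounding is an assumption for illustration, not something shown in the hits, and queue_init_sketch is an invented name.

    /* Round n up to the next power of two (n >= 1). */
    static unsigned int roundup_pow2(unsigned int n)
    {
        unsigned int p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    /* Adjust the caller's requested depth in place, as the &cqe /
     * &srq->rq.max_wr call sites above suggest rxe_queue_init() does. */
    static int queue_init_sketch(int *num_elem)
    {
        if (*num_elem <= 0)
            return -1;

        *num_elem = (int)roundup_pow2((unsigned int)*num_elem);
        return 0;
    }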
|
rxe_mw.c
    20  struct rxe_dev *rxe = to_rdev(ibmw->device);
    25  ret = rxe_add_to_pool(&rxe->mw_pool, mw);
   169  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
   174  mw = rxe_pool_get_index(&rxe->mw_pool, mw_rkey >> 8);
   186  mr = rxe_pool_get_index(&rxe->mr_pool, mr_lkey >> 8);
   260  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
   264  mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);
   292  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
   297  mw = rxe_pool_get_index(&rxe->mw_pool, index);
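
rxe_bind_mw() and rxe_invalidate_mw() above, like the hits in rxe_mr.c and rxe_resp.c, recover an object's pool index as rkey >> 8: the low byte of an rkey/lkey is the variable key portion and the upper bits locate the object in the pool. A tiny demonstration of the split (the key value is arbitrary):

    #include <stdio.h>

    int main(void)
    {
        unsigned int rkey  = 0x00012345; /* arbitrary example key */
        unsigned int index = rkey >> 8;  /* pool index: 0x123 */
        unsigned int keyb  = rkey & 0xff; /* low, variable key byte: 0x45 */

        printf("index=%#x key_byte=%#x\n", index, keyb);
        return 0;
    }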
|
rxe_av.c
    21  struct rxe_dev *rxe;
    28  rxe = to_rdev(ah->ibah.device);
    31  rxe = to_rdev(qp->ibqp.device);
    34  port = &rxe->port;
   151  ah = rxe_pool_get_index(&pkt->rxe->ah_pool, ah_num);
|
rxe_comp.c
   132  rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_SENDER_SCHED);
   221  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
   304  rxe_counter_inc(rxe, RXE_CNT_RCV_RNR);
   314  rxe_counter_inc(rxe,
   442  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
   462  rxe_counter_inc(rxe, RXE_CNT_RDMA_SEND);
   640  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
   784  rxe_counter_inc(rxe,
   793  rxe_counter_inc(rxe, RXE_CNT_RETRY_EXCEEDED);
   816  rxe_counter_inc(rxe,
|
rxe_pool.h
    32  struct rxe_dev *rxe;
    51  void rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
|
rxe_queue.h
    63  struct rxe_dev *rxe;
    79  int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
    85  struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
|
rxe_mr.c
   129  int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
   139  umem = ib_umem_get(&rxe->ib_dev, start, length, access);
   618  struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
   621  mr = rxe_pool_get_index(&rxe->mr_pool, index);
   638  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
   643  mr = rxe_pool_get_index(&rxe->mr_pool, key >> 8);
|
rxe_req.c
   405  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
   410  return rxe->port.mtu_cap;
   419  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
   433  skb = rxe_init_packet(rxe, av, paylen, pkt);
   641  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
   766  pkt.rxe = rxe;
|
rxe_resp.c
    74  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
    83  rxe_counter_inc(rxe, RXE_CNT_OUT_OF_SEQ_REQ);
    87  rxe_counter_inc(rxe, RXE_CNT_DUP_REQ);
   785  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
   797  skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack);
   846  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
   851  mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);
   868  mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
  1076  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
  1095  rxe_counter_inc(rxe, RXE_CNT_RDMA_RECV);
   [all …]
|
rxe_odp.c
    79  int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
    93  if (!(rxe->attr.odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
    99  umem_odp = ib_umem_odp_get(&rxe->ib_dev, start, length, access_flags,
|
Kconfig
    27  https://github.com/linux-rdma/rdma-core/blob/master/Documentation/rxe.md
|
rxe_hdr.h
    15  struct rxe_dev *rxe;    /* device that owns packet */
|
/linux/drivers/infiniband/sw/
Makefile
     3  obj-$(CONFIG_RDMA_RXE) += rxe/
|
/linux/include/net/libeth/
rx.h
   249  u32 rxe:1;
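
Here rxe is unrelated to the soft-RoCE driver: it is a one-bit bitfield member in one of libeth's RX descriptor-parse structures, apparently a receive-error flag judging by the iavf usage below. A minimal illustration of such single-bit bitfield members; struct rx_flags and its eop field are invented for the demo.

    #include <stdio.h>

    struct rx_flags {
        unsigned int rxe : 1;            /* receive error flagged by HW */
        unsigned int eop : 1;            /* invented second flag */
    };

    int main(void)
    {
        struct rx_flags f = { .rxe = 1 };

        if (f.rxe)
            puts("descriptor flagged a receive error");
        printf("sizeof(struct rx_flags) = %zu\n", sizeof(f));
        return 0;
    }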
|
/linux/drivers/infiniband/
Kconfig
   101  source "drivers/infiniband/sw/rxe/Kconfig"
|
/linux/drivers/net/ethernet/intel/iavf/
iavf_txrx.c
  1300  fields.rxe = FIELD_GET(IAVF_RXD_LEGACY_RXE_M, qw1);
  1346  fields.rxe = FIELD_GET(IAVF_RXD_FLEX_RXE_M, qw1);
  1452  if (unlikely(fields.rxe)) {
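
iavf extracts that receive-error flag from a 64-bit descriptor word with FIELD_GET(mask, qw1) from <linux/bitfield.h>, which masks the value and shifts it down by the mask's lowest set bit. A simplified userspace equivalent of the macro follows; RXE_MASK's bit position is made up for the demo, since the real IAVF_RXD_*_RXE_M values are not shown in the hits.

    #include <stdint.h>
    #include <stdio.h>

    /* Extract (val & mask), shifted down by the mask's lowest set bit. */
    #define FIELD_GET_SKETCH(mask, val) \
        (((val) & (mask)) >> __builtin_ctzll(mask))

    #define RXE_MASK (1ULL << 19)        /* made-up bit position */

    int main(void)
    {
        uint64_t qw1 = RXE_MASK | 0x5;   /* pretend descriptor qword */
        unsigned int rxe = (unsigned int)FIELD_GET_SKETCH(RXE_MASK, qw1);

        printf("rxe=%u\n", rxe);         /* prints rxe=1 */
        return 0;
    }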
|