/linux/drivers/infiniband/sw/rxe/
rxe.c
    21  struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);  in rxe_dealloc() local
    23  rxe_pool_cleanup(&rxe->uc_pool);  in rxe_dealloc()
    24  rxe_pool_cleanup(&rxe->pd_pool);  in rxe_dealloc()
    25  rxe_pool_cleanup(&rxe->ah_pool);  in rxe_dealloc()
    26  rxe_pool_cleanup(&rxe->srq_pool);  in rxe_dealloc()
    27  rxe_pool_cleanup(&rxe->qp_pool);  in rxe_dealloc()
    28  rxe_pool_cleanup(&rxe->cq_pool);  in rxe_dealloc()
    29  rxe_pool_cleanup(&rxe->mr_pool);  in rxe_dealloc()
    30  rxe_pool_cleanup(&rxe->mw_pool);  in rxe_dealloc()
    32  WARN_ON(!RB_EMPTY_ROOT(&rxe->mcg_tree));  in rxe_dealloc()
    [all …]
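The rxe_dealloc() hits above show two recurring kernel idioms: container_of() recovers the rxe_dev from its embedded ib_dev member, then each per-object-type pool is torn down. Below is a minimal userspace sketch of the container_of() part only; the num_pools field and the simplified struct bodies are invented for the demo, and only the embedding idiom matches the driver.

    /* build: cc container_of_sketch.c */
    #include <stddef.h>
    #include <stdio.h>

    /* Simplified stand-ins for the kernel structures. */
    struct ib_dev { int id; };

    struct rxe_dev {
            int num_pools;          /* invented field for the demo */
            struct ib_dev ib_dev;   /* embedded, as in the driver  */
    };

    /* The classic container_of(): recover the enclosing structure
     * from a pointer to one of its members. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static void dealloc(struct ib_dev *ib_dev)
    {
            struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);

            printf("tearing down %d pools\n", rxe->num_pools);
    }

    int main(void)
    {
            struct rxe_dev dev = { .num_pools = 8 };

            dealloc(&dev.ib_dev);   /* prints "tearing down 8 pools" */
            return 0;
    }
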
rxe_mcast.c
    31  static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)  in rxe_mcast_add() argument
    37  ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);  in rxe_mcast_add()
    56  static int rxe_mcast_del(struct rxe_dev *rxe, union ib_gid *mgid)  in rxe_mcast_del() argument
    62  ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);  in rxe_mcast_del()
    83  struct rb_root *tree = &mcg->rxe->mcg_tree;  in __rxe_insert_mcg()
   112  rb_erase(&mcg->node, &mcg->rxe->mcg_tree);  in __rxe_remove_mcg()
   123  static struct rxe_mcg *__rxe_lookup_mcg(struct rxe_dev *rxe,  in __rxe_lookup_mcg() argument
   126  struct rb_root *tree = &rxe->mcg_tree;  in __rxe_lookup_mcg()
   161  struct rxe_mcg *rxe_lookup_mcg(struct rxe_dev *rxe, union ib_gid *mgid)  in rxe_lookup_mcg() argument
   165  spin_lock_bh(&rxe->mcg_lock);  in rxe_lookup_mcg()
    [all …]
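The multicast hits revolve around mcg_tree, a red-black tree keyed by multicast GID and guarded by mcg_lock. As a rough userspace analogue of the __rxe_lookup_mcg() walk, here is an unbalanced binary search tree ordered by memcmp() on a 16-byte MGID, with a pthread mutex standing in for spin_lock_bh(); all names and the tree type are simplified stand-ins, since the kernel uses the self-balancing rb_tree API.

    /* build: cc mcg_lookup_sketch.c -lpthread */
    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    struct mcg {
            unsigned char mgid[16];
            struct mcg *left, *right;
    };

    static struct mcg *mcg_tree;
    static pthread_mutex_t mcg_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Mirrors the shape of __rxe_lookup_mcg(): walk the tree under
     * the lock, branching on the comparison result until the key
     * matches or the walk falls off a leaf. */
    static struct mcg *lookup_mcg(const unsigned char *mgid)
    {
            struct mcg *node;

            pthread_mutex_lock(&mcg_lock);
            node = mcg_tree;
            while (node) {
                    int cmp = memcmp(mgid, node->mgid, sizeof(node->mgid));

                    if (cmp == 0)
                            break;
                    node = cmp < 0 ? node->left : node->right;
            }
            pthread_mutex_unlock(&mcg_lock);
            return node;
    }

    int main(void)
    {
            struct mcg a = { .mgid = { 0xff, 0x0e } };

            mcg_tree = &a;
            printf("found: %s\n", lookup_mcg(a.mgid) ? "yes" : "no");
            return 0;
    }
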
rxe_mmap.c
    20  struct rxe_dev *rxe = to_rdev(ip->context->device);  in rxe_mmap_release() local
    22  spin_lock_bh(&rxe->pending_lock);  in rxe_mmap_release()
    27  spin_unlock_bh(&rxe->pending_lock);  in rxe_mmap_release()
    64  struct rxe_dev *rxe = to_rdev(context->device);  in rxe_mmap() local
    75  spin_lock_bh(&rxe->pending_lock);  in rxe_mmap()
    76  list_for_each_entry_safe(ip, pp, &rxe->pending_mmaps, pending_mmaps) {  in rxe_mmap()
    82  rxe_dbg_dev(rxe, "mmap region is larger than the object!\n");  in rxe_mmap()
    83  spin_unlock_bh(&rxe->pending_lock);  in rxe_mmap()
    90  rxe_dbg_dev(rxe, "unable to find pending mmap info\n");  in rxe_mmap()
    91  spin_unlock_bh(&rxe->pending_lock);  in rxe_mmap()
    [all …]
rxe_net.c
   137  struct rxe_dev *rxe;  in rxe_udp_encap_recv() local
   144  rxe = rxe_get_dev_from_net(ndev);  in rxe_udp_encap_recv()
   145  if (!rxe && is_vlan_dev(ndev))  in rxe_udp_encap_recv()
   146  rxe = rxe_get_dev_from_net(vlan_dev_real_dev(ndev));  in rxe_udp_encap_recv()
   147  if (!rxe)  in rxe_udp_encap_recv()
   151  ib_device_put(&rxe->ib_dev);  in rxe_udp_encap_recv()
   156  pkt->rxe = rxe;  in rxe_udp_encap_recv()
   349  struct rxe_dev *rxe;  in rxe_skb_tx_dtor() local
   354  rxe = rxe_get_dev_from_net(ndev);  in rxe_skb_tx_dtor()
   355  if (!rxe && is_vlan_dev(ndev))  in rxe_skb_tx_dtor()
    [all …]
rxe_recv.c
    13  static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,  in check_type_state() argument
    75  static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,  in check_keys() argument
    78  struct rxe_port *port = &rxe->port;  in check_keys()
   100  static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,  in check_addr() argument
   137  struct rxe_dev *rxe = pkt->rxe;  in hdr_check() local
   138  struct rxe_port *port = &rxe->port;  in hdr_check()
   153  qp = rxe_pool_get_index(&rxe->qp_pool, index);  in hdr_check()
   157  err = check_type_state(rxe, pkt, qp);  in hdr_check()
   161  err = check_addr(rxe, pkt, qp);  in hdr_check()
   165  err = check_keys(rxe, pkt, qpn, qp);  in hdr_check()
    [all …]
rxe_verbs.c
    22  struct rxe_dev *rxe = to_rdev(ibdev);  in rxe_query_device() local
    26  rxe_dbg_dev(rxe, "malformed udata\n");  in rxe_query_device()
    31  memcpy(attr, &rxe->attr, sizeof(*attr));  in rxe_query_device()
    36  rxe_err_dev(rxe, "returned err = %d\n", err);  in rxe_query_device()
    43  struct rxe_dev *rxe = to_rdev(ibdev);  in rxe_query_port() local
    49  rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);  in rxe_query_port()
    59  memcpy(attr, &rxe->port.attr, sizeof(*attr));  in rxe_query_port()
    61  mutex_lock(&rxe->usdev_lock);  in rxe_query_port()
    73  mutex_unlock(&rxe->usdev_lock);  in rxe_query_port()
    79  rxe_err_dev(rxe, "returned err = %d\n", err);  in rxe_query_port()
    [all …]
rxe_srq.c
    11  int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init)  in rxe_srq_chk_init() argument
    15  if (attr->max_wr > rxe->attr.max_srq_wr) {  in rxe_srq_chk_init()
    16  rxe_dbg_dev(rxe, "max_wr(%d) > max_srq_wr(%d)\n",  in rxe_srq_chk_init()
    17  attr->max_wr, rxe->attr.max_srq_wr);  in rxe_srq_chk_init()
    22  rxe_dbg_dev(rxe, "max_wr(%d) <= 0\n", attr->max_wr);  in rxe_srq_chk_init()
    29  if (attr->max_sge > rxe->attr.max_srq_sge) {  in rxe_srq_chk_init()
    30  rxe_dbg_dev(rxe, "max_sge(%d) > max_srq_sge(%d)\n",  in rxe_srq_chk_init()
    31  attr->max_sge, rxe->attr.max_srq_sge);  in rxe_srq_chk_init()
    44  int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,  in rxe_srq_from_init() argument
    65  q = rxe_queue_init(rxe, &srq->rq.max_wr, wqe_size,  in rxe_srq_from_init()
    [all …]
rxe_icrc.c
    18  int rxe_icrc_init(struct rxe_dev *rxe)  in rxe_icrc_init() argument
    24  rxe_dbg_dev(rxe, "failed to init crc32 algorithm err: %ld\n",  in rxe_icrc_init()
    29  rxe->tfm = tfm;  in rxe_icrc_init()
    43  static __be32 rxe_crc32(struct rxe_dev *rxe, __be32 crc, void *next, size_t len)  in rxe_crc32() argument
    48  SHASH_DESC_ON_STACK(shash, rxe->tfm);  in rxe_crc32()
    50  shash->tfm = rxe->tfm;  in rxe_crc32()
    54  rxe_dbg_dev(rxe, "failed crc calculation, err: %d\n", err);  in rxe_crc32()
   124  crc = rxe_crc32(pkt->rxe, crc, pshdr, length);  in rxe_icrc_hdr()
   127  crc = rxe_crc32(pkt->rxe, crc, pkt->hdr + RXE_BTH_BYTES,  in rxe_icrc_hdr()
   150  icrc = rxe_crc32(pkt->rxe, icrc, (u8 *)payload_addr(pkt),  in rxe_icrc_check()
    [all …]
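rxe_icrc.c computes the RoCE ICRC by chaining buffers through one running CRC32: rxe_icrc_hdr() folds in a pseudo header and the packet headers, and rxe_icrc_check() folds in the payload, each call feeding the previous value back in. The sketch below shows only that chaining pattern in userspace with zlib's crc32(); it is not a bit-exact ICRC (the real code masks certain header fields first and runs through the kernel crypto shash API), and the buffer contents are made up.

    /* build: cc icrc_chain_sketch.c -lz */
    #include <stdio.h>
    #include <zlib.h>

    int main(void)
    {
            unsigned char pshdr[8]   = { 0 };          /* stand-in pseudo header  */
            unsigned char hdr[12]    = { 1, 2, 3 };    /* stand-in packet headers */
            unsigned char payload[4] = { 9, 9, 9, 9 }; /* stand-in payload        */

            /* Chain the buffers through one running CRC, the way the
             * driver keeps feeding the previous value back into
             * rxe_crc32() for each region of the packet. */
            uLong crc = crc32(0L, Z_NULL, 0);

            crc = crc32(crc, pshdr, sizeof(pshdr));
            crc = crc32(crc, hdr, sizeof(hdr));
            crc = crc32(crc, payload, sizeof(payload));

            printf("crc32 = 0x%08lx\n", crc);
            return 0;
    }
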
rxe_loc.h
    21  int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
    24  int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
    37  struct rxe_mcg *rxe_lookup_mcg(struct rxe_dev *rxe, union ib_gid *mgid);
    62  int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
    92  struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
    98  const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);
   101  int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init);
   102  int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
   107  int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
   152  int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init);
    [all …]
rxe_cq.c
    11  int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,  in rxe_cq_chk_attr() argument
    17  rxe_dbg_dev(rxe, "cqe(%d) <= 0\n", cqe);  in rxe_cq_chk_attr()
    21  if (cqe > rxe->attr.max_cqe) {  in rxe_cq_chk_attr()
    22  rxe_dbg_dev(rxe, "cqe(%d) > max_cqe(%d)\n",  in rxe_cq_chk_attr()
    23  cqe, rxe->attr.max_cqe);  in rxe_cq_chk_attr()
    42  int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,  in rxe_cq_from_init() argument
    50  cq->queue = rxe_queue_init(rxe, &cqe,  in rxe_cq_from_init()
    53  rxe_dbg_dev(rxe, "unable to create cq\n");  in rxe_cq_from_init()
    57  err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,  in rxe_cq_from_init()
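rxe_cq_chk_attr() is a plain range check: reject a requested CQ depth that is non-positive or above the device's advertised max_cqe. A standalone sketch of that check follows; -1 stands in for the kernel's error return style, and the sample limit of 4096 is invented for the example.

    #include <stdio.h>

    /* Shape of the rxe_cq_chk_attr() range check. */
    static int cq_chk_attr(int cqe, int max_cqe)
    {
            if (cqe <= 0) {
                    fprintf(stderr, "cqe(%d) <= 0\n", cqe);
                    return -1;
            }
            if (cqe > max_cqe) {
                    fprintf(stderr, "cqe(%d) > max_cqe(%d)\n", cqe, max_cqe);
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            printf("ok  = %d\n", cq_chk_attr(256, 4096));   /* 0  */
            printf("bad = %d\n", cq_chk_attr(-1, 4096));    /* -1 */
            return 0;
    }
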
rxe_queue.c
    12  int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,  in do_mmap_info() argument
    20  ip = rxe_create_mmap_info(rxe, buf_size, udata, buf);  in do_mmap_info()
    31  spin_lock_bh(&rxe->pending_lock);  in do_mmap_info()
    32  list_add(&ip->pending_mmaps, &rxe->pending_mmaps);  in do_mmap_info()
    33  spin_unlock_bh(&rxe->pending_lock);  in do_mmap_info()
    55  struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,  in rxe_queue_init() argument
    70  q->rxe = rxe;  in rxe_queue_init()
   158  new_q = rxe_queue_init(q->rxe, &num_elem, elem_size, q->type);  in rxe_queue_resize()
   162  err = do_mmap_info(new_q->rxe, outbuf, udata, new_q->buf,  in rxe_queue_resize()
rxe.h
    42  #define rxe_dbg_dev(rxe, fmt, ...) ibdev_dbg(&(rxe)->ib_dev, \  argument
    63  #define rxe_err_dev(rxe, fmt, ...) ibdev_err_ratelimited(&(rxe)->ib_dev, \  argument
    84  #define rxe_info_dev(rxe, fmt, ...) ibdev_info_ratelimited(&(rxe)->ib_dev, \  argument
   140  void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
   142  int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name,
   158  void rxe_port_up(struct rxe_dev *rxe);
   159  void rxe_port_down(struct rxe_dev *rxe);
   160  void rxe_set_port_state(struct rxe_dev *rxe);
rxe_mw.c
    20  struct rxe_dev *rxe = to_rdev(ibmw->device);  in rxe_alloc_mw() local
    25  ret = rxe_add_to_pool(&rxe->mw_pool, mw);  in rxe_alloc_mw()
   169  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);  in rxe_bind_mw() local
   174  mw = rxe_pool_get_index(&rxe->mw_pool, mw_rkey >> 8);  in rxe_bind_mw()
   186  mr = rxe_pool_get_index(&rxe->mr_pool, mr_lkey >> 8);  in rxe_bind_mw()
   260  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);  in rxe_invalidate_mw() local
   264  mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);  in rxe_invalidate_mw()
   292  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);  in rxe_lookup_mw() local
   297  mw = rxe_pool_get_index(&rxe->mw_pool, index);  in rxe_lookup_mw()
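Several lookups here (and in rxe_mr.c and rxe_resp.c below) pass `rkey >> 8` to rxe_pool_get_index(): the upper 24 bits of an rkey or lkey select the pool slot, while the low byte is left as a variable part of the key. A tiny illustration of that split; the field widths follow the `>> 8` shift visible in these hits, and the sample rkey value is made up.

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t rkey  = 0x00012a5b;  /* invented sample rkey      */
            uint32_t index = rkey >> 8;   /* pool index: upper 24 bits */
            uint32_t low   = rkey & 0xff; /* variable part: low byte   */

            printf("pool index = %" PRIu32 ", low byte = 0x%02" PRIx32 "\n",
                   index, low);
            return 0;
    }
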
rxe_av.c
    21  struct rxe_dev *rxe;  in chk_attr() local
    28  rxe = to_rdev(ah->ibah.device);  in chk_attr()
    31  rxe = to_rdev(qp->ibqp.device);  in chk_attr()
    34  port = &rxe->port;  in chk_attr()
   151  ah = rxe_pool_get_index(&pkt->rxe->ah_pool, ah_num);  in rxe_get_av()
rxe_comp.c
   132  rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_SENDER_SCHED);  in rxe_comp_queue_pkt()
   221  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);  in check_ack() local
   304  rxe_counter_inc(rxe, RXE_CNT_RCV_RNR);  in check_ack()
   314  rxe_counter_inc(rxe,  in check_ack()
   442  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);  in do_complete() local
   462  rxe_counter_inc(rxe, RXE_CNT_RDMA_SEND);  in do_complete()
   640  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);  in rxe_completer() local
   784  rxe_counter_inc(rxe,  in rxe_completer()
   793  rxe_counter_inc(rxe, RXE_CNT_RETRY_EXCEEDED);  in rxe_completer()
   816  rxe_counter_inc(rxe,  in rxe_completer()
rxe_pool.h
    32  struct rxe_dev *rxe;  member
    51  void rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
rxe_verbs.h
   350  struct rxe_dev *rxe;  member
   413  static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters index)  in rxe_counter_inc() argument
   415  atomic64_inc(&rxe->stats_counters[index]);  in rxe_counter_inc()
   478  int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name,
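The rxe_counter_inc() hit shows the driver's statistics scheme: an array of 64-bit atomic counters indexed by an enum, bumped with atomic64_inc(). This is why rxe_comp.c and rxe_resp.c can increment counters from hot paths without a lock. A userspace analogue using C11 atomics; the counter names and the struct are invented, and only the pattern matches the driver.

    /* build: cc -std=c11 counters_sketch.c */
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Per-device stats: lock-free 64-bit counters indexed by enum. */
    enum counters { CNT_SENT, CNT_RECV, CNT_MAX };

    struct dev_stats {
            _Atomic uint64_t counters[CNT_MAX];
    };

    static inline void counter_inc(struct dev_stats *s, enum counters i)
    {
            /* Relaxed ordering is enough for a statistics counter. */
            atomic_fetch_add_explicit(&s->counters[i], 1, memory_order_relaxed);
    }

    int main(void)
    {
            struct dev_stats stats = { 0 };

            counter_inc(&stats, CNT_SENT);
            counter_inc(&stats, CNT_SENT);
            printf("sent = %llu\n",
                   (unsigned long long)atomic_load(&stats.counters[CNT_SENT]));
            return 0;
    }
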
rxe_queue.h
    63  struct rxe_dev *rxe;  member
    79  int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
    85  struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
rxe_mr.c
   129  int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,  in rxe_mr_init_user() argument
   139  umem = ib_umem_get(&rxe->ib_dev, start, length, access);  in rxe_mr_init_user()
   620  struct rxe_dev *rxe = to_rdev(pd->ibpd.device);  in lookup_mr() local
   623  mr = rxe_pool_get_index(&rxe->mr_pool, index);  in lookup_mr()
   640  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);  in rxe_invalidate_mr() local
   645  mr = rxe_pool_get_index(&rxe->mr_pool, key >> 8);  in rxe_invalidate_mr()
rxe_resp.c
    74  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);  in check_psn() local
    83  rxe_counter_inc(rxe, RXE_CNT_OUT_OF_SEQ_REQ);  in check_psn()
    87  rxe_counter_inc(rxe, RXE_CNT_DUP_REQ);  in check_psn()
   770  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);  in prepare_ack_packet() local
   782  skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack);  in prepare_ack_packet()
   831  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);  in rxe_recheck_mr() local
   836  mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);  in rxe_recheck_mr()
   853  mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);  in rxe_recheck_mr()
  1061  struct rxe_dev *rxe = to_rdev(qp->ibqp.device);  in do_complete() local
  1080  rxe_counter_inc(rxe, RXE_CNT_RDMA_RECV);  in do_complete()
    [all …]
Makefile
     5  rxe.o \
Kconfig
    28  https://github.com/linux-rdma/rdma-core/blob/master/Documentation/rxe.md
rxe_hdr.h
    15  struct rxe_dev *rxe;  /* device that owns packet */  member
/linux/drivers/infiniband/sw/
Makefile
     3  obj-$(CONFIG_RDMA_RXE) += rxe/
/linux/drivers/infiniband/
Kconfig
   100  source "drivers/infiniband/sw/rxe/Kconfig"