Lines Matching refs:rxe

31 static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid) in rxe_mcast_add() argument
37 ndev = rxe_ib_device_get_netdev(&rxe->ib_dev); in rxe_mcast_add()
56 static int rxe_mcast_del(struct rxe_dev *rxe, union ib_gid *mgid) in rxe_mcast_del() argument
62 ndev = rxe_ib_device_get_netdev(&rxe->ib_dev); in rxe_mcast_del()
83 struct rb_root *tree = &mcg->rxe->mcg_tree; in __rxe_insert_mcg()
112 rb_erase(&mcg->node, &mcg->rxe->mcg_tree); in __rxe_remove_mcg()
123 static struct rxe_mcg *__rxe_lookup_mcg(struct rxe_dev *rxe, in __rxe_lookup_mcg() argument
126 struct rb_root *tree = &rxe->mcg_tree; in __rxe_lookup_mcg()
161 struct rxe_mcg *rxe_lookup_mcg(struct rxe_dev *rxe, union ib_gid *mgid) in rxe_lookup_mcg() argument
165 spin_lock_bh(&rxe->mcg_lock); in rxe_lookup_mcg()
166 mcg = __rxe_lookup_mcg(rxe, mgid); in rxe_lookup_mcg()
167 spin_unlock_bh(&rxe->mcg_lock); in rxe_lookup_mcg()
180 static void __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid, in __rxe_init_mcg() argument
186 mcg->rxe = rxe; in __rxe_init_mcg()
206 static struct rxe_mcg *rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid) in rxe_get_mcg() argument
211 if (rxe->attr.max_mcast_grp == 0) in rxe_get_mcg()
215 mcg = rxe_lookup_mcg(rxe, mgid); in rxe_get_mcg()
220 if (atomic_inc_return(&rxe->mcg_num) > rxe->attr.max_mcast_grp) { in rxe_get_mcg()
232 spin_lock_bh(&rxe->mcg_lock); in rxe_get_mcg()
234 tmp = __rxe_lookup_mcg(rxe, mgid); in rxe_get_mcg()
236 spin_unlock_bh(&rxe->mcg_lock); in rxe_get_mcg()
237 atomic_dec(&rxe->mcg_num); in rxe_get_mcg()
242 __rxe_init_mcg(rxe, mgid, mcg); in rxe_get_mcg()
243 spin_unlock_bh(&rxe->mcg_lock); in rxe_get_mcg()
246 err = rxe_mcast_add(rxe, mgid); in rxe_get_mcg()
252 atomic_dec(&rxe->mcg_num); in rxe_get_mcg()
276 struct rxe_dev *rxe = mcg->rxe; in __rxe_destroy_mcg() local
282 atomic_dec(&rxe->mcg_num); in __rxe_destroy_mcg()
294 rxe_mcast_del(mcg->rxe, &mcg->mgid); in rxe_destroy_mcg()
296 spin_lock_bh(&mcg->rxe->mcg_lock); in rxe_destroy_mcg()
298 spin_unlock_bh(&mcg->rxe->mcg_lock); in rxe_destroy_mcg()
315 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in __rxe_init_mca() local
318 n = atomic_inc_return(&rxe->mcg_attach); in __rxe_init_mca()
319 if (n > rxe->attr.max_total_mcast_qp_attach) { in __rxe_init_mca()
320 atomic_dec(&rxe->mcg_attach); in __rxe_init_mca()
325 if (n > rxe->attr.max_mcast_qp_attach) { in __rxe_init_mca()
327 atomic_dec(&rxe->mcg_attach); in __rxe_init_mca()
351 struct rxe_dev *rxe = mcg->rxe; in rxe_attach_mcg() local
356 spin_lock_bh(&rxe->mcg_lock); in rxe_attach_mcg()
359 spin_unlock_bh(&rxe->mcg_lock); in rxe_attach_mcg()
363 spin_unlock_bh(&rxe->mcg_lock); in rxe_attach_mcg()
370 spin_lock_bh(&rxe->mcg_lock); in rxe_attach_mcg()
384 spin_unlock_bh(&rxe->mcg_lock); in rxe_attach_mcg()
400 atomic_dec(&mcg->rxe->mcg_attach); in __rxe_cleanup_mca()
416 struct rxe_dev *rxe = mcg->rxe; in rxe_detach_mcg() local
419 spin_lock_bh(&rxe->mcg_lock); in rxe_detach_mcg()
433 spin_unlock_bh(&rxe->mcg_lock); in rxe_detach_mcg()
439 spin_unlock_bh(&rxe->mcg_lock); in rxe_detach_mcg()
454 struct rxe_dev *rxe = to_rdev(ibqp->device); in rxe_attach_mcast() local
459 mcg = rxe_get_mcg(rxe, mgid); in rxe_attach_mcast()
484 struct rxe_dev *rxe = to_rdev(ibqp->device); in rxe_detach_mcast() local
489 mcg = rxe_lookup_mcg(rxe, mgid); in rxe_detach_mcast()
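
Taken together, the hits above trace the rxe driver's multicast bookkeeping: group objects are kept in a tree (rxe->mcg_tree) guarded by rxe->mcg_lock, and the atomic_inc_return()/atomic_dec() pairs on rxe->mcg_num and rxe->mcg_attach enforce the limits in rxe->attr. The sketch below is a minimal, user-space illustration of just that optimistic "increment, check the limit, roll back" admission pattern; it uses C11 atomics and made-up names (reserve_group, release_group, MAX_GROUPS), not the driver's own code.

	#include <stdatomic.h>
	#include <stdio.h>

	/* Illustrative limit; the driver reads it from rxe->attr.max_mcast_grp. */
	#define MAX_GROUPS 4

	static atomic_int mcg_num;	/* counts currently reserved groups */

	/* Try to reserve a slot for a new multicast group.
	 * Returns 0 on success, -1 if the limit would be exceeded. */
	static int reserve_group(void)
	{
		/* Optimistically increment, then test the post-increment value,
		 * mirroring atomic_inc_return(&rxe->mcg_num) > rxe->attr.max_mcast_grp. */
		if (atomic_fetch_add(&mcg_num, 1) + 1 > MAX_GROUPS) {
			atomic_fetch_sub(&mcg_num, 1);	/* roll back on overflow */
			return -1;
		}
		return 0;
	}

	static void release_group(void)
	{
		atomic_fetch_sub(&mcg_num, 1);	/* mirrors atomic_dec(&rxe->mcg_num) */
	}

	int main(void)
	{
		for (int i = 0; i < 6; i++)
			printf("reserve %d -> %s\n", i,
			       reserve_group() ? "over limit" : "ok");
		release_group();
		printf("after one release, reserve -> %s\n",
		       reserve_group() ? "over limit" : "ok");
		return 0;
	}

The listing also shows the driver re-checking the tree under rxe->mcg_lock after allocating a new group (the second __rxe_lookup_mcg() call around line 234) so that a concurrent creator wins and the loser undoes its counter bump with atomic_dec(&rxe->mcg_num); the rollback branch in the sketch plays the same role.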