cma.c: diff between ae6ba10d5090fc7e9095eaef3dcf06ba016725a6 (old) and ea1075edcbab7d92f4e4ccf5490043f796bf78be (new)
 /*
  * Copyright (c) 2005 Voltaire Inc. All rights reserved.
  * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
  * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
  * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU

--- 645 unchanged lines hidden ---

 static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv)
 {
 	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
 	const struct ib_gid_attr *sgid_attr;
 	union ib_gid gid, iboe_gid, *gidp;
 	struct cma_device *cma_dev;
 	enum ib_gid_type gid_type;
 	int ret = -ENODEV;
-	u8 port;
+	unsigned int port;
 
 	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
 	    id_priv->id.ps == RDMA_PS_IPOIB)
 		return -EINVAL;
 
 	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
 		    &iboe_gid);
 
 	memcpy(&gid, dev_addr->src_dev_addr +
 	       rdma_addr_gid_offset(dev_addr), sizeof(gid));
 
 	mutex_lock(&lock);
 	list_for_each_entry(cma_dev, &dev_list, list) {
-		for (port = rdma_start_port(cma_dev->device);
-		     port <= rdma_end_port(cma_dev->device); port++) {
+		rdma_for_each_port (cma_dev->device, port) {
 			gidp = rdma_protocol_roce(cma_dev->device, port) ?
 			       &iboe_gid : &gid;
 			gid_type = cma_dev->default_gid_type[port - 1];
 			sgid_attr = cma_validate_port(cma_dev->device, port,
 						      gid_type, gidp, id_priv);
 			if (!IS_ERR(sgid_attr)) {
 				id_priv->id.port_num = port;
 				cma_bind_sgid_attr(id_priv, sgid_attr);
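Note: the open-coded port loop becomes the rdma_for_each_port() iterator added by this series, and port widens from u8 to unsigned int because the macro type-checks its iterator. A sketch of the macro, paraphrased from the same patch set's include/rdma/ib_verbs.h addition (exact layout may differ):

	/* Iterate over every valid port number of a device. The
	 * BUILD_BUG_ON_ZERO() breaks the build unless the iterator is
	 * exactly 'unsigned int', which is why 'u8 port' had to change. */
	#define rdma_for_each_port(device, iter)                              \
		for (iter = rdma_start_port(device +                          \
				BUILD_BUG_ON_ZERO(!__same_type(unsigned int,  \
								iter)));      \
		     iter <= rdma_end_port(device); (iter)++)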

--- 197 unchanged lines hidden ---

 	rdma_restrack_set_task(&id_priv->res, caller);
 	id_priv->res.type = RDMA_RESTRACK_CM_ID;
 	id_priv->state = RDMA_CM_IDLE;
 	id_priv->id.context = context;
 	id_priv->id.event_handler = event_handler;
 	id_priv->id.ps = ps;
 	id_priv->id.qp_type = qp_type;
 	id_priv->tos_set = false;
+	id_priv->timeout_set = false;
 	id_priv->gid_type = IB_GID_TYPE_IB;
 	spin_lock_init(&id_priv->lock);
 	mutex_init(&id_priv->qp_mutex);
 	init_completion(&id_priv->comp);
 	atomic_set(&id_priv->refcount, 1);
 	mutex_init(&id_priv->handler_mutex);
 	INIT_LIST_HEAD(&id_priv->listen_list);
 	INIT_LIST_HEAD(&id_priv->mc_list);

--- 226 unchanged lines hidden ---

 		} else
 			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
 						 qp_attr_mask);
 		qp_attr->port_num = id_priv->id.port_num;
 		*qp_attr_mask |= IB_QP_PORT;
 	} else
 		ret = -ENOSYS;
 
+	if ((*qp_attr_mask & IB_QP_TIMEOUT) && id_priv->timeout_set)
+		qp_attr->timeout = id_priv->timeout;
+
 	return ret;
 }
 EXPORT_SYMBOL(rdma_init_qp_attr);
 
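Note: with the two added lines, a timeout configured through the new rdma_set_ack_timeout() (further down in this diff) overrides the attribute whenever IB_QP_TIMEOUT is present in the returned mask. A minimal caller sketch, assuming an established RC rdma_cm_id named id (hypothetical variable names):

	struct ib_qp_attr qp_attr = { .qp_state = IB_QPS_RTS };
	int qp_attr_mask, ret;

	ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
	if (!ret)	/* qp_attr.timeout now carries any user-set value */
		ret = ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);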
 static inline bool cma_zero_addr(const struct sockaddr *addr)
 {
 	switch (addr->sa_family) {
 	case AF_INET:
--- 1264 unchanged lines hidden ---

 
 	id = iw_create_cm_id(id_priv->id.device,
 			     iw_conn_req_handler,
 			     id_priv);
 	if (IS_ERR(id))
 		return PTR_ERR(id);
 
 	id->tos = id_priv->tos;
+	id->tos_set = id_priv->tos_set;
 	id_priv->cm_id.iw = id;
 
 	memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
 	       rdma_addr_size(cma_src_addr(id_priv)));
 
 	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);
 
 	if (ret) {
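Note: this hunk and two later ones propagate tos_set alongside tos to iWARP cm_ids, per-device child listen ids, and the connect path. Without the flag, a deliberately configured service type of 0 was indistinguishable downstream from "never set". Hedged usage sketch (hypothetical id and backlog):

	rdma_set_service_type(id, 0);	/* explicitly request TOS/DSCP 0 */
	ret = rdma_listen(id, backlog);	/* listeners now inherit tos_set too */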

--- 36 unchanged lines hidden ---

 	memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
 	       rdma_addr_size(cma_src_addr(id_priv)));
 
 	_cma_attach_to_dev(dev_id_priv, cma_dev);
 	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
 	atomic_inc(&id_priv->refcount);
 	dev_id_priv->internal_id = 1;
 	dev_id_priv->afonly = id_priv->afonly;
+	dev_id_priv->tos_set = id_priv->tos_set;
+	dev_id_priv->tos = id_priv->tos;
 
 	ret = rdma_listen(id, id_priv->backlog);
 	if (ret)
 		dev_warn(&cma_dev->device->dev,
 			 "RDMA CMA: cma_listen_on_dev, error %d\n", ret);
 }
 
 static void cma_listen_on_all(struct rdma_id_private *id_priv)

--- 12 unchanged lines hidden ---

 	struct rdma_id_private *id_priv;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
 	id_priv->tos = (u8) tos;
 	id_priv->tos_set = true;
 }
 EXPORT_SYMBOL(rdma_set_service_type);
 
+/**
+ * rdma_set_ack_timeout() - Set the ack timeout of QP associated
+ *			    with a connection identifier.
+ * @id: Communication identifier associated with the QP.
+ * @timeout: Ack timeout to set for the QP, expressed as 4.096 * 2^(timeout) usec.
+ *
+ * This function should be called before rdma_connect() on the active side,
+ * and before rdma_accept() on the passive side. It applies to the primary
+ * path only. The timeout affects the local side of the QP; it is not
+ * negotiated with the remote side, and zero disables the timer.
+ *
+ * Return: 0 for success
+ */
+int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout)
+{
+	struct rdma_id_private *id_priv;
+
+	if (id->qp_type != IB_QPT_RC)
+		return -EINVAL;
+
+	id_priv = container_of(id, struct rdma_id_private, id);
+	id_priv->timeout = timeout;
+	id_priv->timeout_set = true;
+
+	return 0;
+}
+EXPORT_SYMBOL(rdma_set_ack_timeout);
+
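Note: a worked example of the new API. With timeout = 14 the local ACK timer is 4.096 us * 2^14 ~= 67.1 ms; a value of 0 disables the timer, and non-RC QPs are rejected with -EINVAL. Sketch, assuming an RC id about to connect (hypothetical conn_param):

	ret = rdma_set_ack_timeout(id, 14);	/* ~67 ms local ACK timeout */
	if (!ret)
		ret = rdma_connect(id, &conn_param);	/* must follow, not precede */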
 static void cma_query_handler(int status, struct sa_path_rec *path_rec,
 			      void *context)
 {
 	struct cma_work *work = context;
 	struct rdma_route *route;
 
 	route = &work->id->id.route;
 

--- 460 unchanged lines hidden ---

 	return ret;
 }
 
 static void addr_handler(int status, struct sockaddr *src_addr,
 			 struct rdma_dev_addr *dev_addr, void *context)
 {
 	struct rdma_id_private *id_priv = context;
 	struct rdma_cm_event event = {};
+	struct sockaddr *addr;
+	struct sockaddr_storage old_addr;
 
 	mutex_lock(&id_priv->handler_mutex);
 	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
 			   RDMA_CM_ADDR_RESOLVED))
 		goto out;
 
-	memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
+	/*
+	 * Store the previous src address, so that if we fail to acquire a
+	 * matching rdma device, the old address can be restored, which helps
+	 * to cancel the cma listen operation correctly.
+	 */
+	addr = cma_src_addr(id_priv);
+	memcpy(&old_addr, addr, rdma_addr_size(addr));
+	memcpy(addr, src_addr, rdma_addr_size(src_addr));
 	if (!status && !id_priv->cma_dev) {
 		status = cma_acquire_dev_by_src_ip(id_priv);
 		if (status)
 			pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
 					     status);
 	} else {
 		pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
 	}
 
 	if (status) {
+		memcpy(addr, &old_addr,
+		       rdma_addr_size((struct sockaddr *)&old_addr));
 		if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
 				   RDMA_CM_ADDR_BOUND))
 			goto out;
 		event.event = RDMA_CM_EVENT_ADDR_ERROR;
 		event.status = status;
 	} else
 		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
 
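Note: addr_handler() previously overwrote the bound source address unconditionally; if cma_acquire_dev_by_src_ip() then failed, the id no longer matched the address it was bound with, so a later cancellation of the cma listen could miss it. The snapshot in old_addr lets the failure path restore the original address before reporting RDMA_CM_EVENT_ADDR_ERROR.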

--- 799 unchanged lines hidden ---

 	int ret;
 	struct iw_cm_conn_param iw_param;
 
 	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
 	if (IS_ERR(cm_id))
 		return PTR_ERR(cm_id);
 
 	cm_id->tos = id_priv->tos;
+	cm_id->tos_set = id_priv->tos_set;
 	id_priv->cm_id.iw = cm_id;
 
 	memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
 	       rdma_addr_size(cma_src_addr(id_priv)));
 	memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
 	       rdma_addr_size(cma_dst_addr(id_priv)));
 
 	ret = cma_modify_qp_rtr(id_priv, conn_param);

--- 687 unchanged lines hidden ---

 		goto free_cma_dev;
 
 	cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt,
 					    sizeof(*cma_dev->default_roce_tos),
 					    GFP_KERNEL);
 	if (!cma_dev->default_roce_tos)
 		goto free_gid_type;
 
-	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
+	rdma_for_each_port (device, i) {
 		supported_gids = roce_gid_type_mask_support(device, i);
 		WARN_ON(!supported_gids);
 		if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE))
 			cma_dev->default_gid_type[i - rdma_start_port(device)] =
 				CMA_PREFERRED_ROCE_GID_TYPE;
 		else
 			cma_dev->default_gid_type[i - rdma_start_port(device)] =
 				find_first_bit(&supported_gids, BITS_PER_LONG);
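Note: same conversion as in cma_acquire_dev_by_src_ip() above. rdma_for_each_port() still iterates from rdma_start_port() (normally 1, or 0 for switch devices), so the per-port arrays sized by phys_port_cnt keep their explicit i - rdma_start_port(device) offset.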

--- 87 unchanged lines hidden ---

 	mutex_unlock(&lock);
 
 	cma_process_remove(cma_dev);
 	kfree(cma_dev->default_roce_tos);
 	kfree(cma_dev->default_gid_type);
 	kfree(cma_dev);
 }
 
-static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
-{
-	struct nlmsghdr *nlh;
-	struct rdma_cm_id_stats *id_stats;
-	struct rdma_id_private *id_priv;
-	struct rdma_cm_id *id = NULL;
-	struct cma_device *cma_dev;
-	int i_dev = 0, i_id = 0;
-
-	/*
-	 * We export all of the IDs as a sequence of messages. Each
-	 * ID gets its own netlink message.
-	 */
-	mutex_lock(&lock);
-
-	list_for_each_entry(cma_dev, &dev_list, list) {
-		if (i_dev < cb->args[0]) {
-			i_dev++;
-			continue;
-		}
-
-		i_id = 0;
-		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
-			if (i_id < cb->args[1]) {
-				i_id++;
-				continue;
-			}
-
-			id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
-						sizeof *id_stats, RDMA_NL_RDMA_CM,
-						RDMA_NL_RDMA_CM_ID_STATS,
-						NLM_F_MULTI);
-			if (!id_stats)
-				goto out;
-
-			memset(id_stats, 0, sizeof *id_stats);
-			id = &id_priv->id;
-			id_stats->node_type = id->route.addr.dev_addr.dev_type;
-			id_stats->port_num = id->port_num;
-			id_stats->bound_dev_if =
-				id->route.addr.dev_addr.bound_dev_if;
-
-			if (ibnl_put_attr(skb, nlh,
-					  rdma_addr_size(cma_src_addr(id_priv)),
-					  cma_src_addr(id_priv),
-					  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
-				goto out;
-			if (ibnl_put_attr(skb, nlh,
-					  rdma_addr_size(cma_dst_addr(id_priv)),
-					  cma_dst_addr(id_priv),
-					  RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
-				goto out;
-
-			id_stats->pid = task_pid_vnr(id_priv->res.task);
-			id_stats->port_space = id->ps;
-			id_stats->cm_state = id_priv->state;
-			id_stats->qp_num = id_priv->qp_num;
-			id_stats->qp_type = id->qp_type;
-
-			i_id++;
-			nlmsg_end(skb, nlh);
-		}
-
-		cb->args[1] = 0;
-		i_dev++;
-	}
-
-out:
-	mutex_unlock(&lock);
-	cb->args[0] = i_dev;
-	cb->args[1] = i_id;
-
-	return skb->len;
-}
-
-static const struct rdma_nl_cbs cma_cb_table[RDMA_NL_RDMA_CM_NUM_OPS] = {
-	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats},
-};
-
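Note: the RDMA_NL_RDMA_CM id-stats dump and its callback table are removed here, apparently leaving that netlink family without users (its rdma_nl_register/unregister calls and MODULE_ALIAS_RDMA_NETLINK go away below). Equivalent per-CM-ID information is exported through resource tracking (see the RDMA_RESTRACK_CM_ID registration earlier in this diff), which the nldev interface, e.g. the iproute2 command rdma resource show cm_id, consumes.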
 static int cma_init_net(struct net *net)
 {
 	struct cma_pernet *pernet = cma_pernet(net);
 
 	idr_init(&pernet->tcp_ps);
 	idr_init(&pernet->udp_ps);
 	idr_init(&pernet->ipoib_ps);
 	idr_init(&pernet->ib_ps);

--- 32 unchanged lines hidden ---

 
 	ib_sa_register_client(&sa_client);
 	register_netdevice_notifier(&cma_nb);
 
 	ret = ib_register_client(&cma_client);
 	if (ret)
 		goto err;
 
-	rdma_nl_register(RDMA_NL_RDMA_CM, cma_cb_table);
 	cma_configfs_init();
 
 	return 0;
 
 err:
 	unregister_netdevice_notifier(&cma_nb);
 	ib_sa_unregister_client(&sa_client);
 err_wq:
 	destroy_workqueue(cma_wq);
 	return ret;
 }
 
 static void __exit cma_cleanup(void)
 {
 	cma_configfs_exit();
-	rdma_nl_unregister(RDMA_NL_RDMA_CM);
 	ib_unregister_client(&cma_client);
 	unregister_netdevice_notifier(&cma_nb);
 	ib_sa_unregister_client(&sa_client);
 	unregister_pernet_subsys(&cma_pernet_operations);
 	destroy_workqueue(cma_wq);
 }
 
-MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_RDMA_CM, 1);
-
 module_init(cma_init);
 module_exit(cma_cleanup);