/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _CORE_PRIV_H
#define _CORE_PRIV_H

#include <linux/list.h>
#include <linux/spinlock.h>

#include <rdma/ib_verbs.h>

#if IS_ENABLED(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS)
int cma_configfs_init(void);
void cma_configfs_exit(void);
#else
static inline int cma_configfs_init(void)
{
	return 0;
}

static inline void cma_configfs_exit(void)
{
}
#endif
struct cma_device;
void cma_ref_dev(struct cma_device *cma_dev);
void cma_deref_dev(struct cma_device *cma_dev);
typedef bool (*cma_device_filter)(struct ib_device *, void *);
struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
					     void *cookie);
int cma_get_default_gid_type(struct cma_device *cma_dev,
			     unsigned int port);
int cma_set_default_gid_type(struct cma_device *cma_dev,
			     unsigned int port,
			     enum ib_gid_type default_gid_type);
struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev);

int ib_device_register_sysfs(struct ib_device *device,
			     int (*port_callback)(struct ib_device *,
						  u8, struct kobject *));
void ib_device_unregister_sysfs(struct ib_device *device);

void ib_cache_setup(void);
void ib_cache_cleanup(void);

typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
	      struct net_device *idev, void *cookie);

typedef int (*roce_netdev_filter)(struct ib_device *device, u8 port,
	     struct net_device *idev, void *cookie);

void ib_enum_roce_netdev(struct ib_device *ib_dev,
			 roce_netdev_filter filter,
			 void *filter_cookie,
			 roce_netdev_callback cb,
			 void *cookie);
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
			      void *filter_cookie,
			      roce_netdev_callback cb,
			      void *cookie);

enum ib_cache_gid_default_mode {
	IB_CACHE_GID_DEFAULT_MODE_SET,
	IB_CACHE_GID_DEFAULT_MODE_DELETE
};

int ib_cache_gid_parse_type_str(const char *buf);

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type);

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
				  struct net_device *ndev,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode);

int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr);

int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr);

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				     struct net_device *ndev);

int roce_gid_mgmt_init(void);
void roce_gid_mgmt_cleanup(void);

int roce_rescan_device(struct ib_device *ib_dev);
unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port);

int ib_cache_setup_one(struct ib_device *device);
void ib_cache_cleanup_one(struct ib_device *device);
void ib_cache_release_one(struct ib_device *device);

static inline bool rdma_is_upper_dev_rcu(struct net_device *dev,
					 struct net_device *upper)
{
	return netdev_has_upper_dev_all_rcu(dev, upper);
}

int addr_init(void);
void addr_cleanup(void);

int ib_mad_init(void);
void ib_mad_cleanup(void);

int ib_sa_init(void);
void ib_sa_cleanup(void);

int ib_nl_handle_resolve_resp(struct sk_buff *skb,
			      struct netlink_callback *cb);
int ib_nl_handle_set_timeout(struct sk_buff *skb,
			     struct netlink_callback *cb);
int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
			     struct netlink_callback *cb);

#endif /* _CORE_PRIV_H */