xref: /freebsd/sys/ofed/drivers/infiniband/core/core_priv.h (revision 95ee2897e98f5d444f26ed2334cc7c439f9c16c6)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
3  *
4  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #ifndef _CORE_PRIV_H
36 #define _CORE_PRIV_H
37 
38 #include <linux/list.h>
39 #include <linux/spinlock.h>
40 
41 #include <rdma/ib_verbs.h>
42 
43 #include <net/if_vlan_var.h>
44 
45 /* Total number of ports combined across all struct ib_devices's */
46 #define RDMA_MAX_PORTS 8192
47 
#ifdef CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS
int cma_configfs_init(void);
void cma_configfs_exit(void);
#else
/*
 * Stubs used when configfs support for the RDMA CM is compiled out:
 * init reports success so callers need no special case, exit is a no-op.
 */
static inline int cma_configfs_init(void)
{
	return 0;
}

static inline void cma_configfs_exit(void)
{
}
#endif
61 struct cma_device;
62 void cma_ref_dev(struct cma_device *cma_dev);
63 void cma_deref_dev(struct cma_device *cma_dev);
64 typedef bool (*cma_device_filter)(struct ib_device *, void *);
65 struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter	filter,
66 					     void		*cookie);
67 int cma_get_default_gid_type(struct cma_device *cma_dev,
68 			     unsigned int port);
69 int cma_set_default_gid_type(struct cma_device *cma_dev,
70 			     unsigned int port,
71 			     enum ib_gid_type default_gid_type);
72 struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev);
73 
74 int  ib_device_register_sysfs(struct ib_device *device,
75 			      int (*port_callback)(struct ib_device *,
76 						   u8, struct kobject *));
77 void ib_device_unregister_sysfs(struct ib_device *device);
78 
79 void ib_cache_setup(void);
80 void ib_cache_cleanup(void);
81 
82 typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
83 	      if_t idev, void *cookie);
84 
85 typedef int (*roce_netdev_filter)(struct ib_device *device, u8 port,
86 	     if_t idev, void *cookie);
87 
88 void ib_enum_roce_netdev(struct ib_device *ib_dev,
89 			 roce_netdev_filter filter,
90 			 void *filter_cookie,
91 			 roce_netdev_callback cb,
92 			 void *cookie);
93 void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
94 			      void *filter_cookie,
95 			      roce_netdev_callback cb,
96 			      void *cookie);
97 
98 enum ib_cache_gid_default_mode {
99 	IB_CACHE_GID_DEFAULT_MODE_SET,
100 	IB_CACHE_GID_DEFAULT_MODE_DELETE
101 };
102 
103 int ib_cache_gid_parse_type_str(const char *buf);
104 
105 const char *ib_cache_gid_type_str(enum ib_gid_type gid_type);
106 
107 void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
108 				  if_t ndev,
109 				  unsigned long gid_type_mask,
110 				  enum ib_cache_gid_default_mode mode);
111 
112 int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
113 		     union ib_gid *gid, struct ib_gid_attr *attr);
114 
115 int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
116 		     union ib_gid *gid, struct ib_gid_attr *attr);
117 
118 int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
119 				     if_t ndev);
120 void ib_cache_gid_del_all_by_netdev(if_t ndev);
121 
122 int roce_gid_mgmt_init(void);
123 void roce_gid_mgmt_cleanup(void);
124 
125 int roce_rescan_device(struct ib_device *ib_dev);
126 unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port);
127 
128 int ib_cache_setup_one(struct ib_device *device);
129 void ib_cache_cleanup_one(struct ib_device *device);
130 void ib_cache_release_one(struct ib_device *device);
131 
132 #define	ib_rdmacg_try_charge(...) ({ 0; })
133 
134 int addr_init(void);
135 void addr_cleanup(void);
136 
137 int ib_mad_init(void);
138 void ib_mad_cleanup(void);
139 
140 int ib_sa_init(void);
141 void ib_sa_cleanup(void);
142 
143 int ib_port_register_module_stat(struct ib_device *device, u8 port_num,
144 				 struct kobject *kobj, struct kobj_type *ktype,
145 				 const char *name);
146 void ib_port_unregister_module_stat(struct kobject *kobj);
147 
_ib_create_qp(struct ib_device * dev,struct ib_pd * pd,struct ib_qp_init_attr * attr,struct ib_udata * udata,struct ib_uqp_object * uobj)148 static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
149 					  struct ib_pd *pd,
150 					  struct ib_qp_init_attr *attr,
151 					  struct ib_udata *udata,
152 					  struct ib_uqp_object *uobj)
153 {
154 	struct ib_qp *qp;
155 
156 	if (!dev->create_qp)
157 		return ERR_PTR(-EOPNOTSUPP);
158 
159 	qp = dev->create_qp(pd, attr, udata);
160 	if (IS_ERR(qp))
161 		return qp;
162 
163 	qp->device = dev;
164 	qp->pd = pd;
165 	qp->uobject = uobj;
166 	qp->real_qp = qp;
167 
168 	qp->qp_type = attr->qp_type;
169 	qp->rwq_ind_tbl = attr->rwq_ind_tbl;
170 	qp->send_cq = attr->send_cq;
171 	qp->recv_cq = attr->recv_cq;
172 	qp->srq = attr->srq;
173 	qp->rwq_ind_tbl = attr->rwq_ind_tbl;
174 	qp->event_handler = attr->event_handler;
175 
176 	atomic_set(&qp->usecnt, 0);
177 	spin_lock_init(&qp->mr_lock);
178 
179 	return qp;
180 }
181 
/*
 * Private bookkeeping for a user mmap of device memory.
 * NOTE(review): list membership and lifetime appear to be managed by
 * rdma_umap_priv_init() and the uverbs mmap code — confirm at the
 * call sites before relying on this.
 */
struct rdma_umap_priv {
	struct vm_area_struct *vma;		/* VMA this entry describes */
	struct list_head list;			/* linkage into the owner's umap list */
	struct rdma_user_mmap_entry *entry;	/* associated user mmap entry */
};
187 
188 void rdma_umap_priv_init(struct rdma_umap_priv *priv,
189 			 struct vm_area_struct *vma,
190 			 struct rdma_user_mmap_entry *entry);
191 
192 #endif /* _CORE_PRIV_H */
193