/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2022 Microsoft Corporation. All rights reserved.
 */

#ifndef _MANA_IB_H_
#define _MANA_IB_H_

#include <rdma/ib_verbs.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_umem.h>
#include <rdma/mana-abi.h>
#include <rdma/uverbs_ioctl.h>

#include <net/mana/mana.h>

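/*
 * Bitmap of page sizes the hardware supports for DMA regions; used to pick
 * the best page size for a umem (e.g. with ib_umem_find_best_pgsz()).
 */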
#define PAGE_SZ_BM                                                             \
	(SZ_4K | SZ_8K | SZ_16K | SZ_32K | SZ_64K | SZ_128K | SZ_256K |        \
	 SZ_512K | SZ_1M | SZ_2M)

/* MANA doesn't have any limit for MR size */
#define MANA_IB_MAX_MR_SIZE	U64_MAX

/*
 * The hardware limit for the number of MRs is greater than the maximum number
 * of MRs that can be represented in 24 bits.
 */
#define MANA_IB_MAX_MR		0xFFFFFFu

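/* Adapter limits queried from the hardware (see mana_ib_gd_query_adapter_caps()). */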
struct mana_ib_adapter_caps {
	u32 max_sq_id;
	u32 max_rq_id;
	u32 max_cq_id;
	u32 max_qp_count;
	u32 max_cq_count;
	u32 max_mr_count;
	u32 max_pd_count;
	u32 max_inbound_read_limit;
	u32 max_outbound_read_limit;
	u32 mw_count;
	u32 max_srq_count;
	u32 max_qp_wr;
	u32 max_send_sge_count;
	u32 max_recv_sge_count;
	u32 max_inline_data_size;
};

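/*
 * A hardware queue created on user memory: the pinned umem backing it, the
 * GDMA DMA region covering that memory, and its hardware queue id.
 */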
struct mana_ib_queue {
	struct ib_umem *umem;
	u64 gdma_region;
	u64 id;
};

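/* Per-device state of the MANA RNIC; embeds the core ib_device. */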
struct mana_ib_dev {
	struct ib_device ib_dev;
	struct gdma_dev *gdma_dev;
	mana_handle_t adapter_handle;
	struct gdma_queue *fatal_err_eq;
	struct gdma_queue **eqs;
	struct mana_ib_adapter_caps adapter_caps;
};

struct mana_ib_wq {
	struct ib_wq ibwq;
	struct mana_ib_queue queue;
	int wqe;
	u32 wq_buf_size;
	mana_handle_t rx_object;
};

struct mana_ib_pd {
	struct ib_pd ibpd;
	u32 pdn;
	mana_handle_t pd_handle;

	/* Mutex for sharing access to vport_use_count */
	struct mutex vport_mutex;
	int vport_use_count;

	bool tx_shortform_allowed;
	u32 tx_vp_offset;
};

struct mana_ib_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	mana_handle_t mr_handle;
};

struct mana_ib_cq {
	struct ib_cq ibcq;
	struct mana_ib_queue queue;
	int cqe;
	u32 comp_vector;
	mana_handle_t cq_handle;
};

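/*
 * raw_sq holds the send queue of a raw-packet QP; its receive queues are
 * created separately as WQs and grouped through an RWQ indirection table.
 */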
struct mana_ib_qp {
	struct ib_qp ibqp;

	mana_handle_t qp_handle;
	struct mana_ib_queue raw_sq;

	/* The port on the IB device, starting with 1 */
	u32 port;
};

struct mana_ib_ucontext {
	struct ib_ucontext ibucontext;
	u32 doorbell;
};

struct mana_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_ind_table;
};

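/* Request codes for commands sent to the hardware over the GDMA channel. */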
enum mana_ib_command_code {
	MANA_IB_GET_ADAPTER_CAP = 0x30001,
	MANA_IB_CREATE_ADAPTER  = 0x30002,
	MANA_IB_DESTROY_ADAPTER = 0x30003,
	MANA_IB_CONFIG_IP_ADDR  = 0x30004,
	MANA_IB_CONFIG_MAC_ADDR = 0x30005,
	MANA_IB_CREATE_CQ       = 0x30008,
	MANA_IB_DESTROY_CQ      = 0x30009,
};

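/*
 * The request/response structures below are passed to the hardware as-is,
 * hence the "HW Data" annotations; their layout must match the device ABI.
 */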
struct mana_ib_query_adapter_caps_req {
	struct gdma_req_hdr hdr;
}; /* HW Data */

struct mana_ib_query_adapter_caps_resp {
	struct gdma_resp_hdr hdr;
	u32 max_sq_id;
	u32 max_rq_id;
	u32 max_cq_id;
	u32 max_qp_count;
	u32 max_cq_count;
	u32 max_mr_count;
	u32 max_pd_count;
	u32 max_inbound_read_limit;
	u32 max_outbound_read_limit;
	u32 mw_count;
	u32 max_srq_count;
	u32 max_requester_sq_size;
	u32 max_responder_sq_size;
	u32 max_requester_rq_size;
	u32 max_responder_rq_size;
	u32 max_send_sge_count;
	u32 max_recv_sge_count;
	u32 max_inline_data_size;
}; /* HW Data */

struct mana_rnic_create_adapter_req {
	struct gdma_req_hdr hdr;
	u32 notify_eq_id;
	u32 reserved;
	u64 feature_flags;
}; /* HW Data */

struct mana_rnic_create_adapter_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t adapter;
}; /* HW Data */

struct mana_rnic_destroy_adapter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t adapter;
}; /* HW Data */

struct mana_rnic_destroy_adapter_resp {
	struct gdma_resp_hdr hdr;
}; /* HW Data */

enum mana_ib_addr_op {
	ADDR_OP_ADD = 1,
	ADDR_OP_REMOVE = 2,
};

enum sgid_entry_type {
	SGID_TYPE_IPV4 = 1,
	SGID_TYPE_IPV6 = 2,
};

struct mana_rnic_config_addr_req {
	struct gdma_req_hdr hdr;
	mana_handle_t adapter;
	enum mana_ib_addr_op op;
	enum sgid_entry_type sgid_type;
	u8 ip_addr[16];
}; /* HW Data */

struct mana_rnic_config_addr_resp {
	struct gdma_resp_hdr hdr;
}; /* HW Data */

struct mana_rnic_config_mac_addr_req {
	struct gdma_req_hdr hdr;
	mana_handle_t adapter;
	enum mana_ib_addr_op op;
	u8 mac_addr[ETH_ALEN];
	u8 reserved[6];
}; /* HW Data */

struct mana_rnic_config_mac_addr_resp {
	struct gdma_resp_hdr hdr;
}; /* HW Data */

struct mana_rnic_create_cq_req {
	struct gdma_req_hdr hdr;
	mana_handle_t adapter;
	u64 gdma_region;
	u32 eq_id;
	u32 doorbell_page;
}; /* HW Data */

struct mana_rnic_create_cq_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t cq_handle;
	u32 cq_id;
	u32 reserved;
}; /* HW Data */

struct mana_rnic_destroy_cq_req {
	struct gdma_req_hdr hdr;
	mana_handle_t adapter;
	mana_handle_t cq_handle;
}; /* HW Data */

struct mana_rnic_destroy_cq_resp {
	struct gdma_resp_hdr hdr;
}; /* HW Data */

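/* Return the GDMA context backing this IB device. */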
static inline struct gdma_context *mdev_to_gc(struct mana_ib_dev *mdev)
{
	return mdev->gdma_dev->gdma_context;
}

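/*
 * Map a 1-based IB port number to its Ethernet net_device; returns NULL if
 * the port is out of range.
 */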
static inline struct net_device *mana_ib_get_netdev(struct ib_device *ibdev, u32 port)
{
	struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_context *mc = gc->mana.driver_data;

	if (port < 1 || port > mc->num_ports)
		return NULL;
	return mc->ports[port - 1];
}

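/*
 * Copy @size bytes from @src to @dst in reverse byte order, e.g. when the
 * hardware expects an address with its byte order flipped.
 */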
static inline void copy_in_reverse(u8 *dst, const u8 *src, u32 size)
{
	u32 i;

	for (i = 0; i < size; i++)
		dst[size - 1 - i] = src[i];
}

int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq);
void mana_ib_remove_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq);

int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
					  mana_handle_t *gdma_region);

int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
			      mana_handle_t *gdma_region, u64 virt);

int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
				  mana_handle_t gdma_region);

int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
			 struct mana_ib_queue *queue);
void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue);

struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);

int mana_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);

int mana_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata);

int mana_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
				 struct ib_rwq_ind_table_init_attr *init_attr,
				 struct ib_udata *udata);

int mana_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl);

struct ib_mr *mana_ib_get_dma_mr(struct ib_pd *ibpd, int access_flags);

struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 iova, int access_flags,
				  struct ib_udata *udata);

int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);

int mana_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr,
		      struct ib_udata *udata);

int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);

int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);

int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port_id,
		      struct mana_ib_pd *pd, u32 doorbell_id);
void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
			 u32 port);

int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata);

int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);

int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);

int mana_ib_alloc_ucontext(struct ib_ucontext *ibcontext,
			   struct ib_udata *udata);
void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext);

int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma);

int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
			       struct ib_port_immutable *immutable);
int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
			 struct ib_udata *uhw);
int mana_ib_query_port(struct ib_device *ibdev, u32 port,
		       struct ib_port_attr *props);
int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
		      union ib_gid *gid);

void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext);

int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *mdev);

int mana_ib_create_eqs(struct mana_ib_dev *mdev);

void mana_ib_destroy_eqs(struct mana_ib_dev *mdev);

int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev);

int mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev *mdev);

int mana_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey);

enum rdma_link_layer mana_ib_get_link_layer(struct ib_device *device, u32 port_num);

int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context);

int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context);

int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8 *mac);

int mana_ib_gd_create_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq, u32 doorbell);

int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq);
#endif