/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#ifndef RXE_VERBS_H
#define RXE_VERBS_H

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include "rxe_pool.h"
#include "rxe_task.h"
#include "rxe_hw_counters.h"

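/* IBA P_Key comparison: the low 15 bits carry the key itself and bit
 * 15 the membership type (1 = full member). Two P_Keys match when the
 * key bits are equal and nonzero and at least one side is a full
 * member.
 */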
static inline int pkey_match(u16 key1, u16 key2)
{
	return (((key1 & 0x7fff) != 0) &&
		((key1 & 0x7fff) == (key2 & 0x7fff)) &&
		((key1 & 0x8000) || (key2 & 0x8000))) ? 1 : 0;
}

/* Return >0 if psn_a > psn_b
 *	    0 if psn_a == psn_b
 *	   <0 if psn_a < psn_b
 *
 * PSNs are 24 bits wide: shifting the 32-bit difference left by 8
 * moves the sign of the 24-bit distance into bit 31, so the signed
 * result orders PSNs correctly across wraparound.
 */
static inline int psn_compare(u32 psn_a, u32 psn_b)
{
	s32 diff;

	diff = (psn_a - psn_b) << 8;
	return diff;
}

struct rxe_ucontext {
	struct ib_ucontext ibuc;
	struct rxe_pool_elem elem;
};

struct rxe_pd {
	struct ib_pd ibpd;
	struct rxe_pool_elem elem;
};

struct rxe_ah {
	struct ib_ah ibah;
	struct rxe_pool_elem elem;
	struct rxe_av av;
	bool is_user;
	int ah_num;
};

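/* A completion is returned to user space in the ib_uverbs_wc layout
 * and to kernel consumers as an ib_wc; the union lets one queue
 * element hold either format.
 */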
struct rxe_cqe {
	union {
		struct ib_wc ibwc;
		struct ib_uverbs_wc uibwc;
	};
};

struct rxe_cq {
	struct ib_cq ibcq;
	struct rxe_pool_elem elem;
	struct rxe_queue *queue;
	spinlock_t cq_lock;
	u8 notify;
	bool is_user;
	atomic_t num_wq;
};

enum wqe_state {
	wqe_state_posted,
	wqe_state_processing,
	wqe_state_pending,
	wqe_state_done,
	wqe_state_error,
};

struct rxe_sq {
	int max_wr;
	int max_sge;
	int max_inline;
	spinlock_t sq_lock; /* guard queue */
	struct rxe_queue *queue;
};

struct rxe_rq {
	int max_wr;
	int max_sge;
	spinlock_t producer_lock; /* guard queue producer */
	spinlock_t consumer_lock; /* guard queue consumer */
	struct rxe_queue *queue;
};

struct rxe_srq {
	struct ib_srq ibsrq;
	struct rxe_pool_elem elem;
	struct rxe_pd *pd;
	struct rxe_rq rq;
	u32 srq_num;

	int limit;
	int error;
};

struct rxe_req_info {
	int wqe_index;
	u32 psn;
	int opcode;
	atomic_t rd_atomic;
	int wait_fence;
	int need_rd_atomic;
	int wait_psn;
	int need_retry;
	int wait_for_rnr_timer;
	int noack_pkts;
	int again;
};

struct rxe_comp_info {
	u32 psn;
	int opcode;
	int timeout;
	int timeout_retry;
	int started_retry;
	u32 retry_cnt;
	u32 rnr_retry;
};

enum rdatm_res_state {
	rdatm_res_state_next,
	rdatm_res_state_new,
	rdatm_res_state_replay,
};

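/* A responder resource caches the state of an executed RDMA read,
 * atomic or flush response so that a duplicate request can be
 * replayed.
 */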
struct resp_res {
	int type;
	int replay;
	u32 first_psn;
	u32 last_psn;
	u32 cur_psn;
	enum rdatm_res_state state;

	union {
		struct {
			u64 orig_val;
		} atomic;
		struct {
			u64 va_org;
			u32 rkey;
			u32 length;
			u64 va;
			u32 resid;
		} read;
		struct {
			u32 length;
			u64 va;
			u8 type;
			u8 level;
		} flush;
	};
};

struct rxe_resp_info {
	u32 msn;
	u32 psn;
	u32 ack_psn;
	int opcode;
	int drop_msg;
	int goto_error;
	int sent_psn_nak;
	enum ib_wc_status status;
	u8 aeth_syndrome;

	/* Receive only */
	struct rxe_recv_wqe *wqe;

	/* RDMA read / atomic only */
	u64 va;
	u64 offset;
	struct rxe_mr *mr;
	u32 resid;
	u32 rkey;
	u32 length;

	/* SRQ only */
	struct {
		struct rxe_recv_wqe wqe;
		struct ib_sge sge[RXE_MAX_SGE];
	} srq_wqe;

	/* Responder resources. It's a circular list where the oldest
	 * resource is dropped first.
	 */
	struct resp_res *resources;
	unsigned int res_head;
	unsigned int res_tail;
	struct resp_res *res;
};

struct rxe_qp {
	struct ib_qp ibqp;
	struct rxe_pool_elem elem;
	struct ib_qp_attr attr;
	unsigned int valid;
	unsigned int mtu;
	bool is_user;

	struct rxe_pd *pd;
	struct rxe_srq *srq;
	struct rxe_cq *scq;
	struct rxe_cq *rcq;

	enum ib_sig_type sq_sig_type;

	struct rxe_sq sq;
	struct rxe_rq rq;

	struct socket *sk;
	u32 dst_cookie;
	u16 src_port;

	struct rxe_av pri_av;
	struct rxe_av alt_av;

	atomic_t mcg_num;

	struct sk_buff_head req_pkts;
	struct sk_buff_head resp_pkts;

	struct rxe_task send_task;
	struct rxe_task recv_task;

	struct rxe_req_info req;
	struct rxe_comp_info comp;
	struct rxe_resp_info resp;

	atomic_t ssn;
	atomic_t skb_out;
	int need_req_skb;

	/* Timer for retransmitting packets when ACKs have been lost. RC
	 * only. The requester sets it when it is not already
	 * started. The responder resets it whenever an ack is
	 * received.
	 */
	struct timer_list retrans_timer;
	u64 qp_timeout_jiffies;

	/* Timer for handling RNR NAKs. */
	struct timer_list rnr_nak_timer;

	spinlock_t state_lock; /* guard requester and completer */

	struct execute_work cleanup_work;
};

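/* Access flag sets accepted by rxe when creating or binding the
 * corresponding object type.
 */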
enum {
	RXE_ACCESS_REMOTE = IB_ACCESS_REMOTE_READ
			  | IB_ACCESS_REMOTE_WRITE
			  | IB_ACCESS_REMOTE_ATOMIC,
	RXE_ACCESS_SUPPORTED_MR = RXE_ACCESS_REMOTE
			  | IB_ACCESS_LOCAL_WRITE
			  | IB_ACCESS_MW_BIND
			  | IB_ACCESS_ON_DEMAND
			  | IB_ACCESS_FLUSH_GLOBAL
			  | IB_ACCESS_FLUSH_PERSISTENT
			  | IB_ACCESS_OPTIONAL,
	RXE_ACCESS_SUPPORTED_QP = RXE_ACCESS_SUPPORTED_MR,
	RXE_ACCESS_SUPPORTED_MW = RXE_ACCESS_SUPPORTED_MR
			  | IB_ZERO_BASED,
};

enum rxe_mr_state {
	RXE_MR_STATE_INVALID,
	RXE_MR_STATE_FREE,
	RXE_MR_STATE_VALID,
};

enum rxe_mr_copy_dir {
	RXE_TO_MR_OBJ,
	RXE_FROM_MR_OBJ,
};

enum rxe_mr_lookup_type {
	RXE_LOOKUP_LOCAL,
	RXE_LOOKUP_REMOTE,
};

enum rxe_rereg {
	RXE_MR_REREG_SUPPORTED = IB_MR_REREG_PD
			       | IB_MR_REREG_ACCESS,
};

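/* The upper 24 bits of an rkey hold the owning object's pool index and
 * the low 8 bits its key portion; memory windows are allocated from an
 * index range disjoint from MRs, so the index alone tells the two
 * apart.
 */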
static inline int rkey_is_mw(u32 rkey)
{
	u32 index = rkey >> 8;

	return (index >= RXE_MIN_MW_INDEX) && (index <= RXE_MAX_MW_INDEX);
}

struct rxe_mr {
	struct rxe_pool_elem elem;
	struct ib_mr ibmr;

	struct ib_umem *umem;

	u32 lkey;
	u32 rkey;
	enum rxe_mr_state state;
	int access;
	atomic_t num_mw;

	unsigned int page_offset;
	unsigned int page_shift;
	u64 page_mask;

	u32 num_buf;
	u32 nbuf;

	struct xarray page_list;
};

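/* Page size used to index the MR's page_list, falling back to
 * PAGE_SIZE when no MR is supplied.
 */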
static inline unsigned int mr_page_size(struct rxe_mr *mr)
{
	return mr ? mr->ibmr.page_size : PAGE_SIZE;
}

enum rxe_mw_state {
	RXE_MW_STATE_INVALID = RXE_MR_STATE_INVALID,
	RXE_MW_STATE_FREE = RXE_MR_STATE_FREE,
	RXE_MW_STATE_VALID = RXE_MR_STATE_VALID,
};

struct rxe_mw {
	struct ib_mw ibmw;
	struct rxe_pool_elem elem;
	spinlock_t lock;
	enum rxe_mw_state state;
	struct rxe_qp *qp; /* Type 2 only */
	struct rxe_mr *mr;
	u32 rkey;
	int access;
	u64 addr;
	u64 length;
};

struct rxe_mcg {
	struct rb_node node;
	struct kref ref_cnt;
	struct rxe_dev *rxe;
	struct list_head qp_list;
	union ib_gid mgid;
	atomic_t qp_num;
	u32 qkey;
	u16 pkey;
};

struct rxe_mca {
	struct list_head qp_list;
	struct rxe_qp *qp;
};

struct rxe_port {
	struct ib_port_attr attr;
	__be64 port_guid;
	__be64 subnet_prefix;
	spinlock_t port_lock; /* guard port */
	unsigned int mtu_cap;
	/* special QPs */
	u32 qp_gsi_index;
};

struct rxe_dev {
	struct ib_device ib_dev;
	struct ib_device_attr attr;
	int max_ucontext;
	int max_inline_data;
	struct mutex usdev_lock;

	struct net_device *ndev;

	struct rxe_pool uc_pool;
	struct rxe_pool pd_pool;
	struct rxe_pool ah_pool;
	struct rxe_pool srq_pool;
	struct rxe_pool qp_pool;
	struct rxe_pool cq_pool;
	struct rxe_pool mr_pool;
	struct rxe_pool mw_pool;

	/* multicast support */
	spinlock_t mcg_lock;
	struct rb_root mcg_tree;
	atomic_t mcg_num;
	atomic_t mcg_attach;

	spinlock_t pending_lock; /* guard pending_mmaps */
	struct list_head pending_mmaps;

	spinlock_t mmap_offset_lock; /* guard mmap_offset */
	u64 mmap_offset;

	atomic64_t stats_counters[RXE_NUM_OF_COUNTERS];

	struct rxe_port port;
	struct crypto_shash *tfm;
};

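/* Bump one of the per-device stats counters exposed through
 * rxe_hw_counters.
 */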
static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters index)
{
	atomic64_inc(&rxe->stats_counters[index]);
}

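/* NULL-safe converters from the core ib_* objects to the rxe
 * structures that embed them.
 */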
static inline struct rxe_dev *to_rdev(struct ib_device *dev)
{
	return dev ? container_of(dev, struct rxe_dev, ib_dev) : NULL;
}

static inline struct rxe_ucontext *to_ruc(struct ib_ucontext *uc)
{
	return uc ? container_of(uc, struct rxe_ucontext, ibuc) : NULL;
}

static inline struct rxe_pd *to_rpd(struct ib_pd *pd)
{
	return pd ? container_of(pd, struct rxe_pd, ibpd) : NULL;
}

static inline struct rxe_ah *to_rah(struct ib_ah *ah)
{
	return ah ? container_of(ah, struct rxe_ah, ibah) : NULL;
}

static inline struct rxe_srq *to_rsrq(struct ib_srq *srq)
{
	return srq ? container_of(srq, struct rxe_srq, ibsrq) : NULL;
}

static inline struct rxe_qp *to_rqp(struct ib_qp *qp)
{
	return qp ? container_of(qp, struct rxe_qp, ibqp) : NULL;
}

static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
{
	return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
}

static inline struct rxe_mr *to_rmr(struct ib_mr *mr)
{
	return mr ? container_of(mr, struct rxe_mr, ibmr) : NULL;
}

static inline struct rxe_mw *to_rmw(struct ib_mw *mw)
{
	return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL;
}

static inline struct rxe_pd *rxe_ah_pd(struct rxe_ah *ah)
{
	return to_rpd(ah->ibah.pd);
}

static inline struct rxe_pd *mr_pd(struct rxe_mr *mr)
{
	return to_rpd(mr->ibmr.pd);
}

static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
{
	return to_rpd(mw->ibmw.pd);
}

int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);

#endif /* RXE_VERBS_H */