xref: /linux/include/rdma/rdma_vt.h (revision 7a5f1cd22d47f8ca4b760b6334378ae42c1bd24b)
1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
2 /*
3  * Copyright(c) 2016 - 2019 Intel Corporation.
4  */
5 
6 #ifndef DEF_RDMA_VT_H
7 #define DEF_RDMA_VT_H
8 
9 /*
10  * Structure that low level drivers will populate in order to register with the
11  * rdmavt layer.
12  */
13 
14 #include <linux/spinlock.h>
15 #include <linux/list.h>
16 #include <linux/hash.h>
17 #include <rdma/ib_verbs.h>
18 #include <rdma/ib_mad.h>
19 #include <rdma/rdmavt_mr.h>
20 
21 #define RVT_MAX_PKEY_VALUES 16
22 
23 #define RVT_MAX_TRAP_LEN 100 /* Limit pending trap list */
24 #define RVT_MAX_TRAP_LISTS 5 /*((IB_NOTICE_TYPE_INFO & 0x0F) + 1)*/
25 #define RVT_TRAP_TIMEOUT 4096 /* 4.096 usec */
26 
/*
 * A list of pending (not yet repressed) traps for one notice type.
 * RVT_MAX_TRAP_LEN bounds how many entries may be queued; @list_len
 * tracks the current count so the cap can be enforced cheaply.
 */
struct trap_list {
	u32 list_len;		/* current number of entries on @list */
	struct list_head list;	/* queued traps awaiting (re)send */
};
31 
struct rvt_qp;
struct rvt_qpn_table;
/*
 * Per-port state shared between rdmavt and the low level driver.
 * Instances are referenced from rvt_dev_info->ports[]; @lock protects
 * changes to the fields in this struct.
 */
struct rvt_ibport {
	struct rvt_qp __rcu *qp[2];	/* special QPs, presumably QP0/QP1 - TODO confirm */
	struct ib_mad_agent *send_agent;	/* agent for SMI (traps) */
	struct rb_root mcast_tree;	/* multicast groups attached to this port */
	spinlock_t lock;		/* protect changes in this struct */

	/* non-zero when timer is set */
	unsigned long mkey_lease_timeout;
	unsigned long trap_timeout;
	__be64 gid_prefix;      /* in network order */
	__be64 mkey;		/* management key, in network order */
	u64 tid;		/* MAD transaction id - presumed, verify against MAD code */
	u32 port_cap_flags;
	u16 port_cap3_flags;
	u32 pma_sample_start;	/* performance management agent sampling state */
	u32 pma_sample_interval;
	__be16 pma_counter_select[5];
	u16 pma_tag;
	u16 mkey_lease_period;
	u32 sm_lid;		/* subnet manager LID */
	u8 sm_sl;		/* subnet manager service level */
	u8 mkeyprot;		/* mkey protection level */
	u8 subnet_timeout;
	u8 vl_high_limit;

	/*
	 * Driver is expected to keep these up to date. These
	 * counters are informational only and not required to be
	 * completely accurate.
	 */
	u64 n_rc_resends;
	u64 n_seq_naks;
	u64 n_rdma_seq;
	u64 n_rnr_naks;
	u64 n_other_naks;
	u64 n_loop_pkts;
	u64 n_pkt_drops;
	u64 n_vl15_dropped;
	u64 n_rc_timeouts;
	u64 n_dmawait;
	u64 n_unaligned;
	u64 n_rc_dupreq;
	u64 n_rc_seqnak;
	u64 n_rc_crwaits;
	u16 pkey_violations;
	u16 qkey_violations;
	u16 mkey_violations;

	/* Hot-path per CPU counters to avoid cacheline trading to update */
	/* z_* appear to be baseline snapshots for the percpu counters - verify */
	u64 z_rc_acks;
	u64 z_rc_qacks;
	u64 z_rc_delayed_comp;
	u64 __percpu *rc_acks;
	u64 __percpu *rc_qacks;
	u64 __percpu *rc_delayed_comp;

	void *priv; /* driver private data */

	/*
	 * The pkey table is allocated and maintained by the driver. Drivers
	 * need to have access to this before registering with rdmavt. However
	 * rdmavt will need access to it so drivers need to provide this during
	 * the attach port API call.
	 */
	u16 *pkey_table;

	struct rvt_ah *sm_ah;	/* AH to the subnet manager - likely used for traps */

	/*
	 * Keep a list of traps that have not been repressed.  They will be
	 * resent based on trap_timer.
	 */
	struct trap_list trap_lists[RVT_MAX_TRAP_LISTS];
	struct timer_list trap_timer;
};
109 
#define RVT_CQN_MAX 16 /* maximum length of cq name */

/* Values for rvt_driver_params.sge_copy_mode (SGE data copy strategy) */
#define RVT_SGE_COPY_MEMCPY	0
#define RVT_SGE_COPY_CACHELESS	1
#define RVT_SGE_COPY_ADAPTIVE	2
115 
/*
 * Things that are driver specific, module parameters in hfi1 and qib
 */
struct rvt_driver_params {
	struct ib_device_attr props;	/* device attributes reported to ULPs */

	/*
	 * Anything driver specific that is not covered by props
	 * For instance special module parameters. Goes here.
	 */
	unsigned int lkey_table_size;	/* size of the lkey table */
	unsigned int qp_table_size;	/* size of the QP lookup table */
	unsigned int sge_copy_mode;	/* one of RVT_SGE_COPY_* */
	unsigned int wss_threshold;	/* working set size tuning, see rvt_wss */
	unsigned int wss_clean_period;
	int qpn_start;			/* QPN allocation parameters */
	int qpn_inc;
	int qpn_res_start;		/* reserved QPN range bounds */
	int qpn_res_end;
	int nports;			/* number of physical ports */
	int npkeys;			/* pkeys per port (same for all ports) */
	int node;			/* presumably NUMA node for allocations - verify */
	int psn_mask;			/* PSN field handling - driver defined */
	int psn_shift;
	int psn_modify_mask;
	u32 core_cap_flags;
	u32 max_mad_size;
	u8 qos_shift;
	u8 max_rdma_atomic;		/* see rvt_max_atomic()/rvt_size_atomic() */
	u8 extra_rdma_atomic;
	u8 reserved_operations;
};
148 
/* User context */
struct rvt_ucontext {
	struct ib_ucontext ibucontext;	/* embedded core ucontext */
	void *priv;			/* driver private data */
};
154 
/* Protection domain */
struct rvt_pd {
	struct ib_pd ibpd;	/* embedded core PD; see ibpd_to_rvtpd() */
	bool user;		/* user (vs kernel) PD - per name, TODO confirm */
};
160 
/* Address handle */
struct rvt_ah {
	struct ib_ah ibah;		/* embedded core AH; see ibah_to_rvtah() */
	struct rdma_ah_attr attr;	/* attributes this AH was created with */
	u8 vl;				/* virtual lane */
	u8 log_pmtu;			/* log2 of the path MTU */
};
168 
/*
 * This structure is used by rvt_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct rvt_mmap_info {
	struct list_head pending_mmaps;	/* entry on rvt_dev_info.pending_mmaps */
	struct ib_ucontext *context;	/* owning user context */
	void *obj;			/* object being mapped */
	__u64 offset;			/* mmap offset used to find this entry */
	struct kref ref;		/* reference count */
	u32 size;			/* size of the mapping in bytes */
};
182 
/* memory working set size */
struct rvt_wss {
	unsigned long *entries;		/* tracking entries - presumably a bitmap, TODO confirm */
	atomic_t total_count;		/* current working set count */
	atomic_t clean_counter;		/* countdown that triggers a clean pass */
	atomic_t clean_entry;		/* next entry to clean */

	int threshold;			/* presumably derived from dparms.wss_threshold */
	int num_entries;		/* number of elements in @entries */
	long pages_mask;		/* mask applied when computing an entry index */
	unsigned int clean_period;	/* presumably from dparms.wss_clean_period */
};
195 
struct rvt_dev_info;
struct rvt_swqe;
/*
 * Table of calldowns the low level driver supplies at registration time
 * (rvt_dev_info.driver_f).  rdmavt invokes these to delegate hardware
 * specific work back to the driver.
 */
struct rvt_driver_provided {
	/*
	 * Which functions are required depends on which verbs rdmavt is
	 * providing and which verbs the driver is overriding. See
	 * check_support() for details.
	 */

	/* hot path calldowns in a single cacheline */

	/*
	 * Give the driver a notice that there is send work to do. It is up to
	 * the driver to generally push the packets out, this just queues the
	 * work with the driver. There are two variants here. The no_lock
	 * version requires the s_lock not to be held. The other assumes the
	 * s_lock is held.
	 */
	bool (*schedule_send)(struct rvt_qp *qp);
	bool (*schedule_send_no_lock)(struct rvt_qp *qp);

	/*
	 * Driver specific work request setup and checking.
	 * This function is allowed to perform any setup, checks, or
	 * adjustments required to the SWQE in order to be usable by
	 * underlying protocols. This includes private data structure
	 * allocations.
	 */
	int (*setup_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe,
			 bool *call_send);

	/*
	 * Sometimes rdmavt needs to kick the driver's send progress. That is
	 * done by this call back.
	 */
	void (*do_send)(struct rvt_qp *qp);

	/*
	 * Returns a pointer to the underlying hardware's PCI device. This is
	 * used to display information as to what hardware is being referenced
	 * in an output message
	 */
	struct pci_dev * (*get_pci_dev)(struct rvt_dev_info *rdi);

	/*
	 * Allocate a private queue pair data structure for driver specific
	 * information which is opaque to rdmavt.  Errors are returned via
	 * ERR_PTR(err).  The driver is free to return NULL or a valid
	 * pointer.
	 */
	void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp);

	/*
	 * Init a structure allocated with qp_priv_alloc(). This should be
	 * called after all qp fields have been initialized in rdmavt.
	 */
	int (*qp_priv_init)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			    struct ib_qp_init_attr *init_attr);

	/*
	 * Free the driver's private qp structure.
	 */
	void (*qp_priv_free)(struct rvt_dev_info *rdi, struct rvt_qp *qp);

	/*
	 * Inform the driver the particular qp in question has been reset so
	 * that it can clean up anything it needs to.
	 */
	void (*notify_qp_reset)(struct rvt_qp *qp);

	/*
	 * Get a path mtu from the driver based on qp attributes.
	 */
	int (*get_pmtu_from_attr)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
				  struct ib_qp_attr *attr);

	/*
	 * Notify driver that it needs to flush any outstanding IO requests that
	 * are waiting on a qp.
	 */
	void (*flush_qp_waiters)(struct rvt_qp *qp);

	/*
	 * Notify driver to stop its queue of sending packets. Nothing else
	 * should be posted to the queue pair after this has been called.
	 */
	void (*stop_send_queue)(struct rvt_qp *qp);

	/*
	 * Have the driver drain any in progress operations
	 */
	void (*quiesce_qp)(struct rvt_qp *qp);

	/*
	 * Inform the driver a qp has went to error state.
	 */
	void (*notify_error_qp)(struct rvt_qp *qp);

	/*
	 * Get an MTU for a qp.
	 */
	u32 (*mtu_from_qp)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			   u32 pmtu);
	/*
	 * Convert an mtu to a path mtu
	 */
	int (*mtu_to_path_mtu)(u32 mtu);

	/*
	 * Get the guid of a port in big endian byte order
	 */
	int (*get_guid_be)(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
			   int guid_index, __be64 *guid);

	/*
	 * Query driver for the state of the port.
	 */
	int (*query_port_state)(struct rvt_dev_info *rdi, u32 port_num,
				struct ib_port_attr *props);

	/*
	 * Tell driver to shutdown a port
	 */
	int (*shut_down_port)(struct rvt_dev_info *rdi, u32 port_num);

	/* Tell driver to send a trap for changed port capabilities */
	void (*cap_mask_chg)(struct rvt_dev_info *rdi, u32 port_num);

	/*
	 * The following functions can be safely ignored completely. Any use of
	 * these is checked for NULL before blindly calling. Rdmavt should also
	 * be functional if drivers omit these.
	 */

	/* Called to inform the driver that all qps should now be freed. */
	unsigned (*free_all_qps)(struct rvt_dev_info *rdi);

	/* Driver specific AH validation */
	int (*check_ah)(struct ib_device *, struct rdma_ah_attr *);

	/* Inform the driver a new AH has been created */
	void (*notify_new_ah)(struct ib_device *, struct rdma_ah_attr *,
			      struct rvt_ah *);

	/* Let the driver pick the next queue pair number*/
	int (*alloc_qpn)(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
			 enum ib_qp_type type, u32 port_num);

	/* Determine if its safe or allowed to modify the qp */
	int (*check_modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
			       int attr_mask, struct ib_udata *udata);

	/* Driver specific QP modification/notification-of */
	void (*modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask, struct ib_udata *udata);

	/* Notify driver a mad agent has been created */
	void (*notify_create_mad_agent)(struct rvt_dev_info *rdi, int port_idx);

	/* Notify driver a mad agent has been removed */
	void (*notify_free_mad_agent)(struct rvt_dev_info *rdi, int port_idx);

	/* Notify driver to restart rc */
	void (*notify_restart_rc)(struct rvt_qp *qp, u32 psn, int wait);

	/* Get and return CPU to pin CQ processing thread */
	int (*comp_vect_cpu_lookup)(struct rvt_dev_info *rdi, int comp_vect);

	/* allocate a ucontext */
	int (*alloc_ucontext)(struct ib_ucontext *uctx, struct ib_udata *udata);

	/* deallocate a ucontext */
	void (*dealloc_ucontext)(struct ib_ucontext *context);

	/* driver mmap */
	int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
};
373 
struct rvt_dev_info {
	struct ib_device ibdev; /* Keep this first. Nothing above here */

	/*
	 * Prior to calling for registration the driver will be responsible for
	 * allocating space for this structure.
	 *
	 * The driver will also be responsible for filling in certain members of
	 * dparms.props. The driver needs to fill in dparms exactly as it would
	 * want values reported to a ULP. This will be returned to the caller
	 * in rdmavt's device. The driver should also therefore refrain from
	 * modifying this directly after registration with rdmavt.
	 */

	/* Driver specific properties */
	struct rvt_driver_params dparms;

	/* post send table */
	const struct rvt_operation_params *post_parms;

	/* opcode translation table */
	const enum ib_wc_opcode *wc_opcode;

	/* Driver specific helper functions */
	struct rvt_driver_provided driver_f;

	struct rvt_mregion __rcu *dma_mr;	/* DMA memory region; RCU protected */
	struct rvt_lkey_table lkey_table;	/* lkey/MR lookup table */

	/* Internal use */
	int n_pds_allocated;
	spinlock_t n_pds_lock; /* Protect pd allocated count */

	int n_ahs_allocated;
	spinlock_t n_ahs_lock; /* Protect ah allocated count */

	u32 n_srqs_allocated;
	spinlock_t n_srqs_lock; /* Protect srqs allocated count */

	int flags;				/* device flags - semantics driver/rdmavt defined */
	struct rvt_ibport **ports;		/* per-port state, presumably dparms.nports entries */

	/* QP */
	struct rvt_qp_ibdev *qp_dev;		/* QP state maintained by rdmavt */
	u32 n_qps_allocated;    /* number of QPs allocated for device */
	u32 n_rc_qps;		/* number of RC QPs allocated for device */
	u32 busy_jiffies;	/* timeout scaling based on RC QP count */
	spinlock_t n_qps_lock;	/* protect qps, rc qps and busy jiffy counts */

	/* memory maps */
	struct list_head pending_mmaps;
	spinlock_t mmap_offset_lock; /* protect mmap_offset */
	u32 mmap_offset;
	spinlock_t pending_lock; /* protect pending mmap list */

	/* CQ */
	u32 n_cqs_allocated;    /* number of CQs allocated for device */
	spinlock_t n_cqs_lock; /* protect count of in use cqs */

	/* Multicast */
	u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
	spinlock_t n_mcast_grps_lock;	/* protect mcast group count */

	/* Memory Working Set Size */
	struct rvt_wss *wss;
};
440 
/**
 * rvt_set_ibdev_name - Craft an IB device name from client info
 * @rdi: pointer to the client rvt_dev_info structure
 * @fmt: printf-style format; consumes @name (%s) and @unit (%d)
 * @name: client specific name
 * @unit: client specific unit number.
 */
static inline void rvt_set_ibdev_name(struct rvt_dev_info *rdi,
				      const char *fmt, const char *name,
				      const int unit)
{
	/*
	 * FIXME: rvt and its users want to touch the ibdev before
	 * registration and have things like the name work. We don't have the
	 * infrastructure in the core to support this directly today, hack it
	 * to work by setting the name manually here.
	 */
	dev_set_name(&rdi->ibdev.dev, fmt, name, unit);
	strscpy(rdi->ibdev.name, dev_name(&rdi->ibdev.dev), IB_DEVICE_NAME_MAX);
}
460 
461 /**
462  * rvt_get_ibdev_name - return the IB name
463  * @rdi: rdmavt device
464  *
465  * Return the registered name of the device.
466  */
467 static inline const char *rvt_get_ibdev_name(const struct rvt_dev_info *rdi)
468 {
469 	return dev_name(&rdi->ibdev.dev);
470 }
471 
472 static inline struct rvt_pd *ibpd_to_rvtpd(struct ib_pd *ibpd)
473 {
474 	return container_of(ibpd, struct rvt_pd, ibpd);
475 }
476 
477 static inline struct rvt_ah *ibah_to_rvtah(struct ib_ah *ibah)
478 {
479 	return container_of(ibah, struct rvt_ah, ibah);
480 }
481 
482 static inline struct rvt_dev_info *ib_to_rvt(struct ib_device *ibdev)
483 {
484 	return  container_of(ibdev, struct rvt_dev_info, ibdev);
485 }
486 
487 static inline unsigned rvt_get_npkeys(struct rvt_dev_info *rdi)
488 {
489 	/*
490 	 * All ports have same number of pkeys.
491 	 */
492 	return rdi->dparms.npkeys;
493 }
494 
495 /*
496  * Return the max atomic suitable for determining
497  * the size of the ack ring buffer in a QP.
498  */
499 static inline unsigned int rvt_max_atomic(struct rvt_dev_info *rdi)
500 {
501 	return rdi->dparms.max_rdma_atomic +
502 		rdi->dparms.extra_rdma_atomic + 1;
503 }
504 
505 static inline unsigned int rvt_size_atomic(struct rvt_dev_info *rdi)
506 {
507 	return rdi->dparms.max_rdma_atomic +
508 		rdi->dparms.extra_rdma_atomic;
509 }
510 
511 /*
512  * Return the indexed PKEY from the port PKEY table.
513  */
514 static inline u16 rvt_get_pkey(struct rvt_dev_info *rdi,
515 			       int port_index,
516 			       unsigned index)
517 {
518 	if (index >= rvt_get_npkeys(rdi))
519 		return 0;
520 	else
521 		return rdi->ports[port_index]->pkey_table[index];
522 }
523 
/* Device lifecycle - implemented by the rdmavt core */
struct rvt_dev_info *rvt_alloc_device(size_t size, int nports);
void rvt_dealloc_device(struct rvt_dev_info *rdi);
int rvt_register_device(struct rvt_dev_info *rvd);
void rvt_unregister_device(struct rvt_dev_info *rvd);

/* Generic AH validation usable as (or from) driver_f.check_ah */
int rvt_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr);

/* Per-port init; pkey_table ownership stays with the driver (see rvt_ibport) */
int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port,
		  int port_index, u16 *pkey_table);

/* Memory region / key helpers */
int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
		    int access);
int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey);
int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc);
int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
		struct rvt_sge *isge, struct rvt_sge *last_sge,
		struct ib_sge *sge, int acc);

/* Multicast group lookup on a port */
struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid,
				 u16 lid);
541 
542 #endif          /* DEF_RDMA_VT_H */
543