xref: /linux/include/rdma/ib_verbs.h (revision 0ac6f4056c4a257f4b230b910e3e6fee6c6fc9b9)
1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2 /*
3  * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
4  * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
5  * Copyright (c) 2004, 2020 Intel Corporation.  All rights reserved.
6  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
7  * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
8  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
9  * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
10  */
11 
12 #ifndef IB_VERBS_H
13 #define IB_VERBS_H
14 
15 #include <linux/ethtool.h>
16 #include <linux/types.h>
17 #include <linux/device.h>
18 #include <linux/bvec.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/kref.h>
21 #include <linux/list.h>
22 #include <linux/rwsem.h>
23 #include <linux/workqueue.h>
24 #include <linux/irq_poll.h>
25 #include <uapi/linux/if_ether.h>
26 #include <net/ipv6.h>
27 #include <net/ip.h>
28 #include <linux/string.h>
29 #include <linux/slab.h>
30 #include <linux/netdevice.h>
31 #include <linux/refcount.h>
32 #include <linux/if_link.h>
33 #include <linux/atomic.h>
34 #include <linux/mmu_notifier.h>
35 #include <linux/uaccess.h>
36 #include <linux/cgroup_rdma.h>
37 #include <linux/irqflags.h>
38 #include <linux/preempt.h>
39 #include <linux/dim.h>
40 #include <uapi/rdma/ib_user_verbs.h>
41 #include <rdma/rdma_counter.h>
42 #include <rdma/restrack.h>
43 #include <rdma/signature.h>
44 #include <uapi/rdma/rdma_user_ioctl.h>
45 #include <uapi/rdma/ib_user_ioctl_verbs.h>
46 #include <linux/pci-tph.h>
47 #include <linux/dma-buf.h>
48 
49 #define IB_FW_VERSION_NAME_MAX	ETHTOOL_FWVERS_LEN
50 
51 struct ib_umem_odp;
52 struct ib_uqp_object;
53 struct ib_usrq_object;
54 struct ib_uwq_object;
55 struct rdma_cm_id;
56 struct ib_port;
57 struct hw_stats_device_data;
58 
59 extern struct workqueue_struct *ib_wq;
60 extern struct workqueue_struct *ib_comp_wq;
61 extern struct workqueue_struct *ib_comp_unbound_wq;
62 
63 struct ib_ucq_object;
64 
65 __printf(2, 3) __cold
66 void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
67 __printf(2, 3) __cold
68 void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
69 __printf(2, 3) __cold
70 void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
71 __printf(2, 3) __cold
72 void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
73 __printf(2, 3) __cold
74 void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
75 __printf(2, 3) __cold
76 void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
77 __printf(2, 3) __cold
78 void ibdev_info(const struct ib_device *ibdev, const char *format, ...);
79 
80 #if defined(CONFIG_DYNAMIC_DEBUG) || \
81 	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
82 #define ibdev_dbg(__dev, format, args...)                       \
83 	dynamic_ibdev_dbg(__dev, format, ##args)
84 #else
85 __printf(2, 3) __cold
86 static inline
87 void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
88 #endif
89 
90 #define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...)           \
91 do {                                                                    \
92 	static DEFINE_RATELIMIT_STATE(_rs,                              \
93 				      DEFAULT_RATELIMIT_INTERVAL,       \
94 				      DEFAULT_RATELIMIT_BURST);         \
95 	if (__ratelimit(&_rs))                                          \
96 		ibdev_level(ibdev, fmt, ##__VA_ARGS__);                 \
97 } while (0)
98 
99 #define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
100 	ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
101 #define ibdev_alert_ratelimited(ibdev, fmt, ...) \
102 	ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
103 #define ibdev_crit_ratelimited(ibdev, fmt, ...) \
104 	ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
105 #define ibdev_err_ratelimited(ibdev, fmt, ...) \
106 	ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
107 #define ibdev_warn_ratelimited(ibdev, fmt, ...) \
108 	ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
109 #define ibdev_notice_ratelimited(ibdev, fmt, ...) \
110 	ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
111 #define ibdev_info_ratelimited(ibdev, fmt, ...) \
112 	ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)
113 
114 #if defined(CONFIG_DYNAMIC_DEBUG) || \
115 	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
116 /* descriptor check is first to prevent flooding with "callbacks suppressed" */
117 #define ibdev_dbg_ratelimited(ibdev, fmt, ...)                          \
118 do {                                                                    \
119 	static DEFINE_RATELIMIT_STATE(_rs,                              \
120 				      DEFAULT_RATELIMIT_INTERVAL,       \
121 				      DEFAULT_RATELIMIT_BURST);         \
122 	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);                 \
123 	if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs))      \
124 		__dynamic_ibdev_dbg(&descriptor, ibdev, fmt,            \
125 				    ##__VA_ARGS__);                     \
126 } while (0)
127 #else
128 __printf(2, 3) __cold
129 static inline
130 void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
131 #endif
132 
133 union ib_gid {
134 	u8	raw[16];
135 	struct {
136 		__be64	subnet_prefix;
137 		__be64	interface_id;
138 	} global;
139 };
140 
141 extern union ib_gid zgid;
142 
143 enum ib_gid_type {
144 	IB_GID_TYPE_IB = IB_UVERBS_GID_TYPE_IB,
145 	IB_GID_TYPE_ROCE = IB_UVERBS_GID_TYPE_ROCE_V1,
146 	IB_GID_TYPE_ROCE_UDP_ENCAP = IB_UVERBS_GID_TYPE_ROCE_V2,
147 	IB_GID_TYPE_SIZE
148 };
149 
150 #define ROCE_V2_UDP_DPORT      4791
151 struct ib_gid_attr {
152 	struct net_device __rcu	*ndev;
153 	struct ib_device	*device;
154 	union ib_gid		gid;
155 	enum ib_gid_type	gid_type;
156 	u16			index;
157 	u32			port_num;
158 };
159 
160 enum {
161 	/* set the local administered indication */
162 	IB_SA_WELL_KNOWN_GUID	= BIT_ULL(57) | 2,
163 };
164 
165 enum rdma_transport_type {
166 	RDMA_TRANSPORT_IB,
167 	RDMA_TRANSPORT_IWARP,
168 	RDMA_TRANSPORT_USNIC,
169 	RDMA_TRANSPORT_USNIC_UDP,
170 	RDMA_TRANSPORT_UNSPECIFIED,
171 };
172 
173 enum rdma_protocol_type {
174 	RDMA_PROTOCOL_IB,
175 	RDMA_PROTOCOL_IBOE,
176 	RDMA_PROTOCOL_IWARP,
177 	RDMA_PROTOCOL_USNIC_UDP
178 };
179 
180 __attribute_const__ enum rdma_transport_type
181 rdma_node_get_transport(unsigned int node_type);
182 
183 enum rdma_network_type {
184 	RDMA_NETWORK_IB,
185 	RDMA_NETWORK_ROCE_V1,
186 	RDMA_NETWORK_IPV4,
187 	RDMA_NETWORK_IPV6
188 };
189 
190 static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
191 {
192 	if (network_type == RDMA_NETWORK_IPV4 ||
193 	    network_type == RDMA_NETWORK_IPV6)
194 		return IB_GID_TYPE_ROCE_UDP_ENCAP;
195 	else if (network_type == RDMA_NETWORK_ROCE_V1)
196 		return IB_GID_TYPE_ROCE;
197 	else
198 		return IB_GID_TYPE_IB;
199 }
200 
201 static inline enum rdma_network_type
202 rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
203 {
204 	if (attr->gid_type == IB_GID_TYPE_IB)
205 		return RDMA_NETWORK_IB;
206 
207 	if (attr->gid_type == IB_GID_TYPE_ROCE)
208 		return RDMA_NETWORK_ROCE_V1;
209 
210 	if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
211 		return RDMA_NETWORK_IPV4;
212 	else
213 		return RDMA_NETWORK_IPV6;
214 }
215 
216 enum rdma_link_layer {
217 	IB_LINK_LAYER_UNSPECIFIED,
218 	IB_LINK_LAYER_INFINIBAND,
219 	IB_LINK_LAYER_ETHERNET,
220 };
221 
222 enum ib_device_cap_flags {
223 	IB_DEVICE_RESIZE_MAX_WR = IB_UVERBS_DEVICE_RESIZE_MAX_WR,
224 	IB_DEVICE_BAD_PKEY_CNTR = IB_UVERBS_DEVICE_BAD_PKEY_CNTR,
225 	IB_DEVICE_BAD_QKEY_CNTR = IB_UVERBS_DEVICE_BAD_QKEY_CNTR,
226 	IB_DEVICE_RAW_MULTI = IB_UVERBS_DEVICE_RAW_MULTI,
227 	IB_DEVICE_AUTO_PATH_MIG = IB_UVERBS_DEVICE_AUTO_PATH_MIG,
228 	IB_DEVICE_CHANGE_PHY_PORT = IB_UVERBS_DEVICE_CHANGE_PHY_PORT,
229 	IB_DEVICE_UD_AV_PORT_ENFORCE = IB_UVERBS_DEVICE_UD_AV_PORT_ENFORCE,
230 	IB_DEVICE_CURR_QP_STATE_MOD = IB_UVERBS_DEVICE_CURR_QP_STATE_MOD,
231 	IB_DEVICE_SHUTDOWN_PORT = IB_UVERBS_DEVICE_SHUTDOWN_PORT,
232 	/* IB_DEVICE_INIT_TYPE = IB_UVERBS_DEVICE_INIT_TYPE, (not in use) */
233 	IB_DEVICE_PORT_ACTIVE_EVENT = IB_UVERBS_DEVICE_PORT_ACTIVE_EVENT,
234 	IB_DEVICE_SYS_IMAGE_GUID = IB_UVERBS_DEVICE_SYS_IMAGE_GUID,
235 	IB_DEVICE_RC_RNR_NAK_GEN = IB_UVERBS_DEVICE_RC_RNR_NAK_GEN,
236 	IB_DEVICE_SRQ_RESIZE = IB_UVERBS_DEVICE_SRQ_RESIZE,
237 	IB_DEVICE_N_NOTIFY_CQ = IB_UVERBS_DEVICE_N_NOTIFY_CQ,
238 
239 	/* Reserved, old SEND_W_INV = 1 << 16,*/
240 	IB_DEVICE_MEM_WINDOW = IB_UVERBS_DEVICE_MEM_WINDOW,
241 	/*
242 	 * Devices should set IB_DEVICE_UD_IP_SUM if they support
243 	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
244 	 * messages and can verify the validity of checksum for
245 	 * incoming messages.  Setting this flag implies that the
246 	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
247 	 */
248 	IB_DEVICE_UD_IP_CSUM = IB_UVERBS_DEVICE_UD_IP_CSUM,
249 	IB_DEVICE_XRC = IB_UVERBS_DEVICE_XRC,
250 
251 	/*
252 	 * This device supports the IB "base memory management extension",
253 	 * which includes support for fast registrations (IB_WR_REG_MR,
254 	 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
255 	 * also be set by any iWarp device which must support FRs to comply
256 	 * to the iWarp verbs spec.  iWarp devices also support the
257 	 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
258 	 * stag.
259 	 */
260 	IB_DEVICE_MEM_MGT_EXTENSIONS = IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS,
261 	IB_DEVICE_MEM_WINDOW_TYPE_2A = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A,
262 	IB_DEVICE_MEM_WINDOW_TYPE_2B = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B,
263 	IB_DEVICE_RC_IP_CSUM = IB_UVERBS_DEVICE_RC_IP_CSUM,
264 	/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
265 	IB_DEVICE_RAW_IP_CSUM = IB_UVERBS_DEVICE_RAW_IP_CSUM,
266 	IB_DEVICE_MANAGED_FLOW_STEERING =
267 		IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING,
268 	/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
269 	IB_DEVICE_RAW_SCATTER_FCS = IB_UVERBS_DEVICE_RAW_SCATTER_FCS,
270 	/* The device supports padding incoming writes to cacheline. */
271 	IB_DEVICE_PCI_WRITE_END_PADDING =
272 		IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING,
273 	/* Placement type attributes */
274 	IB_DEVICE_FLUSH_GLOBAL = IB_UVERBS_DEVICE_FLUSH_GLOBAL,
275 	IB_DEVICE_FLUSH_PERSISTENT = IB_UVERBS_DEVICE_FLUSH_PERSISTENT,
276 	IB_DEVICE_ATOMIC_WRITE = IB_UVERBS_DEVICE_ATOMIC_WRITE,
277 };
278 
279 enum ib_kernel_cap_flags {
280 	/*
281 	 * This device supports a per-device lkey or stag that can be
282 	 * used without performing a memory registration for the local
283 	 * memory.  Note that ULPs should never check this flag, but
284 	 * instead of use the local_dma_lkey flag in the ib_pd structure,
285 	 * which will always contain a usable lkey.
286 	 */
287 	IBK_LOCAL_DMA_LKEY = 1 << 0,
288 	/* IB_QP_CREATE_INTEGRITY_EN is supported to implement T10-PI */
289 	IBK_INTEGRITY_HANDOVER = 1 << 1,
290 	/* IB_ACCESS_ON_DEMAND is supported during reg_user_mr() */
291 	IBK_ON_DEMAND_PAGING = 1 << 2,
292 	/* IB_MR_TYPE_SG_GAPS is supported */
293 	IBK_SG_GAPS_REG = 1 << 3,
294 	/* Driver supports RDMA_NLDEV_CMD_DELLINK */
295 	IBK_ALLOW_USER_UNREG = 1 << 4,
296 
297 	/* ipoib will use IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK */
298 	IBK_BLOCK_MULTICAST_LOOPBACK = 1 << 5,
299 	/* iopib will use IB_QP_CREATE_IPOIB_UD_LSO for its QPs */
300 	IBK_UD_TSO = 1 << 6,
301 	/* iopib will use the device ops:
302 	 *   get_vf_config
303 	 *   get_vf_guid
304 	 *   get_vf_stats
305 	 *   set_vf_guid
306 	 *   set_vf_link_state
307 	 */
308 	IBK_VIRTUAL_FUNCTION = 1 << 7,
309 	/* ipoib will use IB_QP_CREATE_NETDEV_USE for its QPs */
310 	IBK_RDMA_NETDEV_OPA = 1 << 8,
311 };
312 
313 enum ib_atomic_cap {
314 	IB_ATOMIC_NONE,
315 	IB_ATOMIC_HCA,
316 	IB_ATOMIC_GLOB
317 };
318 
319 enum ib_odp_general_cap_bits {
320 	IB_ODP_SUPPORT		= IB_UVERBS_ODP_SUPPORT,
321 	IB_ODP_SUPPORT_IMPLICIT = IB_UVERBS_ODP_SUPPORT_IMPLICIT,
322 };
323 
324 enum ib_odp_transport_cap_bits {
325 	IB_ODP_SUPPORT_SEND	= IB_UVERBS_ODP_SUPPORT_SEND,
326 	IB_ODP_SUPPORT_RECV	= IB_UVERBS_ODP_SUPPORT_RECV,
327 	IB_ODP_SUPPORT_WRITE	= IB_UVERBS_ODP_SUPPORT_WRITE,
328 	IB_ODP_SUPPORT_READ	= IB_UVERBS_ODP_SUPPORT_READ,
329 	IB_ODP_SUPPORT_ATOMIC	= IB_UVERBS_ODP_SUPPORT_ATOMIC,
330 	IB_ODP_SUPPORT_SRQ_RECV	= IB_UVERBS_ODP_SUPPORT_SRQ_RECV,
331 	IB_ODP_SUPPORT_FLUSH	= IB_UVERBS_ODP_SUPPORT_FLUSH,
332 	IB_ODP_SUPPORT_ATOMIC_WRITE	= IB_UVERBS_ODP_SUPPORT_ATOMIC_WRITE,
333 };
334 
335 struct ib_odp_caps {
336 	uint64_t general_caps;
337 	struct {
338 		uint32_t  rc_odp_caps;
339 		uint32_t  uc_odp_caps;
340 		uint32_t  ud_odp_caps;
341 		uint32_t  xrc_odp_caps;
342 	} per_transport_caps;
343 };
344 
345 struct ib_rss_caps {
346 	/* Corresponding bit will be set if qp type from
347 	 * 'enum ib_qp_type' is supported, e.g.
348 	 * supported_qpts |= 1 << IB_QPT_UD
349 	 */
350 	u32 supported_qpts;
351 	u32 max_rwq_indirection_tables;
352 	u32 max_rwq_indirection_table_size;
353 };
354 
355 enum ib_tm_cap_flags {
356 	/*  Support tag matching with rendezvous offload for RC transport */
357 	IB_TM_CAP_RNDV_RC = 1 << 0,
358 };
359 
360 struct ib_tm_caps {
361 	/* Max size of RNDV header */
362 	u32 max_rndv_hdr_size;
363 	/* Max number of entries in tag matching list */
364 	u32 max_num_tags;
365 	/* From enum ib_tm_cap_flags */
366 	u32 flags;
367 	/* Max number of outstanding list operations */
368 	u32 max_ops;
369 	/* Max number of SGE in tag matching entry */
370 	u32 max_sge;
371 };
372 
373 struct ib_cq_init_attr {
374 	unsigned int	cqe;
375 	u32		comp_vector;
376 	u32		flags;
377 };
378 
379 enum ib_cq_attr_mask {
380 	IB_CQ_MODERATE = 1 << 0,
381 };
382 
383 struct ib_cq_caps {
384 	u16     max_cq_moderation_count;
385 	u16     max_cq_moderation_period;
386 };
387 
388 struct ib_dm_mr_attr {
389 	u64		length;
390 	u64		offset;
391 	u32		access_flags;
392 };
393 
394 struct ib_dm_alloc_attr {
395 	u64	length;
396 	u32	alignment;
397 	u32	flags;
398 };
399 
400 struct ib_device_attr {
401 	u64			fw_ver;
402 	__be64			sys_image_guid;
403 	u64			max_mr_size;
404 	u64			page_size_cap;
405 	u32			vendor_id;
406 	u32			vendor_part_id;
407 	u32			hw_ver;
408 	int			max_qp;
409 	int			max_qp_wr;
410 	u64			device_cap_flags;
411 	u64			kernel_cap_flags;
412 	int			max_send_sge;
413 	int			max_recv_sge;
414 	int			max_sge_rd;
415 	int			max_cq;
416 	int			max_cqe;
417 	int			max_mr;
418 	int			max_pd;
419 	int			max_qp_rd_atom;
420 	int			max_ee_rd_atom;
421 	int			max_res_rd_atom;
422 	int			max_qp_init_rd_atom;
423 	int			max_ee_init_rd_atom;
424 	enum ib_atomic_cap	atomic_cap;
425 	enum ib_atomic_cap	masked_atomic_cap;
426 	int			max_ee;
427 	int			max_rdd;
428 	int			max_mw;
429 	int			max_raw_ipv6_qp;
430 	int			max_raw_ethy_qp;
431 	int			max_mcast_grp;
432 	int			max_mcast_qp_attach;
433 	int			max_total_mcast_qp_attach;
434 	int			max_ah;
435 	int			max_srq;
436 	int			max_srq_wr;
437 	int			max_srq_sge;
438 	unsigned int		max_fast_reg_page_list_len;
439 	unsigned int		max_pi_fast_reg_page_list_len;
440 	u16			max_pkeys;
441 	u8			local_ca_ack_delay;
442 	int			sig_prot_cap;
443 	int			sig_guard_cap;
444 	struct ib_odp_caps	odp_caps;
445 	uint64_t		timestamp_mask;
446 	uint64_t		hca_core_clock; /* in KHZ */
447 	struct ib_rss_caps	rss_caps;
448 	u32			max_wq_type_rq;
449 	u32			raw_packet_caps; /* Use ib_raw_packet_caps enum */
450 	struct ib_tm_caps	tm_caps;
451 	struct ib_cq_caps       cq_caps;
452 	u64			max_dm_size;
453 	/* Max entries for sgl for optimized performance per READ */
454 	u32			max_sgl_rd;
455 };
456 
457 enum ib_mtu {
458 	IB_MTU_256  = 1,
459 	IB_MTU_512  = 2,
460 	IB_MTU_1024 = 3,
461 	IB_MTU_2048 = 4,
462 	IB_MTU_4096 = 5
463 };
464 
465 enum opa_mtu {
466 	OPA_MTU_8192 = 6,
467 	OPA_MTU_10240 = 7
468 };
469 
470 static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
471 {
472 	switch (mtu) {
473 	case IB_MTU_256:  return  256;
474 	case IB_MTU_512:  return  512;
475 	case IB_MTU_1024: return 1024;
476 	case IB_MTU_2048: return 2048;
477 	case IB_MTU_4096: return 4096;
478 	default: 	  return -1;
479 	}
480 }
481 
482 static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
483 {
484 	if (mtu >= 4096)
485 		return IB_MTU_4096;
486 	else if (mtu >= 2048)
487 		return IB_MTU_2048;
488 	else if (mtu >= 1024)
489 		return IB_MTU_1024;
490 	else if (mtu >= 512)
491 		return IB_MTU_512;
492 	else
493 		return IB_MTU_256;
494 }
495 
496 static inline int opa_mtu_enum_to_int(enum opa_mtu mtu)
497 {
498 	switch (mtu) {
499 	case OPA_MTU_8192:
500 		return 8192;
501 	case OPA_MTU_10240:
502 		return 10240;
503 	default:
504 		return(ib_mtu_enum_to_int((enum ib_mtu)mtu));
505 	}
506 }
507 
508 static inline enum opa_mtu opa_mtu_int_to_enum(int mtu)
509 {
510 	if (mtu >= 10240)
511 		return OPA_MTU_10240;
512 	else if (mtu >= 8192)
513 		return OPA_MTU_8192;
514 	else
515 		return ((enum opa_mtu)ib_mtu_int_to_enum(mtu));
516 }
517 
518 enum ib_port_state {
519 	IB_PORT_NOP		= 0,
520 	IB_PORT_DOWN		= 1,
521 	IB_PORT_INIT		= 2,
522 	IB_PORT_ARMED		= 3,
523 	IB_PORT_ACTIVE		= 4,
524 	IB_PORT_ACTIVE_DEFER	= 5
525 };
526 
527 static inline const char *__attribute_const__
528 ib_port_state_to_str(enum ib_port_state state)
529 {
530 	const char * const states[] = {
531 		[IB_PORT_NOP] = "NOP",
532 		[IB_PORT_DOWN] = "DOWN",
533 		[IB_PORT_INIT] = "INIT",
534 		[IB_PORT_ARMED] = "ARMED",
535 		[IB_PORT_ACTIVE] = "ACTIVE",
536 		[IB_PORT_ACTIVE_DEFER] = "ACTIVE_DEFER",
537 	};
538 
539 	if (state < ARRAY_SIZE(states))
540 		return states[state];
541 	return "UNKNOWN";
542 }
543 
544 enum ib_port_phys_state {
545 	IB_PORT_PHYS_STATE_SLEEP = 1,
546 	IB_PORT_PHYS_STATE_POLLING = 2,
547 	IB_PORT_PHYS_STATE_DISABLED = 3,
548 	IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
549 	IB_PORT_PHYS_STATE_LINK_UP = 5,
550 	IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
551 	IB_PORT_PHYS_STATE_PHY_TEST = 7,
552 };
553 
554 enum ib_port_width {
555 	IB_WIDTH_1X	= 1,
556 	IB_WIDTH_2X	= 16,
557 	IB_WIDTH_4X	= 2,
558 	IB_WIDTH_8X	= 4,
559 	IB_WIDTH_12X	= 8
560 };
561 
562 static inline int ib_width_enum_to_int(enum ib_port_width width)
563 {
564 	switch (width) {
565 	case IB_WIDTH_1X:  return  1;
566 	case IB_WIDTH_2X:  return  2;
567 	case IB_WIDTH_4X:  return  4;
568 	case IB_WIDTH_8X:  return  8;
569 	case IB_WIDTH_12X: return 12;
570 	default: 	  return -1;
571 	}
572 }
573 
574 enum ib_port_speed {
575 	IB_SPEED_SDR	= 1,
576 	IB_SPEED_DDR	= 2,
577 	IB_SPEED_QDR	= 4,
578 	IB_SPEED_FDR10	= 8,
579 	IB_SPEED_FDR	= 16,
580 	IB_SPEED_EDR	= 32,
581 	IB_SPEED_HDR	= 64,
582 	IB_SPEED_NDR	= 128,
583 	IB_SPEED_XDR	= 256,
584 };
585 
586 enum ib_stat_flag {
587 	IB_STAT_FLAG_OPTIONAL = 1 << 0,
588 };
589 
590 /**
591  * struct rdma_stat_desc - description of one rdma stat/counter
592  * @name: The name of the counter
593  * @flags: Flags of the counter; For example, IB_STAT_FLAG_OPTIONAL
594  * @priv: Driver private information; Core code should not use
595  */
596 struct rdma_stat_desc {
597 	const char *name;
598 	unsigned int flags;
599 	const void *priv;
600 };
601 
602 /**
603  * struct rdma_hw_stats - collection of hardware stats and their management
604  * @lock: Mutex to protect parallel write access to lifespan and values
605  *    of counters, which are 64bits and not guaranteed to be written
606  *    atomicaly on 32bits systems.
607  * @timestamp: Used by the core code to track when the last update was
608  * @lifespan: Used by the core code to determine how old the counters
609  *   should be before being updated again.  Stored in jiffies, defaults
610  *   to 10 milliseconds, drivers can override the default be specifying
611  *   their own value during their allocation routine.
612  * @descs: Array of pointers to static descriptors used for the counters
613  *   in directory.
614  * @is_disabled: A bitmap to indicate each counter is currently disabled
615  *   or not.
616  * @num_counters: How many hardware counters there are.  If name is
617  *   shorter than this number, a kernel oops will result.  Driver authors
618  *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@name) < num_counters)
619  *   in their code to prevent this.
620  * @value: Array of u64 counters that are accessed by the sysfs code and
621  *   filled in by the drivers get_stats routine
622  */
623 struct rdma_hw_stats {
624 	struct mutex	lock; /* Protect lifespan and values[] */
625 	unsigned long	timestamp;
626 	unsigned long	lifespan;
627 	const struct rdma_stat_desc *descs;
628 	unsigned long	*is_disabled;
629 	int		num_counters;
630 	u64		value[] __counted_by(num_counters);
631 };
632 
633 #define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
634 
635 struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
636 	const struct rdma_stat_desc *descs, int num_counters,
637 	unsigned long lifespan);
638 
639 void rdma_free_hw_stats_struct(struct rdma_hw_stats *stats);
640 
641 /* Define bits for the various functionality this port needs to be supported by
642  * the core.
643  */
644 /* Management                           0x00000FFF */
645 #define RDMA_CORE_CAP_IB_MAD            0x00000001
646 #define RDMA_CORE_CAP_IB_SMI            0x00000002
647 #define RDMA_CORE_CAP_IB_CM             0x00000004
648 #define RDMA_CORE_CAP_IW_CM             0x00000008
649 #define RDMA_CORE_CAP_IB_SA             0x00000010
650 #define RDMA_CORE_CAP_OPA_MAD           0x00000020
651 
652 /* Address format                       0x000FF000 */
653 #define RDMA_CORE_CAP_AF_IB             0x00001000
654 #define RDMA_CORE_CAP_ETH_AH            0x00002000
655 #define RDMA_CORE_CAP_OPA_AH            0x00004000
656 #define RDMA_CORE_CAP_IB_GRH_REQUIRED   0x00008000
657 
658 /* Protocol                             0xFFF00000 */
659 #define RDMA_CORE_CAP_PROT_IB           0x00100000
660 #define RDMA_CORE_CAP_PROT_ROCE         0x00200000
661 #define RDMA_CORE_CAP_PROT_IWARP        0x00400000
662 #define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
663 #define RDMA_CORE_CAP_PROT_RAW_PACKET   0x01000000
664 #define RDMA_CORE_CAP_PROT_USNIC        0x02000000
665 
666 #define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
667 					| RDMA_CORE_CAP_PROT_ROCE     \
668 					| RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)
669 
670 #define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
671 					| RDMA_CORE_CAP_IB_MAD \
672 					| RDMA_CORE_CAP_IB_SMI \
673 					| RDMA_CORE_CAP_IB_CM  \
674 					| RDMA_CORE_CAP_IB_SA  \
675 					| RDMA_CORE_CAP_AF_IB)
676 #define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
677 					| RDMA_CORE_CAP_IB_MAD  \
678 					| RDMA_CORE_CAP_IB_CM   \
679 					| RDMA_CORE_CAP_AF_IB   \
680 					| RDMA_CORE_CAP_ETH_AH)
681 #define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP			\
682 					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
683 					| RDMA_CORE_CAP_IB_MAD  \
684 					| RDMA_CORE_CAP_IB_CM   \
685 					| RDMA_CORE_CAP_AF_IB   \
686 					| RDMA_CORE_CAP_ETH_AH)
687 #define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
688 					| RDMA_CORE_CAP_IW_CM)
689 #define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
690 					| RDMA_CORE_CAP_OPA_MAD)
691 
692 #define RDMA_CORE_PORT_RAW_PACKET	(RDMA_CORE_CAP_PROT_RAW_PACKET)
693 
694 #define RDMA_CORE_PORT_USNIC		(RDMA_CORE_CAP_PROT_USNIC)
695 
696 struct ib_port_attr {
697 	u64			subnet_prefix;
698 	enum ib_port_state	state;
699 	enum ib_mtu		max_mtu;
700 	enum ib_mtu		active_mtu;
701 	u32                     phys_mtu;
702 	int			gid_tbl_len;
703 	unsigned int		ip_gids:1;
704 	/* This is the value from PortInfo CapabilityMask, defined by IBA */
705 	u32			port_cap_flags;
706 	u32			max_msg_sz;
707 	u32			bad_pkey_cntr;
708 	u32			qkey_viol_cntr;
709 	u16			pkey_tbl_len;
710 	u32			sm_lid;
711 	u32			lid;
712 	u8			lmc;
713 	u8			max_vl_num;
714 	u8			sm_sl;
715 	u8			subnet_timeout;
716 	u8			init_type_reply;
717 	u8			active_width;
718 	u16			active_speed;
719 	u8                      phys_state;
720 	u16			port_cap_flags2;
721 };
722 
723 enum ib_device_modify_flags {
724 	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
725 	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
726 };
727 
728 #define IB_DEVICE_NODE_DESC_MAX 64
729 
730 struct ib_device_modify {
731 	u64	sys_image_guid;
732 	char	node_desc[IB_DEVICE_NODE_DESC_MAX];
733 };
734 
735 enum ib_port_modify_flags {
736 	IB_PORT_SHUTDOWN		= 1,
737 	IB_PORT_INIT_TYPE		= (1<<2),
738 	IB_PORT_RESET_QKEY_CNTR		= (1<<3),
739 	IB_PORT_OPA_MASK_CHG		= (1<<4)
740 };
741 
742 struct ib_port_modify {
743 	u32	set_port_cap_mask;
744 	u32	clr_port_cap_mask;
745 	u8	init_type;
746 };
747 
748 enum ib_event_type {
749 	IB_EVENT_CQ_ERR,
750 	IB_EVENT_QP_FATAL,
751 	IB_EVENT_QP_REQ_ERR,
752 	IB_EVENT_QP_ACCESS_ERR,
753 	IB_EVENT_COMM_EST,
754 	IB_EVENT_SQ_DRAINED,
755 	IB_EVENT_PATH_MIG,
756 	IB_EVENT_PATH_MIG_ERR,
757 	IB_EVENT_DEVICE_FATAL,
758 	IB_EVENT_PORT_ACTIVE,
759 	IB_EVENT_PORT_ERR,
760 	IB_EVENT_LID_CHANGE,
761 	IB_EVENT_PKEY_CHANGE,
762 	IB_EVENT_SM_CHANGE,
763 	IB_EVENT_SRQ_ERR,
764 	IB_EVENT_SRQ_LIMIT_REACHED,
765 	IB_EVENT_QP_LAST_WQE_REACHED,
766 	IB_EVENT_CLIENT_REREGISTER,
767 	IB_EVENT_GID_CHANGE,
768 	IB_EVENT_WQ_FATAL,
769 	IB_EVENT_DEVICE_SPEED_CHANGE,
770 };
771 
772 const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
773 
774 struct ib_event {
775 	struct ib_device	*device;
776 	union {
777 		struct ib_cq	*cq;
778 		struct ib_qp	*qp;
779 		struct ib_srq	*srq;
780 		struct ib_wq	*wq;
781 		u32		port_num;
782 	} element;
783 	enum ib_event_type	event;
784 };
785 
786 struct ib_event_handler {
787 	struct ib_device *device;
788 	void            (*handler)(struct ib_event_handler *, struct ib_event *);
789 	struct list_head  list;
790 };
791 
792 #define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
793 	do {							\
794 		(_ptr)->device  = _device;			\
795 		(_ptr)->handler = _handler;			\
796 		INIT_LIST_HEAD(&(_ptr)->list);			\
797 	} while (0)
798 
799 struct ib_global_route {
800 	const struct ib_gid_attr *sgid_attr;
801 	union ib_gid	dgid;
802 	u32		flow_label;
803 	u8		sgid_index;
804 	u8		hop_limit;
805 	u8		traffic_class;
806 };
807 
808 struct ib_grh {
809 	__be32		version_tclass_flow;
810 	__be16		paylen;
811 	u8		next_hdr;
812 	u8		hop_limit;
813 	union ib_gid	sgid;
814 	union ib_gid	dgid;
815 };
816 
817 union rdma_network_hdr {
818 	struct ib_grh ibgrh;
819 	struct {
820 		/* The IB spec states that if it's IPv4, the header
821 		 * is located in the last 20 bytes of the header.
822 		 */
823 		u8		reserved[20];
824 		struct iphdr	roce4grh;
825 	};
826 };
827 
828 #define IB_QPN_MASK		0xFFFFFF
829 
830 enum {
831 	IB_MULTICAST_QPN = 0xffffff
832 };
833 
834 #define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
835 #define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)
836 
837 enum ib_ah_flags {
838 	IB_AH_GRH	= 1
839 };
840 
841 enum ib_rate {
842 	IB_RATE_PORT_CURRENT = 0,
843 	IB_RATE_2_5_GBPS = 2,
844 	IB_RATE_5_GBPS   = 5,
845 	IB_RATE_10_GBPS  = 3,
846 	IB_RATE_20_GBPS  = 6,
847 	IB_RATE_30_GBPS  = 4,
848 	IB_RATE_40_GBPS  = 7,
849 	IB_RATE_60_GBPS  = 8,
850 	IB_RATE_80_GBPS  = 9,
851 	IB_RATE_120_GBPS = 10,
852 	IB_RATE_14_GBPS  = 11,
853 	IB_RATE_56_GBPS  = 12,
854 	IB_RATE_112_GBPS = 13,
855 	IB_RATE_168_GBPS = 14,
856 	IB_RATE_25_GBPS  = 15,
857 	IB_RATE_100_GBPS = 16,
858 	IB_RATE_200_GBPS = 17,
859 	IB_RATE_300_GBPS = 18,
860 	IB_RATE_28_GBPS  = 19,
861 	IB_RATE_50_GBPS  = 20,
862 	IB_RATE_400_GBPS = 21,
863 	IB_RATE_600_GBPS = 22,
864 	IB_RATE_800_GBPS = 23,
865 	IB_RATE_1600_GBPS = 25,
866 };
867 
868 /**
869  * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
870  * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
871  * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
872  * @rate: rate to convert.
873  */
874 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
875 
876 /**
877  * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
878  * For example, IB_RATE_2_5_GBPS will be converted to 2500.
879  * @rate: rate to convert.
880  */
881 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
882 
883 struct ib_port_speed_info {
884 	const char *str;
885 	int rate;	/* in deci-Gb/sec (100 MBps units) */
886 };
887 
888 /**
889  * ib_port_attr_to_speed_info - Convert port attributes to speed information
890  * @attr: Port attributes containing active_speed and active_width
891  * @speed_info: Speed information to return
892  *
893  * Returns 0 on success, -EINVAL on error.
894  */
895 int ib_port_attr_to_speed_info(struct ib_port_attr *attr,
896 			       struct ib_port_speed_info *speed_info);
897 
898 /**
899  * enum ib_mr_type - memory region type
900  * @IB_MR_TYPE_MEM_REG:       memory region that is used for
901  *                            normal registration
902  * @IB_MR_TYPE_SG_GAPS:       memory region that is capable to
903  *                            register any arbitrary sg lists (without
904  *                            the normal mr constraints - see
905  *                            ib_map_mr_sg)
906  * @IB_MR_TYPE_DM:            memory region that is used for device
907  *                            memory registration
908  * @IB_MR_TYPE_USER:          memory region that is used for the user-space
909  *                            application
910  * @IB_MR_TYPE_DMA:           memory region that is used for DMA operations
911  *                            without address translations (VA=PA)
912  * @IB_MR_TYPE_INTEGRITY:     memory region that is used for
913  *                            data integrity operations
914  */
915 enum ib_mr_type {
916 	IB_MR_TYPE_MEM_REG,
917 	IB_MR_TYPE_SG_GAPS,
918 	IB_MR_TYPE_DM,
919 	IB_MR_TYPE_USER,
920 	IB_MR_TYPE_DMA,
921 	IB_MR_TYPE_INTEGRITY,
922 };
923 
924 enum ib_mr_status_check {
925 	IB_MR_CHECK_SIG_STATUS = 1,
926 };
927 
928 /**
929  * struct ib_mr_status - Memory region status container
930  *
931  * @fail_status: Bitmask of MR checks status. For each
932  *     failed check a corresponding status bit is set.
933  * @sig_err: Additional info for IB_MR_CEHCK_SIG_STATUS
934  *     failure.
935  */
936 struct ib_mr_status {
937 	u32		    fail_status;
938 	struct ib_sig_err   sig_err;
939 };
940 
941 /**
942  * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
943  * enum.
944  * @mult: multiple to convert.
945  */
946 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
947 
948 struct rdma_ah_init_attr {
949 	struct rdma_ah_attr *ah_attr;
950 	u32 flags;
951 	struct net_device *xmit_slave;
952 };
953 
954 enum rdma_ah_attr_type {
955 	RDMA_AH_ATTR_TYPE_UNDEFINED,
956 	RDMA_AH_ATTR_TYPE_IB,
957 	RDMA_AH_ATTR_TYPE_ROCE,
958 	RDMA_AH_ATTR_TYPE_OPA,
959 };
960 
961 struct ib_ah_attr {
962 	u16			dlid;
963 	u8			src_path_bits;
964 };
965 
966 struct roce_ah_attr {
967 	u8			dmac[ETH_ALEN];
968 };
969 
970 struct opa_ah_attr {
971 	u32			dlid;
972 	u8			src_path_bits;
973 	bool			make_grd;
974 };
975 
976 struct rdma_ah_attr {
977 	struct ib_global_route	grh;
978 	u8			sl;
979 	u8			static_rate;
980 	u32			port_num;
981 	u8			ah_flags;
982 	enum rdma_ah_attr_type type;
983 	union {
984 		struct ib_ah_attr ib;
985 		struct roce_ah_attr roce;
986 		struct opa_ah_attr opa;
987 	};
988 };
989 
990 enum ib_wc_status {
991 	IB_WC_SUCCESS,
992 	IB_WC_LOC_LEN_ERR,
993 	IB_WC_LOC_QP_OP_ERR,
994 	IB_WC_LOC_EEC_OP_ERR,
995 	IB_WC_LOC_PROT_ERR,
996 	IB_WC_WR_FLUSH_ERR,
997 	IB_WC_MW_BIND_ERR,
998 	IB_WC_BAD_RESP_ERR,
999 	IB_WC_LOC_ACCESS_ERR,
1000 	IB_WC_REM_INV_REQ_ERR,
1001 	IB_WC_REM_ACCESS_ERR,
1002 	IB_WC_REM_OP_ERR,
1003 	IB_WC_RETRY_EXC_ERR,
1004 	IB_WC_RNR_RETRY_EXC_ERR,
1005 	IB_WC_LOC_RDD_VIOL_ERR,
1006 	IB_WC_REM_INV_RD_REQ_ERR,
1007 	IB_WC_REM_ABORT_ERR,
1008 	IB_WC_INV_EECN_ERR,
1009 	IB_WC_INV_EEC_STATE_ERR,
1010 	IB_WC_FATAL_ERR,
1011 	IB_WC_RESP_TIMEOUT_ERR,
1012 	IB_WC_GENERAL_ERR
1013 };
1014 
1015 const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
1016 
1017 enum ib_wc_opcode {
1018 	IB_WC_SEND = IB_UVERBS_WC_SEND,
1019 	IB_WC_RDMA_WRITE = IB_UVERBS_WC_RDMA_WRITE,
1020 	IB_WC_RDMA_READ = IB_UVERBS_WC_RDMA_READ,
1021 	IB_WC_COMP_SWAP = IB_UVERBS_WC_COMP_SWAP,
1022 	IB_WC_FETCH_ADD = IB_UVERBS_WC_FETCH_ADD,
1023 	IB_WC_BIND_MW = IB_UVERBS_WC_BIND_MW,
1024 	IB_WC_LOCAL_INV = IB_UVERBS_WC_LOCAL_INV,
1025 	IB_WC_LSO = IB_UVERBS_WC_TSO,
1026 	IB_WC_ATOMIC_WRITE = IB_UVERBS_WC_ATOMIC_WRITE,
1027 	IB_WC_REG_MR,
1028 	IB_WC_MASKED_COMP_SWAP,
1029 	IB_WC_MASKED_FETCH_ADD,
1030 	IB_WC_FLUSH = IB_UVERBS_WC_FLUSH,
1031 /*
1032  * Set value of IB_WC_RECV so consumers can test if a completion is a
1033  * receive by testing (opcode & IB_WC_RECV).
1034  */
1035 	IB_WC_RECV			= 1 << 7,
1036 	IB_WC_RECV_RDMA_WITH_IMM
1037 };
1038 
1039 enum ib_wc_flags {
1040 	IB_WC_GRH		= 1,
1041 	IB_WC_WITH_IMM		= (1<<1),
1042 	IB_WC_WITH_INVALIDATE	= (1<<2),
1043 	IB_WC_IP_CSUM_OK	= (1<<3),
1044 	IB_WC_WITH_SMAC		= (1<<4),
1045 	IB_WC_WITH_VLAN		= (1<<5),
1046 	IB_WC_WITH_NETWORK_HDR_TYPE	= (1<<6),
1047 };
1048 
1049 struct ib_wc {
1050 	union {
1051 		u64		wr_id;
1052 		struct ib_cqe	*wr_cqe;
1053 	};
1054 	enum ib_wc_status	status;
1055 	enum ib_wc_opcode	opcode;
1056 	u32			vendor_err;
1057 	u32			byte_len;
1058 	struct ib_qp	       *qp;
1059 	union {
1060 		__be32		imm_data;
1061 		u32		invalidate_rkey;
1062 	} ex;
1063 	u32			src_qp;
1064 	u32			slid;
1065 	int			wc_flags;
1066 	u16			pkey_index;
1067 	u8			sl;
1068 	u8			dlid_path_bits;
1069 	u32 port_num; /* valid only for DR SMPs on switches */
1070 	u8			smac[ETH_ALEN];
1071 	u16			vlan_id;
1072 	u8			network_hdr_type;
1073 };
1074 
1075 enum ib_cq_notify_flags {
1076 	IB_CQ_SOLICITED			= 1 << 0,
1077 	IB_CQ_NEXT_COMP			= 1 << 1,
1078 	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
1079 	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
1080 };
1081 
1082 enum ib_srq_type {
1083 	IB_SRQT_BASIC = IB_UVERBS_SRQT_BASIC,
1084 	IB_SRQT_XRC = IB_UVERBS_SRQT_XRC,
1085 	IB_SRQT_TM = IB_UVERBS_SRQT_TM,
1086 };
1087 
1088 static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
1089 {
1090 	return srq_type == IB_SRQT_XRC ||
1091 	       srq_type == IB_SRQT_TM;
1092 }
1093 
1094 enum ib_srq_attr_mask {
1095 	IB_SRQ_MAX_WR	= 1 << 0,
1096 	IB_SRQ_LIMIT	= 1 << 1,
1097 };
1098 
1099 struct ib_srq_attr {
1100 	u32	max_wr;
1101 	u32	max_sge;
1102 	u32	srq_limit;
1103 };
1104 
1105 struct ib_srq_init_attr {
1106 	void		      (*event_handler)(struct ib_event *, void *);
1107 	void		       *srq_context;
1108 	struct ib_srq_attr	attr;
1109 	enum ib_srq_type	srq_type;
1110 
1111 	struct {
1112 		struct ib_cq   *cq;
1113 		union {
1114 			struct {
1115 				struct ib_xrcd *xrcd;
1116 			} xrc;
1117 
1118 			struct {
1119 				u32		max_num_tags;
1120 			} tag_matching;
1121 		};
1122 	} ext;
1123 };
1124 
1125 struct ib_qp_cap {
1126 	u32	max_send_wr;
1127 	u32	max_recv_wr;
1128 	u32	max_send_sge;
1129 	u32	max_recv_sge;
1130 	u32	max_inline_data;
1131 
1132 	/*
1133 	 * Maximum number of rdma_rw_ctx structures in flight at a time.
1134 	 * ib_create_qp() will calculate the right amount of needed WRs
1135 	 * and MRs based on this.
1136 	 */
1137 	u32	max_rdma_ctxs;
1138 };
1139 
1140 enum ib_sig_type {
1141 	IB_SIGNAL_ALL_WR,
1142 	IB_SIGNAL_REQ_WR
1143 };
1144 
1145 enum ib_qp_type {
1146 	/*
1147 	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
1148 	 * here (and in that order) since the MAD layer uses them as
1149 	 * indices into a 2-entry table.
1150 	 */
1151 	IB_QPT_SMI,
1152 	IB_QPT_GSI,
1153 
1154 	IB_QPT_RC = IB_UVERBS_QPT_RC,
1155 	IB_QPT_UC = IB_UVERBS_QPT_UC,
1156 	IB_QPT_UD = IB_UVERBS_QPT_UD,
1157 	IB_QPT_RAW_IPV6,
1158 	IB_QPT_RAW_ETHERTYPE,
1159 	IB_QPT_RAW_PACKET = IB_UVERBS_QPT_RAW_PACKET,
1160 	IB_QPT_XRC_INI = IB_UVERBS_QPT_XRC_INI,
1161 	IB_QPT_XRC_TGT = IB_UVERBS_QPT_XRC_TGT,
1162 	IB_QPT_MAX,
1163 	IB_QPT_DRIVER = IB_UVERBS_QPT_DRIVER,
1164 	/* Reserve a range for qp types internal to the low level driver.
1165 	 * These qp types will not be visible at the IB core layer, so the
1166 	 * IB_QPT_MAX usages should not be affected in the core layer
1167 	 */
1168 	IB_QPT_RESERVED1 = 0x1000,
1169 	IB_QPT_RESERVED2,
1170 	IB_QPT_RESERVED3,
1171 	IB_QPT_RESERVED4,
1172 	IB_QPT_RESERVED5,
1173 	IB_QPT_RESERVED6,
1174 	IB_QPT_RESERVED7,
1175 	IB_QPT_RESERVED8,
1176 	IB_QPT_RESERVED9,
1177 	IB_QPT_RESERVED10,
1178 };
1179 
1180 enum ib_qp_create_flags {
1181 	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
1182 	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	=
1183 		IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
1184 	IB_QP_CREATE_CROSS_CHANNEL              = 1 << 2,
1185 	IB_QP_CREATE_MANAGED_SEND               = 1 << 3,
1186 	IB_QP_CREATE_MANAGED_RECV               = 1 << 4,
1187 	IB_QP_CREATE_NETIF_QP			= 1 << 5,
1188 	IB_QP_CREATE_INTEGRITY_EN		= 1 << 6,
1189 	IB_QP_CREATE_NETDEV_USE			= 1 << 7,
1190 	IB_QP_CREATE_SCATTER_FCS		=
1191 		IB_UVERBS_QP_CREATE_SCATTER_FCS,
1192 	IB_QP_CREATE_CVLAN_STRIPPING		=
1193 		IB_UVERBS_QP_CREATE_CVLAN_STRIPPING,
1194 	IB_QP_CREATE_SOURCE_QPN			= 1 << 10,
1195 	IB_QP_CREATE_PCI_WRITE_END_PADDING	=
1196 		IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING,
1197 	/* reserve bits 26-31 for low level drivers' internal use */
1198 	IB_QP_CREATE_RESERVED_START		= 1 << 26,
1199 	IB_QP_CREATE_RESERVED_END		= 1 << 31,
1200 };
1201 
1202 /*
1203  * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
1204  * callback to destroy the passed in QP.
1205  */
1206 
1207 struct ib_qp_init_attr {
1208 	/* This callback occurs in workqueue context */
1209 	void                  (*event_handler)(struct ib_event *, void *);
1210 
1211 	void		       *qp_context;
1212 	struct ib_cq	       *send_cq;
1213 	struct ib_cq	       *recv_cq;
1214 	struct ib_srq	       *srq;
1215 	struct ib_xrcd	       *xrcd;     /* XRC TGT QPs only */
1216 	struct ib_qp_cap	cap;
1217 	enum ib_sig_type	sq_sig_type;
1218 	enum ib_qp_type		qp_type;
1219 	u32			create_flags;
1220 
1221 	/*
1222 	 * Only needed for special QP types, or when using the RW API.
1223 	 */
1224 	u32			port_num;
1225 	struct ib_rwq_ind_table *rwq_ind_tbl;
1226 	u32			source_qpn;
1227 };
1228 
1229 struct ib_qp_open_attr {
1230 	void                  (*event_handler)(struct ib_event *, void *);
1231 	void		       *qp_context;
1232 	u32			qp_num;
1233 	enum ib_qp_type		qp_type;
1234 };
1235 
1236 enum ib_rnr_timeout {
1237 	IB_RNR_TIMER_655_36 =  0,
1238 	IB_RNR_TIMER_000_01 =  1,
1239 	IB_RNR_TIMER_000_02 =  2,
1240 	IB_RNR_TIMER_000_03 =  3,
1241 	IB_RNR_TIMER_000_04 =  4,
1242 	IB_RNR_TIMER_000_06 =  5,
1243 	IB_RNR_TIMER_000_08 =  6,
1244 	IB_RNR_TIMER_000_12 =  7,
1245 	IB_RNR_TIMER_000_16 =  8,
1246 	IB_RNR_TIMER_000_24 =  9,
1247 	IB_RNR_TIMER_000_32 = 10,
1248 	IB_RNR_TIMER_000_48 = 11,
1249 	IB_RNR_TIMER_000_64 = 12,
1250 	IB_RNR_TIMER_000_96 = 13,
1251 	IB_RNR_TIMER_001_28 = 14,
1252 	IB_RNR_TIMER_001_92 = 15,
1253 	IB_RNR_TIMER_002_56 = 16,
1254 	IB_RNR_TIMER_003_84 = 17,
1255 	IB_RNR_TIMER_005_12 = 18,
1256 	IB_RNR_TIMER_007_68 = 19,
1257 	IB_RNR_TIMER_010_24 = 20,
1258 	IB_RNR_TIMER_015_36 = 21,
1259 	IB_RNR_TIMER_020_48 = 22,
1260 	IB_RNR_TIMER_030_72 = 23,
1261 	IB_RNR_TIMER_040_96 = 24,
1262 	IB_RNR_TIMER_061_44 = 25,
1263 	IB_RNR_TIMER_081_92 = 26,
1264 	IB_RNR_TIMER_122_88 = 27,
1265 	IB_RNR_TIMER_163_84 = 28,
1266 	IB_RNR_TIMER_245_76 = 29,
1267 	IB_RNR_TIMER_327_68 = 30,
1268 	IB_RNR_TIMER_491_52 = 31
1269 };
1270 
1271 enum ib_qp_attr_mask {
1272 	IB_QP_STATE			= 1,
1273 	IB_QP_CUR_STATE			= (1<<1),
1274 	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
1275 	IB_QP_ACCESS_FLAGS		= (1<<3),
1276 	IB_QP_PKEY_INDEX		= (1<<4),
1277 	IB_QP_PORT			= (1<<5),
1278 	IB_QP_QKEY			= (1<<6),
1279 	IB_QP_AV			= (1<<7),
1280 	IB_QP_PATH_MTU			= (1<<8),
1281 	IB_QP_TIMEOUT			= (1<<9),
1282 	IB_QP_RETRY_CNT			= (1<<10),
1283 	IB_QP_RNR_RETRY			= (1<<11),
1284 	IB_QP_RQ_PSN			= (1<<12),
1285 	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
1286 	IB_QP_ALT_PATH			= (1<<14),
1287 	IB_QP_MIN_RNR_TIMER		= (1<<15),
1288 	IB_QP_SQ_PSN			= (1<<16),
1289 	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
1290 	IB_QP_PATH_MIG_STATE		= (1<<18),
1291 	IB_QP_CAP			= (1<<19),
1292 	IB_QP_DEST_QPN			= (1<<20),
1293 	IB_QP_RESERVED1			= (1<<21),
1294 	IB_QP_RESERVED2			= (1<<22),
1295 	IB_QP_RESERVED3			= (1<<23),
1296 	IB_QP_RESERVED4			= (1<<24),
1297 	IB_QP_RATE_LIMIT		= (1<<25),
1298 
1299 	IB_QP_ATTR_STANDARD_BITS = GENMASK(20, 0),
1300 };
1301 
1302 enum ib_qp_state {
1303 	IB_QPS_RESET,
1304 	IB_QPS_INIT,
1305 	IB_QPS_RTR,
1306 	IB_QPS_RTS,
1307 	IB_QPS_SQD,
1308 	IB_QPS_SQE,
1309 	IB_QPS_ERR
1310 };
1311 
1312 enum ib_mig_state {
1313 	IB_MIG_MIGRATED,
1314 	IB_MIG_REARM,
1315 	IB_MIG_ARMED
1316 };
1317 
1318 enum ib_mw_type {
1319 	IB_MW_TYPE_1 = 1,
1320 	IB_MW_TYPE_2 = 2
1321 };
1322 
1323 struct ib_qp_attr {
1324 	enum ib_qp_state	qp_state;
1325 	enum ib_qp_state	cur_qp_state;
1326 	enum ib_mtu		path_mtu;
1327 	enum ib_mig_state	path_mig_state;
1328 	u32			qkey;
1329 	u32			rq_psn;
1330 	u32			sq_psn;
1331 	u32			dest_qp_num;
1332 	int			qp_access_flags;
1333 	struct ib_qp_cap	cap;
1334 	struct rdma_ah_attr	ah_attr;
1335 	struct rdma_ah_attr	alt_ah_attr;
1336 	u16			pkey_index;
1337 	u16			alt_pkey_index;
1338 	u8			en_sqd_async_notify;
1339 	u8			sq_draining;
1340 	u8			max_rd_atomic;
1341 	u8			max_dest_rd_atomic;
1342 	u8			min_rnr_timer;
1343 	u32			port_num;
1344 	u8			timeout;
1345 	u8			retry_cnt;
1346 	u8			rnr_retry;
1347 	u32			alt_port_num;
1348 	u8			alt_timeout;
1349 	u32			rate_limit;
1350 	struct net_device	*xmit_slave;
1351 };
1352 
1353 enum ib_wr_opcode {
1354 	/* These are shared with userspace */
1355 	IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
1356 	IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
1357 	IB_WR_SEND = IB_UVERBS_WR_SEND,
1358 	IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
1359 	IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
1360 	IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
1361 	IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
1362 	IB_WR_BIND_MW = IB_UVERBS_WR_BIND_MW,
1363 	IB_WR_LSO = IB_UVERBS_WR_TSO,
1364 	IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
1365 	IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
1366 	IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
1367 	IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
1368 		IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
1369 	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
1370 		IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1371 	IB_WR_FLUSH = IB_UVERBS_WR_FLUSH,
1372 	IB_WR_ATOMIC_WRITE = IB_UVERBS_WR_ATOMIC_WRITE,
1373 
1374 	/* These are kernel only and can not be issued by userspace */
1375 	IB_WR_REG_MR = 0x20,
1376 	IB_WR_REG_MR_INTEGRITY,
1377 
1378 	/* reserve values for low level drivers' internal use.
1379 	 * These values will not be used at all in the ib core layer.
1380 	 */
1381 	IB_WR_RESERVED1 = 0xf0,
1382 	IB_WR_RESERVED2,
1383 	IB_WR_RESERVED3,
1384 	IB_WR_RESERVED4,
1385 	IB_WR_RESERVED5,
1386 	IB_WR_RESERVED6,
1387 	IB_WR_RESERVED7,
1388 	IB_WR_RESERVED8,
1389 	IB_WR_RESERVED9,
1390 	IB_WR_RESERVED10,
1391 };
1392 
1393 enum ib_send_flags {
1394 	IB_SEND_FENCE		= 1,
1395 	IB_SEND_SIGNALED	= (1<<1),
1396 	IB_SEND_SOLICITED	= (1<<2),
1397 	IB_SEND_INLINE		= (1<<3),
1398 	IB_SEND_IP_CSUM		= (1<<4),
1399 
1400 	/* reserve bits 26-31 for low level drivers' internal use */
1401 	IB_SEND_RESERVED_START	= (1 << 26),
1402 	IB_SEND_RESERVED_END	= (1 << 31),
1403 };
1404 
1405 struct ib_sge {
1406 	u64	addr;
1407 	u32	length;
1408 	u32	lkey;
1409 };
1410 
1411 struct ib_cqe {
1412 	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
1413 };
1414 
1415 struct ib_send_wr {
1416 	struct ib_send_wr      *next;
1417 	union {
1418 		u64		wr_id;
1419 		struct ib_cqe	*wr_cqe;
1420 	};
1421 	struct ib_sge	       *sg_list;
1422 	int			num_sge;
1423 	enum ib_wr_opcode	opcode;
1424 	int			send_flags;
1425 	union {
1426 		__be32		imm_data;
1427 		u32		invalidate_rkey;
1428 	} ex;
1429 };
1430 
1431 struct ib_rdma_wr {
1432 	struct ib_send_wr	wr;
1433 	u64			remote_addr;
1434 	u32			rkey;
1435 };
1436 
1437 static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
1438 {
1439 	return container_of(wr, struct ib_rdma_wr, wr);
1440 }
1441 
1442 struct ib_atomic_wr {
1443 	struct ib_send_wr	wr;
1444 	u64			remote_addr;
1445 	u64			compare_add;
1446 	u64			swap;
1447 	u64			compare_add_mask;
1448 	u64			swap_mask;
1449 	u32			rkey;
1450 };
1451 
1452 static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
1453 {
1454 	return container_of(wr, struct ib_atomic_wr, wr);
1455 }
1456 
1457 struct ib_ud_wr {
1458 	struct ib_send_wr	wr;
1459 	struct ib_ah		*ah;
1460 	void			*header;
1461 	int			hlen;
1462 	int			mss;
1463 	u32			remote_qpn;
1464 	u32			remote_qkey;
1465 	u16			pkey_index; /* valid for GSI only */
1466 	u32			port_num; /* valid for DR SMPs on switch only */
1467 };
1468 
1469 static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
1470 {
1471 	return container_of(wr, struct ib_ud_wr, wr);
1472 }
1473 
1474 struct ib_reg_wr {
1475 	struct ib_send_wr	wr;
1476 	struct ib_mr		*mr;
1477 	u32			key;
1478 	int			access;
1479 };
1480 
1481 static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
1482 {
1483 	return container_of(wr, struct ib_reg_wr, wr);
1484 }
1485 
1486 struct ib_recv_wr {
1487 	struct ib_recv_wr      *next;
1488 	union {
1489 		u64		wr_id;
1490 		struct ib_cqe	*wr_cqe;
1491 	};
1492 	struct ib_sge	       *sg_list;
1493 	int			num_sge;
1494 };
1495 
1496 enum ib_access_flags {
1497 	IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
1498 	IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
1499 	IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
1500 	IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
1501 	IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
1502 	IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
1503 	IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
1504 	IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
1505 	IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING,
1506 	IB_ACCESS_FLUSH_GLOBAL = IB_UVERBS_ACCESS_FLUSH_GLOBAL,
1507 	IB_ACCESS_FLUSH_PERSISTENT = IB_UVERBS_ACCESS_FLUSH_PERSISTENT,
1508 
1509 	IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE,
1510 	IB_ACCESS_SUPPORTED =
1511 		((IB_ACCESS_FLUSH_PERSISTENT << 1) - 1) | IB_ACCESS_OPTIONAL,
1512 };
1513 
1514 /*
1515  * XXX: these are apparently used for ->rereg_user_mr, no idea why they
1516  * are hidden here instead of a uapi header!
1517  */
1518 enum ib_mr_rereg_flags {
1519 	IB_MR_REREG_TRANS	= 1,
1520 	IB_MR_REREG_PD		= (1<<1),
1521 	IB_MR_REREG_ACCESS	= (1<<2),
1522 	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
1523 };
1524 
1525 struct ib_umem;
1526 
1527 enum rdma_remove_reason {
1528 	/*
1529 	 * Userspace requested uobject deletion or initial try
1530 	 * to remove uobject via cleanup. Call could fail
1531 	 */
1532 	RDMA_REMOVE_DESTROY,
1533 	/* Context deletion. This call should delete the actual object itself */
1534 	RDMA_REMOVE_CLOSE,
1535 	/* Driver is being hot-unplugged. This call should delete the actual object itself */
1536 	RDMA_REMOVE_DRIVER_REMOVE,
1537 	/* uobj is being cleaned-up before being committed */
1538 	RDMA_REMOVE_ABORT,
1539 	/* The driver failed to destroy the uobject and is being disconnected */
1540 	RDMA_REMOVE_DRIVER_FAILURE,
1541 };
1542 
1543 struct ib_rdmacg_object {
1544 #ifdef CONFIG_CGROUP_RDMA
1545 	struct rdma_cgroup	*cg;		/* owner rdma cgroup */
1546 #endif
1547 };
1548 
1549 struct ib_ucontext {
1550 	struct ib_device       *device;
1551 	struct ib_uverbs_file  *ufile;
1552 
1553 	struct ib_rdmacg_object	cg_obj;
1554 	u64 enabled_caps;
1555 	/*
1556 	 * Implementation details of the RDMA core, don't use in drivers:
1557 	 */
1558 	struct rdma_restrack_entry res;
1559 	struct xarray mmap_xa;
1560 };
1561 
1562 struct ib_uobject {
1563 	u64			user_handle;	/* handle given to us by userspace */
1564 	/* ufile & ucontext owning this object */
1565 	struct ib_uverbs_file  *ufile;
1566 	/* FIXME, save memory: ufile->context == context */
1567 	struct ib_ucontext     *context;	/* associated user context */
1568 	void		       *object;		/* containing object */
1569 	struct list_head	list;		/* link to context's list */
1570 	struct ib_rdmacg_object	cg_obj;		/* rdmacg object */
1571 	int			id;		/* index into kernel idr */
1572 	struct kref		ref;
1573 	atomic_t		usecnt;		/* protects exclusive access */
1574 	struct rcu_head		rcu;		/* kfree_rcu() overhead */
1575 
1576 	const struct uverbs_api_object *uapi_object;
1577 };
1578 
1579 struct ib_udata {
1580 	const void __user *inbuf;
1581 	void __user *outbuf;
1582 	size_t       inlen;
1583 	size_t       outlen;
1584 };
1585 
1586 struct ib_pd {
1587 	u32			local_dma_lkey;
1588 	u32			flags;
1589 	struct ib_device       *device;
1590 	struct ib_uobject      *uobject;
1591 	atomic_t          	usecnt; /* count all resources */
1592 
1593 	u32			unsafe_global_rkey;
1594 
1595 	/*
1596 	 * Implementation details of the RDMA core, don't use in drivers:
1597 	 */
1598 	struct ib_mr	       *__internal_mr;
1599 	struct rdma_restrack_entry res;
1600 };
1601 
1602 struct ib_xrcd {
1603 	struct ib_device       *device;
1604 	atomic_t		usecnt; /* count all exposed resources */
1605 	struct inode	       *inode;
1606 	struct rw_semaphore	tgt_qps_rwsem;
1607 	struct xarray		tgt_qps;
1608 };
1609 
1610 struct ib_ah {
1611 	struct ib_device	*device;
1612 	struct ib_pd		*pd;
1613 	struct ib_uobject	*uobject;
1614 	const struct ib_gid_attr *sgid_attr;
1615 	enum rdma_ah_attr_type	type;
1616 };
1617 
1618 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1619 
1620 enum ib_poll_context {
1621 	IB_POLL_SOFTIRQ,	   /* poll from softirq context */
1622 	IB_POLL_WORKQUEUE,	   /* poll from workqueue */
1623 	IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
1624 	IB_POLL_LAST_POOL_TYPE = IB_POLL_UNBOUND_WORKQUEUE,
1625 
1626 	IB_POLL_DIRECT,		   /* caller context, no hw completions */
1627 };
1628 
1629 struct ib_cq {
1630 	struct ib_device       *device;
1631 	struct ib_ucq_object   *uobject;
1632 	ib_comp_handler   	comp_handler;
1633 	void                  (*event_handler)(struct ib_event *, void *);
1634 	void                   *cq_context;
1635 	int               	cqe;
1636 	unsigned int		cqe_used;
1637 	atomic_t          	usecnt; /* count number of work queues */
1638 	enum ib_poll_context	poll_ctx;
1639 	struct ib_wc		*wc;
1640 	struct list_head        pool_entry;
1641 	union {
1642 		struct irq_poll		iop;
1643 		struct work_struct	work;
1644 	};
1645 	struct workqueue_struct *comp_wq;
1646 	struct dim *dim;
1647 
1648 	/* updated only by trace points */
1649 	ktime_t timestamp;
1650 	u8 interrupt:1;
1651 	u8 shared:1;
1652 	unsigned int comp_vector;
1653 
1654 	/*
1655 	 * Implementation details of the RDMA core, don't use in drivers:
1656 	 */
1657 	struct rdma_restrack_entry res;
1658 };
1659 
1660 struct ib_srq {
1661 	struct ib_device       *device;
1662 	struct ib_pd	       *pd;
1663 	struct ib_usrq_object  *uobject;
1664 	void		      (*event_handler)(struct ib_event *, void *);
1665 	void		       *srq_context;
1666 	enum ib_srq_type	srq_type;
1667 	atomic_t		usecnt;
1668 
1669 	struct {
1670 		struct ib_cq   *cq;
1671 		union {
1672 			struct {
1673 				struct ib_xrcd *xrcd;
1674 				u32		srq_num;
1675 			} xrc;
1676 		};
1677 	} ext;
1678 
1679 	/*
1680 	 * Implementation details of the RDMA core, don't use in drivers:
1681 	 */
1682 	struct rdma_restrack_entry res;
1683 };
1684 
1685 enum ib_raw_packet_caps {
1686 	/*
1687 	 * Strip cvlan from incoming packet and report it in the matching work
1688 	 * completion is supported.
1689 	 */
1690 	IB_RAW_PACKET_CAP_CVLAN_STRIPPING =
1691 		IB_UVERBS_RAW_PACKET_CAP_CVLAN_STRIPPING,
1692 	/*
1693 	 * Scatter FCS field of an incoming packet to host memory is supported.
1694 	 */
1695 	IB_RAW_PACKET_CAP_SCATTER_FCS = IB_UVERBS_RAW_PACKET_CAP_SCATTER_FCS,
1696 	/* Checksum offloads are supported (for both send and receive). */
1697 	IB_RAW_PACKET_CAP_IP_CSUM = IB_UVERBS_RAW_PACKET_CAP_IP_CSUM,
1698 	/*
1699 	 * When a packet is received for an RQ with no receive WQEs, the
1700 	 * packet processing is delayed.
1701 	 */
1702 	IB_RAW_PACKET_CAP_DELAY_DROP = IB_UVERBS_RAW_PACKET_CAP_DELAY_DROP,
1703 };
1704 
1705 enum ib_wq_type {
1706 	IB_WQT_RQ = IB_UVERBS_WQT_RQ,
1707 };
1708 
1709 enum ib_wq_state {
1710 	IB_WQS_RESET,
1711 	IB_WQS_RDY,
1712 	IB_WQS_ERR
1713 };
1714 
1715 struct ib_wq {
1716 	struct ib_device       *device;
1717 	struct ib_uwq_object   *uobject;
1718 	void		    *wq_context;
1719 	void		    (*event_handler)(struct ib_event *, void *);
1720 	struct ib_pd	       *pd;
1721 	struct ib_cq	       *cq;
1722 	u32		wq_num;
1723 	enum ib_wq_state       state;
1724 	enum ib_wq_type	wq_type;
1725 	atomic_t		usecnt;
1726 };
1727 
1728 enum ib_wq_flags {
1729 	IB_WQ_FLAGS_CVLAN_STRIPPING	= IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING,
1730 	IB_WQ_FLAGS_SCATTER_FCS		= IB_UVERBS_WQ_FLAGS_SCATTER_FCS,
1731 	IB_WQ_FLAGS_DELAY_DROP		= IB_UVERBS_WQ_FLAGS_DELAY_DROP,
1732 	IB_WQ_FLAGS_PCI_WRITE_END_PADDING =
1733 				IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING,
1734 };
1735 
1736 struct ib_wq_init_attr {
1737 	void		       *wq_context;
1738 	enum ib_wq_type	wq_type;
1739 	u32		max_wr;
1740 	u32		max_sge;
1741 	struct	ib_cq	       *cq;
1742 	void		    (*event_handler)(struct ib_event *, void *);
1743 	u32		create_flags; /* Use enum ib_wq_flags */
1744 };
1745 
1746 enum ib_wq_attr_mask {
1747 	IB_WQ_STATE		= 1 << 0,
1748 	IB_WQ_CUR_STATE		= 1 << 1,
1749 	IB_WQ_FLAGS		= 1 << 2,
1750 };
1751 
1752 struct ib_wq_attr {
1753 	enum	ib_wq_state	wq_state;
1754 	enum	ib_wq_state	curr_wq_state;
1755 	u32			flags; /* Use enum ib_wq_flags */
1756 	u32			flags_mask; /* Use enum ib_wq_flags */
1757 };
1758 
1759 struct ib_rwq_ind_table {
1760 	struct ib_device	*device;
1761 	struct ib_uobject      *uobject;
1762 	atomic_t		usecnt;
1763 	u32		ind_tbl_num;
1764 	u32		log_ind_tbl_size;
1765 	struct ib_wq	**ind_tbl;
1766 };
1767 
1768 struct ib_rwq_ind_table_init_attr {
1769 	u32		log_ind_tbl_size;
1770 	/* Each entry is a pointer to Receive Work Queue */
1771 	struct ib_wq	**ind_tbl;
1772 };
1773 
1774 enum port_pkey_state {
1775 	IB_PORT_PKEY_NOT_VALID = 0,
1776 	IB_PORT_PKEY_VALID = 1,
1777 	IB_PORT_PKEY_LISTED = 2,
1778 };
1779 
1780 struct ib_qp_security;
1781 
1782 struct ib_port_pkey {
1783 	enum port_pkey_state	state;
1784 	u16			pkey_index;
1785 	u32			port_num;
1786 	struct list_head	qp_list;
1787 	struct list_head	to_error_list;
1788 	struct ib_qp_security  *sec;
1789 };
1790 
1791 struct ib_ports_pkeys {
1792 	struct ib_port_pkey	main;
1793 	struct ib_port_pkey	alt;
1794 };
1795 
1796 struct ib_qp_security {
1797 	struct ib_qp	       *qp;
1798 	struct ib_device       *dev;
1799 	/* Hold this mutex when changing port and pkey settings. */
1800 	struct mutex		mutex;
1801 	struct ib_ports_pkeys  *ports_pkeys;
1802 	/* A list of all open shared QP handles.  Required to enforce security
1803 	 * properly for all users of a shared QP.
1804 	 */
1805 	struct list_head        shared_qp_list;
1806 	void                   *security;
1807 	bool			destroying;
1808 	atomic_t		error_list_count;
1809 	struct completion	error_complete;
1810 	int			error_comps_pending;
1811 };
1812 
1813 /*
1814  * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
1815  * @max_read_sge:  Maximum SGE elements per RDMA READ request.
1816  */
1817 struct ib_qp {
1818 	struct ib_device       *device;
1819 	struct ib_pd	       *pd;
1820 	struct ib_cq	       *send_cq;
1821 	struct ib_cq	       *recv_cq;
1822 	spinlock_t		mr_lock;
1823 	int			mrs_used;
1824 	struct list_head	rdma_mrs;
1825 	struct list_head	sig_mrs;
1826 	struct ib_srq	       *srq;
1827 	struct completion	srq_completion;
1828 	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
1829 	struct list_head	xrcd_list;
1830 
1831 	/* count times opened, mcast attaches, flow attaches */
1832 	atomic_t		usecnt;
1833 	struct list_head	open_list;
1834 	struct ib_qp           *real_qp;
1835 	struct ib_uqp_object   *uobject;
1836 	void                  (*event_handler)(struct ib_event *, void *);
1837 	void                  (*registered_event_handler)(struct ib_event *, void *);
1838 	void		       *qp_context;
1839 	/* sgid_attrs associated with the AV's */
1840 	const struct ib_gid_attr *av_sgid_attr;
1841 	const struct ib_gid_attr *alt_path_sgid_attr;
1842 	u32			qp_num;
1843 	u32			max_write_sge;
1844 	u32			max_read_sge;
1845 	enum ib_qp_type		qp_type;
1846 	struct ib_rwq_ind_table *rwq_ind_tbl;
1847 	struct ib_qp_security  *qp_sec;
1848 	u32			port;
1849 
1850 	bool			integrity_en;
1851 	/*
1852 	 * Implementation details of the RDMA core, don't use in drivers:
1853 	 */
1854 	struct rdma_restrack_entry     res;
1855 
1856 	/* The counter the qp is bind to */
1857 	struct rdma_counter    *counter;
1858 };
1859 
1860 struct ib_dm {
1861 	struct ib_device  *device;
1862 	u32		   length;
1863 	u32		   flags;
1864 	struct ib_uobject *uobject;
1865 	atomic_t	   usecnt;
1866 };
1867 
1868 /* bit values to mark existence of ib_dmah fields */
1869 enum {
1870 	IB_DMAH_CPU_ID_EXISTS,
1871 	IB_DMAH_MEM_TYPE_EXISTS,
1872 	IB_DMAH_PH_EXISTS,
1873 };
1874 
1875 struct ib_dmah {
1876 	struct ib_device *device;
1877 	struct ib_uobject *uobject;
1878 	/*
1879 	 * Implementation details of the RDMA core, don't use in drivers:
1880 	 */
1881 	struct rdma_restrack_entry res;
1882 	u32 cpu_id;
1883 	enum tph_mem_type mem_type;
1884 	atomic_t usecnt;
1885 	u8 ph;
1886 	u8 valid_fields; /* use IB_DMAH_XXX_EXISTS */
1887 };
1888 
1889 struct ib_mr {
1890 	struct ib_device  *device;
1891 	struct ib_pd	  *pd;
1892 	u32		   lkey;
1893 	u32		   rkey;
1894 	u64		   iova;
1895 	u64		   length;
1896 	unsigned int	   page_size;
1897 	enum ib_mr_type	   type;
1898 	bool		   need_inval;
1899 	union {
1900 		struct ib_uobject	*uobject;	/* user */
1901 		struct list_head	qp_entry;	/* FR */
1902 	};
1903 
1904 	struct ib_dm      *dm;
1905 	struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
1906 	struct ib_dmah *dmah;
1907 	/*
1908 	 * Implementation details of the RDMA core, don't use in drivers:
1909 	 */
1910 	struct rdma_restrack_entry res;
1911 };
1912 
1913 struct ib_mw {
1914 	struct ib_device	*device;
1915 	struct ib_pd		*pd;
1916 	struct ib_uobject	*uobject;
1917 	u32			rkey;
1918 	enum ib_mw_type         type;
1919 };
1920 
1921 /* Supported steering options */
1922 enum ib_flow_attr_type {
1923 	/* steering according to rule specifications */
1924 	IB_FLOW_ATTR_NORMAL		= 0x0,
1925 	/* default unicast and multicast rule -
1926 	 * receive all Eth traffic which isn't steered to any QP
1927 	 */
1928 	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
1929 	/* default multicast rule -
1930 	 * receive all Eth multicast traffic which isn't steered to any QP
1931 	 */
1932 	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
1933 	/* sniffer rule - receive all port traffic */
1934 	IB_FLOW_ATTR_SNIFFER		= 0x3
1935 };
1936 
1937 /* Supported steering header types */
1938 enum ib_flow_spec_type {
1939 	/* L2 headers*/
1940 	IB_FLOW_SPEC_ETH		= 0x20,
1941 	IB_FLOW_SPEC_IB			= 0x22,
1942 	/* L3 header*/
1943 	IB_FLOW_SPEC_IPV4		= 0x30,
1944 	IB_FLOW_SPEC_IPV6		= 0x31,
1945 	IB_FLOW_SPEC_ESP                = 0x34,
1946 	/* L4 headers*/
1947 	IB_FLOW_SPEC_TCP		= 0x40,
1948 	IB_FLOW_SPEC_UDP		= 0x41,
1949 	IB_FLOW_SPEC_VXLAN_TUNNEL	= 0x50,
1950 	IB_FLOW_SPEC_GRE		= 0x51,
1951 	IB_FLOW_SPEC_MPLS		= 0x60,
1952 	IB_FLOW_SPEC_INNER		= 0x100,
1953 	/* Actions */
1954 	IB_FLOW_SPEC_ACTION_TAG         = 0x1000,
1955 	IB_FLOW_SPEC_ACTION_DROP        = 0x1001,
1956 	IB_FLOW_SPEC_ACTION_HANDLE	= 0x1002,
1957 	IB_FLOW_SPEC_ACTION_COUNT       = 0x1003,
1958 };
1959 #define IB_FLOW_SPEC_LAYER_MASK	0xF0
1960 #define IB_FLOW_SPEC_SUPPORT_LAYERS 10
1961 
1962 enum ib_flow_flags {
1963 	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
1964 	IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
1965 	IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 3  /* Must be last */
1966 };
1967 
1968 struct ib_flow_eth_filter {
1969 	u8	dst_mac[6];
1970 	u8	src_mac[6];
1971 	__be16	ether_type;
1972 	__be16	vlan_tag;
1973 };
1974 
1975 struct ib_flow_spec_eth {
1976 	u32			  type;
1977 	u16			  size;
1978 	struct ib_flow_eth_filter val;
1979 	struct ib_flow_eth_filter mask;
1980 };
1981 
1982 struct ib_flow_ib_filter {
1983 	__be16 dlid;
1984 	__u8   sl;
1985 };
1986 
1987 struct ib_flow_spec_ib {
1988 	u32			 type;
1989 	u16			 size;
1990 	struct ib_flow_ib_filter val;
1991 	struct ib_flow_ib_filter mask;
1992 };
1993 
1994 /* IPv4 header flags */
1995 enum ib_ipv4_flags {
1996 	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
1997 	IB_IPV4_MORE_FRAG = 0X4  /* For All fragmented packets except the
1998 				    last have this flag set */
1999 };
2000 
2001 struct ib_flow_ipv4_filter {
2002 	__be32	src_ip;
2003 	__be32	dst_ip;
2004 	u8	proto;
2005 	u8	tos;
2006 	u8	ttl;
2007 	u8	flags;
2008 };
2009 
2010 struct ib_flow_spec_ipv4 {
2011 	u32			   type;
2012 	u16			   size;
2013 	struct ib_flow_ipv4_filter val;
2014 	struct ib_flow_ipv4_filter mask;
2015 };
2016 
2017 struct ib_flow_ipv6_filter {
2018 	u8	src_ip[16];
2019 	u8	dst_ip[16];
2020 	__be32	flow_label;
2021 	u8	next_hdr;
2022 	u8	traffic_class;
2023 	u8	hop_limit;
2024 } __packed;
2025 
2026 struct ib_flow_spec_ipv6 {
2027 	u32			   type;
2028 	u16			   size;
2029 	struct ib_flow_ipv6_filter val;
2030 	struct ib_flow_ipv6_filter mask;
2031 };
2032 
2033 struct ib_flow_tcp_udp_filter {
2034 	__be16	dst_port;
2035 	__be16	src_port;
2036 };
2037 
2038 struct ib_flow_spec_tcp_udp {
2039 	u32			      type;
2040 	u16			      size;
2041 	struct ib_flow_tcp_udp_filter val;
2042 	struct ib_flow_tcp_udp_filter mask;
2043 };
2044 
2045 struct ib_flow_tunnel_filter {
2046 	__be32	tunnel_id;
2047 };
2048 
2049 /* ib_flow_spec_tunnel describes the Vxlan tunnel
2050  * the tunnel_id from val has the vni value
2051  */
2052 struct ib_flow_spec_tunnel {
2053 	u32			      type;
2054 	u16			      size;
2055 	struct ib_flow_tunnel_filter  val;
2056 	struct ib_flow_tunnel_filter  mask;
2057 };
2058 
2059 struct ib_flow_esp_filter {
2060 	__be32	spi;
2061 	__be32  seq;
2062 };
2063 
2064 struct ib_flow_spec_esp {
2065 	u32                           type;
2066 	u16			      size;
2067 	struct ib_flow_esp_filter     val;
2068 	struct ib_flow_esp_filter     mask;
2069 };
2070 
2071 struct ib_flow_gre_filter {
2072 	__be16 c_ks_res0_ver;
2073 	__be16 protocol;
2074 	__be32 key;
2075 };
2076 
2077 struct ib_flow_spec_gre {
2078 	u32                           type;
2079 	u16			      size;
2080 	struct ib_flow_gre_filter     val;
2081 	struct ib_flow_gre_filter     mask;
2082 };
2083 
2084 struct ib_flow_mpls_filter {
2085 	__be32 tag;
2086 };
2087 
2088 struct ib_flow_spec_mpls {
2089 	u32                           type;
2090 	u16			      size;
2091 	struct ib_flow_mpls_filter     val;
2092 	struct ib_flow_mpls_filter     mask;
2093 };
2094 
2095 struct ib_flow_spec_action_tag {
2096 	enum ib_flow_spec_type	      type;
2097 	u16			      size;
2098 	u32                           tag_id;
2099 };
2100 
2101 struct ib_flow_spec_action_drop {
2102 	enum ib_flow_spec_type	      type;
2103 	u16			      size;
2104 };
2105 
2106 struct ib_flow_spec_action_handle {
2107 	enum ib_flow_spec_type	      type;
2108 	u16			      size;
2109 	struct ib_flow_action	     *act;
2110 };
2111 
2112 enum ib_counters_description {
2113 	IB_COUNTER_PACKETS,
2114 	IB_COUNTER_BYTES,
2115 };
2116 
2117 struct ib_flow_spec_action_count {
2118 	enum ib_flow_spec_type type;
2119 	u16 size;
2120 	struct ib_counters *counters;
2121 };
2122 
2123 union ib_flow_spec {
2124 	struct {
2125 		u32			type;
2126 		u16			size;
2127 	};
2128 	struct ib_flow_spec_eth		eth;
2129 	struct ib_flow_spec_ib		ib;
2130 	struct ib_flow_spec_ipv4        ipv4;
2131 	struct ib_flow_spec_tcp_udp	tcp_udp;
2132 	struct ib_flow_spec_ipv6        ipv6;
2133 	struct ib_flow_spec_tunnel      tunnel;
2134 	struct ib_flow_spec_esp		esp;
2135 	struct ib_flow_spec_gre		gre;
2136 	struct ib_flow_spec_mpls	mpls;
2137 	struct ib_flow_spec_action_tag  flow_tag;
2138 	struct ib_flow_spec_action_drop drop;
2139 	struct ib_flow_spec_action_handle action;
2140 	struct ib_flow_spec_action_count flow_count;
2141 };
2142 
2143 struct ib_flow_attr {
2144 	enum ib_flow_attr_type type;
2145 	u16	     size;
2146 	u16	     priority;
2147 	u32	     flags;
2148 	u8	     num_of_specs;
2149 	u32	     port;
2150 	union ib_flow_spec flows[];
2151 };
2152 
2153 struct ib_flow {
2154 	struct ib_qp		*qp;
2155 	struct ib_device	*device;
2156 	struct ib_uobject	*uobject;
2157 };
2158 
2159 enum ib_flow_action_type {
2160 	IB_FLOW_ACTION_UNSPECIFIED,
2161 	IB_FLOW_ACTION_ESP = 1,
2162 };
2163 
2164 struct ib_flow_action_attrs_esp_keymats {
2165 	enum ib_uverbs_flow_action_esp_keymat			protocol;
2166 	union {
2167 		struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
2168 	} keymat;
2169 };
2170 
2171 struct ib_flow_action_attrs_esp_replays {
2172 	enum ib_uverbs_flow_action_esp_replay			protocol;
2173 	union {
2174 		struct ib_uverbs_flow_action_esp_replay_bmp	bmp;
2175 	} replay;
2176 };
2177 
2178 enum ib_flow_action_attrs_esp_flags {
2179 	/* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
2180 	 * This is done in order to share the same flags between user-space and
2181 	 * kernel and spare an unnecessary translation.
2182 	 */
2183 
2184 	/* Kernel flags */
2185 	IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED	= 1ULL << 32,
2186 	IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS	= 1ULL << 33,
2187 };
2188 
2189 struct ib_flow_spec_list {
2190 	struct ib_flow_spec_list	*next;
2191 	union ib_flow_spec		spec;
2192 };
2193 
2194 struct ib_flow_action_attrs_esp {
2195 	struct ib_flow_action_attrs_esp_keymats		*keymat;
2196 	struct ib_flow_action_attrs_esp_replays		*replay;
2197 	struct ib_flow_spec_list			*encap;
2198 	/* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
2199 	 * Value of 0 is a valid value.
2200 	 */
2201 	u32						esn;
2202 	u32						spi;
2203 	u32						seq;
2204 	u32						tfc_pad;
2205 	/* Use enum ib_flow_action_attrs_esp_flags */
2206 	u64						flags;
2207 	u64						hard_limit_pkts;
2208 };
2209 
2210 struct ib_flow_action {
2211 	struct ib_device		*device;
2212 	struct ib_uobject		*uobject;
2213 	enum ib_flow_action_type	type;
2214 	atomic_t			usecnt;
2215 };
2216 
2217 struct ib_mad;
2218 
2219 enum ib_process_mad_flags {
2220 	IB_MAD_IGNORE_MKEY	= 1,
2221 	IB_MAD_IGNORE_BKEY	= 2,
2222 	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
2223 };
2224 
2225 enum ib_mad_result {
2226 	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
2227 	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
2228 	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
2229 	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
2230 };
2231 
2232 struct ib_port_cache {
2233 	u64		      subnet_prefix;
2234 	struct ib_pkey_cache  *pkey;
2235 	struct ib_gid_table   *gid;
2236 	u8                     lmc;
2237 	enum ib_port_state     port_state;
2238 	enum ib_port_state     last_port_state;
2239 };
2240 
2241 struct ib_port_immutable {
2242 	int                           pkey_tbl_len;
2243 	int                           gid_tbl_len;
2244 	u32                           core_cap_flags;
2245 	u32                           max_mad_size;
2246 };
2247 
2248 struct ib_port_data {
2249 	struct ib_device *ib_dev;
2250 
2251 	struct ib_port_immutable immutable;
2252 
2253 	spinlock_t pkey_list_lock;
2254 
2255 	spinlock_t netdev_lock;
2256 
2257 	struct list_head pkey_list;
2258 
2259 	struct ib_port_cache cache;
2260 
2261 	struct net_device __rcu *netdev;
2262 	netdevice_tracker netdev_tracker;
2263 	struct hlist_node ndev_hash_link;
2264 	struct rdma_port_counter port_counter;
2265 	struct ib_port *sysfs;
2266 };
2267 
2268 /* rdma netdev type - specifies protocol type */
2269 enum rdma_netdev_t {
2270 	RDMA_NETDEV_OPA_VNIC,
2271 	RDMA_NETDEV_IPOIB,
2272 };
2273 
2274 /**
2275  * struct rdma_netdev - rdma netdev
2276  * For cases where netstack interfacing is required.
2277  */
2278 struct rdma_netdev {
2279 	void              *clnt_priv;
2280 	struct ib_device  *hca;
2281 	u32		   port_num;
2282 	int                mtu;
2283 
2284 	/*
2285 	 * cleanup function must be specified.
2286 	 * FIXME: This is only used for OPA_VNIC and that usage should be
2287 	 * removed too.
2288 	 */
2289 	void (*free_rdma_netdev)(struct net_device *netdev);
2290 
2291 	/* control functions */
2292 	void (*set_id)(struct net_device *netdev, int id);
2293 	/* send packet */
2294 	int (*send)(struct net_device *dev, struct sk_buff *skb,
2295 		    struct ib_ah *address, u32 dqpn);
2296 	/* multicast */
2297 	int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
2298 			    union ib_gid *gid, u16 mlid,
2299 			    int set_qkey, u32 qkey);
2300 	int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
2301 			    union ib_gid *gid, u16 mlid);
2302 	/* timeout */
2303 	void (*tx_timeout)(struct net_device *dev, unsigned int txqueue);
2304 };
2305 
2306 struct rdma_netdev_alloc_params {
2307 	size_t sizeof_priv;
2308 	unsigned int txqs;
2309 	unsigned int rxqs;
2310 	void *param;
2311 
2312 	int (*initialize_rdma_netdev)(struct ib_device *device, u32 port_num,
2313 				      struct net_device *netdev, void *param);
2314 };
2315 
2316 struct ib_odp_counters {
2317 	atomic64_t faults;
2318 	atomic64_t faults_handled;
2319 	atomic64_t invalidations;
2320 	atomic64_t invalidations_handled;
2321 	atomic64_t prefetch;
2322 };
2323 
2324 struct ib_counters {
2325 	struct ib_device	*device;
2326 	struct ib_uobject	*uobject;
2327 	/* num of objects attached */
2328 	atomic_t	usecnt;
2329 };
2330 
2331 struct ib_counters_read_attr {
2332 	u64	*counters_buff;
2333 	u32	ncounters;
2334 	u32	flags; /* use enum ib_read_counters_flags */
2335 };
2336 
2337 struct uverbs_attr_bundle;
2338 struct iw_cm_id;
2339 struct iw_cm_conn_param;
2340 
2341 #define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member)                      \
2342 	.size_##ib_struct =                                                    \
2343 		(sizeof(struct drv_struct) +                                   \
2344 		 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) +      \
2345 		 BUILD_BUG_ON_ZERO(                                            \
2346 			 !__same_type(((struct drv_struct *)NULL)->member,     \
2347 				      struct ib_struct)))
2348 
2349 #define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp)                          \
2350 	((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
2351 					   gfp, false))
2352 
2353 #define rdma_zalloc_drv_obj_numa(ib_dev, ib_type)                              \
2354 	((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
2355 					   GFP_KERNEL, true))
2356 
2357 #define rdma_zalloc_drv_obj(ib_dev, ib_type)                                   \
2358 	rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)
2359 
2360 #define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
2361 
2362 struct rdma_user_mmap_entry {
2363 	struct kref ref;
2364 	struct ib_ucontext *ucontext;
2365 	unsigned long start_pgoff;
2366 	size_t npages;
2367 	bool driver_removed;
2368 	/* protects access to dmabufs */
2369 	struct mutex dmabufs_lock;
2370 	struct list_head dmabufs;
2371 };
2372 
2373 /* Return the offset (in bytes) the user should pass to libc's mmap() */
2374 static inline u64
2375 rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry)
2376 {
2377 	return (u64)entry->start_pgoff << PAGE_SHIFT;
2378 }
2379 
2380 /**
2381  * struct ib_device_ops - InfiniBand device operations
2382  * This structure defines all the InfiniBand device operations, providers will
2383  * need to define the supported operations, otherwise they will be set to null.
2384  */
2385 struct ib_device_ops {
2386 	struct module *owner;
2387 	enum rdma_driver_id driver_id;
2388 	u32 uverbs_abi_ver;
2389 	unsigned int uverbs_no_driver_id_binding:1;
2390 
2391 	/*
2392 	 * NOTE: New drivers should not make use of device_group; instead new
2393 	 * device parameter should be exposed via netlink command. This
2394 	 * mechanism exists only for existing drivers.
2395 	 */
2396 	const struct attribute_group *device_group;
2397 	const struct attribute_group **port_groups;
2398 
2399 	int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
2400 			 const struct ib_send_wr **bad_send_wr);
2401 	int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
2402 			 const struct ib_recv_wr **bad_recv_wr);
2403 	void (*drain_rq)(struct ib_qp *qp);
2404 	void (*drain_sq)(struct ib_qp *qp);
2405 	int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
2406 	int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2407 	int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
2408 	int (*post_srq_recv)(struct ib_srq *srq,
2409 			     const struct ib_recv_wr *recv_wr,
2410 			     const struct ib_recv_wr **bad_recv_wr);
2411 	int (*process_mad)(struct ib_device *device, int process_mad_flags,
2412 			   u32 port_num, const struct ib_wc *in_wc,
2413 			   const struct ib_grh *in_grh,
2414 			   const struct ib_mad *in_mad, struct ib_mad *out_mad,
2415 			   size_t *out_mad_size, u16 *out_mad_pkey_index);
2416 	int (*query_device)(struct ib_device *device,
2417 			    struct ib_device_attr *device_attr,
2418 			    struct ib_udata *udata);
2419 	int (*modify_device)(struct ib_device *device, int device_modify_mask,
2420 			     struct ib_device_modify *device_modify);
2421 	void (*get_dev_fw_str)(struct ib_device *device, char *str);
2422 	const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2423 						     int comp_vector);
2424 	int (*query_port)(struct ib_device *device, u32 port_num,
2425 			  struct ib_port_attr *port_attr);
2426 	int (*query_port_speed)(struct ib_device *device, u32 port_num,
2427 				u64 *speed);
2428 	int (*modify_port)(struct ib_device *device, u32 port_num,
2429 			   int port_modify_mask,
2430 			   struct ib_port_modify *port_modify);
2431 	/*
2432 	 * The following mandatory functions are used only at device
2433 	 * registration.  Keep functions such as these at the end of this
2434 	 * structure to avoid cache line misses when accessing struct ib_device
2435 	 * in fast paths.
2436 	 */
2437 	int (*get_port_immutable)(struct ib_device *device, u32 port_num,
2438 				  struct ib_port_immutable *immutable);
2439 	enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
2440 					       u32 port_num);
2441 	/*
2442 	 * When calling get_netdev, the HW vendor's driver should return the
2443 	 * net device of device @device at port @port_num or NULL if such
2444 	 * a net device doesn't exist. The vendor driver should call dev_hold
2445 	 * on this net device. The HW vendor's device driver must guarantee
2446 	 * that this function returns NULL before the net device has finished
2447 	 * NETDEV_UNREGISTER state.
2448 	 */
2449 	struct net_device *(*get_netdev)(struct ib_device *device,
2450 					 u32 port_num);
2451 	/*
2452 	 * rdma netdev operation
2453 	 *
2454 	 * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
2455 	 * must return -EOPNOTSUPP if it doesn't support the specified type.
2456 	 */
2457 	struct net_device *(*alloc_rdma_netdev)(
2458 		struct ib_device *device, u32 port_num, enum rdma_netdev_t type,
2459 		const char *name, unsigned char name_assign_type,
2460 		void (*setup)(struct net_device *));
2461 
2462 	int (*rdma_netdev_get_params)(struct ib_device *device, u32 port_num,
2463 				      enum rdma_netdev_t type,
2464 				      struct rdma_netdev_alloc_params *params);
2465 	/*
2466 	 * query_gid should be return GID value for @device, when @port_num
2467 	 * link layer is either IB or iWarp. It is no-op if @port_num port
2468 	 * is RoCE link layer.
2469 	 */
2470 	int (*query_gid)(struct ib_device *device, u32 port_num, int index,
2471 			 union ib_gid *gid);
2472 	/*
2473 	 * When calling add_gid, the HW vendor's driver should add the gid
2474 	 * of device of port at gid index available at @attr. Meta-info of
2475 	 * that gid (for example, the network device related to this gid) is
2476 	 * available at @attr. @context allows the HW vendor driver to store
2477 	 * extra information together with a GID entry. The HW vendor driver may
2478 	 * allocate memory to contain this information and store it in @context
2479 	 * when a new GID entry is written to. Params are consistent until the
2480 	 * next call of add_gid or delete_gid. The function should return 0 on
2481 	 * success or error otherwise. The function could be called
2482 	 * concurrently for different ports. This function is only called when
2483 	 * roce_gid_table is used.
2484 	 */
2485 	int (*add_gid)(const struct ib_gid_attr *attr, void **context);
2486 	/*
2487 	 * When calling del_gid, the HW vendor's driver should delete the
2488 	 * gid of device @device at gid index gid_index of port port_num
2489 	 * available in @attr.
2490 	 * Upon the deletion of a GID entry, the HW vendor must free any
2491 	 * allocated memory. The caller will clear @context afterwards.
2492 	 * This function is only called when roce_gid_table is used.
2493 	 */
2494 	int (*del_gid)(const struct ib_gid_attr *attr, void **context);
2495 	int (*query_pkey)(struct ib_device *device, u32 port_num, u16 index,
2496 			  u16 *pkey);
2497 	int (*alloc_ucontext)(struct ib_ucontext *context,
2498 			      struct ib_udata *udata);
2499 	void (*dealloc_ucontext)(struct ib_ucontext *context);
2500 	int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
2501 	/*
2502 	 * This will be called once refcount of an entry in mmap_xa reaches
2503 	 * zero. The type of the memory that was mapped may differ between
2504 	 * entries and is opaque to the rdma_user_mmap interface.
2505 	 * Therefore needs to be implemented by the driver in mmap_free.
2506 	 */
2507 	void (*mmap_free)(struct rdma_user_mmap_entry *entry);
2508 	int (*mmap_get_pfns)(struct rdma_user_mmap_entry *entry,
2509 			     struct phys_vec *phys_vec,
2510 			     struct p2pdma_provider **provider);
2511 	struct rdma_user_mmap_entry *(*pgoff_to_mmap_entry)(struct ib_ucontext *ucontext,
2512 							    off_t pg_off);
2513 	void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2514 	int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2515 	int (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2516 	int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
2517 			 struct ib_udata *udata);
2518 	int (*create_user_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
2519 			      struct ib_udata *udata);
2520 	int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2521 	int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2522 	int (*destroy_ah)(struct ib_ah *ah, u32 flags);
2523 	int (*create_srq)(struct ib_srq *srq,
2524 			  struct ib_srq_init_attr *srq_init_attr,
2525 			  struct ib_udata *udata);
2526 	int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
2527 			  enum ib_srq_attr_mask srq_attr_mask,
2528 			  struct ib_udata *udata);
2529 	int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
2530 	int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
2531 	int (*create_qp)(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr,
2532 			 struct ib_udata *udata);
2533 	int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2534 			 int qp_attr_mask, struct ib_udata *udata);
2535 	int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2536 			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
2537 	int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
2538 	int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
2539 			 struct uverbs_attr_bundle *attrs);
2540 	int (*create_cq_umem)(struct ib_cq *cq,
2541 			      const struct ib_cq_init_attr *attr,
2542 			      struct ib_umem *umem,
2543 			      struct uverbs_attr_bundle *attrs);
2544 	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
2545 	int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
2546 	int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
2547 	/*
2548 	 * pre_destroy_cq - Prevent a cq from generating any new work
2549 	 * completions, but not free any kernel resources
2550 	 */
2551 	int (*pre_destroy_cq)(struct ib_cq *cq);
2552 	/*
2553 	 * post_destroy_cq - Free all kernel resources
2554 	 */
2555 	void (*post_destroy_cq)(struct ib_cq *cq);
2556 	struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
2557 	struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
2558 				     u64 virt_addr, int mr_access_flags,
2559 				     struct ib_dmah *dmah,
2560 				     struct ib_udata *udata);
2561 	struct ib_mr *(*reg_user_mr_dmabuf)(struct ib_pd *pd, u64 offset,
2562 					    u64 length, u64 virt_addr, int fd,
2563 					    int mr_access_flags,
2564 					    struct ib_dmah *dmah,
2565 					    struct uverbs_attr_bundle *attrs);
2566 	struct ib_mr *(*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start,
2567 				       u64 length, u64 virt_addr,
2568 				       int mr_access_flags, struct ib_pd *pd,
2569 				       struct ib_udata *udata);
2570 	int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
2571 	struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
2572 				  u32 max_num_sg);
2573 	struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
2574 					    u32 max_num_data_sg,
2575 					    u32 max_num_meta_sg);
2576 	int (*advise_mr)(struct ib_pd *pd,
2577 			 enum ib_uverbs_advise_mr_advice advice, u32 flags,
2578 			 struct ib_sge *sg_list, u32 num_sge,
2579 			 struct uverbs_attr_bundle *attrs);
2580 
2581 	/*
2582 	 * Kernel users should universally support relaxed ordering (RO), as
2583 	 * they are designed to read data only after observing the CQE and use
2584 	 * the DMA API correctly.
2585 	 *
2586 	 * Some drivers implicitly enable RO if platform supports it.
2587 	 */
2588 	int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2589 			 unsigned int *sg_offset);
2590 	int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2591 			       struct ib_mr_status *mr_status);
2592 	int (*alloc_mw)(struct ib_mw *mw, struct ib_udata *udata);
2593 	int (*dealloc_mw)(struct ib_mw *mw);
2594 	int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2595 	int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2596 	int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2597 	int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2598 	struct ib_flow *(*create_flow)(struct ib_qp *qp,
2599 				       struct ib_flow_attr *flow_attr,
2600 				       struct ib_udata *udata);
2601 	int (*destroy_flow)(struct ib_flow *flow_id);
2602 	int (*destroy_flow_action)(struct ib_flow_action *action);
2603 	int (*set_vf_link_state)(struct ib_device *device, int vf, u32 port,
2604 				 int state);
2605 	int (*get_vf_config)(struct ib_device *device, int vf, u32 port,
2606 			     struct ifla_vf_info *ivf);
2607 	int (*get_vf_stats)(struct ib_device *device, int vf, u32 port,
2608 			    struct ifla_vf_stats *stats);
2609 	int (*get_vf_guid)(struct ib_device *device, int vf, u32 port,
2610 			    struct ifla_vf_guid *node_guid,
2611 			    struct ifla_vf_guid *port_guid);
2612 	int (*set_vf_guid)(struct ib_device *device, int vf, u32 port, u64 guid,
2613 			   int type);
2614 	struct ib_wq *(*create_wq)(struct ib_pd *pd,
2615 				   struct ib_wq_init_attr *init_attr,
2616 				   struct ib_udata *udata);
2617 	int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
2618 	int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
2619 			 u32 wq_attr_mask, struct ib_udata *udata);
2620 	int (*create_rwq_ind_table)(struct ib_rwq_ind_table *ib_rwq_ind_table,
2621 				    struct ib_rwq_ind_table_init_attr *init_attr,
2622 				    struct ib_udata *udata);
2623 	int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2624 	struct ib_dm *(*alloc_dm)(struct ib_device *device,
2625 				  struct ib_ucontext *context,
2626 				  struct ib_dm_alloc_attr *attr,
2627 				  struct uverbs_attr_bundle *attrs);
2628 	int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
2629 	int (*alloc_dmah)(struct ib_dmah *ibdmah,
2630 			  struct uverbs_attr_bundle *attrs);
2631 	int (*dealloc_dmah)(struct ib_dmah *dmah, struct uverbs_attr_bundle *attrs);
2632 	struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
2633 				   struct ib_dm_mr_attr *attr,
2634 				   struct uverbs_attr_bundle *attrs);
2635 	int (*create_counters)(struct ib_counters *counters,
2636 			       struct uverbs_attr_bundle *attrs);
2637 	int (*destroy_counters)(struct ib_counters *counters);
2638 	int (*read_counters)(struct ib_counters *counters,
2639 			     struct ib_counters_read_attr *counters_read_attr,
2640 			     struct uverbs_attr_bundle *attrs);
2641 	int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg,
2642 			    int data_sg_nents, unsigned int *data_sg_offset,
2643 			    struct scatterlist *meta_sg, int meta_sg_nents,
2644 			    unsigned int *meta_sg_offset);
2645 
2646 	/*
2647 	 * alloc_hw_[device,port]_stats - Allocate a struct rdma_hw_stats and
2648 	 *   fill in the driver initialized data.  The struct is kfree()'ed by
2649 	 *   the sysfs core when the device is removed.  A lifespan of -1 in the
2650 	 *   return struct tells the core to set a default lifespan.
2651 	 */
2652 	struct rdma_hw_stats *(*alloc_hw_device_stats)(struct ib_device *device);
2653 	struct rdma_hw_stats *(*alloc_hw_port_stats)(struct ib_device *device,
2654 						     u32 port_num);
2655 	/*
2656 	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
2657 	 * @index - The index in the value array we wish to have updated, or
2658 	 *   num_counters if we want all stats updated
2659 	 * Return codes -
2660 	 *   < 0 - Error, no counters updated
2661 	 *   index - Updated the single counter pointed to by index
2662 	 *   num_counters - Updated all counters (will reset the timestamp
2663 	 *     and prevent further calls for lifespan milliseconds)
2664 	 * Drivers are allowed to update all counters in leiu of just the
2665 	 *   one given in index at their option
2666 	 */
2667 	int (*get_hw_stats)(struct ib_device *device,
2668 			    struct rdma_hw_stats *stats, u32 port, int index);
2669 
2670 	/*
2671 	 * modify_hw_stat - Modify the counter configuration
2672 	 * @enable: true/false when enable/disable a counter
2673 	 * Return codes - 0 on success or error code otherwise.
2674 	 */
2675 	int (*modify_hw_stat)(struct ib_device *device, u32 port,
2676 			      unsigned int counter_index, bool enable);
2677 	/*
2678 	 * Allows rdma drivers to add their own restrack attributes.
2679 	 */
2680 	int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
2681 	int (*fill_res_mr_entry_raw)(struct sk_buff *msg, struct ib_mr *ibmr);
2682 	int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq);
2683 	int (*fill_res_cq_entry_raw)(struct sk_buff *msg, struct ib_cq *ibcq);
2684 	int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp);
2685 	int (*fill_res_qp_entry_raw)(struct sk_buff *msg, struct ib_qp *ibqp);
2686 	int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id);
2687 	int (*fill_res_srq_entry)(struct sk_buff *msg, struct ib_srq *ib_srq);
2688 	int (*fill_res_srq_entry_raw)(struct sk_buff *msg, struct ib_srq *ib_srq);
2689 
2690 	/* Device lifecycle callbacks */
2691 	/*
2692 	 * Called after the device becomes registered, before clients are
2693 	 * attached
2694 	 */
2695 	int (*enable_driver)(struct ib_device *dev);
2696 	/*
2697 	 * This is called as part of ib_dealloc_device().
2698 	 */
2699 	void (*dealloc_driver)(struct ib_device *dev);
2700 
2701 	/* iWarp CM callbacks */
2702 	void (*iw_add_ref)(struct ib_qp *qp);
2703 	void (*iw_rem_ref)(struct ib_qp *qp);
2704 	struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
2705 	int (*iw_connect)(struct iw_cm_id *cm_id,
2706 			  struct iw_cm_conn_param *conn_param);
2707 	int (*iw_accept)(struct iw_cm_id *cm_id,
2708 			 struct iw_cm_conn_param *conn_param);
2709 	int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
2710 			 u8 pdata_len);
2711 	int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
2712 	int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
2713 	/*
2714 	 * counter_bind_qp - Bind a QP to a counter.
2715 	 * @counter - The counter to be bound. If counter->id is zero then
2716 	 *   the driver needs to allocate a new counter and set counter->id
2717 	 */
2718 	int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp,
2719 			       u32 port);
2720 	/*
2721 	 * counter_unbind_qp - Unbind the qp from the dynamically-allocated
2722 	 *   counter and bind it onto the default one
2723 	 */
2724 	int (*counter_unbind_qp)(struct ib_qp *qp, u32 port);
2725 	/*
2726 	 * counter_dealloc -De-allocate the hw counter
2727 	 */
2728 	int (*counter_dealloc)(struct rdma_counter *counter);
2729 	/*
2730 	 * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
2731 	 * the driver initialized data.
2732 	 */
2733 	struct rdma_hw_stats *(*counter_alloc_stats)(
2734 		struct rdma_counter *counter);
2735 	/*
2736 	 * counter_update_stats - Query the stats value of this counter
2737 	 */
2738 	int (*counter_update_stats)(struct rdma_counter *counter);
2739 
2740 	/*
2741 	 * counter_init - Initialize the driver specific rdma counter struct.
2742 	 */
2743 	void (*counter_init)(struct rdma_counter *counter);
2744 
2745 	/*
2746 	 * Allows rdma drivers to add their own restrack attributes
2747 	 * dumped via 'rdma stat' iproute2 command.
2748 	 */
2749 	int (*fill_stat_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
2750 
2751 	/* query driver for its ucontext properties */
2752 	int (*query_ucontext)(struct ib_ucontext *context,
2753 			      struct uverbs_attr_bundle *attrs);
2754 
2755 	/*
2756 	 * Provide NUMA node. This API exists for rdmavt/hfi1 only.
2757 	 * Everyone else relies on Linux memory management model.
2758 	 */
2759 	int (*get_numa_node)(struct ib_device *dev);
2760 
2761 	/*
2762 	 * add_sub_dev - Add a sub IB device
2763 	 */
2764 	struct ib_device *(*add_sub_dev)(struct ib_device *parent,
2765 					 enum rdma_nl_dev_type type,
2766 					 const char *name);
2767 
2768 	/*
2769 	 * del_sub_dev - Delete a sub IB device
2770 	 */
2771 	void (*del_sub_dev)(struct ib_device *sub_dev);
2772 
2773 	/*
2774 	 * ufile_cleanup - Attempt to cleanup ubojects HW resources inside
2775 	 * the ufile.
2776 	 */
2777 	void (*ufile_hw_cleanup)(struct ib_uverbs_file *ufile);
2778 
2779 	/*
2780 	 * report_port_event - Drivers need to implement this if they have
2781 	 * some private stuff to handle when link status changes.
2782 	 */
2783 	void (*report_port_event)(struct ib_device *ibdev,
2784 				  struct net_device *ndev, unsigned long event);
2785 
2786 	DECLARE_RDMA_OBJ_SIZE(ib_ah);
2787 	DECLARE_RDMA_OBJ_SIZE(ib_counters);
2788 	DECLARE_RDMA_OBJ_SIZE(ib_cq);
2789 	DECLARE_RDMA_OBJ_SIZE(ib_dmah);
2790 	DECLARE_RDMA_OBJ_SIZE(ib_mw);
2791 	DECLARE_RDMA_OBJ_SIZE(ib_pd);
2792 	DECLARE_RDMA_OBJ_SIZE(ib_qp);
2793 	DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table);
2794 	DECLARE_RDMA_OBJ_SIZE(ib_srq);
2795 	DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
2796 	DECLARE_RDMA_OBJ_SIZE(ib_xrcd);
2797 	DECLARE_RDMA_OBJ_SIZE(rdma_counter);
2798 };
2799 
2800 struct ib_core_device {
2801 	/* device must be the first element in structure until,
2802 	 * union of ib_core_device and device exists in ib_device.
2803 	 */
2804 	struct device dev;
2805 	possible_net_t rdma_net;
2806 	struct kobject *ports_kobj;
2807 	struct list_head port_list;
2808 	struct ib_device *owner; /* reach back to owner ib_device */
2809 };
2810 
2811 struct rdma_restrack_root;
2812 struct ib_device {
2813 	/* Do not access @dma_device directly from ULP nor from HW drivers. */
2814 	struct device                *dma_device;
2815 	struct ib_device_ops	     ops;
2816 	char                          name[IB_DEVICE_NAME_MAX];
2817 	struct rcu_head rcu_head;
2818 
2819 	struct list_head              event_handler_list;
2820 	/* Protects event_handler_list */
2821 	struct rw_semaphore event_handler_rwsem;
2822 
2823 	/* Protects QP's event_handler calls and open_qp list */
2824 	spinlock_t qp_open_list_lock;
2825 
2826 	struct rw_semaphore	      client_data_rwsem;
2827 	struct xarray                 client_data;
2828 	struct mutex                  unregistration_lock;
2829 
2830 	/* Synchronize GID, Pkey cache entries, subnet prefix, LMC */
2831 	rwlock_t cache_lock;
2832 	/**
2833 	 * port_data is indexed by port number
2834 	 */
2835 	struct ib_port_data *port_data;
2836 
2837 	int			      num_comp_vectors;
2838 
2839 	union {
2840 		struct device		dev;
2841 		struct ib_core_device	coredev;
2842 	};
2843 
2844 	/* First group is for device attributes,
2845 	 * Second group is for driver provided attributes (optional).
2846 	 * Third group is for the hw_stats
2847 	 * It is a NULL terminated array.
2848 	 */
2849 	const struct attribute_group	*groups[4];
2850 	u8				hw_stats_attr_index;
2851 
2852 	u64			     uverbs_cmd_mask;
2853 
2854 	char			     node_desc[IB_DEVICE_NODE_DESC_MAX];
2855 	__be64			     node_guid;
2856 	u32			     local_dma_lkey;
2857 	u16                          is_switch:1;
2858 	/* Indicates kernel verbs support, should not be used in drivers */
2859 	u16                          kverbs_provider:1;
2860 	/* CQ adaptive moderation (RDMA DIM) */
2861 	u16                          use_cq_dim:1;
2862 	u8                           node_type;
2863 	u32			     phys_port_cnt;
2864 	struct ib_device_attr        attrs;
2865 	struct hw_stats_device_data *hw_stats_data;
2866 
2867 #ifdef CONFIG_CGROUP_RDMA
2868 	struct rdmacg_device         cg_device;
2869 #endif
2870 
2871 	u32                          index;
2872 
2873 	spinlock_t                   cq_pools_lock;
2874 	struct list_head             cq_pools[IB_POLL_LAST_POOL_TYPE + 1];
2875 
2876 	struct rdma_restrack_root *res;
2877 
2878 	const struct uapi_definition   *driver_def;
2879 
2880 	/*
2881 	 * Positive refcount indicates that the device is currently
2882 	 * registered and cannot be unregistered.
2883 	 */
2884 	refcount_t refcount;
2885 	struct completion unreg_completion;
2886 	struct work_struct unregistration_work;
2887 
2888 	const struct rdma_link_ops *link_ops;
2889 
2890 	/* Protects compat_devs xarray modifications */
2891 	struct mutex compat_devs_mutex;
2892 	/* Maintains compat devices for each net namespace */
2893 	struct xarray compat_devs;
2894 
2895 	/* Used by iWarp CM */
2896 	char iw_ifname[IFNAMSIZ];
2897 	u32 iw_driver_flags;
2898 	u32 lag_flags;
2899 
2900 	/* A parent device has a list of sub-devices */
2901 	struct mutex subdev_lock;
2902 	struct list_head subdev_list_head;
2903 
2904 	/* A sub device has a type and a parent */
2905 	enum rdma_nl_dev_type type;
2906 	struct ib_device *parent;
2907 	struct list_head subdev_list;
2908 
2909 	enum rdma_nl_name_assign_type name_assign_type;
2910 };
2911 
2912 static inline void *rdma_zalloc_obj(struct ib_device *dev, size_t size,
2913 				    gfp_t gfp, bool is_numa_aware)
2914 {
2915 	if (is_numa_aware && dev->ops.get_numa_node)
2916 		return kzalloc_node(size, gfp, dev->ops.get_numa_node(dev));
2917 
2918 	return kzalloc(size, gfp);
2919 }
2920 
2921 struct ib_client_nl_info;
2922 struct ib_client {
2923 	const char *name;
2924 	int (*add)(struct ib_device *ibdev);
2925 	void (*remove)(struct ib_device *, void *client_data);
2926 	void (*rename)(struct ib_device *dev, void *client_data);
2927 	int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
2928 			   struct ib_client_nl_info *res);
2929 	int (*get_global_nl_info)(struct ib_client_nl_info *res);
2930 
2931 	/* Returns the net_dev belonging to this ib_client and matching the
2932 	 * given parameters.
2933 	 * @dev:	 An RDMA device that the net_dev use for communication.
2934 	 * @port:	 A physical port number on the RDMA device.
2935 	 * @pkey:	 P_Key that the net_dev uses if applicable.
2936 	 * @gid:	 A GID that the net_dev uses to communicate.
2937 	 * @addr:	 An IP address the net_dev is configured with.
2938 	 * @client_data: The device's client data set by ib_set_client_data().
2939 	 *
2940 	 * An ib_client that implements a net_dev on top of RDMA devices
2941 	 * (such as IP over IB) should implement this callback, allowing the
2942 	 * rdma_cm module to find the right net_dev for a given request.
2943 	 *
2944 	 * The caller is responsible for calling dev_put on the returned
2945 	 * netdev. */
2946 	struct net_device *(*get_net_dev_by_params)(
2947 			struct ib_device *dev,
2948 			u32 port,
2949 			u16 pkey,
2950 			const union ib_gid *gid,
2951 			const struct sockaddr *addr,
2952 			void *client_data);
2953 
2954 	refcount_t uses;
2955 	struct completion uses_zero;
2956 	u32 client_id;
2957 
2958 	/* kverbs are not required by the client */
2959 	u8 no_kverbs_req:1;
2960 };
2961 
2962 /*
2963  * IB block DMA iterator
2964  *
2965  * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
2966  * to a HW supported page size.
2967  */
2968 struct ib_block_iter {
2969 	/* internal states */
2970 	struct scatterlist *__sg;	/* sg holding the current aligned block */
2971 	dma_addr_t __dma_addr;		/* unaligned DMA address of this block */
2972 	size_t __sg_numblocks;		/* ib_umem_num_dma_blocks() */
2973 	unsigned int __sg_nents;	/* number of SG entries */
2974 	unsigned int __sg_advance;	/* number of bytes to advance in sg in next step */
2975 	unsigned int __pg_bit;		/* alignment of current block */
2976 };
2977 
2978 struct ib_device *_ib_alloc_device(size_t size, struct net *net);
2979 #define ib_alloc_device(drv_struct, member)                                    \
2980 	container_of(_ib_alloc_device(sizeof(struct drv_struct) +              \
2981 				      BUILD_BUG_ON_ZERO(offsetof(              \
2982 					      struct drv_struct, member)),     \
2983 				      &init_net),			       \
2984 		     struct drv_struct, member)
2985 
2986 #define ib_alloc_device_with_net(drv_struct, member, net)		       \
2987 	container_of(_ib_alloc_device(sizeof(struct drv_struct) +              \
2988 				      BUILD_BUG_ON_ZERO(offsetof(              \
2989 					struct drv_struct, member)), net),     \
2990 		     struct drv_struct, member)
2991 
2992 void ib_dealloc_device(struct ib_device *device);
2993 
2994 void ib_get_device_fw_str(struct ib_device *device, char *str);
2995 
2996 int ib_register_device(struct ib_device *device, const char *name,
2997 		       struct device *dma_device);
2998 void ib_unregister_device(struct ib_device *device);
2999 void ib_unregister_driver(enum rdma_driver_id driver_id);
3000 void ib_unregister_device_and_put(struct ib_device *device);
3001 void ib_unregister_device_queued(struct ib_device *ib_dev);
3002 
3003 int ib_register_client   (struct ib_client *client);
3004 void ib_unregister_client(struct ib_client *client);
3005 
3006 void __rdma_block_iter_start(struct ib_block_iter *biter,
3007 			     struct scatterlist *sglist,
3008 			     unsigned int nents,
3009 			     unsigned long pgsz);
3010 bool __rdma_block_iter_next(struct ib_block_iter *biter);
3011 
3012 /**
3013  * rdma_block_iter_dma_address - get the aligned dma address of the current
3014  * block held by the block iterator.
3015  * @biter: block iterator holding the memory block
3016  */
3017 static inline dma_addr_t
3018 rdma_block_iter_dma_address(struct ib_block_iter *biter)
3019 {
3020 	return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
3021 }
3022 
3023 /**
3024  * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
3025  * @sglist: sglist to iterate over
3026  * @biter: block iterator holding the memory block
3027  * @nents: maximum number of sg entries to iterate over
3028  * @pgsz: best HW supported page size to use
3029  *
3030  * Callers may use rdma_block_iter_dma_address() to get each
3031  * blocks aligned DMA address.
3032  */
3033 #define rdma_for_each_block(sglist, biter, nents, pgsz)		\
3034 	for (__rdma_block_iter_start(biter, sglist, nents,	\
3035 				     pgsz);			\
3036 	     __rdma_block_iter_next(biter);)
3037 
3038 /**
3039  * ib_get_client_data - Get IB client context
3040  * @device:Device to get context for
3041  * @client:Client to get context for
3042  *
3043  * ib_get_client_data() returns the client context data set with
3044  * ib_set_client_data(). This can only be called while the client is
3045  * registered to the device, once the ib_client remove() callback returns this
3046  * cannot be called.
3047  */
3048 static inline void *ib_get_client_data(struct ib_device *device,
3049 				       struct ib_client *client)
3050 {
3051 	return xa_load(&device->client_data, client->client_id);
3052 }
3053 void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
3054 			 void *data);
3055 void ib_set_device_ops(struct ib_device *device,
3056 		       const struct ib_device_ops *ops);
3057 
3058 int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
3059 		      unsigned long pfn, unsigned long size, pgprot_t prot,
3060 		      struct rdma_user_mmap_entry *entry);
3061 int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
3062 				struct rdma_user_mmap_entry *entry,
3063 				size_t length);
3064 int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
3065 				      struct rdma_user_mmap_entry *entry,
3066 				      size_t length, u32 min_pgoff,
3067 				      u32 max_pgoff);
3068 
3069 #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
3070 void rdma_user_mmap_disassociate(struct ib_device *device);
3071 #else
3072 static inline void rdma_user_mmap_disassociate(struct ib_device *device)
3073 {
3074 }
3075 #endif
3076 
3077 static inline int
3078 rdma_user_mmap_entry_insert_exact(struct ib_ucontext *ucontext,
3079 				  struct rdma_user_mmap_entry *entry,
3080 				  size_t length, u32 pgoff)
3081 {
3082 	return rdma_user_mmap_entry_insert_range(ucontext, entry, length, pgoff,
3083 						 pgoff);
3084 }
3085 
3086 struct rdma_user_mmap_entry *
3087 rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
3088 			       unsigned long pgoff);
3089 struct rdma_user_mmap_entry *
3090 rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
3091 			 struct vm_area_struct *vma);
3092 void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry);
3093 
3094 void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry);
3095 
3096 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
3097 {
3098 	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
3099 }
3100 
3101 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
3102 {
3103 	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
3104 }
3105 
3106 static inline bool ib_is_buffer_cleared(const void __user *p,
3107 					size_t len)
3108 {
3109 	bool ret;
3110 	u8 *buf;
3111 
3112 	if (len > USHRT_MAX)
3113 		return false;
3114 
3115 	buf = memdup_user(p, len);
3116 	if (IS_ERR(buf))
3117 		return false;
3118 
3119 	ret = !memchr_inv(buf, 0, len);
3120 	kfree(buf);
3121 	return ret;
3122 }
3123 
3124 static inline bool ib_is_udata_cleared(struct ib_udata *udata,
3125 				       size_t offset,
3126 				       size_t len)
3127 {
3128 	return ib_is_buffer_cleared(udata->inbuf + offset, len);
3129 }
3130 
3131 /**
3132  * ib_modify_qp_is_ok - Check that the supplied attribute mask
3133  * contains all required attributes and no attributes not allowed for
3134  * the given QP state transition.
3135  * @cur_state: Current QP state
3136  * @next_state: Next QP state
3137  * @type: QP type
3138  * @mask: Mask of supplied QP attributes
3139  *
3140  * This function is a helper function that a low-level driver's
3141  * modify_qp method can use to validate the consumer's input.  It
3142  * checks that cur_state and next_state are valid QP states, that a
3143  * transition from cur_state to next_state is allowed by the IB spec,
3144  * and that the attribute mask supplied is allowed for the transition.
3145  */
3146 bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
3147 			enum ib_qp_type type, enum ib_qp_attr_mask mask);
3148 
3149 void ib_register_event_handler(struct ib_event_handler *event_handler);
3150 void ib_unregister_event_handler(struct ib_event_handler *event_handler);
3151 void ib_dispatch_event(const struct ib_event *event);
3152 
3153 int ib_query_port(struct ib_device *device,
3154 		  u32 port_num, struct ib_port_attr *port_attr);
3155 
3156 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
3157 					       u32 port_num);
3158 
3159 /**
3160  * rdma_cap_ib_switch - Check if the device is IB switch
3161  * @device: Device to check
3162  *
3163  * Device driver is responsible for setting is_switch bit on
3164  * in ib_device structure at init time.
3165  *
3166  * Return: true if the device is IB switch.
3167  */
3168 static inline bool rdma_cap_ib_switch(const struct ib_device *device)
3169 {
3170 	return device->is_switch;
3171 }
3172 
3173 /**
3174  * rdma_start_port - Return the first valid port number for the device
3175  * specified
3176  *
3177  * @device: Device to be checked
3178  *
3179  * Return start port number
3180  */
3181 static inline u32 rdma_start_port(const struct ib_device *device)
3182 {
3183 	return rdma_cap_ib_switch(device) ? 0 : 1;
3184 }
3185 
3186 /**
3187  * rdma_for_each_port - Iterate over all valid port numbers of the IB device
3188  * @device: The struct ib_device * to iterate over
3189  * @iter: The unsigned int to store the port number
3190  */
3191 #define rdma_for_each_port(device, iter)                                       \
3192 	for (iter = rdma_start_port(device +				       \
3193 				    BUILD_BUG_ON_ZERO(!__same_type(u32,	       \
3194 								   iter)));    \
3195 	     iter <= rdma_end_port(device); iter++)
3196 
3197 /**
3198  * rdma_end_port - Return the last valid port number for the device
3199  * specified
3200  *
3201  * @device: Device to be checked
3202  *
3203  * Return last port number
3204  */
3205 static inline u32 rdma_end_port(const struct ib_device *device)
3206 {
3207 	return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
3208 }
3209 
3210 static inline int rdma_is_port_valid(const struct ib_device *device,
3211 				     unsigned int port)
3212 {
3213 	return (port >= rdma_start_port(device) &&
3214 		port <= rdma_end_port(device));
3215 }
3216 
3217 static inline bool rdma_is_grh_required(const struct ib_device *device,
3218 					u32 port_num)
3219 {
3220 	return device->port_data[port_num].immutable.core_cap_flags &
3221 	       RDMA_CORE_PORT_IB_GRH_REQUIRED;
3222 }
3223 
3224 static inline bool rdma_protocol_ib(const struct ib_device *device,
3225 				    u32 port_num)
3226 {
3227 	return device->port_data[port_num].immutable.core_cap_flags &
3228 	       RDMA_CORE_CAP_PROT_IB;
3229 }
3230 
3231 static inline bool rdma_protocol_roce(const struct ib_device *device,
3232 				      u32 port_num)
3233 {
3234 	return device->port_data[port_num].immutable.core_cap_flags &
3235 	       (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
3236 }
3237 
3238 static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device,
3239 						u32 port_num)
3240 {
3241 	return device->port_data[port_num].immutable.core_cap_flags &
3242 	       RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
3243 }
3244 
3245 static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device,
3246 						u32 port_num)
3247 {
3248 	return device->port_data[port_num].immutable.core_cap_flags &
3249 	       RDMA_CORE_CAP_PROT_ROCE;
3250 }
3251 
3252 static inline bool rdma_protocol_iwarp(const struct ib_device *device,
3253 				       u32 port_num)
3254 {
3255 	return device->port_data[port_num].immutable.core_cap_flags &
3256 	       RDMA_CORE_CAP_PROT_IWARP;
3257 }
3258 
3259 static inline bool rdma_ib_or_roce(const struct ib_device *device,
3260 				   u32 port_num)
3261 {
3262 	return rdma_protocol_ib(device, port_num) ||
3263 		rdma_protocol_roce(device, port_num);
3264 }
3265 
3266 static inline bool rdma_protocol_raw_packet(const struct ib_device *device,
3267 					    u32 port_num)
3268 {
3269 	return device->port_data[port_num].immutable.core_cap_flags &
3270 	       RDMA_CORE_CAP_PROT_RAW_PACKET;
3271 }
3272 
3273 static inline bool rdma_protocol_usnic(const struct ib_device *device,
3274 				       u32 port_num)
3275 {
3276 	return device->port_data[port_num].immutable.core_cap_flags &
3277 	       RDMA_CORE_CAP_PROT_USNIC;
3278 }
3279 
3280 /**
3281  * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
3282  * Management Datagrams.
3283  * @device: Device to check
3284  * @port_num: Port number to check
3285  *
3286  * Management Datagrams (MAD) are a required part of the InfiniBand
3287  * specification and are supported on all InfiniBand devices.  A slightly
3288  * extended version are also supported on OPA interfaces.
3289  *
3290  * Return: true if the port supports sending/receiving of MAD packets.
3291  */
3292 static inline bool rdma_cap_ib_mad(const struct ib_device *device, u32 port_num)
3293 {
3294 	return device->port_data[port_num].immutable.core_cap_flags &
3295 	       RDMA_CORE_CAP_IB_MAD;
3296 }
3297 
3298 /**
3299  * rdma_cap_opa_mad - Check if the port of device provides support for OPA
3300  * Management Datagrams.
3301  * @device: Device to check
3302  * @port_num: Port number to check
3303  *
3304  * Intel OmniPath devices extend and/or replace the InfiniBand Management
3305  * datagrams with their own versions.  These OPA MADs share many but not all of
3306  * the characteristics of InfiniBand MADs.
3307  *
3308  * OPA MADs differ in the following ways:
3309  *
3310  *    1) MADs are variable size up to 2K
3311  *       IBTA defined MADs remain fixed at 256 bytes
3312  *    2) OPA SMPs must carry valid PKeys
3313  *    3) OPA SMP packets are a different format
3314  *
3315  * Return: true if the port supports OPA MAD packet formats.
3316  */
3317 static inline bool rdma_cap_opa_mad(struct ib_device *device, u32 port_num)
3318 {
3319 	return device->port_data[port_num].immutable.core_cap_flags &
3320 		RDMA_CORE_CAP_OPA_MAD;
3321 }
3322 
3323 /**
3324  * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
3325  * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
3326  * @device: Device to check
3327  * @port_num: Port number to check
3328  *
3329  * Each InfiniBand node is required to provide a Subnet Management Agent
3330  * that the subnet manager can access.  Prior to the fabric being fully
3331  * configured by the subnet manager, the SMA is accessed via a well known
3332  * interface called the Subnet Management Interface (SMI).  This interface
3333  * uses directed route packets to communicate with the SM to get around the
3334  * chicken and egg problem of the SM needing to know what's on the fabric
3335  * in order to configure the fabric, and needing to configure the fabric in
3336  * order to send packets to the devices on the fabric.  These directed
3337  * route packets do not need the fabric fully configured in order to reach
3338  * their destination.  The SMI is the only method allowed to send
3339  * directed route packets on an InfiniBand fabric.
3340  *
3341  * Return: true if the port provides an SMI.
3342  */
3343 static inline bool rdma_cap_ib_smi(const struct ib_device *device, u32 port_num)
3344 {
3345 	return device->port_data[port_num].immutable.core_cap_flags &
3346 	       RDMA_CORE_CAP_IB_SMI;
3347 }
3348 
3349 /**
3350  * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
3351  * Communication Manager.
3352  * @device: Device to check
3353  * @port_num: Port number to check
3354  *
3355  * The InfiniBand Communication Manager is one of many pre-defined General
3356  * Service Agents (GSA) that are accessed via the General Service
3357  * Interface (GSI).  It's role is to facilitate establishment of connections
3358  * between nodes as well as other management related tasks for established
3359  * connections.
3360  *
3361  * Return: true if the port supports an IB CM (this does not guarantee that
3362  * a CM is actually running however).
3363  */
3364 static inline bool rdma_cap_ib_cm(const struct ib_device *device, u32 port_num)
3365 {
3366 	return device->port_data[port_num].immutable.core_cap_flags &
3367 	       RDMA_CORE_CAP_IB_CM;
3368 }
3369 
3370 /**
3371  * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
3372  * Communication Manager.
3373  * @device: Device to check
3374  * @port_num: Port number to check
3375  *
3376  * Similar to above, but specific to iWARP connections which have a different
3377  * managment protocol than InfiniBand.
3378  *
3379  * Return: true if the port supports an iWARP CM (this does not guarantee that
3380  * a CM is actually running however).
3381  */
3382 static inline bool rdma_cap_iw_cm(const struct ib_device *device, u32 port_num)
3383 {
3384 	return device->port_data[port_num].immutable.core_cap_flags &
3385 	       RDMA_CORE_CAP_IW_CM;
3386 }
3387 
3388 /**
3389  * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
3390  * Subnet Administration.
3391  * @device: Device to check
3392  * @port_num: Port number to check
3393  *
3394  * An InfiniBand Subnet Administration (SA) service is a pre-defined General
3395  * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
3396  * fabrics, devices should resolve routes to other hosts by contacting the
3397  * SA to query the proper route.
3398  *
3399  * Return: true if the port should act as a client to the fabric Subnet
3400  * Administration interface.  This does not imply that the SA service is
3401  * running locally.
3402  */
3403 static inline bool rdma_cap_ib_sa(const struct ib_device *device, u32 port_num)
3404 {
3405 	return device->port_data[port_num].immutable.core_cap_flags &
3406 	       RDMA_CORE_CAP_IB_SA;
3407 }
3408 
3409 /**
3410  * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
3411  * Multicast.
3412  * @device: Device to check
3413  * @port_num: Port number to check
3414  *
3415  * InfiniBand multicast registration is more complex than normal IPv4 or
3416  * IPv6 multicast registration.  Each Host Channel Adapter must register
3417  * with the Subnet Manager when it wishes to join a multicast group.  It
3418  * should do so only once regardless of how many queue pairs it subscribes
3419  * to this group.  And it should leave the group only after all queue pairs
3420  * attached to the group have been detached.
3421  *
3422  * Return: true if the port must undertake the additional adminstrative
3423  * overhead of registering/unregistering with the SM and tracking of the
3424  * total number of queue pairs attached to the multicast group.
3425  */
3426 static inline bool rdma_cap_ib_mcast(const struct ib_device *device,
3427 				     u32 port_num)
3428 {
3429 	return rdma_cap_ib_sa(device, port_num);
3430 }
3431 
3432 /**
3433  * rdma_cap_af_ib - Check if the port of device has the capability
3434  * Native Infiniband Address.
3435  * @device: Device to check
3436  * @port_num: Port number to check
3437  *
3438  * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
3439  * GID.  RoCE uses a different mechanism, but still generates a GID via
3440  * a prescribed mechanism and port specific data.
3441  *
3442  * Return: true if the port uses a GID address to identify devices on the
3443  * network.
3444  */
3445 static inline bool rdma_cap_af_ib(const struct ib_device *device, u32 port_num)
3446 {
3447 	return device->port_data[port_num].immutable.core_cap_flags &
3448 	       RDMA_CORE_CAP_AF_IB;
3449 }
3450 
3451 /**
3452  * rdma_cap_eth_ah - Check if the port of device has the capability
3453  * Ethernet Address Handle.
3454  * @device: Device to check
3455  * @port_num: Port number to check
3456  *
3457  * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
3458  * to fabricate GIDs over Ethernet/IP specific addresses native to the
3459  * port.  Normally, packet headers are generated by the sending host
3460  * adapter, but when sending connectionless datagrams, we must manually
3461  * inject the proper headers for the fabric we are communicating over.
3462  *
3463  * Return: true if we are running as a RoCE port and must force the
3464  * addition of a Global Route Header built from our Ethernet Address
3465  * Handle into our header list for connectionless packets.
3466  */
3467 static inline bool rdma_cap_eth_ah(const struct ib_device *device, u32 port_num)
3468 {
3469 	return device->port_data[port_num].immutable.core_cap_flags &
3470 	       RDMA_CORE_CAP_ETH_AH;
3471 }
3472 
3473 /**
3474  * rdma_cap_opa_ah - Check if the port of device supports
3475  * OPA Address handles
3476  * @device: Device to check
3477  * @port_num: Port number to check
3478  *
3479  * Return: true if we are running on an OPA device which supports
3480  * the extended OPA addressing.
3481  */
3482 static inline bool rdma_cap_opa_ah(struct ib_device *device, u32 port_num)
3483 {
3484 	return (device->port_data[port_num].immutable.core_cap_flags &
3485 		RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
3486 }
3487 
3488 /**
3489  * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
3490  *
3491  * @device: Device
3492  * @port_num: Port number
3493  *
3494  * This MAD size includes the MAD headers and MAD payload.  No other headers
3495  * are included.
3496  *
3497  * Return the max MAD size required by the Port.  Will return 0 if the port
3498  * does not support MADs
3499  */
3500 static inline size_t rdma_max_mad_size(const struct ib_device *device,
3501 				       u32 port_num)
3502 {
3503 	return device->port_data[port_num].immutable.max_mad_size;
3504 }
3505 
3506 /**
3507  * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
3508  * @device: Device to check
3509  * @port_num: Port number to check
3510  *
3511  * RoCE GID table mechanism manages the various GIDs for a device.
3512  *
3513  * NOTE: if allocating the port's GID table has failed, this call will still
3514  * return true, but any RoCE GID table API will fail.
3515  *
3516  * Return: true if the port uses RoCE GID table mechanism in order to manage
3517  * its GIDs.
3518  */
3519 static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
3520 					   u32 port_num)
3521 {
3522 	return rdma_protocol_roce(device, port_num) &&
3523 		device->ops.add_gid && device->ops.del_gid;
3524 }
3525 
3526 /*
3527  * Check if the device supports READ W/ INVALIDATE.
3528  */
3529 static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
3530 {
3531 	/*
3532 	 * iWarp drivers must support READ W/ INVALIDATE.  No other protocol
3533 	 * has support for it yet.
3534 	 */
3535 	return rdma_protocol_iwarp(dev, port_num);
3536 }
3537 
3538 /**
3539  * rdma_core_cap_opa_port - Return whether the RDMA Port is OPA or not.
3540  * @device: Device
3541  * @port_num: 1 based Port number
3542  *
3543  * Return true if port is an Intel OPA port , false if not
3544  */
3545 static inline bool rdma_core_cap_opa_port(struct ib_device *device,
3546 					  u32 port_num)
3547 {
3548 	return (device->port_data[port_num].immutable.core_cap_flags &
3549 		RDMA_CORE_PORT_INTEL_OPA) == RDMA_CORE_PORT_INTEL_OPA;
3550 }
3551 
3552 /**
3553  * rdma_mtu_enum_to_int - Return the mtu of the port as an integer value.
3554  * @device: Device
3555  * @port: Port number
3556  * @mtu: enum value of MTU
3557  *
3558  * Return the MTU size supported by the port as an integer value. Will return
3559  * -1 if enum value of mtu is not supported.
3560  */
3561 static inline int rdma_mtu_enum_to_int(struct ib_device *device, u32 port,
3562 				       int mtu)
3563 {
3564 	if (rdma_core_cap_opa_port(device, port))
3565 		return opa_mtu_enum_to_int((enum opa_mtu)mtu);
3566 	else
3567 		return ib_mtu_enum_to_int((enum ib_mtu)mtu);
3568 }
3569 
3570 /**
3571  * rdma_mtu_from_attr - Return the mtu of the port from the port attribute.
3572  * @device: Device
3573  * @port: Port number
3574  * @attr: port attribute
3575  *
3576  * Return the MTU size supported by the port as an integer value.
3577  */
3578 static inline int rdma_mtu_from_attr(struct ib_device *device, u32 port,
3579 				     struct ib_port_attr *attr)
3580 {
3581 	if (rdma_core_cap_opa_port(device, port))
3582 		return attr->phys_mtu;
3583 	else
3584 		return ib_mtu_enum_to_int(attr->max_mtu);
3585 }
3586 
3587 int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port,
3588 			 int state);
3589 int ib_get_vf_config(struct ib_device *device, int vf, u32 port,
3590 		     struct ifla_vf_info *info);
3591 int ib_get_vf_stats(struct ib_device *device, int vf, u32 port,
3592 		    struct ifla_vf_stats *stats);
3593 int ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
3594 		    struct ifla_vf_guid *node_guid,
3595 		    struct ifla_vf_guid *port_guid);
3596 int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid,
3597 		   int type);
3598 
3599 int ib_query_pkey(struct ib_device *device,
3600 		  u32 port_num, u16 index, u16 *pkey);
3601 
3602 int ib_modify_device(struct ib_device *device,
3603 		     int device_modify_mask,
3604 		     struct ib_device_modify *device_modify);
3605 
3606 int ib_modify_port(struct ib_device *device,
3607 		   u32 port_num, int port_modify_mask,
3608 		   struct ib_port_modify *port_modify);
3609 
3610 int ib_find_gid(struct ib_device *device, union ib_gid *gid,
3611 		u32 *port_num, u16 *index);
3612 
3613 int ib_find_pkey(struct ib_device *device,
3614 		 u32 port_num, u16 pkey, u16 *index);
3615 
3616 enum ib_pd_flags {
3617 	/*
3618 	 * Create a memory registration for all memory in the system and place
3619 	 * the rkey for it into pd->unsafe_global_rkey.  This can be used by
3620 	 * ULPs to avoid the overhead of dynamic MRs.
3621 	 *
3622 	 * This flag is generally considered unsafe and must only be used in
3623 	 * extremly trusted environments.  Every use of it will log a warning
3624 	 * in the kernel log.
3625 	 */
3626 	IB_PD_UNSAFE_GLOBAL_RKEY	= 0x01,
3627 };
3628 
3629 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
3630 		const char *caller);
3631 
3632 /**
3633  * ib_alloc_pd - Allocates an unused protection domain.
3634  * @device: The device on which to allocate the protection domain.
3635  * @flags: protection domain flags
3636  *
3637  * A protection domain object provides an association between QPs, shared
3638  * receive queues, address handles, memory regions, and memory windows.
3639  *
3640  * Every PD has a local_dma_lkey which can be used as the lkey value for local
3641  * memory operations.
3642  */
3643 #define ib_alloc_pd(device, flags) \
3644 	__ib_alloc_pd((device), (flags), KBUILD_MODNAME)
3645 
3646 int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
3647 
3648 /**
3649  * ib_dealloc_pd - Deallocate kernel PD
3650  * @pd: The protection domain
3651  *
3652  * NOTE: for user PD use ib_dealloc_pd_user with valid udata!
3653  */
3654 static inline void ib_dealloc_pd(struct ib_pd *pd)
3655 {
3656 	int ret = ib_dealloc_pd_user(pd, NULL);
3657 
3658 	WARN_ONCE(ret, "Destroy of kernel PD shouldn't fail");
3659 }
3660 
3661 enum rdma_create_ah_flags {
3662 	/* In a sleepable context */
3663 	RDMA_CREATE_AH_SLEEPABLE = BIT(0),
3664 };
3665 
3666 /**
3667  * rdma_create_ah - Creates an address handle for the given address vector.
3668  * @pd: The protection domain associated with the address handle.
3669  * @ah_attr: The attributes of the address vector.
3670  * @flags: Create address handle flags (see enum rdma_create_ah_flags).
3671  *
3672  * The address handle is used to reference a local or global destination
3673  * in all UD QP post sends.
3674  */
3675 struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
3676 			     u32 flags);
3677 
3678 /**
3679  * rdma_create_user_ah - Creates an address handle for the given address vector.
3680  * It resolves destination mac address for ah attribute of RoCE type.
3681  * @pd: The protection domain associated with the address handle.
3682  * @ah_attr: The attributes of the address vector.
3683  * @udata: pointer to user's input output buffer information need by
3684  *         provider driver.
3685  *
3686  * It returns 0 on success and returns appropriate error code on error.
3687  * The address handle is used to reference a local or global destination
3688  * in all UD QP post sends.
3689  */
3690 struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
3691 				  struct rdma_ah_attr *ah_attr,
3692 				  struct ib_udata *udata);
3693 /**
3694  * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
3695  *   work completion.
3696  * @hdr: the L3 header to parse
3697  * @net_type: type of header to parse
3698  * @sgid: place to store source gid
3699  * @dgid: place to store destination gid
3700  */
3701 int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
3702 			      enum rdma_network_type net_type,
3703 			      union ib_gid *sgid, union ib_gid *dgid);
3704 
3705 /**
3706  * ib_get_rdma_header_version - Get the header version
3707  * @hdr: the L3 header to parse
3708  */
3709 int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3710 
3711 /**
3712  * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
3713  *   work completion.
3714  * @device: Device on which the received message arrived.
3715  * @port_num: Port on which the received message arrived.
3716  * @wc: Work completion associated with the received message.
3717  * @grh: References the received global route header.  This parameter is
3718  *   ignored unless the work completion indicates that the GRH is valid.
3719  * @ah_attr: Returned attributes that can be used when creating an address
3720  *   handle for replying to the message.
3721  * When ib_init_ah_attr_from_wc() returns success,
3722  * (a) for IB link layer it optionally contains a reference to SGID attribute
3723  * when GRH is present for IB link layer.
3724  * (b) for RoCE link layer it contains a reference to SGID attribute.
3725  * User must invoke rdma_cleanup_ah_attr_gid_attr() to release reference to SGID
3726  * attributes which are initialized using ib_init_ah_attr_from_wc().
3727  *
3728  */
3729 int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num,
3730 			    const struct ib_wc *wc, const struct ib_grh *grh,
3731 			    struct rdma_ah_attr *ah_attr);
3732 
3733 /**
3734  * ib_create_ah_from_wc - Creates an address handle associated with the
3735  *   sender of the specified work completion.
3736  * @pd: The protection domain associated with the address handle.
3737  * @wc: Work completion information associated with a received message.
3738  * @grh: References the received global route header.  This parameter is
3739  *   ignored unless the work completion indicates that the GRH is valid.
3740  * @port_num: The outbound port number to associate with the address.
3741  *
3742  * The address handle is used to reference a local or global destination
3743  * in all UD QP post sends.
3744  */
3745 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
3746 				   const struct ib_grh *grh, u32 port_num);
3747 
3748 /**
3749  * rdma_modify_ah - Modifies the address vector associated with an address
3750  *   handle.
3751  * @ah: The address handle to modify.
3752  * @ah_attr: The new address vector attributes to associate with the
3753  *   address handle.
3754  */
3755 int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3756 
3757 /**
3758  * rdma_query_ah - Queries the address vector associated with an address
3759  *   handle.
3760  * @ah: The address handle to query.
3761  * @ah_attr: The address vector attributes associated with the address
3762  *   handle.
3763  */
3764 int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3765 
3766 enum rdma_destroy_ah_flags {
3767 	/* In a sleepable context */
3768 	RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
3769 };
3770 
3771 /**
3772  * rdma_destroy_ah_user - Destroys an address handle.
3773  * @ah: The address handle to destroy.
3774  * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3775  * @udata: Valid user data or NULL for kernel objects
3776  */
3777 int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
3778 
3779 /**
3780  * rdma_destroy_ah - Destroys an kernel address handle.
3781  * @ah: The address handle to destroy.
3782  * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3783  *
3784  * NOTE: for user ah use rdma_destroy_ah_user with valid udata!
3785  */
3786 static inline void rdma_destroy_ah(struct ib_ah *ah, u32 flags)
3787 {
3788 	int ret = rdma_destroy_ah_user(ah, flags, NULL);
3789 
3790 	WARN_ONCE(ret, "Destroy of kernel AH shouldn't fail");
3791 }
3792 
3793 struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
3794 				  struct ib_srq_init_attr *srq_init_attr,
3795 				  struct ib_usrq_object *uobject,
3796 				  struct ib_udata *udata);
3797 static inline struct ib_srq *
3798 ib_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *srq_init_attr)
3799 {
3800 	if (!pd->device->ops.create_srq)
3801 		return ERR_PTR(-EOPNOTSUPP);
3802 
3803 	return ib_create_srq_user(pd, srq_init_attr, NULL, NULL);
3804 }
3805 
3806 /**
3807  * ib_modify_srq - Modifies the attributes for the specified SRQ.
3808  * @srq: The SRQ to modify.
3809  * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
3810  *   the current values of selected SRQ attributes are returned.
3811  * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
3812  *   are being modified.
3813  *
3814  * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
3815  * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
3816  * the number of receives queued drops below the limit.
3817  */
3818 int ib_modify_srq(struct ib_srq *srq,
3819 		  struct ib_srq_attr *srq_attr,
3820 		  enum ib_srq_attr_mask srq_attr_mask);
3821 
3822 /**
3823  * ib_query_srq - Returns the attribute list and current values for the
3824  *   specified SRQ.
3825  * @srq: The SRQ to query.
3826  * @srq_attr: The attributes of the specified SRQ.
3827  */
3828 int ib_query_srq(struct ib_srq *srq,
3829 		 struct ib_srq_attr *srq_attr);
3830 
3831 /**
3832  * ib_destroy_srq_user - Destroys the specified SRQ.
3833  * @srq: The SRQ to destroy.
3834  * @udata: Valid user data or NULL for kernel objects
3835  */
3836 int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
3837 
3838 /**
3839  * ib_destroy_srq - Destroys the specified kernel SRQ.
3840  * @srq: The SRQ to destroy.
3841  *
3842  * NOTE: for user srq use ib_destroy_srq_user with valid udata!
3843  */
3844 static inline void ib_destroy_srq(struct ib_srq *srq)
3845 {
3846 	int ret = ib_destroy_srq_user(srq, NULL);
3847 
3848 	WARN_ONCE(ret, "Destroy of kernel SRQ shouldn't fail");
3849 }
3850 
3851 /**
3852  * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
3853  * @srq: The SRQ to post the work request on.
3854  * @recv_wr: A list of work requests to post on the receive queue.
3855  * @bad_recv_wr: On an immediate failure, this parameter will reference
3856  *   the work request that failed to be posted on the QP.
3857  */
3858 static inline int ib_post_srq_recv(struct ib_srq *srq,
3859 				   const struct ib_recv_wr *recv_wr,
3860 				   const struct ib_recv_wr **bad_recv_wr)
3861 {
3862 	const struct ib_recv_wr *dummy;
3863 
3864 	return srq->device->ops.post_srq_recv(srq, recv_wr,
3865 					      bad_recv_wr ? : &dummy);
3866 }
3867 
3868 struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd,
3869 				  struct ib_qp_init_attr *qp_init_attr,
3870 				  const char *caller);
3871 /**
3872  * ib_create_qp - Creates a kernel QP associated with the specific protection
3873  * domain.
3874  * @pd: The protection domain associated with the QP.
3875  * @init_attr: A list of initial attributes required to create the
3876  *   QP.  If QP creation succeeds, then the attributes are updated to
3877  *   the actual capabilities of the created QP.
3878  */
3879 static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
3880 					 struct ib_qp_init_attr *init_attr)
3881 {
3882 	return ib_create_qp_kernel(pd, init_attr, KBUILD_MODNAME);
3883 }
3884 
3885 /**
3886  * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
3887  * @qp: The QP to modify.
3888  * @attr: On input, specifies the QP attributes to modify.  On output,
3889  *   the current values of selected QP attributes are returned.
3890  * @attr_mask: A bit-mask used to specify which attributes of the QP
3891  *   are being modified.
3892  * @udata: pointer to user's input output buffer information
3893  *   are being modified.
3894  * It returns 0 on success and returns appropriate error code on error.
3895  */
3896 int ib_modify_qp_with_udata(struct ib_qp *qp,
3897 			    struct ib_qp_attr *attr,
3898 			    int attr_mask,
3899 			    struct ib_udata *udata);
3900 
3901 /**
3902  * ib_modify_qp - Modifies the attributes for the specified QP and then
3903  *   transitions the QP to the given state.
3904  * @qp: The QP to modify.
3905  * @qp_attr: On input, specifies the QP attributes to modify.  On output,
3906  *   the current values of selected QP attributes are returned.
3907  * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
3908  *   are being modified.
3909  */
3910 int ib_modify_qp(struct ib_qp *qp,
3911 		 struct ib_qp_attr *qp_attr,
3912 		 int qp_attr_mask);
3913 
3914 /**
3915  * ib_query_qp - Returns the attribute list and current values for the
3916  *   specified QP.
3917  * @qp: The QP to query.
3918  * @qp_attr: The attributes of the specified QP.
3919  * @qp_attr_mask: A bit-mask used to select specific attributes to query.
3920  * @qp_init_attr: Additional attributes of the selected QP.
3921  *
3922  * The qp_attr_mask may be used to limit the query to gathering only the
3923  * selected attributes.
3924  */
3925 int ib_query_qp(struct ib_qp *qp,
3926 		struct ib_qp_attr *qp_attr,
3927 		int qp_attr_mask,
3928 		struct ib_qp_init_attr *qp_init_attr);
3929 
3930 /**
3931  * ib_destroy_qp - Destroys the specified QP.
3932  * @qp: The QP to destroy.
3933  * @udata: Valid udata or NULL for kernel objects
3934  */
3935 int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);
3936 
3937 /**
3938  * ib_destroy_qp - Destroys the specified kernel QP.
3939  * @qp: The QP to destroy.
3940  *
3941  * NOTE: for user qp use ib_destroy_qp_user with valid udata!
3942  */
3943 static inline int ib_destroy_qp(struct ib_qp *qp)
3944 {
3945 	return ib_destroy_qp_user(qp, NULL);
3946 }
3947 
3948 /**
3949  * ib_open_qp - Obtain a reference to an existing sharable QP.
3950  * @xrcd: XRC domain
3951  * @qp_open_attr: Attributes identifying the QP to open.
3952  *
3953  * Returns a reference to a sharable QP.
3954  */
3955 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3956 			 struct ib_qp_open_attr *qp_open_attr);
3957 
3958 /**
3959  * ib_close_qp - Release an external reference to a QP.
3960  * @qp: The QP handle to release
3961  *
3962  * The opened QP handle is released by the caller.  The underlying
3963  * shared QP is not destroyed until all internal references are released.
3964  */
3965 int ib_close_qp(struct ib_qp *qp);
3966 
3967 /**
3968  * ib_post_send - Posts a list of work requests to the send queue of
3969  *   the specified QP.
3970  * @qp: The QP to post the work request on.
3971  * @send_wr: A list of work requests to post on the send queue.
3972  * @bad_send_wr: On an immediate failure, this parameter will reference
3973  *   the work request that failed to be posted on the QP.
3974  *
3975  * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
3976  * error is returned, the QP state shall not be affected,
3977  * ib_post_send() will return an immediate error after queueing any
3978  * earlier work requests in the list.
3979  */
3980 static inline int ib_post_send(struct ib_qp *qp,
3981 			       const struct ib_send_wr *send_wr,
3982 			       const struct ib_send_wr **bad_send_wr)
3983 {
3984 	const struct ib_send_wr *dummy;
3985 
3986 	return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
3987 }
3988 
3989 /**
3990  * ib_post_recv - Posts a list of work requests to the receive queue of
3991  *   the specified QP.
3992  * @qp: The QP to post the work request on.
3993  * @recv_wr: A list of work requests to post on the receive queue.
3994  * @bad_recv_wr: On an immediate failure, this parameter will reference
3995  *   the work request that failed to be posted on the QP.
3996  */
3997 static inline int ib_post_recv(struct ib_qp *qp,
3998 			       const struct ib_recv_wr *recv_wr,
3999 			       const struct ib_recv_wr **bad_recv_wr)
4000 {
4001 	const struct ib_recv_wr *dummy;
4002 
4003 	return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
4004 }
4005 
4006 struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
4007 			    int comp_vector, enum ib_poll_context poll_ctx,
4008 			    const char *caller);
4009 static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
4010 					int nr_cqe, int comp_vector,
4011 					enum ib_poll_context poll_ctx)
4012 {
4013 	return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
4014 			     KBUILD_MODNAME);
4015 }
4016 
4017 struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
4018 				int nr_cqe, enum ib_poll_context poll_ctx,
4019 				const char *caller);
4020 
4021 /**
4022  * ib_alloc_cq_any: Allocate kernel CQ
4023  * @dev: The IB device
4024  * @private: Private data attached to the CQE
4025  * @nr_cqe: Number of CQEs in the CQ
4026  * @poll_ctx: Context used for polling the CQ
4027  */
4028 static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
4029 					    void *private, int nr_cqe,
4030 					    enum ib_poll_context poll_ctx)
4031 {
4032 	return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
4033 				 KBUILD_MODNAME);
4034 }
4035 
4036 void ib_free_cq(struct ib_cq *cq);
4037 int ib_process_cq_direct(struct ib_cq *cq, int budget);
4038 
4039 /**
4040  * ib_create_cq - Creates a CQ on the specified device.
4041  * @device: The device on which to create the CQ.
4042  * @comp_handler: A user-specified callback that is invoked when a
4043  *   completion event occurs on the CQ.
4044  * @event_handler: A user-specified callback that is invoked when an
4045  *   asynchronous event not associated with a completion occurs on the CQ.
4046  * @cq_context: Context associated with the CQ returned to the user via
4047  *   the associated completion and event handlers.
4048  * @cq_attr: The attributes the CQ should be created upon.
4049  *
4050  * Users can examine the cq structure to determine the actual CQ size.
4051  */
4052 struct ib_cq *__ib_create_cq(struct ib_device *device,
4053 			     ib_comp_handler comp_handler,
4054 			     void (*event_handler)(struct ib_event *, void *),
4055 			     void *cq_context,
4056 			     const struct ib_cq_init_attr *cq_attr,
4057 			     const char *caller);
4058 #define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
4059 	__ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
4060 
4061 /**
4062  * ib_resize_cq - Modifies the capacity of the CQ.
4063  * @cq: The CQ to resize.
4064  * @cqe: The minimum size of the CQ.
4065  *
4066  * Users can examine the cq structure to determine the actual CQ size.
4067  */
4068 int ib_resize_cq(struct ib_cq *cq, int cqe);
4069 
4070 /**
4071  * rdma_set_cq_moderation - Modifies moderation params of the CQ
4072  * @cq: The CQ to modify.
4073  * @cq_count: number of CQEs that will trigger an event
4074  * @cq_period: max period of time in usec before triggering an event
4075  *
4076  */
4077 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
4078 
4079 /**
4080  * ib_destroy_cq_user - Destroys the specified CQ.
4081  * @cq: The CQ to destroy.
4082  * @udata: Valid user data or NULL for kernel objects
4083  */
4084 int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
4085 
4086 /**
4087  * ib_destroy_cq - Destroys the specified kernel CQ.
4088  * @cq: The CQ to destroy.
4089  *
4090  * NOTE: for user cq use ib_destroy_cq_user with valid udata!
4091  */
4092 static inline void ib_destroy_cq(struct ib_cq *cq)
4093 {
4094 	int ret = ib_destroy_cq_user(cq, NULL);
4095 
4096 	WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
4097 }
4098 
4099 /**
4100  * ib_poll_cq - poll a CQ for completion(s)
4101  * @cq:the CQ being polled
4102  * @num_entries:maximum number of completions to return
4103  * @wc:array of at least @num_entries &struct ib_wc where completions
4104  *   will be returned
4105  *
4106  * Poll a CQ for (possibly multiple) completions.  If the return value
4107  * is < 0, an error occurred.  If the return value is >= 0, it is the
4108  * number of completions returned.  If the return value is
4109  * non-negative and < num_entries, then the CQ was emptied.
4110  */
4111 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
4112 			     struct ib_wc *wc)
4113 {
4114 	return cq->device->ops.poll_cq(cq, num_entries, wc);
4115 }
4116 
4117 /**
4118  * ib_req_notify_cq - Request completion notification on a CQ.
4119  * @cq: The CQ to generate an event for.
4120  * @flags:
4121  *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
4122  *   to request an event on the next solicited event or next work
4123  *   completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
4124  *   may also be |ed in to request a hint about missed events, as
4125  *   described below.
4126  *
4127  * Return Value:
4128  *    < 0 means an error occurred while requesting notification
4129  *   == 0 means notification was requested successfully, and if
4130  *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
4131  *        were missed and it is safe to wait for another event.  In
4132  *        this case is it guaranteed that any work completions added
4133  *        to the CQ since the last CQ poll will trigger a completion
4134  *        notification event.
4135  *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
4136  *        in.  It means that the consumer must poll the CQ again to
4137  *        make sure it is empty to avoid missing an event because of a
4138  *        race between requesting notification and an entry being
4139  *        added to the CQ.  This return value means it is possible
4140  *        (but not guaranteed) that a work completion has been added
4141  *        to the CQ since the last poll without triggering a
4142  *        completion notification event.
4143  */
4144 static inline int ib_req_notify_cq(struct ib_cq *cq,
4145 				   enum ib_cq_notify_flags flags)
4146 {
4147 	return cq->device->ops.req_notify_cq(cq, flags);
4148 }
4149 
4150 struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
4151 			     int comp_vector_hint,
4152 			     enum ib_poll_context poll_ctx);
4153 
4154 void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe);
4155 
4156 /*
4157  * Drivers that don't need a DMA mapping at the RDMA layer, set dma_device to
4158  * NULL. This causes the ib_dma* helpers to just stash the kernel virtual
4159  * address into the dma address.
4160  */
4161 static inline bool ib_uses_virt_dma(struct ib_device *dev)
4162 {
4163 	return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device;
4164 }
4165 
4166 /*
4167  * Check if a IB device's underlying DMA mapping supports P2PDMA transfers.
4168  */
4169 static inline bool ib_dma_pci_p2p_dma_supported(struct ib_device *dev)
4170 {
4171 	if (ib_uses_virt_dma(dev))
4172 		return false;
4173 
4174 	return dma_pci_p2pdma_supported(dev->dma_device);
4175 }
4176 
4177 /**
4178  * ib_virt_dma_to_ptr - Convert a dma_addr to a kernel pointer
4179  * @dma_addr: The DMA address
4180  *
4181  * Used by ib_uses_virt_dma() devices to get back to the kernel pointer after
4182  * going through the dma_addr marshalling.
4183  */
4184 static inline void *ib_virt_dma_to_ptr(u64 dma_addr)
4185 {
4186 	/* virt_dma mode maps the kvs's directly into the dma addr */
4187 	return (void *)(uintptr_t)dma_addr;
4188 }
4189 
4190 /**
4191  * ib_virt_dma_to_page - Convert a dma_addr to a struct page
4192  * @dma_addr: The DMA address
4193  *
4194  * Used by ib_uses_virt_dma() device to get back to the struct page after going
4195  * through the dma_addr marshalling.
4196  */
4197 static inline struct page *ib_virt_dma_to_page(u64 dma_addr)
4198 {
4199 	return virt_to_page(ib_virt_dma_to_ptr(dma_addr));
4200 }
4201 
4202 /**
4203  * ib_dma_mapping_error - check a DMA addr for error
4204  * @dev: The device for which the dma_addr was created
4205  * @dma_addr: The DMA address to check
4206  */
4207 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
4208 {
4209 	if (ib_uses_virt_dma(dev))
4210 		return 0;
4211 	return dma_mapping_error(dev->dma_device, dma_addr);
4212 }
4213 
4214 /**
4215  * ib_dma_map_single - Map a kernel virtual address to DMA address
4216  * @dev: The device for which the dma_addr is to be created
4217  * @cpu_addr: The kernel virtual address
4218  * @size: The size of the region in bytes
4219  * @direction: The direction of the DMA
4220  */
4221 static inline u64 ib_dma_map_single(struct ib_device *dev,
4222 				    void *cpu_addr, size_t size,
4223 				    enum dma_data_direction direction)
4224 {
4225 	if (ib_uses_virt_dma(dev))
4226 		return (uintptr_t)cpu_addr;
4227 	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
4228 }
4229 
4230 /**
4231  * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
4232  * @dev: The device for which the DMA address was created
4233  * @addr: The DMA address
4234  * @size: The size of the region in bytes
4235  * @direction: The direction of the DMA
4236  */
4237 static inline void ib_dma_unmap_single(struct ib_device *dev,
4238 				       u64 addr, size_t size,
4239 				       enum dma_data_direction direction)
4240 {
4241 	if (!ib_uses_virt_dma(dev))
4242 		dma_unmap_single(dev->dma_device, addr, size, direction);
4243 }
4244 
4245 /**
4246  * ib_dma_map_page - Map a physical page to DMA address
4247  * @dev: The device for which the dma_addr is to be created
4248  * @page: The page to be mapped
4249  * @offset: The offset within the page
4250  * @size: The size of the region in bytes
4251  * @direction: The direction of the DMA
4252  */
4253 static inline u64 ib_dma_map_page(struct ib_device *dev,
4254 				  struct page *page,
4255 				  unsigned long offset,
4256 				  size_t size,
4257 					 enum dma_data_direction direction)
4258 {
4259 	if (ib_uses_virt_dma(dev))
4260 		return (uintptr_t)(page_address(page) + offset);
4261 	return dma_map_page(dev->dma_device, page, offset, size, direction);
4262 }
4263 
4264 /**
4265  * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
4266  * @dev: The device for which the DMA address was created
4267  * @addr: The DMA address
4268  * @size: The size of the region in bytes
4269  * @direction: The direction of the DMA
4270  */
4271 static inline void ib_dma_unmap_page(struct ib_device *dev,
4272 				     u64 addr, size_t size,
4273 				     enum dma_data_direction direction)
4274 {
4275 	if (!ib_uses_virt_dma(dev))
4276 		dma_unmap_page(dev->dma_device, addr, size, direction);
4277 }
4278 
4279 /**
4280  * ib_dma_map_bvec - Map a bio_vec to DMA address
4281  * @dev: The device for which the dma_addr is to be created
4282  * @bvec: The bio_vec to map
4283  * @direction: The direction of the DMA
4284  *
4285  * Returns a DMA address for the bio_vec. The caller must check the
4286  * result with ib_dma_mapping_error() before use; a failed mapping
4287  * must not be passed to ib_dma_unmap_bvec().
4288  *
4289  * For software RDMA devices (rxe, siw), returns a virtual address
4290  * and no actual DMA mapping occurs.
4291  */
4292 static inline u64 ib_dma_map_bvec(struct ib_device *dev,
4293 				  struct bio_vec *bvec,
4294 				  enum dma_data_direction direction)
4295 {
4296 	if (ib_uses_virt_dma(dev))
4297 		return (uintptr_t)bvec_virt(bvec);
4298 	return dma_map_phys(dev->dma_device, bvec_phys(bvec),
4299 			    bvec->bv_len, direction, 0);
4300 }
4301 
4302 /**
4303  * ib_dma_unmap_bvec - Unmap a bio_vec DMA mapping
4304  * @dev: The device for which the DMA address was created
4305  * @addr: The DMA address returned by ib_dma_map_bvec()
4306  * @size: The size of the region in bytes
4307  * @direction: The direction of the DMA
4308  *
4309  * Releases a DMA mapping created by ib_dma_map_bvec(). For software
4310  * RDMA devices this is a no-op since no actual mapping occurred.
4311  */
4312 static inline void ib_dma_unmap_bvec(struct ib_device *dev,
4313 				     u64 addr, size_t size,
4314 				     enum dma_data_direction direction)
4315 {
4316 	if (!ib_uses_virt_dma(dev))
4317 		dma_unmap_phys(dev->dma_device, addr, size, direction, 0);
4318 }
4319 
4320 int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents);
4321 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
4322 				      struct scatterlist *sg, int nents,
4323 				      enum dma_data_direction direction,
4324 				      unsigned long dma_attrs)
4325 {
4326 	if (ib_uses_virt_dma(dev))
4327 		return ib_dma_virt_map_sg(dev, sg, nents);
4328 	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
4329 				dma_attrs);
4330 }
4331 
4332 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
4333 					 struct scatterlist *sg, int nents,
4334 					 enum dma_data_direction direction,
4335 					 unsigned long dma_attrs)
4336 {
4337 	if (!ib_uses_virt_dma(dev))
4338 		dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
4339 				   dma_attrs);
4340 }
4341 
4342 /**
4343  * ib_dma_map_sgtable_attrs - Map a scatter/gather table to DMA addresses
4344  * @dev: The device for which the DMA addresses are to be created
4345  * @sgt: The sg_table object describing the buffer
4346  * @direction: The direction of the DMA
4347  * @dma_attrs: Optional DMA attributes for the map operation
4348  */
4349 static inline int ib_dma_map_sgtable_attrs(struct ib_device *dev,
4350 					   struct sg_table *sgt,
4351 					   enum dma_data_direction direction,
4352 					   unsigned long dma_attrs)
4353 {
4354 	int nents;
4355 
4356 	if (ib_uses_virt_dma(dev)) {
4357 		nents = ib_dma_virt_map_sg(dev, sgt->sgl, sgt->orig_nents);
4358 		if (!nents)
4359 			return -EIO;
4360 		sgt->nents = nents;
4361 		return 0;
4362 	}
4363 	return dma_map_sgtable(dev->dma_device, sgt, direction, dma_attrs);
4364 }
4365 
4366 static inline void ib_dma_unmap_sgtable_attrs(struct ib_device *dev,
4367 					      struct sg_table *sgt,
4368 					      enum dma_data_direction direction,
4369 					      unsigned long dma_attrs)
4370 {
4371 	if (!ib_uses_virt_dma(dev))
4372 		dma_unmap_sgtable(dev->dma_device, sgt, direction, dma_attrs);
4373 }
4374 
4375 /**
4376  * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
4377  * @dev: The device for which the DMA addresses are to be created
4378  * @sg: The array of scatter/gather entries
4379  * @nents: The number of scatter/gather entries
4380  * @direction: The direction of the DMA
4381  */
4382 static inline int ib_dma_map_sg(struct ib_device *dev,
4383 				struct scatterlist *sg, int nents,
4384 				enum dma_data_direction direction)
4385 {
4386 	return ib_dma_map_sg_attrs(dev, sg, nents, direction, 0);
4387 }
4388 
4389 /**
4390  * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
4391  * @dev: The device for which the DMA addresses were created
4392  * @sg: The array of scatter/gather entries
4393  * @nents: The number of scatter/gather entries
4394  * @direction: The direction of the DMA
4395  */
4396 static inline void ib_dma_unmap_sg(struct ib_device *dev,
4397 				   struct scatterlist *sg, int nents,
4398 				   enum dma_data_direction direction)
4399 {
4400 	ib_dma_unmap_sg_attrs(dev, sg, nents, direction, 0);
4401 }
4402 
4403 /**
4404  * ib_dma_max_seg_size - Return the size limit of a single DMA transfer
4405  * @dev: The device to query
4406  *
4407  * The returned value represents a size in bytes.
4408  */
4409 static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
4410 {
4411 	if (ib_uses_virt_dma(dev))
4412 		return UINT_MAX;
4413 	return dma_get_max_seg_size(dev->dma_device);
4414 }
4415 
4416 /**
4417  * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
4418  * @dev: The device for which the DMA address was created
4419  * @addr: The DMA address
4420  * @size: The size of the region in bytes
4421  * @dir: The direction of the DMA
4422  */
4423 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
4424 					      u64 addr,
4425 					      size_t size,
4426 					      enum dma_data_direction dir)
4427 {
4428 	if (!ib_uses_virt_dma(dev))
4429 		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
4430 }
4431 
4432 /**
4433  * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
4434  * @dev: The device for which the DMA address was created
4435  * @addr: The DMA address
4436  * @size: The size of the region in bytes
4437  * @dir: The direction of the DMA
4438  */
4439 static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
4440 						 u64 addr,
4441 						 size_t size,
4442 						 enum dma_data_direction dir)
4443 {
4444 	if (!ib_uses_virt_dma(dev))
4445 		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
4446 }
4447 
4448 /* ib_reg_user_mr - register a memory region for virtual addresses from kernel
4449  * space. This function should be called when 'current' is the owning MM.
4450  */
4451 struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
4452 			     u64 virt_addr, int mr_access_flags);
4453 
4454 /* ib_advise_mr -  give an advice about an address range in a memory region */
4455 int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
4456 		 u32 flags, struct ib_sge *sg_list, u32 num_sge);
4457 /**
4458  * ib_dereg_mr_user - Deregisters a memory region and removes it from the
4459  *   HCA translation table.
4460  * @mr: The memory region to deregister.
4461  * @udata: Valid user data or NULL for kernel object
4462  *
4463  * This function can fail, if the memory region has memory windows bound to it.
4464  */
4465 int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);
4466 
4467 /**
4468  * ib_dereg_mr - Deregisters a kernel memory region and removes it from the
4469  *   HCA translation table.
4470  * @mr: The memory region to deregister.
4471  *
4472  * This function can fail, if the memory region has memory windows bound to it.
4473  *
4474  * NOTE: for user mr use ib_dereg_mr_user with valid udata!
4475  */
4476 static inline int ib_dereg_mr(struct ib_mr *mr)
4477 {
4478 	return ib_dereg_mr_user(mr, NULL);
4479 }
4480 
4481 struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
4482 			  u32 max_num_sg);
4483 
4484 struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
4485 				    u32 max_num_data_sg,
4486 				    u32 max_num_meta_sg);
4487 
4488 /**
4489  * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
4490  *   R_Key and L_Key.
4491  * @mr: struct ib_mr pointer to be updated.
4492  * @newkey: new key to be used.
4493  */
4494 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
4495 {
4496 	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
4497 	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
4498 }
4499 
4500 /**
4501  * ib_inc_rkey - increments the key portion of the given rkey. Can be used
4502  * for calculating a new rkey for type 2 memory windows.
4503  * @rkey: the rkey to increment.
4504  */
4505 static inline u32 ib_inc_rkey(u32 rkey)
4506 {
4507 	const u32 mask = 0x000000ff;
4508 	return ((rkey + 1) & mask) | (rkey & ~mask);
4509 }
4510 
4511 /**
4512  * ib_attach_mcast - Attaches the specified QP to a multicast group.
4513  * @qp: QP to attach to the multicast group.  The QP must be type
4514  *   IB_QPT_UD.
4515  * @gid: Multicast group GID.
4516  * @lid: Multicast group LID in host byte order.
4517  *
4518  * In order to send and receive multicast packets, subnet
4519  * administration must have created the multicast group and configured
4520  * the fabric appropriately.  The port associated with the specified
4521  * QP must also be a member of the multicast group.
4522  */
4523 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4524 
4525 /**
4526  * ib_detach_mcast - Detaches the specified QP from a multicast group.
4527  * @qp: QP to detach from the multicast group.
4528  * @gid: Multicast group GID.
4529  * @lid: Multicast group LID in host byte order.
4530  */
4531 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4532 
4533 struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
4534 				   struct inode *inode, struct ib_udata *udata);
4535 int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata);
4536 
4537 static inline int ib_check_mr_access(struct ib_device *ib_dev,
4538 				     unsigned int flags)
4539 {
4540 	u64 device_cap = ib_dev->attrs.device_cap_flags;
4541 
4542 	/*
4543 	 * Local write permission is required if remote write or
4544 	 * remote atomic permission is also requested.
4545 	 */
4546 	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
4547 	    !(flags & IB_ACCESS_LOCAL_WRITE))
4548 		return -EINVAL;
4549 
4550 	if (flags & ~IB_ACCESS_SUPPORTED)
4551 		return -EINVAL;
4552 
4553 	if (flags & IB_ACCESS_ON_DEMAND &&
4554 	    !(ib_dev->attrs.kernel_cap_flags & IBK_ON_DEMAND_PAGING))
4555 		return -EOPNOTSUPP;
4556 
4557 	if ((flags & IB_ACCESS_FLUSH_GLOBAL &&
4558 	    !(device_cap & IB_DEVICE_FLUSH_GLOBAL)) ||
4559 	    (flags & IB_ACCESS_FLUSH_PERSISTENT &&
4560 	    !(device_cap & IB_DEVICE_FLUSH_PERSISTENT)))
4561 		return -EOPNOTSUPP;
4562 
4563 	return 0;
4564 }
4565 
4566 static inline bool ib_access_writable(int access_flags)
4567 {
4568 	/*
4569 	 * We have writable memory backing the MR if any of the following
4570 	 * access flags are set.  "Local write" and "remote write" obviously
4571 	 * require write access.  "Remote atomic" can do things like fetch and
4572 	 * add, which will modify memory, and "MW bind" can change permissions
4573 	 * by binding a window.
4574 	 */
4575 	return access_flags &
4576 		(IB_ACCESS_LOCAL_WRITE   | IB_ACCESS_REMOTE_WRITE |
4577 		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
4578 }
4579 
4580 /**
4581  * ib_check_mr_status: lightweight check of MR status.
4582  *     This routine may provide status checks on a selected
4583  *     ib_mr. first use is for signature status check.
4584  *
4585  * @mr: A memory region.
4586  * @check_mask: Bitmask of which checks to perform from
4587  *     ib_mr_status_check enumeration.
4588  * @mr_status: The container of relevant status checks.
4589  *     failed checks will be indicated in the status bitmask
4590  *     and the relevant info shall be in the error item.
4591  */
4592 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
4593 		       struct ib_mr_status *mr_status);
4594 
4595 /**
4596  * ib_device_try_get: Hold a registration lock
4597  * @dev: The device to lock
4598  *
4599  * A device under an active registration lock cannot become unregistered. It
4600  * is only possible to obtain a registration lock on a device that is fully
4601  * registered, otherwise this function returns false.
4602  *
4603  * The registration lock is only necessary for actions which require the
4604  * device to still be registered. Uses that only require the device pointer to
4605  * be valid should use get_device(&ibdev->dev) to hold the memory.
4606  *
4607  */
4608 static inline bool ib_device_try_get(struct ib_device *dev)
4609 {
4610 	return refcount_inc_not_zero(&dev->refcount);
4611 }
4612 
4613 void ib_device_put(struct ib_device *device);
4614 struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
4615 					  enum rdma_driver_id driver_id);
4616 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port,
4617 					    u16 pkey, const union ib_gid *gid,
4618 					    const struct sockaddr *addr);
4619 int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
4620 			 unsigned int port);
4621 struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
4622 					u32 port);
4623 int ib_query_netdev_port(struct ib_device *ibdev, struct net_device *ndev,
4624 			 u32 *port);
4625 
4626 static inline enum ib_port_state ib_get_curr_port_state(struct net_device *net_dev)
4627 {
4628 	return (netif_running(net_dev) && netif_carrier_ok(net_dev)) ?
4629 		IB_PORT_ACTIVE : IB_PORT_DOWN;
4630 }
4631 
4632 void ib_dispatch_port_state_event(struct ib_device *ibdev,
4633 				  struct net_device *ndev);
4634 struct ib_wq *ib_create_wq(struct ib_pd *pd,
4635 			   struct ib_wq_init_attr *init_attr);
4636 int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
4637 
4638 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4639 		 unsigned int *sg_offset, unsigned int page_size);
4640 int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
4641 		    int data_sg_nents, unsigned int *data_sg_offset,
4642 		    struct scatterlist *meta_sg, int meta_sg_nents,
4643 		    unsigned int *meta_sg_offset, unsigned int page_size);
4644 
4645 static inline int
4646 ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4647 		  unsigned int *sg_offset, unsigned int page_size)
4648 {
4649 	int n;
4650 
4651 	n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
4652 	mr->iova = 0;
4653 
4654 	return n;
4655 }
4656 
4657 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
4658 		unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
4659 
4660 void ib_drain_rq(struct ib_qp *qp);
4661 void ib_drain_sq(struct ib_qp *qp);
4662 void ib_drain_qp(struct ib_qp *qp);
4663 
4664 int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed,
4665 		     u8 *width);
4666 
4667 static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
4668 {
4669 	if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
4670 		return attr->roce.dmac;
4671 	return NULL;
4672 }
4673 
4674 static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
4675 {
4676 	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4677 		attr->ib.dlid = (u16)dlid;
4678 	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4679 		attr->opa.dlid = dlid;
4680 }
4681 
4682 static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
4683 {
4684 	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4685 		return attr->ib.dlid;
4686 	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4687 		return attr->opa.dlid;
4688 	return 0;
4689 }
4690 
4691 static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
4692 {
4693 	attr->sl = sl;
4694 }
4695 
4696 static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
4697 {
4698 	return attr->sl;
4699 }
4700 
4701 static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
4702 					 u8 src_path_bits)
4703 {
4704 	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4705 		attr->ib.src_path_bits = src_path_bits;
4706 	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4707 		attr->opa.src_path_bits = src_path_bits;
4708 }
4709 
4710 static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
4711 {
4712 	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4713 		return attr->ib.src_path_bits;
4714 	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4715 		return attr->opa.src_path_bits;
4716 	return 0;
4717 }
4718 
4719 static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
4720 					bool make_grd)
4721 {
4722 	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4723 		attr->opa.make_grd = make_grd;
4724 }
4725 
4726 static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
4727 {
4728 	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4729 		return attr->opa.make_grd;
4730 	return false;
4731 }
4732 
4733 static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u32 port_num)
4734 {
4735 	attr->port_num = port_num;
4736 }
4737 
4738 static inline u32 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
4739 {
4740 	return attr->port_num;
4741 }
4742 
4743 static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
4744 					   u8 static_rate)
4745 {
4746 	attr->static_rate = static_rate;
4747 }
4748 
4749 static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
4750 {
4751 	return attr->static_rate;
4752 }
4753 
4754 static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
4755 					enum ib_ah_flags flag)
4756 {
4757 	attr->ah_flags = flag;
4758 }
4759 
4760 static inline enum ib_ah_flags
4761 		rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
4762 {
4763 	return attr->ah_flags;
4764 }
4765 
4766 static inline const struct ib_global_route
4767 		*rdma_ah_read_grh(const struct rdma_ah_attr *attr)
4768 {
4769 	return &attr->grh;
4770 }
4771 
4772 /*To retrieve and modify the grh */
4773 static inline struct ib_global_route
4774 		*rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
4775 {
4776 	return &attr->grh;
4777 }
4778 
4779 static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
4780 {
4781 	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4782 
4783 	memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
4784 }
4785 
4786 static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
4787 					     __be64 prefix)
4788 {
4789 	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4790 
4791 	grh->dgid.global.subnet_prefix = prefix;
4792 }
4793 
4794 static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
4795 					    __be64 if_id)
4796 {
4797 	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4798 
4799 	grh->dgid.global.interface_id = if_id;
4800 }
4801 
4802 static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
4803 				   union ib_gid *dgid, u32 flow_label,
4804 				   u8 sgid_index, u8 hop_limit,
4805 				   u8 traffic_class)
4806 {
4807 	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4808 
4809 	attr->ah_flags = IB_AH_GRH;
4810 	if (dgid)
4811 		grh->dgid = *dgid;
4812 	grh->flow_label = flow_label;
4813 	grh->sgid_index = sgid_index;
4814 	grh->hop_limit = hop_limit;
4815 	grh->traffic_class = traffic_class;
4816 	grh->sgid_attr = NULL;
4817 }
4818 
4819 void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
4820 void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
4821 			     u32 flow_label, u8 hop_limit, u8 traffic_class,
4822 			     const struct ib_gid_attr *sgid_attr);
4823 void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
4824 		       const struct rdma_ah_attr *src);
4825 void rdma_replace_ah_attr(struct rdma_ah_attr *old,
4826 			  const struct rdma_ah_attr *new);
4827 void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
4828 
4829 /**
4830  * rdma_ah_find_type - Return address handle type.
4831  *
4832  * @dev: Device to be checked
4833  * @port_num: Port number
4834  */
4835 static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
4836 						       u32 port_num)
4837 {
4838 	if (rdma_protocol_roce(dev, port_num))
4839 		return RDMA_AH_ATTR_TYPE_ROCE;
4840 	if (rdma_protocol_ib(dev, port_num)) {
4841 		if (rdma_cap_opa_ah(dev, port_num))
4842 			return RDMA_AH_ATTR_TYPE_OPA;
4843 		return RDMA_AH_ATTR_TYPE_IB;
4844 	}
4845 	if (dev->type == RDMA_DEVICE_TYPE_SMI)
4846 		return RDMA_AH_ATTR_TYPE_IB;
4847 
4848 	return RDMA_AH_ATTR_TYPE_UNDEFINED;
4849 }
4850 
4851 /**
4852  * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
4853  *     In the current implementation the only way to
4854  *     get the 32bit lid is from other sources for OPA.
4855  *     For IB, lids will always be 16bits so cast the
4856  *     value accordingly.
4857  *
4858  * @lid: A 32bit LID
4859  */
4860 static inline u16 ib_lid_cpu16(u32 lid)
4861 {
4862 	WARN_ON_ONCE(lid & 0xFFFF0000);
4863 	return (u16)lid;
4864 }
4865 
4866 /**
4867  * ib_lid_be16 - Return lid in 16bit BE encoding.
4868  *
4869  * @lid: A 32bit LID
4870  */
4871 static inline __be16 ib_lid_be16(u32 lid)
4872 {
4873 	WARN_ON_ONCE(lid & 0xFFFF0000);
4874 	return cpu_to_be16((u16)lid);
4875 }
4876 
4877 /**
4878  * ib_get_vector_affinity - Get the affinity mappings of a given completion
4879  *   vector
4880  * @device:         the rdma device
4881  * @comp_vector:    index of completion vector
4882  *
4883  * Returns NULL on failure, otherwise a corresponding cpu map of the
4884  * completion vector (returns all-cpus map if the device driver doesn't
4885  * implement get_vector_affinity).
4886  */
4887 static inline const struct cpumask *
4888 ib_get_vector_affinity(struct ib_device *device, int comp_vector)
4889 {
4890 	if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
4891 	    !device->ops.get_vector_affinity)
4892 		return NULL;
4893 
4894 	return device->ops.get_vector_affinity(device, comp_vector);
4895 
4896 }
4897 
4898 /**
4899  * rdma_roce_rescan_device - Rescan all of the network devices in the system
4900  * and add their gids, as needed, to the relevant RoCE devices.
4901  *
4902  * @ibdev:         the rdma device
4903  */
4904 void rdma_roce_rescan_device(struct ib_device *ibdev);
4905 void rdma_roce_rescan_port(struct ib_device *ib_dev, u32 port);
4906 void roce_del_all_netdev_gids(struct ib_device *ib_dev,
4907 			      u32 port, struct net_device *ndev);
4908 
4909 struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
4910 
4911 #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
4912 int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
4913 bool rdma_uattrs_has_raw_cap(const struct uverbs_attr_bundle *attrs);
4914 #else
4915 static inline int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs)
4916 {
4917 	return 0;
4918 }
4919 static inline bool
4920 rdma_uattrs_has_raw_cap(const struct uverbs_attr_bundle *attrs)
4921 {
4922 	return false;
4923 }
4924 #endif
4925 
4926 struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
4927 				     enum rdma_netdev_t type, const char *name,
4928 				     unsigned char name_assign_type,
4929 				     void (*setup)(struct net_device *));
4930 
4931 int rdma_init_netdev(struct ib_device *device, u32 port_num,
4932 		     enum rdma_netdev_t type, const char *name,
4933 		     unsigned char name_assign_type,
4934 		     void (*setup)(struct net_device *),
4935 		     struct net_device *netdev);
4936 
4937 /**
4938  * rdma_device_to_ibdev - Get ib_device pointer from device pointer
4939  *
4940  * @device:	device pointer for which ib_device pointer to retrieve
4941  *
4942  * rdma_device_to_ibdev() retrieves ib_device pointer from device.
4943  *
4944  */
4945 static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
4946 {
4947 	struct ib_core_device *coredev =
4948 		container_of(device, struct ib_core_device, dev);
4949 
4950 	return coredev->owner;
4951 }
4952 
4953 /**
4954  * ibdev_to_node - return the NUMA node for a given ib_device
4955  * @ibdev:	device to get the NUMA node for.
4956  */
4957 static inline int ibdev_to_node(struct ib_device *ibdev)
4958 {
4959 	struct device *parent = ibdev->dev.parent;
4960 
4961 	if (!parent)
4962 		return NUMA_NO_NODE;
4963 	return dev_to_node(parent);
4964 }
4965 
4966 /**
4967  * rdma_device_to_drv_device - Helper macro to reach back to driver's
4968  *			       ib_device holder structure from device pointer.
4969  *
4970  * NOTE: New drivers should not make use of this API; This API is only for
4971  * existing drivers who have exposed sysfs entries using
4972  * ops->device_group.
4973  */
4974 #define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member)           \
4975 	container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
4976 
4977 bool rdma_dev_access_netns(const struct ib_device *device,
4978 			   const struct net *net);
4979 
4980 bool rdma_dev_has_raw_cap(const struct ib_device *dev);
4981 static inline struct net *rdma_dev_net(struct ib_device *device)
4982 {
4983 	return read_pnet(&device->coredev.rdma_net);
4984 }
4985 
4986 #define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
4987 #define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF)
4988 #define IB_GRH_FLOWLABEL_MASK (0x000FFFFF)
4989 
4990 /**
4991  * rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based
4992  *                               on the flow_label
4993  * @fl: flow_label value
4994  *
4995  * This function will convert the 20 bit flow_label input to a valid RoCE v2
4996  * UDP src port 14 bit value. All RoCE V2 drivers should use this same
4997  * convention.
4998  */
4999 static inline u16 rdma_flow_label_to_udp_sport(u32 fl)
5000 {
5001 	u32 fl_low = fl & 0x03fff, fl_high = fl & 0xFC000;
5002 
5003 	fl_low ^= fl_high >> 14;
5004 	return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN);
5005 }
5006 
5007 /**
5008  * rdma_calc_flow_label - generate a RDMA symmetric flow label value based on
5009  *                        local and remote qpn values
5010  *
5011  * This function folded the multiplication results of two qpns, 24 bit each,
5012  * fields, and converts it to a 20 bit results.
5013  *
5014  * This function will create symmetric flow_label value based on the local
5015  * and remote qpn values. this will allow both the requester and responder
5016  * to calculate the same flow_label for a given connection.
5017  *
5018  * This helper function should be used by driver in case the upper layer
5019  * provide a zero flow_label value. This is to improve entropy of RDMA
5020  * traffic in the network.
5021  */
5022 static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn)
5023 {
5024 	u64 v = (u64)lqpn * rqpn;
5025 
5026 	v ^= v >> 20;
5027 	v ^= v >> 40;
5028 
5029 	return (u32)(v & IB_GRH_FLOWLABEL_MASK);
5030 }
5031 
5032 /**
5033  * rdma_get_udp_sport - Calculate and set UDP source port based on the flow
5034  *                      label. If flow label is not defined in GRH then
5035  *                      calculate it based on lqpn/rqpn.
5036  *
5037  * @fl:                 flow label from GRH
5038  * @lqpn:               local qp number
5039  * @rqpn:               remote qp number
5040  */
5041 static inline u16 rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
5042 {
5043 	if (!fl)
5044 		fl = rdma_calc_flow_label(lqpn, rqpn);
5045 
5046 	return rdma_flow_label_to_udp_sport(fl);
5047 }
5048 
5049 const struct ib_port_immutable*
5050 ib_port_immutable_read(struct ib_device *dev, unsigned int port);
5051 
5052 /** ib_add_sub_device - Add a sub IB device on an existing one
5053  *
5054  * @parent: The IB device that needs to add a sub device
5055  * @type: The type of the new sub device
5056  * @name: The name of the new sub device
5057  *
5058  *
5059  * Return 0 on success, an error code otherwise
5060  */
5061 int ib_add_sub_device(struct ib_device *parent,
5062 		      enum rdma_nl_dev_type type,
5063 		      const char *name);
5064 
5065 
5066 /** ib_del_sub_device_and_put - Delect an IB sub device while holding a 'get'
5067  *
5068  * @sub: The sub device that is going to be deleted
5069  *
5070  * Return 0 on success, an error code otherwise
5071  */
5072 int ib_del_sub_device_and_put(struct ib_device *sub);
5073 
5074 static inline void ib_mark_name_assigned_by_user(struct ib_device *ibdev)
5075 {
5076 	ibdev->name_assign_type = RDMA_NAME_ASSIGN_TYPE_USER;
5077 }
5078 
5079 #endif /* IB_VERBS_H */
5080