xref: /linux/include/rdma/ib_verbs.h (revision 62de0e67328e9503459a24b9343c3358937cdeef)
1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2 /*
3  * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
4  * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
5  * Copyright (c) 2004, 2020 Intel Corporation.  All rights reserved.
6  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
7  * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
8  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
9  * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
10  */
11 
12 #ifndef IB_VERBS_H
13 #define IB_VERBS_H
14 
15 #include <linux/ethtool.h>
16 #include <linux/types.h>
17 #include <linux/device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/kref.h>
20 #include <linux/list.h>
21 #include <linux/rwsem.h>
22 #include <linux/workqueue.h>
23 #include <linux/irq_poll.h>
24 #include <uapi/linux/if_ether.h>
25 #include <net/ipv6.h>
26 #include <net/ip.h>
27 #include <linux/string.h>
28 #include <linux/slab.h>
29 #include <linux/netdevice.h>
30 #include <linux/refcount.h>
31 #include <linux/if_link.h>
32 #include <linux/atomic.h>
33 #include <linux/mmu_notifier.h>
34 #include <linux/uaccess.h>
35 #include <linux/cgroup_rdma.h>
36 #include <linux/irqflags.h>
37 #include <linux/preempt.h>
38 #include <linux/dim.h>
39 #include <uapi/rdma/ib_user_verbs.h>
40 #include <rdma/rdma_counter.h>
41 #include <rdma/restrack.h>
42 #include <rdma/signature.h>
43 #include <uapi/rdma/rdma_user_ioctl.h>
44 #include <uapi/rdma/ib_user_ioctl_verbs.h>
45 
46 #define IB_FW_VERSION_NAME_MAX	ETHTOOL_FWVERS_LEN
47 
48 struct ib_umem_odp;
49 struct ib_uqp_object;
50 struct ib_usrq_object;
51 struct ib_uwq_object;
52 struct rdma_cm_id;
53 struct ib_port;
54 struct hw_stats_device_data;
55 
56 extern struct workqueue_struct *ib_wq;
57 extern struct workqueue_struct *ib_comp_wq;
58 extern struct workqueue_struct *ib_comp_unbound_wq;
59 
60 struct ib_ucq_object;
61 
62 __printf(2, 3) __cold
63 void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
64 __printf(2, 3) __cold
65 void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
66 __printf(2, 3) __cold
67 void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
68 __printf(2, 3) __cold
69 void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
70 __printf(2, 3) __cold
71 void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
72 __printf(2, 3) __cold
73 void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
74 __printf(2, 3) __cold
75 void ibdev_info(const struct ib_device *ibdev, const char *format, ...);
76 
77 #if defined(CONFIG_DYNAMIC_DEBUG) || \
78 	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
79 #define ibdev_dbg(__dev, format, args...)                       \
80 	dynamic_ibdev_dbg(__dev, format, ##args)
81 #else
82 __printf(2, 3) __cold
83 static inline
84 void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
85 #endif
86 
87 #define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...)           \
88 do {                                                                    \
89 	static DEFINE_RATELIMIT_STATE(_rs,                              \
90 				      DEFAULT_RATELIMIT_INTERVAL,       \
91 				      DEFAULT_RATELIMIT_BURST);         \
92 	if (__ratelimit(&_rs))                                          \
93 		ibdev_level(ibdev, fmt, ##__VA_ARGS__);                 \
94 } while (0)
95 
96 #define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
97 	ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
98 #define ibdev_alert_ratelimited(ibdev, fmt, ...) \
99 	ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
100 #define ibdev_crit_ratelimited(ibdev, fmt, ...) \
101 	ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
102 #define ibdev_err_ratelimited(ibdev, fmt, ...) \
103 	ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
104 #define ibdev_warn_ratelimited(ibdev, fmt, ...) \
105 	ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
106 #define ibdev_notice_ratelimited(ibdev, fmt, ...) \
107 	ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
108 #define ibdev_info_ratelimited(ibdev, fmt, ...) \
109 	ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)
110 
111 #if defined(CONFIG_DYNAMIC_DEBUG) || \
112 	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
113 /* descriptor check is first to prevent flooding with "callbacks suppressed" */
114 #define ibdev_dbg_ratelimited(ibdev, fmt, ...)                          \
115 do {                                                                    \
116 	static DEFINE_RATELIMIT_STATE(_rs,                              \
117 				      DEFAULT_RATELIMIT_INTERVAL,       \
118 				      DEFAULT_RATELIMIT_BURST);         \
119 	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);                 \
120 	if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs))      \
121 		__dynamic_ibdev_dbg(&descriptor, ibdev, fmt,            \
122 				    ##__VA_ARGS__);                     \
123 } while (0)
124 #else
125 __printf(2, 3) __cold
126 static inline
127 void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
128 #endif
129 
130 union ib_gid {
131 	u8	raw[16];
132 	struct {
133 		__be64	subnet_prefix;
134 		__be64	interface_id;
135 	} global;
136 };
137 
138 extern union ib_gid zgid;
139 
140 enum ib_gid_type {
141 	IB_GID_TYPE_IB = IB_UVERBS_GID_TYPE_IB,
142 	IB_GID_TYPE_ROCE = IB_UVERBS_GID_TYPE_ROCE_V1,
143 	IB_GID_TYPE_ROCE_UDP_ENCAP = IB_UVERBS_GID_TYPE_ROCE_V2,
144 	IB_GID_TYPE_SIZE
145 };
146 
147 #define ROCE_V2_UDP_DPORT      4791
148 struct ib_gid_attr {
149 	struct net_device __rcu	*ndev;
150 	struct ib_device	*device;
151 	union ib_gid		gid;
152 	enum ib_gid_type	gid_type;
153 	u16			index;
154 	u32			port_num;
155 };
156 
157 enum {
158 	/* set the local administered indication */
159 	IB_SA_WELL_KNOWN_GUID	= BIT_ULL(57) | 2,
160 };
161 
162 enum rdma_transport_type {
163 	RDMA_TRANSPORT_IB,
164 	RDMA_TRANSPORT_IWARP,
165 	RDMA_TRANSPORT_USNIC,
166 	RDMA_TRANSPORT_USNIC_UDP,
167 	RDMA_TRANSPORT_UNSPECIFIED,
168 };
169 
170 enum rdma_protocol_type {
171 	RDMA_PROTOCOL_IB,
172 	RDMA_PROTOCOL_IBOE,
173 	RDMA_PROTOCOL_IWARP,
174 	RDMA_PROTOCOL_USNIC_UDP
175 };
176 
177 __attribute_const__ enum rdma_transport_type
178 rdma_node_get_transport(unsigned int node_type);
179 
180 enum rdma_network_type {
181 	RDMA_NETWORK_IB,
182 	RDMA_NETWORK_ROCE_V1,
183 	RDMA_NETWORK_IPV4,
184 	RDMA_NETWORK_IPV6
185 };
186 
187 static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
188 {
189 	if (network_type == RDMA_NETWORK_IPV4 ||
190 	    network_type == RDMA_NETWORK_IPV6)
191 		return IB_GID_TYPE_ROCE_UDP_ENCAP;
192 	else if (network_type == RDMA_NETWORK_ROCE_V1)
193 		return IB_GID_TYPE_ROCE;
194 	else
195 		return IB_GID_TYPE_IB;
196 }
197 
198 static inline enum rdma_network_type
199 rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
200 {
201 	if (attr->gid_type == IB_GID_TYPE_IB)
202 		return RDMA_NETWORK_IB;
203 
204 	if (attr->gid_type == IB_GID_TYPE_ROCE)
205 		return RDMA_NETWORK_ROCE_V1;
206 
207 	if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
208 		return RDMA_NETWORK_IPV4;
209 	else
210 		return RDMA_NETWORK_IPV6;
211 }
212 
213 enum rdma_link_layer {
214 	IB_LINK_LAYER_UNSPECIFIED,
215 	IB_LINK_LAYER_INFINIBAND,
216 	IB_LINK_LAYER_ETHERNET,
217 };
218 
219 enum ib_device_cap_flags {
220 	IB_DEVICE_RESIZE_MAX_WR = IB_UVERBS_DEVICE_RESIZE_MAX_WR,
221 	IB_DEVICE_BAD_PKEY_CNTR = IB_UVERBS_DEVICE_BAD_PKEY_CNTR,
222 	IB_DEVICE_BAD_QKEY_CNTR = IB_UVERBS_DEVICE_BAD_QKEY_CNTR,
223 	IB_DEVICE_RAW_MULTI = IB_UVERBS_DEVICE_RAW_MULTI,
224 	IB_DEVICE_AUTO_PATH_MIG = IB_UVERBS_DEVICE_AUTO_PATH_MIG,
225 	IB_DEVICE_CHANGE_PHY_PORT = IB_UVERBS_DEVICE_CHANGE_PHY_PORT,
226 	IB_DEVICE_UD_AV_PORT_ENFORCE = IB_UVERBS_DEVICE_UD_AV_PORT_ENFORCE,
227 	IB_DEVICE_CURR_QP_STATE_MOD = IB_UVERBS_DEVICE_CURR_QP_STATE_MOD,
228 	IB_DEVICE_SHUTDOWN_PORT = IB_UVERBS_DEVICE_SHUTDOWN_PORT,
229 	/* IB_DEVICE_INIT_TYPE = IB_UVERBS_DEVICE_INIT_TYPE, (not in use) */
230 	IB_DEVICE_PORT_ACTIVE_EVENT = IB_UVERBS_DEVICE_PORT_ACTIVE_EVENT,
231 	IB_DEVICE_SYS_IMAGE_GUID = IB_UVERBS_DEVICE_SYS_IMAGE_GUID,
232 	IB_DEVICE_RC_RNR_NAK_GEN = IB_UVERBS_DEVICE_RC_RNR_NAK_GEN,
233 	IB_DEVICE_SRQ_RESIZE = IB_UVERBS_DEVICE_SRQ_RESIZE,
234 	IB_DEVICE_N_NOTIFY_CQ = IB_UVERBS_DEVICE_N_NOTIFY_CQ,
235 
236 	/* Reserved, old SEND_W_INV = 1 << 16,*/
237 	IB_DEVICE_MEM_WINDOW = IB_UVERBS_DEVICE_MEM_WINDOW,
238 	/*
239 	 * Devices should set IB_DEVICE_UD_IP_SUM if they support
240 	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
241 	 * messages and can verify the validity of checksum for
242 	 * incoming messages.  Setting this flag implies that the
243 	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
244 	 */
245 	IB_DEVICE_UD_IP_CSUM = IB_UVERBS_DEVICE_UD_IP_CSUM,
246 	IB_DEVICE_XRC = IB_UVERBS_DEVICE_XRC,
247 
248 	/*
249 	 * This device supports the IB "base memory management extension",
250 	 * which includes support for fast registrations (IB_WR_REG_MR,
251 	 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
252 	 * also be set by any iWarp device which must support FRs to comply
253 	 * to the iWarp verbs spec.  iWarp devices also support the
254 	 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
255 	 * stag.
256 	 */
257 	IB_DEVICE_MEM_MGT_EXTENSIONS = IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS,
258 	IB_DEVICE_MEM_WINDOW_TYPE_2A = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A,
259 	IB_DEVICE_MEM_WINDOW_TYPE_2B = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B,
260 	IB_DEVICE_RC_IP_CSUM = IB_UVERBS_DEVICE_RC_IP_CSUM,
261 	/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
262 	IB_DEVICE_RAW_IP_CSUM = IB_UVERBS_DEVICE_RAW_IP_CSUM,
263 	IB_DEVICE_MANAGED_FLOW_STEERING =
264 		IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING,
265 	/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
266 	IB_DEVICE_RAW_SCATTER_FCS = IB_UVERBS_DEVICE_RAW_SCATTER_FCS,
267 	/* The device supports padding incoming writes to cacheline. */
268 	IB_DEVICE_PCI_WRITE_END_PADDING =
269 		IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING,
270 	/* Placement type attributes */
271 	IB_DEVICE_FLUSH_GLOBAL = IB_UVERBS_DEVICE_FLUSH_GLOBAL,
272 	IB_DEVICE_FLUSH_PERSISTENT = IB_UVERBS_DEVICE_FLUSH_PERSISTENT,
273 	IB_DEVICE_ATOMIC_WRITE = IB_UVERBS_DEVICE_ATOMIC_WRITE,
274 };
275 
276 enum ib_kernel_cap_flags {
277 	/*
278 	 * This device supports a per-device lkey or stag that can be
279 	 * used without performing a memory registration for the local
280 	 * memory.  Note that ULPs should never check this flag, but
281 	 * instead of use the local_dma_lkey flag in the ib_pd structure,
282 	 * which will always contain a usable lkey.
283 	 */
284 	IBK_LOCAL_DMA_LKEY = 1 << 0,
285 	/* IB_QP_CREATE_INTEGRITY_EN is supported to implement T10-PI */
286 	IBK_INTEGRITY_HANDOVER = 1 << 1,
287 	/* IB_ACCESS_ON_DEMAND is supported during reg_user_mr() */
288 	IBK_ON_DEMAND_PAGING = 1 << 2,
289 	/* IB_MR_TYPE_SG_GAPS is supported */
290 	IBK_SG_GAPS_REG = 1 << 3,
291 	/* Driver supports RDMA_NLDEV_CMD_DELLINK */
292 	IBK_ALLOW_USER_UNREG = 1 << 4,
293 
294 	/* ipoib will use IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK */
295 	IBK_BLOCK_MULTICAST_LOOPBACK = 1 << 5,
296 	/* iopib will use IB_QP_CREATE_IPOIB_UD_LSO for its QPs */
297 	IBK_UD_TSO = 1 << 6,
298 	/* iopib will use the device ops:
299 	 *   get_vf_config
300 	 *   get_vf_guid
301 	 *   get_vf_stats
302 	 *   set_vf_guid
303 	 *   set_vf_link_state
304 	 */
305 	IBK_VIRTUAL_FUNCTION = 1 << 7,
306 	/* ipoib will use IB_QP_CREATE_NETDEV_USE for its QPs */
307 	IBK_RDMA_NETDEV_OPA = 1 << 8,
308 };
309 
310 enum ib_atomic_cap {
311 	IB_ATOMIC_NONE,
312 	IB_ATOMIC_HCA,
313 	IB_ATOMIC_GLOB
314 };
315 
316 enum ib_odp_general_cap_bits {
317 	IB_ODP_SUPPORT		= IB_UVERBS_ODP_SUPPORT,
318 	IB_ODP_SUPPORT_IMPLICIT = IB_UVERBS_ODP_SUPPORT_IMPLICIT,
319 };
320 
321 enum ib_odp_transport_cap_bits {
322 	IB_ODP_SUPPORT_SEND	= IB_UVERBS_ODP_SUPPORT_SEND,
323 	IB_ODP_SUPPORT_RECV	= IB_UVERBS_ODP_SUPPORT_RECV,
324 	IB_ODP_SUPPORT_WRITE	= IB_UVERBS_ODP_SUPPORT_WRITE,
325 	IB_ODP_SUPPORT_READ	= IB_UVERBS_ODP_SUPPORT_READ,
326 	IB_ODP_SUPPORT_ATOMIC	= IB_UVERBS_ODP_SUPPORT_ATOMIC,
327 	IB_ODP_SUPPORT_SRQ_RECV	= IB_UVERBS_ODP_SUPPORT_SRQ_RECV,
328 	IB_ODP_SUPPORT_FLUSH	= IB_UVERBS_ODP_SUPPORT_FLUSH,
329 	IB_ODP_SUPPORT_ATOMIC_WRITE	= IB_UVERBS_ODP_SUPPORT_ATOMIC_WRITE,
330 };
331 
332 struct ib_odp_caps {
333 	uint64_t general_caps;
334 	struct {
335 		uint32_t  rc_odp_caps;
336 		uint32_t  uc_odp_caps;
337 		uint32_t  ud_odp_caps;
338 		uint32_t  xrc_odp_caps;
339 	} per_transport_caps;
340 };
341 
342 struct ib_rss_caps {
343 	/* Corresponding bit will be set if qp type from
344 	 * 'enum ib_qp_type' is supported, e.g.
345 	 * supported_qpts |= 1 << IB_QPT_UD
346 	 */
347 	u32 supported_qpts;
348 	u32 max_rwq_indirection_tables;
349 	u32 max_rwq_indirection_table_size;
350 };
351 
352 enum ib_tm_cap_flags {
353 	/*  Support tag matching with rendezvous offload for RC transport */
354 	IB_TM_CAP_RNDV_RC = 1 << 0,
355 };
356 
357 struct ib_tm_caps {
358 	/* Max size of RNDV header */
359 	u32 max_rndv_hdr_size;
360 	/* Max number of entries in tag matching list */
361 	u32 max_num_tags;
362 	/* From enum ib_tm_cap_flags */
363 	u32 flags;
364 	/* Max number of outstanding list operations */
365 	u32 max_ops;
366 	/* Max number of SGE in tag matching entry */
367 	u32 max_sge;
368 };
369 
370 struct ib_cq_init_attr {
371 	unsigned int	cqe;
372 	u32		comp_vector;
373 	u32		flags;
374 };
375 
376 enum ib_cq_attr_mask {
377 	IB_CQ_MODERATE = 1 << 0,
378 };
379 
380 struct ib_cq_caps {
381 	u16     max_cq_moderation_count;
382 	u16     max_cq_moderation_period;
383 };
384 
385 struct ib_dm_mr_attr {
386 	u64		length;
387 	u64		offset;
388 	u32		access_flags;
389 };
390 
391 struct ib_dm_alloc_attr {
392 	u64	length;
393 	u32	alignment;
394 	u32	flags;
395 };
396 
397 struct ib_device_attr {
398 	u64			fw_ver;
399 	__be64			sys_image_guid;
400 	u64			max_mr_size;
401 	u64			page_size_cap;
402 	u32			vendor_id;
403 	u32			vendor_part_id;
404 	u32			hw_ver;
405 	int			max_qp;
406 	int			max_qp_wr;
407 	u64			device_cap_flags;
408 	u64			kernel_cap_flags;
409 	int			max_send_sge;
410 	int			max_recv_sge;
411 	int			max_sge_rd;
412 	int			max_cq;
413 	int			max_cqe;
414 	int			max_mr;
415 	int			max_pd;
416 	int			max_qp_rd_atom;
417 	int			max_ee_rd_atom;
418 	int			max_res_rd_atom;
419 	int			max_qp_init_rd_atom;
420 	int			max_ee_init_rd_atom;
421 	enum ib_atomic_cap	atomic_cap;
422 	enum ib_atomic_cap	masked_atomic_cap;
423 	int			max_ee;
424 	int			max_rdd;
425 	int			max_mw;
426 	int			max_raw_ipv6_qp;
427 	int			max_raw_ethy_qp;
428 	int			max_mcast_grp;
429 	int			max_mcast_qp_attach;
430 	int			max_total_mcast_qp_attach;
431 	int			max_ah;
432 	int			max_srq;
433 	int			max_srq_wr;
434 	int			max_srq_sge;
435 	unsigned int		max_fast_reg_page_list_len;
436 	unsigned int		max_pi_fast_reg_page_list_len;
437 	u16			max_pkeys;
438 	u8			local_ca_ack_delay;
439 	int			sig_prot_cap;
440 	int			sig_guard_cap;
441 	struct ib_odp_caps	odp_caps;
442 	uint64_t		timestamp_mask;
443 	uint64_t		hca_core_clock; /* in KHZ */
444 	struct ib_rss_caps	rss_caps;
445 	u32			max_wq_type_rq;
446 	u32			raw_packet_caps; /* Use ib_raw_packet_caps enum */
447 	struct ib_tm_caps	tm_caps;
448 	struct ib_cq_caps       cq_caps;
449 	u64			max_dm_size;
450 	/* Max entries for sgl for optimized performance per READ */
451 	u32			max_sgl_rd;
452 };
453 
454 enum ib_mtu {
455 	IB_MTU_256  = 1,
456 	IB_MTU_512  = 2,
457 	IB_MTU_1024 = 3,
458 	IB_MTU_2048 = 4,
459 	IB_MTU_4096 = 5
460 };
461 
462 enum opa_mtu {
463 	OPA_MTU_8192 = 6,
464 	OPA_MTU_10240 = 7
465 };
466 
467 static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
468 {
469 	switch (mtu) {
470 	case IB_MTU_256:  return  256;
471 	case IB_MTU_512:  return  512;
472 	case IB_MTU_1024: return 1024;
473 	case IB_MTU_2048: return 2048;
474 	case IB_MTU_4096: return 4096;
475 	default: 	  return -1;
476 	}
477 }
478 
479 static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
480 {
481 	if (mtu >= 4096)
482 		return IB_MTU_4096;
483 	else if (mtu >= 2048)
484 		return IB_MTU_2048;
485 	else if (mtu >= 1024)
486 		return IB_MTU_1024;
487 	else if (mtu >= 512)
488 		return IB_MTU_512;
489 	else
490 		return IB_MTU_256;
491 }
492 
493 static inline int opa_mtu_enum_to_int(enum opa_mtu mtu)
494 {
495 	switch (mtu) {
496 	case OPA_MTU_8192:
497 		return 8192;
498 	case OPA_MTU_10240:
499 		return 10240;
500 	default:
501 		return(ib_mtu_enum_to_int((enum ib_mtu)mtu));
502 	}
503 }
504 
505 static inline enum opa_mtu opa_mtu_int_to_enum(int mtu)
506 {
507 	if (mtu >= 10240)
508 		return OPA_MTU_10240;
509 	else if (mtu >= 8192)
510 		return OPA_MTU_8192;
511 	else
512 		return ((enum opa_mtu)ib_mtu_int_to_enum(mtu));
513 }
514 
515 enum ib_port_state {
516 	IB_PORT_NOP		= 0,
517 	IB_PORT_DOWN		= 1,
518 	IB_PORT_INIT		= 2,
519 	IB_PORT_ARMED		= 3,
520 	IB_PORT_ACTIVE		= 4,
521 	IB_PORT_ACTIVE_DEFER	= 5
522 };
523 
524 static inline const char *__attribute_const__
525 ib_port_state_to_str(enum ib_port_state state)
526 {
527 	const char * const states[] = {
528 		[IB_PORT_NOP] = "NOP",
529 		[IB_PORT_DOWN] = "DOWN",
530 		[IB_PORT_INIT] = "INIT",
531 		[IB_PORT_ARMED] = "ARMED",
532 		[IB_PORT_ACTIVE] = "ACTIVE",
533 		[IB_PORT_ACTIVE_DEFER] = "ACTIVE_DEFER",
534 	};
535 
536 	if (state < ARRAY_SIZE(states))
537 		return states[state];
538 	return "UNKNOWN";
539 }
540 
541 enum ib_port_phys_state {
542 	IB_PORT_PHYS_STATE_SLEEP = 1,
543 	IB_PORT_PHYS_STATE_POLLING = 2,
544 	IB_PORT_PHYS_STATE_DISABLED = 3,
545 	IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
546 	IB_PORT_PHYS_STATE_LINK_UP = 5,
547 	IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
548 	IB_PORT_PHYS_STATE_PHY_TEST = 7,
549 };
550 
551 enum ib_port_width {
552 	IB_WIDTH_1X	= 1,
553 	IB_WIDTH_2X	= 16,
554 	IB_WIDTH_4X	= 2,
555 	IB_WIDTH_8X	= 4,
556 	IB_WIDTH_12X	= 8
557 };
558 
559 static inline int ib_width_enum_to_int(enum ib_port_width width)
560 {
561 	switch (width) {
562 	case IB_WIDTH_1X:  return  1;
563 	case IB_WIDTH_2X:  return  2;
564 	case IB_WIDTH_4X:  return  4;
565 	case IB_WIDTH_8X:  return  8;
566 	case IB_WIDTH_12X: return 12;
567 	default: 	  return -1;
568 	}
569 }
570 
571 enum ib_port_speed {
572 	IB_SPEED_SDR	= 1,
573 	IB_SPEED_DDR	= 2,
574 	IB_SPEED_QDR	= 4,
575 	IB_SPEED_FDR10	= 8,
576 	IB_SPEED_FDR	= 16,
577 	IB_SPEED_EDR	= 32,
578 	IB_SPEED_HDR	= 64,
579 	IB_SPEED_NDR	= 128,
580 	IB_SPEED_XDR	= 256,
581 };
582 
583 enum ib_stat_flag {
584 	IB_STAT_FLAG_OPTIONAL = 1 << 0,
585 };
586 
587 /**
588  * struct rdma_stat_desc
589  * @name - The name of the counter
590  * @flags - Flags of the counter; For example, IB_STAT_FLAG_OPTIONAL
591  * @priv - Driver private information; Core code should not use
592  */
593 struct rdma_stat_desc {
594 	const char *name;
595 	unsigned int flags;
596 	const void *priv;
597 };
598 
599 /**
600  * struct rdma_hw_stats
601  * @lock - Mutex to protect parallel write access to lifespan and values
602  *    of counters, which are 64bits and not guaranteed to be written
603  *    atomicaly on 32bits systems.
604  * @timestamp - Used by the core code to track when the last update was
605  * @lifespan - Used by the core code to determine how old the counters
606  *   should be before being updated again.  Stored in jiffies, defaults
607  *   to 10 milliseconds, drivers can override the default be specifying
608  *   their own value during their allocation routine.
609  * @descs - Array of pointers to static descriptors used for the counters
610  *   in directory.
611  * @is_disabled - A bitmap to indicate each counter is currently disabled
612  *   or not.
613  * @num_counters - How many hardware counters there are.  If name is
614  *   shorter than this number, a kernel oops will result.  Driver authors
615  *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@name) < num_counters)
616  *   in their code to prevent this.
617  * @value - Array of u64 counters that are accessed by the sysfs code and
618  *   filled in by the drivers get_stats routine
619  */
620 struct rdma_hw_stats {
621 	struct mutex	lock; /* Protect lifespan and values[] */
622 	unsigned long	timestamp;
623 	unsigned long	lifespan;
624 	const struct rdma_stat_desc *descs;
625 	unsigned long	*is_disabled;
626 	int		num_counters;
627 	u64		value[] __counted_by(num_counters);
628 };
629 
630 #define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
631 
632 struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
633 	const struct rdma_stat_desc *descs, int num_counters,
634 	unsigned long lifespan);
635 
636 void rdma_free_hw_stats_struct(struct rdma_hw_stats *stats);
637 
638 /* Define bits for the various functionality this port needs to be supported by
639  * the core.
640  */
641 /* Management                           0x00000FFF */
642 #define RDMA_CORE_CAP_IB_MAD            0x00000001
643 #define RDMA_CORE_CAP_IB_SMI            0x00000002
644 #define RDMA_CORE_CAP_IB_CM             0x00000004
645 #define RDMA_CORE_CAP_IW_CM             0x00000008
646 #define RDMA_CORE_CAP_IB_SA             0x00000010
647 #define RDMA_CORE_CAP_OPA_MAD           0x00000020
648 
649 /* Address format                       0x000FF000 */
650 #define RDMA_CORE_CAP_AF_IB             0x00001000
651 #define RDMA_CORE_CAP_ETH_AH            0x00002000
652 #define RDMA_CORE_CAP_OPA_AH            0x00004000
653 #define RDMA_CORE_CAP_IB_GRH_REQUIRED   0x00008000
654 
655 /* Protocol                             0xFFF00000 */
656 #define RDMA_CORE_CAP_PROT_IB           0x00100000
657 #define RDMA_CORE_CAP_PROT_ROCE         0x00200000
658 #define RDMA_CORE_CAP_PROT_IWARP        0x00400000
659 #define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
660 #define RDMA_CORE_CAP_PROT_RAW_PACKET   0x01000000
661 #define RDMA_CORE_CAP_PROT_USNIC        0x02000000
662 
663 #define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
664 					| RDMA_CORE_CAP_PROT_ROCE     \
665 					| RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)
666 
667 #define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
668 					| RDMA_CORE_CAP_IB_MAD \
669 					| RDMA_CORE_CAP_IB_SMI \
670 					| RDMA_CORE_CAP_IB_CM  \
671 					| RDMA_CORE_CAP_IB_SA  \
672 					| RDMA_CORE_CAP_AF_IB)
673 #define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
674 					| RDMA_CORE_CAP_IB_MAD  \
675 					| RDMA_CORE_CAP_IB_CM   \
676 					| RDMA_CORE_CAP_AF_IB   \
677 					| RDMA_CORE_CAP_ETH_AH)
678 #define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP			\
679 					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
680 					| RDMA_CORE_CAP_IB_MAD  \
681 					| RDMA_CORE_CAP_IB_CM   \
682 					| RDMA_CORE_CAP_AF_IB   \
683 					| RDMA_CORE_CAP_ETH_AH)
684 #define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
685 					| RDMA_CORE_CAP_IW_CM)
686 #define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
687 					| RDMA_CORE_CAP_OPA_MAD)
688 
689 #define RDMA_CORE_PORT_RAW_PACKET	(RDMA_CORE_CAP_PROT_RAW_PACKET)
690 
691 #define RDMA_CORE_PORT_USNIC		(RDMA_CORE_CAP_PROT_USNIC)
692 
693 struct ib_port_attr {
694 	u64			subnet_prefix;
695 	enum ib_port_state	state;
696 	enum ib_mtu		max_mtu;
697 	enum ib_mtu		active_mtu;
698 	u32                     phys_mtu;
699 	int			gid_tbl_len;
700 	unsigned int		ip_gids:1;
701 	/* This is the value from PortInfo CapabilityMask, defined by IBA */
702 	u32			port_cap_flags;
703 	u32			max_msg_sz;
704 	u32			bad_pkey_cntr;
705 	u32			qkey_viol_cntr;
706 	u16			pkey_tbl_len;
707 	u32			sm_lid;
708 	u32			lid;
709 	u8			lmc;
710 	u8			max_vl_num;
711 	u8			sm_sl;
712 	u8			subnet_timeout;
713 	u8			init_type_reply;
714 	u8			active_width;
715 	u16			active_speed;
716 	u8                      phys_state;
717 	u16			port_cap_flags2;
718 };
719 
720 enum ib_device_modify_flags {
721 	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
722 	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
723 };
724 
725 #define IB_DEVICE_NODE_DESC_MAX 64
726 
727 struct ib_device_modify {
728 	u64	sys_image_guid;
729 	char	node_desc[IB_DEVICE_NODE_DESC_MAX];
730 };
731 
732 enum ib_port_modify_flags {
733 	IB_PORT_SHUTDOWN		= 1,
734 	IB_PORT_INIT_TYPE		= (1<<2),
735 	IB_PORT_RESET_QKEY_CNTR		= (1<<3),
736 	IB_PORT_OPA_MASK_CHG		= (1<<4)
737 };
738 
739 struct ib_port_modify {
740 	u32	set_port_cap_mask;
741 	u32	clr_port_cap_mask;
742 	u8	init_type;
743 };
744 
745 enum ib_event_type {
746 	IB_EVENT_CQ_ERR,
747 	IB_EVENT_QP_FATAL,
748 	IB_EVENT_QP_REQ_ERR,
749 	IB_EVENT_QP_ACCESS_ERR,
750 	IB_EVENT_COMM_EST,
751 	IB_EVENT_SQ_DRAINED,
752 	IB_EVENT_PATH_MIG,
753 	IB_EVENT_PATH_MIG_ERR,
754 	IB_EVENT_DEVICE_FATAL,
755 	IB_EVENT_PORT_ACTIVE,
756 	IB_EVENT_PORT_ERR,
757 	IB_EVENT_LID_CHANGE,
758 	IB_EVENT_PKEY_CHANGE,
759 	IB_EVENT_SM_CHANGE,
760 	IB_EVENT_SRQ_ERR,
761 	IB_EVENT_SRQ_LIMIT_REACHED,
762 	IB_EVENT_QP_LAST_WQE_REACHED,
763 	IB_EVENT_CLIENT_REREGISTER,
764 	IB_EVENT_GID_CHANGE,
765 	IB_EVENT_WQ_FATAL,
766 };
767 
768 const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
769 
770 struct ib_event {
771 	struct ib_device	*device;
772 	union {
773 		struct ib_cq	*cq;
774 		struct ib_qp	*qp;
775 		struct ib_srq	*srq;
776 		struct ib_wq	*wq;
777 		u32		port_num;
778 	} element;
779 	enum ib_event_type	event;
780 };
781 
782 struct ib_event_handler {
783 	struct ib_device *device;
784 	void            (*handler)(struct ib_event_handler *, struct ib_event *);
785 	struct list_head  list;
786 };
787 
788 #define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
789 	do {							\
790 		(_ptr)->device  = _device;			\
791 		(_ptr)->handler = _handler;			\
792 		INIT_LIST_HEAD(&(_ptr)->list);			\
793 	} while (0)
794 
795 struct ib_global_route {
796 	const struct ib_gid_attr *sgid_attr;
797 	union ib_gid	dgid;
798 	u32		flow_label;
799 	u8		sgid_index;
800 	u8		hop_limit;
801 	u8		traffic_class;
802 };
803 
804 struct ib_grh {
805 	__be32		version_tclass_flow;
806 	__be16		paylen;
807 	u8		next_hdr;
808 	u8		hop_limit;
809 	union ib_gid	sgid;
810 	union ib_gid	dgid;
811 };
812 
813 union rdma_network_hdr {
814 	struct ib_grh ibgrh;
815 	struct {
816 		/* The IB spec states that if it's IPv4, the header
817 		 * is located in the last 20 bytes of the header.
818 		 */
819 		u8		reserved[20];
820 		struct iphdr	roce4grh;
821 	};
822 };
823 
824 #define IB_QPN_MASK		0xFFFFFF
825 
826 enum {
827 	IB_MULTICAST_QPN = 0xffffff
828 };
829 
830 #define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
831 #define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)
832 
833 enum ib_ah_flags {
834 	IB_AH_GRH	= 1
835 };
836 
837 enum ib_rate {
838 	IB_RATE_PORT_CURRENT = 0,
839 	IB_RATE_2_5_GBPS = 2,
840 	IB_RATE_5_GBPS   = 5,
841 	IB_RATE_10_GBPS  = 3,
842 	IB_RATE_20_GBPS  = 6,
843 	IB_RATE_30_GBPS  = 4,
844 	IB_RATE_40_GBPS  = 7,
845 	IB_RATE_60_GBPS  = 8,
846 	IB_RATE_80_GBPS  = 9,
847 	IB_RATE_120_GBPS = 10,
848 	IB_RATE_14_GBPS  = 11,
849 	IB_RATE_56_GBPS  = 12,
850 	IB_RATE_112_GBPS = 13,
851 	IB_RATE_168_GBPS = 14,
852 	IB_RATE_25_GBPS  = 15,
853 	IB_RATE_100_GBPS = 16,
854 	IB_RATE_200_GBPS = 17,
855 	IB_RATE_300_GBPS = 18,
856 	IB_RATE_28_GBPS  = 19,
857 	IB_RATE_50_GBPS  = 20,
858 	IB_RATE_400_GBPS = 21,
859 	IB_RATE_600_GBPS = 22,
860 	IB_RATE_800_GBPS = 23,
861 };
862 
863 /**
864  * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
865  * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
866  * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
867  * @rate: rate to convert.
868  */
869 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
870 
871 /**
872  * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
873  * For example, IB_RATE_2_5_GBPS will be converted to 2500.
874  * @rate: rate to convert.
875  */
876 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
877 
878 
879 /**
880  * enum ib_mr_type - memory region type
881  * @IB_MR_TYPE_MEM_REG:       memory region that is used for
882  *                            normal registration
883  * @IB_MR_TYPE_SG_GAPS:       memory region that is capable to
884  *                            register any arbitrary sg lists (without
885  *                            the normal mr constraints - see
886  *                            ib_map_mr_sg)
887  * @IB_MR_TYPE_DM:            memory region that is used for device
888  *                            memory registration
889  * @IB_MR_TYPE_USER:          memory region that is used for the user-space
890  *                            application
891  * @IB_MR_TYPE_DMA:           memory region that is used for DMA operations
892  *                            without address translations (VA=PA)
893  * @IB_MR_TYPE_INTEGRITY:     memory region that is used for
894  *                            data integrity operations
895  */
896 enum ib_mr_type {
897 	IB_MR_TYPE_MEM_REG,
898 	IB_MR_TYPE_SG_GAPS,
899 	IB_MR_TYPE_DM,
900 	IB_MR_TYPE_USER,
901 	IB_MR_TYPE_DMA,
902 	IB_MR_TYPE_INTEGRITY,
903 };
904 
905 enum ib_mr_status_check {
906 	IB_MR_CHECK_SIG_STATUS = 1,
907 };
908 
909 /**
910  * struct ib_mr_status - Memory region status container
911  *
912  * @fail_status: Bitmask of MR checks status. For each
913  *     failed check a corresponding status bit is set.
914  * @sig_err: Additional info for IB_MR_CEHCK_SIG_STATUS
915  *     failure.
916  */
917 struct ib_mr_status {
918 	u32		    fail_status;
919 	struct ib_sig_err   sig_err;
920 };
921 
922 /**
923  * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
924  * enum.
925  * @mult: multiple to convert.
926  */
927 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
928 
929 struct rdma_ah_init_attr {
930 	struct rdma_ah_attr *ah_attr;
931 	u32 flags;
932 	struct net_device *xmit_slave;
933 };
934 
935 enum rdma_ah_attr_type {
936 	RDMA_AH_ATTR_TYPE_UNDEFINED,
937 	RDMA_AH_ATTR_TYPE_IB,
938 	RDMA_AH_ATTR_TYPE_ROCE,
939 	RDMA_AH_ATTR_TYPE_OPA,
940 };
941 
942 struct ib_ah_attr {
943 	u16			dlid;
944 	u8			src_path_bits;
945 };
946 
947 struct roce_ah_attr {
948 	u8			dmac[ETH_ALEN];
949 };
950 
951 struct opa_ah_attr {
952 	u32			dlid;
953 	u8			src_path_bits;
954 	bool			make_grd;
955 };
956 
957 struct rdma_ah_attr {
958 	struct ib_global_route	grh;
959 	u8			sl;
960 	u8			static_rate;
961 	u32			port_num;
962 	u8			ah_flags;
963 	enum rdma_ah_attr_type type;
964 	union {
965 		struct ib_ah_attr ib;
966 		struct roce_ah_attr roce;
967 		struct opa_ah_attr opa;
968 	};
969 };
970 
971 enum ib_wc_status {
972 	IB_WC_SUCCESS,
973 	IB_WC_LOC_LEN_ERR,
974 	IB_WC_LOC_QP_OP_ERR,
975 	IB_WC_LOC_EEC_OP_ERR,
976 	IB_WC_LOC_PROT_ERR,
977 	IB_WC_WR_FLUSH_ERR,
978 	IB_WC_MW_BIND_ERR,
979 	IB_WC_BAD_RESP_ERR,
980 	IB_WC_LOC_ACCESS_ERR,
981 	IB_WC_REM_INV_REQ_ERR,
982 	IB_WC_REM_ACCESS_ERR,
983 	IB_WC_REM_OP_ERR,
984 	IB_WC_RETRY_EXC_ERR,
985 	IB_WC_RNR_RETRY_EXC_ERR,
986 	IB_WC_LOC_RDD_VIOL_ERR,
987 	IB_WC_REM_INV_RD_REQ_ERR,
988 	IB_WC_REM_ABORT_ERR,
989 	IB_WC_INV_EECN_ERR,
990 	IB_WC_INV_EEC_STATE_ERR,
991 	IB_WC_FATAL_ERR,
992 	IB_WC_RESP_TIMEOUT_ERR,
993 	IB_WC_GENERAL_ERR
994 };
995 
996 const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
997 
998 enum ib_wc_opcode {
999 	IB_WC_SEND = IB_UVERBS_WC_SEND,
1000 	IB_WC_RDMA_WRITE = IB_UVERBS_WC_RDMA_WRITE,
1001 	IB_WC_RDMA_READ = IB_UVERBS_WC_RDMA_READ,
1002 	IB_WC_COMP_SWAP = IB_UVERBS_WC_COMP_SWAP,
1003 	IB_WC_FETCH_ADD = IB_UVERBS_WC_FETCH_ADD,
1004 	IB_WC_BIND_MW = IB_UVERBS_WC_BIND_MW,
1005 	IB_WC_LOCAL_INV = IB_UVERBS_WC_LOCAL_INV,
1006 	IB_WC_LSO = IB_UVERBS_WC_TSO,
1007 	IB_WC_ATOMIC_WRITE = IB_UVERBS_WC_ATOMIC_WRITE,
1008 	IB_WC_REG_MR,
1009 	IB_WC_MASKED_COMP_SWAP,
1010 	IB_WC_MASKED_FETCH_ADD,
1011 	IB_WC_FLUSH = IB_UVERBS_WC_FLUSH,
1012 /*
1013  * Set value of IB_WC_RECV so consumers can test if a completion is a
1014  * receive by testing (opcode & IB_WC_RECV).
1015  */
1016 	IB_WC_RECV			= 1 << 7,
1017 	IB_WC_RECV_RDMA_WITH_IMM
1018 };
1019 
1020 enum ib_wc_flags {
1021 	IB_WC_GRH		= 1,
1022 	IB_WC_WITH_IMM		= (1<<1),
1023 	IB_WC_WITH_INVALIDATE	= (1<<2),
1024 	IB_WC_IP_CSUM_OK	= (1<<3),
1025 	IB_WC_WITH_SMAC		= (1<<4),
1026 	IB_WC_WITH_VLAN		= (1<<5),
1027 	IB_WC_WITH_NETWORK_HDR_TYPE	= (1<<6),
1028 };
1029 
1030 struct ib_wc {
1031 	union {
1032 		u64		wr_id;
1033 		struct ib_cqe	*wr_cqe;
1034 	};
1035 	enum ib_wc_status	status;
1036 	enum ib_wc_opcode	opcode;
1037 	u32			vendor_err;
1038 	u32			byte_len;
1039 	struct ib_qp	       *qp;
1040 	union {
1041 		__be32		imm_data;
1042 		u32		invalidate_rkey;
1043 	} ex;
1044 	u32			src_qp;
1045 	u32			slid;
1046 	int			wc_flags;
1047 	u16			pkey_index;
1048 	u8			sl;
1049 	u8			dlid_path_bits;
1050 	u32 port_num; /* valid only for DR SMPs on switches */
1051 	u8			smac[ETH_ALEN];
1052 	u16			vlan_id;
1053 	u8			network_hdr_type;
1054 };
1055 
1056 enum ib_cq_notify_flags {
1057 	IB_CQ_SOLICITED			= 1 << 0,
1058 	IB_CQ_NEXT_COMP			= 1 << 1,
1059 	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
1060 	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
1061 };
1062 
1063 enum ib_srq_type {
1064 	IB_SRQT_BASIC = IB_UVERBS_SRQT_BASIC,
1065 	IB_SRQT_XRC = IB_UVERBS_SRQT_XRC,
1066 	IB_SRQT_TM = IB_UVERBS_SRQT_TM,
1067 };
1068 
1069 static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
1070 {
1071 	return srq_type == IB_SRQT_XRC ||
1072 	       srq_type == IB_SRQT_TM;
1073 }
1074 
1075 enum ib_srq_attr_mask {
1076 	IB_SRQ_MAX_WR	= 1 << 0,
1077 	IB_SRQ_LIMIT	= 1 << 1,
1078 };
1079 
1080 struct ib_srq_attr {
1081 	u32	max_wr;
1082 	u32	max_sge;
1083 	u32	srq_limit;
1084 };
1085 
1086 struct ib_srq_init_attr {
1087 	void		      (*event_handler)(struct ib_event *, void *);
1088 	void		       *srq_context;
1089 	struct ib_srq_attr	attr;
1090 	enum ib_srq_type	srq_type;
1091 
1092 	struct {
1093 		struct ib_cq   *cq;
1094 		union {
1095 			struct {
1096 				struct ib_xrcd *xrcd;
1097 			} xrc;
1098 
1099 			struct {
1100 				u32		max_num_tags;
1101 			} tag_matching;
1102 		};
1103 	} ext;
1104 };
1105 
1106 struct ib_qp_cap {
1107 	u32	max_send_wr;
1108 	u32	max_recv_wr;
1109 	u32	max_send_sge;
1110 	u32	max_recv_sge;
1111 	u32	max_inline_data;
1112 
1113 	/*
1114 	 * Maximum number of rdma_rw_ctx structures in flight at a time.
1115 	 * ib_create_qp() will calculate the right amount of needed WRs
1116 	 * and MRs based on this.
1117 	 */
1118 	u32	max_rdma_ctxs;
1119 };
1120 
1121 enum ib_sig_type {
1122 	IB_SIGNAL_ALL_WR,
1123 	IB_SIGNAL_REQ_WR
1124 };
1125 
1126 enum ib_qp_type {
1127 	/*
1128 	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
1129 	 * here (and in that order) since the MAD layer uses them as
1130 	 * indices into a 2-entry table.
1131 	 */
1132 	IB_QPT_SMI,
1133 	IB_QPT_GSI,
1134 
1135 	IB_QPT_RC = IB_UVERBS_QPT_RC,
1136 	IB_QPT_UC = IB_UVERBS_QPT_UC,
1137 	IB_QPT_UD = IB_UVERBS_QPT_UD,
1138 	IB_QPT_RAW_IPV6,
1139 	IB_QPT_RAW_ETHERTYPE,
1140 	IB_QPT_RAW_PACKET = IB_UVERBS_QPT_RAW_PACKET,
1141 	IB_QPT_XRC_INI = IB_UVERBS_QPT_XRC_INI,
1142 	IB_QPT_XRC_TGT = IB_UVERBS_QPT_XRC_TGT,
1143 	IB_QPT_MAX,
1144 	IB_QPT_DRIVER = IB_UVERBS_QPT_DRIVER,
1145 	/* Reserve a range for qp types internal to the low level driver.
1146 	 * These qp types will not be visible at the IB core layer, so the
1147 	 * IB_QPT_MAX usages should not be affected in the core layer
1148 	 */
1149 	IB_QPT_RESERVED1 = 0x1000,
1150 	IB_QPT_RESERVED2,
1151 	IB_QPT_RESERVED3,
1152 	IB_QPT_RESERVED4,
1153 	IB_QPT_RESERVED5,
1154 	IB_QPT_RESERVED6,
1155 	IB_QPT_RESERVED7,
1156 	IB_QPT_RESERVED8,
1157 	IB_QPT_RESERVED9,
1158 	IB_QPT_RESERVED10,
1159 };
1160 
1161 enum ib_qp_create_flags {
1162 	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
1163 	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	=
1164 		IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
1165 	IB_QP_CREATE_CROSS_CHANNEL              = 1 << 2,
1166 	IB_QP_CREATE_MANAGED_SEND               = 1 << 3,
1167 	IB_QP_CREATE_MANAGED_RECV               = 1 << 4,
1168 	IB_QP_CREATE_NETIF_QP			= 1 << 5,
1169 	IB_QP_CREATE_INTEGRITY_EN		= 1 << 6,
1170 	IB_QP_CREATE_NETDEV_USE			= 1 << 7,
1171 	IB_QP_CREATE_SCATTER_FCS		=
1172 		IB_UVERBS_QP_CREATE_SCATTER_FCS,
1173 	IB_QP_CREATE_CVLAN_STRIPPING		=
1174 		IB_UVERBS_QP_CREATE_CVLAN_STRIPPING,
1175 	IB_QP_CREATE_SOURCE_QPN			= 1 << 10,
1176 	IB_QP_CREATE_PCI_WRITE_END_PADDING	=
1177 		IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING,
1178 	/* reserve bits 26-31 for low level drivers' internal use */
1179 	IB_QP_CREATE_RESERVED_START		= 1 << 26,
1180 	IB_QP_CREATE_RESERVED_END		= 1 << 31,
1181 };
1182 
1183 /*
1184  * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
1185  * callback to destroy the passed in QP.
1186  */
1187 
1188 struct ib_qp_init_attr {
1189 	/* This callback occurs in workqueue context */
1190 	void                  (*event_handler)(struct ib_event *, void *);
1191 
1192 	void		       *qp_context;
1193 	struct ib_cq	       *send_cq;
1194 	struct ib_cq	       *recv_cq;
1195 	struct ib_srq	       *srq;
1196 	struct ib_xrcd	       *xrcd;     /* XRC TGT QPs only */
1197 	struct ib_qp_cap	cap;
1198 	enum ib_sig_type	sq_sig_type;
1199 	enum ib_qp_type		qp_type;
1200 	u32			create_flags;
1201 
1202 	/*
1203 	 * Only needed for special QP types, or when using the RW API.
1204 	 */
1205 	u32			port_num;
1206 	struct ib_rwq_ind_table *rwq_ind_tbl;
1207 	u32			source_qpn;
1208 };
1209 
1210 struct ib_qp_open_attr {
1211 	void                  (*event_handler)(struct ib_event *, void *);
1212 	void		       *qp_context;
1213 	u32			qp_num;
1214 	enum ib_qp_type		qp_type;
1215 };
1216 
1217 enum ib_rnr_timeout {
1218 	IB_RNR_TIMER_655_36 =  0,
1219 	IB_RNR_TIMER_000_01 =  1,
1220 	IB_RNR_TIMER_000_02 =  2,
1221 	IB_RNR_TIMER_000_03 =  3,
1222 	IB_RNR_TIMER_000_04 =  4,
1223 	IB_RNR_TIMER_000_06 =  5,
1224 	IB_RNR_TIMER_000_08 =  6,
1225 	IB_RNR_TIMER_000_12 =  7,
1226 	IB_RNR_TIMER_000_16 =  8,
1227 	IB_RNR_TIMER_000_24 =  9,
1228 	IB_RNR_TIMER_000_32 = 10,
1229 	IB_RNR_TIMER_000_48 = 11,
1230 	IB_RNR_TIMER_000_64 = 12,
1231 	IB_RNR_TIMER_000_96 = 13,
1232 	IB_RNR_TIMER_001_28 = 14,
1233 	IB_RNR_TIMER_001_92 = 15,
1234 	IB_RNR_TIMER_002_56 = 16,
1235 	IB_RNR_TIMER_003_84 = 17,
1236 	IB_RNR_TIMER_005_12 = 18,
1237 	IB_RNR_TIMER_007_68 = 19,
1238 	IB_RNR_TIMER_010_24 = 20,
1239 	IB_RNR_TIMER_015_36 = 21,
1240 	IB_RNR_TIMER_020_48 = 22,
1241 	IB_RNR_TIMER_030_72 = 23,
1242 	IB_RNR_TIMER_040_96 = 24,
1243 	IB_RNR_TIMER_061_44 = 25,
1244 	IB_RNR_TIMER_081_92 = 26,
1245 	IB_RNR_TIMER_122_88 = 27,
1246 	IB_RNR_TIMER_163_84 = 28,
1247 	IB_RNR_TIMER_245_76 = 29,
1248 	IB_RNR_TIMER_327_68 = 30,
1249 	IB_RNR_TIMER_491_52 = 31
1250 };
1251 
1252 enum ib_qp_attr_mask {
1253 	IB_QP_STATE			= 1,
1254 	IB_QP_CUR_STATE			= (1<<1),
1255 	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
1256 	IB_QP_ACCESS_FLAGS		= (1<<3),
1257 	IB_QP_PKEY_INDEX		= (1<<4),
1258 	IB_QP_PORT			= (1<<5),
1259 	IB_QP_QKEY			= (1<<6),
1260 	IB_QP_AV			= (1<<7),
1261 	IB_QP_PATH_MTU			= (1<<8),
1262 	IB_QP_TIMEOUT			= (1<<9),
1263 	IB_QP_RETRY_CNT			= (1<<10),
1264 	IB_QP_RNR_RETRY			= (1<<11),
1265 	IB_QP_RQ_PSN			= (1<<12),
1266 	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
1267 	IB_QP_ALT_PATH			= (1<<14),
1268 	IB_QP_MIN_RNR_TIMER		= (1<<15),
1269 	IB_QP_SQ_PSN			= (1<<16),
1270 	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
1271 	IB_QP_PATH_MIG_STATE		= (1<<18),
1272 	IB_QP_CAP			= (1<<19),
1273 	IB_QP_DEST_QPN			= (1<<20),
1274 	IB_QP_RESERVED1			= (1<<21),
1275 	IB_QP_RESERVED2			= (1<<22),
1276 	IB_QP_RESERVED3			= (1<<23),
1277 	IB_QP_RESERVED4			= (1<<24),
1278 	IB_QP_RATE_LIMIT		= (1<<25),
1279 
1280 	IB_QP_ATTR_STANDARD_BITS = GENMASK(20, 0),
1281 };
1282 
1283 enum ib_qp_state {
1284 	IB_QPS_RESET,
1285 	IB_QPS_INIT,
1286 	IB_QPS_RTR,
1287 	IB_QPS_RTS,
1288 	IB_QPS_SQD,
1289 	IB_QPS_SQE,
1290 	IB_QPS_ERR
1291 };
1292 
1293 enum ib_mig_state {
1294 	IB_MIG_MIGRATED,
1295 	IB_MIG_REARM,
1296 	IB_MIG_ARMED
1297 };
1298 
1299 enum ib_mw_type {
1300 	IB_MW_TYPE_1 = 1,
1301 	IB_MW_TYPE_2 = 2
1302 };
1303 
1304 struct ib_qp_attr {
1305 	enum ib_qp_state	qp_state;
1306 	enum ib_qp_state	cur_qp_state;
1307 	enum ib_mtu		path_mtu;
1308 	enum ib_mig_state	path_mig_state;
1309 	u32			qkey;
1310 	u32			rq_psn;
1311 	u32			sq_psn;
1312 	u32			dest_qp_num;
1313 	int			qp_access_flags;
1314 	struct ib_qp_cap	cap;
1315 	struct rdma_ah_attr	ah_attr;
1316 	struct rdma_ah_attr	alt_ah_attr;
1317 	u16			pkey_index;
1318 	u16			alt_pkey_index;
1319 	u8			en_sqd_async_notify;
1320 	u8			sq_draining;
1321 	u8			max_rd_atomic;
1322 	u8			max_dest_rd_atomic;
1323 	u8			min_rnr_timer;
1324 	u32			port_num;
1325 	u8			timeout;
1326 	u8			retry_cnt;
1327 	u8			rnr_retry;
1328 	u32			alt_port_num;
1329 	u8			alt_timeout;
1330 	u32			rate_limit;
1331 	struct net_device	*xmit_slave;
1332 };
1333 
1334 enum ib_wr_opcode {
1335 	/* These are shared with userspace */
1336 	IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
1337 	IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
1338 	IB_WR_SEND = IB_UVERBS_WR_SEND,
1339 	IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
1340 	IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
1341 	IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
1342 	IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
1343 	IB_WR_BIND_MW = IB_UVERBS_WR_BIND_MW,
1344 	IB_WR_LSO = IB_UVERBS_WR_TSO,
1345 	IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
1346 	IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
1347 	IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
1348 	IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
1349 		IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
1350 	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
1351 		IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1352 	IB_WR_FLUSH = IB_UVERBS_WR_FLUSH,
1353 	IB_WR_ATOMIC_WRITE = IB_UVERBS_WR_ATOMIC_WRITE,
1354 
1355 	/* These are kernel only and can not be issued by userspace */
1356 	IB_WR_REG_MR = 0x20,
1357 	IB_WR_REG_MR_INTEGRITY,
1358 
1359 	/* reserve values for low level drivers' internal use.
1360 	 * These values will not be used at all in the ib core layer.
1361 	 */
1362 	IB_WR_RESERVED1 = 0xf0,
1363 	IB_WR_RESERVED2,
1364 	IB_WR_RESERVED3,
1365 	IB_WR_RESERVED4,
1366 	IB_WR_RESERVED5,
1367 	IB_WR_RESERVED6,
1368 	IB_WR_RESERVED7,
1369 	IB_WR_RESERVED8,
1370 	IB_WR_RESERVED9,
1371 	IB_WR_RESERVED10,
1372 };
1373 
1374 enum ib_send_flags {
1375 	IB_SEND_FENCE		= 1,
1376 	IB_SEND_SIGNALED	= (1<<1),
1377 	IB_SEND_SOLICITED	= (1<<2),
1378 	IB_SEND_INLINE		= (1<<3),
1379 	IB_SEND_IP_CSUM		= (1<<4),
1380 
1381 	/* reserve bits 26-31 for low level drivers' internal use */
1382 	IB_SEND_RESERVED_START	= (1 << 26),
1383 	IB_SEND_RESERVED_END	= (1 << 31),
1384 };
1385 
1386 struct ib_sge {
1387 	u64	addr;
1388 	u32	length;
1389 	u32	lkey;
1390 };
1391 
1392 struct ib_cqe {
1393 	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
1394 };
1395 
1396 struct ib_send_wr {
1397 	struct ib_send_wr      *next;
1398 	union {
1399 		u64		wr_id;
1400 		struct ib_cqe	*wr_cqe;
1401 	};
1402 	struct ib_sge	       *sg_list;
1403 	int			num_sge;
1404 	enum ib_wr_opcode	opcode;
1405 	int			send_flags;
1406 	union {
1407 		__be32		imm_data;
1408 		u32		invalidate_rkey;
1409 	} ex;
1410 };
1411 
1412 struct ib_rdma_wr {
1413 	struct ib_send_wr	wr;
1414 	u64			remote_addr;
1415 	u32			rkey;
1416 };
1417 
1418 static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
1419 {
1420 	return container_of(wr, struct ib_rdma_wr, wr);
1421 }
1422 
1423 struct ib_atomic_wr {
1424 	struct ib_send_wr	wr;
1425 	u64			remote_addr;
1426 	u64			compare_add;
1427 	u64			swap;
1428 	u64			compare_add_mask;
1429 	u64			swap_mask;
1430 	u32			rkey;
1431 };
1432 
1433 static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
1434 {
1435 	return container_of(wr, struct ib_atomic_wr, wr);
1436 }
1437 
1438 struct ib_ud_wr {
1439 	struct ib_send_wr	wr;
1440 	struct ib_ah		*ah;
1441 	void			*header;
1442 	int			hlen;
1443 	int			mss;
1444 	u32			remote_qpn;
1445 	u32			remote_qkey;
1446 	u16			pkey_index; /* valid for GSI only */
1447 	u32			port_num; /* valid for DR SMPs on switch only */
1448 };
1449 
1450 static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
1451 {
1452 	return container_of(wr, struct ib_ud_wr, wr);
1453 }
1454 
1455 struct ib_reg_wr {
1456 	struct ib_send_wr	wr;
1457 	struct ib_mr		*mr;
1458 	u32			key;
1459 	int			access;
1460 };
1461 
1462 static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
1463 {
1464 	return container_of(wr, struct ib_reg_wr, wr);
1465 }
1466 
1467 struct ib_recv_wr {
1468 	struct ib_recv_wr      *next;
1469 	union {
1470 		u64		wr_id;
1471 		struct ib_cqe	*wr_cqe;
1472 	};
1473 	struct ib_sge	       *sg_list;
1474 	int			num_sge;
1475 };
1476 
1477 enum ib_access_flags {
1478 	IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
1479 	IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
1480 	IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
1481 	IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
1482 	IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
1483 	IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
1484 	IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
1485 	IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
1486 	IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING,
1487 	IB_ACCESS_FLUSH_GLOBAL = IB_UVERBS_ACCESS_FLUSH_GLOBAL,
1488 	IB_ACCESS_FLUSH_PERSISTENT = IB_UVERBS_ACCESS_FLUSH_PERSISTENT,
1489 
1490 	IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE,
1491 	IB_ACCESS_SUPPORTED =
1492 		((IB_ACCESS_FLUSH_PERSISTENT << 1) - 1) | IB_ACCESS_OPTIONAL,
1493 };
1494 
1495 /*
1496  * XXX: these are apparently used for ->rereg_user_mr, no idea why they
1497  * are hidden here instead of a uapi header!
1498  */
1499 enum ib_mr_rereg_flags {
1500 	IB_MR_REREG_TRANS	= 1,
1501 	IB_MR_REREG_PD		= (1<<1),
1502 	IB_MR_REREG_ACCESS	= (1<<2),
1503 	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
1504 };
1505 
1506 struct ib_umem;
1507 
1508 enum rdma_remove_reason {
1509 	/*
1510 	 * Userspace requested uobject deletion or initial try
1511 	 * to remove uobject via cleanup. Call could fail
1512 	 */
1513 	RDMA_REMOVE_DESTROY,
1514 	/* Context deletion. This call should delete the actual object itself */
1515 	RDMA_REMOVE_CLOSE,
1516 	/* Driver is being hot-unplugged. This call should delete the actual object itself */
1517 	RDMA_REMOVE_DRIVER_REMOVE,
1518 	/* uobj is being cleaned-up before being committed */
1519 	RDMA_REMOVE_ABORT,
1520 	/* The driver failed to destroy the uobject and is being disconnected */
1521 	RDMA_REMOVE_DRIVER_FAILURE,
1522 };
1523 
1524 struct ib_rdmacg_object {
1525 #ifdef CONFIG_CGROUP_RDMA
1526 	struct rdma_cgroup	*cg;		/* owner rdma cgroup */
1527 #endif
1528 };
1529 
1530 struct ib_ucontext {
1531 	struct ib_device       *device;
1532 	struct ib_uverbs_file  *ufile;
1533 
1534 	struct ib_rdmacg_object	cg_obj;
1535 	u64 enabled_caps;
1536 	/*
1537 	 * Implementation details of the RDMA core, don't use in drivers:
1538 	 */
1539 	struct rdma_restrack_entry res;
1540 	struct xarray mmap_xa;
1541 };
1542 
1543 struct ib_uobject {
1544 	u64			user_handle;	/* handle given to us by userspace */
1545 	/* ufile & ucontext owning this object */
1546 	struct ib_uverbs_file  *ufile;
1547 	/* FIXME, save memory: ufile->context == context */
1548 	struct ib_ucontext     *context;	/* associated user context */
1549 	void		       *object;		/* containing object */
1550 	struct list_head	list;		/* link to context's list */
1551 	struct ib_rdmacg_object	cg_obj;		/* rdmacg object */
1552 	int			id;		/* index into kernel idr */
1553 	struct kref		ref;
1554 	atomic_t		usecnt;		/* protects exclusive access */
1555 	struct rcu_head		rcu;		/* kfree_rcu() overhead */
1556 
1557 	const struct uverbs_api_object *uapi_object;
1558 };
1559 
1560 struct ib_udata {
1561 	const void __user *inbuf;
1562 	void __user *outbuf;
1563 	size_t       inlen;
1564 	size_t       outlen;
1565 };
1566 
1567 struct ib_pd {
1568 	u32			local_dma_lkey;
1569 	u32			flags;
1570 	struct ib_device       *device;
1571 	struct ib_uobject      *uobject;
1572 	atomic_t          	usecnt; /* count all resources */
1573 
1574 	u32			unsafe_global_rkey;
1575 
1576 	/*
1577 	 * Implementation details of the RDMA core, don't use in drivers:
1578 	 */
1579 	struct ib_mr	       *__internal_mr;
1580 	struct rdma_restrack_entry res;
1581 };
1582 
1583 struct ib_xrcd {
1584 	struct ib_device       *device;
1585 	atomic_t		usecnt; /* count all exposed resources */
1586 	struct inode	       *inode;
1587 	struct rw_semaphore	tgt_qps_rwsem;
1588 	struct xarray		tgt_qps;
1589 };
1590 
1591 struct ib_ah {
1592 	struct ib_device	*device;
1593 	struct ib_pd		*pd;
1594 	struct ib_uobject	*uobject;
1595 	const struct ib_gid_attr *sgid_attr;
1596 	enum rdma_ah_attr_type	type;
1597 };
1598 
1599 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1600 
1601 enum ib_poll_context {
1602 	IB_POLL_SOFTIRQ,	   /* poll from softirq context */
1603 	IB_POLL_WORKQUEUE,	   /* poll from workqueue */
1604 	IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
1605 	IB_POLL_LAST_POOL_TYPE = IB_POLL_UNBOUND_WORKQUEUE,
1606 
1607 	IB_POLL_DIRECT,		   /* caller context, no hw completions */
1608 };
1609 
1610 struct ib_cq {
1611 	struct ib_device       *device;
1612 	struct ib_ucq_object   *uobject;
1613 	ib_comp_handler   	comp_handler;
1614 	void                  (*event_handler)(struct ib_event *, void *);
1615 	void                   *cq_context;
1616 	int               	cqe;
1617 	unsigned int		cqe_used;
1618 	atomic_t          	usecnt; /* count number of work queues */
1619 	enum ib_poll_context	poll_ctx;
1620 	struct ib_wc		*wc;
1621 	struct list_head        pool_entry;
1622 	union {
1623 		struct irq_poll		iop;
1624 		struct work_struct	work;
1625 	};
1626 	struct workqueue_struct *comp_wq;
1627 	struct dim *dim;
1628 
1629 	/* updated only by trace points */
1630 	ktime_t timestamp;
1631 	u8 interrupt:1;
1632 	u8 shared:1;
1633 	unsigned int comp_vector;
1634 
1635 	/*
1636 	 * Implementation details of the RDMA core, don't use in drivers:
1637 	 */
1638 	struct rdma_restrack_entry res;
1639 };
1640 
1641 struct ib_srq {
1642 	struct ib_device       *device;
1643 	struct ib_pd	       *pd;
1644 	struct ib_usrq_object  *uobject;
1645 	void		      (*event_handler)(struct ib_event *, void *);
1646 	void		       *srq_context;
1647 	enum ib_srq_type	srq_type;
1648 	atomic_t		usecnt;
1649 
1650 	struct {
1651 		struct ib_cq   *cq;
1652 		union {
1653 			struct {
1654 				struct ib_xrcd *xrcd;
1655 				u32		srq_num;
1656 			} xrc;
1657 		};
1658 	} ext;
1659 
1660 	/*
1661 	 * Implementation details of the RDMA core, don't use in drivers:
1662 	 */
1663 	struct rdma_restrack_entry res;
1664 };
1665 
1666 enum ib_raw_packet_caps {
1667 	/*
1668 	 * Strip cvlan from incoming packet and report it in the matching work
1669 	 * completion is supported.
1670 	 */
1671 	IB_RAW_PACKET_CAP_CVLAN_STRIPPING =
1672 		IB_UVERBS_RAW_PACKET_CAP_CVLAN_STRIPPING,
1673 	/*
1674 	 * Scatter FCS field of an incoming packet to host memory is supported.
1675 	 */
1676 	IB_RAW_PACKET_CAP_SCATTER_FCS = IB_UVERBS_RAW_PACKET_CAP_SCATTER_FCS,
1677 	/* Checksum offloads are supported (for both send and receive). */
1678 	IB_RAW_PACKET_CAP_IP_CSUM = IB_UVERBS_RAW_PACKET_CAP_IP_CSUM,
1679 	/*
1680 	 * When a packet is received for an RQ with no receive WQEs, the
1681 	 * packet processing is delayed.
1682 	 */
1683 	IB_RAW_PACKET_CAP_DELAY_DROP = IB_UVERBS_RAW_PACKET_CAP_DELAY_DROP,
1684 };
1685 
1686 enum ib_wq_type {
1687 	IB_WQT_RQ = IB_UVERBS_WQT_RQ,
1688 };
1689 
1690 enum ib_wq_state {
1691 	IB_WQS_RESET,
1692 	IB_WQS_RDY,
1693 	IB_WQS_ERR
1694 };
1695 
1696 struct ib_wq {
1697 	struct ib_device       *device;
1698 	struct ib_uwq_object   *uobject;
1699 	void		    *wq_context;
1700 	void		    (*event_handler)(struct ib_event *, void *);
1701 	struct ib_pd	       *pd;
1702 	struct ib_cq	       *cq;
1703 	u32		wq_num;
1704 	enum ib_wq_state       state;
1705 	enum ib_wq_type	wq_type;
1706 	atomic_t		usecnt;
1707 };
1708 
1709 enum ib_wq_flags {
1710 	IB_WQ_FLAGS_CVLAN_STRIPPING	= IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING,
1711 	IB_WQ_FLAGS_SCATTER_FCS		= IB_UVERBS_WQ_FLAGS_SCATTER_FCS,
1712 	IB_WQ_FLAGS_DELAY_DROP		= IB_UVERBS_WQ_FLAGS_DELAY_DROP,
1713 	IB_WQ_FLAGS_PCI_WRITE_END_PADDING =
1714 				IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING,
1715 };
1716 
1717 struct ib_wq_init_attr {
1718 	void		       *wq_context;
1719 	enum ib_wq_type	wq_type;
1720 	u32		max_wr;
1721 	u32		max_sge;
1722 	struct	ib_cq	       *cq;
1723 	void		    (*event_handler)(struct ib_event *, void *);
1724 	u32		create_flags; /* Use enum ib_wq_flags */
1725 };
1726 
1727 enum ib_wq_attr_mask {
1728 	IB_WQ_STATE		= 1 << 0,
1729 	IB_WQ_CUR_STATE		= 1 << 1,
1730 	IB_WQ_FLAGS		= 1 << 2,
1731 };
1732 
1733 struct ib_wq_attr {
1734 	enum	ib_wq_state	wq_state;
1735 	enum	ib_wq_state	curr_wq_state;
1736 	u32			flags; /* Use enum ib_wq_flags */
1737 	u32			flags_mask; /* Use enum ib_wq_flags */
1738 };
1739 
1740 struct ib_rwq_ind_table {
1741 	struct ib_device	*device;
1742 	struct ib_uobject      *uobject;
1743 	atomic_t		usecnt;
1744 	u32		ind_tbl_num;
1745 	u32		log_ind_tbl_size;
1746 	struct ib_wq	**ind_tbl;
1747 };
1748 
1749 struct ib_rwq_ind_table_init_attr {
1750 	u32		log_ind_tbl_size;
1751 	/* Each entry is a pointer to Receive Work Queue */
1752 	struct ib_wq	**ind_tbl;
1753 };
1754 
1755 enum port_pkey_state {
1756 	IB_PORT_PKEY_NOT_VALID = 0,
1757 	IB_PORT_PKEY_VALID = 1,
1758 	IB_PORT_PKEY_LISTED = 2,
1759 };
1760 
1761 struct ib_qp_security;
1762 
1763 struct ib_port_pkey {
1764 	enum port_pkey_state	state;
1765 	u16			pkey_index;
1766 	u32			port_num;
1767 	struct list_head	qp_list;
1768 	struct list_head	to_error_list;
1769 	struct ib_qp_security  *sec;
1770 };
1771 
1772 struct ib_ports_pkeys {
1773 	struct ib_port_pkey	main;
1774 	struct ib_port_pkey	alt;
1775 };
1776 
1777 struct ib_qp_security {
1778 	struct ib_qp	       *qp;
1779 	struct ib_device       *dev;
1780 	/* Hold this mutex when changing port and pkey settings. */
1781 	struct mutex		mutex;
1782 	struct ib_ports_pkeys  *ports_pkeys;
1783 	/* A list of all open shared QP handles.  Required to enforce security
1784 	 * properly for all users of a shared QP.
1785 	 */
1786 	struct list_head        shared_qp_list;
1787 	void                   *security;
1788 	bool			destroying;
1789 	atomic_t		error_list_count;
1790 	struct completion	error_complete;
1791 	int			error_comps_pending;
1792 };
1793 
1794 /*
1795  * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
1796  * @max_read_sge:  Maximum SGE elements per RDMA READ request.
1797  */
1798 struct ib_qp {
1799 	struct ib_device       *device;
1800 	struct ib_pd	       *pd;
1801 	struct ib_cq	       *send_cq;
1802 	struct ib_cq	       *recv_cq;
1803 	spinlock_t		mr_lock;
1804 	int			mrs_used;
1805 	struct list_head	rdma_mrs;
1806 	struct list_head	sig_mrs;
1807 	struct ib_srq	       *srq;
1808 	struct completion	srq_completion;
1809 	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
1810 	struct list_head	xrcd_list;
1811 
1812 	/* count times opened, mcast attaches, flow attaches */
1813 	atomic_t		usecnt;
1814 	struct list_head	open_list;
1815 	struct ib_qp           *real_qp;
1816 	struct ib_uqp_object   *uobject;
1817 	void                  (*event_handler)(struct ib_event *, void *);
1818 	void                  (*registered_event_handler)(struct ib_event *, void *);
1819 	void		       *qp_context;
1820 	/* sgid_attrs associated with the AV's */
1821 	const struct ib_gid_attr *av_sgid_attr;
1822 	const struct ib_gid_attr *alt_path_sgid_attr;
1823 	u32			qp_num;
1824 	u32			max_write_sge;
1825 	u32			max_read_sge;
1826 	enum ib_qp_type		qp_type;
1827 	struct ib_rwq_ind_table *rwq_ind_tbl;
1828 	struct ib_qp_security  *qp_sec;
1829 	u32			port;
1830 
1831 	bool			integrity_en;
1832 	/*
1833 	 * Implementation details of the RDMA core, don't use in drivers:
1834 	 */
1835 	struct rdma_restrack_entry     res;
1836 
1837 	/* The counter the qp is bind to */
1838 	struct rdma_counter    *counter;
1839 };
1840 
1841 struct ib_dm {
1842 	struct ib_device  *device;
1843 	u32		   length;
1844 	u32		   flags;
1845 	struct ib_uobject *uobject;
1846 	atomic_t	   usecnt;
1847 };
1848 
1849 struct ib_mr {
1850 	struct ib_device  *device;
1851 	struct ib_pd	  *pd;
1852 	u32		   lkey;
1853 	u32		   rkey;
1854 	u64		   iova;
1855 	u64		   length;
1856 	unsigned int	   page_size;
1857 	enum ib_mr_type	   type;
1858 	bool		   need_inval;
1859 	union {
1860 		struct ib_uobject	*uobject;	/* user */
1861 		struct list_head	qp_entry;	/* FR */
1862 	};
1863 
1864 	struct ib_dm      *dm;
1865 	struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
1866 	/*
1867 	 * Implementation details of the RDMA core, don't use in drivers:
1868 	 */
1869 	struct rdma_restrack_entry res;
1870 };
1871 
1872 struct ib_mw {
1873 	struct ib_device	*device;
1874 	struct ib_pd		*pd;
1875 	struct ib_uobject	*uobject;
1876 	u32			rkey;
1877 	enum ib_mw_type         type;
1878 };
1879 
1880 /* Supported steering options */
1881 enum ib_flow_attr_type {
1882 	/* steering according to rule specifications */
1883 	IB_FLOW_ATTR_NORMAL		= 0x0,
1884 	/* default unicast and multicast rule -
1885 	 * receive all Eth traffic which isn't steered to any QP
1886 	 */
1887 	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
1888 	/* default multicast rule -
1889 	 * receive all Eth multicast traffic which isn't steered to any QP
1890 	 */
1891 	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
1892 	/* sniffer rule - receive all port traffic */
1893 	IB_FLOW_ATTR_SNIFFER		= 0x3
1894 };
1895 
1896 /* Supported steering header types */
1897 enum ib_flow_spec_type {
1898 	/* L2 headers*/
1899 	IB_FLOW_SPEC_ETH		= 0x20,
1900 	IB_FLOW_SPEC_IB			= 0x22,
1901 	/* L3 header*/
1902 	IB_FLOW_SPEC_IPV4		= 0x30,
1903 	IB_FLOW_SPEC_IPV6		= 0x31,
1904 	IB_FLOW_SPEC_ESP                = 0x34,
1905 	/* L4 headers*/
1906 	IB_FLOW_SPEC_TCP		= 0x40,
1907 	IB_FLOW_SPEC_UDP		= 0x41,
1908 	IB_FLOW_SPEC_VXLAN_TUNNEL	= 0x50,
1909 	IB_FLOW_SPEC_GRE		= 0x51,
1910 	IB_FLOW_SPEC_MPLS		= 0x60,
1911 	IB_FLOW_SPEC_INNER		= 0x100,
1912 	/* Actions */
1913 	IB_FLOW_SPEC_ACTION_TAG         = 0x1000,
1914 	IB_FLOW_SPEC_ACTION_DROP        = 0x1001,
1915 	IB_FLOW_SPEC_ACTION_HANDLE	= 0x1002,
1916 	IB_FLOW_SPEC_ACTION_COUNT       = 0x1003,
1917 };
1918 #define IB_FLOW_SPEC_LAYER_MASK	0xF0
1919 #define IB_FLOW_SPEC_SUPPORT_LAYERS 10
1920 
1921 enum ib_flow_flags {
1922 	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
1923 	IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
1924 	IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 3  /* Must be last */
1925 };
1926 
1927 struct ib_flow_eth_filter {
1928 	u8	dst_mac[6];
1929 	u8	src_mac[6];
1930 	__be16	ether_type;
1931 	__be16	vlan_tag;
1932 };
1933 
1934 struct ib_flow_spec_eth {
1935 	u32			  type;
1936 	u16			  size;
1937 	struct ib_flow_eth_filter val;
1938 	struct ib_flow_eth_filter mask;
1939 };
1940 
1941 struct ib_flow_ib_filter {
1942 	__be16 dlid;
1943 	__u8   sl;
1944 };
1945 
1946 struct ib_flow_spec_ib {
1947 	u32			 type;
1948 	u16			 size;
1949 	struct ib_flow_ib_filter val;
1950 	struct ib_flow_ib_filter mask;
1951 };
1952 
1953 /* IPv4 header flags */
1954 enum ib_ipv4_flags {
1955 	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
1956 	IB_IPV4_MORE_FRAG = 0X4  /* For All fragmented packets except the
1957 				    last have this flag set */
1958 };
1959 
1960 struct ib_flow_ipv4_filter {
1961 	__be32	src_ip;
1962 	__be32	dst_ip;
1963 	u8	proto;
1964 	u8	tos;
1965 	u8	ttl;
1966 	u8	flags;
1967 };
1968 
1969 struct ib_flow_spec_ipv4 {
1970 	u32			   type;
1971 	u16			   size;
1972 	struct ib_flow_ipv4_filter val;
1973 	struct ib_flow_ipv4_filter mask;
1974 };
1975 
1976 struct ib_flow_ipv6_filter {
1977 	u8	src_ip[16];
1978 	u8	dst_ip[16];
1979 	__be32	flow_label;
1980 	u8	next_hdr;
1981 	u8	traffic_class;
1982 	u8	hop_limit;
1983 } __packed;
1984 
1985 struct ib_flow_spec_ipv6 {
1986 	u32			   type;
1987 	u16			   size;
1988 	struct ib_flow_ipv6_filter val;
1989 	struct ib_flow_ipv6_filter mask;
1990 };
1991 
1992 struct ib_flow_tcp_udp_filter {
1993 	__be16	dst_port;
1994 	__be16	src_port;
1995 };
1996 
1997 struct ib_flow_spec_tcp_udp {
1998 	u32			      type;
1999 	u16			      size;
2000 	struct ib_flow_tcp_udp_filter val;
2001 	struct ib_flow_tcp_udp_filter mask;
2002 };
2003 
2004 struct ib_flow_tunnel_filter {
2005 	__be32	tunnel_id;
2006 };
2007 
2008 /* ib_flow_spec_tunnel describes the Vxlan tunnel
2009  * the tunnel_id from val has the vni value
2010  */
2011 struct ib_flow_spec_tunnel {
2012 	u32			      type;
2013 	u16			      size;
2014 	struct ib_flow_tunnel_filter  val;
2015 	struct ib_flow_tunnel_filter  mask;
2016 };
2017 
2018 struct ib_flow_esp_filter {
2019 	__be32	spi;
2020 	__be32  seq;
2021 };
2022 
2023 struct ib_flow_spec_esp {
2024 	u32                           type;
2025 	u16			      size;
2026 	struct ib_flow_esp_filter     val;
2027 	struct ib_flow_esp_filter     mask;
2028 };
2029 
2030 struct ib_flow_gre_filter {
2031 	__be16 c_ks_res0_ver;
2032 	__be16 protocol;
2033 	__be32 key;
2034 };
2035 
2036 struct ib_flow_spec_gre {
2037 	u32                           type;
2038 	u16			      size;
2039 	struct ib_flow_gre_filter     val;
2040 	struct ib_flow_gre_filter     mask;
2041 };
2042 
2043 struct ib_flow_mpls_filter {
2044 	__be32 tag;
2045 };
2046 
2047 struct ib_flow_spec_mpls {
2048 	u32                           type;
2049 	u16			      size;
2050 	struct ib_flow_mpls_filter     val;
2051 	struct ib_flow_mpls_filter     mask;
2052 };
2053 
2054 struct ib_flow_spec_action_tag {
2055 	enum ib_flow_spec_type	      type;
2056 	u16			      size;
2057 	u32                           tag_id;
2058 };
2059 
2060 struct ib_flow_spec_action_drop {
2061 	enum ib_flow_spec_type	      type;
2062 	u16			      size;
2063 };
2064 
2065 struct ib_flow_spec_action_handle {
2066 	enum ib_flow_spec_type	      type;
2067 	u16			      size;
2068 	struct ib_flow_action	     *act;
2069 };
2070 
2071 enum ib_counters_description {
2072 	IB_COUNTER_PACKETS,
2073 	IB_COUNTER_BYTES,
2074 };
2075 
2076 struct ib_flow_spec_action_count {
2077 	enum ib_flow_spec_type type;
2078 	u16 size;
2079 	struct ib_counters *counters;
2080 };
2081 
2082 union ib_flow_spec {
2083 	struct {
2084 		u32			type;
2085 		u16			size;
2086 	};
2087 	struct ib_flow_spec_eth		eth;
2088 	struct ib_flow_spec_ib		ib;
2089 	struct ib_flow_spec_ipv4        ipv4;
2090 	struct ib_flow_spec_tcp_udp	tcp_udp;
2091 	struct ib_flow_spec_ipv6        ipv6;
2092 	struct ib_flow_spec_tunnel      tunnel;
2093 	struct ib_flow_spec_esp		esp;
2094 	struct ib_flow_spec_gre		gre;
2095 	struct ib_flow_spec_mpls	mpls;
2096 	struct ib_flow_spec_action_tag  flow_tag;
2097 	struct ib_flow_spec_action_drop drop;
2098 	struct ib_flow_spec_action_handle action;
2099 	struct ib_flow_spec_action_count flow_count;
2100 };
2101 
2102 struct ib_flow_attr {
2103 	enum ib_flow_attr_type type;
2104 	u16	     size;
2105 	u16	     priority;
2106 	u32	     flags;
2107 	u8	     num_of_specs;
2108 	u32	     port;
2109 	union ib_flow_spec flows[];
2110 };
2111 
2112 struct ib_flow {
2113 	struct ib_qp		*qp;
2114 	struct ib_device	*device;
2115 	struct ib_uobject	*uobject;
2116 };
2117 
2118 enum ib_flow_action_type {
2119 	IB_FLOW_ACTION_UNSPECIFIED,
2120 	IB_FLOW_ACTION_ESP = 1,
2121 };
2122 
2123 struct ib_flow_action_attrs_esp_keymats {
2124 	enum ib_uverbs_flow_action_esp_keymat			protocol;
2125 	union {
2126 		struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
2127 	} keymat;
2128 };
2129 
2130 struct ib_flow_action_attrs_esp_replays {
2131 	enum ib_uverbs_flow_action_esp_replay			protocol;
2132 	union {
2133 		struct ib_uverbs_flow_action_esp_replay_bmp	bmp;
2134 	} replay;
2135 };
2136 
2137 enum ib_flow_action_attrs_esp_flags {
2138 	/* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
2139 	 * This is done in order to share the same flags between user-space and
2140 	 * kernel and spare an unnecessary translation.
2141 	 */
2142 
2143 	/* Kernel flags */
2144 	IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED	= 1ULL << 32,
2145 	IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS	= 1ULL << 33,
2146 };
2147 
2148 struct ib_flow_spec_list {
2149 	struct ib_flow_spec_list	*next;
2150 	union ib_flow_spec		spec;
2151 };
2152 
2153 struct ib_flow_action_attrs_esp {
2154 	struct ib_flow_action_attrs_esp_keymats		*keymat;
2155 	struct ib_flow_action_attrs_esp_replays		*replay;
2156 	struct ib_flow_spec_list			*encap;
2157 	/* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
2158 	 * Value of 0 is a valid value.
2159 	 */
2160 	u32						esn;
2161 	u32						spi;
2162 	u32						seq;
2163 	u32						tfc_pad;
2164 	/* Use enum ib_flow_action_attrs_esp_flags */
2165 	u64						flags;
2166 	u64						hard_limit_pkts;
2167 };
2168 
2169 struct ib_flow_action {
2170 	struct ib_device		*device;
2171 	struct ib_uobject		*uobject;
2172 	enum ib_flow_action_type	type;
2173 	atomic_t			usecnt;
2174 };
2175 
2176 struct ib_mad;
2177 
2178 enum ib_process_mad_flags {
2179 	IB_MAD_IGNORE_MKEY	= 1,
2180 	IB_MAD_IGNORE_BKEY	= 2,
2181 	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
2182 };
2183 
2184 enum ib_mad_result {
2185 	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
2186 	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
2187 	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
2188 	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
2189 };
2190 
2191 struct ib_port_cache {
2192 	u64		      subnet_prefix;
2193 	struct ib_pkey_cache  *pkey;
2194 	struct ib_gid_table   *gid;
2195 	u8                     lmc;
2196 	enum ib_port_state     port_state;
2197 	enum ib_port_state     last_port_state;
2198 };
2199 
2200 struct ib_port_immutable {
2201 	int                           pkey_tbl_len;
2202 	int                           gid_tbl_len;
2203 	u32                           core_cap_flags;
2204 	u32                           max_mad_size;
2205 };
2206 
2207 struct ib_port_data {
2208 	struct ib_device *ib_dev;
2209 
2210 	struct ib_port_immutable immutable;
2211 
2212 	spinlock_t pkey_list_lock;
2213 
2214 	spinlock_t netdev_lock;
2215 
2216 	struct list_head pkey_list;
2217 
2218 	struct ib_port_cache cache;
2219 
2220 	struct net_device __rcu *netdev;
2221 	netdevice_tracker netdev_tracker;
2222 	struct hlist_node ndev_hash_link;
2223 	struct rdma_port_counter port_counter;
2224 	struct ib_port *sysfs;
2225 };
2226 
2227 /* rdma netdev type - specifies protocol type */
2228 enum rdma_netdev_t {
2229 	RDMA_NETDEV_OPA_VNIC,
2230 	RDMA_NETDEV_IPOIB,
2231 };
2232 
2233 /**
2234  * struct rdma_netdev - rdma netdev
2235  * For cases where netstack interfacing is required.
2236  */
2237 struct rdma_netdev {
2238 	void              *clnt_priv;
2239 	struct ib_device  *hca;
2240 	u32		   port_num;
2241 	int                mtu;
2242 
2243 	/*
2244 	 * cleanup function must be specified.
2245 	 * FIXME: This is only used for OPA_VNIC and that usage should be
2246 	 * removed too.
2247 	 */
2248 	void (*free_rdma_netdev)(struct net_device *netdev);
2249 
2250 	/* control functions */
2251 	void (*set_id)(struct net_device *netdev, int id);
2252 	/* send packet */
2253 	int (*send)(struct net_device *dev, struct sk_buff *skb,
2254 		    struct ib_ah *address, u32 dqpn);
2255 	/* multicast */
2256 	int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
2257 			    union ib_gid *gid, u16 mlid,
2258 			    int set_qkey, u32 qkey);
2259 	int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
2260 			    union ib_gid *gid, u16 mlid);
2261 	/* timeout */
2262 	void (*tx_timeout)(struct net_device *dev, unsigned int txqueue);
2263 };
2264 
2265 struct rdma_netdev_alloc_params {
2266 	size_t sizeof_priv;
2267 	unsigned int txqs;
2268 	unsigned int rxqs;
2269 	void *param;
2270 
2271 	int (*initialize_rdma_netdev)(struct ib_device *device, u32 port_num,
2272 				      struct net_device *netdev, void *param);
2273 };
2274 
2275 struct ib_odp_counters {
2276 	atomic64_t faults;
2277 	atomic64_t faults_handled;
2278 	atomic64_t invalidations;
2279 	atomic64_t invalidations_handled;
2280 	atomic64_t prefetch;
2281 };
2282 
2283 struct ib_counters {
2284 	struct ib_device	*device;
2285 	struct ib_uobject	*uobject;
2286 	/* num of objects attached */
2287 	atomic_t	usecnt;
2288 };
2289 
2290 struct ib_counters_read_attr {
2291 	u64	*counters_buff;
2292 	u32	ncounters;
2293 	u32	flags; /* use enum ib_read_counters_flags */
2294 };
2295 
2296 struct uverbs_attr_bundle;
2297 struct iw_cm_id;
2298 struct iw_cm_conn_param;
2299 
2300 #define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member)                      \
2301 	.size_##ib_struct =                                                    \
2302 		(sizeof(struct drv_struct) +                                   \
2303 		 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) +      \
2304 		 BUILD_BUG_ON_ZERO(                                            \
2305 			 !__same_type(((struct drv_struct *)NULL)->member,     \
2306 				      struct ib_struct)))
2307 
2308 #define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp)                          \
2309 	((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
2310 					   gfp, false))
2311 
2312 #define rdma_zalloc_drv_obj_numa(ib_dev, ib_type)                              \
2313 	((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
2314 					   GFP_KERNEL, true))
2315 
2316 #define rdma_zalloc_drv_obj(ib_dev, ib_type)                                   \
2317 	rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)
2318 
2319 #define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
2320 
2321 struct rdma_user_mmap_entry {
2322 	struct kref ref;
2323 	struct ib_ucontext *ucontext;
2324 	unsigned long start_pgoff;
2325 	size_t npages;
2326 	bool driver_removed;
2327 };
2328 
2329 /* Return the offset (in bytes) the user should pass to libc's mmap() */
2330 static inline u64
2331 rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry)
2332 {
2333 	return (u64)entry->start_pgoff << PAGE_SHIFT;
2334 }
2335 
2336 /**
2337  * struct ib_device_ops - InfiniBand device operations
2338  * This structure defines all the InfiniBand device operations, providers will
2339  * need to define the supported operations, otherwise they will be set to null.
2340  */
2341 struct ib_device_ops {
2342 	struct module *owner;
2343 	enum rdma_driver_id driver_id;
2344 	u32 uverbs_abi_ver;
2345 	unsigned int uverbs_no_driver_id_binding:1;
2346 
2347 	/*
2348 	 * NOTE: New drivers should not make use of device_group; instead new
2349 	 * device parameter should be exposed via netlink command. This
2350 	 * mechanism exists only for existing drivers.
2351 	 */
2352 	const struct attribute_group *device_group;
2353 	const struct attribute_group **port_groups;
2354 
2355 	int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
2356 			 const struct ib_send_wr **bad_send_wr);
2357 	int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
2358 			 const struct ib_recv_wr **bad_recv_wr);
2359 	void (*drain_rq)(struct ib_qp *qp);
2360 	void (*drain_sq)(struct ib_qp *qp);
2361 	int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
2362 	int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2363 	int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
2364 	int (*post_srq_recv)(struct ib_srq *srq,
2365 			     const struct ib_recv_wr *recv_wr,
2366 			     const struct ib_recv_wr **bad_recv_wr);
2367 	int (*process_mad)(struct ib_device *device, int process_mad_flags,
2368 			   u32 port_num, const struct ib_wc *in_wc,
2369 			   const struct ib_grh *in_grh,
2370 			   const struct ib_mad *in_mad, struct ib_mad *out_mad,
2371 			   size_t *out_mad_size, u16 *out_mad_pkey_index);
2372 	int (*query_device)(struct ib_device *device,
2373 			    struct ib_device_attr *device_attr,
2374 			    struct ib_udata *udata);
2375 	int (*modify_device)(struct ib_device *device, int device_modify_mask,
2376 			     struct ib_device_modify *device_modify);
2377 	void (*get_dev_fw_str)(struct ib_device *device, char *str);
2378 	const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2379 						     int comp_vector);
2380 	int (*query_port)(struct ib_device *device, u32 port_num,
2381 			  struct ib_port_attr *port_attr);
2382 	int (*modify_port)(struct ib_device *device, u32 port_num,
2383 			   int port_modify_mask,
2384 			   struct ib_port_modify *port_modify);
2385 	/**
2386 	 * The following mandatory functions are used only at device
2387 	 * registration.  Keep functions such as these at the end of this
2388 	 * structure to avoid cache line misses when accessing struct ib_device
2389 	 * in fast paths.
2390 	 */
2391 	int (*get_port_immutable)(struct ib_device *device, u32 port_num,
2392 				  struct ib_port_immutable *immutable);
2393 	enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
2394 					       u32 port_num);
2395 	/**
2396 	 * When calling get_netdev, the HW vendor's driver should return the
2397 	 * net device of device @device at port @port_num or NULL if such
2398 	 * a net device doesn't exist. The vendor driver should call dev_hold
2399 	 * on this net device. The HW vendor's device driver must guarantee
2400 	 * that this function returns NULL before the net device has finished
2401 	 * NETDEV_UNREGISTER state.
2402 	 */
2403 	struct net_device *(*get_netdev)(struct ib_device *device,
2404 					 u32 port_num);
2405 	/**
2406 	 * rdma netdev operation
2407 	 *
2408 	 * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
2409 	 * must return -EOPNOTSUPP if it doesn't support the specified type.
2410 	 */
2411 	struct net_device *(*alloc_rdma_netdev)(
2412 		struct ib_device *device, u32 port_num, enum rdma_netdev_t type,
2413 		const char *name, unsigned char name_assign_type,
2414 		void (*setup)(struct net_device *));
2415 
2416 	int (*rdma_netdev_get_params)(struct ib_device *device, u32 port_num,
2417 				      enum rdma_netdev_t type,
2418 				      struct rdma_netdev_alloc_params *params);
2419 	/**
2420 	 * query_gid should be return GID value for @device, when @port_num
2421 	 * link layer is either IB or iWarp. It is no-op if @port_num port
2422 	 * is RoCE link layer.
2423 	 */
2424 	int (*query_gid)(struct ib_device *device, u32 port_num, int index,
2425 			 union ib_gid *gid);
2426 	/**
2427 	 * When calling add_gid, the HW vendor's driver should add the gid
2428 	 * of device of port at gid index available at @attr. Meta-info of
2429 	 * that gid (for example, the network device related to this gid) is
2430 	 * available at @attr. @context allows the HW vendor driver to store
2431 	 * extra information together with a GID entry. The HW vendor driver may
2432 	 * allocate memory to contain this information and store it in @context
2433 	 * when a new GID entry is written to. Params are consistent until the
2434 	 * next call of add_gid or delete_gid. The function should return 0 on
2435 	 * success or error otherwise. The function could be called
2436 	 * concurrently for different ports. This function is only called when
2437 	 * roce_gid_table is used.
2438 	 */
2439 	int (*add_gid)(const struct ib_gid_attr *attr, void **context);
2440 	/**
2441 	 * When calling del_gid, the HW vendor's driver should delete the
2442 	 * gid of device @device at gid index gid_index of port port_num
2443 	 * available in @attr.
2444 	 * Upon the deletion of a GID entry, the HW vendor must free any
2445 	 * allocated memory. The caller will clear @context afterwards.
2446 	 * This function is only called when roce_gid_table is used.
2447 	 */
2448 	int (*del_gid)(const struct ib_gid_attr *attr, void **context);
2449 	int (*query_pkey)(struct ib_device *device, u32 port_num, u16 index,
2450 			  u16 *pkey);
2451 	int (*alloc_ucontext)(struct ib_ucontext *context,
2452 			      struct ib_udata *udata);
2453 	void (*dealloc_ucontext)(struct ib_ucontext *context);
2454 	int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
2455 	/**
2456 	 * This will be called once refcount of an entry in mmap_xa reaches
2457 	 * zero. The type of the memory that was mapped may differ between
2458 	 * entries and is opaque to the rdma_user_mmap interface.
2459 	 * Therefore needs to be implemented by the driver in mmap_free.
2460 	 */
2461 	void (*mmap_free)(struct rdma_user_mmap_entry *entry);
2462 	void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2463 	int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2464 	int (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2465 	int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
2466 			 struct ib_udata *udata);
2467 	int (*create_user_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
2468 			      struct ib_udata *udata);
2469 	int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2470 	int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2471 	int (*destroy_ah)(struct ib_ah *ah, u32 flags);
2472 	int (*create_srq)(struct ib_srq *srq,
2473 			  struct ib_srq_init_attr *srq_init_attr,
2474 			  struct ib_udata *udata);
2475 	int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
2476 			  enum ib_srq_attr_mask srq_attr_mask,
2477 			  struct ib_udata *udata);
2478 	int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
2479 	int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
2480 	int (*create_qp)(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr,
2481 			 struct ib_udata *udata);
2482 	int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2483 			 int qp_attr_mask, struct ib_udata *udata);
2484 	int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2485 			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
2486 	int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
2487 	int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
2488 			 struct uverbs_attr_bundle *attrs);
2489 	int (*create_cq_umem)(struct ib_cq *cq,
2490 			      const struct ib_cq_init_attr *attr,
2491 			      struct ib_umem *umem,
2492 			      struct uverbs_attr_bundle *attrs);
2493 	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
2494 	int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
2495 	int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
2496 	/**
2497 	 * pre_destroy_cq - Prevent a cq from generating any new work
2498 	 * completions, but not free any kernel resources
2499 	 */
2500 	int (*pre_destroy_cq)(struct ib_cq *cq);
2501 	/**
2502 	 * post_destroy_cq - Free all kernel resources
2503 	 */
2504 	void (*post_destroy_cq)(struct ib_cq *cq);
2505 	struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
2506 	struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
2507 				     u64 virt_addr, int mr_access_flags,
2508 				     struct ib_udata *udata);
2509 	struct ib_mr *(*reg_user_mr_dmabuf)(struct ib_pd *pd, u64 offset,
2510 					    u64 length, u64 virt_addr, int fd,
2511 					    int mr_access_flags,
2512 					    struct uverbs_attr_bundle *attrs);
2513 	struct ib_mr *(*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start,
2514 				       u64 length, u64 virt_addr,
2515 				       int mr_access_flags, struct ib_pd *pd,
2516 				       struct ib_udata *udata);
2517 	int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
2518 	struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
2519 				  u32 max_num_sg);
2520 	struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
2521 					    u32 max_num_data_sg,
2522 					    u32 max_num_meta_sg);
2523 	int (*advise_mr)(struct ib_pd *pd,
2524 			 enum ib_uverbs_advise_mr_advice advice, u32 flags,
2525 			 struct ib_sge *sg_list, u32 num_sge,
2526 			 struct uverbs_attr_bundle *attrs);
2527 
2528 	/*
2529 	 * Kernel users should universally support relaxed ordering (RO), as
2530 	 * they are designed to read data only after observing the CQE and use
2531 	 * the DMA API correctly.
2532 	 *
2533 	 * Some drivers implicitly enable RO if platform supports it.
2534 	 */
2535 	int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2536 			 unsigned int *sg_offset);
2537 	int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2538 			       struct ib_mr_status *mr_status);
2539 	int (*alloc_mw)(struct ib_mw *mw, struct ib_udata *udata);
2540 	int (*dealloc_mw)(struct ib_mw *mw);
2541 	int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2542 	int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2543 	int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2544 	int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2545 	struct ib_flow *(*create_flow)(struct ib_qp *qp,
2546 				       struct ib_flow_attr *flow_attr,
2547 				       struct ib_udata *udata);
2548 	int (*destroy_flow)(struct ib_flow *flow_id);
2549 	int (*destroy_flow_action)(struct ib_flow_action *action);
2550 	int (*set_vf_link_state)(struct ib_device *device, int vf, u32 port,
2551 				 int state);
2552 	int (*get_vf_config)(struct ib_device *device, int vf, u32 port,
2553 			     struct ifla_vf_info *ivf);
2554 	int (*get_vf_stats)(struct ib_device *device, int vf, u32 port,
2555 			    struct ifla_vf_stats *stats);
2556 	int (*get_vf_guid)(struct ib_device *device, int vf, u32 port,
2557 			    struct ifla_vf_guid *node_guid,
2558 			    struct ifla_vf_guid *port_guid);
2559 	int (*set_vf_guid)(struct ib_device *device, int vf, u32 port, u64 guid,
2560 			   int type);
2561 	struct ib_wq *(*create_wq)(struct ib_pd *pd,
2562 				   struct ib_wq_init_attr *init_attr,
2563 				   struct ib_udata *udata);
2564 	int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
2565 	int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
2566 			 u32 wq_attr_mask, struct ib_udata *udata);
2567 	int (*create_rwq_ind_table)(struct ib_rwq_ind_table *ib_rwq_ind_table,
2568 				    struct ib_rwq_ind_table_init_attr *init_attr,
2569 				    struct ib_udata *udata);
2570 	int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2571 	struct ib_dm *(*alloc_dm)(struct ib_device *device,
2572 				  struct ib_ucontext *context,
2573 				  struct ib_dm_alloc_attr *attr,
2574 				  struct uverbs_attr_bundle *attrs);
2575 	int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
2576 	struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
2577 				   struct ib_dm_mr_attr *attr,
2578 				   struct uverbs_attr_bundle *attrs);
2579 	int (*create_counters)(struct ib_counters *counters,
2580 			       struct uverbs_attr_bundle *attrs);
2581 	int (*destroy_counters)(struct ib_counters *counters);
2582 	int (*read_counters)(struct ib_counters *counters,
2583 			     struct ib_counters_read_attr *counters_read_attr,
2584 			     struct uverbs_attr_bundle *attrs);
2585 	int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg,
2586 			    int data_sg_nents, unsigned int *data_sg_offset,
2587 			    struct scatterlist *meta_sg, int meta_sg_nents,
2588 			    unsigned int *meta_sg_offset);
2589 
2590 	/**
2591 	 * alloc_hw_[device,port]_stats - Allocate a struct rdma_hw_stats and
2592 	 *   fill in the driver initialized data.  The struct is kfree()'ed by
2593 	 *   the sysfs core when the device is removed.  A lifespan of -1 in the
2594 	 *   return struct tells the core to set a default lifespan.
2595 	 */
2596 	struct rdma_hw_stats *(*alloc_hw_device_stats)(struct ib_device *device);
2597 	struct rdma_hw_stats *(*alloc_hw_port_stats)(struct ib_device *device,
2598 						     u32 port_num);
2599 	/**
2600 	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
2601 	 * @index - The index in the value array we wish to have updated, or
2602 	 *   num_counters if we want all stats updated
2603 	 * Return codes -
2604 	 *   < 0 - Error, no counters updated
2605 	 *   index - Updated the single counter pointed to by index
2606 	 *   num_counters - Updated all counters (will reset the timestamp
2607 	 *     and prevent further calls for lifespan milliseconds)
2608 	 * Drivers are allowed to update all counters in leiu of just the
2609 	 *   one given in index at their option
2610 	 */
2611 	int (*get_hw_stats)(struct ib_device *device,
2612 			    struct rdma_hw_stats *stats, u32 port, int index);
2613 
2614 	/**
2615 	 * modify_hw_stat - Modify the counter configuration
2616 	 * @enable: true/false when enable/disable a counter
2617 	 * Return codes - 0 on success or error code otherwise.
2618 	 */
2619 	int (*modify_hw_stat)(struct ib_device *device, u32 port,
2620 			      unsigned int counter_index, bool enable);
2621 	/**
2622 	 * Allows rdma drivers to add their own restrack attributes.
2623 	 */
2624 	int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
2625 	int (*fill_res_mr_entry_raw)(struct sk_buff *msg, struct ib_mr *ibmr);
2626 	int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq);
2627 	int (*fill_res_cq_entry_raw)(struct sk_buff *msg, struct ib_cq *ibcq);
2628 	int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp);
2629 	int (*fill_res_qp_entry_raw)(struct sk_buff *msg, struct ib_qp *ibqp);
2630 	int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id);
2631 	int (*fill_res_srq_entry)(struct sk_buff *msg, struct ib_srq *ib_srq);
2632 	int (*fill_res_srq_entry_raw)(struct sk_buff *msg, struct ib_srq *ib_srq);
2633 
2634 	/* Device lifecycle callbacks */
2635 	/*
2636 	 * Called after the device becomes registered, before clients are
2637 	 * attached
2638 	 */
2639 	int (*enable_driver)(struct ib_device *dev);
2640 	/*
2641 	 * This is called as part of ib_dealloc_device().
2642 	 */
2643 	void (*dealloc_driver)(struct ib_device *dev);
2644 
2645 	/* iWarp CM callbacks */
2646 	void (*iw_add_ref)(struct ib_qp *qp);
2647 	void (*iw_rem_ref)(struct ib_qp *qp);
2648 	struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
2649 	int (*iw_connect)(struct iw_cm_id *cm_id,
2650 			  struct iw_cm_conn_param *conn_param);
2651 	int (*iw_accept)(struct iw_cm_id *cm_id,
2652 			 struct iw_cm_conn_param *conn_param);
2653 	int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
2654 			 u8 pdata_len);
2655 	int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
2656 	int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
2657 	/**
2658 	 * counter_bind_qp - Bind a QP to a counter.
2659 	 * @counter - The counter to be bound. If counter->id is zero then
2660 	 *   the driver needs to allocate a new counter and set counter->id
2661 	 */
2662 	int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp,
2663 			       u32 port);
2664 	/**
2665 	 * counter_unbind_qp - Unbind the qp from the dynamically-allocated
2666 	 *   counter and bind it onto the default one
2667 	 */
2668 	int (*counter_unbind_qp)(struct ib_qp *qp, u32 port);
2669 	/**
2670 	 * counter_dealloc -De-allocate the hw counter
2671 	 */
2672 	int (*counter_dealloc)(struct rdma_counter *counter);
2673 	/**
2674 	 * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
2675 	 * the driver initialized data.
2676 	 */
2677 	struct rdma_hw_stats *(*counter_alloc_stats)(
2678 		struct rdma_counter *counter);
2679 	/**
2680 	 * counter_update_stats - Query the stats value of this counter
2681 	 */
2682 	int (*counter_update_stats)(struct rdma_counter *counter);
2683 
2684 	/**
2685 	 * counter_init - Initialize the driver specific rdma counter struct.
2686 	 */
2687 	void (*counter_init)(struct rdma_counter *counter);
2688 
2689 	/**
2690 	 * Allows rdma drivers to add their own restrack attributes
2691 	 * dumped via 'rdma stat' iproute2 command.
2692 	 */
2693 	int (*fill_stat_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
2694 
2695 	/* query driver for its ucontext properties */
2696 	int (*query_ucontext)(struct ib_ucontext *context,
2697 			      struct uverbs_attr_bundle *attrs);
2698 
2699 	/*
2700 	 * Provide NUMA node. This API exists for rdmavt/hfi1 only.
2701 	 * Everyone else relies on Linux memory management model.
2702 	 */
2703 	int (*get_numa_node)(struct ib_device *dev);
2704 
2705 	/**
2706 	 * add_sub_dev - Add a sub IB device
2707 	 */
2708 	struct ib_device *(*add_sub_dev)(struct ib_device *parent,
2709 					 enum rdma_nl_dev_type type,
2710 					 const char *name);
2711 
2712 	/**
2713 	 * del_sub_dev - Delete a sub IB device
2714 	 */
2715 	void (*del_sub_dev)(struct ib_device *sub_dev);
2716 
2717 	/**
2718 	 * ufile_cleanup - Attempt to cleanup ubojects HW resources inside
2719 	 * the ufile.
2720 	 */
2721 	void (*ufile_hw_cleanup)(struct ib_uverbs_file *ufile);
2722 
2723 	/**
2724 	 * report_port_event - Drivers need to implement this if they have
2725 	 * some private stuff to handle when link status changes.
2726 	 */
2727 	void (*report_port_event)(struct ib_device *ibdev,
2728 				  struct net_device *ndev, unsigned long event);
2729 
2730 	DECLARE_RDMA_OBJ_SIZE(ib_ah);
2731 	DECLARE_RDMA_OBJ_SIZE(ib_counters);
2732 	DECLARE_RDMA_OBJ_SIZE(ib_cq);
2733 	DECLARE_RDMA_OBJ_SIZE(ib_mw);
2734 	DECLARE_RDMA_OBJ_SIZE(ib_pd);
2735 	DECLARE_RDMA_OBJ_SIZE(ib_qp);
2736 	DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table);
2737 	DECLARE_RDMA_OBJ_SIZE(ib_srq);
2738 	DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
2739 	DECLARE_RDMA_OBJ_SIZE(ib_xrcd);
2740 	DECLARE_RDMA_OBJ_SIZE(rdma_counter);
2741 };
2742 
2743 struct ib_core_device {
2744 	/* device must be the first element in structure until,
2745 	 * union of ib_core_device and device exists in ib_device.
2746 	 */
2747 	struct device dev;
2748 	possible_net_t rdma_net;
2749 	struct kobject *ports_kobj;
2750 	struct list_head port_list;
2751 	struct ib_device *owner; /* reach back to owner ib_device */
2752 };
2753 
2754 struct rdma_restrack_root;
2755 struct ib_device {
2756 	/* Do not access @dma_device directly from ULP nor from HW drivers. */
2757 	struct device                *dma_device;
2758 	struct ib_device_ops	     ops;
2759 	char                          name[IB_DEVICE_NAME_MAX];
2760 	struct rcu_head rcu_head;
2761 
2762 	struct list_head              event_handler_list;
2763 	/* Protects event_handler_list */
2764 	struct rw_semaphore event_handler_rwsem;
2765 
2766 	/* Protects QP's event_handler calls and open_qp list */
2767 	spinlock_t qp_open_list_lock;
2768 
2769 	struct rw_semaphore	      client_data_rwsem;
2770 	struct xarray                 client_data;
2771 	struct mutex                  unregistration_lock;
2772 
2773 	/* Synchronize GID, Pkey cache entries, subnet prefix, LMC */
2774 	rwlock_t cache_lock;
2775 	/**
2776 	 * port_data is indexed by port number
2777 	 */
2778 	struct ib_port_data *port_data;
2779 
2780 	int			      num_comp_vectors;
2781 
2782 	union {
2783 		struct device		dev;
2784 		struct ib_core_device	coredev;
2785 	};
2786 
2787 	/* First group is for device attributes,
2788 	 * Second group is for driver provided attributes (optional).
2789 	 * Third group is for the hw_stats
2790 	 * It is a NULL terminated array.
2791 	 */
2792 	const struct attribute_group	*groups[4];
2793 	u8				hw_stats_attr_index;
2794 
2795 	u64			     uverbs_cmd_mask;
2796 
2797 	char			     node_desc[IB_DEVICE_NODE_DESC_MAX];
2798 	__be64			     node_guid;
2799 	u32			     local_dma_lkey;
2800 	u16                          is_switch:1;
2801 	/* Indicates kernel verbs support, should not be used in drivers */
2802 	u16                          kverbs_provider:1;
2803 	/* CQ adaptive moderation (RDMA DIM) */
2804 	u16                          use_cq_dim:1;
2805 	u8                           node_type;
2806 	u32			     phys_port_cnt;
2807 	struct ib_device_attr        attrs;
2808 	struct hw_stats_device_data *hw_stats_data;
2809 
2810 #ifdef CONFIG_CGROUP_RDMA
2811 	struct rdmacg_device         cg_device;
2812 #endif
2813 
2814 	u32                          index;
2815 
2816 	spinlock_t                   cq_pools_lock;
2817 	struct list_head             cq_pools[IB_POLL_LAST_POOL_TYPE + 1];
2818 
2819 	struct rdma_restrack_root *res;
2820 
2821 	const struct uapi_definition   *driver_def;
2822 
2823 	/*
2824 	 * Positive refcount indicates that the device is currently
2825 	 * registered and cannot be unregistered.
2826 	 */
2827 	refcount_t refcount;
2828 	struct completion unreg_completion;
2829 	struct work_struct unregistration_work;
2830 
2831 	const struct rdma_link_ops *link_ops;
2832 
2833 	/* Protects compat_devs xarray modifications */
2834 	struct mutex compat_devs_mutex;
2835 	/* Maintains compat devices for each net namespace */
2836 	struct xarray compat_devs;
2837 
2838 	/* Used by iWarp CM */
2839 	char iw_ifname[IFNAMSIZ];
2840 	u32 iw_driver_flags;
2841 	u32 lag_flags;
2842 
2843 	/* A parent device has a list of sub-devices */
2844 	struct mutex subdev_lock;
2845 	struct list_head subdev_list_head;
2846 
2847 	/* A sub device has a type and a parent */
2848 	enum rdma_nl_dev_type type;
2849 	struct ib_device *parent;
2850 	struct list_head subdev_list;
2851 
2852 	enum rdma_nl_name_assign_type name_assign_type;
2853 };
2854 
2855 static inline void *rdma_zalloc_obj(struct ib_device *dev, size_t size,
2856 				    gfp_t gfp, bool is_numa_aware)
2857 {
2858 	if (is_numa_aware && dev->ops.get_numa_node)
2859 		return kzalloc_node(size, gfp, dev->ops.get_numa_node(dev));
2860 
2861 	return kzalloc(size, gfp);
2862 }
2863 
2864 struct ib_client_nl_info;
2865 struct ib_client {
2866 	const char *name;
2867 	int (*add)(struct ib_device *ibdev);
2868 	void (*remove)(struct ib_device *, void *client_data);
2869 	void (*rename)(struct ib_device *dev, void *client_data);
2870 	int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
2871 			   struct ib_client_nl_info *res);
2872 	int (*get_global_nl_info)(struct ib_client_nl_info *res);
2873 
2874 	/* Returns the net_dev belonging to this ib_client and matching the
2875 	 * given parameters.
2876 	 * @dev:	 An RDMA device that the net_dev use for communication.
2877 	 * @port:	 A physical port number on the RDMA device.
2878 	 * @pkey:	 P_Key that the net_dev uses if applicable.
2879 	 * @gid:	 A GID that the net_dev uses to communicate.
2880 	 * @addr:	 An IP address the net_dev is configured with.
2881 	 * @client_data: The device's client data set by ib_set_client_data().
2882 	 *
2883 	 * An ib_client that implements a net_dev on top of RDMA devices
2884 	 * (such as IP over IB) should implement this callback, allowing the
2885 	 * rdma_cm module to find the right net_dev for a given request.
2886 	 *
2887 	 * The caller is responsible for calling dev_put on the returned
2888 	 * netdev. */
2889 	struct net_device *(*get_net_dev_by_params)(
2890 			struct ib_device *dev,
2891 			u32 port,
2892 			u16 pkey,
2893 			const union ib_gid *gid,
2894 			const struct sockaddr *addr,
2895 			void *client_data);
2896 
2897 	refcount_t uses;
2898 	struct completion uses_zero;
2899 	u32 client_id;
2900 
2901 	/* kverbs are not required by the client */
2902 	u8 no_kverbs_req:1;
2903 };
2904 
2905 /*
2906  * IB block DMA iterator
2907  *
2908  * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
2909  * to a HW supported page size.
2910  */
2911 struct ib_block_iter {
2912 	/* internal states */
2913 	struct scatterlist *__sg;	/* sg holding the current aligned block */
2914 	dma_addr_t __dma_addr;		/* unaligned DMA address of this block */
2915 	size_t __sg_numblocks;		/* ib_umem_num_dma_blocks() */
2916 	unsigned int __sg_nents;	/* number of SG entries */
2917 	unsigned int __sg_advance;	/* number of bytes to advance in sg in next step */
2918 	unsigned int __pg_bit;		/* alignment of current block */
2919 };
2920 
2921 struct ib_device *_ib_alloc_device(size_t size, struct net *net);
2922 #define ib_alloc_device(drv_struct, member)                                    \
2923 	container_of(_ib_alloc_device(sizeof(struct drv_struct) +              \
2924 				      BUILD_BUG_ON_ZERO(offsetof(              \
2925 					      struct drv_struct, member)),     \
2926 				      &init_net),			       \
2927 		     struct drv_struct, member)
2928 
2929 #define ib_alloc_device_with_net(drv_struct, member, net)		       \
2930 	container_of(_ib_alloc_device(sizeof(struct drv_struct) +              \
2931 				      BUILD_BUG_ON_ZERO(offsetof(              \
2932 					struct drv_struct, member)), net),     \
2933 		     struct drv_struct, member)
2934 
2935 void ib_dealloc_device(struct ib_device *device);
2936 
2937 void ib_get_device_fw_str(struct ib_device *device, char *str);
2938 
2939 int ib_register_device(struct ib_device *device, const char *name,
2940 		       struct device *dma_device);
2941 void ib_unregister_device(struct ib_device *device);
2942 void ib_unregister_driver(enum rdma_driver_id driver_id);
2943 void ib_unregister_device_and_put(struct ib_device *device);
2944 void ib_unregister_device_queued(struct ib_device *ib_dev);
2945 
2946 int ib_register_client   (struct ib_client *client);
2947 void ib_unregister_client(struct ib_client *client);
2948 
2949 void __rdma_block_iter_start(struct ib_block_iter *biter,
2950 			     struct scatterlist *sglist,
2951 			     unsigned int nents,
2952 			     unsigned long pgsz);
2953 bool __rdma_block_iter_next(struct ib_block_iter *biter);
2954 
2955 /**
2956  * rdma_block_iter_dma_address - get the aligned dma address of the current
2957  * block held by the block iterator.
2958  * @biter: block iterator holding the memory block
2959  */
2960 static inline dma_addr_t
2961 rdma_block_iter_dma_address(struct ib_block_iter *biter)
2962 {
2963 	return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
2964 }
2965 
2966 /**
2967  * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
2968  * @sglist: sglist to iterate over
2969  * @biter: block iterator holding the memory block
2970  * @nents: maximum number of sg entries to iterate over
2971  * @pgsz: best HW supported page size to use
2972  *
2973  * Callers may use rdma_block_iter_dma_address() to get each
2974  * blocks aligned DMA address.
2975  */
2976 #define rdma_for_each_block(sglist, biter, nents, pgsz)		\
2977 	for (__rdma_block_iter_start(biter, sglist, nents,	\
2978 				     pgsz);			\
2979 	     __rdma_block_iter_next(biter);)
2980 
2981 /**
2982  * ib_get_client_data - Get IB client context
2983  * @device:Device to get context for
2984  * @client:Client to get context for
2985  *
2986  * ib_get_client_data() returns the client context data set with
2987  * ib_set_client_data(). This can only be called while the client is
2988  * registered to the device, once the ib_client remove() callback returns this
2989  * cannot be called.
2990  */
2991 static inline void *ib_get_client_data(struct ib_device *device,
2992 				       struct ib_client *client)
2993 {
2994 	return xa_load(&device->client_data, client->client_id);
2995 }
2996 void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
2997 			 void *data);
2998 void ib_set_device_ops(struct ib_device *device,
2999 		       const struct ib_device_ops *ops);
3000 
3001 int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
3002 		      unsigned long pfn, unsigned long size, pgprot_t prot,
3003 		      struct rdma_user_mmap_entry *entry);
3004 int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
3005 				struct rdma_user_mmap_entry *entry,
3006 				size_t length);
3007 int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
3008 				      struct rdma_user_mmap_entry *entry,
3009 				      size_t length, u32 min_pgoff,
3010 				      u32 max_pgoff);
3011 
3012 #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
3013 void rdma_user_mmap_disassociate(struct ib_device *device);
3014 #else
3015 static inline void rdma_user_mmap_disassociate(struct ib_device *device)
3016 {
3017 }
3018 #endif
3019 
3020 static inline int
3021 rdma_user_mmap_entry_insert_exact(struct ib_ucontext *ucontext,
3022 				  struct rdma_user_mmap_entry *entry,
3023 				  size_t length, u32 pgoff)
3024 {
3025 	return rdma_user_mmap_entry_insert_range(ucontext, entry, length, pgoff,
3026 						 pgoff);
3027 }
3028 
3029 struct rdma_user_mmap_entry *
3030 rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
3031 			       unsigned long pgoff);
3032 struct rdma_user_mmap_entry *
3033 rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
3034 			 struct vm_area_struct *vma);
3035 void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry);
3036 
3037 void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry);
3038 
3039 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
3040 {
3041 	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
3042 }
3043 
3044 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
3045 {
3046 	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
3047 }
3048 
3049 static inline bool ib_is_buffer_cleared(const void __user *p,
3050 					size_t len)
3051 {
3052 	bool ret;
3053 	u8 *buf;
3054 
3055 	if (len > USHRT_MAX)
3056 		return false;
3057 
3058 	buf = memdup_user(p, len);
3059 	if (IS_ERR(buf))
3060 		return false;
3061 
3062 	ret = !memchr_inv(buf, 0, len);
3063 	kfree(buf);
3064 	return ret;
3065 }
3066 
3067 static inline bool ib_is_udata_cleared(struct ib_udata *udata,
3068 				       size_t offset,
3069 				       size_t len)
3070 {
3071 	return ib_is_buffer_cleared(udata->inbuf + offset, len);
3072 }
3073 
3074 /**
3075  * ib_modify_qp_is_ok - Check that the supplied attribute mask
3076  * contains all required attributes and no attributes not allowed for
3077  * the given QP state transition.
3078  * @cur_state: Current QP state
3079  * @next_state: Next QP state
3080  * @type: QP type
3081  * @mask: Mask of supplied QP attributes
3082  *
3083  * This function is a helper function that a low-level driver's
3084  * modify_qp method can use to validate the consumer's input.  It
3085  * checks that cur_state and next_state are valid QP states, that a
3086  * transition from cur_state to next_state is allowed by the IB spec,
3087  * and that the attribute mask supplied is allowed for the transition.
3088  */
3089 bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
3090 			enum ib_qp_type type, enum ib_qp_attr_mask mask);
3091 
3092 void ib_register_event_handler(struct ib_event_handler *event_handler);
3093 void ib_unregister_event_handler(struct ib_event_handler *event_handler);
3094 void ib_dispatch_event(const struct ib_event *event);
3095 
3096 int ib_query_port(struct ib_device *device,
3097 		  u32 port_num, struct ib_port_attr *port_attr);
3098 
3099 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
3100 					       u32 port_num);
3101 
3102 /**
3103  * rdma_cap_ib_switch - Check if the device is IB switch
3104  * @device: Device to check
3105  *
3106  * Device driver is responsible for setting is_switch bit on
3107  * in ib_device structure at init time.
3108  *
3109  * Return: true if the device is IB switch.
3110  */
3111 static inline bool rdma_cap_ib_switch(const struct ib_device *device)
3112 {
3113 	return device->is_switch;
3114 }
3115 
3116 /**
3117  * rdma_start_port - Return the first valid port number for the device
3118  * specified
3119  *
3120  * @device: Device to be checked
3121  *
3122  * Return start port number
3123  */
3124 static inline u32 rdma_start_port(const struct ib_device *device)
3125 {
3126 	return rdma_cap_ib_switch(device) ? 0 : 1;
3127 }
3128 
3129 /**
3130  * rdma_for_each_port - Iterate over all valid port numbers of the IB device
3131  * @device - The struct ib_device * to iterate over
3132  * @iter - The unsigned int to store the port number
3133  */
3134 #define rdma_for_each_port(device, iter)                                       \
3135 	for (iter = rdma_start_port(device +				       \
3136 				    BUILD_BUG_ON_ZERO(!__same_type(u32,	       \
3137 								   iter)));    \
3138 	     iter <= rdma_end_port(device); iter++)
3139 
3140 /**
3141  * rdma_end_port - Return the last valid port number for the device
3142  * specified
3143  *
3144  * @device: Device to be checked
3145  *
3146  * Return last port number
3147  */
3148 static inline u32 rdma_end_port(const struct ib_device *device)
3149 {
3150 	return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
3151 }
3152 
3153 static inline int rdma_is_port_valid(const struct ib_device *device,
3154 				     unsigned int port)
3155 {
3156 	return (port >= rdma_start_port(device) &&
3157 		port <= rdma_end_port(device));
3158 }
3159 
3160 static inline bool rdma_is_grh_required(const struct ib_device *device,
3161 					u32 port_num)
3162 {
3163 	return device->port_data[port_num].immutable.core_cap_flags &
3164 	       RDMA_CORE_PORT_IB_GRH_REQUIRED;
3165 }
3166 
3167 static inline bool rdma_protocol_ib(const struct ib_device *device,
3168 				    u32 port_num)
3169 {
3170 	return device->port_data[port_num].immutable.core_cap_flags &
3171 	       RDMA_CORE_CAP_PROT_IB;
3172 }
3173 
3174 static inline bool rdma_protocol_roce(const struct ib_device *device,
3175 				      u32 port_num)
3176 {
3177 	return device->port_data[port_num].immutable.core_cap_flags &
3178 	       (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
3179 }
3180 
3181 static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device,
3182 						u32 port_num)
3183 {
3184 	return device->port_data[port_num].immutable.core_cap_flags &
3185 	       RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
3186 }
3187 
3188 static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device,
3189 						u32 port_num)
3190 {
3191 	return device->port_data[port_num].immutable.core_cap_flags &
3192 	       RDMA_CORE_CAP_PROT_ROCE;
3193 }
3194 
3195 static inline bool rdma_protocol_iwarp(const struct ib_device *device,
3196 				       u32 port_num)
3197 {
3198 	return device->port_data[port_num].immutable.core_cap_flags &
3199 	       RDMA_CORE_CAP_PROT_IWARP;
3200 }
3201 
3202 static inline bool rdma_ib_or_roce(const struct ib_device *device,
3203 				   u32 port_num)
3204 {
3205 	return rdma_protocol_ib(device, port_num) ||
3206 		rdma_protocol_roce(device, port_num);
3207 }
3208 
3209 static inline bool rdma_protocol_raw_packet(const struct ib_device *device,
3210 					    u32 port_num)
3211 {
3212 	return device->port_data[port_num].immutable.core_cap_flags &
3213 	       RDMA_CORE_CAP_PROT_RAW_PACKET;
3214 }
3215 
3216 static inline bool rdma_protocol_usnic(const struct ib_device *device,
3217 				       u32 port_num)
3218 {
3219 	return device->port_data[port_num].immutable.core_cap_flags &
3220 	       RDMA_CORE_CAP_PROT_USNIC;
3221 }
3222 
3223 /**
3224  * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
3225  * Management Datagrams.
3226  * @device: Device to check
3227  * @port_num: Port number to check
3228  *
3229  * Management Datagrams (MAD) are a required part of the InfiniBand
3230  * specification and are supported on all InfiniBand devices.  A slightly
3231  * extended version are also supported on OPA interfaces.
3232  *
3233  * Return: true if the port supports sending/receiving of MAD packets.
3234  */
3235 static inline bool rdma_cap_ib_mad(const struct ib_device *device, u32 port_num)
3236 {
3237 	return device->port_data[port_num].immutable.core_cap_flags &
3238 	       RDMA_CORE_CAP_IB_MAD;
3239 }
3240 
3241 /**
3242  * rdma_cap_opa_mad - Check if the port of device provides support for OPA
3243  * Management Datagrams.
3244  * @device: Device to check
3245  * @port_num: Port number to check
3246  *
3247  * Intel OmniPath devices extend and/or replace the InfiniBand Management
3248  * datagrams with their own versions.  These OPA MADs share many but not all of
3249  * the characteristics of InfiniBand MADs.
3250  *
3251  * OPA MADs differ in the following ways:
3252  *
3253  *    1) MADs are variable size up to 2K
3254  *       IBTA defined MADs remain fixed at 256 bytes
3255  *    2) OPA SMPs must carry valid PKeys
3256  *    3) OPA SMP packets are a different format
3257  *
3258  * Return: true if the port supports OPA MAD packet formats.
3259  */
3260 static inline bool rdma_cap_opa_mad(struct ib_device *device, u32 port_num)
3261 {
3262 	return device->port_data[port_num].immutable.core_cap_flags &
3263 		RDMA_CORE_CAP_OPA_MAD;
3264 }
3265 
3266 /**
3267  * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
3268  * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
3269  * @device: Device to check
3270  * @port_num: Port number to check
3271  *
3272  * Each InfiniBand node is required to provide a Subnet Management Agent
3273  * that the subnet manager can access.  Prior to the fabric being fully
3274  * configured by the subnet manager, the SMA is accessed via a well known
3275  * interface called the Subnet Management Interface (SMI).  This interface
3276  * uses directed route packets to communicate with the SM to get around the
3277  * chicken and egg problem of the SM needing to know what's on the fabric
3278  * in order to configure the fabric, and needing to configure the fabric in
3279  * order to send packets to the devices on the fabric.  These directed
3280  * route packets do not need the fabric fully configured in order to reach
3281  * their destination.  The SMI is the only method allowed to send
3282  * directed route packets on an InfiniBand fabric.
3283  *
3284  * Return: true if the port provides an SMI.
3285  */
3286 static inline bool rdma_cap_ib_smi(const struct ib_device *device, u32 port_num)
3287 {
3288 	return device->port_data[port_num].immutable.core_cap_flags &
3289 	       RDMA_CORE_CAP_IB_SMI;
3290 }
3291 
3292 /**
3293  * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
3294  * Communication Manager.
3295  * @device: Device to check
3296  * @port_num: Port number to check
3297  *
3298  * The InfiniBand Communication Manager is one of many pre-defined General
3299  * Service Agents (GSA) that are accessed via the General Service
3300  * Interface (GSI).  It's role is to facilitate establishment of connections
3301  * between nodes as well as other management related tasks for established
3302  * connections.
3303  *
3304  * Return: true if the port supports an IB CM (this does not guarantee that
3305  * a CM is actually running however).
3306  */
3307 static inline bool rdma_cap_ib_cm(const struct ib_device *device, u32 port_num)
3308 {
3309 	return device->port_data[port_num].immutable.core_cap_flags &
3310 	       RDMA_CORE_CAP_IB_CM;
3311 }
3312 
3313 /**
3314  * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
3315  * Communication Manager.
3316  * @device: Device to check
3317  * @port_num: Port number to check
3318  *
3319  * Similar to above, but specific to iWARP connections which have a different
3320  * managment protocol than InfiniBand.
3321  *
3322  * Return: true if the port supports an iWARP CM (this does not guarantee that
3323  * a CM is actually running however).
3324  */
3325 static inline bool rdma_cap_iw_cm(const struct ib_device *device, u32 port_num)
3326 {
3327 	return device->port_data[port_num].immutable.core_cap_flags &
3328 	       RDMA_CORE_CAP_IW_CM;
3329 }
3330 
3331 /**
3332  * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
3333  * Subnet Administration.
3334  * @device: Device to check
3335  * @port_num: Port number to check
3336  *
3337  * An InfiniBand Subnet Administration (SA) service is a pre-defined General
3338  * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
3339  * fabrics, devices should resolve routes to other hosts by contacting the
3340  * SA to query the proper route.
3341  *
3342  * Return: true if the port should act as a client to the fabric Subnet
3343  * Administration interface.  This does not imply that the SA service is
3344  * running locally.
3345  */
3346 static inline bool rdma_cap_ib_sa(const struct ib_device *device, u32 port_num)
3347 {
3348 	return device->port_data[port_num].immutable.core_cap_flags &
3349 	       RDMA_CORE_CAP_IB_SA;
3350 }
3351 
3352 /**
3353  * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
3354  * Multicast.
3355  * @device: Device to check
3356  * @port_num: Port number to check
3357  *
3358  * InfiniBand multicast registration is more complex than normal IPv4 or
3359  * IPv6 multicast registration.  Each Host Channel Adapter must register
3360  * with the Subnet Manager when it wishes to join a multicast group.  It
3361  * should do so only once regardless of how many queue pairs it subscribes
3362  * to this group.  And it should leave the group only after all queue pairs
3363  * attached to the group have been detached.
3364  *
3365  * Return: true if the port must undertake the additional adminstrative
3366  * overhead of registering/unregistering with the SM and tracking of the
3367  * total number of queue pairs attached to the multicast group.
3368  */
3369 static inline bool rdma_cap_ib_mcast(const struct ib_device *device,
3370 				     u32 port_num)
3371 {
3372 	return rdma_cap_ib_sa(device, port_num);
3373 }
3374 
3375 /**
3376  * rdma_cap_af_ib - Check if the port of device has the capability
3377  * Native Infiniband Address.
3378  * @device: Device to check
3379  * @port_num: Port number to check
3380  *
3381  * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
3382  * GID.  RoCE uses a different mechanism, but still generates a GID via
3383  * a prescribed mechanism and port specific data.
3384  *
3385  * Return: true if the port uses a GID address to identify devices on the
3386  * network.
3387  */
3388 static inline bool rdma_cap_af_ib(const struct ib_device *device, u32 port_num)
3389 {
3390 	return device->port_data[port_num].immutable.core_cap_flags &
3391 	       RDMA_CORE_CAP_AF_IB;
3392 }
3393 
3394 /**
3395  * rdma_cap_eth_ah - Check if the port of device has the capability
3396  * Ethernet Address Handle.
3397  * @device: Device to check
3398  * @port_num: Port number to check
3399  *
3400  * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
3401  * to fabricate GIDs over Ethernet/IP specific addresses native to the
3402  * port.  Normally, packet headers are generated by the sending host
3403  * adapter, but when sending connectionless datagrams, we must manually
3404  * inject the proper headers for the fabric we are communicating over.
3405  *
3406  * Return: true if we are running as a RoCE port and must force the
3407  * addition of a Global Route Header built from our Ethernet Address
3408  * Handle into our header list for connectionless packets.
3409  */
3410 static inline bool rdma_cap_eth_ah(const struct ib_device *device, u32 port_num)
3411 {
3412 	return device->port_data[port_num].immutable.core_cap_flags &
3413 	       RDMA_CORE_CAP_ETH_AH;
3414 }
3415 
3416 /**
3417  * rdma_cap_opa_ah - Check if the port of device supports
3418  * OPA Address handles
3419  * @device: Device to check
3420  * @port_num: Port number to check
3421  *
3422  * Return: true if we are running on an OPA device which supports
3423  * the extended OPA addressing.
3424  */
3425 static inline bool rdma_cap_opa_ah(struct ib_device *device, u32 port_num)
3426 {
3427 	return (device->port_data[port_num].immutable.core_cap_flags &
3428 		RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
3429 }
3430 
3431 /**
3432  * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
3433  *
3434  * @device: Device
3435  * @port_num: Port number
3436  *
3437  * This MAD size includes the MAD headers and MAD payload.  No other headers
3438  * are included.
3439  *
3440  * Return the max MAD size required by the Port.  Will return 0 if the port
3441  * does not support MADs
3442  */
3443 static inline size_t rdma_max_mad_size(const struct ib_device *device,
3444 				       u32 port_num)
3445 {
3446 	return device->port_data[port_num].immutable.max_mad_size;
3447 }
3448 
3449 /**
3450  * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
3451  * @device: Device to check
3452  * @port_num: Port number to check
3453  *
3454  * RoCE GID table mechanism manages the various GIDs for a device.
3455  *
3456  * NOTE: if allocating the port's GID table has failed, this call will still
3457  * return true, but any RoCE GID table API will fail.
3458  *
3459  * Return: true if the port uses RoCE GID table mechanism in order to manage
3460  * its GIDs.
3461  */
3462 static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
3463 					   u32 port_num)
3464 {
3465 	return rdma_protocol_roce(device, port_num) &&
3466 		device->ops.add_gid && device->ops.del_gid;
3467 }
3468 
3469 /*
3470  * Check if the device supports READ W/ INVALIDATE.
3471  */
3472 static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
3473 {
3474 	/*
3475 	 * iWarp drivers must support READ W/ INVALIDATE.  No other protocol
3476 	 * has support for it yet.
3477 	 */
3478 	return rdma_protocol_iwarp(dev, port_num);
3479 }
3480 
3481 /**
3482  * rdma_core_cap_opa_port - Return whether the RDMA Port is OPA or not.
3483  * @device: Device
3484  * @port_num: 1 based Port number
3485  *
3486  * Return true if port is an Intel OPA port , false if not
3487  */
3488 static inline bool rdma_core_cap_opa_port(struct ib_device *device,
3489 					  u32 port_num)
3490 {
3491 	return (device->port_data[port_num].immutable.core_cap_flags &
3492 		RDMA_CORE_PORT_INTEL_OPA) == RDMA_CORE_PORT_INTEL_OPA;
3493 }
3494 
3495 /**
3496  * rdma_mtu_enum_to_int - Return the mtu of the port as an integer value.
3497  * @device: Device
3498  * @port_num: Port number
3499  * @mtu: enum value of MTU
3500  *
3501  * Return the MTU size supported by the port as an integer value. Will return
3502  * -1 if enum value of mtu is not supported.
3503  */
3504 static inline int rdma_mtu_enum_to_int(struct ib_device *device, u32 port,
3505 				       int mtu)
3506 {
3507 	if (rdma_core_cap_opa_port(device, port))
3508 		return opa_mtu_enum_to_int((enum opa_mtu)mtu);
3509 	else
3510 		return ib_mtu_enum_to_int((enum ib_mtu)mtu);
3511 }
3512 
3513 /**
3514  * rdma_mtu_from_attr - Return the mtu of the port from the port attribute.
3515  * @device: Device
3516  * @port_num: Port number
3517  * @attr: port attribute
3518  *
3519  * Return the MTU size supported by the port as an integer value.
3520  */
3521 static inline int rdma_mtu_from_attr(struct ib_device *device, u32 port,
3522 				     struct ib_port_attr *attr)
3523 {
3524 	if (rdma_core_cap_opa_port(device, port))
3525 		return attr->phys_mtu;
3526 	else
3527 		return ib_mtu_enum_to_int(attr->max_mtu);
3528 }
3529 
3530 int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port,
3531 			 int state);
3532 int ib_get_vf_config(struct ib_device *device, int vf, u32 port,
3533 		     struct ifla_vf_info *info);
3534 int ib_get_vf_stats(struct ib_device *device, int vf, u32 port,
3535 		    struct ifla_vf_stats *stats);
3536 int ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
3537 		    struct ifla_vf_guid *node_guid,
3538 		    struct ifla_vf_guid *port_guid);
3539 int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid,
3540 		   int type);
3541 
3542 int ib_query_pkey(struct ib_device *device,
3543 		  u32 port_num, u16 index, u16 *pkey);
3544 
3545 int ib_modify_device(struct ib_device *device,
3546 		     int device_modify_mask,
3547 		     struct ib_device_modify *device_modify);
3548 
3549 int ib_modify_port(struct ib_device *device,
3550 		   u32 port_num, int port_modify_mask,
3551 		   struct ib_port_modify *port_modify);
3552 
3553 int ib_find_gid(struct ib_device *device, union ib_gid *gid,
3554 		u32 *port_num, u16 *index);
3555 
3556 int ib_find_pkey(struct ib_device *device,
3557 		 u32 port_num, u16 pkey, u16 *index);
3558 
3559 enum ib_pd_flags {
3560 	/*
3561 	 * Create a memory registration for all memory in the system and place
3562 	 * the rkey for it into pd->unsafe_global_rkey.  This can be used by
3563 	 * ULPs to avoid the overhead of dynamic MRs.
3564 	 *
3565 	 * This flag is generally considered unsafe and must only be used in
3566 	 * extremly trusted environments.  Every use of it will log a warning
3567 	 * in the kernel log.
3568 	 */
3569 	IB_PD_UNSAFE_GLOBAL_RKEY	= 0x01,
3570 };
3571 
3572 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
3573 		const char *caller);
3574 
3575 /**
3576  * ib_alloc_pd - Allocates an unused protection domain.
3577  * @device: The device on which to allocate the protection domain.
3578  * @flags: protection domain flags
3579  *
3580  * A protection domain object provides an association between QPs, shared
3581  * receive queues, address handles, memory regions, and memory windows.
3582  *
3583  * Every PD has a local_dma_lkey which can be used as the lkey value for local
3584  * memory operations.
3585  */
3586 #define ib_alloc_pd(device, flags) \
3587 	__ib_alloc_pd((device), (flags), KBUILD_MODNAME)
3588 
3589 int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
3590 
3591 /**
3592  * ib_dealloc_pd - Deallocate kernel PD
3593  * @pd: The protection domain
3594  *
3595  * NOTE: for user PD use ib_dealloc_pd_user with valid udata!
3596  */
3597 static inline void ib_dealloc_pd(struct ib_pd *pd)
3598 {
3599 	int ret = ib_dealloc_pd_user(pd, NULL);
3600 
3601 	WARN_ONCE(ret, "Destroy of kernel PD shouldn't fail");
3602 }
3603 
3604 enum rdma_create_ah_flags {
3605 	/* In a sleepable context */
3606 	RDMA_CREATE_AH_SLEEPABLE = BIT(0),
3607 };
3608 
3609 /**
3610  * rdma_create_ah - Creates an address handle for the given address vector.
3611  * @pd: The protection domain associated with the address handle.
3612  * @ah_attr: The attributes of the address vector.
3613  * @flags: Create address handle flags (see enum rdma_create_ah_flags).
3614  *
3615  * The address handle is used to reference a local or global destination
3616  * in all UD QP post sends.
3617  */
3618 struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
3619 			     u32 flags);
3620 
3621 /**
3622  * rdma_create_user_ah - Creates an address handle for the given address vector.
3623  * It resolves destination mac address for ah attribute of RoCE type.
3624  * @pd: The protection domain associated with the address handle.
3625  * @ah_attr: The attributes of the address vector.
3626  * @udata: pointer to user's input output buffer information need by
3627  *         provider driver.
3628  *
3629  * It returns 0 on success and returns appropriate error code on error.
3630  * The address handle is used to reference a local or global destination
3631  * in all UD QP post sends.
3632  */
3633 struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
3634 				  struct rdma_ah_attr *ah_attr,
3635 				  struct ib_udata *udata);
3636 /**
3637  * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
3638  *   work completion.
3639  * @hdr: the L3 header to parse
3640  * @net_type: type of header to parse
3641  * @sgid: place to store source gid
3642  * @dgid: place to store destination gid
3643  */
3644 int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
3645 			      enum rdma_network_type net_type,
3646 			      union ib_gid *sgid, union ib_gid *dgid);
3647 
3648 /**
3649  * ib_get_rdma_header_version - Get the header version
3650  * @hdr: the L3 header to parse
3651  */
3652 int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3653 
3654 /**
3655  * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
3656  *   work completion.
3657  * @device: Device on which the received message arrived.
3658  * @port_num: Port on which the received message arrived.
3659  * @wc: Work completion associated with the received message.
3660  * @grh: References the received global route header.  This parameter is
3661  *   ignored unless the work completion indicates that the GRH is valid.
3662  * @ah_attr: Returned attributes that can be used when creating an address
3663  *   handle for replying to the message.
3664  * When ib_init_ah_attr_from_wc() returns success,
3665  * (a) for IB link layer it optionally contains a reference to SGID attribute
3666  * when GRH is present for IB link layer.
3667  * (b) for RoCE link layer it contains a reference to SGID attribute.
3668  * User must invoke rdma_cleanup_ah_attr_gid_attr() to release reference to SGID
3669  * attributes which are initialized using ib_init_ah_attr_from_wc().
3670  *
3671  */
3672 int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num,
3673 			    const struct ib_wc *wc, const struct ib_grh *grh,
3674 			    struct rdma_ah_attr *ah_attr);
3675 
3676 /**
3677  * ib_create_ah_from_wc - Creates an address handle associated with the
3678  *   sender of the specified work completion.
3679  * @pd: The protection domain associated with the address handle.
3680  * @wc: Work completion information associated with a received message.
3681  * @grh: References the received global route header.  This parameter is
3682  *   ignored unless the work completion indicates that the GRH is valid.
3683  * @port_num: The outbound port number to associate with the address.
3684  *
3685  * The address handle is used to reference a local or global destination
3686  * in all UD QP post sends.
3687  */
3688 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
3689 				   const struct ib_grh *grh, u32 port_num);
3690 
3691 /**
3692  * rdma_modify_ah - Modifies the address vector associated with an address
3693  *   handle.
3694  * @ah: The address handle to modify.
3695  * @ah_attr: The new address vector attributes to associate with the
3696  *   address handle.
3697  */
3698 int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3699 
3700 /**
3701  * rdma_query_ah - Queries the address vector associated with an address
3702  *   handle.
3703  * @ah: The address handle to query.
3704  * @ah_attr: The address vector attributes associated with the address
3705  *   handle.
3706  */
3707 int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3708 
3709 enum rdma_destroy_ah_flags {
3710 	/* In a sleepable context */
3711 	RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
3712 };
3713 
3714 /**
3715  * rdma_destroy_ah_user - Destroys an address handle.
3716  * @ah: The address handle to destroy.
3717  * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3718  * @udata: Valid user data or NULL for kernel objects
3719  */
3720 int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
3721 
3722 /**
3723  * rdma_destroy_ah - Destroys an kernel address handle.
3724  * @ah: The address handle to destroy.
3725  * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3726  *
3727  * NOTE: for user ah use rdma_destroy_ah_user with valid udata!
3728  */
3729 static inline void rdma_destroy_ah(struct ib_ah *ah, u32 flags)
3730 {
3731 	int ret = rdma_destroy_ah_user(ah, flags, NULL);
3732 
3733 	WARN_ONCE(ret, "Destroy of kernel AH shouldn't fail");
3734 }
3735 
3736 struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
3737 				  struct ib_srq_init_attr *srq_init_attr,
3738 				  struct ib_usrq_object *uobject,
3739 				  struct ib_udata *udata);
3740 static inline struct ib_srq *
3741 ib_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *srq_init_attr)
3742 {
3743 	if (!pd->device->ops.create_srq)
3744 		return ERR_PTR(-EOPNOTSUPP);
3745 
3746 	return ib_create_srq_user(pd, srq_init_attr, NULL, NULL);
3747 }
3748 
3749 /**
3750  * ib_modify_srq - Modifies the attributes for the specified SRQ.
3751  * @srq: The SRQ to modify.
3752  * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
3753  *   the current values of selected SRQ attributes are returned.
3754  * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
3755  *   are being modified.
3756  *
3757  * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
3758  * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
3759  * the number of receives queued drops below the limit.
3760  */
3761 int ib_modify_srq(struct ib_srq *srq,
3762 		  struct ib_srq_attr *srq_attr,
3763 		  enum ib_srq_attr_mask srq_attr_mask);
3764 
3765 /**
3766  * ib_query_srq - Returns the attribute list and current values for the
3767  *   specified SRQ.
3768  * @srq: The SRQ to query.
3769  * @srq_attr: The attributes of the specified SRQ.
3770  */
3771 int ib_query_srq(struct ib_srq *srq,
3772 		 struct ib_srq_attr *srq_attr);
3773 
3774 /**
3775  * ib_destroy_srq_user - Destroys the specified SRQ.
3776  * @srq: The SRQ to destroy.
3777  * @udata: Valid user data or NULL for kernel objects
3778  */
3779 int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
3780 
3781 /**
3782  * ib_destroy_srq - Destroys the specified kernel SRQ.
3783  * @srq: The SRQ to destroy.
3784  *
3785  * NOTE: for user srq use ib_destroy_srq_user with valid udata!
3786  */
3787 static inline void ib_destroy_srq(struct ib_srq *srq)
3788 {
3789 	int ret = ib_destroy_srq_user(srq, NULL);
3790 
3791 	WARN_ONCE(ret, "Destroy of kernel SRQ shouldn't fail");
3792 }
3793 
3794 /**
3795  * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
3796  * @srq: The SRQ to post the work request on.
3797  * @recv_wr: A list of work requests to post on the receive queue.
3798  * @bad_recv_wr: On an immediate failure, this parameter will reference
3799  *   the work request that failed to be posted on the QP.
3800  */
3801 static inline int ib_post_srq_recv(struct ib_srq *srq,
3802 				   const struct ib_recv_wr *recv_wr,
3803 				   const struct ib_recv_wr **bad_recv_wr)
3804 {
3805 	const struct ib_recv_wr *dummy;
3806 
3807 	return srq->device->ops.post_srq_recv(srq, recv_wr,
3808 					      bad_recv_wr ? : &dummy);
3809 }
3810 
3811 struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd,
3812 				  struct ib_qp_init_attr *qp_init_attr,
3813 				  const char *caller);
3814 /**
3815  * ib_create_qp - Creates a kernel QP associated with the specific protection
3816  * domain.
3817  * @pd: The protection domain associated with the QP.
3818  * @init_attr: A list of initial attributes required to create the
3819  *   QP.  If QP creation succeeds, then the attributes are updated to
3820  *   the actual capabilities of the created QP.
3821  */
3822 static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
3823 					 struct ib_qp_init_attr *init_attr)
3824 {
3825 	return ib_create_qp_kernel(pd, init_attr, KBUILD_MODNAME);
3826 }
3827 
3828 /**
3829  * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
3830  * @qp: The QP to modify.
3831  * @attr: On input, specifies the QP attributes to modify.  On output,
3832  *   the current values of selected QP attributes are returned.
3833  * @attr_mask: A bit-mask used to specify which attributes of the QP
3834  *   are being modified.
3835  * @udata: pointer to user's input output buffer information
3836  *   are being modified.
3837  * It returns 0 on success and returns appropriate error code on error.
3838  */
3839 int ib_modify_qp_with_udata(struct ib_qp *qp,
3840 			    struct ib_qp_attr *attr,
3841 			    int attr_mask,
3842 			    struct ib_udata *udata);
3843 
3844 /**
3845  * ib_modify_qp - Modifies the attributes for the specified QP and then
3846  *   transitions the QP to the given state.
3847  * @qp: The QP to modify.
3848  * @qp_attr: On input, specifies the QP attributes to modify.  On output,
3849  *   the current values of selected QP attributes are returned.
3850  * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
3851  *   are being modified.
3852  */
3853 int ib_modify_qp(struct ib_qp *qp,
3854 		 struct ib_qp_attr *qp_attr,
3855 		 int qp_attr_mask);
3856 
3857 /**
3858  * ib_query_qp - Returns the attribute list and current values for the
3859  *   specified QP.
3860  * @qp: The QP to query.
3861  * @qp_attr: The attributes of the specified QP.
3862  * @qp_attr_mask: A bit-mask used to select specific attributes to query.
3863  * @qp_init_attr: Additional attributes of the selected QP.
3864  *
3865  * The qp_attr_mask may be used to limit the query to gathering only the
3866  * selected attributes.
3867  */
3868 int ib_query_qp(struct ib_qp *qp,
3869 		struct ib_qp_attr *qp_attr,
3870 		int qp_attr_mask,
3871 		struct ib_qp_init_attr *qp_init_attr);
3872 
3873 /**
3874  * ib_destroy_qp - Destroys the specified QP.
3875  * @qp: The QP to destroy.
3876  * @udata: Valid udata or NULL for kernel objects
3877  */
3878 int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);
3879 
3880 /**
3881  * ib_destroy_qp - Destroys the specified kernel QP.
3882  * @qp: The QP to destroy.
3883  *
3884  * NOTE: for user qp use ib_destroy_qp_user with valid udata!
3885  */
3886 static inline int ib_destroy_qp(struct ib_qp *qp)
3887 {
3888 	return ib_destroy_qp_user(qp, NULL);
3889 }
3890 
3891 /**
3892  * ib_open_qp - Obtain a reference to an existing sharable QP.
3893  * @xrcd - XRC domain
3894  * @qp_open_attr: Attributes identifying the QP to open.
3895  *
3896  * Returns a reference to a sharable QP.
3897  */
3898 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3899 			 struct ib_qp_open_attr *qp_open_attr);
3900 
3901 /**
3902  * ib_close_qp - Release an external reference to a QP.
3903  * @qp: The QP handle to release
3904  *
3905  * The opened QP handle is released by the caller.  The underlying
3906  * shared QP is not destroyed until all internal references are released.
3907  */
3908 int ib_close_qp(struct ib_qp *qp);
3909 
3910 /**
3911  * ib_post_send - Posts a list of work requests to the send queue of
3912  *   the specified QP.
3913  * @qp: The QP to post the work request on.
3914  * @send_wr: A list of work requests to post on the send queue.
3915  * @bad_send_wr: On an immediate failure, this parameter will reference
3916  *   the work request that failed to be posted on the QP.
3917  *
3918  * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
3919  * error is returned, the QP state shall not be affected,
3920  * ib_post_send() will return an immediate error after queueing any
3921  * earlier work requests in the list.
3922  */
3923 static inline int ib_post_send(struct ib_qp *qp,
3924 			       const struct ib_send_wr *send_wr,
3925 			       const struct ib_send_wr **bad_send_wr)
3926 {
3927 	const struct ib_send_wr *dummy;
3928 
3929 	return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
3930 }
3931 
3932 /**
3933  * ib_post_recv - Posts a list of work requests to the receive queue of
3934  *   the specified QP.
3935  * @qp: The QP to post the work request on.
3936  * @recv_wr: A list of work requests to post on the receive queue.
3937  * @bad_recv_wr: On an immediate failure, this parameter will reference
3938  *   the work request that failed to be posted on the QP.
3939  */
3940 static inline int ib_post_recv(struct ib_qp *qp,
3941 			       const struct ib_recv_wr *recv_wr,
3942 			       const struct ib_recv_wr **bad_recv_wr)
3943 {
3944 	const struct ib_recv_wr *dummy;
3945 
3946 	return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
3947 }
3948 
3949 struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
3950 			    int comp_vector, enum ib_poll_context poll_ctx,
3951 			    const char *caller);
3952 static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
3953 					int nr_cqe, int comp_vector,
3954 					enum ib_poll_context poll_ctx)
3955 {
3956 	return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
3957 			     KBUILD_MODNAME);
3958 }
3959 
3960 struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
3961 				int nr_cqe, enum ib_poll_context poll_ctx,
3962 				const char *caller);
3963 
3964 /**
3965  * ib_alloc_cq_any: Allocate kernel CQ
3966  * @dev: The IB device
3967  * @private: Private data attached to the CQE
3968  * @nr_cqe: Number of CQEs in the CQ
3969  * @poll_ctx: Context used for polling the CQ
3970  */
3971 static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
3972 					    void *private, int nr_cqe,
3973 					    enum ib_poll_context poll_ctx)
3974 {
3975 	return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
3976 				 KBUILD_MODNAME);
3977 }
3978 
3979 void ib_free_cq(struct ib_cq *cq);
3980 int ib_process_cq_direct(struct ib_cq *cq, int budget);
3981 
3982 /**
3983  * ib_create_cq - Creates a CQ on the specified device.
3984  * @device: The device on which to create the CQ.
3985  * @comp_handler: A user-specified callback that is invoked when a
3986  *   completion event occurs on the CQ.
3987  * @event_handler: A user-specified callback that is invoked when an
3988  *   asynchronous event not associated with a completion occurs on the CQ.
3989  * @cq_context: Context associated with the CQ returned to the user via
3990  *   the associated completion and event handlers.
3991  * @cq_attr: The attributes the CQ should be created upon.
3992  *
3993  * Users can examine the cq structure to determine the actual CQ size.
3994  */
3995 struct ib_cq *__ib_create_cq(struct ib_device *device,
3996 			     ib_comp_handler comp_handler,
3997 			     void (*event_handler)(struct ib_event *, void *),
3998 			     void *cq_context,
3999 			     const struct ib_cq_init_attr *cq_attr,
4000 			     const char *caller);
4001 #define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
4002 	__ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
4003 
4004 /**
4005  * ib_resize_cq - Modifies the capacity of the CQ.
4006  * @cq: The CQ to resize.
4007  * @cqe: The minimum size of the CQ.
4008  *
4009  * Users can examine the cq structure to determine the actual CQ size.
4010  */
4011 int ib_resize_cq(struct ib_cq *cq, int cqe);
4012 
4013 /**
4014  * rdma_set_cq_moderation - Modifies moderation params of the CQ
4015  * @cq: The CQ to modify.
4016  * @cq_count: number of CQEs that will trigger an event
4017  * @cq_period: max period of time in usec before triggering an event
4018  *
4019  */
4020 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
4021 
4022 /**
4023  * ib_destroy_cq_user - Destroys the specified CQ.
4024  * @cq: The CQ to destroy.
4025  * @udata: Valid user data or NULL for kernel objects
4026  */
4027 int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
4028 
4029 /**
4030  * ib_destroy_cq - Destroys the specified kernel CQ.
4031  * @cq: The CQ to destroy.
4032  *
4033  * NOTE: for user cq use ib_destroy_cq_user with valid udata!
4034  */
4035 static inline void ib_destroy_cq(struct ib_cq *cq)
4036 {
4037 	int ret = ib_destroy_cq_user(cq, NULL);
4038 
4039 	WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
4040 }
4041 
4042 /**
4043  * ib_poll_cq - poll a CQ for completion(s)
4044  * @cq:the CQ being polled
4045  * @num_entries:maximum number of completions to return
4046  * @wc:array of at least @num_entries &struct ib_wc where completions
4047  *   will be returned
4048  *
4049  * Poll a CQ for (possibly multiple) completions.  If the return value
4050  * is < 0, an error occurred.  If the return value is >= 0, it is the
4051  * number of completions returned.  If the return value is
4052  * non-negative and < num_entries, then the CQ was emptied.
4053  */
4054 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
4055 			     struct ib_wc *wc)
4056 {
4057 	return cq->device->ops.poll_cq(cq, num_entries, wc);
4058 }
4059 
4060 /**
4061  * ib_req_notify_cq - Request completion notification on a CQ.
4062  * @cq: The CQ to generate an event for.
4063  * @flags:
4064  *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
4065  *   to request an event on the next solicited event or next work
4066  *   completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
4067  *   may also be |ed in to request a hint about missed events, as
4068  *   described below.
4069  *
4070  * Return Value:
4071  *    < 0 means an error occurred while requesting notification
4072  *   == 0 means notification was requested successfully, and if
4073  *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
4074  *        were missed and it is safe to wait for another event.  In
4075  *        this case is it guaranteed that any work completions added
4076  *        to the CQ since the last CQ poll will trigger a completion
4077  *        notification event.
4078  *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
4079  *        in.  It means that the consumer must poll the CQ again to
4080  *        make sure it is empty to avoid missing an event because of a
4081  *        race between requesting notification and an entry being
4082  *        added to the CQ.  This return value means it is possible
4083  *        (but not guaranteed) that a work completion has been added
4084  *        to the CQ since the last poll without triggering a
4085  *        completion notification event.
4086  */
4087 static inline int ib_req_notify_cq(struct ib_cq *cq,
4088 				   enum ib_cq_notify_flags flags)
4089 {
4090 	return cq->device->ops.req_notify_cq(cq, flags);
4091 }
4092 
4093 struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
4094 			     int comp_vector_hint,
4095 			     enum ib_poll_context poll_ctx);
4096 
4097 void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe);
4098 
4099 /*
4100  * Drivers that don't need a DMA mapping at the RDMA layer, set dma_device to
4101  * NULL. This causes the ib_dma* helpers to just stash the kernel virtual
4102  * address into the dma address.
4103  */
4104 static inline bool ib_uses_virt_dma(struct ib_device *dev)
4105 {
4106 	return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device;
4107 }
4108 
4109 /*
4110  * Check if a IB device's underlying DMA mapping supports P2PDMA transfers.
4111  */
4112 static inline bool ib_dma_pci_p2p_dma_supported(struct ib_device *dev)
4113 {
4114 	if (ib_uses_virt_dma(dev))
4115 		return false;
4116 
4117 	return dma_pci_p2pdma_supported(dev->dma_device);
4118 }
4119 
4120 /**
4121  * ib_virt_dma_to_ptr - Convert a dma_addr to a kernel pointer
4122  * @dma_addr: The DMA address
4123  *
4124  * Used by ib_uses_virt_dma() devices to get back to the kernel pointer after
4125  * going through the dma_addr marshalling.
4126  */
4127 static inline void *ib_virt_dma_to_ptr(u64 dma_addr)
4128 {
4129 	/* virt_dma mode maps the kvs's directly into the dma addr */
4130 	return (void *)(uintptr_t)dma_addr;
4131 }
4132 
4133 /**
4134  * ib_virt_dma_to_page - Convert a dma_addr to a struct page
4135  * @dma_addr: The DMA address
4136  *
4137  * Used by ib_uses_virt_dma() device to get back to the struct page after going
4138  * through the dma_addr marshalling.
4139  */
4140 static inline struct page *ib_virt_dma_to_page(u64 dma_addr)
4141 {
4142 	return virt_to_page(ib_virt_dma_to_ptr(dma_addr));
4143 }
4144 
4145 /**
4146  * ib_dma_mapping_error - check a DMA addr for error
4147  * @dev: The device for which the dma_addr was created
4148  * @dma_addr: The DMA address to check
4149  */
4150 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
4151 {
4152 	if (ib_uses_virt_dma(dev))
4153 		return 0;
4154 	return dma_mapping_error(dev->dma_device, dma_addr);
4155 }
4156 
4157 /**
4158  * ib_dma_map_single - Map a kernel virtual address to DMA address
4159  * @dev: The device for which the dma_addr is to be created
4160  * @cpu_addr: The kernel virtual address
4161  * @size: The size of the region in bytes
4162  * @direction: The direction of the DMA
4163  */
4164 static inline u64 ib_dma_map_single(struct ib_device *dev,
4165 				    void *cpu_addr, size_t size,
4166 				    enum dma_data_direction direction)
4167 {
4168 	if (ib_uses_virt_dma(dev))
4169 		return (uintptr_t)cpu_addr;
4170 	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
4171 }
4172 
4173 /**
4174  * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
4175  * @dev: The device for which the DMA address was created
4176  * @addr: The DMA address
4177  * @size: The size of the region in bytes
4178  * @direction: The direction of the DMA
4179  */
4180 static inline void ib_dma_unmap_single(struct ib_device *dev,
4181 				       u64 addr, size_t size,
4182 				       enum dma_data_direction direction)
4183 {
4184 	if (!ib_uses_virt_dma(dev))
4185 		dma_unmap_single(dev->dma_device, addr, size, direction);
4186 }
4187 
4188 /**
4189  * ib_dma_map_page - Map a physical page to DMA address
4190  * @dev: The device for which the dma_addr is to be created
4191  * @page: The page to be mapped
4192  * @offset: The offset within the page
4193  * @size: The size of the region in bytes
4194  * @direction: The direction of the DMA
4195  */
4196 static inline u64 ib_dma_map_page(struct ib_device *dev,
4197 				  struct page *page,
4198 				  unsigned long offset,
4199 				  size_t size,
4200 					 enum dma_data_direction direction)
4201 {
4202 	if (ib_uses_virt_dma(dev))
4203 		return (uintptr_t)(page_address(page) + offset);
4204 	return dma_map_page(dev->dma_device, page, offset, size, direction);
4205 }
4206 
4207 /**
4208  * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
4209  * @dev: The device for which the DMA address was created
4210  * @addr: The DMA address
4211  * @size: The size of the region in bytes
4212  * @direction: The direction of the DMA
4213  */
4214 static inline void ib_dma_unmap_page(struct ib_device *dev,
4215 				     u64 addr, size_t size,
4216 				     enum dma_data_direction direction)
4217 {
4218 	if (!ib_uses_virt_dma(dev))
4219 		dma_unmap_page(dev->dma_device, addr, size, direction);
4220 }
4221 
4222 int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents);
4223 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
4224 				      struct scatterlist *sg, int nents,
4225 				      enum dma_data_direction direction,
4226 				      unsigned long dma_attrs)
4227 {
4228 	if (ib_uses_virt_dma(dev))
4229 		return ib_dma_virt_map_sg(dev, sg, nents);
4230 	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
4231 				dma_attrs);
4232 }
4233 
4234 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
4235 					 struct scatterlist *sg, int nents,
4236 					 enum dma_data_direction direction,
4237 					 unsigned long dma_attrs)
4238 {
4239 	if (!ib_uses_virt_dma(dev))
4240 		dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
4241 				   dma_attrs);
4242 }
4243 
4244 /**
4245  * ib_dma_map_sgtable_attrs - Map a scatter/gather table to DMA addresses
4246  * @dev: The device for which the DMA addresses are to be created
4247  * @sg: The sg_table object describing the buffer
4248  * @direction: The direction of the DMA
4249  * @attrs: Optional DMA attributes for the map operation
4250  */
4251 static inline int ib_dma_map_sgtable_attrs(struct ib_device *dev,
4252 					   struct sg_table *sgt,
4253 					   enum dma_data_direction direction,
4254 					   unsigned long dma_attrs)
4255 {
4256 	int nents;
4257 
4258 	if (ib_uses_virt_dma(dev)) {
4259 		nents = ib_dma_virt_map_sg(dev, sgt->sgl, sgt->orig_nents);
4260 		if (!nents)
4261 			return -EIO;
4262 		sgt->nents = nents;
4263 		return 0;
4264 	}
4265 	return dma_map_sgtable(dev->dma_device, sgt, direction, dma_attrs);
4266 }
4267 
4268 static inline void ib_dma_unmap_sgtable_attrs(struct ib_device *dev,
4269 					      struct sg_table *sgt,
4270 					      enum dma_data_direction direction,
4271 					      unsigned long dma_attrs)
4272 {
4273 	if (!ib_uses_virt_dma(dev))
4274 		dma_unmap_sgtable(dev->dma_device, sgt, direction, dma_attrs);
4275 }
4276 
4277 /**
4278  * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
4279  * @dev: The device for which the DMA addresses are to be created
4280  * @sg: The array of scatter/gather entries
4281  * @nents: The number of scatter/gather entries
4282  * @direction: The direction of the DMA
4283  */
4284 static inline int ib_dma_map_sg(struct ib_device *dev,
4285 				struct scatterlist *sg, int nents,
4286 				enum dma_data_direction direction)
4287 {
4288 	return ib_dma_map_sg_attrs(dev, sg, nents, direction, 0);
4289 }
4290 
4291 /**
4292  * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
4293  * @dev: The device for which the DMA addresses were created
4294  * @sg: The array of scatter/gather entries
4295  * @nents: The number of scatter/gather entries
4296  * @direction: The direction of the DMA
4297  */
4298 static inline void ib_dma_unmap_sg(struct ib_device *dev,
4299 				   struct scatterlist *sg, int nents,
4300 				   enum dma_data_direction direction)
4301 {
4302 	ib_dma_unmap_sg_attrs(dev, sg, nents, direction, 0);
4303 }
4304 
4305 /**
4306  * ib_dma_max_seg_size - Return the size limit of a single DMA transfer
4307  * @dev: The device to query
4308  *
4309  * The returned value represents a size in bytes.
4310  */
4311 static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
4312 {
4313 	if (ib_uses_virt_dma(dev))
4314 		return UINT_MAX;
4315 	return dma_get_max_seg_size(dev->dma_device);
4316 }
4317 
4318 /**
4319  * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
4320  * @dev: The device for which the DMA address was created
4321  * @addr: The DMA address
4322  * @size: The size of the region in bytes
4323  * @dir: The direction of the DMA
4324  */
4325 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
4326 					      u64 addr,
4327 					      size_t size,
4328 					      enum dma_data_direction dir)
4329 {
4330 	if (!ib_uses_virt_dma(dev))
4331 		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
4332 }
4333 
4334 /**
4335  * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
4336  * @dev: The device for which the DMA address was created
4337  * @addr: The DMA address
4338  * @size: The size of the region in bytes
4339  * @dir: The direction of the DMA
4340  */
4341 static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
4342 						 u64 addr,
4343 						 size_t size,
4344 						 enum dma_data_direction dir)
4345 {
4346 	if (!ib_uses_virt_dma(dev))
4347 		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
4348 }
4349 
4350 /* ib_reg_user_mr - register a memory region for virtual addresses from kernel
4351  * space. This function should be called when 'current' is the owning MM.
4352  */
4353 struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
4354 			     u64 virt_addr, int mr_access_flags);
4355 
4356 /* ib_advise_mr -  give an advice about an address range in a memory region */
4357 int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
4358 		 u32 flags, struct ib_sge *sg_list, u32 num_sge);
4359 /**
4360  * ib_dereg_mr_user - Deregisters a memory region and removes it from the
4361  *   HCA translation table.
4362  * @mr: The memory region to deregister.
4363  * @udata: Valid user data or NULL for kernel object
4364  *
4365  * This function can fail, if the memory region has memory windows bound to it.
4366  */
4367 int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);
4368 
4369 /**
4370  * ib_dereg_mr - Deregisters a kernel memory region and removes it from the
4371  *   HCA translation table.
4372  * @mr: The memory region to deregister.
4373  *
4374  * This function can fail, if the memory region has memory windows bound to it.
4375  *
4376  * NOTE: for user mr use ib_dereg_mr_user with valid udata!
4377  */
4378 static inline int ib_dereg_mr(struct ib_mr *mr)
4379 {
4380 	return ib_dereg_mr_user(mr, NULL);
4381 }
4382 
4383 struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
4384 			  u32 max_num_sg);
4385 
4386 struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
4387 				    u32 max_num_data_sg,
4388 				    u32 max_num_meta_sg);
4389 
4390 /**
4391  * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
4392  *   R_Key and L_Key.
4393  * @mr - struct ib_mr pointer to be updated.
4394  * @newkey - new key to be used.
4395  */
4396 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
4397 {
4398 	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
4399 	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
4400 }
4401 
4402 /**
4403  * ib_inc_rkey - increments the key portion of the given rkey. Can be used
4404  * for calculating a new rkey for type 2 memory windows.
4405  * @rkey - the rkey to increment.
4406  */
4407 static inline u32 ib_inc_rkey(u32 rkey)
4408 {
4409 	const u32 mask = 0x000000ff;
4410 	return ((rkey + 1) & mask) | (rkey & ~mask);
4411 }
4412 
4413 /**
4414  * ib_attach_mcast - Attaches the specified QP to a multicast group.
4415  * @qp: QP to attach to the multicast group.  The QP must be type
4416  *   IB_QPT_UD.
4417  * @gid: Multicast group GID.
4418  * @lid: Multicast group LID in host byte order.
4419  *
4420  * In order to send and receive multicast packets, subnet
4421  * administration must have created the multicast group and configured
4422  * the fabric appropriately.  The port associated with the specified
4423  * QP must also be a member of the multicast group.
4424  */
4425 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4426 
4427 /**
4428  * ib_detach_mcast - Detaches the specified QP from a multicast group.
4429  * @qp: QP to detach from the multicast group.
4430  * @gid: Multicast group GID.
4431  * @lid: Multicast group LID in host byte order.
4432  */
4433 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4434 
4435 struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
4436 				   struct inode *inode, struct ib_udata *udata);
4437 int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata);
4438 
4439 static inline int ib_check_mr_access(struct ib_device *ib_dev,
4440 				     unsigned int flags)
4441 {
4442 	u64 device_cap = ib_dev->attrs.device_cap_flags;
4443 
4444 	/*
4445 	 * Local write permission is required if remote write or
4446 	 * remote atomic permission is also requested.
4447 	 */
4448 	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
4449 	    !(flags & IB_ACCESS_LOCAL_WRITE))
4450 		return -EINVAL;
4451 
4452 	if (flags & ~IB_ACCESS_SUPPORTED)
4453 		return -EINVAL;
4454 
4455 	if (flags & IB_ACCESS_ON_DEMAND &&
4456 	    !(ib_dev->attrs.kernel_cap_flags & IBK_ON_DEMAND_PAGING))
4457 		return -EOPNOTSUPP;
4458 
4459 	if ((flags & IB_ACCESS_FLUSH_GLOBAL &&
4460 	    !(device_cap & IB_DEVICE_FLUSH_GLOBAL)) ||
4461 	    (flags & IB_ACCESS_FLUSH_PERSISTENT &&
4462 	    !(device_cap & IB_DEVICE_FLUSH_PERSISTENT)))
4463 		return -EOPNOTSUPP;
4464 
4465 	return 0;
4466 }
4467 
4468 static inline bool ib_access_writable(int access_flags)
4469 {
4470 	/*
4471 	 * We have writable memory backing the MR if any of the following
4472 	 * access flags are set.  "Local write" and "remote write" obviously
4473 	 * require write access.  "Remote atomic" can do things like fetch and
4474 	 * add, which will modify memory, and "MW bind" can change permissions
4475 	 * by binding a window.
4476 	 */
4477 	return access_flags &
4478 		(IB_ACCESS_LOCAL_WRITE   | IB_ACCESS_REMOTE_WRITE |
4479 		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
4480 }
4481 
4482 /**
4483  * ib_check_mr_status: lightweight check of MR status.
4484  *     This routine may provide status checks on a selected
4485  *     ib_mr. first use is for signature status check.
4486  *
4487  * @mr: A memory region.
4488  * @check_mask: Bitmask of which checks to perform from
4489  *     ib_mr_status_check enumeration.
4490  * @mr_status: The container of relevant status checks.
4491  *     failed checks will be indicated in the status bitmask
4492  *     and the relevant info shall be in the error item.
4493  */
4494 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
4495 		       struct ib_mr_status *mr_status);
4496 
4497 /**
4498  * ib_device_try_get: Hold a registration lock
4499  * device: The device to lock
4500  *
4501  * A device under an active registration lock cannot become unregistered. It
4502  * is only possible to obtain a registration lock on a device that is fully
4503  * registered, otherwise this function returns false.
4504  *
4505  * The registration lock is only necessary for actions which require the
4506  * device to still be registered. Uses that only require the device pointer to
4507  * be valid should use get_device(&ibdev->dev) to hold the memory.
4508  *
4509  */
4510 static inline bool ib_device_try_get(struct ib_device *dev)
4511 {
4512 	return refcount_inc_not_zero(&dev->refcount);
4513 }
4514 
4515 void ib_device_put(struct ib_device *device);
4516 struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
4517 					  enum rdma_driver_id driver_id);
4518 struct ib_device *ib_device_get_by_name(const char *name,
4519 					enum rdma_driver_id driver_id);
4520 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port,
4521 					    u16 pkey, const union ib_gid *gid,
4522 					    const struct sockaddr *addr);
4523 int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
4524 			 unsigned int port);
4525 struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
4526 					u32 port);
4527 int ib_query_netdev_port(struct ib_device *ibdev, struct net_device *ndev,
4528 			 u32 *port);
4529 
4530 static inline enum ib_port_state ib_get_curr_port_state(struct net_device *net_dev)
4531 {
4532 	return (netif_running(net_dev) && netif_carrier_ok(net_dev)) ?
4533 		IB_PORT_ACTIVE : IB_PORT_DOWN;
4534 }
4535 
4536 void ib_dispatch_port_state_event(struct ib_device *ibdev,
4537 				  struct net_device *ndev);
4538 struct ib_wq *ib_create_wq(struct ib_pd *pd,
4539 			   struct ib_wq_init_attr *init_attr);
4540 int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
4541 
4542 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4543 		 unsigned int *sg_offset, unsigned int page_size);
4544 int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
4545 		    int data_sg_nents, unsigned int *data_sg_offset,
4546 		    struct scatterlist *meta_sg, int meta_sg_nents,
4547 		    unsigned int *meta_sg_offset, unsigned int page_size);
4548 
4549 static inline int
4550 ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4551 		  unsigned int *sg_offset, unsigned int page_size)
4552 {
4553 	int n;
4554 
4555 	n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
4556 	mr->iova = 0;
4557 
4558 	return n;
4559 }
4560 
4561 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
4562 		unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
4563 
4564 void ib_drain_rq(struct ib_qp *qp);
4565 void ib_drain_sq(struct ib_qp *qp);
4566 void ib_drain_qp(struct ib_qp *qp);
4567 
4568 int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed,
4569 		     u8 *width);
4570 
4571 static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
4572 {
4573 	if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
4574 		return attr->roce.dmac;
4575 	return NULL;
4576 }
4577 
4578 static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
4579 {
4580 	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4581 		attr->ib.dlid = (u16)dlid;
4582 	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4583 		attr->opa.dlid = dlid;
4584 }
4585 
4586 static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
4587 {
4588 	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4589 		return attr->ib.dlid;
4590 	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4591 		return attr->opa.dlid;
4592 	return 0;
4593 }
4594 
4595 static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
4596 {
4597 	attr->sl = sl;
4598 }
4599 
4600 static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
4601 {
4602 	return attr->sl;
4603 }
4604 
4605 static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
4606 					 u8 src_path_bits)
4607 {
4608 	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4609 		attr->ib.src_path_bits = src_path_bits;
4610 	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4611 		attr->opa.src_path_bits = src_path_bits;
4612 }
4613 
4614 static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
4615 {
4616 	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4617 		return attr->ib.src_path_bits;
4618 	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4619 		return attr->opa.src_path_bits;
4620 	return 0;
4621 }
4622 
4623 static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
4624 					bool make_grd)
4625 {
4626 	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4627 		attr->opa.make_grd = make_grd;
4628 }
4629 
4630 static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
4631 {
4632 	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4633 		return attr->opa.make_grd;
4634 	return false;
4635 }
4636 
4637 static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u32 port_num)
4638 {
4639 	attr->port_num = port_num;
4640 }
4641 
4642 static inline u32 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
4643 {
4644 	return attr->port_num;
4645 }
4646 
4647 static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
4648 					   u8 static_rate)
4649 {
4650 	attr->static_rate = static_rate;
4651 }
4652 
4653 static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
4654 {
4655 	return attr->static_rate;
4656 }
4657 
4658 static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
4659 					enum ib_ah_flags flag)
4660 {
4661 	attr->ah_flags = flag;
4662 }
4663 
4664 static inline enum ib_ah_flags
4665 		rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
4666 {
4667 	return attr->ah_flags;
4668 }
4669 
4670 static inline const struct ib_global_route
4671 		*rdma_ah_read_grh(const struct rdma_ah_attr *attr)
4672 {
4673 	return &attr->grh;
4674 }
4675 
4676 /*To retrieve and modify the grh */
4677 static inline struct ib_global_route
4678 		*rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
4679 {
4680 	return &attr->grh;
4681 }
4682 
4683 static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
4684 {
4685 	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4686 
4687 	memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
4688 }
4689 
4690 static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
4691 					     __be64 prefix)
4692 {
4693 	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4694 
4695 	grh->dgid.global.subnet_prefix = prefix;
4696 }
4697 
4698 static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
4699 					    __be64 if_id)
4700 {
4701 	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4702 
4703 	grh->dgid.global.interface_id = if_id;
4704 }
4705 
4706 static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
4707 				   union ib_gid *dgid, u32 flow_label,
4708 				   u8 sgid_index, u8 hop_limit,
4709 				   u8 traffic_class)
4710 {
4711 	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4712 
4713 	attr->ah_flags = IB_AH_GRH;
4714 	if (dgid)
4715 		grh->dgid = *dgid;
4716 	grh->flow_label = flow_label;
4717 	grh->sgid_index = sgid_index;
4718 	grh->hop_limit = hop_limit;
4719 	grh->traffic_class = traffic_class;
4720 	grh->sgid_attr = NULL;
4721 }
4722 
4723 void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
4724 void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
4725 			     u32 flow_label, u8 hop_limit, u8 traffic_class,
4726 			     const struct ib_gid_attr *sgid_attr);
4727 void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
4728 		       const struct rdma_ah_attr *src);
4729 void rdma_replace_ah_attr(struct rdma_ah_attr *old,
4730 			  const struct rdma_ah_attr *new);
4731 void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
4732 
4733 /**
4734  * rdma_ah_find_type - Return address handle type.
4735  *
4736  * @dev: Device to be checked
4737  * @port_num: Port number
4738  */
4739 static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
4740 						       u32 port_num)
4741 {
4742 	if (rdma_protocol_roce(dev, port_num))
4743 		return RDMA_AH_ATTR_TYPE_ROCE;
4744 	if (rdma_protocol_ib(dev, port_num)) {
4745 		if (rdma_cap_opa_ah(dev, port_num))
4746 			return RDMA_AH_ATTR_TYPE_OPA;
4747 		return RDMA_AH_ATTR_TYPE_IB;
4748 	}
4749 	if (dev->type == RDMA_DEVICE_TYPE_SMI)
4750 		return RDMA_AH_ATTR_TYPE_IB;
4751 
4752 	return RDMA_AH_ATTR_TYPE_UNDEFINED;
4753 }
4754 
4755 /**
4756  * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
4757  *     In the current implementation the only way to
4758  *     get the 32bit lid is from other sources for OPA.
4759  *     For IB, lids will always be 16bits so cast the
4760  *     value accordingly.
4761  *
4762  * @lid: A 32bit LID
4763  */
4764 static inline u16 ib_lid_cpu16(u32 lid)
4765 {
4766 	WARN_ON_ONCE(lid & 0xFFFF0000);
4767 	return (u16)lid;
4768 }
4769 
4770 /**
4771  * ib_lid_be16 - Return lid in 16bit BE encoding.
4772  *
4773  * @lid: A 32bit LID
4774  */
4775 static inline __be16 ib_lid_be16(u32 lid)
4776 {
4777 	WARN_ON_ONCE(lid & 0xFFFF0000);
4778 	return cpu_to_be16((u16)lid);
4779 }
4780 
4781 /**
4782  * ib_get_vector_affinity - Get the affinity mappings of a given completion
4783  *   vector
4784  * @device:         the rdma device
4785  * @comp_vector:    index of completion vector
4786  *
4787  * Returns NULL on failure, otherwise a corresponding cpu map of the
4788  * completion vector (returns all-cpus map if the device driver doesn't
4789  * implement get_vector_affinity).
4790  */
4791 static inline const struct cpumask *
4792 ib_get_vector_affinity(struct ib_device *device, int comp_vector)
4793 {
4794 	if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
4795 	    !device->ops.get_vector_affinity)
4796 		return NULL;
4797 
4798 	return device->ops.get_vector_affinity(device, comp_vector);
4799 
4800 }
4801 
4802 /**
4803  * rdma_roce_rescan_device - Rescan all of the network devices in the system
4804  * and add their gids, as needed, to the relevant RoCE devices.
4805  *
4806  * @device:         the rdma device
4807  */
4808 void rdma_roce_rescan_device(struct ib_device *ibdev);
4809 void rdma_roce_rescan_port(struct ib_device *ib_dev, u32 port);
4810 void roce_del_all_netdev_gids(struct ib_device *ib_dev,
4811 			      u32 port, struct net_device *ndev);
4812 
4813 struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
4814 
4815 #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
4816 int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
4817 bool rdma_uattrs_has_raw_cap(const struct uverbs_attr_bundle *attrs);
4818 #else
4819 static inline int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs)
4820 {
4821 	return 0;
4822 }
4823 static inline bool
4824 rdma_uattrs_has_raw_cap(const struct uverbs_attr_bundle *attrs)
4825 {
4826 	return false;
4827 }
4828 #endif
4829 
4830 struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
4831 				     enum rdma_netdev_t type, const char *name,
4832 				     unsigned char name_assign_type,
4833 				     void (*setup)(struct net_device *));
4834 
4835 int rdma_init_netdev(struct ib_device *device, u32 port_num,
4836 		     enum rdma_netdev_t type, const char *name,
4837 		     unsigned char name_assign_type,
4838 		     void (*setup)(struct net_device *),
4839 		     struct net_device *netdev);
4840 
4841 /**
4842  * rdma_device_to_ibdev - Get ib_device pointer from device pointer
4843  *
4844  * @device:	device pointer for which ib_device pointer to retrieve
4845  *
4846  * rdma_device_to_ibdev() retrieves ib_device pointer from device.
4847  *
4848  */
4849 static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
4850 {
4851 	struct ib_core_device *coredev =
4852 		container_of(device, struct ib_core_device, dev);
4853 
4854 	return coredev->owner;
4855 }
4856 
4857 /**
4858  * ibdev_to_node - return the NUMA node for a given ib_device
4859  * @dev:	device to get the NUMA node for.
4860  */
4861 static inline int ibdev_to_node(struct ib_device *ibdev)
4862 {
4863 	struct device *parent = ibdev->dev.parent;
4864 
4865 	if (!parent)
4866 		return NUMA_NO_NODE;
4867 	return dev_to_node(parent);
4868 }
4869 
4870 /**
4871  * rdma_device_to_drv_device - Helper macro to reach back to driver's
4872  *			       ib_device holder structure from device pointer.
4873  *
4874  * NOTE: New drivers should not make use of this API; This API is only for
4875  * existing drivers who have exposed sysfs entries using
4876  * ops->device_group.
4877  */
4878 #define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member)           \
4879 	container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
4880 
4881 bool rdma_dev_access_netns(const struct ib_device *device,
4882 			   const struct net *net);
4883 
4884 bool rdma_dev_has_raw_cap(const struct ib_device *dev);
4885 static inline struct net *rdma_dev_net(struct ib_device *device)
4886 {
4887 	return read_pnet(&device->coredev.rdma_net);
4888 }
4889 
4890 #define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
4891 #define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF)
4892 #define IB_GRH_FLOWLABEL_MASK (0x000FFFFF)
4893 
4894 /**
4895  * rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based
4896  *                               on the flow_label
4897  *
4898  * This function will convert the 20 bit flow_label input to a valid RoCE v2
4899  * UDP src port 14 bit value. All RoCE V2 drivers should use this same
4900  * convention.
4901  */
4902 static inline u16 rdma_flow_label_to_udp_sport(u32 fl)
4903 {
4904 	u32 fl_low = fl & 0x03fff, fl_high = fl & 0xFC000;
4905 
4906 	fl_low ^= fl_high >> 14;
4907 	return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN);
4908 }
4909 
4910 /**
4911  * rdma_calc_flow_label - generate a RDMA symmetric flow label value based on
4912  *                        local and remote qpn values
4913  *
4914  * This function folded the multiplication results of two qpns, 24 bit each,
4915  * fields, and converts it to a 20 bit results.
4916  *
4917  * This function will create symmetric flow_label value based on the local
4918  * and remote qpn values. this will allow both the requester and responder
4919  * to calculate the same flow_label for a given connection.
4920  *
4921  * This helper function should be used by driver in case the upper layer
4922  * provide a zero flow_label value. This is to improve entropy of RDMA
4923  * traffic in the network.
4924  */
4925 static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn)
4926 {
4927 	u64 v = (u64)lqpn * rqpn;
4928 
4929 	v ^= v >> 20;
4930 	v ^= v >> 40;
4931 
4932 	return (u32)(v & IB_GRH_FLOWLABEL_MASK);
4933 }
4934 
4935 /**
4936  * rdma_get_udp_sport - Calculate and set UDP source port based on the flow
4937  *                      label. If flow label is not defined in GRH then
4938  *                      calculate it based on lqpn/rqpn.
4939  *
4940  * @fl:                 flow label from GRH
4941  * @lqpn:               local qp number
4942  * @rqpn:               remote qp number
4943  */
4944 static inline u16 rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
4945 {
4946 	if (!fl)
4947 		fl = rdma_calc_flow_label(lqpn, rqpn);
4948 
4949 	return rdma_flow_label_to_udp_sport(fl);
4950 }
4951 
4952 const struct ib_port_immutable*
4953 ib_port_immutable_read(struct ib_device *dev, unsigned int port);
4954 
4955 /** ib_add_sub_device - Add a sub IB device on an existing one
4956  *
4957  * @parent: The IB device that needs to add a sub device
4958  * @type: The type of the new sub device
4959  * @name: The name of the new sub device
4960  *
4961  *
4962  * Return 0 on success, an error code otherwise
4963  */
4964 int ib_add_sub_device(struct ib_device *parent,
4965 		      enum rdma_nl_dev_type type,
4966 		      const char *name);
4967 
4968 
4969 /** ib_del_sub_device_and_put - Delect an IB sub device while holding a 'get'
4970  *
4971  * @sub: The sub device that is going to be deleted
4972  *
4973  * Return 0 on success, an error code otherwise
4974  */
4975 int ib_del_sub_device_and_put(struct ib_device *sub);
4976 
4977 static inline void ib_mark_name_assigned_by_user(struct ib_device *ibdev)
4978 {
4979 	ibdev->name_assign_type = RDMA_NAME_ASSIGN_TYPE_USER;
4980 }
4981 
4982 #endif /* IB_VERBS_H */
4983