xref: /freebsd/sys/ofed/include/rdma/ib_verbs.h (revision fed1ca4b719c56c930f2259d80663cd34be812bb)
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/if_ether.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>

extern struct workqueue_struct *ib_wq;

union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

enum rdma_node_type {
	/* IB values map to NodeInfo:NodeType. */
	RDMA_NODE_IB_CA		= 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC,
	RDMA_NODE_MIC
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_SCIF
};

enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
	IB_LINK_LAYER_SCIF
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR		= 1,
	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
	IB_DEVICE_RAW_MULTI		= (1<<3),
	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
	IB_DEVICE_INIT_TYPE		= (1<<9),
	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
	IB_DEVICE_SRQ_RESIZE		= (1<<13),
	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
	IB_DEVICE_LOCAL_DMA_LKEY	= (1<<15),
	IB_DEVICE_RESERVED		= (1<<16), /* old SEND_W_INV */
	IB_DEVICE_MEM_WINDOW		= (1<<17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksums for
	 * incoming messages.  Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM		= (1<<18),
	IB_DEVICE_UD_TSO		= (1<<19),
	IB_DEVICE_XRC			= (1<<20),
	IB_DEVICE_MEM_MGT_EXTENSIONS	= (1<<21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
	IB_DEVICE_MR_ALLOCATE		= (1<<23),
	IB_DEVICE_SHARED_MR		= (1<<24),
	IB_DEVICE_QPG			= (1<<25),
	IB_DEVICE_UD_RSS		= (1<<26),
	IB_DEVICE_UD_TSS		= (1<<27),
	IB_DEVICE_CROSS_CHANNEL		= (1<<28),
	IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29),
	/*
	 * A device can set either IB_DEVICE_MEM_WINDOW_TYPE_2A or
	 * IB_DEVICE_MEM_WINDOW_TYPE_2B if it supports type 2A or type 2B
	 * memory windows, or set neither to indicate that it does not
	 * support type 2 windows at all.
	 */
	IB_DEVICE_MEM_WINDOW_TYPE_2A	= (1<<30),
	IB_DEVICE_MEM_WINDOW_TYPE_2B	= (1<<31),
	IB_DEVICE_SIGNATURE_HANDOVER	= (1LL<<32)
};

enum ib_signature_prot_cap {
	IB_PROT_T10DIF_TYPE_1 = 1,
	IB_PROT_T10DIF_TYPE_2 = 1 << 1,
	IB_PROT_T10DIF_TYPE_3 = 1 << 2,
};

enum ib_signature_guard_cap {
	IB_GUARD_T10DIF_CRC	= 1,
	IB_GUARD_T10DIF_CSUM	= 1 << 1,
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

enum ib_cq_create_flags {
	IB_CQ_CREATE_CROSS_CHANNEL	= 1 << 0,
	IB_CQ_TIMESTAMP			= 1 << 1,
	IB_CQ_TIMESTAMP_TO_SYS_TIME	= 1 << 2
};

struct ib_device_attr {
	u64			fw_ver;
	__be64			sys_image_guid;
	u64			max_mr_size;
	u64			page_size_cap;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	u64			device_cap_flags;
	int			max_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	enum ib_atomic_cap	masked_atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_fmr;
	int			max_map_per_fmr;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	unsigned int		max_fast_reg_page_list_len;
	int			max_rss_tbl_sz;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
	int			comp_mask;
	uint64_t		timestamp_mask;
	uint64_t		hca_core_clock;
	unsigned int		sig_prot_cap;
	unsigned int		sig_guard_cap;
};

enum ib_device_attr_comp_mask {
	IB_DEVICE_ATTR_WITH_TIMESTAMP_MASK = 1ULL << 1,
	IB_DEVICE_ATTR_WITH_HCA_CORE_CLOCK = 1ULL << 2
};

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5,
	IB_PORT_DUMMY		= -1	/* force enum signed */
};

enum ib_port_cap_flags {
	IB_PORT_SM				= 1 <<  1,
	IB_PORT_NOTICE_SUP			= 1 <<  2,
	IB_PORT_TRAP_SUP			= 1 <<  3,
	IB_PORT_OPT_IPD_SUP			= 1 <<  4,
	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
	IB_PORT_SL_MAP_SUP			= 1 <<  6,
	IB_PORT_MKEY_NVRAM			= 1 <<  7,
	IB_PORT_PKEY_NVRAM			= 1 <<  8,
	IB_PORT_LED_INFO_SUP			= 1 <<  9,
	IB_PORT_SM_DISABLED			= 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
	IB_PORT_EXTENDED_SPEEDS_SUP		= 1 << 14,
	IB_PORT_CM_SUP				= 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
	IB_PORT_REINIT_SUP			= 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
	IB_PORT_CLIENT_REG_SUP			= 1 << 25
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	  return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR	= 1,
	IB_SPEED_DDR	= 2,
	IB_SPEED_QDR	= 4,
	IB_SPEED_FDR10	= 8,
	IB_SPEED_FDR	= 16,
	IB_SPEED_EDR	= 32
};

struct ib_protocol_stats {
	/* TBD... */
};

struct iw_protocol_stats {
	u64	ipInReceives;
	u64	ipInHdrErrors;
	u64	ipInTooBigErrors;
	u64	ipInNoRoutes;
	u64	ipInAddrErrors;
	u64	ipInUnknownProtos;
	u64	ipInTruncatedPkts;
	u64	ipInDiscards;
	u64	ipInDelivers;
	u64	ipOutForwDatagrams;
	u64	ipOutRequests;
	u64	ipOutDiscards;
	u64	ipOutNoRoutes;
	u64	ipReasmTimeout;
	u64	ipReasmReqds;
	u64	ipReasmOKs;
	u64	ipReasmFails;
	u64	ipFragOKs;
	u64	ipFragFails;
	u64	ipFragCreates;
	u64	ipInMcastPkts;
	u64	ipOutMcastPkts;
	u64	ipInBcastPkts;
	u64	ipOutBcastPkts;

	u64	tcpRtoAlgorithm;
	u64	tcpRtoMin;
	u64	tcpRtoMax;
	u64	tcpMaxConn;
	u64	tcpActiveOpens;
	u64	tcpPassiveOpens;
	u64	tcpAttemptFails;
	u64	tcpEstabResets;
	u64	tcpCurrEstab;
	u64	tcpInSegs;
	u64	tcpOutSegs;
	u64	tcpRetransSegs;
	u64	tcpInErrs;
	u64	tcpOutRsts;
};

union rdma_protocol_stats {
	struct ib_protocol_stats	ib;
	struct iw_protocol_stats	iw;
};

struct ib_port_attr {
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	int			gid_tbl_len;
	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u16			lid;
	u16			sm_lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u8			active_speed;
	u8			phys_state;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[64];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN		= 1,
	IB_PORT_INIT_TYPE		= (1<<2),
	IB_PORT_RESET_QKEY_CNTR		= (1<<3)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
};

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		u8		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void            (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
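
/*
 * Example: subscribing to asynchronous events (a minimal sketch; the
 * handler and device names are hypothetical placeholders, and
 * ib_register_event_handler() is declared further down in this header).
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ACTIVE)
 *			pr_info("port %d is active\n", event->element.port_num);
 *	}
 *
 *	static struct ib_event_handler my_handler;
 *
 *	INIT_IB_EVENT_HANDLER(&my_handler, device, my_event_handler);
 *	ib_register_event_handler(&my_handler);
 */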

struct ib_global_route {
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS  = 11,
	IB_RATE_56_GBPS  = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS  = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18
};

enum ib_mr_create_flags {
	IB_MR_SIGNATURE_EN = 1,
};

/**
 * ib_mr_init_attr - Memory region init attributes passed to routine
 *     ib_create_mr.
 * @max_reg_descriptors: max number of registration descriptors that
 *     may be used with registration work requests.
 * @flags: MR creation flags bit mask.
 */
struct ib_mr_init_attr {
	int	    max_reg_descriptors;
	u32	    flags;
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
int ib_rate_to_mbps(enum ib_rate rate) __attribute_const__;

struct ib_cq_init_attr {
	int	cqe;
	int	comp_vector;
	u32	flags;
};

enum ib_signature_type {
	IB_SIG_TYPE_T10_DIF,
};

/**
 * T10-DIF Signature types
 * T10-DIF types are defined by SCSI
 * specifications.
 */
enum ib_t10_dif_type {
	IB_T10DIF_NONE,
	IB_T10DIF_TYPE1,
	IB_T10DIF_TYPE2,
	IB_T10DIF_TYPE3
};

/**
 * Signature T10-DIF block-guard types
 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
 * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
 */
enum ib_t10_dif_bg_type {
	IB_T10DIF_CRC,
	IB_T10DIF_CSUM
};

/**
 * struct ib_t10_dif_domain - Parameters specific for T10-DIF
 *     domain.
 * @type: T10-DIF type (0|1|2|3)
 * @bg_type: T10-DIF block guard type (CRC|CSUM)
 * @pi_interval: protection information interval.
 * @bg: seed of guard computation.
 * @app_tag: application tag of guard block
 * @ref_tag: initial guard block reference tag.
 * @type3_inc_reftag: the T10-DIF type 3 specification says nothing
 *     about the reference tag; whether to increment it or not is
 *     left to the user.
 */
struct ib_t10_dif_domain {
	enum ib_t10_dif_type	type;
	enum ib_t10_dif_bg_type bg_type;
	u32			pi_interval;
	u16			bg;
	u16			app_tag;
	u32			ref_tag;
	bool			type3_inc_reftag;
};

/**
 * struct ib_sig_domain - Parameters for signature domain
 * @sig_type: specific signature type
 * @sig: union of all signature domain attributes that may
 *     be used to set domain layout.
 */
struct ib_sig_domain {
	enum ib_signature_type sig_type;
	union {
		struct ib_t10_dif_domain dif;
	} sig;
};

/**
 * struct ib_sig_attrs - Parameters for signature handover operation
 * @check_mask: bitmask for signature byte check (8 bytes)
 * @mem: memory domain layout descriptor.
 * @wire: wire domain layout descriptor.
 */
struct ib_sig_attrs {
	u8			check_mask;
	struct ib_sig_domain	mem;
	struct ib_sig_domain	wire;
};
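
/*
 * Example: describing a T10-DIF type 1 signature handover (a minimal,
 * hypothetical sketch of filling struct ib_sig_attrs; the values shown
 * are illustrative, not mandated by this header).
 *
 *	struct ib_sig_attrs sig_attrs = {
 *		.check_mask = 0xff,	// check all signature bytes
 *		.mem = {
 *			.sig_type = IB_SIG_TYPE_T10_DIF,
 *			.sig.dif = {
 *				.type = IB_T10DIF_TYPE1,
 *				.bg_type = IB_T10DIF_CRC,
 *				.pi_interval = 512,	// one PI field per 512-byte block
 *				.ref_tag = 0,
 *			},
 *		},
 *		.wire = {
 *			.sig_type = IB_SIG_TYPE_T10_DIF,
 *			.sig.dif = {
 *				.type = IB_T10DIF_TYPE1,
 *				.bg_type = IB_T10DIF_CRC,
 *				.pi_interval = 512,
 *				.ref_tag = 0,
 *			},
 *		},
 *	};
 */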

enum ib_sig_err_type {
	IB_SIG_BAD_GUARD,
	IB_SIG_BAD_REFTAG,
	IB_SIG_BAD_APPTAG,
};

/**
 * struct ib_sig_err - signature error descriptor
 */
struct ib_sig_err {
	enum ib_sig_err_type	err_type;
	u32			expected;
	u32			actual;
	u64			sig_err_offset;
	u32			key;
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
	u32		    fail_status;
	struct ib_sig_err   sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;

struct ib_ah_attr {
	struct ib_global_route	grh;
	u16			dlid;
	u8			sl;
	u8			src_path_bits;
	u8			static_rate;
	u8			ah_flags;
	u8			port_num;
	u8			dmac[6];
	u16			vlan_id;
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_BIND_MW,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_FAST_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV			= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};
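
/*
 * Example: distinguishing receive completions when polling a CQ (a
 * minimal sketch; ib_poll_cq() is declared further down in this header,
 * the handle_* helpers are placeholders, and error handling is elided).
 *
 *	struct ib_wc wc;
 *
 *	while (ib_poll_cq(cq, 1, &wc) > 0) {
 *		if (wc.status != IB_WC_SUCCESS)
 *			continue;
 *		if (wc.opcode & IB_WC_RECV)
 *			handle_receive(&wc);	// receive-side completion
 *		else
 *			handle_send(&wc);	// send-side completion
 *	}
 */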

enum ib_wc_flags {
	IB_WC_GRH		= 1,
	IB_WC_WITH_IMM		= (1<<1),
	IB_WC_WITH_INVALIDATE	= (1<<2),
	IB_WC_IP_CSUM_OK	= (1<<3),
	IB_WC_WITH_SL		= (1<<4),
	IB_WC_WITH_SLID		= (1<<5),
	IB_WC_WITH_TIMESTAMP	= (1<<6),
	IB_WC_WITH_SMAC		= (1<<7),
	IB_WC_WITH_VLAN		= (1<<8),
};

struct ib_wc {
	u64			wr_id;
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	struct ib_qp	       *qp;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	u32			src_qp;
	int			wc_flags;
	u16			pkey_index;
	u16			slid;
	u8			sl;
	u8			dlid_path_bits;
	u8			port_num;	/* valid only for DR SMPs on switches */
	int			csum_ok;
	struct {
		uint64_t	timestamp; /* timestamp = 0 indicates error */
	} ts;
	u8			smac[6];
	u16			vlan_id;
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};

enum ib_srq_type {
	IB_SRQT_BASIC,
	IB_SRQT_XRC
};

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32	max_wr;
	u32	max_sge;
	u32	srq_limit;
};

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	struct ib_srq_attr	attr;
	enum ib_srq_type	srq_type;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq   *cq;
		} xrc;
	} ext;
};

struct ib_qp_cap {
	u32	max_send_wr;
	u32	max_recv_wr;
	u32	max_send_sge;
	u32	max_recv_sge;
	u32	max_inline_data;
	u32	qpg_tss_mask_sz;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = 8,
	IB_QPT_XRC_INI = 9,
	IB_QPT_XRC_TGT,
	IB_QPT_DC_INI,
	IB_QPT_MAX,
	/* Reserve a range for QP types internal to the low level driver.
	 * These QP types will not be visible at the IB core layer, so
	 * uses of IB_QPT_MAX in the core layer are not affected.
	 */
	IB_QPT_RESERVED1 = 0x1000,
	IB_QPT_RESERVED2,
	IB_QPT_RESERVED3,
	IB_QPT_RESERVED4,
	IB_QPT_RESERVED5,
	IB_QPT_RESERVED6,
	IB_QPT_RESERVED7,
	IB_QPT_RESERVED8,
	IB_QPT_RESERVED9,
	IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
	IB_QP_CREATE_CROSS_CHANNEL		= 1 << 2,
	IB_QP_CREATE_MANAGED_SEND		= 1 << 3,
	IB_QP_CREATE_MANAGED_RECV		= 1 << 4,
	IB_QP_CREATE_NETIF_QP			= 1 << 5,
	IB_QP_CREATE_SIGNATURE_EN		= 1 << 6,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START		= 1 << 26,
	IB_QP_CREATE_RESERVED_END		= 1 << 31,
};

enum ib_qpg_type {
	IB_QPG_NONE	= 0,
	IB_QPG_PARENT	= (1<<0),
	IB_QPG_CHILD_RX = (1<<1),
	IB_QPG_CHILD_TX = (1<<2)
};

struct ib_qpg_init_attrib {
	u32 tss_child_count;
	u32 rss_child_count;
};

struct ib_qp_init_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd;     /* XRC TGT QPs only */
	struct ib_qp_cap	cap;
	union {
		struct ib_qp *qpg_parent; /* see qpg_type */
		struct ib_qpg_init_attrib parent_attrib;
	};
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	enum ib_qp_create_flags	create_flags;
	enum ib_qpg_type	qpg_type;
	u8			port_num; /* special QP types only */
};
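
/*
 * Example: creating an RC QP (a minimal, hypothetical sketch; pd,
 * send_cq and recv_cq are assumed to exist already, the capacity values
 * are illustrative, and ib_create_qp() is declared further down in this
 * header).
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq = send_cq,
 *		.recv_cq = recv_cq,
 *		.cap = {
 *			.max_send_wr  = 16,
 *			.max_recv_wr  = 16,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */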

enum {
	IB_DCT_CREATE_FLAG_RCV_INLINE		= 1 << 0,
	IB_DCT_CREATE_FLAGS_MASK		= IB_DCT_CREATE_FLAG_RCV_INLINE,
};

struct ib_dct_init_attr {
	struct ib_pd	       *pd;
	struct ib_cq	       *cq;
	struct ib_srq	       *srq;
	u64			dc_key;
	u8			port;
	u32			access_flags;
	u8			min_rnr_timer;
	u8			tclass;
	u32			flow_label;
	enum ib_mtu		mtu;
	u8			pkey_index;
	u8			gid_index;
	u8			hop_limit;
	u32			create_flags;
};

struct ib_dct_attr {
	u64			dc_key;
	u8			port;
	u32			access_flags;
	u8			min_rnr_timer;
	u8			tclass;
	u32			flow_label;
	enum ib_mtu		mtu;
	u8			pkey_index;
	u8			gid_index;
	u8			hop_limit;
	u32			key_violations;
	u8			state;
};

struct ib_qp_open_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20),
	IB_QP_GROUP_RSS			= (1<<21),
	IB_QP_DC_KEY			= (1<<22),
	IB_QP_SMAC			= (1<<23),
	IB_QP_ALT_SMAC			= (1<<24),
	IB_QP_VID			= (1<<25),
	IB_QP_ALT_VID			= (1<<26)
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR,
	IB_QPS_DUMMY = -1	/* force enum signed */
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

enum ib_mw_type {
	IB_MW_TYPE_1 = 1,
	IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct ib_ah_attr	ah_attr;
	struct ib_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u8			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u8			alt_port_num;
	u8			alt_timeout;
	u8			smac[ETH_ALEN];
	u8			alt_smac[ETH_ALEN];
	u16			vlan_id;
	u16			alt_vlan_id;
};

struct ib_qp_attr_ex {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct ib_ah_attr	ah_attr;
	struct ib_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u8			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u8			alt_port_num;
	u8			alt_timeout;
	u64			dct_key;
};

enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_LSO,
	IB_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV,
	IB_WR_FAST_REG_MR,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
	IB_WR_BIND_MW,
	IB_WR_REG_SIG_MR,
	/* reserve values for low level drivers' internal use.
	 * These values will not be used at all in the ib core layer.
	 */
	IB_WR_RESERVED1 = 0xf0,
	IB_WR_RESERVED2,
	IB_WR_RESERVED3,
	IB_WR_RESERVED4,
	IB_WR_RESERVED5,
	IB_WR_RESERVED6,
	IB_WR_RESERVED7,
	IB_WR_RESERVED8,
	IB_WR_RESERVED9,
	IB_WR_RESERVED10,
};

enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3),
	IB_SEND_IP_CSUM		= (1<<4),

	/* reserve bits 26-31 for low level drivers' internal use */
	IB_SEND_RESERVED_START	= (1 << 26),
	IB_SEND_RESERVED_END	= (1 << 31),
	IB_SEND_UMR_UNREG	= (1<<5)
};

struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};

struct ib_fast_reg_page_list {
	struct ib_device       *device;
	u64		       *page_list;
	unsigned int		max_page_list_len;
};

/**
 * struct ib_mw_bind_info - Parameters for a memory window bind operation.
 * @mr: A memory region to bind the memory window to.
 * @addr: The address where the memory window should begin.
 * @length: The length of the memory window, in bytes.
 * @mw_access_flags: Access flags from enum ib_access_flags for the window.
 *
 * This struct contains the shared parameters for type 1 and type 2
 * memory window bind operations.
 */
struct ib_mw_bind_info {
	struct ib_mr   *mr;
	u64		addr;
	u64		length;
	int		mw_access_flags;
};

struct ib_send_wr {
	struct ib_send_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	union {
		struct {
			u64	remote_addr;
			u32	rkey;
		} rdma;
		struct {
			u64	remote_addr;
			u64	compare_add;
			u64	swap;
			u64	compare_add_mask;
			u64	swap_mask;
			u32	rkey;
		} atomic;
		struct {
			struct ib_ah *ah;
			void   *header;
			int     hlen;
			int     mss;
			u32	remote_qpn;
			u32	remote_qkey;
			u16	pkey_index; /* valid for GSI only */
			u8	port_num;   /* valid for DR SMPs on switch only */
		} ud;
		struct {
			u64				iova_start;
			struct ib_fast_reg_page_list   *page_list;
			unsigned int			page_shift;
			unsigned int			page_list_len;
			u32				length;
			int				access_flags;
			u32				rkey;
		} fast_reg;
		struct {
			int		npages;
			int		access_flags;
			u32		mkey;
			struct ib_pd   *pd;
			u64		virt_addr;
			u64		length;
			int		page_shift;
		} umr;
		struct {
			struct ib_mw            *mw;
			/* The new rkey for the memory window. */
			u32                      rkey;
			struct ib_mw_bind_info   bind_info;
		} bind_mw;
		struct {
			struct ib_sig_attrs    *sig_attrs;
			struct ib_mr	       *sig_mr;
			int			access_flags;
			struct ib_sge	       *prot;
		} sig_handover;
	} wr;
	u32			xrc_remote_srq_num;	/* XRC TGT QPs only */
};
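
/*
 * Example: posting a one-segment RDMA WRITE (a minimal sketch; the
 * local buffer is assumed to be registered already, the identifiers
 * local_dma_addr, remote_addr, remote_rkey and MY_WRITE_WR_ID are
 * placeholders, and ib_post_send() is declared further down in this
 * header).
 *
 *	struct ib_sge sge = {
 *		.addr   = local_dma_addr,
 *		.length = len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id      = MY_WRITE_WR_ID,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_RDMA_WRITE,
 *		.send_flags = IB_SEND_SIGNALED,
 *		.wr.rdma = {
 *			.remote_addr = remote_addr,
 *			.rkey        = remote_rkey,
 *		},
 *	};
 *	struct ib_send_wr *bad_wr;
 *
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 */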

struct ib_recv_wr {
	struct ib_recv_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE	= 1,
	IB_ACCESS_REMOTE_WRITE	= (1<<1),
	IB_ACCESS_REMOTE_READ	= (1<<2),
	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
	IB_ACCESS_MW_BIND	= (1<<4),
	IB_ACCESS_ALLOCATE_MR	= (1<<5),
	IB_ZERO_BASED		= (1<<13)
};

struct ib_phys_buf {
	u64      addr;
	u64      size;
};

struct ib_mr_attr {
	struct ib_pd	*pd;
	u64		device_virt_addr;
	u64		size;
	int		mr_access_flags;
	u32		lkey;
	u32		rkey;
};

enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2)
};

/**
 * struct ib_mw_bind - Parameters for a type 1 memory window bind operation.
 * @wr_id:      Work request id.
 * @send_flags: Flags from ib_send_flags enum.
 * @bind_info:  More parameters of the bind operation.
 */
struct ib_mw_bind {
	u64		wr_id;
	int		send_flags;
	struct ib_mw_bind_info bind_info;
};

struct ib_fmr_attr {
	int	max_pages;
	int	max_maps;
	u8	page_shift;
};

struct ib_ucontext {
	struct ib_device       *device;
	struct list_head	pd_list;
	struct list_head	mr_list;
	struct list_head	mw_list;
	struct list_head	cq_list;
	struct list_head	qp_list;
	struct list_head	srq_list;
	struct list_head	ah_list;
	struct list_head	xrcd_list;
	struct list_head	rule_list;
	struct list_head	dct_list;
	int			closing;
	void		 *peer_mem_private_data;
	char		 *peer_mem_name;
};

struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	struct ib_ucontext     *context;	/* associated user context */
	void		       *object;		/* containing object */
	struct list_head	list;		/* link to context's list */
	int			id;		/* index into kernel idr */
	struct kref		ref;
	struct rw_semaphore	mutex;		/* protects .live */
	int			live;
};

struct ib_udata;
struct ib_udata_ops {
	int		      (*copy_from)(void *dest, struct ib_udata *udata,
					   size_t len);
	int		      (*copy_to)(struct ib_udata *udata, void *src,
					 size_t len);
};

struct ib_udata {
	struct ib_udata_ops    *ops;
	void __user *inbuf;
	void __user *outbuf;
	size_t       inlen;
	size_t       outlen;
};

struct ib_pd {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt; /* count all resources */
};

struct ib_xrcd {
	struct ib_device       *device;
	atomic_t		usecnt; /* count all exposed resources */
	struct inode	       *inode;

	struct mutex		tgt_qp_mutex;
	struct list_head	tgt_qp_list;
};

struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
};

enum ib_cq_attr_mask {
	IB_CQ_MODERATION               = (1 << 0),
	IB_CQ_CAP_FLAGS                = (1 << 1)
};

enum ib_cq_cap_flags {
	IB_CQ_IGNORE_OVERRUN           = (1 << 0)
};

struct ib_cq_attr {
	struct {
		u16     cq_count;
		u16     cq_period;
	} moderation;
	u32     cq_cap_flags;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	ib_comp_handler		comp_handler;
	void                  (*event_handler)(struct ib_event *, void *);
	void                   *cq_context;
	int			cqe;
	atomic_t		usecnt; /* count number of work queues */
};

struct ib_srq {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	enum ib_srq_type	srq_type;
	atomic_t		usecnt;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq   *cq;
			u32		srq_num;
		} xrc;
	} ext;
};

struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
	struct list_head	xrcd_list;
	/* count times opened, mcast attaches, flow attaches */
	atomic_t		usecnt;
	struct list_head	open_list;
	struct ib_qp           *real_qp;
	struct ib_uobject      *uobject;
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
	enum ib_qpg_type	qpg_type;
	u8			port_num;
};

struct ib_dct {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	struct ib_pd	       *pd;
	struct ib_cq	       *cq;
	struct ib_srq	       *srq;
	u32			dct_num;
};

struct ib_mr {
	struct ib_device  *device;
	struct ib_pd	  *pd;
	struct ib_uobject *uobject;
	u32		   lkey;
	u32		   rkey;
	atomic_t	   usecnt; /* count number of MWs */
};

struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
	enum ib_mw_type         type;
};

struct ib_fmr {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct list_head	list;
	u32			lkey;
	u32			rkey;
};

/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL		= 0x0,
	/* default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
	/* default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
	/* sniffer rule - receive all port traffic */
	IB_FLOW_ATTR_SNIFFER		= 0x3
};

/* Supported steering header types */
enum ib_flow_spec_type {
	/* L2 headers */
	IB_FLOW_SPEC_ETH	= 0x20,
	IB_FLOW_SPEC_IB		= 0x21,
	/* L3 header */
	IB_FLOW_SPEC_IPV4	= 0x30,
	/* L4 headers */
	IB_FLOW_SPEC_TCP	= 0x40,
	IB_FLOW_SPEC_UDP	= 0x41
};

#define IB_FLOW_SPEC_SUPPORT_LAYERS 4

/* Flow steering rule priority is set according to its domain.
 * Lower domain value means higher priority.
 */
enum ib_flow_domain {
	IB_FLOW_DOMAIN_USER,
	IB_FLOW_DOMAIN_ETHTOOL,
	IB_FLOW_DOMAIN_RFS,
	IB_FLOW_DOMAIN_NIC,
	IB_FLOW_DOMAIN_NUM /* Must be last */
};

enum ib_flow_flags {
	IB_FLOW_ATTR_FLAGS_ALLOW_LOOP_BACK = 1
};

struct ib_flow_eth_filter {
	u8	dst_mac[6];
	u8	src_mac[6];
	__be16	ether_type;
	__be16	vlan_tag;
};

struct ib_flow_spec_eth {
	enum ib_flow_spec_type	  type;
	u16			  size;
	struct ib_flow_eth_filter val;
	struct ib_flow_eth_filter mask;
};

struct ib_flow_ib_filter {
	__be32	l3_type_qpn;
	u8	dst_gid[16];
};

struct ib_flow_spec_ib {
	enum ib_flow_spec_type	 type;
	u16			 size;
	struct ib_flow_ib_filter val;
	struct ib_flow_ib_filter mask;
};

struct ib_flow_ipv4_filter {
	__be32	src_ip;
	__be32	dst_ip;
};

struct ib_flow_spec_ipv4 {
	enum ib_flow_spec_type	   type;
	u16			   size;
	struct ib_flow_ipv4_filter val;
	struct ib_flow_ipv4_filter mask;
};

struct ib_flow_tcp_udp_filter {
	__be16	dst_port;
	__be16	src_port;
};

struct ib_flow_spec_tcp_udp {
	enum ib_flow_spec_type	      type;
	u16			      size;
	struct ib_flow_tcp_udp_filter val;
	struct ib_flow_tcp_udp_filter mask;
};

union ib_flow_spec {
	struct {
		enum ib_flow_spec_type	type;
		u16			size;
	};
	struct ib_flow_spec_ib ib;
	struct ib_flow_spec_eth eth;
	struct ib_flow_spec_ipv4 ipv4;
	struct ib_flow_spec_tcp_udp tcp_udp;
};

struct ib_flow_attr {
	enum ib_flow_attr_type type;
	u16	     size;
	u16	     priority;
	u8	     num_of_specs;
	u8	     port;
	u32	     flags;
	/* Following are the optional layers according to user request
	 * struct ib_flow_spec_xxx
	 * struct ib_flow_spec_yyy
	 */
};
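
/*
 * Example: steering one Ethernet flow to a QP (a minimal, hypothetical
 * sketch; the attribute and its specs must sit in one contiguous buffer,
 * the MAC address is illustrative, and ib_create_flow() is declared
 * further down in this header).
 *
 *	struct {
 *		struct ib_flow_attr	attr;
 *		struct ib_flow_spec_eth	eth;
 *	} flow = {
 *		.attr = {
 *			.type         = IB_FLOW_ATTR_NORMAL,
 *			.size         = sizeof(flow),
 *			.num_of_specs = 1,
 *			.port         = 1,
 *		},
 *		.eth = {
 *			.type = IB_FLOW_SPEC_ETH,
 *			.size = sizeof(struct ib_flow_spec_eth),
 *			.val.dst_mac  = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
 *			.mask.dst_mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *		},
 *	};
 *	struct ib_flow *fl = ib_create_flow(qp, &flow.attr, IB_FLOW_DOMAIN_USER);
 */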

struct ib_flow {
	struct ib_qp		*qp;
	struct ib_uobject	*uobject;
};

struct ib_mad;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
	rwlock_t                lock;
	struct ib_event_handler event_handler;
	struct ib_pkey_cache  **pkey_cache;
	struct ib_gid_cache   **gid_cache;
	u8                     *lmc_cache;
};

enum verbs_values_mask {
	IBV_VALUES_HW_CLOCK = 1 << 0
};

struct ib_device_values {
	int values_mask;
	uint64_t hwclock;
};

struct ib_dma_mapping_ops {
	int		(*mapping_error)(struct ib_device *dev,
					 u64 dma_addr);
	u64		(*map_single)(struct ib_device *dev,
				      void *ptr, size_t size,
				      enum dma_data_direction direction);
	void		(*unmap_single)(struct ib_device *dev,
					u64 addr, size_t size,
					enum dma_data_direction direction);
	u64		(*map_page)(struct ib_device *dev,
				    struct page *page, unsigned long offset,
				    size_t size,
				    enum dma_data_direction direction);
	void		(*unmap_page)(struct ib_device *dev,
				      u64 addr, size_t size,
				      enum dma_data_direction direction);
	int		(*map_sg)(struct ib_device *dev,
				  struct scatterlist *sg, int nents,
				  enum dma_data_direction direction);
	void		(*unmap_sg)(struct ib_device *dev,
				    struct scatterlist *sg, int nents,
				    enum dma_data_direction direction);
	u64		(*dma_address)(struct ib_device *dev,
				       struct scatterlist *sg);
	unsigned int	(*dma_len)(struct ib_device *dev,
				   struct scatterlist *sg);
	void		(*sync_single_for_cpu)(struct ib_device *dev,
					       u64 dma_handle,
					       size_t size,
					       enum dma_data_direction dir);
	void		(*sync_single_for_device)(struct ib_device *dev,
						  u64 dma_handle,
						  size_t size,
						  enum dma_data_direction dir);
	void		*(*alloc_coherent)(struct ib_device *dev,
					   size_t size,
					   u64 *dma_handle,
					   gfp_t flag);
	void		(*free_coherent)(struct ib_device *dev,
					 size_t size, void *cpu_addr,
					 u64 dma_handle);
};

struct iw_cm_verbs;
struct ib_exp_device_attr;
struct ib_exp_qp_init_attr;

struct ib_device {
	struct device                *dma_device;

	char                          name[IB_DEVICE_NAME_MAX];

	struct list_head              event_handler_list;
	spinlock_t                    event_handler_lock;

	spinlock_t                    client_data_lock;
	struct list_head              core_list;
	struct list_head              client_data_list;

	struct ib_cache               cache;
	int                          *pkey_tbl_len;
	int                          *gid_tbl_len;

	int			      num_comp_vectors;

	struct iw_cm_verbs	     *iwcm;

	int		           (*get_protocol_stats)(struct ib_device *device,
							 union rdma_protocol_stats *stats);
	int		           (*query_device)(struct ib_device *device,
						   struct ib_device_attr *device_attr);
	int		           (*query_port)(struct ib_device *device,
						 u8 port_num,
						 struct ib_port_attr *port_attr);
	enum rdma_link_layer	   (*get_link_layer)(struct ib_device *device,
						     u8 port_num);
	/* When calling get_netdev, the HW vendor's driver should return the
	 * net device of device @device at port @port_num. The function
	 * is called with the rtnl lock held. The HW vendor's device driver
	 * must guarantee that get_netdev returns NULL before the net
	 * device reaches the NETDEV_UNREGISTER_FINAL state.
	 */
	struct net_device	*(*get_netdev)(struct ib_device *device,
					       u8 port_num);
	int		           (*query_gid)(struct ib_device *device,
						u8 port_num, int index,
						union ib_gid *gid);
	int		           (*query_pkey)(struct ib_device *device,
						 u8 port_num, u16 index, u16 *pkey);
	int		           (*modify_device)(struct ib_device *device,
						    int device_modify_mask,
						    struct ib_device_modify *device_modify);
	int		           (*modify_port)(struct ib_device *device,
						  u8 port_num, int port_modify_mask,
						  struct ib_port_modify *port_modify);
	struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
						     struct ib_udata *udata);
	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
	int                        (*mmap)(struct ib_ucontext *context,
					   struct vm_area_struct *vma);
	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
					       struct ib_ucontext *context,
					       struct ib_udata *udata);
	int                        (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
						struct ib_ah_attr *ah_attr);
	int                        (*modify_ah)(struct ib_ah *ah,
						struct ib_ah_attr *ah_attr);
	int                        (*query_ah)(struct ib_ah *ah,
					       struct ib_ah_attr *ah_attr);
	int                        (*destroy_ah)(struct ib_ah *ah);
	struct ib_srq *            (*create_srq)(struct ib_pd *pd,
						 struct ib_srq_init_attr *srq_init_attr,
						 struct ib_udata *udata);
	int                        (*modify_srq)(struct ib_srq *srq,
						 struct ib_srq_attr *srq_attr,
						 enum ib_srq_attr_mask srq_attr_mask,
						 struct ib_udata *udata);
	int                        (*query_srq)(struct ib_srq *srq,
						struct ib_srq_attr *srq_attr);
	int                        (*destroy_srq)(struct ib_srq *srq);
	int                        (*post_srq_recv)(struct ib_srq *srq,
						    struct ib_recv_wr *recv_wr,
						    struct ib_recv_wr **bad_recv_wr);
	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
						struct ib_qp_init_attr *qp_init_attr,
						struct ib_udata *udata);
	int                        (*modify_qp)(struct ib_qp *qp,
						struct ib_qp_attr *qp_attr,
						int qp_attr_mask,
						struct ib_udata *udata);
	int                        (*query_qp)(struct ib_qp *qp,
					       struct ib_qp_attr *qp_attr,
					       int qp_attr_mask,
					       struct ib_qp_init_attr *qp_init_attr);
	int                        (*destroy_qp)(struct ib_qp *qp);
	int                        (*post_send)(struct ib_qp *qp,
						struct ib_send_wr *send_wr,
						struct ib_send_wr **bad_send_wr);
	int                        (*post_recv)(struct ib_qp *qp,
						struct ib_recv_wr *recv_wr,
						struct ib_recv_wr **bad_recv_wr);
	struct ib_cq *             (*create_cq)(struct ib_device *device,
						struct ib_cq_init_attr *attr,
						struct ib_ucontext *context,
						struct ib_udata *udata);
	int                        (*modify_cq)(struct ib_cq *cq,
						struct ib_cq_attr *cq_attr,
						int cq_attr_mask);
	int                        (*destroy_cq)(struct ib_cq *cq);
	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
						struct ib_udata *udata);
	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
					      struct ib_wc *wc);
	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int                        (*req_notify_cq)(struct ib_cq *cq,
						    enum ib_cq_notify_flags flags);
	int                        (*req_ncomp_notif)(struct ib_cq *cq,
						      int wc_cnt);
	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
						 int mr_access_flags);
	struct ib_mr *             (*reg_phys_mr)(struct ib_pd *pd,
						  struct ib_phys_buf *phys_buf_array,
						  int num_phys_buf,
						  int mr_access_flags,
						  u64 *iova_start);
	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
						  u64 start, u64 length,
						  u64 virt_addr,
						  int mr_access_flags,
						  struct ib_udata *udata,
						  int mr_id);
	int                        (*query_mr)(struct ib_mr *mr,
					       struct ib_mr_attr *mr_attr);
	int                        (*dereg_mr)(struct ib_mr *mr);
	int                        (*destroy_mr)(struct ib_mr *mr);
	struct ib_mr *		   (*create_mr)(struct ib_pd *pd,
						struct ib_mr_init_attr *mr_init_attr);
	struct ib_mr *		   (*alloc_fast_reg_mr)(struct ib_pd *pd,
					       int max_page_list_len);
	struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
								   int page_list_len);
	void			   (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
	int                        (*rereg_phys_mr)(struct ib_mr *mr,
						    int mr_rereg_mask,
						    struct ib_pd *pd,
						    struct ib_phys_buf *phys_buf_array,
						    int num_phys_buf,
						    int mr_access_flags,
						    u64 *iova_start);
	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd,
					       enum ib_mw_type type);
	int                        (*bind_mw)(struct ib_qp *qp,
					      struct ib_mw *mw,
					      struct ib_mw_bind *mw_bind);
	int                        (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *	           (*alloc_fmr)(struct ib_pd *pd,
						int mr_access_flags,
						struct ib_fmr_attr *fmr_attr);
	int		           (*map_phys_fmr)(struct ib_fmr *fmr,
						   u64 *page_list, int list_len,
						   u64 iova);
	int		           (*unmap_fmr)(struct list_head *fmr_list);
	int		           (*dealloc_fmr)(struct ib_fmr *fmr);
	int                        (*attach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*detach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*process_mad)(struct ib_device *device,
						  int process_mad_flags,
						  u8 port_num,
						  struct ib_wc *in_wc,
						  struct ib_grh *in_grh,
						  struct ib_mad *in_mad,
						  struct ib_mad *out_mad);
	struct ib_xrcd *	   (*alloc_xrcd)(struct ib_device *device,
						 struct ib_ucontext *ucontext,
						 struct ib_udata *udata);
	int			   (*dealloc_xrcd)(struct ib_xrcd *xrcd);
	struct ib_flow *	   (*create_flow)(struct ib_qp *qp,
						  struct ib_flow_attr
						  *flow_attr,
						  int domain);
	int			   (*destroy_flow)(struct ib_flow *flow_id);
	int			   (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
						      struct ib_mr_status *mr_status);

	unsigned long		   (*get_unmapped_area)(struct file *file,
					unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
	int			   (*ioctl)(struct ib_ucontext *context,
					    unsigned int cmd,
					    unsigned long arg);
	int			   (*query_values)(struct ib_device *device,
					    int q_values,
					    struct ib_device_values *values);
	struct ib_dma_mapping_ops   *dma_ops;

	struct module               *owner;
	struct device                dev;
	struct kobject               *ports_parent;
	struct list_head             port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	}                            reg_state;

	int			     uverbs_abi_ver;
	u64			     uverbs_cmd_mask;
	u64			     uverbs_ex_cmd_mask;

	char			     node_desc[64];
	__be64			     node_guid;
	u32			     local_dma_lkey;
	u8                           node_type;
	u8                           phys_port_cnt;
	int			     cmd_perf;
	u64			     cmd_avg;
	u32			     cmd_n;
	spinlock_t		     cmd_perf_lock;

	/*
	 * Experimental data and functions
	 */
	int			(*exp_query_device)(struct ib_device *device,
						    struct ib_exp_device_attr *device_attr);
	struct ib_qp *		(*exp_create_qp)(struct ib_pd *pd,
						 struct ib_exp_qp_init_attr *qp_init_attr,
						 struct ib_udata *udata);
	struct ib_dct *		(*exp_create_dct)(struct ib_pd *pd,
					      struct ib_dct_init_attr *attr,
					      struct ib_udata *udata);
	int			(*exp_destroy_dct)(struct ib_dct *dct);
	int			(*exp_query_dct)(struct ib_dct *dct, struct ib_dct_attr *attr);

	u64			uverbs_exp_cmd_mask;
};

struct ib_client {
	char  *name;
	void (*add)   (struct ib_device *);
	void (*remove)(struct ib_device *);

	struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *));
void ib_unregister_device(struct ib_device *device);

int ib_register_client   (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
			 void *data);

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return udata->ops->copy_from(dest, udata, len);
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return udata->ops->copy_to(udata, src, len);
}
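
/*
 * Example: round-tripping a command through ib_udata in a driver verb
 * (a minimal, hypothetical sketch; my_create_cq_req/resp are placeholder
 * ABI structures, not part of this header).
 *
 *	struct my_create_cq_req req;
 *	struct my_create_cq_resp resp;
 *
 *	if (ib_copy_from_udata(&req, udata, sizeof(req)))
 *		return ERR_PTR(-EFAULT);
 *
 *	resp.cqn = cqn;		// hand the new CQ number back to userspace
 *	if (ib_copy_to_udata(udata, &resp, sizeof(resp)))
 *		return ERR_PTR(-EFAULT);
 */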

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 * @ll: link layer of port
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll);
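
/*
 * Example: validating a transition inside a driver's modify_qp method
 * (a minimal sketch; my_modify_qp and the qp_current_state() helper are
 * hypothetical, and rdma_port_get_link_layer() is declared below).
 *
 *	static int my_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
 *				int attr_mask, struct ib_udata *udata)
 *	{
 *		enum ib_qp_state cur_state = attr_mask & IB_QP_CUR_STATE ?
 *			attr->cur_qp_state : qp_current_state(qp);
 *		enum ib_qp_state new_state = attr_mask & IB_QP_STATE ?
 *			attr->qp_state : cur_state;
 *
 *		if (!ib_modify_qp_is_ok(cur_state, new_state, qp->qp_type,
 *					attr_mask,
 *					rdma_port_get_link_layer(qp->device,
 *								 qp->port_num)))
 *			return -EINVAL;
 *		...
 *	}
 */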
1917 
1918 int ib_register_event_handler  (struct ib_event_handler *event_handler);
1919 int ib_unregister_event_handler(struct ib_event_handler *event_handler);
1920 void ib_dispatch_event(struct ib_event *event);
1921 
1922 int ib_query_device(struct ib_device *device,
1923 		    struct ib_device_attr *device_attr);
1924 
1925 int ib_query_port(struct ib_device *device,
1926 		  u8 port_num, struct ib_port_attr *port_attr);
1927 
1928 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
1929 					       u8 port_num);
1930 
1931 int ib_query_gid(struct ib_device *device,
1932 		 u8 port_num, int index, union ib_gid *gid);
1933 
1934 int ib_query_pkey(struct ib_device *device,
1935 		  u8 port_num, u16 index, u16 *pkey);
1936 
1937 int ib_modify_device(struct ib_device *device,
1938 		     int device_modify_mask,
1939 		     struct ib_device_modify *device_modify);
1940 
1941 int ib_modify_port(struct ib_device *device,
1942 		   u8 port_num, int port_modify_mask,
1943 		   struct ib_port_modify *port_modify);
1944 
1945 int ib_find_gid(struct ib_device *device, union ib_gid *gid,
1946 		u8 *port_num, u16 *index);
1947 
1948 int ib_find_pkey(struct ib_device *device,
1949 		 u8 port_num, u16 pkey, u16 *index);
1950 
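/*
 * Illustrative sketch (not part of this header): walking a device's
 * ports (numbered from 1) and fetching each port's LID and first GID.
 *
 *	struct ib_port_attr port_attr;
 *	union ib_gid gid;
 *	u8 port;
 *
 *	for (port = 1; port <= device->phys_port_cnt; port++) {
 *		if (ib_query_port(device, port, &port_attr) != 0 ||
 *		    ib_query_gid(device, port, 0, &gid) != 0)
 *			continue;
 *		... port_attr.lid and gid are valid here ...
 *	}
 */
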
1951 /**
1952  * ib_alloc_pd - Allocates an unused protection domain.
1953  * @device: The device on which to allocate the protection domain.
1954  *
1955  * A protection domain object provides an association between QPs, shared
1956  * receive queues, address handles, memory regions, and memory windows.
1957  */
1958 struct ib_pd *ib_alloc_pd(struct ib_device *device);
1959 
1960 /**
1961  * ib_dealloc_pd - Deallocates a protection domain.
1962  * @pd: The protection domain to deallocate.
1963  */
1964 int ib_dealloc_pd(struct ib_pd *pd);
1965 
1966 /**
1967  * ib_create_ah - Creates an address handle for the given address vector.
1968  * @pd: The protection domain associated with the address handle.
1969  * @ah_attr: The attributes of the address vector.
1970  *
1971  * The address handle is used to reference a local or global destination
1972  * in all UD QP post sends.
1973  */
1974 struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
1975 
1976 /**
1977  * ib_init_ah_from_wc - Initializes address handle attributes from a
1978  *   work completion.
1979  * @device: Device on which the received message arrived.
1980  * @port_num: Port on which the received message arrived.
1981  * @wc: Work completion associated with the received message.
1982  * @grh: References the received global route header.  This parameter is
1983  *   ignored unless the work completion indicates that the GRH is valid.
1984  * @ah_attr: Returned attributes that can be used when creating an address
1985  *   handle for replying to the message.
1986  */
1987 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
1988 		       struct ib_grh *grh, struct ib_ah_attr *ah_attr);
1989 
1990 /**
1991  * ib_create_ah_from_wc - Creates an address handle associated with the
1992  *   sender of the specified work completion.
1993  * @pd: The protection domain associated with the address handle.
1994  * @wc: Work completion information associated with a received message.
1995  * @grh: References the received global route header.  This parameter is
1996  *   ignored unless the work completion indicates that the GRH is valid.
1997  * @port_num: The outbound port number to associate with the address.
1998  *
1999  * The address handle is used to reference a local or global destination
2000  * in all UD QP post sends.
2001  */
2002 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
2003 				   struct ib_grh *grh, u8 port_num);
2004 
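/*
 * Illustrative sketch (not part of this header): replying to a
 * received UD message.  recv_buf is a placeholder for the receive
 * buffer, whose first 40 bytes hold the GRH when one is present.
 *
 *	struct ib_ah *ah;
 *
 *	ah = ib_create_ah_from_wc(pd, &wc, (struct ib_grh *)recv_buf,
 *				  port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	... post sends on the UD QP using ah, then ...
 *	ib_destroy_ah(ah);
 */
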
2005 /**
2006  * ib_modify_ah - Modifies the address vector associated with an address
2007  *   handle.
2008  * @ah: The address handle to modify.
2009  * @ah_attr: The new address vector attributes to associate with the
2010  *   address handle.
2011  */
2012 int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
2013 
2014 /**
2015  * ib_query_ah - Queries the address vector associated with an address
2016  *   handle.
2017  * @ah: The address handle to query.
2018  * @ah_attr: The address vector attributes associated with the address
2019  *   handle.
2020  */
2021 int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
2022 
2023 /**
2024  * ib_destroy_ah - Destroys an address handle.
2025  * @ah: The address handle to destroy.
2026  */
2027 int ib_destroy_ah(struct ib_ah *ah);
2028 
2029 /**
 * ib_create_srq - Creates an SRQ associated with the specified protection
2031  *   domain.
2032  * @pd: The protection domain associated with the SRQ.
2033  * @srq_init_attr: A list of initial attributes required to create the
2034  *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
2035  *   the actual capabilities of the created SRQ.
2036  *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
2038  * requested size of the SRQ, and set to the actual values allocated
2039  * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
2040  * will always be at least as large as the requested values.
2041  */
2042 struct ib_srq *ib_create_srq(struct ib_pd *pd,
2043 			     struct ib_srq_init_attr *srq_init_attr);
2044 
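/*
 * Illustrative sketch (not part of this header): creating a basic SRQ
 * and reading back the actually allocated limits.
 *
 *	struct ib_srq_init_attr init_attr = {
 *		.attr = {
 *			.max_wr  = 256,
 *			.max_sge = 1,
 *		},
 *	};
 *	struct ib_srq *srq;
 *
 *	srq = ib_create_srq(pd, &init_attr);
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 *	... init_attr.attr.max_wr now holds the allocated depth ...
 */
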
2045 /**
2046  * ib_modify_srq - Modifies the attributes for the specified SRQ.
2047  * @srq: The SRQ to modify.
2048  * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
2049  *   the current values of selected SRQ attributes are returned.
2050  * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
2051  *   are being modified.
2052  *
2053  * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
2054  * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
2055  * the number of receives queued drops below the limit.
2056  */
2057 int ib_modify_srq(struct ib_srq *srq,
2058 		  struct ib_srq_attr *srq_attr,
2059 		  enum ib_srq_attr_mask srq_attr_mask);
2060 
2061 /**
2062  * ib_query_srq - Returns the attribute list and current values for the
2063  *   specified SRQ.
2064  * @srq: The SRQ to query.
2065  * @srq_attr: The attributes of the specified SRQ.
2066  */
2067 int ib_query_srq(struct ib_srq *srq,
2068 		 struct ib_srq_attr *srq_attr);
2069 
2070 /**
2071  * ib_destroy_srq - Destroys the specified SRQ.
2072  * @srq: The SRQ to destroy.
2073  */
2074 int ib_destroy_srq(struct ib_srq *srq);
2075 
2076 /**
2077  * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
2078  * @srq: The SRQ to post the work request on.
2079  * @recv_wr: A list of work requests to post on the receive queue.
2080  * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the SRQ.
2082  */
2083 static inline int ib_post_srq_recv(struct ib_srq *srq,
2084 				   struct ib_recv_wr *recv_wr,
2085 				   struct ib_recv_wr **bad_recv_wr)
2086 {
2087 	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
2088 }
2089 
2090 /**
2091  * ib_create_qp - Creates a QP associated with the specified protection
2092  *   domain.
2093  * @pd: The protection domain associated with the QP.
2094  * @qp_init_attr: A list of initial attributes required to create the
2095  *   QP.  If QP creation succeeds, then the attributes are updated to
2096  *   the actual capabilities of the created QP.
2097  */
2098 struct ib_qp *ib_create_qp(struct ib_pd *pd,
2099 			   struct ib_qp_init_attr *qp_init_attr);
2100 
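/*
 * Illustrative sketch (not part of this header): creating an RC QP
 * that shares one CQ between its send and receive queues.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq = cq,
 *		.recv_cq = cq,
 *		.cap     = {
 *			.max_send_wr  = 64,
 *			.max_recv_wr  = 64,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 */
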
2101 /**
2102  * ib_modify_qp - Modifies the attributes for the specified QP and then
2103  *   transitions the QP to the given state.
2104  * @qp: The QP to modify.
2105  * @qp_attr: On input, specifies the QP attributes to modify.  On output,
2106  *   the current values of selected QP attributes are returned.
2107  * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
2108  *   are being modified.
2109  */
2110 int ib_modify_qp(struct ib_qp *qp,
2111 		 struct ib_qp_attr *qp_attr,
2112 		 int qp_attr_mask);
2113 
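/*
 * Illustrative sketch (not part of this header): moving a freshly
 * created RC QP from RESET to INIT; later transitions to RTR and RTS
 * follow the same pattern with their own masks.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state        = IB_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = port_num,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	error = ib_modify_qp(qp, &attr,
 *			     IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			     IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */
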
2114 /**
2115  * ib_query_qp - Returns the attribute list and current values for the
2116  *   specified QP.
2117  * @qp: The QP to query.
2118  * @qp_attr: The attributes of the specified QP.
2119  * @qp_attr_mask: A bit-mask used to select specific attributes to query.
2120  * @qp_init_attr: Additional attributes of the selected QP.
2121  *
2122  * The qp_attr_mask may be used to limit the query to gathering only the
2123  * selected attributes.
2124  */
2125 int ib_query_qp(struct ib_qp *qp,
2126 		struct ib_qp_attr *qp_attr,
2127 		int qp_attr_mask,
2128 		struct ib_qp_init_attr *qp_init_attr);
2129 
2130 /**
2131  * ib_destroy_qp - Destroys the specified QP.
2132  * @qp: The QP to destroy.
2133  */
2134 int ib_destroy_qp(struct ib_qp *qp);
2135 
2136 /**
2137  * ib_open_qp - Obtain a reference to an existing sharable QP.
 * @xrcd: XRC domain
2139  * @qp_open_attr: Attributes identifying the QP to open.
2140  *
2141  * Returns a reference to a sharable QP.
2142  */
2143 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
2144 			 struct ib_qp_open_attr *qp_open_attr);
2145 
2146 /**
2147  * ib_close_qp - Release an external reference to a QP.
2148  * @qp: The QP handle to release
2149  *
2150  * The opened QP handle is released by the caller.  The underlying
2151  * shared QP is not destroyed until all internal references are released.
2152  */
2153 int ib_close_qp(struct ib_qp *qp);
2154 
2155 /**
2156  * ib_post_send - Posts a list of work requests to the send queue of
2157  *   the specified QP.
2158  * @qp: The QP to post the work request on.
2159  * @send_wr: A list of work requests to post on the send queue.
2160  * @bad_send_wr: On an immediate failure, this parameter will reference
2161  *   the work request that failed to be posted on the QP.
2162  *
2163  * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
2164  * error is returned, the QP state shall not be affected,
2165  * ib_post_send() will return an immediate error after queueing any
2166  * earlier work requests in the list.
2167  */
2168 static inline int ib_post_send(struct ib_qp *qp,
2169 			       struct ib_send_wr *send_wr,
2170 			       struct ib_send_wr **bad_send_wr)
2171 {
2172 	return qp->device->post_send(qp, send_wr, bad_send_wr);
2173 }
2174 
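/*
 * Illustrative sketch (not part of this header): posting one signaled
 * SEND of a buffer mapped earlier (dma_addr, len, and my_wr_id are
 * placeholders; the lkey comes from the posting MR).
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id      = my_wr_id,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	struct ib_send_wr *bad_wr;
 *
 *	error = ib_post_send(qp, &wr, &bad_wr);
 */
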
2175 /**
2176  * ib_post_recv - Posts a list of work requests to the receive queue of
2177  *   the specified QP.
2178  * @qp: The QP to post the work request on.
2179  * @recv_wr: A list of work requests to post on the receive queue.
2180  * @bad_recv_wr: On an immediate failure, this parameter will reference
2181  *   the work request that failed to be posted on the QP.
2182  */
2183 static inline int ib_post_recv(struct ib_qp *qp,
2184 			       struct ib_recv_wr *recv_wr,
2185 			       struct ib_recv_wr **bad_recv_wr)
2186 {
2187 	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
2188 }
2189 
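/*
 * Illustrative sketch (not part of this header): posting one receive
 * buffer; the same pattern applies to ib_post_srq_recv() above.
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id   = my_wr_id,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *	struct ib_recv_wr *bad_wr;
 *
 *	error = ib_post_recv(qp, &wr, &bad_wr);
 */
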
2190 /**
2191  * ib_create_cq - Creates a CQ on the specified device.
2192  * @device: The device on which to create the CQ.
2193  * @comp_handler: A user-specified callback that is invoked when a
2194  *   completion event occurs on the CQ.
2195  * @event_handler: A user-specified callback that is invoked when an
2196  *   asynchronous event not associated with a completion occurs on the CQ.
2197  * @cq_context: Context associated with the CQ returned to the user via
2198  *   the associated completion and event handlers.
2199  * @cqe: The minimum size of the CQ.
 * @comp_vector: Completion vector used to signal completion events.
2201  *     Must be >= 0 and < context->num_comp_vectors.
2202  *
2203  * Users can examine the cq structure to determine the actual CQ size.
2204  */
2205 struct ib_cq *ib_create_cq(struct ib_device *device,
2206 			   ib_comp_handler comp_handler,
2207 			   void (*event_handler)(struct ib_event *, void *),
2208 			   void *cq_context, int cqe, int comp_vector);
2209 
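/*
 * Illustrative sketch (not part of this header): a CQ whose handler
 * rearms notification and drains completions (my_comp_handler and
 * my_ctx are placeholders).
 *
 *	static void my_comp_handler(struct ib_cq *cq, void *ctx)
 *	{
 *		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 *		... poll the CQ, see ib_poll_cq() below ...
 *	}
 *
 *	cq = ib_create_cq(device, my_comp_handler, NULL, my_ctx, 256, 0);
 */
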
2210 /**
2211  * ib_resize_cq - Modifies the capacity of the CQ.
2212  * @cq: The CQ to resize.
2213  * @cqe: The minimum size of the CQ.
2214  *
2215  * Users can examine the cq structure to determine the actual CQ size.
2216  */
2217 int ib_resize_cq(struct ib_cq *cq, int cqe);
2218 
2219 /**
 * ib_modify_cq - Modifies the attributes of the specified CQ.
2222  * @cq: The CQ to modify.
2223  * @cq_attr: specifies the CQ attributes to modify.
2224  * @cq_attr_mask: A bit-mask used to specify which attributes of the CQ
2225  *   are being modified.
2226  */
2227 int ib_modify_cq(struct ib_cq *cq,
2228 		 struct ib_cq_attr *cq_attr,
2229 		 int cq_attr_mask);
2230 
2231 /**
2232  * ib_destroy_cq - Destroys the specified CQ.
2233  * @cq: The CQ to destroy.
2234  */
2235 int ib_destroy_cq(struct ib_cq *cq);
2236 
2237 /**
2238  * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
2242  *   will be returned
2243  *
2244  * Poll a CQ for (possibly multiple) completions.  If the return value
2245  * is < 0, an error occurred.  If the return value is >= 0, it is the
2246  * number of completions returned.  If the return value is
2247  * non-negative and < num_entries, then the CQ was emptied.
2248  */
2249 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
2250 			     struct ib_wc *wc)
2251 {
2252 	return cq->device->poll_cq(cq, num_entries, wc);
2253 }
2254 
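/*
 * Illustrative sketch (not part of this header): draining all
 * currently available completions one at a time.
 *
 *	struct ib_wc wc;
 *
 *	while (ib_poll_cq(cq, 1, &wc) > 0) {
 *		if (wc.status != IB_WC_SUCCESS)
 *			... handle the failed request, see wc.wr_id ...
 *		else
 *			... handle the completion ...
 *	}
 */
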
2255 /**
2256  * ib_peek_cq - Returns the number of unreaped completions currently
2257  *   on the specified CQ.
2258  * @cq: The CQ to peek.
2259  * @wc_cnt: A minimum number of unreaped completions to check for.
2260  *
2261  * If the number of unreaped completions is greater than or equal to wc_cnt,
2262  * this function returns wc_cnt, otherwise, it returns the actual number of
2263  * unreaped completions.
2264  */
2265 int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
2266 
2267 /**
2268  * ib_req_notify_cq - Request completion notification on a CQ.
2269  * @cq: The CQ to generate an event for.
2270  * @flags:
2271  *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
2272  *   to request an event on the next solicited event or next work
 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
2274  *   may also be |ed in to request a hint about missed events, as
2275  *   described below.
2276  *
2277  * Return Value:
2278  *    < 0 means an error occurred while requesting notification
2279  *   == 0 means notification was requested successfully, and if
2280  *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
2281  *        were missed and it is safe to wait for another event.  In
 *        this case it is guaranteed that any work completions added
2283  *        to the CQ since the last CQ poll will trigger a completion
2284  *        notification event.
2285  *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
2286  *        in.  It means that the consumer must poll the CQ again to
2287  *        make sure it is empty to avoid missing an event because of a
2288  *        race between requesting notification and an entry being
2289  *        added to the CQ.  This return value means it is possible
2290  *        (but not guaranteed) that a work completion has been added
2291  *        to the CQ since the last poll without triggering a
2292  *        completion notification event.
2293  */
2294 static inline int ib_req_notify_cq(struct ib_cq *cq,
2295 				   enum ib_cq_notify_flags flags)
2296 {
2297 	return cq->device->req_notify_cq(cq, flags);
2298 }
2299 
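/*
 * Illustrative sketch (not part of this header): the race-free rearm
 * loop described above, using IB_CQ_REPORT_MISSED_EVENTS.
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			... handle the completion ...
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */
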
2300 /**
2301  * ib_req_ncomp_notif - Request completion notification when there are
2302  *   at least the specified number of unreaped completions on the CQ.
2303  * @cq: The CQ to generate an event for.
2304  * @wc_cnt: The number of unreaped completions that should be on the
2305  *   CQ before an event is generated.
2306  */
2307 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
2308 {
2309 	return cq->device->req_ncomp_notif ?
2310 		cq->device->req_ncomp_notif(cq, wc_cnt) :
2311 		-ENOSYS;
2312 }
2313 
2314 /**
2315  * ib_get_dma_mr - Returns a memory region for system memory that is
2316  *   usable for DMA.
2317  * @pd: The protection domain associated with the memory region.
2318  * @mr_access_flags: Specifies the memory access rights.
2319  *
2320  * Note that the ib_dma_*() functions defined below must be used
2321  * to create/destroy addresses used with the Lkey or Rkey returned
2322  * by ib_get_dma_mr().
2323  */
2324 struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
2325 
2326 /**
2327  * ib_dma_mapping_error - check a DMA addr for error
2328  * @dev: The device for which the dma_addr was created
2329  * @dma_addr: The DMA address to check
2330  */
2331 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
2332 {
2333 	if (dev->dma_ops)
2334 		return dev->dma_ops->mapping_error(dev, dma_addr);
2335 	return dma_mapping_error(dev->dma_device, dma_addr);
2336 }
2337 
2338 /**
2339  * ib_dma_map_single - Map a kernel virtual address to DMA address
2340  * @dev: The device for which the dma_addr is to be created
2341  * @cpu_addr: The kernel virtual address
2342  * @size: The size of the region in bytes
2343  * @direction: The direction of the DMA
2344  */
2345 static inline u64 ib_dma_map_single(struct ib_device *dev,
2346 				    void *cpu_addr, size_t size,
2347 				    enum dma_data_direction direction)
2348 {
2349 	if (dev->dma_ops)
2350 		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
2351 	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
2352 }
2353 
2354 /**
2355  * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
2356  * @dev: The device for which the DMA address was created
2357  * @addr: The DMA address
2358  * @size: The size of the region in bytes
2359  * @direction: The direction of the DMA
2360  */
2361 static inline void ib_dma_unmap_single(struct ib_device *dev,
2362 				       u64 addr, size_t size,
2363 				       enum dma_data_direction direction)
2364 {
2365 	if (dev->dma_ops)
2366 		dev->dma_ops->unmap_single(dev, addr, size, direction);
2367 	else
2368 		dma_unmap_single(dev->dma_device, addr, size, direction);
2369 }
2370 
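/*
 * Illustrative sketch (not part of this header): the usual
 * map/check/use/unmap sequence for a kernel buffer (buf and len are
 * placeholders).
 *
 *	u64 dma_addr;
 *
 *	dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *	... post work requests referencing dma_addr, wait for them ...
 *	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */
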
2371 static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
2372 					  void *cpu_addr, size_t size,
2373 					  enum dma_data_direction direction,
2374 					  struct dma_attrs *attrs)
2375 {
2376 	return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
2377 				    direction, attrs);
2378 }
2379 
2380 static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
2381 					     u64 addr, size_t size,
2382 					     enum dma_data_direction direction,
2383 					     struct dma_attrs *attrs)
2384 {
2385 	return dma_unmap_single_attrs(dev->dma_device, addr, size,
2386 				      direction, attrs);
2387 }
2388 
2389 /**
2390  * ib_dma_map_page - Map a physical page to DMA address
2391  * @dev: The device for which the dma_addr is to be created
2392  * @page: The page to be mapped
2393  * @offset: The offset within the page
2394  * @size: The size of the region in bytes
2395  * @direction: The direction of the DMA
2396  */
2397 static inline u64 ib_dma_map_page(struct ib_device *dev,
2398 				  struct page *page,
2399 				  unsigned long offset,
2400 				  size_t size,
2401 					 enum dma_data_direction direction)
2402 {
2403 	if (dev->dma_ops)
2404 		return dev->dma_ops->map_page(dev, page, offset, size, direction);
2405 	return dma_map_page(dev->dma_device, page, offset, size, direction);
2406 }
2407 
2408 /**
2409  * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
2410  * @dev: The device for which the DMA address was created
2411  * @addr: The DMA address
2412  * @size: The size of the region in bytes
2413  * @direction: The direction of the DMA
2414  */
2415 static inline void ib_dma_unmap_page(struct ib_device *dev,
2416 				     u64 addr, size_t size,
2417 				     enum dma_data_direction direction)
2418 {
2419 	if (dev->dma_ops)
2420 		dev->dma_ops->unmap_page(dev, addr, size, direction);
2421 	else
2422 		dma_unmap_page(dev->dma_device, addr, size, direction);
2423 }
2424 
2425 /**
2426  * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
2427  * @dev: The device for which the DMA addresses are to be created
2428  * @sg: The array of scatter/gather entries
2429  * @nents: The number of scatter/gather entries
2430  * @direction: The direction of the DMA
2431  */
2432 static inline int ib_dma_map_sg(struct ib_device *dev,
2433 				struct scatterlist *sg, int nents,
2434 				enum dma_data_direction direction)
2435 {
2436 	if (dev->dma_ops)
2437 		return dev->dma_ops->map_sg(dev, sg, nents, direction);
2438 	return dma_map_sg(dev->dma_device, sg, nents, direction);
2439 }
2440 
2441 /**
2442  * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
2443  * @dev: The device for which the DMA addresses were created
2444  * @sg: The array of scatter/gather entries
2445  * @nents: The number of scatter/gather entries
2446  * @direction: The direction of the DMA
2447  */
2448 static inline void ib_dma_unmap_sg(struct ib_device *dev,
2449 				   struct scatterlist *sg, int nents,
2450 				   enum dma_data_direction direction)
2451 {
2452 	if (dev->dma_ops)
2453 		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
2454 	else
2455 		dma_unmap_sg(dev->dma_device, sg, nents, direction);
2456 }
2457 
2458 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
2459 				      struct scatterlist *sg, int nents,
2460 				      enum dma_data_direction direction,
2461 				      struct dma_attrs *attrs)
2462 {
2463 	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
2464 }
2465 
2466 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
2467 					 struct scatterlist *sg, int nents,
2468 					 enum dma_data_direction direction,
2469 					 struct dma_attrs *attrs)
2470 {
2471 	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
2472 }

/**
2474  * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
2475  * @dev: The device for which the DMA addresses were created
2476  * @sg: The scatter/gather entry
2477  */
2478 static inline u64 ib_sg_dma_address(struct ib_device *dev,
2479 				    struct scatterlist *sg)
2480 {
2481 	if (dev->dma_ops)
2482 		return dev->dma_ops->dma_address(dev, sg);
2483 	return sg_dma_address(sg);
2484 }
2485 
2486 /**
2487  * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
2488  * @dev: The device for which the DMA addresses were created
2489  * @sg: The scatter/gather entry
2490  */
2491 static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
2492 					 struct scatterlist *sg)
2493 {
2494 	if (dev->dma_ops)
2495 		return dev->dma_ops->dma_len(dev, sg);
2496 	return sg_dma_len(sg);
2497 }
2498 
2499 /**
2500  * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
2501  * @dev: The device for which the DMA address was created
2502  * @addr: The DMA address
2503  * @size: The size of the region in bytes
2504  * @dir: The direction of the DMA
2505  */
2506 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
2507 					      u64 addr,
2508 					      size_t size,
2509 					      enum dma_data_direction dir)
2510 {
2511 	if (dev->dma_ops)
2512 		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
2513 	else
2514 		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
2515 }
2516 
2517 /**
2518  * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
2519  * @dev: The device for which the DMA address was created
2520  * @addr: The DMA address
2521  * @size: The size of the region in bytes
2522  * @dir: The direction of the DMA
2523  */
2524 static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
2525 						 u64 addr,
2526 						 size_t size,
2527 						 enum dma_data_direction dir)
2528 {
2529 	if (dev->dma_ops)
2530 		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
2531 	else
2532 		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
2533 }
2534 
2535 /**
2536  * ib_dma_alloc_coherent - Allocate memory and map it for DMA
2537  * @dev: The device for which the DMA address is requested
2538  * @size: The size of the region to allocate in bytes
2539  * @dma_handle: A pointer for returning the DMA address of the region
2540  * @flag: memory allocator flags
2541  */
2542 static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
2543 					   size_t size,
2544 					   u64 *dma_handle,
2545 					   gfp_t flag)
2546 {
2547 	if (dev->dma_ops)
2548 		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
2549 	else {
2550 		dma_addr_t handle;
2551 		void *ret;
2552 
2553 		ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
2554 		*dma_handle = handle;
2555 		return ret;
2556 	}
2557 }
2558 
2559 /**
2560  * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
2561  * @dev: The device for which the DMA addresses were allocated
2562  * @size: The size of the region
2563  * @cpu_addr: the address returned by ib_dma_alloc_coherent()
2564  * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
2565  */
2566 static inline void ib_dma_free_coherent(struct ib_device *dev,
2567 					size_t size, void *cpu_addr,
2568 					u64 dma_handle)
2569 {
2570 	if (dev->dma_ops)
2571 		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
2572 	else
2573 		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
2574 }
2575 
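/*
 * Illustrative sketch (not part of this header): a coherent buffer
 * shared with the HCA for its whole lifetime.
 *
 *	u64 dma_handle;
 *	void *buf;
 *
 *	buf = ib_dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
 *	if (buf == NULL)
 *		return -ENOMEM;
 *	...
 *	ib_dma_free_coherent(dev, size, buf, dma_handle);
 */
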
2576 /**
2577  * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
2578  *   by an HCA.
 * @pd: The protection domain assigned to the registered region.
2580  * @phys_buf_array: Specifies a list of physical buffers to use in the
2581  *   memory region.
2582  * @num_phys_buf: Specifies the size of the phys_buf_array.
2583  * @mr_access_flags: Specifies the memory access rights.
2584  * @iova_start: The offset of the region's starting I/O virtual address.
2585  */
2586 struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
2587 			     struct ib_phys_buf *phys_buf_array,
2588 			     int num_phys_buf,
2589 			     int mr_access_flags,
2590 			     u64 *iova_start);
2591 
2592 /**
2593  * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 *   Conceptually, this call deregisters the memory region and then
 *   registers a new physical memory region.  Where possible,
2596  *   resources are reused instead of deallocated and reallocated.
2597  * @mr: The memory region to modify.
2598  * @mr_rereg_mask: A bit-mask used to indicate which of the following
2599  *   properties of the memory region are being modified.
2600  * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
 *   the new protection domain to associate with the memory region,
2602  *   otherwise, this parameter is ignored.
2603  * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
2604  *   field specifies a list of physical buffers to use in the new
2605  *   translation, otherwise, this parameter is ignored.
2606  * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
2607  *   field specifies the size of the phys_buf_array, otherwise, this
2608  *   parameter is ignored.
2609  * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
2610  *   field specifies the new memory access rights, otherwise, this
2611  *   parameter is ignored.
2612  * @iova_start: The offset of the region's starting I/O virtual address.
2613  */
2614 int ib_rereg_phys_mr(struct ib_mr *mr,
2615 		     int mr_rereg_mask,
2616 		     struct ib_pd *pd,
2617 		     struct ib_phys_buf *phys_buf_array,
2618 		     int num_phys_buf,
2619 		     int mr_access_flags,
2620 		     u64 *iova_start);
2621 
2622 /**
2623  * ib_query_mr - Retrieves information about a specific memory region.
2624  * @mr: The memory region to retrieve information about.
2625  * @mr_attr: The attributes of the specified memory region.
2626  */
2627 int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
2628 
2629 /**
2630  * ib_dereg_mr - Deregisters a memory region and removes it from the
2631  *   HCA translation table.
2632  * @mr: The memory region to deregister.
2633  *
2634  * This function can fail, if the memory region has memory windows bound to it.
2635  */
2636 int ib_dereg_mr(struct ib_mr *mr);
2637 
2639 /**
2640  * ib_create_mr - Allocates a memory region that may be used for
2641  *     signature handover operations.
2642  * @pd: The protection domain associated with the region.
2643  * @mr_init_attr: memory region init attributes.
2644  */
2645 struct ib_mr *ib_create_mr(struct ib_pd *pd,
2646 			   struct ib_mr_init_attr *mr_init_attr);
2647 
2648 /**
2649  * ib_destroy_mr - Destroys a memory region that was created using
2650  *     ib_create_mr and removes it from HW translation tables.
2651  * @mr: The memory region to destroy.
2652  *
2653  * This function can fail, if the memory region has memory windows bound to it.
2654  */
2655 int ib_destroy_mr(struct ib_mr *mr);
2656 
2657 /**
2658  * ib_alloc_fast_reg_mr - Allocates memory region usable with the
2659  *   IB_WR_FAST_REG_MR send work request.
2660  * @pd: The protection domain associated with the region.
2661  * @max_page_list_len: requested max physical buffer list length to be
2662  *   used with fast register work requests for this MR.
2663  */
2664 struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
2665 
2666 /**
2667  * ib_alloc_fast_reg_page_list - Allocates a page list array
 * @device: ib device pointer.
 * @page_list_len: size of the page list array to be allocated.
2670  *
2671  * This allocates and returns a struct ib_fast_reg_page_list * and a
2672  * page_list array that is at least page_list_len in size.  The actual
2673  * size is returned in max_page_list_len.  The caller is responsible
2674  * for initializing the contents of the page_list array before posting
 * a send work request with the IB_WR_FAST_REG_MR opcode.
2676  *
2677  * The page_list array entries must be translated using one of the
2678  * ib_dma_*() functions just like the addresses passed to
2679  * ib_map_phys_fmr().  Once the ib_post_send() is issued, the struct
2680  * ib_fast_reg_page_list must not be modified by the caller until the
2681  * IB_WC_FAST_REG_MR work request completes.
2682  */
2683 struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
2684 				struct ib_device *device, int page_list_len);
2685 
2686 /**
2687  * ib_free_fast_reg_page_list - Deallocates a previously allocated
2688  *   page list array.
 * @page_list: struct ib_fast_reg_page_list pointer to be deallocated.
2690  */
2691 void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
2692 
2693 /**
2694  * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
2695  *   R_Key and L_Key.
 * @mr: struct ib_mr pointer to be updated.
 * @newkey: new key to be used.
2698  */
2699 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
2700 {
2701 	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
2702 	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
2703 }
2704 
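/*
 * Illustrative sketch (not part of this header): the common consumer
 * pattern of bumping the key before reusing a fast_reg MR.
 *
 *	u8 key = mr->rkey & 0x000000ff;
 *
 *	ib_update_fast_reg_key(mr, ++key);
 */
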
2705 /**
2706  * ib_inc_rkey - increments the key portion of the given rkey. Can be used
2707  * for calculating a new rkey for type 2 memory windows.
 * @rkey: the rkey to increment.
2709  */
2710 static inline u32 ib_inc_rkey(u32 rkey)
2711 {
2712 	const u32 mask = 0x000000ff;
2713 	return ((rkey + 1) & mask) | (rkey & ~mask);
2714 }
2715 
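/*
 * Illustrative sketch (not part of this header): deriving the rkey
 * for the next bind of a type 2 memory window.
 *
 *	u32 new_rkey = ib_inc_rkey(mw->rkey);
 */
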
2716 /**
2717  * ib_alloc_mw - Allocates a memory window.
2718  * @pd: The protection domain associated with the memory window.
2719  * @type: The type of the memory window (1 or 2).
2720  */
2721 struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
2722 
2723 /**
2724  * ib_bind_mw - Posts a work request to the send queue of the specified
2725  *   QP, which binds the memory window to the given address range and
2726  *   remote access attributes.
2727  * @qp: QP to post the bind work request on.
2728  * @mw: The memory window to bind.
2729  * @mw_bind: Specifies information about the memory window, including
2730  *   its address range, remote access rights, and associated memory region.
2731  *
2732  * If there is no immediate error, the function will update the rkey member
2733  * of the mw parameter to its new value. The bind operation can still fail
2734  * asynchronously.
2735  */
2736 static inline int ib_bind_mw(struct ib_qp *qp,
2737 			     struct ib_mw *mw,
2738 			     struct ib_mw_bind *mw_bind)
2739 {
2740 	/* XXX reference counting in corresponding MR? */
2741 	return mw->device->bind_mw ?
2742 		mw->device->bind_mw(qp, mw, mw_bind) :
2743 		-ENOSYS;
2744 }
2745 
2746 /**
2747  * ib_dealloc_mw - Deallocates a memory window.
2748  * @mw: The memory window to deallocate.
2749  */
2750 int ib_dealloc_mw(struct ib_mw *mw);
2751 
2752 /**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
2754  * @pd: The protection domain associated with the unmapped region.
2755  * @mr_access_flags: Specifies the memory access rights.
2756  * @fmr_attr: Attributes of the unmapped region.
2757  *
2758  * A fast memory region must be mapped before it can be used as part of
2759  * a work request.
2760  */
2761 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
2762 			    int mr_access_flags,
2763 			    struct ib_fmr_attr *fmr_attr);
2764 
2765 /**
2766  * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
2767  * @fmr: The fast memory region to associate with the pages.
2768  * @page_list: An array of physical pages to map to the fast memory region.
2769  * @list_len: The number of pages in page_list.
2770  * @iova: The I/O virtual address to use with the mapped region.
2771  */
2772 static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
2773 				  u64 *page_list, int list_len,
2774 				  u64 iova)
2775 {
2776 	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
2777 }
2778 
2779 /**
2780  * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
2781  * @fmr_list: A linked list of fast memory regions to unmap.
2782  */
2783 int ib_unmap_fmr(struct list_head *fmr_list);
2784 
2785 /**
2786  * ib_dealloc_fmr - Deallocates a fast memory region.
2787  * @fmr: The fast memory region to deallocate.
2788  */
2789 int ib_dealloc_fmr(struct ib_fmr *fmr);
2790 
2791 /**
2792  * ib_attach_mcast - Attaches the specified QP to a multicast group.
2793  * @qp: QP to attach to the multicast group.  The QP must be type
2794  *   IB_QPT_UD.
2795  * @gid: Multicast group GID.
2796  * @lid: Multicast group LID in host byte order.
2797  *
2798  * In order to send and receive multicast packets, subnet
2799  * administration must have created the multicast group and configured
2800  * the fabric appropriately.  The port associated with the specified
2801  * QP must also be a member of the multicast group.
2802  */
2803 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2804 
2805 /**
2806  * ib_detach_mcast - Detaches the specified QP from a multicast group.
2807  * @qp: QP to detach from the multicast group.
2808  * @gid: Multicast group GID.
2809  * @lid: Multicast group LID in host byte order.
2810  */
2811 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2812 
2813 /**
2814  * ib_alloc_xrcd - Allocates an XRC domain.
2815  * @device: The device on which to allocate the XRC domain.
2816  */
2817 struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);
2818 
2819 /**
2820  * ib_dealloc_xrcd - Deallocates an XRC domain.
2821  * @xrcd: The XRC domain to deallocate.
2822  */
2823 int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
2824 
2825 struct ib_flow *ib_create_flow(struct ib_qp *qp,
2826 			       struct ib_flow_attr *flow_attr, int domain);
2827 int ib_destroy_flow(struct ib_flow *flow_id);
2828 
2829 struct ib_dct *ib_create_dct(struct ib_pd *pd, struct ib_dct_init_attr *attr,
2830 			     struct ib_udata *udata);
2831 int ib_destroy_dct(struct ib_dct *dct);
2832 int ib_query_dct(struct ib_dct *dct, struct ib_dct_attr *attr);
2833 
2834 int ib_query_values(struct ib_device *device,
2835                    int q_values, struct ib_device_values *values);
2836 
2837 static inline void ib_active_speed_enum_to_rate(u8 active_speed,
2838 						int *rate,
2839 						char **speed)
2840 {
2841 	switch (active_speed) {
2842 	case IB_SPEED_DDR:
2843 		*speed = " DDR";
2844 		*rate = 50;
2845 		break;
2846 	case IB_SPEED_QDR:
2847 		*speed = " QDR";
2848 		*rate = 100;
2849 		break;
2850 	case IB_SPEED_FDR10:
2851 		*speed = " FDR10";
2852 		*rate = 100;
2853 		break;
2854 	case IB_SPEED_FDR:
2855 		*speed = " FDR";
2856 		*rate = 140;
2857 		break;
2858 	case IB_SPEED_EDR:
2859 		*speed = " EDR";
2860 		*rate = 250;
2861 		break;
	case IB_SPEED_SDR:
	default:		/* default to SDR for invalid rates */
		*speed = "";
		*rate = 25;
		break;
	}
}
2869 
2870 static inline int ib_check_mr_access(int flags)
2871 {
2872 	/*
2873 	 * Local write permission is required if remote write or
2874 	 * remote atomic permission is also requested.
2875 	 */
2876 	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
2877 	    !(flags & IB_ACCESS_LOCAL_WRITE))
2878 		return -EINVAL;
2879 
2880 	return 0;
2881 }
2882 
2883 /**
 * ib_check_mr_status - Lightweight check of MR status.
 *     This routine may provide status checks on a selected
 *     ib_mr.  The first use is for signature status checks.
 *
 * @mr: A memory region.
 * @check_mask: Bitmask of which checks to perform from
 *     ib_mr_status_check enumeration.
 * @mr_status: The container of relevant status checks.
 *     Failed checks will be indicated in the status bitmask
 *     and the relevant info shall be in the error item.
2894  */
2895 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
2896 		       struct ib_mr_status *mr_status);
2897 
2898 #endif /* IB_VERBS_H */
2899