xref: /freebsd/sys/ofed/include/rdma/ib_verbs.h (revision 0e97acdf58fe27b09c4824a474b0344daf997c5f)
1 /*
2  * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
3  * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
4  * Copyright (c) 2004 Intel Corporation.  All rights reserved.
5  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
6  * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
7  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
8  * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
9  *
10  * This software is available to you under a choice of one of two
11  * licenses.  You may choose to be licensed under the terms of the GNU
12  * General Public License (GPL) Version 2, available from the file
13  * COPYING in the main directory of this source tree, or the
14  * OpenIB.org BSD license below:
15  *
16  *     Redistribution and use in source and binary forms, with or
17  *     without modification, are permitted provided that the following
18  *     conditions are met:
19  *
20  *      - Redistributions of source code must retain the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer.
23  *
24  *      - Redistributions in binary form must reproduce the above
25  *        copyright notice, this list of conditions and the following
26  *        disclaimer in the documentation and/or other materials
27  *        provided with the distribution.
28  *
29  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36  * SOFTWARE.
37  */
38 
39 #if !defined(IB_VERBS_H)
40 #define IB_VERBS_H
41 
42 #include <linux/types.h>
43 #include <linux/device.h>
44 #include <linux/mm.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/kref.h>
47 #include <linux/list.h>
48 #include <linux/rwsem.h>
49 #include <linux/scatterlist.h>
50 #include <linux/workqueue.h>
51 
52 #include <asm/uaccess.h>
53 #include <linux/rbtree.h>
54 #include <linux/mutex.h>
55 
56 extern struct workqueue_struct *ib_wq;
57 
58 union ib_gid {
59 	u8	raw[16];
60 	struct {
61 		__be64	subnet_prefix;
62 		__be64	interface_id;
63 	} global;
64 };
65 
66 enum rdma_node_type {
67 	/* IB values map to NodeInfo:NodeType. */
68 	RDMA_NODE_IB_CA 	= 1,
69 	RDMA_NODE_IB_SWITCH,
70 	RDMA_NODE_IB_ROUTER,
71 	RDMA_NODE_RNIC
72 };
73 
74 enum rdma_transport_type {
75 	RDMA_TRANSPORT_IB,
76 	RDMA_TRANSPORT_IWARP
77 };
78 
79 enum rdma_transport_type
80 rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;
81 
82 enum rdma_link_layer {
83 	IB_LINK_LAYER_UNSPECIFIED,
84 	IB_LINK_LAYER_INFINIBAND,
85 	IB_LINK_LAYER_ETHERNET,
86 };
87 
88 enum ib_device_cap_flags {
89 	IB_DEVICE_RESIZE_MAX_WR		= 1,
90 	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
91 	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
92 	IB_DEVICE_RAW_MULTI		= (1<<3),
93 	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
94 	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
95 	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
96 	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
97 	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
98 	IB_DEVICE_INIT_TYPE		= (1<<9),
99 	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
100 	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
101 	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
102 	IB_DEVICE_SRQ_RESIZE		= (1<<13),
103 	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
104 	IB_DEVICE_LOCAL_DMA_LKEY	= (1<<15),
105 	IB_DEVICE_RESERVED		= (1<<16), /* old SEND_W_INV */
106 	IB_DEVICE_MEM_WINDOW		= (1<<17),
107 	/*
108 	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
109 	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
110 	 * messages and can verify the validity of checksum for
111 	 * incoming messages.  Setting this flag implies that the
112 	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
113 	 */
114 	IB_DEVICE_UD_IP_CSUM		= (1<<18),
115 	IB_DEVICE_UD_TSO		= (1<<19),
116 	IB_DEVICE_XRC			= (1<<20),
117 	IB_DEVICE_MEM_MGT_EXTENSIONS	= (1<<21),
118 	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
119 	IB_DEVICE_MR_ALLOCATE		= (1<<23),
120 	IB_DEVICE_SHARED_MR             = (1<<24),
121 	IB_DEVICE_QPG			= (1<<25),
122 	IB_DEVICE_UD_RSS		= (1<<26),
123 	IB_DEVICE_UD_TSS		= (1<<27)
124 };
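
/*
 * Usage sketch (not part of the original header): capability bits are
 * reported through ib_query_device(), declared further down this file,
 * so a consumer can test device_cap_flags before relying on a feature
 * such as UD checksum offload.  The helper name is hypothetical.
 */
#if 0
static int ud_csum_supported(struct ib_device *device)
{
	struct ib_device_attr attr;

	if (ib_query_device(device, &attr))
		return 0;
	return !!(attr.device_cap_flags & IB_DEVICE_UD_IP_CSUM);
}
#endif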
125 
126 enum ib_atomic_cap {
127 	IB_ATOMIC_NONE,
128 	IB_ATOMIC_HCA,
129 	IB_ATOMIC_GLOB
130 };
131 
132 struct ib_device_attr {
133 	u64			fw_ver;
134 	__be64			sys_image_guid;
135 	u64			max_mr_size;
136 	u64			page_size_cap;
137 	u32			vendor_id;
138 	u32			vendor_part_id;
139 	u32			hw_ver;
140 	int			max_qp;
141 	int			max_qp_wr;
142 	int			device_cap_flags;
143 	int			max_sge;
144 	int			max_sge_rd;
145 	int			max_cq;
146 	int			max_cqe;
147 	int			max_mr;
148 	int			max_pd;
149 	int			max_qp_rd_atom;
150 	int			max_ee_rd_atom;
151 	int			max_res_rd_atom;
152 	int			max_qp_init_rd_atom;
153 	int			max_ee_init_rd_atom;
154 	enum ib_atomic_cap	atomic_cap;
155 	enum ib_atomic_cap	masked_atomic_cap;
156 	int			max_ee;
157 	int			max_rdd;
158 	int			max_mw;
159 	int			max_raw_ipv6_qp;
160 	int			max_raw_ethy_qp;
161 	int			max_mcast_grp;
162 	int			max_mcast_qp_attach;
163 	int			max_total_mcast_qp_attach;
164 	int			max_ah;
165 	int			max_fmr;
166 	int			max_map_per_fmr;
167 	int			max_srq;
168 	int			max_srq_wr;
169 	int			max_srq_sge;
170 	unsigned int		max_fast_reg_page_list_len;
171 	int			max_rss_tbl_sz;
172 	u16			max_pkeys;
173 	u8			local_ca_ack_delay;
174 };
175 
176 enum ib_mtu {
177 	IB_MTU_256  = 1,
178 	IB_MTU_512  = 2,
179 	IB_MTU_1024 = 3,
180 	IB_MTU_2048 = 4,
181 	IB_MTU_4096 = 5
182 };
183 
184 static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
185 {
186 	switch (mtu) {
187 	case IB_MTU_256:  return  256;
188 	case IB_MTU_512:  return  512;
189 	case IB_MTU_1024: return 1024;
190 	case IB_MTU_2048: return 2048;
191 	case IB_MTU_4096: return 4096;
192 	default: 	  return -1;
193 	}
194 }
195 
196 enum ib_port_state {
197 	IB_PORT_NOP		= 0,
198 	IB_PORT_DOWN		= 1,
199 	IB_PORT_INIT		= 2,
200 	IB_PORT_ARMED		= 3,
201 	IB_PORT_ACTIVE		= 4,
202 	IB_PORT_ACTIVE_DEFER	= 5
203 };
204 
205 enum ib_port_cap_flags {
206 	IB_PORT_SM				= 1 <<  1,
207 	IB_PORT_NOTICE_SUP			= 1 <<  2,
208 	IB_PORT_TRAP_SUP			= 1 <<  3,
209 	IB_PORT_OPT_IPD_SUP                     = 1 <<  4,
210 	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
211 	IB_PORT_SL_MAP_SUP			= 1 <<  6,
212 	IB_PORT_MKEY_NVRAM			= 1 <<  7,
213 	IB_PORT_PKEY_NVRAM			= 1 <<  8,
214 	IB_PORT_LED_INFO_SUP			= 1 <<  9,
215 	IB_PORT_SM_DISABLED			= 1 << 10,
216 	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
217 	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
218 	IB_PORT_EXTENDED_SPEEDS_SUP             = 1 << 14,
219 	IB_PORT_CM_SUP				= 1 << 16,
220 	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
221 	IB_PORT_REINIT_SUP			= 1 << 18,
222 	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
223 	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
224 	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
225 	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
226 	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
227 	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
228 	IB_PORT_CLIENT_REG_SUP			= 1 << 25
229 };
230 
231 enum ib_port_width {
232 	IB_WIDTH_1X	= 1,
233 	IB_WIDTH_4X	= 2,
234 	IB_WIDTH_8X	= 4,
235 	IB_WIDTH_12X	= 8
236 };
237 
238 static inline int ib_width_enum_to_int(enum ib_port_width width)
239 {
240 	switch (width) {
241 	case IB_WIDTH_1X:  return  1;
242 	case IB_WIDTH_4X:  return  4;
243 	case IB_WIDTH_8X:  return  8;
244 	case IB_WIDTH_12X: return 12;
245 	default: 	  return -1;
246 	}
247 }
248 
249 enum ib_port_speed {
250 	IB_SPEED_SDR	= 1,
251 	IB_SPEED_DDR	= 2,
252 	IB_SPEED_QDR	= 4,
253 	IB_SPEED_FDR10	= 8,
254 	IB_SPEED_FDR	= 16,
255 	IB_SPEED_EDR	= 32
256 };
257 
258 struct ib_protocol_stats {
259 	/* TBD... */
260 };
261 
262 struct iw_protocol_stats {
263 	u64	ipInReceives;
264 	u64	ipInHdrErrors;
265 	u64	ipInTooBigErrors;
266 	u64	ipInNoRoutes;
267 	u64	ipInAddrErrors;
268 	u64	ipInUnknownProtos;
269 	u64	ipInTruncatedPkts;
270 	u64	ipInDiscards;
271 	u64	ipInDelivers;
272 	u64	ipOutForwDatagrams;
273 	u64	ipOutRequests;
274 	u64	ipOutDiscards;
275 	u64	ipOutNoRoutes;
276 	u64	ipReasmTimeout;
277 	u64	ipReasmReqds;
278 	u64	ipReasmOKs;
279 	u64	ipReasmFails;
280 	u64	ipFragOKs;
281 	u64	ipFragFails;
282 	u64	ipFragCreates;
283 	u64	ipInMcastPkts;
284 	u64	ipOutMcastPkts;
285 	u64	ipInBcastPkts;
286 	u64	ipOutBcastPkts;
287 
288 	u64	tcpRtoAlgorithm;
289 	u64	tcpRtoMin;
290 	u64	tcpRtoMax;
291 	u64	tcpMaxConn;
292 	u64	tcpActiveOpens;
293 	u64	tcpPassiveOpens;
294 	u64	tcpAttemptFails;
295 	u64	tcpEstabResets;
296 	u64	tcpCurrEstab;
297 	u64	tcpInSegs;
298 	u64	tcpOutSegs;
299 	u64	tcpRetransSegs;
300 	u64	tcpInErrs;
301 	u64	tcpOutRsts;
302 };
303 
304 union rdma_protocol_stats {
305 	struct ib_protocol_stats	ib;
306 	struct iw_protocol_stats	iw;
307 };
308 
309 struct ib_port_attr {
310 	enum ib_port_state	state;
311 	enum ib_mtu		max_mtu;
312 	enum ib_mtu		active_mtu;
313 	int			gid_tbl_len;
314 	u32			port_cap_flags;
315 	u32			max_msg_sz;
316 	u32			bad_pkey_cntr;
317 	u32			qkey_viol_cntr;
318 	u16			pkey_tbl_len;
319 	u16			lid;
320 	u16			sm_lid;
321 	u8			lmc;
322 	u8			max_vl_num;
323 	u8			sm_sl;
324 	u8			subnet_timeout;
325 	u8			init_type_reply;
326 	u8			active_width;
327 	u8			active_speed;
328 	u8                      phys_state;
329 	enum rdma_link_layer	link_layer;
330 };
331 
332 enum ib_device_modify_flags {
333 	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
334 	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
335 };
336 
337 struct ib_device_modify {
338 	u64	sys_image_guid;
339 	char	node_desc[64];
340 };
341 
342 enum ib_port_modify_flags {
343 	IB_PORT_SHUTDOWN		= 1,
344 	IB_PORT_INIT_TYPE		= (1<<2),
345 	IB_PORT_RESET_QKEY_CNTR		= (1<<3)
346 };
347 
348 struct ib_port_modify {
349 	u32	set_port_cap_mask;
350 	u32	clr_port_cap_mask;
351 	u8	init_type;
352 };
353 
354 enum ib_event_type {
355 	IB_EVENT_CQ_ERR,
356 	IB_EVENT_QP_FATAL,
357 	IB_EVENT_QP_REQ_ERR,
358 	IB_EVENT_QP_ACCESS_ERR,
359 	IB_EVENT_COMM_EST,
360 	IB_EVENT_SQ_DRAINED,
361 	IB_EVENT_PATH_MIG,
362 	IB_EVENT_PATH_MIG_ERR,
363 	IB_EVENT_DEVICE_FATAL,
364 	IB_EVENT_PORT_ACTIVE,
365 	IB_EVENT_PORT_ERR,
366 	IB_EVENT_LID_CHANGE,
367 	IB_EVENT_PKEY_CHANGE,
368 	IB_EVENT_SM_CHANGE,
369 	IB_EVENT_SRQ_ERR,
370 	IB_EVENT_SRQ_LIMIT_REACHED,
371 	IB_EVENT_QP_LAST_WQE_REACHED,
372 	IB_EVENT_CLIENT_REREGISTER,
373 	IB_EVENT_GID_CHANGE,
374 };
375 
376 enum ib_event_flags {
377 	IB_XRC_QP_EVENT_FLAG = 0x80000000,
378 };
379 
380 struct ib_event {
381 	struct ib_device	*device;
382 	union {
383 		struct ib_cq	*cq;
384 		struct ib_qp	*qp;
385 		struct ib_srq	*srq;
386 		u8		port_num;
387 		u32		xrc_qp_num;
388 	} element;
389 	enum ib_event_type	event;
390 };
391 
392 struct ib_event_handler {
393 	struct ib_device *device;
394 	void            (*handler)(struct ib_event_handler *, struct ib_event *);
395 	struct list_head  list;
396 };
397 
398 #define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
399 	do {							\
400 		(_ptr)->device  = _device;			\
401 		(_ptr)->handler = _handler;			\
402 		INIT_LIST_HEAD(&(_ptr)->list);			\
403 	} while (0)
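
/*
 * Usage sketch, assuming a consumer that wants asynchronous device
 * events: initialize a handler with the macro above, then register it
 * with ib_register_event_handler(), declared later in this file.  The
 * function names are illustrative.
 */
#if 0
static void my_event_handler(struct ib_event_handler *handler,
			     struct ib_event *event)
{
	/* e.g. react to IB_EVENT_PORT_ACTIVE on event->element.port_num */
}

static void watch_device(struct ib_device *device)
{
	static struct ib_event_handler handler;

	INIT_IB_EVENT_HANDLER(&handler, device, my_event_handler);
	ib_register_event_handler(&handler);
}
#endif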
404 
405 struct ib_global_route {
406 	union ib_gid	dgid;
407 	u32		flow_label;
408 	u8		sgid_index;
409 	u8		hop_limit;
410 	u8		traffic_class;
411 };
412 
413 struct ib_grh {
414 	__be32		version_tclass_flow;
415 	__be16		paylen;
416 	u8		next_hdr;
417 	u8		hop_limit;
418 	union ib_gid	sgid;
419 	union ib_gid	dgid;
420 };
421 
422 enum {
423 	IB_MULTICAST_QPN = 0xffffff
424 };
425 
426 #define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
427 
428 enum ib_ah_flags {
429 	IB_AH_GRH	= 1
430 };
431 
432 enum ib_rate {
433 	IB_RATE_PORT_CURRENT = 0,
434 	IB_RATE_2_5_GBPS = 2,
435 	IB_RATE_5_GBPS   = 5,
436 	IB_RATE_10_GBPS  = 3,
437 	IB_RATE_20_GBPS  = 6,
438 	IB_RATE_30_GBPS  = 4,
439 	IB_RATE_40_GBPS  = 7,
440 	IB_RATE_60_GBPS  = 8,
441 	IB_RATE_80_GBPS  = 9,
442 	IB_RATE_120_GBPS = 10,
443 	IB_RATE_14_GBPS  = 11,
444 	IB_RATE_56_GBPS  = 12,
445 	IB_RATE_112_GBPS = 13,
446 	IB_RATE_168_GBPS = 14,
447 	IB_RATE_25_GBPS  = 15,
448 	IB_RATE_100_GBPS = 16,
449 	IB_RATE_200_GBPS = 17,
450 	IB_RATE_300_GBPS = 18
451 };
452 
453 /**
454  * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
455  * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
456  * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
457  * @rate: rate to convert.
458  */
459 int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;
460 
461 /**
462  * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
463  * For example, IB_RATE_2_5_GBPS will be converted to 2500.
464  * @rate: rate to convert.
465  */
466 int ib_rate_to_mbps(enum ib_rate rate) __attribute_const__;
467 
468 /**
469  * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
470  * enum.
471  * @mult: multiple to convert.
472  */
473 enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;
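
/*
 * A small illustration of the three conversion helpers above; the
 * expected values follow from their documentation comments.
 */
#if 0
static void rate_example(void)
{
	int mult = ib_rate_to_mult(IB_RATE_5_GBPS);	/* 2 */
	int mbps = ib_rate_to_mbps(IB_RATE_5_GBPS);	/* 5000 */
	enum ib_rate rate = mult_to_ib_rate(2);		/* IB_RATE_5_GBPS */

	(void)mult; (void)mbps; (void)rate;
}
#endif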
474 
475 struct ib_ah_attr {
476 	struct ib_global_route	grh;
477 	u16			dlid;
478 	u8			sl;
479 	u8			src_path_bits;
480 	u8			static_rate;
481 	u8			ah_flags;
482 	u8			port_num;
483 };
484 
485 enum ib_wc_status {
486 	IB_WC_SUCCESS,
487 	IB_WC_LOC_LEN_ERR,
488 	IB_WC_LOC_QP_OP_ERR,
489 	IB_WC_LOC_EEC_OP_ERR,
490 	IB_WC_LOC_PROT_ERR,
491 	IB_WC_WR_FLUSH_ERR,
492 	IB_WC_MW_BIND_ERR,
493 	IB_WC_BAD_RESP_ERR,
494 	IB_WC_LOC_ACCESS_ERR,
495 	IB_WC_REM_INV_REQ_ERR,
496 	IB_WC_REM_ACCESS_ERR,
497 	IB_WC_REM_OP_ERR,
498 	IB_WC_RETRY_EXC_ERR,
499 	IB_WC_RNR_RETRY_EXC_ERR,
500 	IB_WC_LOC_RDD_VIOL_ERR,
501 	IB_WC_REM_INV_RD_REQ_ERR,
502 	IB_WC_REM_ABORT_ERR,
503 	IB_WC_INV_EECN_ERR,
504 	IB_WC_INV_EEC_STATE_ERR,
505 	IB_WC_FATAL_ERR,
506 	IB_WC_RESP_TIMEOUT_ERR,
507 	IB_WC_GENERAL_ERR
508 };
509 
510 enum ib_wc_opcode {
511 	IB_WC_SEND,
512 	IB_WC_RDMA_WRITE,
513 	IB_WC_RDMA_READ,
514 	IB_WC_COMP_SWAP,
515 	IB_WC_FETCH_ADD,
516 	IB_WC_BIND_MW,
517 	IB_WC_LSO,
518 	IB_WC_LOCAL_INV,
519 	IB_WC_FAST_REG_MR,
520 	IB_WC_MASKED_COMP_SWAP,
521 	IB_WC_MASKED_FETCH_ADD,
522 /*
523  * Set value of IB_WC_RECV so consumers can test if a completion is a
524  * receive by testing (opcode & IB_WC_RECV).
525  */
526 	IB_WC_RECV			= 1 << 7,
527 	IB_WC_RECV_RDMA_WITH_IMM
528 };
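
/*
 * Usage sketch of the IB_WC_RECV convention described above: a single
 * bitwise test classifies a polled completion as a receive.  Note that
 * struct ib_wc is defined further down this header.
 */
#if 0
static int wc_is_recv(const struct ib_wc *wc)
{
	return wc->opcode & IB_WC_RECV;
}
#endif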
529 
530 enum ib_wc_flags {
531 	IB_WC_GRH		= 1,
532 	IB_WC_WITH_IMM		= (1<<1),
533 	IB_WC_WITH_INVALIDATE	= (1<<2),
534 	IB_WC_IP_CSUM_OK	= (1<<3),
535 };
536 
537 struct ib_wc {
538 	u64			wr_id;
539 	enum ib_wc_status	status;
540 	enum ib_wc_opcode	opcode;
541 	u32			vendor_err;
542 	u32			byte_len;
543 	struct ib_qp	       *qp;
544 	union {
545 		__be32		imm_data;
546 		u32		invalidate_rkey;
547 	} ex;
548 	u32			src_qp;
549 	int			wc_flags;
550 	u16			pkey_index;
551 	u16			slid;
552 	u8			sl;
553 	u8			dlid_path_bits;
554 	u8			port_num;	/* valid only for DR SMPs on switches */
555 	int			csum_ok;
556 };
557 
558 enum ib_cq_notify_flags {
559 	IB_CQ_SOLICITED			= 1 << 0,
560 	IB_CQ_NEXT_COMP			= 1 << 1,
561 	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
562 	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
563 };
564 
565 enum ib_srq_type {
566 	IB_SRQT_BASIC,
567 	IB_SRQT_XRC
568 };
569 
570 enum ib_srq_attr_mask {
571 	IB_SRQ_MAX_WR	= 1 << 0,
572 	IB_SRQ_LIMIT	= 1 << 1,
573 };
574 
575 struct ib_srq_attr {
576 	u32	max_wr;
577 	u32	max_sge;
578 	u32	srq_limit;
579 };
580 
581 struct ib_srq_init_attr {
582 	void		      (*event_handler)(struct ib_event *, void *);
583 	void		       *srq_context;
584 	struct ib_srq_attr	attr;
585 	enum ib_srq_type	srq_type;
586 
587 	union {
588 		struct {
589 			struct ib_xrcd *xrcd;
590 			struct ib_cq   *cq;
591 		} xrc;
592 	} ext;
593 };
594 
595 struct ib_qp_cap {
596 	u32	max_send_wr;
597 	u32	max_recv_wr;
598 	u32	max_send_sge;
599 	u32	max_recv_sge;
600 	u32	max_inline_data;
601 	u32	qpg_tss_mask_sz;
602 };
603 
604 enum ib_sig_type {
605 	IB_SIGNAL_ALL_WR,
606 	IB_SIGNAL_REQ_WR
607 };
608 
609 enum ib_qp_type {
610 	/*
611 	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
612 	 * here (and in that order) since the MAD layer uses them as
613 	 * indices into a 2-entry table.
614 	 */
615 	IB_QPT_SMI,
616 	IB_QPT_GSI,
617 
618 	IB_QPT_RC,
619 	IB_QPT_UC,
620 	IB_QPT_UD,
621 	IB_QPT_XRC,
622 	IB_QPT_RAW_IPV6,
623 	IB_QPT_RAW_ETHERTYPE,
624 	IB_QPT_RAW_PACKET = 8,
625 	IB_QPT_XRC_INI = 9,
626 	IB_QPT_XRC_TGT,
627 	IB_QPT_MAX,
628 };
629 
630 enum ib_qp_create_flags {
631 	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
632 	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
633 	IB_QP_CREATE_NETIF_QP			= 1 << 2,
634 	/* reserve bits 26-31 for low level drivers' internal use */
635 	IB_QP_CREATE_RESERVED_START		= 1 << 26,
636 	IB_QP_CREATE_RESERVED_END		= 1 << 31,
637 };
638 
639 enum ib_qpg_type {
640 	IB_QPG_NONE	= 0,
641 	IB_QPG_PARENT	= (1<<0),
642 	IB_QPG_CHILD_RX = (1<<1),
643 	IB_QPG_CHILD_TX = (1<<2)
644 };
645 
646 struct ib_qpg_init_attrib {
647 	u32 tss_child_count;
648 	u32 rss_child_count;
649 };
650 
651 struct ib_qp_init_attr {
652 	void                  (*event_handler)(struct ib_event *, void *);
653 	void		       *qp_context;
654 	struct ib_cq	       *send_cq;
655 	struct ib_cq	       *recv_cq;
656 	struct ib_srq	       *srq;
657 	struct ib_xrcd	       *xrcd;     /* XRC TGT QPs only */
658 	struct ib_qp_cap	cap;
659 	union {
660 		struct ib_qp *qpg_parent; /* see qpg_type */
661 		struct ib_qpg_init_attrib parent_attrib;
662 	} pp;
663 	enum ib_sig_type	sq_sig_type;
664 	enum ib_qp_type		qp_type;
665 	enum ib_qp_create_flags	create_flags;
666 	enum ib_qpg_type	qpg_type;
667 	u8			port_num; /* special QP types only */
668 };
669 
670 struct ib_qp_open_attr {
671 	void                  (*event_handler)(struct ib_event *, void *);
672 	void		       *qp_context;
673 	u32			qp_num;
674 	enum ib_qp_type		qp_type;
675 };
676 
677 enum ib_rnr_timeout {
678 	IB_RNR_TIMER_655_36 =  0,
679 	IB_RNR_TIMER_000_01 =  1,
680 	IB_RNR_TIMER_000_02 =  2,
681 	IB_RNR_TIMER_000_03 =  3,
682 	IB_RNR_TIMER_000_04 =  4,
683 	IB_RNR_TIMER_000_06 =  5,
684 	IB_RNR_TIMER_000_08 =  6,
685 	IB_RNR_TIMER_000_12 =  7,
686 	IB_RNR_TIMER_000_16 =  8,
687 	IB_RNR_TIMER_000_24 =  9,
688 	IB_RNR_TIMER_000_32 = 10,
689 	IB_RNR_TIMER_000_48 = 11,
690 	IB_RNR_TIMER_000_64 = 12,
691 	IB_RNR_TIMER_000_96 = 13,
692 	IB_RNR_TIMER_001_28 = 14,
693 	IB_RNR_TIMER_001_92 = 15,
694 	IB_RNR_TIMER_002_56 = 16,
695 	IB_RNR_TIMER_003_84 = 17,
696 	IB_RNR_TIMER_005_12 = 18,
697 	IB_RNR_TIMER_007_68 = 19,
698 	IB_RNR_TIMER_010_24 = 20,
699 	IB_RNR_TIMER_015_36 = 21,
700 	IB_RNR_TIMER_020_48 = 22,
701 	IB_RNR_TIMER_030_72 = 23,
702 	IB_RNR_TIMER_040_96 = 24,
703 	IB_RNR_TIMER_061_44 = 25,
704 	IB_RNR_TIMER_081_92 = 26,
705 	IB_RNR_TIMER_122_88 = 27,
706 	IB_RNR_TIMER_163_84 = 28,
707 	IB_RNR_TIMER_245_76 = 29,
708 	IB_RNR_TIMER_327_68 = 30,
709 	IB_RNR_TIMER_491_52 = 31
710 };
711 
712 enum ib_qp_attr_mask {
713 	IB_QP_STATE			= 1,
714 	IB_QP_CUR_STATE			= (1<<1),
715 	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
716 	IB_QP_ACCESS_FLAGS		= (1<<3),
717 	IB_QP_PKEY_INDEX		= (1<<4),
718 	IB_QP_PORT			= (1<<5),
719 	IB_QP_QKEY			= (1<<6),
720 	IB_QP_AV			= (1<<7),
721 	IB_QP_PATH_MTU			= (1<<8),
722 	IB_QP_TIMEOUT			= (1<<9),
723 	IB_QP_RETRY_CNT			= (1<<10),
724 	IB_QP_RNR_RETRY			= (1<<11),
725 	IB_QP_RQ_PSN			= (1<<12),
726 	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
727 	IB_QP_ALT_PATH			= (1<<14),
728 	IB_QP_MIN_RNR_TIMER		= (1<<15),
729 	IB_QP_SQ_PSN			= (1<<16),
730 	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
731 	IB_QP_PATH_MIG_STATE		= (1<<18),
732 	IB_QP_CAP			= (1<<19),
733 	IB_QP_DEST_QPN			= (1<<20),
734 	IB_QP_GROUP_RSS			= (1<<21)
735 };
736 
737 enum ib_qp_state {
738 	IB_QPS_RESET,
739 	IB_QPS_INIT,
740 	IB_QPS_RTR,
741 	IB_QPS_RTS,
742 	IB_QPS_SQD,
743 	IB_QPS_SQE,
744 	IB_QPS_ERR
745 };
746 
747 enum ib_mig_state {
748 	IB_MIG_MIGRATED,
749 	IB_MIG_REARM,
750 	IB_MIG_ARMED
751 };
752 
753 struct ib_qp_attr {
754 	enum ib_qp_state	qp_state;
755 	enum ib_qp_state	cur_qp_state;
756 	enum ib_mtu		path_mtu;
757 	enum ib_mig_state	path_mig_state;
758 	u32			qkey;
759 	u32			rq_psn;
760 	u32			sq_psn;
761 	u32			dest_qp_num;
762 	int			qp_access_flags;
763 	struct ib_qp_cap	cap;
764 	struct ib_ah_attr	ah_attr;
765 	struct ib_ah_attr	alt_ah_attr;
766 	u16			pkey_index;
767 	u16			alt_pkey_index;
768 	u8			en_sqd_async_notify;
769 	u8			sq_draining;
770 	u8			max_rd_atomic;
771 	u8			max_dest_rd_atomic;
772 	u8			min_rnr_timer;
773 	u8			port_num;
774 	u8			timeout;
775 	u8			retry_cnt;
776 	u8			rnr_retry;
777 	u8			alt_port_num;
778 	u8			alt_timeout;
779 };
780 
781 enum ib_wr_opcode {
782 	IB_WR_RDMA_WRITE,
783 	IB_WR_RDMA_WRITE_WITH_IMM,
784 	IB_WR_SEND,
785 	IB_WR_SEND_WITH_IMM,
786 	IB_WR_RDMA_READ,
787 	IB_WR_ATOMIC_CMP_AND_SWP,
788 	IB_WR_ATOMIC_FETCH_AND_ADD,
789 	IB_WR_LSO,
790 	IB_WR_BIG_LSO,
791 	IB_WR_SEND_WITH_INV,
792 	IB_WR_RDMA_READ_WITH_INV,
793 	IB_WR_LOCAL_INV,
794 	IB_WR_FAST_REG_MR,
795 	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
796 	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
797 };
798 
799 enum ib_send_flags {
800 	IB_SEND_FENCE		= 1,
801 	IB_SEND_SIGNALED	= (1<<1),
802 	IB_SEND_SOLICITED	= (1<<2),
803 	IB_SEND_INLINE		= (1<<3),
804 	IB_SEND_IP_CSUM		= (1<<4)
805 };
806 
807 enum ib_flow_types {
808 	IB_FLOW_ETH = 0,
809 	IB_FLOW_IB_UC = 1,
810 	IB_FLOW_IB_MC_IPV4 = 2,
811 	IB_FLOW_IB_MC_IPV6 = 3
812 };
813 
814 enum {
815 	IB_FLOW_L4_NONE = 0,
816 	IB_FLOW_L4_OTHER = 3,
817 	IB_FLOW_L4_UDP = 5,
818 	IB_FLOW_L4_TCP = 6
819 };
820 
821 struct ib_sge {
822 	u64	addr;
823 	u32	length;
824 	u32	lkey;
825 };
826 
827 struct ib_fast_reg_page_list {
828 	struct ib_device       *device;
829 	u64		       *page_list;
830 	unsigned int		max_page_list_len;
831 };
832 
833 struct ib_send_wr {
834 	struct ib_send_wr      *next;
835 	u64			wr_id;
836 	struct ib_sge	       *sg_list;
837 	int			num_sge;
838 	enum ib_wr_opcode	opcode;
839 	int			send_flags;
840 	union {
841 		__be32		imm_data;
842 		u32		invalidate_rkey;
843 	} ex;
844 	union {
845 		struct {
846 			u64	remote_addr;
847 			u32	rkey;
848 		} rdma;
849 		struct {
850 			u64	remote_addr;
851 			u64	compare_add;
852 			u64	swap;
853 			u64	compare_add_mask;
854 			u64	swap_mask;
855 			u32	rkey;
856 		} atomic;
857 		struct {
858 			struct ib_ah *ah;
859 			void   *header;
860 			int     hlen;
861 			int     mss;
862 			u32	remote_qpn;
863 			u32	remote_qkey;
864 			u16	pkey_index; /* valid for GSI only */
865 			u8	port_num;   /* valid for DR SMPs on switch only */
866 		} ud;
867 		struct {
868 			u64				iova_start;
869 			struct ib_fast_reg_page_list   *page_list;
870 			unsigned int			page_shift;
871 			unsigned int			page_list_len;
872 			u32				length;
873 			int				access_flags;
874 			u32				rkey;
875 		} fast_reg;
876 		struct {
877 			struct ib_unpacked_lrh	*lrh;
878 			u32			eth_type;
879 			u8			static_rate;
880 		} raw_ety;
881 	} wr;
882 	u32			xrc_remote_srq_num;	/* XRC TGT QPs only */
883 };
884 
885 struct ib_recv_wr {
886 	struct ib_recv_wr      *next;
887 	u64			wr_id;
888 	struct ib_sge	       *sg_list;
889 	int			num_sge;
890 };
891 
892 enum ib_access_flags {
893 	IB_ACCESS_LOCAL_WRITE	= 1,
894 	IB_ACCESS_REMOTE_WRITE	= (1<<1),
895 	IB_ACCESS_REMOTE_READ	= (1<<2),
896 	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
897 	IB_ACCESS_MW_BIND	= (1<<4),
898 	IB_ACCESS_ALLOCATE_MR	= (1<<5),
899 	IB_ACCESS_SHARED_MR_USER_READ   = (1<<6),
900 	IB_ACCESS_SHARED_MR_USER_WRITE  = (1<<7),
901 	IB_ACCESS_SHARED_MR_GROUP_READ  = (1<<8),
902 	IB_ACCESS_SHARED_MR_GROUP_WRITE = (1<<9),
903 	IB_ACCESS_SHARED_MR_OTHER_READ  = (1<<10),
904 	IB_ACCESS_SHARED_MR_OTHER_WRITE = (1<<11)
905 
906 };
907 
908 struct ib_phys_buf {
909 	u64      addr;
910 	u64      size;
911 };
912 
913 struct ib_mr_attr {
914 	struct ib_pd	*pd;
915 	u64		device_virt_addr;
916 	u64		size;
917 	int		mr_access_flags;
918 	u32		lkey;
919 	u32		rkey;
920 };
921 
922 enum ib_mr_rereg_flags {
923 	IB_MR_REREG_TRANS	= 1,
924 	IB_MR_REREG_PD		= (1<<1),
925 	IB_MR_REREG_ACCESS	= (1<<2)
926 };
927 
928 struct ib_mw_bind {
929 	struct ib_mr   *mr;
930 	u64		wr_id;
931 	u64		addr;
932 	u32		length;
933 	int		send_flags;
934 	int		mw_access_flags;
935 };
936 
937 struct ib_fmr_attr {
938 	int	max_pages;
939 	int	max_maps;
940 	u8	page_shift;
941 };
942 
943 struct ib_ucontext {
944 	struct ib_device       *device;
945 	struct list_head	pd_list;
946 	struct list_head	mr_list;
947 	struct list_head	mw_list;
948 	struct list_head	cq_list;
949 	struct list_head	qp_list;
950 	struct list_head	srq_list;
951 	struct list_head	ah_list;
952 	struct list_head	xrcd_list;
953 	int			closing;
954 };
955 
956 struct ib_uobject {
957 	u64			user_handle;	/* handle given to us by userspace */
958 	struct ib_ucontext     *context;	/* associated user context */
959 	void		       *object;		/* containing object */
960 	struct list_head	list;		/* link to context's list */
961 	int			id;		/* index into kernel idr */
962 	struct kref		ref;
963 	struct rw_semaphore	mutex;		/* protects .live */
964 	int			live;
965 };
966 
967 struct ib_udata {
968 	void __user *inbuf;
969 	void __user *outbuf;
970 	size_t       inlen;
971 	size_t       outlen;
972 };
973 
974 struct ib_uxrc_rcv_object {
975 	struct list_head	list;		/* link to context's list */
976 	u32			qp_num;
977 	u32			domain_handle;
978 };
979 
980 struct ib_pd {
981 	struct ib_device       *device;
982 	struct ib_uobject      *uobject;
983 	atomic_t          	usecnt; /* count all resources */
984 };
985 
986 struct ib_xrcd {
987 	struct ib_device       *device;
988 	struct ib_uobject      *uobject;
989 	atomic_t		usecnt; /* count all exposed resources */
990 	struct inode	       *inode;
991 	struct rb_node		node;
992 
993 	struct mutex		tgt_qp_mutex;
994 	struct list_head	tgt_qp_list;
995 };
996 
997 struct ib_ah {
998 	struct ib_device	*device;
999 	struct ib_pd		*pd;
1000 	struct ib_uobject	*uobject;
1001 };
1002 
1003 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1004 
1005 struct ib_cq {
1006 	struct ib_device       *device;
1007 	struct ib_uobject      *uobject;
1008 	ib_comp_handler   	comp_handler;
1009 	void                  (*event_handler)(struct ib_event *, void *);
1010 	void                   *cq_context;
1011 	int               	cqe;
1012 	atomic_t          	usecnt; /* count number of work queues */
1013 };
1014 
1015 struct ib_srq {
1016 	struct ib_device       *device;
1017 	struct ib_pd	       *pd;
1018 	struct ib_uobject      *uobject;
1019 	void		      (*event_handler)(struct ib_event *, void *);
1020 	void		       *srq_context;
1021 	enum ib_srq_type	srq_type;
1022 	atomic_t		usecnt;
1023 
1024 	union {
1025 		struct {
1026 			struct ib_xrcd *xrcd;
1027 			struct ib_cq   *cq;
1028 			u32		srq_num;
1029 		} xrc;
1030 	} ext;
1031 };
1032 
1033 struct ib_qp {
1034 	struct ib_device       *device;
1035 	struct ib_pd	       *pd;
1036 	struct ib_cq	       *send_cq;
1037 	struct ib_cq	       *recv_cq;
1038 	struct ib_srq	       *srq;
1039 	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
1040 	struct list_head	xrcd_list;
1041 	atomic_t		usecnt; /* count times opened, mcast attaches */
1042 	struct list_head	open_list;
1043 	struct ib_qp           *real_qp;
1044 	struct ib_uobject      *uobject;
1045 	void                  (*event_handler)(struct ib_event *, void *);
1046 	void		       *qp_context;
1047 	u32			qp_num;
1048 	enum ib_qp_type		qp_type;
1049 	enum ib_qpg_type	qpg_type;
1050 };
1051 
1052 struct ib_mr {
1053 	struct ib_device  *device;
1054 	struct ib_pd	  *pd;
1055 	struct ib_uobject *uobject;
1056 	u32		   lkey;
1057 	u32		   rkey;
1058 	atomic_t	   usecnt; /* count number of MWs */
1059 };
1060 
1061 struct ib_mw {
1062 	struct ib_device	*device;
1063 	struct ib_pd		*pd;
1064 	struct ib_uobject	*uobject;
1065 	u32			rkey;
1066 };
1067 
1068 struct ib_fmr {
1069 	struct ib_device	*device;
1070 	struct ib_pd		*pd;
1071 	struct list_head	list;
1072 	u32			lkey;
1073 	u32			rkey;
1074 };
1075 
1076 struct ib_flow_spec {
1077 	enum ib_flow_types type;
1078 	union {
1079 		struct {
1080 			__be16 ethertype;
1081 			__be16 vlan;
1082 			u8 vlan_present;
1083 			u8  mac[6];
1084 			u8  port;
1085 		} eth;
1086 		struct {
1087 			__be32 qpn;
1088 		} ib_uc;
1089 		struct {
1090 			u8  mgid[16];
1091 		} ib_mc;
1092 	} l2_id;
1093 	__be32 src_ip;
1094 	__be32 dst_ip;
1095 	__be16 src_port;
1096 	__be16 dst_port;
1097 	u8 l4_protocol;
1098 	u8 block_mc_loopback;
1099 	u8 rule_type;
1100 };
1101 
1102 struct ib_mad;
1103 struct ib_grh;
1104 
1105 enum ib_process_mad_flags {
1106 	IB_MAD_IGNORE_MKEY	= 1,
1107 	IB_MAD_IGNORE_BKEY	= 2,
1108 	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
1109 };
1110 
1111 enum ib_mad_result {
1112 	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
1113 	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
1114 	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
1115 	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
1116 };
1117 
1118 #define IB_DEVICE_NAME_MAX 64
1119 
1120 struct ib_cache {
1121 	rwlock_t                lock;
1122 	struct ib_event_handler event_handler;
1123 	struct ib_pkey_cache  **pkey_cache;
1124 	struct ib_gid_cache   **gid_cache;
1125 	u8                     *lmc_cache;
1126 };
1127 
1128 struct ib_dma_mapping_ops {
1129 	int		(*mapping_error)(struct ib_device *dev,
1130 					 u64 dma_addr);
1131 	u64		(*map_single)(struct ib_device *dev,
1132 				      void *ptr, size_t size,
1133 				      enum dma_data_direction direction);
1134 	void		(*unmap_single)(struct ib_device *dev,
1135 					u64 addr, size_t size,
1136 					enum dma_data_direction direction);
1137 	u64		(*map_page)(struct ib_device *dev,
1138 				    struct page *page, unsigned long offset,
1139 				    size_t size,
1140 				    enum dma_data_direction direction);
1141 	void		(*unmap_page)(struct ib_device *dev,
1142 				      u64 addr, size_t size,
1143 				      enum dma_data_direction direction);
1144 	int		(*map_sg)(struct ib_device *dev,
1145 				  struct scatterlist *sg, int nents,
1146 				  enum dma_data_direction direction);
1147 	void		(*unmap_sg)(struct ib_device *dev,
1148 				    struct scatterlist *sg, int nents,
1149 				    enum dma_data_direction direction);
1150 	u64		(*dma_address)(struct ib_device *dev,
1151 				       struct scatterlist *sg);
1152 	unsigned int	(*dma_len)(struct ib_device *dev,
1153 				   struct scatterlist *sg);
1154 	void		(*sync_single_for_cpu)(struct ib_device *dev,
1155 					       u64 dma_handle,
1156 					       size_t size,
1157 					       enum dma_data_direction dir);
1158 	void		(*sync_single_for_device)(struct ib_device *dev,
1159 						  u64 dma_handle,
1160 						  size_t size,
1161 						  enum dma_data_direction dir);
1162 	void		*(*alloc_coherent)(struct ib_device *dev,
1163 					   size_t size,
1164 					   u64 *dma_handle,
1165 					   gfp_t flag);
1166 	void		(*free_coherent)(struct ib_device *dev,
1167 					 size_t size, void *cpu_addr,
1168 					 u64 dma_handle);
1169 };
1170 
1171 struct iw_cm_verbs;
1172 
1173 struct ib_device {
1174 	struct device                *dma_device;
1175 
1176 	char                          name[IB_DEVICE_NAME_MAX];
1177 
1178 	struct list_head              event_handler_list;
1179 	spinlock_t                    event_handler_lock;
1180 
1181 	spinlock_t                    client_data_lock;
1182 	struct list_head              core_list;
1183 	struct list_head              client_data_list;
1184 
1185 	struct ib_cache               cache;
1186 	int                          *pkey_tbl_len;
1187 	int                          *gid_tbl_len;
1188 
1189 	int			      num_comp_vectors;
1190 
1191 	struct iw_cm_verbs	     *iwcm;
1192 
1193 	int		           (*get_protocol_stats)(struct ib_device *device,
1194 							 union rdma_protocol_stats *stats);
1195 	int		           (*query_device)(struct ib_device *device,
1196 						   struct ib_device_attr *device_attr);
1197 	int		           (*query_port)(struct ib_device *device,
1198 						 u8 port_num,
1199 						 struct ib_port_attr *port_attr);
1200 	enum rdma_link_layer	   (*get_link_layer)(struct ib_device *device,
1201 						     u8 port_num);
1202 	int		           (*query_gid)(struct ib_device *device,
1203 						u8 port_num, int index,
1204 						union ib_gid *gid);
1205 	int		           (*query_pkey)(struct ib_device *device,
1206 						 u8 port_num, u16 index, u16 *pkey);
1207 	int		           (*modify_device)(struct ib_device *device,
1208 						    int device_modify_mask,
1209 						    struct ib_device_modify *device_modify);
1210 	int		           (*modify_port)(struct ib_device *device,
1211 						  u8 port_num, int port_modify_mask,
1212 						  struct ib_port_modify *port_modify);
1213 	struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
1214 						     struct ib_udata *udata);
1215 	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
1216 	int                        (*mmap)(struct ib_ucontext *context,
1217 					   struct vm_area_struct *vma);
1218 	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
1219 					       struct ib_ucontext *context,
1220 					       struct ib_udata *udata);
1221 	int                        (*dealloc_pd)(struct ib_pd *pd);
1222 	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
1223 						struct ib_ah_attr *ah_attr);
1224 	int                        (*modify_ah)(struct ib_ah *ah,
1225 						struct ib_ah_attr *ah_attr);
1226 	int                        (*query_ah)(struct ib_ah *ah,
1227 					       struct ib_ah_attr *ah_attr);
1228 	int                        (*destroy_ah)(struct ib_ah *ah);
1229 	struct ib_srq *            (*create_srq)(struct ib_pd *pd,
1230 						 struct ib_srq_init_attr *srq_init_attr,
1231 						 struct ib_udata *udata);
1232 	int                        (*modify_srq)(struct ib_srq *srq,
1233 						 struct ib_srq_attr *srq_attr,
1234 						 enum ib_srq_attr_mask srq_attr_mask,
1235 						 struct ib_udata *udata);
1236 	int                        (*query_srq)(struct ib_srq *srq,
1237 						struct ib_srq_attr *srq_attr);
1238 	int                        (*destroy_srq)(struct ib_srq *srq);
1239 	int                        (*post_srq_recv)(struct ib_srq *srq,
1240 						    struct ib_recv_wr *recv_wr,
1241 						    struct ib_recv_wr **bad_recv_wr);
1242 	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
1243 						struct ib_qp_init_attr *qp_init_attr,
1244 						struct ib_udata *udata);
1245 	int                        (*modify_qp)(struct ib_qp *qp,
1246 						struct ib_qp_attr *qp_attr,
1247 						int qp_attr_mask,
1248 						struct ib_udata *udata);
1249 	int                        (*query_qp)(struct ib_qp *qp,
1250 					       struct ib_qp_attr *qp_attr,
1251 					       int qp_attr_mask,
1252 					       struct ib_qp_init_attr *qp_init_attr);
1253 	int                        (*destroy_qp)(struct ib_qp *qp);
1254 	int                        (*post_send)(struct ib_qp *qp,
1255 						struct ib_send_wr *send_wr,
1256 						struct ib_send_wr **bad_send_wr);
1257 	int                        (*post_recv)(struct ib_qp *qp,
1258 						struct ib_recv_wr *recv_wr,
1259 						struct ib_recv_wr **bad_recv_wr);
1260 	struct ib_cq *             (*create_cq)(struct ib_device *device, int cqe,
1261 						int comp_vector,
1262 						struct ib_ucontext *context,
1263 						struct ib_udata *udata);
1264 	int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
1265 						u16 cq_period);
1266 	int                        (*destroy_cq)(struct ib_cq *cq);
1267 	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
1268 						struct ib_udata *udata);
1269 	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
1270 					      struct ib_wc *wc);
1271 	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
1272 	int                        (*req_notify_cq)(struct ib_cq *cq,
1273 						    enum ib_cq_notify_flags flags);
1274 	int                        (*req_ncomp_notif)(struct ib_cq *cq,
1275 						      int wc_cnt);
1276 	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
1277 						 int mr_access_flags);
1278 	struct ib_mr *             (*reg_phys_mr)(struct ib_pd *pd,
1279 						  struct ib_phys_buf *phys_buf_array,
1280 						  int num_phys_buf,
1281 						  int mr_access_flags,
1282 						  u64 *iova_start);
1283 	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
1284 						  u64 start, u64 length,
1285 						  u64 virt_addr,
1286 						  int mr_access_flags,
1287 						  struct ib_udata *udata,
1288 							int mr_id);
1289 	int                        (*query_mr)(struct ib_mr *mr,
1290 					       struct ib_mr_attr *mr_attr);
1291 	int                        (*dereg_mr)(struct ib_mr *mr);
1292 	struct ib_mr *		   (*alloc_fast_reg_mr)(struct ib_pd *pd,
1293 					       int max_page_list_len);
1294 	struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
1295 								   int page_list_len);
1296 	void			   (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
1297 	int                        (*rereg_phys_mr)(struct ib_mr *mr,
1298 						    int mr_rereg_mask,
1299 						    struct ib_pd *pd,
1300 						    struct ib_phys_buf *phys_buf_array,
1301 						    int num_phys_buf,
1302 						    int mr_access_flags,
1303 						    u64 *iova_start);
1304 	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd);
1305 	int                        (*bind_mw)(struct ib_qp *qp,
1306 					      struct ib_mw *mw,
1307 					      struct ib_mw_bind *mw_bind);
1308 	int                        (*dealloc_mw)(struct ib_mw *mw);
1309 	struct ib_fmr *	           (*alloc_fmr)(struct ib_pd *pd,
1310 						int mr_access_flags,
1311 						struct ib_fmr_attr *fmr_attr);
1312 	int		           (*map_phys_fmr)(struct ib_fmr *fmr,
1313 						   u64 *page_list, int list_len,
1314 						   u64 iova);
1315 	int		           (*unmap_fmr)(struct list_head *fmr_list);
1316 	int		           (*dealloc_fmr)(struct ib_fmr *fmr);
1317 	int                        (*attach_mcast)(struct ib_qp *qp,
1318 						   union ib_gid *gid,
1319 						   u16 lid);
1320 	int                        (*detach_mcast)(struct ib_qp *qp,
1321 						   union ib_gid *gid,
1322 						   u16 lid);
1323 	int                        (*process_mad)(struct ib_device *device,
1324 						  int process_mad_flags,
1325 						  u8 port_num,
1326 						  struct ib_wc *in_wc,
1327 						  struct ib_grh *in_grh,
1328 						  struct ib_mad *in_mad,
1329 						  struct ib_mad *out_mad);
1330 	struct ib_srq *		   (*create_xrc_srq)(struct ib_pd *pd,
1331 						     struct ib_cq *xrc_cq,
1332 						     struct ib_xrcd *xrcd,
1333 						     struct ib_srq_init_attr *srq_init_attr,
1334 						     struct ib_udata *udata);
1335 	struct ib_xrcd *	   (*alloc_xrcd)(struct ib_device *device,
1336 						 struct ib_ucontext *ucontext,
1337 						 struct ib_udata *udata);
1338 	int			   (*dealloc_xrcd)(struct ib_xrcd *xrcd);
1339 	int			   (*create_xrc_rcv_qp)(struct ib_qp_init_attr *init_attr,
1340 							u32 *qp_num);
1341 	int			   (*modify_xrc_rcv_qp)(struct ib_xrcd *xrcd,
1342 							u32 qp_num,
1343 							struct ib_qp_attr *attr,
1344 							int attr_mask);
1345 	int			   (*query_xrc_rcv_qp)(struct ib_xrcd *xrcd,
1346 						       u32 qp_num,
1347 						       struct ib_qp_attr *attr,
1348 						       int attr_mask,
1349 						       struct ib_qp_init_attr *init_attr);
1350 	int 			   (*reg_xrc_rcv_qp)(struct ib_xrcd *xrcd,
1351 						     void *context,
1352 						     u32 qp_num);
1353 	int 			   (*unreg_xrc_rcv_qp)(struct ib_xrcd *xrcd,
1354 						       void *context,
1355 						       u32 qp_num);
1356 	int                        (*attach_flow)(struct ib_qp *qp,
1357 						  struct ib_flow_spec *spec,
1358 						  int priority);
1359 	int                        (*detach_flow)(struct ib_qp *qp,
1360 						  struct ib_flow_spec *spec,
1361 						  int priority);
1362 
1363 	unsigned long		   (*get_unmapped_area)(struct file *file,
1364 					unsigned long addr,
1365 					unsigned long len, unsigned long pgoff,
1366 					unsigned long flags);
1367 	struct ib_dma_mapping_ops   *dma_ops;
1368 
1369 	struct module               *owner;
1370 	struct device                dev;
1371 	struct kobject               *ports_parent;
1372 	struct list_head             port_list;
1373 
1374 	enum {
1375 		IB_DEV_UNINITIALIZED,
1376 		IB_DEV_REGISTERED,
1377 		IB_DEV_UNREGISTERED
1378 	}                            reg_state;
1379 
1380 	int			     uverbs_abi_ver;
1381 	u64			     uverbs_cmd_mask;
1382 
1383 	char			     node_desc[64];
1384 	__be64			     node_guid;
1385 	u32			     local_dma_lkey;
1386 	u8                           node_type;
1387 	u8                           phys_port_cnt;
1388 	struct rb_root		     ib_uverbs_xrcd_table;
1389 	struct mutex		     xrcd_table_mutex;
1390 };
1391 
1392 struct ib_client {
1393 	char  *name;
1394 	void (*add)   (struct ib_device *);
1395 	void (*remove)(struct ib_device *);
1396 
1397 	struct list_head list;
1398 };
1399 
1400 struct ib_device *ib_alloc_device(size_t size);
1401 void ib_dealloc_device(struct ib_device *device);
1402 
1403 int ib_register_device(struct ib_device *device,
1404 		       int (*port_callback)(struct ib_device *,
1405 					    u8, struct kobject *));
1406 void ib_unregister_device(struct ib_device *device);
1407 
1408 int ib_register_client   (struct ib_client *client);
1409 void ib_unregister_client(struct ib_client *client);
1410 
1411 void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
1412 void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
1413 			 void *data);
1414 
1415 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
1416 {
1417 	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
1418 }
1419 
1420 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
1421 {
1422 	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
1423 }
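
/*
 * Usage sketch with a hypothetical reply structure: a verb
 * implementation typically copies its response to userspace with
 * ib_copy_to_udata() and propagates -EFAULT on failure.
 */
#if 0
struct my_create_cq_resp {		/* hypothetical driver reply */
	u32 cqe;
};

static int reply_actual_cqe(struct ib_udata *udata, u32 actual_cqe)
{
	struct my_create_cq_resp resp = { .cqe = actual_cqe };

	return ib_copy_to_udata(udata, &resp, sizeof(resp));
}
#endif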
1424 
1425 /**
1426  * ib_modify_qp_is_ok - Check that the supplied attribute mask
1427  * contains all required attributes and no attributes not allowed for
1428  * the given QP state transition.
1429  * @cur_state: Current QP state
1430  * @next_state: Next QP state
1431  * @type: QP type
1432  * @mask: Mask of supplied QP attributes
1433  *
1434  * This function is a helper function that a low-level driver's
1435  * modify_qp method can use to validate the consumer's input.  It
1436  * checks that cur_state and next_state are valid QP states, that a
1437  * transition from cur_state to next_state is allowed by the IB spec,
1438  * and that the attribute mask supplied is allowed for the transition.
1439  */
1440 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1441 		       enum ib_qp_type type, enum ib_qp_attr_mask mask);
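
/*
 * Sketch of how a low-level driver's modify_qp method might use this
 * helper to validate the consumer's input.  The current-state handling
 * is illustrative; a real driver tracks the QP state itself.
 */
#if 0
static int my_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			int attr_mask, struct ib_udata *udata)
{
	enum ib_qp_state cur = IB_QPS_INIT;	/* driver-tracked state */
	enum ib_qp_state next = (attr_mask & IB_QP_STATE) ?
				attr->qp_state : cur;

	if (!ib_modify_qp_is_ok(cur, next, qp->qp_type, attr_mask))
		return -EINVAL;
	/* ... program the hardware ... */
	return 0;
}
#endif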
1442 
1443 int ib_register_event_handler  (struct ib_event_handler *event_handler);
1444 int ib_unregister_event_handler(struct ib_event_handler *event_handler);
1445 void ib_dispatch_event(struct ib_event *event);
1446 
1447 int ib_query_device(struct ib_device *device,
1448 		    struct ib_device_attr *device_attr);
1449 
1450 int ib_query_port(struct ib_device *device,
1451 		  u8 port_num, struct ib_port_attr *port_attr);
1452 
1453 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
1454 					       u8 port_num);
1455 
1456 int ib_query_gid(struct ib_device *device,
1457 		 u8 port_num, int index, union ib_gid *gid);
1458 
1459 int ib_query_pkey(struct ib_device *device,
1460 		  u8 port_num, u16 index, u16 *pkey);
1461 
1462 int ib_modify_device(struct ib_device *device,
1463 		     int device_modify_mask,
1464 		     struct ib_device_modify *device_modify);
1465 
1466 int ib_modify_port(struct ib_device *device,
1467 		   u8 port_num, int port_modify_mask,
1468 		   struct ib_port_modify *port_modify);
1469 
1470 int ib_find_gid(struct ib_device *device, union ib_gid *gid,
1471 		u8 *port_num, u16 *index);
1472 
1473 int ib_find_pkey(struct ib_device *device,
1474 		 u8 port_num, u16 pkey, u16 *index);
1475 
1476 /**
1477  * ib_alloc_pd - Allocates an unused protection domain.
1478  * @device: The device on which to allocate the protection domain.
1479  *
1480  * A protection domain object provides an association between QPs, shared
1481  * receive queues, address handles, memory regions, and memory windows.
1482  */
1483 struct ib_pd *ib_alloc_pd(struct ib_device *device);
1484 
1485 /**
1486  * ib_dealloc_pd - Deallocates a protection domain.
1487  * @pd: The protection domain to deallocate.
1488  */
1489 int ib_dealloc_pd(struct ib_pd *pd);
1490 
1491 /**
1492  * ib_create_ah - Creates an address handle for the given address vector.
1493  * @pd: The protection domain associated with the address handle.
1494  * @ah_attr: The attributes of the address vector.
1495  *
1496  * The address handle is used to reference a local or global destination
1497  * in all UD QP post sends.
1498  */
1499 struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
1500 
1501 /**
1502  * ib_init_ah_from_wc - Initializes address handle attributes from a
1503  *   work completion.
1504  * @device: Device on which the received message arrived.
1505  * @port_num: Port on which the received message arrived.
1506  * @wc: Work completion associated with the received message.
1507  * @grh: References the received global route header.  This parameter is
1508  *   ignored unless the work completion indicates that the GRH is valid.
1509  * @ah_attr: Returned attributes that can be used when creating an address
1510  *   handle for replying to the message.
1511  */
1512 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
1513 		       struct ib_grh *grh, struct ib_ah_attr *ah_attr);
1514 
1515 /**
1516  * ib_create_ah_from_wc - Creates an address handle associated with the
1517  *   sender of the specified work completion.
1518  * @pd: The protection domain associated with the address handle.
1519  * @wc: Work completion information associated with a received message.
1520  * @grh: References the received global route header.  This parameter is
1521  *   ignored unless the work completion indicates that the GRH is valid.
1522  * @port_num: The outbound port number to associate with the address.
1523  *
1524  * The address handle is used to reference a local or global destination
1525  * in all UD QP post sends.
1526  */
1527 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
1528 				   struct ib_grh *grh, u8 port_num);
1529 
1530 /**
1531  * ib_modify_ah - Modifies the address vector associated with an address
1532  *   handle.
1533  * @ah: The address handle to modify.
1534  * @ah_attr: The new address vector attributes to associate with the
1535  *   address handle.
1536  */
1537 int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1538 
1539 /**
1540  * ib_query_ah - Queries the address vector associated with an address
1541  *   handle.
1542  * @ah: The address handle to query.
1543  * @ah_attr: The address vector attributes associated with the address
1544  *   handle.
1545  */
1546 int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1547 
1548 /**
1549  * ib_destroy_ah - Destroys an address handle.
1550  * @ah: The address handle to destroy.
1551  */
1552 int ib_destroy_ah(struct ib_ah *ah);
1553 
1554 /**
1555  * ib_create_xrc_srq - Creates an XRC SRQ associated with the specified
1556  *   protection domain, cq, and xrc domain.
1557  * @pd: The protection domain associated with the SRQ.
1558  * @xrc_cq: The cq to be associated with the XRC SRQ.
1559  * @xrcd: The XRC domain to be associated with the XRC SRQ.
1560  * @srq_init_attr: A list of initial attributes required to create the
1561  *   XRC SRQ.  If XRC SRQ creation succeeds, then the attributes are updated
1562  *   to the actual capabilities of the created XRC SRQ.
1563  *
1564  * srq_attr->max_wr and srq_attr->max_sge are read to determine the
1565  * requested size of the XRC SRQ, and set to the actual values allocated
1566  * on return.  If ib_create_xrc_srq() succeeds, then max_wr and max_sge
1567  * will always be at least as large as the requested values.
1568  */
1569 struct ib_srq *ib_create_xrc_srq(struct ib_pd *pd,
1570 				 struct ib_cq *xrc_cq,
1571 				 struct ib_xrcd *xrcd,
1572 				 struct ib_srq_init_attr *srq_init_attr);
1573 
1574 /**
1575  * ib_create_srq - Creates a SRQ associated with the specified protection
1576  *   domain.
1577  * @pd: The protection domain associated with the SRQ.
1578  * @srq_init_attr: A list of initial attributes required to create the
1579  *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
1580  *   the actual capabilities of the created SRQ.
1581  *
1582  * srq_attr->max_wr and srq_attr->max_sge are read to determine the
1583  * requested size of the SRQ, and set to the actual values allocated
1584  * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
1585  * will always be at least as large as the requested values.
1586  */
1587 struct ib_srq *ib_create_srq(struct ib_pd *pd,
1588 			     struct ib_srq_init_attr *srq_init_attr);
1589 
1590 /**
1591  * ib_modify_srq - Modifies the attributes for the specified SRQ.
1592  * @srq: The SRQ to modify.
1593  * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
1594  *   the current values of selected SRQ attributes are returned.
1595  * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
1596  *   are being modified.
1597  *
1598  * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
1599  * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
1600  * the number of receives queued drops below the limit.
1601  */
1602 int ib_modify_srq(struct ib_srq *srq,
1603 		  struct ib_srq_attr *srq_attr,
1604 		  enum ib_srq_attr_mask srq_attr_mask);
1605 
1606 /**
1607  * ib_query_srq - Returns the attribute list and current values for the
1608  *   specified SRQ.
1609  * @srq: The SRQ to query.
1610  * @srq_attr: The attributes of the specified SRQ.
1611  */
1612 int ib_query_srq(struct ib_srq *srq,
1613 		 struct ib_srq_attr *srq_attr);
1614 
1615 /**
1616  * ib_destroy_srq - Destroys the specified SRQ.
1617  * @srq: The SRQ to destroy.
1618  */
1619 int ib_destroy_srq(struct ib_srq *srq);
1620 
1621 /**
1622  * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
1623  * @srq: The SRQ to post the work request on.
1624  * @recv_wr: A list of work requests to post on the receive queue.
1625  * @bad_recv_wr: On an immediate failure, this parameter will reference
1626  *   the work request that failed to be posted on the QP.
1627  */
1628 static inline int ib_post_srq_recv(struct ib_srq *srq,
1629 				   struct ib_recv_wr *recv_wr,
1630 				   struct ib_recv_wr **bad_recv_wr)
1631 {
1632 	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
1633 }
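
/*
 * Sketch posting a single receive buffer to an SRQ; the address,
 * length, and lkey would come from a registered memory region.
 */
#if 0
static int post_one_srq_recv(struct ib_srq *srq, u64 addr, u32 len,
			     u32 lkey)
{
	struct ib_sge sge = { .addr = addr, .length = len, .lkey = lkey };
	struct ib_recv_wr wr = { .wr_id = addr, .sg_list = &sge,
				 .num_sge = 1 };
	struct ib_recv_wr *bad_wr;

	return ib_post_srq_recv(srq, &wr, &bad_wr);
}
#endif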
1634 
1635 /**
1636  * ib_create_qp - Creates a QP associated with the specified protection
1637  *   domain.
1638  * @pd: The protection domain associated with the QP.
1639  * @qp_init_attr: A list of initial attributes required to create the
1640  *   QP.  If QP creation succeeds, then the attributes are updated to
1641  *   the actual capabilities of the created QP.
1642  */
1643 struct ib_qp *ib_create_qp(struct ib_pd *pd,
1644 			   struct ib_qp_init_attr *qp_init_attr);
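
/*
 * Minimal sketch of RC QP creation.  The CQ wiring and capacity values
 * are illustrative; on success ib_create_qp() updates attr.cap with the
 * actual capabilities of the created QP.
 */
#if 0
static struct ib_qp *make_rc_qp(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_qp_init_attr attr = {
		.send_cq	= cq,
		.recv_cq	= cq,
		.cap		= { .max_send_wr  = 16, .max_recv_wr  = 16,
				    .max_send_sge = 1,  .max_recv_sge = 1 },
		.sq_sig_type	= IB_SIGNAL_ALL_WR,
		.qp_type	= IB_QPT_RC,
	};

	return ib_create_qp(pd, &attr);
}
#endif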
1645 
1646 /**
1647  * ib_modify_qp - Modifies the attributes for the specified QP and then
1648  *   transitions the QP to the given state.
1649  * @qp: The QP to modify.
1650  * @qp_attr: On input, specifies the QP attributes to modify.  On output,
1651  *   the current values of selected QP attributes are returned.
1652  * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
1653  *   are being modified.
1654  */
1655 int ib_modify_qp(struct ib_qp *qp,
1656 		 struct ib_qp_attr *qp_attr,
1657 		 int qp_attr_mask);
1658 
1659 /**
1660  * ib_query_qp - Returns the attribute list and current values for the
1661  *   specified QP.
1662  * @qp: The QP to query.
1663  * @qp_attr: The attributes of the specified QP.
1664  * @qp_attr_mask: A bit-mask used to select specific attributes to query.
1665  * @qp_init_attr: Additional attributes of the selected QP.
1666  *
1667  * The qp_attr_mask may be used to limit the query to gathering only the
1668  * selected attributes.
1669  */
1670 int ib_query_qp(struct ib_qp *qp,
1671 		struct ib_qp_attr *qp_attr,
1672 		int qp_attr_mask,
1673 		struct ib_qp_init_attr *qp_init_attr);
1674 
1675 /**
1676  * ib_destroy_qp - Destroys the specified QP.
1677  * @qp: The QP to destroy.
1678  */
1679 int ib_destroy_qp(struct ib_qp *qp);
1680 
1681 /**
1682  * ib_open_qp - Obtain a reference to an existing sharable QP.
1683  * @xrcd: XRC domain
1684  * @qp_open_attr: Attributes identifying the QP to open.
1685  *
1686  * Returns a reference to a sharable QP.
1687  */
1688 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
1689 			 struct ib_qp_open_attr *qp_open_attr);
1690 
1691 /**
1692  * ib_close_qp - Release an external reference to a QP.
1693  * @qp: The QP handle to release
1694  *
1695  * The opened QP handle is released by the caller.  The underlying
1696  * shared QP is not destroyed until all internal references are released.
1697  */
1698 int ib_close_qp(struct ib_qp *qp);
1699 
1700 /**
1701  * ib_post_send - Posts a list of work requests to the send queue of
1702  *   the specified QP.
1703  * @qp: The QP to post the work request on.
1704  * @send_wr: A list of work requests to post on the send queue.
1705  * @bad_send_wr: On an immediate failure, this parameter will reference
1706  *   the work request that failed to be posted on the QP.
1707  *
1708  * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
1709  * error is returned, the QP state shall not be affected,
1710  * ib_post_send() will return an immediate error after queueing any
1711  * earlier work requests in the list.
1712  */
1713 static inline int ib_post_send(struct ib_qp *qp,
1714 			       struct ib_send_wr *send_wr,
1715 			       struct ib_send_wr **bad_send_wr)
1716 {
1717 	return qp->device->post_send(qp, send_wr, bad_send_wr);
1718 }
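
/*
 * Sketch posting one signaled SEND.  On an immediate failure, bad_wr
 * references the first request that could not be queued, as documented
 * above.
 */
#if 0
static int post_one_send(struct ib_qp *qp, struct ib_sge *sge)
{
	struct ib_send_wr wr = {
		.wr_id	    = 1,
		.sg_list    = sge,
		.num_sge    = 1,
		.opcode	    = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr, &bad_wr);
}
#endif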
1719 
1720 /**
1721  * ib_post_recv - Posts a list of work requests to the receive queue of
1722  *   the specified QP.
1723  * @qp: The QP to post the work request on.
1724  * @recv_wr: A list of work requests to post on the receive queue.
1725  * @bad_recv_wr: On an immediate failure, this parameter will reference
1726  *   the work request that failed to be posted on the QP.
1727  */
1728 static inline int ib_post_recv(struct ib_qp *qp,
1729 			       struct ib_recv_wr *recv_wr,
1730 			       struct ib_recv_wr **bad_recv_wr)
1731 {
1732 	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
1733 }
1734 
1735 /*
1736  * IB_CQ_VECTOR_LEAST_ATTACHED: The constant specifies that
1737  *	the CQ will be attached to the completion vector that has
1738  *	the fewest CQs already attached to it.
1739  */
1740 #define IB_CQ_VECTOR_LEAST_ATTACHED	0xffffffff
1741 
1742 /**
1743  * ib_create_cq - Creates a CQ on the specified device.
1744  * @device: The device on which to create the CQ.
1745  * @comp_handler: A user-specified callback that is invoked when a
1746  *   completion event occurs on the CQ.
1747  * @event_handler: A user-specified callback that is invoked when an
1748  *   asynchronous event not associated with a completion occurs on the CQ.
1749  * @cq_context: Context associated with the CQ returned to the user via
1750  *   the associated completion and event handlers.
1751  * @cqe: The minimum size of the CQ.
1752  * @comp_vector: Completion vector used to signal completion events.
1753  *     Must be >= 0 and < device->num_comp_vectors.
1754  *
1755  * Users can examine the cq structure to determine the actual CQ size.
1756  */
1757 struct ib_cq *ib_create_cq(struct ib_device *device,
1758 			   ib_comp_handler comp_handler,
1759 			   void (*event_handler)(struct ib_event *, void *),
1760 			   void *cq_context, int cqe, int comp_vector);
1761 
1762 /**
1763  * ib_resize_cq - Modifies the capacity of the CQ.
1764  * @cq: The CQ to resize.
1765  * @cqe: The minimum size of the CQ.
1766  *
1767  * Users can examine the cq structure to determine the actual CQ size.
1768  */
1769 int ib_resize_cq(struct ib_cq *cq, int cqe);
1770 
1771 /**
1772  * ib_modify_cq - Modifies moderation params of the CQ
1773  * @cq: The CQ to modify.
1774  * @cq_count: number of CQEs that will trigger an event
1775  * @cq_period: max period of time in usec before triggering an event
1777  */
1778 int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
1779 
1780 /**
1781  * ib_destroy_cq - Destroys the specified CQ.
1782  * @cq: The CQ to destroy.
1783  */
1784 int ib_destroy_cq(struct ib_cq *cq);
1785 
1786 /**
1787  * ib_poll_cq - poll a CQ for completion(s)
1788  * @cq:the CQ being polled
1789  * @num_entries:maximum number of completions to return
1790  * @wc:array of at least @num_entries &struct ib_wc where completions
1791  *   will be returned
1792  *
1793  * Poll a CQ for (possibly multiple) completions.  If the return value
1794  * is < 0, an error occurred.  If the return value is >= 0, it is the
1795  * number of completions returned.  If the return value is
1796  * non-negative and < num_entries, then the CQ was emptied.
1797  */
1798 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
1799 			     struct ib_wc *wc)
1800 {
1801 	return cq->device->poll_cq(cq, num_entries, wc);
1802 }
1803 
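/*
 * Example: draining a CQ (an illustrative sketch):
 *
 *	struct ib_wc wc[16];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(cq, 16, wc)) > 0)
 *		for (i = 0; i < n; ++i)
 *			...handle wc[i], checking wc[i].status first...
 *
 * A negative return value indicates a polling error.
 */
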
1804 /**
1805  * ib_peek_cq - Returns the number of unreaped completions currently
1806  *   on the specified CQ.
1807  * @cq: The CQ to peek.
1808  * @wc_cnt: A minimum number of unreaped completions to check for.
1809  *
1810  * If the number of unreaped completions is greater than or equal to wc_cnt,
1811  * this function returns wc_cnt, otherwise, it returns the actual number of
1812  * unreaped completions.
1813  */
1814 int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
1815 
1816 /**
1817  * ib_req_notify_cq - Request completion notification on a CQ.
1818  * @cq: The CQ to generate an event for.
1819  * @flags:
1820  *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
1821  *   to request an event on the next solicited event or next work
1822  *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
1823  *   may also be |ed in to request a hint about missed events, as
1824  *   described below.
1825  *
1826  * Return Value:
1827  *    < 0 means an error occurred while requesting notification
1828  *   == 0 means notification was requested successfully, and if
1829  *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
1830  *        were missed and it is safe to wait for another event.  In
1831  *        this case it is guaranteed that any work completions added
1832  *        to the CQ since the last CQ poll will trigger a completion
1833  *        notification event.
1834  *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
1835  *        in.  It means that the consumer must poll the CQ again to
1836  *        make sure it is empty to avoid missing an event because of a
1837  *        race between requesting notification and an entry being
1838  *        added to the CQ.  This return value means it is possible
1839  *        (but not guaranteed) that a work completion has been added
1840  *        to the CQ since the last poll without triggering a
1841  *        completion notification event.
1842  */
1843 static inline int ib_req_notify_cq(struct ib_cq *cq,
1844 				   enum ib_cq_notify_flags flags)
1845 {
1846 	return cq->device->req_notify_cq(cq, flags);
1847 }
1848 
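/*
 * Example: the poll/re-arm idiom implied by the return values above
 * (an illustrative sketch); polling is repeated as long as
 * IB_CQ_REPORT_MISSED_EVENTS reports that completions may have slipped
 * in behind the notification request:
 *
 *	struct ib_wc wc;
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			...handle wc...
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */
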
1849 /**
1850  * ib_req_ncomp_notif - Request completion notification when there are
1851  *   at least the specified number of unreaped completions on the CQ.
1852  * @cq: The CQ to generate an event for.
1853  * @wc_cnt: The number of unreaped completions that should be on the
1854  *   CQ before an event is generated.
1855  */
1856 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
1857 {
1858 	return cq->device->req_ncomp_notif ?
1859 		cq->device->req_ncomp_notif(cq, wc_cnt) :
1860 		-ENOSYS;
1861 }
1862 
1863 /**
1864  * ib_get_dma_mr - Returns a memory region for system memory that is
1865  *   usable for DMA.
1866  * @pd: The protection domain associated with the memory region.
1867  * @mr_access_flags: Specifies the memory access rights.
1868  *
1869  * Note that the ib_dma_*() functions defined below must be used
1870  * to create/destroy addresses used with the Lkey or Rkey returned
1871  * by ib_get_dma_mr().
1872  */
1873 struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
1874 
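/*
 * Example: obtaining a DMA MR for local access (an illustrative
 * sketch):
 *
 *	struct ib_mr *mr;
 *
 *	mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 */
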
1875 /**
1876  * ib_dma_mapping_error - check a DMA addr for error
1877  * @dev: The device for which the dma_addr was created
1878  * @dma_addr: The DMA address to check
1879  */
1880 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
1881 {
1882 	if (dev->dma_ops)
1883 		return dev->dma_ops->mapping_error(dev, dma_addr);
1884 	return dma_mapping_error(dev->dma_device, dma_addr);
1885 }
1886 
1887 /**
1888  * ib_dma_map_single - Map a kernel virtual address to DMA address
1889  * @dev: The device for which the dma_addr is to be created
1890  * @cpu_addr: The kernel virtual address
1891  * @size: The size of the region in bytes
1892  * @direction: The direction of the DMA
1893  */
1894 static inline u64 ib_dma_map_single(struct ib_device *dev,
1895 				    void *cpu_addr, size_t size,
1896 				    enum dma_data_direction direction)
1897 {
1898 	if (dev->dma_ops)
1899 		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
1900 	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
1901 }
1902 
1903 /**
1904  * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
1905  * @dev: The device for which the DMA address was created
1906  * @addr: The DMA address
1907  * @size: The size of the region in bytes
1908  * @direction: The direction of the DMA
1909  */
1910 static inline void ib_dma_unmap_single(struct ib_device *dev,
1911 				       u64 addr, size_t size,
1912 				       enum dma_data_direction direction)
1913 {
1914 	if (dev->dma_ops)
1915 		dev->dma_ops->unmap_single(dev, addr, size, direction);
1916 	else
1917 		dma_unmap_single(dev->dma_device, addr, size, direction);
1918 }
1919 
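/*
 * Example: mapping a kernel buffer for a send and unmapping it again
 * (an illustrative sketch; "buf" and "size" are hypothetical):
 *
 *	u64 dma_addr;
 *
 *	dma_addr = ib_dma_map_single(dev, buf, size, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *	...post work requests that reference dma_addr...
 *	ib_dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
 */
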
1920 static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
1921 					  void *cpu_addr, size_t size,
1922 					  enum dma_data_direction direction,
1923 					  struct dma_attrs *attrs)
1924 {
1925 	return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
1926 				    direction, attrs);
1927 }
1928 
1929 static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
1930 					     u64 addr, size_t size,
1931 					     enum dma_data_direction direction,
1932 					     struct dma_attrs *attrs)
1933 {
1934 	return dma_unmap_single_attrs(dev->dma_device, addr, size,
1935 				      direction, attrs);
1936 }
1937 
1938 /**
1939  * ib_dma_map_page - Map a physical page to DMA address
1940  * @dev: The device for which the dma_addr is to be created
1941  * @page: The page to be mapped
1942  * @offset: The offset within the page
1943  * @size: The size of the region in bytes
1944  * @direction: The direction of the DMA
1945  */
1946 static inline u64 ib_dma_map_page(struct ib_device *dev,
1947 				  struct page *page,
1948 				  unsigned long offset,
1949 				  size_t size,
1950 				  enum dma_data_direction direction)
1951 {
1952 	if (dev->dma_ops)
1953 		return dev->dma_ops->map_page(dev, page, offset, size, direction);
1954 	return dma_map_page(dev->dma_device, page, offset, size, direction);
1955 }
1956 
1957 /**
1958  * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
1959  * @dev: The device for which the DMA address was created
1960  * @addr: The DMA address
1961  * @size: The size of the region in bytes
1962  * @direction: The direction of the DMA
1963  */
1964 static inline void ib_dma_unmap_page(struct ib_device *dev,
1965 				     u64 addr, size_t size,
1966 				     enum dma_data_direction direction)
1967 {
1968 	if (dev->dma_ops)
1969 		dev->dma_ops->unmap_page(dev, addr, size, direction);
1970 	else
1971 		dma_unmap_page(dev->dma_device, addr, size, direction);
1972 }
1973 
1974 /**
1975  * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
1976  * @dev: The device for which the DMA addresses are to be created
1977  * @sg: The array of scatter/gather entries
1978  * @nents: The number of scatter/gather entries
1979  * @direction: The direction of the DMA
1980  */
1981 static inline int ib_dma_map_sg(struct ib_device *dev,
1982 				struct scatterlist *sg, int nents,
1983 				enum dma_data_direction direction)
1984 {
1985 	if (dev->dma_ops)
1986 		return dev->dma_ops->map_sg(dev, sg, nents, direction);
1987 	return dma_map_sg(dev->dma_device, sg, nents, direction);
1988 }
1989 
1990 /**
1991  * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
1992  * @dev: The device for which the DMA addresses were created
1993  * @sg: The array of scatter/gather entries
1994  * @nents: The number of scatter/gather entries
1995  * @direction: The direction of the DMA
1996  */
1997 static inline void ib_dma_unmap_sg(struct ib_device *dev,
1998 				   struct scatterlist *sg, int nents,
1999 				   enum dma_data_direction direction)
2000 {
2001 	if (dev->dma_ops)
2002 		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
2003 	else
2004 		dma_unmap_sg(dev->dma_device, sg, nents, direction);
2005 }
2006 
2007 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
2008 				      struct scatterlist *sg, int nents,
2009 				      enum dma_data_direction direction,
2010 				      struct dma_attrs *attrs)
2011 {
2012 	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
2013 }
2014 
2015 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
2016 					 struct scatterlist *sg, int nents,
2017 					 enum dma_data_direction direction,
2018 					 struct dma_attrs *attrs)
2019 {
2020 	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
2021 }

2022 /**
2023  * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
2024  * @dev: The device for which the DMA addresses were created
2025  * @sg: The scatter/gather entry
2026  */
2027 static inline u64 ib_sg_dma_address(struct ib_device *dev,
2028 				    struct scatterlist *sg)
2029 {
2030 	if (dev->dma_ops)
2031 		return dev->dma_ops->dma_address(dev, sg);
2032 	return sg_dma_address(sg);
2033 }
2034 
2035 /**
2036  * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
2037  * @dev: The device for which the DMA addresses were created
2038  * @sg: The scatter/gather entry
2039  */
2040 static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
2041 					 struct scatterlist *sg)
2042 {
2043 	if (dev->dma_ops)
2044 		return dev->dma_ops->dma_len(dev, sg);
2045 	return sg_dma_len(sg);
2046 }
2047 
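/*
 * Example: mapping a flat scatterlist array and walking the resulting
 * DMA segments (an illustrative sketch; a chained scatterlist would
 * need for_each_sg() rather than direct indexing):
 *
 *	int i, n;
 *
 *	n = ib_dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
 *	if (n == 0)
 *		return -ENOMEM;
 *	for (i = 0; i < n; ++i) {
 *		u64 addr = ib_sg_dma_address(dev, &sg[i]);
 *		unsigned int len = ib_sg_dma_len(dev, &sg[i]);
 *
 *		...build one ib_sge from addr and len...
 *	}
 *	ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
 */
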
2048 /**
2049  * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
2050  * @dev: The device for which the DMA address was created
2051  * @addr: The DMA address
2052  * @size: The size of the region in bytes
2053  * @dir: The direction of the DMA
2054  */
2055 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
2056 					      u64 addr,
2057 					      size_t size,
2058 					      enum dma_data_direction dir)
2059 {
2060 	if (dev->dma_ops)
2061 		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
2062 	else
2063 		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
2064 }
2065 
2066 /**
2067  * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
2068  * @dev: The device for which the DMA address was created
2069  * @addr: The DMA address
2070  * @size: The size of the region in bytes
2071  * @dir: The direction of the DMA
2072  */
2073 static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
2074 						 u64 addr,
2075 						 size_t size,
2076 						 enum dma_data_direction dir)
2077 {
2078 	if (dev->dma_ops)
2079 		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
2080 	else
2081 		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
2082 }
2083 
2084 /**
2085  * ib_dma_alloc_coherent - Allocate memory and map it for DMA
2086  * @dev: The device for which the DMA address is requested
2087  * @size: The size of the region to allocate in bytes
2088  * @dma_handle: A pointer for returning the DMA address of the region
2089  * @flag: memory allocator flags
2090  */
2091 static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
2092 					   size_t size,
2093 					   u64 *dma_handle,
2094 					   gfp_t flag)
2095 {
2096 	if (dev->dma_ops)
2097 		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
2098 	else {
2099 		dma_addr_t handle;
2100 		void *ret;
2101 
2102 		ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
2103 		*dma_handle = handle;
2104 		return ret;
2105 	}
2106 }
2107 
2108 /**
2109  * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
2110  * @dev: The device for which the DMA addresses were allocated
2111  * @size: The size of the region
2112  * @cpu_addr: the address returned by ib_dma_alloc_coherent()
2113  * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
2114  */
2115 static inline void ib_dma_free_coherent(struct ib_device *dev,
2116 					size_t size, void *cpu_addr,
2117 					u64 dma_handle)
2118 {
2119 	if (dev->dma_ops)
2120 		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
2121 	else
2122 		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
2123 }
2124 
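/*
 * Example: allocating a coherent ring buffer and releasing it (an
 * illustrative sketch; RING_SIZE is hypothetical):
 *
 *	u64 dma_handle;
 *	void *ring;
 *
 *	ring = ib_dma_alloc_coherent(dev, RING_SIZE, &dma_handle,
 *				     GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	ib_dma_free_coherent(dev, RING_SIZE, ring, dma_handle);
 */
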
2125 /**
2126  * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
2127  *   by an HCA.
2128  * @pd: The protection domain associated with the registered region.
2129  * @phys_buf_array: Specifies a list of physical buffers to use in the
2130  *   memory region.
2131  * @num_phys_buf: Specifies the size of the phys_buf_array.
2132  * @mr_access_flags: Specifies the memory access rights.
2133  * @iova_start: The offset of the region's starting I/O virtual address.
2134  */
2135 struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
2136 			     struct ib_phys_buf *phys_buf_array,
2137 			     int num_phys_buf,
2138 			     int mr_access_flags,
2139 			     u64 *iova_start);
2140 
2141 /**
2142  * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
2143  *   Conceptually, this call deregisters the memory region and then
2144  *   registers a physical memory region in its place.  Where possible,
2145  *   resources are reused instead of deallocated and reallocated.
2146  * @mr: The memory region to modify.
2147  * @mr_rereg_mask: A bit-mask used to indicate which of the following
2148  *   properties of the memory region are being modified.
2149  * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
2150  *   the new protection domain to associate with the memory region,
2151  *   otherwise, this parameter is ignored.
2152  * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
2153  *   field specifies a list of physical buffers to use in the new
2154  *   translation, otherwise, this parameter is ignored.
2155  * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
2156  *   field specifies the size of the phys_buf_array, otherwise, this
2157  *   parameter is ignored.
2158  * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
2159  *   field specifies the new memory access rights, otherwise, this
2160  *   parameter is ignored.
2161  * @iova_start: The offset of the region's starting I/O virtual address.
2162  */
2163 int ib_rereg_phys_mr(struct ib_mr *mr,
2164 		     int mr_rereg_mask,
2165 		     struct ib_pd *pd,
2166 		     struct ib_phys_buf *phys_buf_array,
2167 		     int num_phys_buf,
2168 		     int mr_access_flags,
2169 		     u64 *iova_start);
2170 
2171 /**
2172  * ib_query_mr - Retrieves information about a specific memory region.
2173  * @mr: The memory region to retrieve information about.
2174  * @mr_attr: The attributes of the specified memory region.
2175  */
2176 int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
2177 
2178 /**
2179  * ib_dereg_mr - Deregisters a memory region and removes it from the
2180  *   HCA translation table.
2181  * @mr: The memory region to deregister.
2182  */
2183 int ib_dereg_mr(struct ib_mr *mr);
2184 
2185 /**
2186  * ib_alloc_fast_reg_mr - Allocates memory region usable with the
2187  *   IB_WR_FAST_REG_MR send work request.
2188  * @pd: The protection domain associated with the region.
2189  * @max_page_list_len: requested max physical buffer list length to be
2190  *   used with fast register work requests for this MR.
2191  */
2192 struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
2193 
2194 /**
2195  * ib_alloc_fast_reg_page_list - Allocates a page list array
2196  * @device: ib device pointer.
2197  * @page_list_len: size of the page list array to be allocated.
2198  *
2199  * This allocates and returns a struct ib_fast_reg_page_list * and a
2200  * page_list array that is at least page_list_len in size.  The actual
2201  * size is returned in max_page_list_len.  The caller is responsible
2202  * for initializing the contents of the page_list array before posting
2203  * a send work request with the IB_WR_FAST_REG_MR opcode.
2204  *
2205  * The page_list array entries must be translated using one of the
2206  * ib_dma_*() functions just like the addresses passed to
2207  * ib_map_phys_fmr().  Once ib_post_send() has been issued, the struct
2208  * ib_fast_reg_page_list must not be modified by the caller until the
2209  * corresponding IB_WR_FAST_REG_MR work request completes.
2210  */
2211 struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
2212 				struct ib_device *device, int page_list_len);
2213 
2214 /**
2215  * ib_free_fast_reg_page_list - Deallocates a previously allocated
2216  *   page list array.
2217  * @page_list: struct ib_fast_reg_page_list pointer to be deallocated.
2218  */
2219 void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
2220 
2221 /**
2222  * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
2223  *   R_Key and L_Key.
2224  * @mr: struct ib_mr pointer to be updated.
2225  * @newkey: new key to be used.
2226  */
2227 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
2228 {
2229 	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
2230 	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
2231 }
2232 
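/*
 * Example: refreshing the key and building a fast-register work
 * request (an illustrative sketch; the fast_reg fields follow struct
 * ib_send_wr as defined earlier in this header, and "iova", "npages",
 * "page_list" and the u8 counter "key" are hypothetical):
 *
 *	struct ib_send_wr fr_wr;
 *
 *	ib_update_fast_reg_key(mr, ++key);
 *	memset(&fr_wr, 0, sizeof(fr_wr));
 *	fr_wr.opcode			= IB_WR_FAST_REG_MR;
 *	fr_wr.wr.fast_reg.iova_start	= iova;
 *	fr_wr.wr.fast_reg.page_list	= page_list;
 *	fr_wr.wr.fast_reg.page_list_len	= npages;
 *	fr_wr.wr.fast_reg.page_shift	= PAGE_SHIFT;
 *	fr_wr.wr.fast_reg.length	= npages * PAGE_SIZE;
 *	fr_wr.wr.fast_reg.access_flags	= IB_ACCESS_REMOTE_READ;
 *	fr_wr.wr.fast_reg.rkey		= mr->rkey;
 */
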
2233 /**
2234  * ib_alloc_mw - Allocates a memory window.
2235  * @pd: The protection domain associated with the memory window.
2236  */
2237 struct ib_mw *ib_alloc_mw(struct ib_pd *pd);
2238 
2239 /**
2240  * ib_bind_mw - Posts a work request to the send queue of the specified
2241  *   QP, which binds the memory window to the given address range and
2242  *   remote access attributes.
2243  * @qp: QP to post the bind work request on.
2244  * @mw: The memory window to bind.
2245  * @mw_bind: Specifies information about the memory window, including
2246  *   its address range, remote access rights, and associated memory region.
2247  */
2248 static inline int ib_bind_mw(struct ib_qp *qp,
2249 			     struct ib_mw *mw,
2250 			     struct ib_mw_bind *mw_bind)
2251 {
2252 	/* XXX reference counting in corresponding MR? */
2253 	return mw->device->bind_mw ?
2254 		mw->device->bind_mw(qp, mw, mw_bind) :
2255 		-ENOSYS;
2256 }
2257 
2258 /**
2259  * ib_dealloc_mw - Deallocates a memory window.
2260  * @mw: The memory window to deallocate.
2261  */
2262 int ib_dealloc_mw(struct ib_mw *mw);
2263 
2264 /**
2265  * ib_alloc_fmr - Allocates an unmapped fast memory region.
2266  * @pd: The protection domain associated with the unmapped region.
2267  * @mr_access_flags: Specifies the memory access rights.
2268  * @fmr_attr: Attributes of the unmapped region.
2269  *
2270  * A fast memory region must be mapped before it can be used as part of
2271  * a work request.
2272  */
2273 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
2274 			    int mr_access_flags,
2275 			    struct ib_fmr_attr *fmr_attr);
2276 
2277 /**
2278  * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
2279  * @fmr: The fast memory region to associate with the pages.
2280  * @page_list: An array of physical pages to map to the fast memory region.
2281  * @list_len: The number of pages in page_list.
2282  * @iova: The I/O virtual address to use with the mapped region.
2283  */
2284 static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
2285 				  u64 *page_list, int list_len,
2286 				  u64 iova)
2287 {
2288 	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
2289 }
2290 
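/*
 * Example: mapping and later unmapping an FMR (an illustrative sketch;
 * "pages" holds "npages" DMA addresses obtained via the ib_dma_*()
 * helpers and aligned to the FMR's page size, and "iova" is
 * hypothetical):
 *
 *	LIST_HEAD(fmr_list);
 *	int ret;
 *
 *	ret = ib_map_phys_fmr(fmr, pages, npages, iova);
 *	if (ret)
 *		return ret;
 *	...use fmr->lkey and fmr->rkey in work requests...
 *	list_add_tail(&fmr->list, &fmr_list);
 *	ib_unmap_fmr(&fmr_list);
 */
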
2291 /**
2292  * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
2293  * @fmr_list: A linked list of fast memory regions to unmap.
2294  */
2295 int ib_unmap_fmr(struct list_head *fmr_list);
2296 
2297 /**
2298  * ib_dealloc_fmr - Deallocates a fast memory region.
2299  * @fmr: The fast memory region to deallocate.
2300  */
2301 int ib_dealloc_fmr(struct ib_fmr *fmr);
2302 
2303 /**
2304  * ib_attach_mcast - Attaches the specified QP to a multicast group.
2305  * @qp: QP to attach to the multicast group.  The QP must be type
2306  *   IB_QPT_UD.
2307  * @gid: Multicast group GID.
2308  * @lid: Multicast group LID in host byte order.
2309  *
2310  * In order to send and receive multicast packets, subnet
2311  * administration must have created the multicast group and configured
2312  * the fabric appropriately.  The port associated with the specified
2313  * QP must also be a member of the multicast group.
2314  */
2315 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2316 
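/*
 * Example: joining and leaving multicast traffic (an illustrative
 * sketch; "mgid" and "mlid" are assumed to come from a subnet
 * administration join already performed by the caller):
 *
 *	ret = ib_attach_mcast(qp, &mgid, mlid);
 *	if (ret)
 *		return ret;
 *	...
 *	ib_detach_mcast(qp, &mgid, mlid);
 */
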
2317 /**
2318  * ib_detach_mcast - Detaches the specified QP from a multicast group.
2319  * @qp: QP to detach from the multicast group.
2320  * @gid: Multicast group GID.
2321  * @lid: Multicast group LID in host byte order.
2322  */
2323 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2324 
2325 /**
2326  * ib_alloc_xrcd - Allocates an XRC domain.
2327  * @device: The device on which to allocate the XRC domain.
2328  */
2329 struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);
2330 
2331 /**
2332  * ib_dealloc_xrcd - Deallocates an XRC domain.
2333  * @xrcd: The XRC domain to deallocate.
2334  */
2335 int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
2336 
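/**
 * ib_attach_flow, ib_detach_flow - Attach/detach a flow specification
 *   to/from a QP, so that matching packets are (or are no longer)
 *   steered to it.
 * @qp: The QP the flow specification applies to.
 * @spec: The flow specification.
 * @priority: Priority of the flow rule.
 */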
2337 int ib_attach_flow(struct ib_qp *qp, struct ib_flow_spec *spec, int priority);
2338 int ib_detach_flow(struct ib_qp *qp, struct ib_flow_spec *spec, int priority);
2339 
2340 #endif /* IB_VERBS_H */
2341