/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ib_verbs.h 1349 2004-12-16 21:09:43Z roland $
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>

union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

enum rdma_node_type {
	/* IB values map to NodeInfo:NodeType. */
	RDMA_NODE_IB_CA		= 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP
};

enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR		= 1,
	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
	IB_DEVICE_RAW_MULTI		= (1<<3),
	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
	IB_DEVICE_INIT_TYPE		= (1<<9),
	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
	IB_DEVICE_SRQ_RESIZE		= (1<<13),
	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
	IB_DEVICE_ZERO_STAG		= (1<<15),
	IB_DEVICE_SEND_W_INV		= (1<<16),
	IB_DEVICE_MEM_WINDOW		= (1<<17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages.  Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM		= (1<<18),
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

struct ib_device_attr {
	u64			fw_ver;
	__be64			sys_image_guid;
	u64			max_mr_size;
	u64			page_size_cap;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	int			device_cap_flags;
	int			max_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_fmr;
	int			max_map_per_fmr;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
};

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

enum ib_port_cap_flags {
	IB_PORT_SM				= 1 <<  1,
	IB_PORT_NOTICE_SUP			= 1 <<  2,
	IB_PORT_TRAP_SUP			= 1 <<  3,
	IB_PORT_OPT_IPD_SUP			= 1 <<  4,
	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
	IB_PORT_SL_MAP_SUP			= 1 <<  6,
	IB_PORT_MKEY_NVRAM			= 1 <<  7,
	IB_PORT_PKEY_NVRAM			= 1 <<  8,
	IB_PORT_LED_INFO_SUP			= 1 <<  9,
	IB_PORT_SM_DISABLED			= 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
	IB_PORT_CM_SUP				= 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
	IB_PORT_REINIT_SUP			= 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
	IB_PORT_CLIENT_REG_SUP			= 1 << 25
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	   return -1;
	}
}

struct ib_port_attr {
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	int			gid_tbl_len;
	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u16			lid;
	u16			sm_lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u8			active_speed;
	u8			phys_state;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[64];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN		= 1,
	IB_PORT_INIT_TYPE		= (1<<2),
	IB_PORT_RESET_QKEY_CNTR		= (1<<3)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER
};

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		u8		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void            (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
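
/*
 * Example (illustrative sketch, not part of this header): a consumer
 * can register for asynchronous events with a handler initialized by
 * INIT_IB_EVENT_HANDLER and passed to ib_register_event_handler(),
 * declared below.  "my_async_handler" is a hypothetical callback.
 *
 *	static void my_async_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ACTIVE)
 *			printk(KERN_INFO "port %d is active\n",
 *			       event->element.port_num);
 *	}
 *
 *	struct ib_event_handler handler;
 *
 *	INIT_IB_EVENT_HANDLER(&handler, device, my_async_handler);
 *	ib_register_event_handler(&handler);
 */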

struct ib_global_route {
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	__constant_htons(0xFFFF)

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;
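
/*
 * For example, mult_to_ib_rate(4) returns IB_RATE_10_GBPS, since
 * 10 Gbit/sec is 4 * 2.5 Gbit/sec.
 */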

struct ib_ah_attr {
	struct ib_global_route	grh;
	u16			dlid;
	u8			sl;
	u8			src_path_bits;
	u8			static_rate;
	u8			ah_flags;
	u8			port_num;
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_BIND_MW,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV			= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
	IB_WC_GRH		= 1,
	IB_WC_WITH_IMM		= (1<<1)
};

struct ib_wc {
	u64			wr_id;
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	struct ib_qp	       *qp;
	__be32			imm_data;
	u32			src_qp;
	int			wc_flags;
	u16			pkey_index;
	u16			slid;
	u8			sl;
	u8			dlid_path_bits;
	u8			port_num;	/* valid only for DR SMPs on switches */
	int			csum_ok;
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32	max_wr;
	u32	max_sge;
	u32	srq_limit;
};

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	struct ib_srq_attr	attr;
};

struct ib_qp_cap {
	u32	max_send_wr;
	u32	max_recv_wr;
	u32	max_send_sge;
	u32	max_recv_sge;
	u32	max_inline_data;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETY
};

struct ib_qp_init_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	u8			port_num; /* special QP types only */
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20)
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct ib_ah_attr	ah_attr;
	struct ib_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u8			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u8			alt_port_num;
	u8			alt_timeout;
};

enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD
};

enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3),
	IB_SEND_IP_CSUM		= (1<<4)
};

struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};

struct ib_send_wr {
	struct ib_send_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;
	__be32			imm_data;
	union {
		struct {
			u64	remote_addr;
			u32	rkey;
		} rdma;
		struct {
			u64	remote_addr;
			u64	compare_add;
			u64	swap;
			u32	rkey;
		} atomic;
		struct {
			struct ib_ah *ah;
			u32	remote_qpn;
			u32	remote_qkey;
			u16	pkey_index; /* valid for GSI only */
			u8	port_num;   /* valid for DR SMPs on switch only */
		} ud;
	} wr;
};

struct ib_recv_wr {
	struct ib_recv_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE	= 1,
	IB_ACCESS_REMOTE_WRITE	= (1<<1),
	IB_ACCESS_REMOTE_READ	= (1<<2),
	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
	IB_ACCESS_MW_BIND	= (1<<4)
};

struct ib_phys_buf {
	u64      addr;
	u64      size;
};

struct ib_mr_attr {
	struct ib_pd	*pd;
	u64		device_virt_addr;
	u64		size;
	int		mr_access_flags;
	u32		lkey;
	u32		rkey;
};

enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2)
};

struct ib_mw_bind {
	struct ib_mr   *mr;
	u64		wr_id;
	u64		addr;
	u32		length;
	int		send_flags;
	int		mw_access_flags;
};

struct ib_fmr_attr {
	int	max_pages;
	int	max_maps;
	u8	page_shift;
};

struct ib_ucontext {
	struct ib_device       *device;
	struct list_head	pd_list;
	struct list_head	mr_list;
	struct list_head	mw_list;
	struct list_head	cq_list;
	struct list_head	qp_list;
	struct list_head	srq_list;
	struct list_head	ah_list;
	int			closing;
};

struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	struct ib_ucontext     *context;	/* associated user context */
	void		       *object;		/* containing object */
	struct list_head	list;		/* link to context's list */
	u32			id;		/* index into kernel idr */
	struct kref		ref;
	struct rw_semaphore	mutex;		/* protects .live */
	int			live;
};

struct ib_udata {
	void __user *inbuf;
	void __user *outbuf;
	size_t       inlen;
	size_t       outlen;
};

struct ib_pd {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt; /* count all resources */
};

struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	ib_comp_handler		comp_handler;
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *cq_context;
	int			cqe;
	atomic_t		usecnt; /* count number of work queues */
};

struct ib_srq {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	atomic_t		usecnt;
};

struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_uobject      *uobject;
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

struct ib_mr {
	struct ib_device  *device;
	struct ib_pd	  *pd;
	struct ib_uobject *uobject;
	u32		   lkey;
	u32		   rkey;
	atomic_t	   usecnt; /* count number of MWs */
};

struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
};

struct ib_fmr {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct list_head	list;
	u32			lkey;
	u32			rkey;
};

struct ib_mad;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
	rwlock_t                lock;
	struct ib_event_handler event_handler;
	struct ib_pkey_cache  **pkey_cache;
	struct ib_gid_cache   **gid_cache;
	u8                     *lmc_cache;
};

struct ib_dma_mapping_ops {
	int		(*mapping_error)(struct ib_device *dev,
					 u64 dma_addr);
	u64		(*map_single)(struct ib_device *dev,
				      void *ptr, size_t size,
				      enum dma_data_direction direction);
	void		(*unmap_single)(struct ib_device *dev,
					u64 addr, size_t size,
					enum dma_data_direction direction);
	u64		(*map_page)(struct ib_device *dev,
				    struct page *page, unsigned long offset,
				    size_t size,
				    enum dma_data_direction direction);
	void		(*unmap_page)(struct ib_device *dev,
				      u64 addr, size_t size,
				      enum dma_data_direction direction);
	int		(*map_sg)(struct ib_device *dev,
				  struct scatterlist *sg, int nents,
				  enum dma_data_direction direction);
	void		(*unmap_sg)(struct ib_device *dev,
				    struct scatterlist *sg, int nents,
				    enum dma_data_direction direction);
	u64		(*dma_address)(struct ib_device *dev,
				       struct scatterlist *sg);
	unsigned int	(*dma_len)(struct ib_device *dev,
				   struct scatterlist *sg);
	void		(*sync_single_for_cpu)(struct ib_device *dev,
					       u64 dma_handle,
					       size_t size,
					       enum dma_data_direction dir);
	void		(*sync_single_for_device)(struct ib_device *dev,
						  u64 dma_handle,
						  size_t size,
						  enum dma_data_direction dir);
	void		*(*alloc_coherent)(struct ib_device *dev,
					   size_t size,
					   u64 *dma_handle,
					   gfp_t flag);
	void		(*free_coherent)(struct ib_device *dev,
					 size_t size, void *cpu_addr,
					 u64 dma_handle);
};

struct iw_cm_verbs;

struct ib_device {
	struct device                *dma_device;

	char                          name[IB_DEVICE_NAME_MAX];

	struct list_head              event_handler_list;
	spinlock_t                    event_handler_lock;

	struct list_head              core_list;
	struct list_head              client_data_list;
	spinlock_t                    client_data_lock;

	struct ib_cache               cache;
	int                          *pkey_tbl_len;
	int                          *gid_tbl_len;

	int			      num_comp_vectors;

	struct iw_cm_verbs	     *iwcm;

	int		           (*query_device)(struct ib_device *device,
						   struct ib_device_attr *device_attr);
	int		           (*query_port)(struct ib_device *device,
						 u8 port_num,
						 struct ib_port_attr *port_attr);
	int		           (*query_gid)(struct ib_device *device,
						u8 port_num, int index,
						union ib_gid *gid);
	int		           (*query_pkey)(struct ib_device *device,
						 u8 port_num, u16 index, u16 *pkey);
	int		           (*modify_device)(struct ib_device *device,
						    int device_modify_mask,
						    struct ib_device_modify *device_modify);
	int		           (*modify_port)(struct ib_device *device,
						  u8 port_num, int port_modify_mask,
						  struct ib_port_modify *port_modify);
	struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
						     struct ib_udata *udata);
	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
	int                        (*mmap)(struct ib_ucontext *context,
					   struct vm_area_struct *vma);
	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
					       struct ib_ucontext *context,
					       struct ib_udata *udata);
	int                        (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
						struct ib_ah_attr *ah_attr);
	int                        (*modify_ah)(struct ib_ah *ah,
						struct ib_ah_attr *ah_attr);
	int                        (*query_ah)(struct ib_ah *ah,
					       struct ib_ah_attr *ah_attr);
	int                        (*destroy_ah)(struct ib_ah *ah);
	struct ib_srq *            (*create_srq)(struct ib_pd *pd,
						 struct ib_srq_init_attr *srq_init_attr,
						 struct ib_udata *udata);
	int                        (*modify_srq)(struct ib_srq *srq,
						 struct ib_srq_attr *srq_attr,
						 enum ib_srq_attr_mask srq_attr_mask,
						 struct ib_udata *udata);
	int                        (*query_srq)(struct ib_srq *srq,
						struct ib_srq_attr *srq_attr);
	int                        (*destroy_srq)(struct ib_srq *srq);
	int                        (*post_srq_recv)(struct ib_srq *srq,
						    struct ib_recv_wr *recv_wr,
						    struct ib_recv_wr **bad_recv_wr);
	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
						struct ib_qp_init_attr *qp_init_attr,
						struct ib_udata *udata);
	int                        (*modify_qp)(struct ib_qp *qp,
						struct ib_qp_attr *qp_attr,
						int qp_attr_mask,
						struct ib_udata *udata);
	int                        (*query_qp)(struct ib_qp *qp,
					       struct ib_qp_attr *qp_attr,
					       int qp_attr_mask,
					       struct ib_qp_init_attr *qp_init_attr);
	int                        (*destroy_qp)(struct ib_qp *qp);
	int                        (*post_send)(struct ib_qp *qp,
						struct ib_send_wr *send_wr,
						struct ib_send_wr **bad_send_wr);
	int                        (*post_recv)(struct ib_qp *qp,
						struct ib_recv_wr *recv_wr,
						struct ib_recv_wr **bad_recv_wr);
	struct ib_cq *             (*create_cq)(struct ib_device *device, int cqe,
						int comp_vector,
						struct ib_ucontext *context,
						struct ib_udata *udata);
	int                        (*destroy_cq)(struct ib_cq *cq);
	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
						struct ib_udata *udata);
	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
					      struct ib_wc *wc);
	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int                        (*req_notify_cq)(struct ib_cq *cq,
						    enum ib_cq_notify_flags flags);
	int                        (*req_ncomp_notif)(struct ib_cq *cq,
						      int wc_cnt);
	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
						 int mr_access_flags);
	struct ib_mr *             (*reg_phys_mr)(struct ib_pd *pd,
						  struct ib_phys_buf *phys_buf_array,
						  int num_phys_buf,
						  int mr_access_flags,
						  u64 *iova_start);
	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
						  u64 start, u64 length,
						  u64 virt_addr,
						  int mr_access_flags,
						  struct ib_udata *udata);
	int                        (*query_mr)(struct ib_mr *mr,
					       struct ib_mr_attr *mr_attr);
	int                        (*dereg_mr)(struct ib_mr *mr);
	int                        (*rereg_phys_mr)(struct ib_mr *mr,
						    int mr_rereg_mask,
						    struct ib_pd *pd,
						    struct ib_phys_buf *phys_buf_array,
						    int num_phys_buf,
						    int mr_access_flags,
						    u64 *iova_start);
	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd);
	int                        (*bind_mw)(struct ib_qp *qp,
					      struct ib_mw *mw,
					      struct ib_mw_bind *mw_bind);
	int                        (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *	           (*alloc_fmr)(struct ib_pd *pd,
						int mr_access_flags,
						struct ib_fmr_attr *fmr_attr);
	int		           (*map_phys_fmr)(struct ib_fmr *fmr,
						   u64 *page_list, int list_len,
						   u64 iova);
	int		           (*unmap_fmr)(struct list_head *fmr_list);
	int		           (*dealloc_fmr)(struct ib_fmr *fmr);
	int                        (*attach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*detach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*process_mad)(struct ib_device *device,
						  int process_mad_flags,
						  u8 port_num,
						  struct ib_wc *in_wc,
						  struct ib_grh *in_grh,
						  struct ib_mad *in_mad,
						  struct ib_mad *out_mad);

	struct ib_dma_mapping_ops   *dma_ops;

	struct module               *owner;
	struct class_device          class_dev;
	struct kobject               *ports_parent;
	struct list_head             port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	}                            reg_state;

	u64			     uverbs_cmd_mask;
	int			     uverbs_abi_ver;

	char			     node_desc[64];
	__be64			     node_guid;
	u8                           node_type;
	u8                           phys_port_cnt;
};

struct ib_client {
	char  *name;
	void (*add)   (struct ib_device *);
	void (*remove)(struct ib_device *);

	struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

int ib_register_device   (struct ib_device *device);
void ib_unregister_device(struct ib_device *device);

int ib_register_client   (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
			 void *data);

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask);
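
/*
 * Example (illustrative sketch): a driver's modify_qp method typically
 * validates its input with this helper before touching hardware state.
 * "my_qp->state" stands for whatever driver-private copy of the QP
 * state the driver keeps.
 *
 *	cur_state = my_qp->state;
 *	new_state = qp_attr_mask & IB_QP_STATE ? qp_attr->qp_state :
 *						 cur_state;
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *				qp_attr_mask))
 *		return -EINVAL;
 */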

int ib_register_event_handler  (struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_device(struct ib_device *device,
		    struct ib_device_attr *device_attr);

int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid);

int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index);

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device);
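
/*
 * Example (illustrative sketch): like the other verbs that return a
 * pointer, ib_alloc_pd() encodes failure as an ERR_PTR value, so
 * callers should check with IS_ERR():
 *
 *	struct ib_pd *pd = ib_alloc_pd(device);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 */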

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 */
int ib_dealloc_pd(struct ib_pd *pd);

/**
 * ib_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);

/**
 * ib_init_ah_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num);
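
/*
 * Example (illustrative sketch): a UD service replying to a received
 * datagram can build the reply AH straight from the work completion.
 * "recv_buf" is a hypothetical receive buffer; when a GRH is present
 * it occupies the first 40 bytes of a UD receive buffer.
 *
 *	struct ib_ah *ah;
 *
 *	ah = ib_create_ah_from_wc(pd, &wc, (struct ib_grh *) recv_buf,
 *				  port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 */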

/**
 * ib_modify_ah - Modifies the address vector associated with an address
 *   handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_query_ah - Queries the address vector associated with an address
 *   handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */
int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */
int ib_destroy_ah(struct ib_ah *ah);

/**
 * ib_create_srq - Creates an SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr);
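
/*
 * Example (illustrative sketch): "srq_event" and "my_ctx" are
 * hypothetical; the capacities are arbitrary.
 *
 *	struct ib_srq_init_attr srq_init_attr = {
 *		.event_handler = srq_event,
 *		.srq_context   = my_ctx,
 *		.attr = {
 *			.max_wr	 = 256,
 *			.max_sge = 1,
 *		},
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &srq_init_attr);
 */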

/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the SRQ.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   struct ib_recv_wr *recv_wr,
				   struct ib_recv_wr **bad_recv_wr)
{
	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}

/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);
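
/*
 * Example (illustrative sketch): creating an RC QP whose send and
 * receive queues report completions to the same CQ.  Capacities are
 * arbitrary.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq     = cq,
 *		.recv_cq     = cq,
 *		.cap = {
 *			.max_send_wr  = 64,
 *			.max_recv_wr  = 64,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_ALL_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 */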

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 *   transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);
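
/*
 * Example (illustrative sketch): the RESET->INIT transition of an RC
 * QP requires the attributes flagged by IB_QP_STATE, IB_QP_PKEY_INDEX,
 * IB_QP_PORT and IB_QP_ACCESS_FLAGS:
 *
 *	struct ib_qp_attr qp_attr = {
 *		.qp_state	 = IB_QPS_INIT,
 *		.pkey_index	 = 0,
 *		.port_num	 = port,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	ret = ib_modify_qp(qp, &qp_attr,
 *			   IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */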

/**
 * ib_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       struct ib_send_wr *send_wr,
			       struct ib_send_wr **bad_send_wr)
{
	return qp->device->post_send(qp, send_wr, bad_send_wr);
}
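
/*
 * Example (illustrative sketch): posting a single signaled SEND of one
 * buffer.  "dma_addr" must come from the ib_dma_* routines below, and
 * "mr" from one of the registration verbs; MY_WR_ID is a hypothetical
 * cookie returned in the matching work completion.
 *
 *	struct ib_sge sge = {
 *		.addr	= dma_addr,
 *		.length	= len,
 *		.lkey	= mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id	    = MY_WR_ID,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode	    = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	}, *bad_wr;
 *
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 */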

/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       struct ib_recv_wr *recv_wr,
			       struct ib_recv_wr **bad_recv_wr)
{
	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}
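
/*
 * Example (illustrative sketch): replenishing the receive queue with
 * one buffer, reusing an ib_sge set up as in the ib_post_send()
 * example above; the completion for this WR carries back wr_id.
 *
 *	struct ib_recv_wr wr = {
 *		.wr_id	 = (u64) (unsigned long) buf,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	}, *bad_wr;
 *
 *	ret = ib_post_recv(qp, &wr, &bad_wr);
 */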

/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cqe: The minimum size of the CQ.
 * @comp_vector: Completion vector used to signal completion events.
 *     Must be >= 0 and < device->num_comp_vectors.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe, int comp_vector);
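
/*
 * Example (illustrative sketch): "my_comp_handler" is a hypothetical
 * completion callback; a NULL event handler is acceptable when no
 * asynchronous CQ events need handling.
 *
 *	struct ib_cq *cq = ib_create_cq(device, my_comp_handler, NULL,
 *					my_ctx, 128, 0);
 *
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */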

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->poll_cq(cq, num_entries, wc);
}

/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 *   on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event.  In
 *        this case it is guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in.  It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ.  This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->req_notify_cq(cq, flags);
}
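
/*
 * Example (illustrative sketch): the standard way to avoid the race
 * described above is to poll the CQ to empty, re-arm it, and poll
 * again whenever the re-arm reports possibly missed events.
 * "handle_wc" is a hypothetical completion handler.
 *
 *	static void drain_and_rearm(struct ib_cq *cq)
 *	{
 *		struct ib_wc wc;
 *
 *		do {
 *			while (ib_poll_cq(cq, 1, &wc) > 0)
 *				handle_wc(&wc);
 *		} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *					      IB_CQ_REPORT_MISSED_EVENTS) > 0);
 *	}
 */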

/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->req_ncomp_notif ?
		cq->device->req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}

/**
 * ib_get_dma_mr - Returns a memory region for system memory that is
 *   usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights.
 *
 * Note that the ib_dma_*() functions defined below must be used
 * to create/destroy addresses used with the Lkey or Rkey returned
 * by ib_get_dma_mr().
 */
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	if (dev->dma_ops)
		return dev->dma_ops->mapping_error(dev, dma_addr);
	return dma_mapping_error(dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}
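
/*
 * Example (illustrative sketch): mapping a kernel buffer for a send
 * and checking the result, as required before using the address in an
 * ib_sge:
 *
 *	u64 dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 */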

/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
				       u64 addr, size_t size,
				       enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_single(dev, addr, size, direction);
	else
		dma_unmap_single(dev->dma_device, addr, size, direction);
}

/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
				  struct page *page,
				  unsigned long offset,
				  size_t size,
				  enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_page(dev, page, offset, size, direction);
	return dma_map_page(dev->dma_device, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
				     u64 addr, size_t size,
				     enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_page(dev, addr, size, direction);
	else
		dma_unmap_page(dev->dma_device, addr, size, direction);
}

/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_sg(dev, sg, nents, direction);
	return dma_map_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
	else
		dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 */
static inline u64 ib_sg_dma_address(struct ib_device *dev,
				    struct scatterlist *sg)
{
	if (dev->dma_ops)
		return dev->dma_ops->dma_address(dev, sg);
	return sg_dma_address(sg);
}

/**
 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 */
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
					 struct scatterlist *sg)
{
	if (dev->dma_ops)
		return dev->dma_ops->dma_len(dev, sg);
	return sg_dma_len(sg);
}
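
/*
 * Example (illustrative sketch): after a successful ib_dma_map_sg(),
 * consumers walk the mapped entries with the two helpers above, e.g.
 * to build one ib_sge per mapped chunk ("sge" and "mr" hypothetical):
 *
 *	int i, n = ib_dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *
 *	if (!n)
 *		return -ENOMEM;
 *	for (i = 0; i < n; ++i) {
 *		sge[i].addr   = ib_sg_dma_address(dev, &sglist[i]);
 *		sge[i].length = ib_sg_dma_len(dev, &sglist[i]);
 *		sge[i].lkey   = mr->lkey;
 *	}
 */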

/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
	else
		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
	else
		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
					  size_t size,
					  u64 *dma_handle,
					  gfp_t flag)
{
	if (dev->dma_ops)
		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
	else {
		dma_addr_t handle;
		void *ret;

		ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
		*dma_handle = handle;
		return ret;
	}
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
					size_t size, void *cpu_addr,
					u64 dma_handle)
{
	if (dev->dma_ops)
		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
	else
		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}

/**
 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
 *   by an HCA.
 * @pd: The protection domain assigned to the registered region.
 * @phys_buf_array: Specifies a list of physical buffers to use in the
 *   memory region.
 * @num_phys_buf: Specifies the size of the phys_buf_array.
 * @mr_access_flags: Specifies the memory access rights.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start);

/**
 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 *   Conceptually, this call performs the functions of deregistering the
 *   memory region followed by registering a physical memory region.
 *   Where possible, resources are reused instead of deallocated and
 *   reallocated.
 * @mr: The memory region to modify.
 * @mr_rereg_mask: A bit-mask used to indicate which of the following
 *   properties of the memory region are being modified.
 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
 *   the new protection domain to associate with the memory region,
 *   otherwise, this parameter is ignored.
 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies a list of physical buffers to use in the new
 *   translation, otherwise, this parameter is ignored.
 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies the size of the phys_buf_array, otherwise, this
 *   parameter is ignored.
 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
 *   field specifies the new memory access rights, otherwise, this
 *   parameter is ignored.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start);

/**
 * ib_query_mr - Retrieves information about a specific memory region.
 * @mr: The memory region to retrieve information about.
 * @mr_attr: The attributes of the specified memory region.
 */
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 */
int ib_dereg_mr(struct ib_mr *mr);

/**
 * ib_alloc_mw - Allocates a memory window.
 * @pd: The protection domain associated with the memory window.
 */
struct ib_mw *ib_alloc_mw(struct ib_pd *pd);

/**
 * ib_bind_mw - Posts a work request to the send queue of the specified
 *   QP, which binds the memory window to the given address range and
 *   remote access attributes.
 * @qp: QP to post the bind work request on.
 * @mw: The memory window to bind.
 * @mw_bind: Specifies information about the memory window, including
 *   its address range, remote access rights, and associated memory region.
 */
static inline int ib_bind_mw(struct ib_qp *qp,
			     struct ib_mw *mw,
			     struct ib_mw_bind *mw_bind)
{
	/* XXX reference counting in corresponding MR? */
	return mw->device->bind_mw ?
		mw->device->bind_mw(qp, mw, mw_bind) :
		-ENOSYS;
}

/**
 * ib_dealloc_mw - Deallocates a memory window.
 * @mw: The memory window to deallocate.
 */
int ib_dealloc_mw(struct ib_mw *mw);

/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
				  u64 *page_list, int list_len,
				  u64 iova)
{
	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}
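
/*
 * Example (illustrative sketch): a typical FMR cycle maps a page list,
 * posts work requests that reference fmr->lkey or fmr->rkey, and later
 * unmaps a whole batch of regions in a single call by chaining them
 * through fmr->list:
 *
 *	ret = ib_map_phys_fmr(fmr, page_list, npages, iova);
 *
 *	... post work requests and wait for their completions ...
 *
 *	list_add_tail(&fmr->list, &fmr_batch);
 *	ret = ib_unmap_fmr(&fmr_batch);
 */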

/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);

/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

#endif /* IB_VERBS_H */