/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ib_verbs.h 1349 2004-12-16 21:09:43Z roland $
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>

#include <asm/atomic.h>
#include <asm/scatterlist.h>
#include <asm/uaccess.h>

union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

enum rdma_node_type {
	/* IB values map to NodeInfo:NodeType. */
	RDMA_NODE_IB_CA		= 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP
};

enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR		= 1,
	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
	IB_DEVICE_RAW_MULTI		= (1<<3),
	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
	IB_DEVICE_INIT_TYPE		= (1<<9),
	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
	IB_DEVICE_SRQ_RESIZE		= (1<<13),
	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
	IB_DEVICE_ZERO_STAG		= (1<<15),
	IB_DEVICE_SEND_W_INV		= (1<<16),
	IB_DEVICE_MEM_WINDOW		= (1<<17)
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

struct ib_device_attr {
	u64			fw_ver;
	__be64			sys_image_guid;
	u64			max_mr_size;
	u64			page_size_cap;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	int			device_cap_flags;
	int			max_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_fmr;
	int			max_map_per_fmr;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
};

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

enum ib_port_cap_flags {
	IB_PORT_SM				= 1 <<  1,
	IB_PORT_NOTICE_SUP			= 1 <<  2,
	IB_PORT_TRAP_SUP			= 1 <<  3,
	IB_PORT_OPT_IPD_SUP			= 1 <<  4,
	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
	IB_PORT_SL_MAP_SUP			= 1 <<  6,
	IB_PORT_MKEY_NVRAM			= 1 <<  7,
	IB_PORT_PKEY_NVRAM			= 1 <<  8,
	IB_PORT_LED_INFO_SUP			= 1 <<  9,
	IB_PORT_SM_DISABLED			= 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
	IB_PORT_CM_SUP				= 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
	IB_PORT_REINIT_SUP			= 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
	IB_PORT_CLIENT_REG_SUP			= 1 << 25
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	   return -1;
	}
}

struct ib_port_attr {
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	int			gid_tbl_len;
	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u16			lid;
	u16			sm_lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u8			active_speed;
	u8			phys_state;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[64];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN		= 1,
	IB_PORT_INIT_TYPE		= (1<<2),
	IB_PORT_RESET_QKEY_CNTR		= (1<<3)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER
};

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		u8		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void            (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
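
/*
 * Usage sketch (illustrative, not part of this API): a client fills in
 * an ib_event_handler with INIT_IB_EVENT_HANDLER and registers it with
 * ib_register_event_handler(), declared below.  my_event_handler and
 * my_handler are hypothetical names.
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ACTIVE)
 *			printk(KERN_INFO "port %d is active\n",
 *			       event->element.port_num);
 *	}
 *
 *	static struct ib_event_handler my_handler;
 *
 *	INIT_IB_EVENT_HANDLER(&my_handler, device, my_event_handler);
 *	ib_register_event_handler(&my_handler);
 */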

struct ib_global_route {
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	__constant_htons(0xFFFF)

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

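/*
 * The values below follow the rate encoding that the IBTA
 * specification uses in path records, which is why they are not
 * monotonic (for example, 10 Gb/sec is 3 while 5 Gb/sec is 5).  Use
 * the conversion helpers below rather than comparing raw values.
 */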
enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;

struct ib_ah_attr {
	struct ib_global_route	grh;
	u16			dlid;
	u8			sl;
	u8			src_path_bits;
	u8			static_rate;
	u8			ah_flags;
	u8			port_num;
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_BIND_MW,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV			= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};
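
/*
 * Illustrative use of the receive bit (wc is a struct ib_wc filled in
 * by ib_poll_cq(), declared below):
 *
 *	if (wc.opcode & IB_WC_RECV)
 *		... completion is for a receive ...
 *	else
 *		... completion is for a send-queue operation ...
 */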

enum ib_wc_flags {
	IB_WC_GRH		= 1,
	IB_WC_WITH_IMM		= (1<<1)
};

struct ib_wc {
	u64			wr_id;
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	__be32			imm_data;
	u32			qp_num;
	u32			src_qp;
	int			wc_flags;
	u16			pkey_index;
	u16			slid;
	u8			sl;
	u8			dlid_path_bits;
	u8			port_num;	/* valid only for DR SMPs on switches */
};

enum ib_cq_notify {
	IB_CQ_SOLICITED,
	IB_CQ_NEXT_COMP
};

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32	max_wr;
	u32	max_sge;
	u32	srq_limit;
};

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	struct ib_srq_attr	attr;
};

struct ib_qp_cap {
	u32	max_send_wr;
	u32	max_recv_wr;
	u32	max_send_sge;
	u32	max_recv_sge;
	u32	max_inline_data;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETY
};

struct ib_qp_init_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	u8			port_num; /* special QP types only */
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20)
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct ib_ah_attr	ah_attr;
	struct ib_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u8			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u8			alt_port_num;
	u8			alt_timeout;
};

enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD
};

enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3)
};

struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};

struct ib_send_wr {
	struct ib_send_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;
	__be32			imm_data;
	union {
		struct {
			u64	remote_addr;
			u32	rkey;
		} rdma;
		struct {
			u64	remote_addr;
			u64	compare_add;
			u64	swap;
			u32	rkey;
		} atomic;
		struct {
			struct ib_ah *ah;
			u32	remote_qpn;
			u32	remote_qkey;
			u16	pkey_index; /* valid for GSI only */
			u8	port_num;   /* valid for DR SMPs on switch only */
		} ud;
	} wr;
};

struct ib_recv_wr {
	struct ib_recv_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE	= 1,
	IB_ACCESS_REMOTE_WRITE	= (1<<1),
	IB_ACCESS_REMOTE_READ	= (1<<2),
	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
	IB_ACCESS_MW_BIND	= (1<<4)
};

struct ib_phys_buf {
	u64      addr;
	u64      size;
};

struct ib_mr_attr {
	struct ib_pd	*pd;
	u64		device_virt_addr;
	u64		size;
	int		mr_access_flags;
	u32		lkey;
	u32		rkey;
};

enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2)
};

struct ib_mw_bind {
	struct ib_mr   *mr;
	u64		wr_id;
	u64		addr;
	u32		length;
	int		send_flags;
	int		mw_access_flags;
};

struct ib_fmr_attr {
	int	max_pages;
	int	max_maps;
	u8	page_shift;
};

struct ib_ucontext {
	struct ib_device       *device;
	struct list_head	pd_list;
	struct list_head	mr_list;
	struct list_head	mw_list;
	struct list_head	cq_list;
	struct list_head	qp_list;
	struct list_head	srq_list;
	struct list_head	ah_list;
};

struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	struct ib_ucontext     *context;	/* associated user context */
	void		       *object;		/* containing object */
	struct list_head	list;		/* link to context's list */
	u32			id;		/* index into kernel idr */
	struct kref		ref;
	struct rw_semaphore	mutex;		/* protects .live */
	int			live;
};

struct ib_umem {
	unsigned long		user_base;
	unsigned long		virt_base;
	size_t			length;
	int			offset;
	int			page_size;
	int			writable;
	struct list_head	chunk_list;
};

struct ib_umem_chunk {
	struct list_head	list;
	int			nents;
	int			nmap;
	struct scatterlist	page_list[0];
};

struct ib_udata {
	void __user *inbuf;
	void __user *outbuf;
	size_t       inlen;
	size_t       outlen;
};

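/*
 * The number of scatterlist entries that fit in the page_list[] array
 * of an ib_umem_chunk when the chunk as a whole occupies exactly one
 * page: the space left after the chunk header, divided by the size of
 * a single page_list entry.
 */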
#define IB_UMEM_MAX_PAGE_CHUNK						\
	((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) /	\
	 ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] -	\
	  (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))

struct ib_umem_object {
	struct ib_uobject	uobject;
	struct ib_umem		umem;
};

struct ib_pd {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt; /* count all resources */
};

struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	ib_comp_handler		comp_handler;
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *cq_context;
	int			cqe;
	atomic_t		usecnt; /* count number of work queues */
};

struct ib_srq {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	atomic_t		usecnt;
};

struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_uobject      *uobject;
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

struct ib_mr {
	struct ib_device  *device;
	struct ib_pd	  *pd;
	struct ib_uobject *uobject;
	u32		   lkey;
	u32		   rkey;
	atomic_t	   usecnt; /* count number of MWs */
};

struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
};

struct ib_fmr {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct list_head	list;
	u32			lkey;
	u32			rkey;
};

struct ib_mad;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
	rwlock_t                lock;
	struct ib_event_handler event_handler;
	struct ib_pkey_cache  **pkey_cache;
	struct ib_gid_cache   **gid_cache;
	u8                     *lmc_cache;
};

struct iw_cm_verbs;

struct ib_device {
	struct device                *dma_device;

	char                          name[IB_DEVICE_NAME_MAX];

	struct list_head              event_handler_list;
	spinlock_t                    event_handler_lock;

	struct list_head              core_list;
	struct list_head              client_data_list;
	spinlock_t                    client_data_lock;

	struct ib_cache               cache;

	u32                           flags;

	struct iw_cm_verbs	     *iwcm;

	int		           (*query_device)(struct ib_device *device,
						   struct ib_device_attr *device_attr);
	int		           (*query_port)(struct ib_device *device,
						 u8 port_num,
						 struct ib_port_attr *port_attr);
	int		           (*query_gid)(struct ib_device *device,
						u8 port_num, int index,
						union ib_gid *gid);
	int		           (*query_pkey)(struct ib_device *device,
						 u8 port_num, u16 index, u16 *pkey);
	int		           (*modify_device)(struct ib_device *device,
						    int device_modify_mask,
						    struct ib_device_modify *device_modify);
	int		           (*modify_port)(struct ib_device *device,
						  u8 port_num, int port_modify_mask,
						  struct ib_port_modify *port_modify);
	struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
						     struct ib_udata *udata);
	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
	int                        (*mmap)(struct ib_ucontext *context,
					   struct vm_area_struct *vma);
	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
					       struct ib_ucontext *context,
					       struct ib_udata *udata);
	int                        (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
						struct ib_ah_attr *ah_attr);
	int                        (*modify_ah)(struct ib_ah *ah,
						struct ib_ah_attr *ah_attr);
	int                        (*query_ah)(struct ib_ah *ah,
					       struct ib_ah_attr *ah_attr);
	int                        (*destroy_ah)(struct ib_ah *ah);
	struct ib_srq *            (*create_srq)(struct ib_pd *pd,
						 struct ib_srq_init_attr *srq_init_attr,
						 struct ib_udata *udata);
	int                        (*modify_srq)(struct ib_srq *srq,
						 struct ib_srq_attr *srq_attr,
						 enum ib_srq_attr_mask srq_attr_mask,
						 struct ib_udata *udata);
	int                        (*query_srq)(struct ib_srq *srq,
						struct ib_srq_attr *srq_attr);
	int                        (*destroy_srq)(struct ib_srq *srq);
	int                        (*post_srq_recv)(struct ib_srq *srq,
						    struct ib_recv_wr *recv_wr,
						    struct ib_recv_wr **bad_recv_wr);
	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
						struct ib_qp_init_attr *qp_init_attr,
						struct ib_udata *udata);
	int                        (*modify_qp)(struct ib_qp *qp,
						struct ib_qp_attr *qp_attr,
						int qp_attr_mask,
						struct ib_udata *udata);
	int                        (*query_qp)(struct ib_qp *qp,
					       struct ib_qp_attr *qp_attr,
					       int qp_attr_mask,
					       struct ib_qp_init_attr *qp_init_attr);
	int                        (*destroy_qp)(struct ib_qp *qp);
	int                        (*post_send)(struct ib_qp *qp,
						struct ib_send_wr *send_wr,
						struct ib_send_wr **bad_send_wr);
	int                        (*post_recv)(struct ib_qp *qp,
						struct ib_recv_wr *recv_wr,
						struct ib_recv_wr **bad_recv_wr);
	struct ib_cq *             (*create_cq)(struct ib_device *device, int cqe,
						struct ib_ucontext *context,
						struct ib_udata *udata);
	int                        (*destroy_cq)(struct ib_cq *cq);
	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
						struct ib_udata *udata);
	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
					      struct ib_wc *wc);
	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int                        (*req_notify_cq)(struct ib_cq *cq,
						    enum ib_cq_notify cq_notify);
	int                        (*req_ncomp_notif)(struct ib_cq *cq,
						      int wc_cnt);
	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
						 int mr_access_flags);
	struct ib_mr *             (*reg_phys_mr)(struct ib_pd *pd,
						  struct ib_phys_buf *phys_buf_array,
						  int num_phys_buf,
						  int mr_access_flags,
						  u64 *iova_start);
	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
						  struct ib_umem *region,
						  int mr_access_flags,
						  struct ib_udata *udata);
	int                        (*query_mr)(struct ib_mr *mr,
					       struct ib_mr_attr *mr_attr);
	int                        (*dereg_mr)(struct ib_mr *mr);
	int                        (*rereg_phys_mr)(struct ib_mr *mr,
						    int mr_rereg_mask,
						    struct ib_pd *pd,
						    struct ib_phys_buf *phys_buf_array,
						    int num_phys_buf,
						    int mr_access_flags,
						    u64 *iova_start);
	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd);
	int                        (*bind_mw)(struct ib_qp *qp,
					      struct ib_mw *mw,
					      struct ib_mw_bind *mw_bind);
	int                        (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *	           (*alloc_fmr)(struct ib_pd *pd,
						int mr_access_flags,
						struct ib_fmr_attr *fmr_attr);
	int		           (*map_phys_fmr)(struct ib_fmr *fmr,
						   u64 *page_list, int list_len,
						   u64 iova);
	int		           (*unmap_fmr)(struct list_head *fmr_list);
	int		           (*dealloc_fmr)(struct ib_fmr *fmr);
	int                        (*attach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*detach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*process_mad)(struct ib_device *device,
						  int process_mad_flags,
						  u8 port_num,
						  struct ib_wc *in_wc,
						  struct ib_grh *in_grh,
						  struct ib_mad *in_mad,
						  struct ib_mad *out_mad);

	struct module               *owner;
	struct class_device          class_dev;
	struct kobject               ports_parent;
	struct list_head             port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	}                            reg_state;

	u64			     uverbs_cmd_mask;
	int			     uverbs_abi_ver;

	char			     node_desc[64];
	__be64			     node_guid;
	u8                           node_type;
	u8                           phys_port_cnt;
};

struct ib_client {
	char  *name;
	void (*add)   (struct ib_device *);
	void (*remove)(struct ib_device *);

	struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

int ib_register_device   (struct ib_device *device);
void ib_unregister_device(struct ib_device *device);

int ib_register_client   (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
			 void *data);

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}
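
/*
 * Sketch of typical use in a driver verb (my_resp is a hypothetical
 * driver-private response structure): the uverbs layer hands the verb
 * a struct ib_udata, and the driver returns its private data to
 * userspace through it.
 *
 *	struct my_resp resp;
 *
 *	resp.doorbell_offset = ...;
 *	if (ib_copy_to_udata(udata, &resp, sizeof resp))
 *		return ERR_PTR(-EFAULT);
 */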

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask);
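
/*
 * A driver's modify_qp method would typically validate its input along
 * these lines (a sketch; how the driver tracks its current state is
 * driver-specific):
 *
 *	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state :
 *						  (driver-tracked state);
 *	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, qp->qp_type,
 *				attr_mask))
 *		return -EINVAL;
 */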

int ib_register_event_handler  (struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_device(struct ib_device *device,
		    struct ib_device_attr *device_attr);

int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid);

int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device);
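
/*
 * Like the other verbs below that return an object pointer,
 * ib_alloc_pd() returns an ERR_PTR() value on failure:
 *
 *	struct ib_pd *pd = ib_alloc_pd(device);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 */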

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 */
int ib_dealloc_pd(struct ib_pd *pd);

/**
 * ib_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);

/**
 * ib_init_ah_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num);

/**
 * ib_modify_ah - Modifies the address vector associated with an address
 *   handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_query_ah - Queries the address vector associated with an address
 *   handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */
int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */
int ib_destroy_ah(struct ib_ah *ah);

/**
 * ib_create_srq - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr);

/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the SRQ.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   struct ib_recv_wr *recv_wr,
				   struct ib_recv_wr **bad_recv_wr)
{
	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}

/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 *   transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);
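
/*
 * Sketch of the standard RESET->INIT transition for an RC QP; the mask
 * must name exactly the attributes the IB spec requires for the
 * transition (the port number and access flags here are illustrative):
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state        = IB_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	ret = ib_modify_qp(qp, &attr,
 *			   IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT  | IB_QP_ACCESS_FLAGS);
 */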

/**
 * ib_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       struct ib_send_wr *send_wr,
			       struct ib_send_wr **bad_send_wr)
{
	return qp->device->post_send(qp, send_wr, bad_send_wr);
}
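
/*
 * Minimal example, sketched: post one signaled SEND of a single
 * registered buffer.  buf_dma (a DMA address obtained via the DMA
 * mapping API), len, mr and the my_cookie work-request ID are assumed
 * to exist in the caller.
 *
 *	struct ib_sge sge = {
 *		.addr   = buf_dma,
 *		.length = len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id      = my_cookie,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	struct ib_send_wr *bad_wr;
 *
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 */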

/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       struct ib_recv_wr *recv_wr,
			       struct ib_recv_wr **bad_recv_wr)
{
	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}
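
/*
 * Receive work requests carry only a scatter list (sge and my_cookie
 * as in the send example above):
 *
 *	struct ib_recv_wr wr = {
 *		.wr_id   = my_cookie,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *	struct ib_recv_wr *bad_wr;
 *
 *	ret = ib_post_recv(qp, &wr, &bad_wr);
 */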

/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe);

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->poll_cq(cq, num_entries, wc);
}
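
/*
 * Typical polling loop, sketched (process_wc is a hypothetical
 * consumer helper):
 *
 *	struct ib_wc wc;
 *	int n;
 *
 *	while ((n = ib_poll_cq(cq, 1, &wc)) > 0)
 *		process_wc(&wc);
 *	if (n < 0)
 *		... handle the error ...
 */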

/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 *   on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @cq_notify: If set to %IB_CQ_SOLICITED, completion notification will
 *   occur on the next solicited event. If set to %IB_CQ_NEXT_COMP,
 *   notification will occur on the next completion.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify cq_notify)
{
	return cq->device->req_notify_cq(cq, cq_notify);
}
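
/*
 * A completion can slip in between a consumer's final poll and its
 * notification request, so the usual pattern is to poll again after
 * arming the CQ (drain_cq is a hypothetical helper that polls until
 * the CQ is empty):
 *
 *	drain_cq(cq);
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 *	drain_cq(cq);
 */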

/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->req_ncomp_notif ?
		cq->device->req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}

/**
 * ib_get_dma_mr - Returns a memory region for system memory that is
 *   usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights.
 */
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

/**
 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
 *   by an HCA.
 * @pd: The protection domain assigned to the registered region.
 * @phys_buf_array: Specifies a list of physical buffers to use in the
 *   memory region.
 * @num_phys_buf: Specifies the size of the phys_buf_array.
 * @mr_access_flags: Specifies the memory access rights.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start);

/**
 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 *   Conceptually, this call deregisters the memory region and then
 *   registers it again with the modified attributes.  Where possible,
 *   resources are reused rather than deallocated and reallocated.
 * @mr: The memory region to modify.
 * @mr_rereg_mask: A bit-mask used to indicate which of the following
 *   properties of the memory region are being modified.
 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
 *   the new protection domain to associate with the memory region,
 *   otherwise, this parameter is ignored.
 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies a list of physical buffers to use in the new
 *   translation, otherwise, this parameter is ignored.
 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies the size of the phys_buf_array, otherwise, this
 *   parameter is ignored.
 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
 *   field specifies the new memory access rights, otherwise, this
 *   parameter is ignored.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start);

/**
 * ib_query_mr - Retrieves information about a specific memory region.
 * @mr: The memory region to retrieve information about.
 * @mr_attr: The attributes of the specified memory region.
 */
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 */
int ib_dereg_mr(struct ib_mr *mr);

/**
 * ib_alloc_mw - Allocates a memory window.
 * @pd: The protection domain associated with the memory window.
 */
struct ib_mw *ib_alloc_mw(struct ib_pd *pd);

/**
 * ib_bind_mw - Posts a work request to the send queue of the specified
 *   QP, which binds the memory window to the given address range and
 *   remote access attributes.
 * @qp: QP to post the bind work request on.
 * @mw: The memory window to bind.
 * @mw_bind: Specifies information about the memory window, including
 *   its address range, remote access rights, and associated memory region.
 */
static inline int ib_bind_mw(struct ib_qp *qp,
			     struct ib_mw *mw,
			     struct ib_mw_bind *mw_bind)
{
	/* XXX reference counting in corresponding MR? */
	return mw->device->bind_mw ?
		mw->device->bind_mw(qp, mw, mw_bind) :
		-ENOSYS;
}

/**
 * ib_dealloc_mw - Deallocates a memory window.
 * @mw: The memory window to deallocate.
 */
int ib_dealloc_mw(struct ib_mw *mw);

/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
				  u64 *page_list, int list_len,
				  u64 iova)
{
	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}
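
/*
 * Sketch of the FMR life cycle (page_list holds DMA addresses aligned
 * to the FMR's page size; the attribute values are illustrative):
 *
 *	struct ib_fmr_attr attr = {
 *		.max_pages  = 64,
 *		.max_maps   = 32,
 *		.page_shift = PAGE_SHIFT,
 *	};
 *	struct ib_fmr *fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE, &attr);
 *
 *	ib_map_phys_fmr(fmr, page_list, npages, iova);
 *	... use fmr->lkey / fmr->rkey in work requests ...
 *
 *	LIST_HEAD(fmr_list);
 *	list_add(&fmr->list, &fmr_list);
 *	ib_unmap_fmr(&fmr_list);
 *	ib_dealloc_fmr(fmr);
 */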

/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);

/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

#endif /* IB_VERBS_H */