/*
 * This file contains definitions used in OFED defined user/kernel
 * interfaces. These are imported from the OFED header ib_verbs.h. Oracle
 * elects to have and use the contents of ib_verbs.h under and governed
 * by the OpenIB.org BSD license (see below for full license text). However,
 * the following notice accompanied the original version of this file:
 */

/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef _SYS_IB_CLIENTS_OF_IB_VERBS_H
#define	_SYS_IB_CLIENTS_OF_IB_VERBS_H

#ifdef __cplusplus
extern "C" {
#endif

#include <sys/atomic.h>
#include <sys/mutex.h>
#include <sys/ib/ibtl/ibvti.h>
#include <sys/ib/clients/of/ofa_solaris.h>
#include <sys/ib/clients/of/sol_ofs/sol_ofs_common.h>

typedef struct sol_ofs_client_s *ofs_client_p_t;

union ib_gid {
	uint8_t	raw[16];
	struct {
		uint64_t	subnet_prefix;
		uint64_t	interface_id;
	} global;
};

enum rdma_node_type {
	/* IB values map to NodeInfo:NodeType. */
	RDMA_NODE_IB_CA		= 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP
};

#define	__attribute_const__		__attribute__((__const__))
enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR		= 1,
	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
	IB_DEVICE_RAW_MULTI		= (1<<3),
	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
	IB_DEVICE_INIT_TYPE		= (1<<9),
	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
	IB_DEVICE_SRQ_RESIZE		= (1<<13),
	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
	IB_DEVICE_ZERO_STAG		= (1<<15),
	IB_DEVICE_SEND_W_INV		= (1<<16),
	IB_DEVICE_MEM_WINDOW		= (1<<17)
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

struct ib_device_attr {
	uint64_t		fw_ver;
	uint64_t		sys_image_guid;
	uint64_t		max_mr_size;
	uint64_t		page_size_cap;
	uint32_t		vendor_id;
	uint32_t		vendor_part_id;
	uint32_t		hw_ver;
	int			max_qp;
	int			max_qp_wr;
	int			device_cap_flags;
	int			max_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_fmr;
	int			max_map_per_fmr;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	uint16_t		max_pkeys;
	uint8_t			local_ca_ack_delay;
};

enum ib_mtu {
	OFED_IB_MTU_256  = 1,
	OFED_IB_MTU_512  = 2,
	OFED_IB_MTU_1024 = 3,
	OFED_IB_MTU_2048 = 4,
	OFED_IB_MTU_4096 = 5
};

enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

enum ib_port_cap_flags {
	IB_PORT_SM				= 1 <<  1,
	IB_PORT_NOTICE_SUP			= 1 <<  2,
	IB_PORT_TRAP_SUP			= 1 <<  3,
	IB_PORT_OPT_IPD_SUP			= 1 <<  4,
	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
	IB_PORT_SL_MAP_SUP			= 1 <<  6,
	IB_PORT_MKEY_NVRAM			= 1 <<  7,
	IB_PORT_PKEY_NVRAM			= 1 <<  8,
	IB_PORT_LED_INFO_SUP			= 1 <<  9,
	IB_PORT_SM_DISABLED			= 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
	IB_PORT_CM_SUP				= 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
	IB_PORT_REINIT_SUP			= 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
	IB_PORT_CLIENT_REG_SUP			= 1 << 25
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
		case IB_WIDTH_1X:  return  1;
		case IB_WIDTH_4X:  return  4;
		case IB_WIDTH_8X:  return  8;
		case IB_WIDTH_12X: return 12;
		default: return -1;
	}
}

struct ib_port_attr {
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	int			gid_tbl_len;
	uint32_t		port_cap_flags;
	uint32_t		max_msg_sz;
	uint32_t		bad_pkey_cntr;
	uint32_t		qkey_viol_cntr;
	uint16_t		pkey_tbl_len;
	uint16_t		lid;
	uint16_t		sm_lid;
	uint8_t			lmc;
	uint8_t			max_vl_num;
	uint8_t			sm_sl;
	uint8_t			subnet_timeout;
	uint8_t			init_type_reply;
	uint8_t			active_width;
	uint8_t			active_speed;
	uint8_t			phys_state;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

struct ib_device_modify {
	uint64_t	sys_image_guid;
	char		node_desc[64];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN		= 1,
	IB_PORT_INIT_TYPE		= (1<<2),
	IB_PORT_RESET_QKEY_CNTR		= (1<<3)
};

struct ib_port_modify {
	uint32_t	set_port_cap_mask;
	uint32_t	clr_port_cap_mask;
	uint8_t		init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER
};

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		uint8_t		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device	*device;
	void			(*handler)(struct ib_event_handler *,
				    struct ib_event *);
	llist_head_t		list;
};

struct ib_global_route {
	union ib_gid	dgid;
	uint32_t	flow_label;
	uint8_t		sgid_index;
	uint8_t		hop_limit;
	uint8_t		traffic_class;
};

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10
};

struct ib_ah_attr {
	struct ib_global_route	grh;
	uint16_t		dlid;
	uint8_t			sl;
	uint8_t			src_path_bits;
	uint8_t			static_rate;
	uint8_t			ah_flags;
	uint8_t			port_num;
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_BIND_MW,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV			= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};
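
/*
 * For example, a consumer can classify a completion with a single test
 * (a minimal sketch; wc is a struct ib_wc filled in by ib_poll_cq()):
 *
 *	if (wc.opcode & IB_WC_RECV) {
 *		... the completion is a receive, i.e. IB_WC_RECV or
 *		IB_WC_RECV_RDMA_WITH_IMM ...
 *	}
 */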

enum ib_wc_flags {
	IB_WC_GRH		= 1,
	IB_WC_WITH_IMM		= (1<<1),
};

struct ib_wc {
	uint64_t		wr_id;
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	uint32_t		vendor_err;
	uint32_t		byte_len;
	struct ib_qp		*qp;
	uint32_t		imm_data;
	uint32_t		src_qp;
	int			wc_flags;
	uint16_t		pkey_index;
	uint16_t		slid;
	uint8_t			sl;
	uint8_t			dlid_path_bits;
	uint8_t			port_num;
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	uint32_t	max_wr;
	uint32_t	max_sge;
	uint32_t	srq_limit;
};

struct ib_srq_init_attr {
	void			(*event_handler)(struct ib_event *, void *);
	void			*srq_context;
	struct ib_srq_attr	attr;
};

struct ib_qp_cap {
	uint32_t	max_send_wr;
	uint32_t	max_recv_wr;
	uint32_t	max_send_sge;
	uint32_t	max_recv_sge;
	uint32_t	max_inline_data;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETY
};

struct ib_qp_init_attr {
	void			(*event_handler)(struct ib_event *, void *);
	void			*qp_context;
	struct ib_cq		*send_cq;
	struct ib_cq		*recv_cq;
	struct ib_srq		*srq;
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	uint8_t			port_num; /* special QP types only */
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20)
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	uint32_t		qkey;
	uint32_t		rq_psn;
	uint32_t		sq_psn;
	uint32_t		dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct ib_ah_attr	ah_attr;
	struct ib_ah_attr	alt_ah_attr;
	uint16_t		pkey_index;
	uint16_t		alt_pkey_index;
	uint8_t			en_sqd_async_notify;
	uint8_t			sq_draining;
	uint8_t			max_rd_atomic;
	uint8_t			max_dest_rd_atomic;
	uint8_t			min_rnr_timer;
	uint8_t			port_num;
	uint8_t			timeout;
	uint8_t			retry_cnt;
	uint8_t			rnr_retry;
	uint8_t			alt_port_num;
	uint8_t			alt_timeout;
};

enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE	= 1,
	IB_ACCESS_REMOTE_WRITE	= (1<<1),
	IB_ACCESS_REMOTE_READ	= (1<<2),
	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
	IB_ACCESS_MW_BIND	= (1<<4),
	IB_ACCESS_SO		= (1<<5)	/* MR with Strong Ordering */
};

struct ib_pd {
	struct ib_device	*device;
	ibt_pd_hdl_t		ibt_pd;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
	struct ib_device	*device;
	ib_comp_handler		comp_handler;
	void			(*event_handler)(struct ib_event *, void *);
	void			*cq_context;
	int			cqe;
	ibt_cq_hdl_t		ibt_cq;
	kmutex_t		lock;
};

struct ib_srq {
	struct ib_device	*device;
	struct ib_pd		*pd;
	void			(*event_handler)(struct ib_event *, void *);
	void			*srq_context;
	ibt_srq_hdl_t		ibt_srq;
};

struct ib_qp {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_cq		*send_cq;
	struct ib_cq		*recv_cq;
	struct ib_srq		*srq;
	void			(*event_handler)(struct ib_event *, void *);
	void			*qp_context;
	uint32_t		qp_num;
	enum ib_qp_type		qp_type;
	ibt_qp_hdl_t		ibt_qp;
	kmutex_t		lock;
};

#define	IB_DEVICE_NAME_MAX	64

typedef struct ib_device {
	ibt_hca_hdl_t		hca_hdl;
	char			name[IB_DEVICE_NAME_MAX];
	uint64_t		node_guid;
	uint32_t		local_dma_lkey;
	uint8_t			phys_port_cnt;
	uint8_t			node_type;
	enum {
				IB_DEV_UNINITIALIZED,
				IB_DEV_REGISTERED,
				IB_DEV_UNREGISTERED,
				IB_DEV_CLOSE = 100,
				IB_DEV_OPEN
	} reg_state;
	void			*data;
	ofs_client_p_t		clnt_hdl;
	struct llist_head	list;
} ib_device_t;

typedef struct ib_client {
	char		*name;
	void		(*add)   (struct ib_device *);
	void		(*remove)(struct ib_device *);
	dev_info_t	*dip;
	ofs_client_p_t	clnt_hdl;
	enum {
			IB_CLNT_UNINITIALIZED,
			IB_CLNT_INITIALIZED
	} state;
} ib_client_t;

int ib_register_client(struct ib_client *client);
void ib_unregister_client(struct ib_client *client);
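
/*
 * A minimal registration sketch (the client name and the my_add/my_remove
 * callbacks are hypothetical).  The add callback is expected to fire for
 * each ib_device, and is the natural place to attach per-device state
 * with ib_set_client_data():
 *
 *	static void my_add(struct ib_device *device);
 *	static void my_remove(struct ib_device *device);
 *
 *	static struct ib_client my_client = {
 *		.name	= "my_client",
 *		.add	= my_add,
 *		.remove	= my_remove
 *	};
 *
 *	if (ib_register_client(&my_client) != 0)
 *		... registration failed ...
 */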

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
    void *data);

int ib_query_device(struct ib_device *device,
    struct ib_device_attr *device_attr);

/*
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device);

/*
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 */
int ib_dealloc_pd(struct ib_pd *pd);
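
/*
 * A minimal allocation sketch, assuming a NULL return indicates failure
 * (device is a registered ib_device, e.g. the one handed to a client's
 * add callback):
 *
 *	struct ib_pd *pd = ib_alloc_pd(device);
 *
 *	if (pd == NULL)
 *		... allocation failed ...
 *	...
 *	(void) ib_dealloc_pd(pd);
 */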

/*
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
    struct ib_qp_init_attr *qp_init_attr);
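
/*
 * A minimal RC QP creation sketch; pd, send_cq and recv_cq are assumed
 * to exist already, and the capacity values are illustrative only:
 *
 *	struct ib_qp_init_attr init_attr;
 *	struct ib_qp *qp;
 *
 *	bzero(&init_attr, sizeof (init_attr));
 *	init_attr.send_cq		= send_cq;
 *	init_attr.recv_cq		= recv_cq;
 *	init_attr.cap.max_send_wr	= 64;
 *	init_attr.cap.max_recv_wr	= 64;
 *	init_attr.cap.max_send_sge	= 1;
 *	init_attr.cap.max_recv_sge	= 1;
 *	init_attr.sq_sig_type		= IB_SIGNAL_ALL_WR;
 *	init_attr.qp_type		= IB_QPT_RC;
 *
 *	qp = ib_create_qp(pd, &init_attr);
 *	(on success, init_attr.cap holds the capabilities actually granted)
 */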

/*
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 *   transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
    int qp_attr_mask);
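
/*
 * A minimal sketch of the RESET-to-INIT transition; the pkey index,
 * port number and access flags are illustrative:
 *
 *	struct ib_qp_attr attr;
 *
 *	bzero(&attr, sizeof (attr));
 *	attr.qp_state		= IB_QPS_INIT;
 *	attr.pkey_index		= 0;
 *	attr.port_num		= 1;
 *	attr.qp_access_flags	= IB_ACCESS_REMOTE_WRITE;
 *
 *	if (ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *	    IB_QP_PORT | IB_QP_ACCESS_FLAGS) != 0)
 *		... transition failed ...
 */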

/*
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);

/*
 * IB_CQ_VECTOR_LEAST_ATTACHED: The constant specifies that
 *      the CQ will be attached to the completion vector that has
 *      the least number of CQs already attached to it.
 */
#define	IB_CQ_VECTOR_LEAST_ATTACHED	0xffffffff

/*
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cqe: The minimum size of the CQ.
 * @comp_vector: Completion queue sched handle.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
    ib_comp_handler comp_handler,
    void (*event_handler)(struct ib_event *, void *),
    void *cq_context, int cqe, void *comp_vector);
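
/*
 * A minimal creation sketch (my_comp_handler and my_context are
 * hypothetical; no async event handler is registered and no completion
 * vector hint is passed):
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_create_cq(device, my_comp_handler, NULL, my_context,
 *	    256, NULL);
 *	(on success, cq->cqe holds the actual CQ size, which may exceed
 *	the 256 entries requested)
 */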

/*
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/*
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
int ib_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
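
/*
 * A minimal drain-loop sketch (WC_BATCH is a hypothetical batch size):
 *
 *	struct ib_wc wc[WC_BATCH];
 *	int n, i;
 *
 *	while ((n = ib_poll_cq(cq, WC_BATCH, wc)) > 0) {
 *		for (i = 0; i < n; i++) {
 *			if (wc[i].status != IB_WC_SUCCESS)
 *				... wc[i] completed in error ...
 *			...
 *		}
 *	}
 */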

/*
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event.  In
 *        this case it is guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in.  It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ.  This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
int ib_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
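
/*
 * The race described above is conventionally closed by rearming the CQ
 * and then polling once more (a minimal sketch; assumes the CQ has just
 * been drained with ib_poll_cq()):
 *
 *	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *	    IB_CQ_REPORT_MISSED_EVENTS) > 0)
 *		... poll again: a completion may have arrived between
 *		the last poll and the rearm, with no event forthcoming ...
 */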

struct rdma_cm_id;
ibt_hca_hdl_t ib_get_ibt_hca_hdl(struct ib_device *device);

ibt_channel_hdl_t
ib_get_ibt_channel_hdl(struct rdma_cm_id *cm);

#ifdef __cplusplus
}
#endif
#endif /* _SYS_IB_CLIENTS_OF_IB_VERBS_H */