1 /*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2004, 2011-2012 Intel Corporation. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36 #ifndef INFINIBAND_VERBS_H
37 #define INFINIBAND_VERBS_H
38
39 #include <stdint.h>
40 #include <pthread.h>
41 #include <stddef.h>
42 #include <errno.h>
43 #include <string.h>
44 #include <infiniband/types.h>
45
46 #ifdef __cplusplus
47 # define BEGIN_C_DECLS extern "C" {
48 # define END_C_DECLS }
49 #else /* !__cplusplus */
50 # define BEGIN_C_DECLS
51 # define END_C_DECLS
52 #endif /* __cplusplus */
53
54 #if __GNUC__ >= 3
55 # define __attribute_const __attribute__((const))
56 #else
57 # define __attribute_const
58 #endif
59
60 BEGIN_C_DECLS
61
62 union ibv_gid {
63 uint8_t raw[16];
64 struct {
65 __be64 subnet_prefix;
66 __be64 interface_id;
67 } global;
68 };
69
70 #ifndef container_of
71 /**
72 * container_of - cast a member of a structure out to the containing structure
73 * @ptr: the pointer to the member.
74 * @type: the type of the container struct this is embedded in.
75 * @member: the name of the member within the struct.
76 *
77 */
78 #define container_of(ptr, type, member) \
79 ((type *) ((uint8_t *)(ptr) - offsetof(type, member)))
80 #endif
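/*
 * Example (illustrative): container_of() recovers a pointer to an enclosing
 * structure from a pointer to one of its members, e.g. the way verbs_get_ctx()
 * later in this header maps a struct ibv_context back to its enclosing
 * struct verbs_context:
 *
 *     struct verbs_context *vctx =
 *             container_of(ctx, struct verbs_context, context);
 */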
81
82 #define vext_field_avail(type, fld, sz) (offsetof(type, fld) < (sz))
83
84 static void *__VERBS_ABI_IS_EXTENDED = ((uint8_t *) NULL) - 1;
85
86 enum ibv_node_type {
87 IBV_NODE_UNKNOWN = -1,
88 IBV_NODE_CA = 1,
89 IBV_NODE_SWITCH,
90 IBV_NODE_ROUTER,
91 IBV_NODE_RNIC,
92 IBV_NODE_USNIC,
93 IBV_NODE_USNIC_UDP,
94 };
95
96 enum ibv_transport_type {
97 IBV_TRANSPORT_UNKNOWN = -1,
98 IBV_TRANSPORT_IB = 0,
99 IBV_TRANSPORT_IWARP,
100 IBV_TRANSPORT_USNIC,
101 IBV_TRANSPORT_USNIC_UDP,
102 };
103
104 enum ibv_device_cap_flags {
105 IBV_DEVICE_RESIZE_MAX_WR = 1,
106 IBV_DEVICE_BAD_PKEY_CNTR = 1 << 1,
107 IBV_DEVICE_BAD_QKEY_CNTR = 1 << 2,
108 IBV_DEVICE_RAW_MULTI = 1 << 3,
109 IBV_DEVICE_AUTO_PATH_MIG = 1 << 4,
110 IBV_DEVICE_CHANGE_PHY_PORT = 1 << 5,
111 IBV_DEVICE_UD_AV_PORT_ENFORCE = 1 << 6,
112 IBV_DEVICE_CURR_QP_STATE_MOD = 1 << 7,
113 IBV_DEVICE_SHUTDOWN_PORT = 1 << 8,
114 IBV_DEVICE_INIT_TYPE = 1 << 9,
115 IBV_DEVICE_PORT_ACTIVE_EVENT = 1 << 10,
116 IBV_DEVICE_SYS_IMAGE_GUID = 1 << 11,
117 IBV_DEVICE_RC_RNR_NAK_GEN = 1 << 12,
118 IBV_DEVICE_SRQ_RESIZE = 1 << 13,
119 IBV_DEVICE_N_NOTIFY_CQ = 1 << 14,
120 IBV_DEVICE_MEM_WINDOW = 1 << 17,
121 IBV_DEVICE_UD_IP_CSUM = 1 << 18,
122 IBV_DEVICE_XRC = 1 << 20,
123 IBV_DEVICE_MEM_MGT_EXTENSIONS = 1 << 21,
124 IBV_DEVICE_MEM_WINDOW_TYPE_2A = 1 << 23,
125 IBV_DEVICE_MEM_WINDOW_TYPE_2B = 1 << 24,
126 IBV_DEVICE_RC_IP_CSUM = 1 << 25,
127 IBV_DEVICE_RAW_IP_CSUM = 1 << 26,
128 IBV_DEVICE_MANAGED_FLOW_STEERING = 1 << 29
129 };
130
131 /*
 * Can't extend the ibv_device_cap_flags enum above, because on some
 * systems/compilers the enum range is limited to 4 bytes.
134 */
135 #define IBV_DEVICE_RAW_SCATTER_FCS (1ULL << 34)
136
137 enum ibv_atomic_cap {
138 IBV_ATOMIC_NONE,
139 IBV_ATOMIC_HCA,
140 IBV_ATOMIC_GLOB
141 };
142
143 struct ibv_device_attr {
144 char fw_ver[64];
145 __be64 node_guid;
146 __be64 sys_image_guid;
147 uint64_t max_mr_size;
148 uint64_t page_size_cap;
149 uint32_t vendor_id;
150 uint32_t vendor_part_id;
151 uint32_t hw_ver;
152 int max_qp;
153 int max_qp_wr;
154 int device_cap_flags;
155 int max_sge;
156 int max_sge_rd;
157 int max_cq;
158 int max_cqe;
159 int max_mr;
160 int max_pd;
161 int max_qp_rd_atom;
162 int max_ee_rd_atom;
163 int max_res_rd_atom;
164 int max_qp_init_rd_atom;
165 int max_ee_init_rd_atom;
166 enum ibv_atomic_cap atomic_cap;
167 int max_ee;
168 int max_rdd;
169 int max_mw;
170 int max_raw_ipv6_qp;
171 int max_raw_ethy_qp;
172 int max_mcast_grp;
173 int max_mcast_qp_attach;
174 int max_total_mcast_qp_attach;
175 int max_ah;
176 int max_fmr;
177 int max_map_per_fmr;
178 int max_srq;
179 int max_srq_wr;
180 int max_srq_sge;
181 uint16_t max_pkeys;
182 uint8_t local_ca_ack_delay;
183 uint8_t phys_port_cnt;
184 };
185
186 /* An extensible input struct for possible future extensions of the
187 * ibv_query_device_ex verb. */
188 struct ibv_query_device_ex_input {
189 uint32_t comp_mask;
190 };
191
192 enum ibv_odp_transport_cap_bits {
193 IBV_ODP_SUPPORT_SEND = 1 << 0,
194 IBV_ODP_SUPPORT_RECV = 1 << 1,
195 IBV_ODP_SUPPORT_WRITE = 1 << 2,
196 IBV_ODP_SUPPORT_READ = 1 << 3,
197 IBV_ODP_SUPPORT_ATOMIC = 1 << 4,
198 };
199
200 struct ibv_odp_caps {
201 uint64_t general_caps;
202 struct {
203 uint32_t rc_odp_caps;
204 uint32_t uc_odp_caps;
205 uint32_t ud_odp_caps;
206 } per_transport_caps;
207 };
208
209 enum ibv_odp_general_caps {
210 IBV_ODP_SUPPORT = 1 << 0,
211 };
212
213 struct ibv_tso_caps {
214 uint32_t max_tso;
215 uint32_t supported_qpts;
216 };
217
218 /* RX Hash function flags */
219 enum ibv_rx_hash_function_flags {
220 IBV_RX_HASH_FUNC_TOEPLITZ = 1 << 0,
221 };
222
223 /*
 * RX hash fields select which fields of an incoming packet participate
 * in the RX hash. Each flag represents a specific packet field; when a
 * flag is set, that field is included in the RX hash calculation.
 * Note: the *IPV4 and *IPV6 flags can't be enabled together on the same QP,
 * and the *TCP and *UDP flags can't be enabled together on the same QP.
 * See the illustrative mask example following the enum below.
230 */
231 enum ibv_rx_hash_fields {
232 IBV_RX_HASH_SRC_IPV4 = 1 << 0,
233 IBV_RX_HASH_DST_IPV4 = 1 << 1,
234 IBV_RX_HASH_SRC_IPV6 = 1 << 2,
235 IBV_RX_HASH_DST_IPV6 = 1 << 3,
236 IBV_RX_HASH_SRC_PORT_TCP = 1 << 4,
237 IBV_RX_HASH_DST_PORT_TCP = 1 << 5,
238 IBV_RX_HASH_SRC_PORT_UDP = 1 << 6,
239 IBV_RX_HASH_DST_PORT_UDP = 1 << 7
240 };
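/*
 * Example (illustrative): a hash-fields mask that hashes on the IPv4
 * addresses and TCP ports of incoming packets, respecting the constraints
 * noted above (no IPv4/IPv6 or TCP/UDP mix on one QP):
 *
 *     uint64_t rx_hash_fields_mask = IBV_RX_HASH_SRC_IPV4 |
 *                                    IBV_RX_HASH_DST_IPV4 |
 *                                    IBV_RX_HASH_SRC_PORT_TCP |
 *                                    IBV_RX_HASH_DST_PORT_TCP;
 */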
241
242 struct ibv_rss_caps {
243 uint32_t supported_qpts;
244 uint32_t max_rwq_indirection_tables;
245 uint32_t max_rwq_indirection_table_size;
246 uint64_t rx_hash_fields_mask; /* enum ibv_rx_hash_fields */
247 uint8_t rx_hash_function; /* enum ibv_rx_hash_function_flags */
248 };
249
250 struct ibv_packet_pacing_caps {
251 uint32_t qp_rate_limit_min;
252 uint32_t qp_rate_limit_max; /* In kbps */
253 uint32_t supported_qpts;
254 };
255
256 enum ibv_raw_packet_caps {
257 IBV_RAW_PACKET_CAP_CVLAN_STRIPPING = 1 << 0,
258 IBV_RAW_PACKET_CAP_SCATTER_FCS = 1 << 1,
259 IBV_RAW_PACKET_CAP_IP_CSUM = 1 << 2,
260 };
261
262 struct ibv_device_attr_ex {
263 struct ibv_device_attr orig_attr;
264 uint32_t comp_mask;
265 struct ibv_odp_caps odp_caps;
266 uint64_t completion_timestamp_mask;
267 uint64_t hca_core_clock;
268 uint64_t device_cap_flags_ex;
269 struct ibv_tso_caps tso_caps;
270 struct ibv_rss_caps rss_caps;
271 uint32_t max_wq_type_rq;
272 struct ibv_packet_pacing_caps packet_pacing_caps;
273 uint32_t raw_packet_caps; /* Use ibv_raw_packet_caps */
274 };
275
276 enum ibv_mtu {
277 IBV_MTU_256 = 1,
278 IBV_MTU_512 = 2,
279 IBV_MTU_1024 = 3,
280 IBV_MTU_2048 = 4,
281 IBV_MTU_4096 = 5
282 };
283
284 enum ibv_port_state {
285 IBV_PORT_NOP = 0,
286 IBV_PORT_DOWN = 1,
287 IBV_PORT_INIT = 2,
288 IBV_PORT_ARMED = 3,
289 IBV_PORT_ACTIVE = 4,
290 IBV_PORT_ACTIVE_DEFER = 5
291 };
292
293 enum {
294 IBV_LINK_LAYER_UNSPECIFIED,
295 IBV_LINK_LAYER_INFINIBAND,
296 IBV_LINK_LAYER_ETHERNET,
297 };
298
299 enum ibv_port_cap_flags {
300 IBV_PORT_SM = 1 << 1,
301 IBV_PORT_NOTICE_SUP = 1 << 2,
302 IBV_PORT_TRAP_SUP = 1 << 3,
303 IBV_PORT_OPT_IPD_SUP = 1 << 4,
304 IBV_PORT_AUTO_MIGR_SUP = 1 << 5,
305 IBV_PORT_SL_MAP_SUP = 1 << 6,
306 IBV_PORT_MKEY_NVRAM = 1 << 7,
307 IBV_PORT_PKEY_NVRAM = 1 << 8,
308 IBV_PORT_LED_INFO_SUP = 1 << 9,
309 IBV_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
310 IBV_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
311 IBV_PORT_EXTENDED_SPEEDS_SUP = 1 << 14,
312 IBV_PORT_CM_SUP = 1 << 16,
313 IBV_PORT_SNMP_TUNNEL_SUP = 1 << 17,
314 IBV_PORT_REINIT_SUP = 1 << 18,
315 IBV_PORT_DEVICE_MGMT_SUP = 1 << 19,
316 IBV_PORT_VENDOR_CLASS_SUP = 1 << 20,
317 IBV_PORT_DR_NOTICE_SUP = 1 << 21,
318 IBV_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
319 IBV_PORT_BOOT_MGMT_SUP = 1 << 23,
320 IBV_PORT_LINK_LATENCY_SUP = 1 << 24,
321 IBV_PORT_CLIENT_REG_SUP = 1 << 25,
322 IBV_PORT_IP_BASED_GIDS = 1 << 26
323 };
324
325 struct ibv_port_attr {
326 enum ibv_port_state state;
327 enum ibv_mtu max_mtu;
328 enum ibv_mtu active_mtu;
329 int gid_tbl_len;
330 uint32_t port_cap_flags;
331 uint32_t max_msg_sz;
332 uint32_t bad_pkey_cntr;
333 uint32_t qkey_viol_cntr;
334 uint16_t pkey_tbl_len;
335 uint16_t lid;
336 uint16_t sm_lid;
337 uint8_t lmc;
338 uint8_t max_vl_num;
339 uint8_t sm_sl;
340 uint8_t subnet_timeout;
341 uint8_t init_type_reply;
342 uint8_t active_width;
343 uint8_t active_speed;
344 uint8_t phys_state;
345 uint8_t link_layer;
346 uint8_t reserved;
347 };
348
349 enum ibv_event_type {
350 IBV_EVENT_CQ_ERR,
351 IBV_EVENT_QP_FATAL,
352 IBV_EVENT_QP_REQ_ERR,
353 IBV_EVENT_QP_ACCESS_ERR,
354 IBV_EVENT_COMM_EST,
355 IBV_EVENT_SQ_DRAINED,
356 IBV_EVENT_PATH_MIG,
357 IBV_EVENT_PATH_MIG_ERR,
358 IBV_EVENT_DEVICE_FATAL,
359 IBV_EVENT_PORT_ACTIVE,
360 IBV_EVENT_PORT_ERR,
361 IBV_EVENT_LID_CHANGE,
362 IBV_EVENT_PKEY_CHANGE,
363 IBV_EVENT_SM_CHANGE,
364 IBV_EVENT_SRQ_ERR,
365 IBV_EVENT_SRQ_LIMIT_REACHED,
366 IBV_EVENT_QP_LAST_WQE_REACHED,
367 IBV_EVENT_CLIENT_REREGISTER,
368 IBV_EVENT_GID_CHANGE,
369 IBV_EVENT_WQ_FATAL,
370 };
371
372 struct ibv_async_event {
373 union {
374 struct ibv_cq *cq;
375 struct ibv_qp *qp;
376 struct ibv_srq *srq;
377 struct ibv_wq *wq;
378 int port_num;
379 } element;
380 enum ibv_event_type event_type;
381 };
382
383 enum ibv_wc_status {
384 IBV_WC_SUCCESS,
385 IBV_WC_LOC_LEN_ERR,
386 IBV_WC_LOC_QP_OP_ERR,
387 IBV_WC_LOC_EEC_OP_ERR,
388 IBV_WC_LOC_PROT_ERR,
389 IBV_WC_WR_FLUSH_ERR,
390 IBV_WC_MW_BIND_ERR,
391 IBV_WC_BAD_RESP_ERR,
392 IBV_WC_LOC_ACCESS_ERR,
393 IBV_WC_REM_INV_REQ_ERR,
394 IBV_WC_REM_ACCESS_ERR,
395 IBV_WC_REM_OP_ERR,
396 IBV_WC_RETRY_EXC_ERR,
397 IBV_WC_RNR_RETRY_EXC_ERR,
398 IBV_WC_LOC_RDD_VIOL_ERR,
399 IBV_WC_REM_INV_RD_REQ_ERR,
400 IBV_WC_REM_ABORT_ERR,
401 IBV_WC_INV_EECN_ERR,
402 IBV_WC_INV_EEC_STATE_ERR,
403 IBV_WC_FATAL_ERR,
404 IBV_WC_RESP_TIMEOUT_ERR,
405 IBV_WC_GENERAL_ERR
406 };
407 const char *ibv_wc_status_str(enum ibv_wc_status status);
408
409 enum ibv_wc_opcode {
410 IBV_WC_SEND,
411 IBV_WC_RDMA_WRITE,
412 IBV_WC_RDMA_READ,
413 IBV_WC_COMP_SWAP,
414 IBV_WC_FETCH_ADD,
415 IBV_WC_BIND_MW,
416 IBV_WC_LOCAL_INV,
417 IBV_WC_TSO,
418 /*
419 * Set value of IBV_WC_RECV so consumers can test if a completion is a
420 * receive by testing (opcode & IBV_WC_RECV).
421 */
422 IBV_WC_RECV = 1 << 7,
423 IBV_WC_RECV_RDMA_WITH_IMM
424 };
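/*
 * Example (illustrative): distinguishing receive completions as described
 * above, given a struct ibv_wc wc returned by ibv_poll_cq(); handle_recv()
 * is a placeholder:
 *
 *     if (wc.opcode & IBV_WC_RECV)
 *             handle_recv(&wc); // IBV_WC_RECV or IBV_WC_RECV_RDMA_WITH_IMM
 */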
425
426 enum {
427 IBV_WC_IP_CSUM_OK_SHIFT = 2
428 };
429
430 enum ibv_create_cq_wc_flags {
431 IBV_WC_EX_WITH_BYTE_LEN = 1 << 0,
432 IBV_WC_EX_WITH_IMM = 1 << 1,
433 IBV_WC_EX_WITH_QP_NUM = 1 << 2,
434 IBV_WC_EX_WITH_SRC_QP = 1 << 3,
435 IBV_WC_EX_WITH_SLID = 1 << 4,
436 IBV_WC_EX_WITH_SL = 1 << 5,
437 IBV_WC_EX_WITH_DLID_PATH_BITS = 1 << 6,
438 IBV_WC_EX_WITH_COMPLETION_TIMESTAMP = 1 << 7,
439 IBV_WC_EX_WITH_CVLAN = 1 << 8,
440 IBV_WC_EX_WITH_FLOW_TAG = 1 << 9,
441 };
442
443 enum {
444 IBV_WC_STANDARD_FLAGS = IBV_WC_EX_WITH_BYTE_LEN |
445 IBV_WC_EX_WITH_IMM |
446 IBV_WC_EX_WITH_QP_NUM |
447 IBV_WC_EX_WITH_SRC_QP |
448 IBV_WC_EX_WITH_SLID |
449 IBV_WC_EX_WITH_SL |
450 IBV_WC_EX_WITH_DLID_PATH_BITS
451 };
452
453 enum {
454 IBV_CREATE_CQ_SUP_WC_FLAGS = IBV_WC_STANDARD_FLAGS |
455 IBV_WC_EX_WITH_COMPLETION_TIMESTAMP |
456 IBV_WC_EX_WITH_CVLAN |
457 IBV_WC_EX_WITH_FLOW_TAG
458 };
459
460 enum ibv_wc_flags {
461 IBV_WC_GRH = 1 << 0,
462 IBV_WC_WITH_IMM = 1 << 1,
463 IBV_WC_IP_CSUM_OK = 1 << IBV_WC_IP_CSUM_OK_SHIFT,
464 IBV_WC_WITH_INV = 1 << 3
465 };
466
467 struct ibv_wc {
468 uint64_t wr_id;
469 enum ibv_wc_status status;
470 enum ibv_wc_opcode opcode;
471 uint32_t vendor_err;
472 uint32_t byte_len;
473 /* When (wc_flags & IBV_WC_WITH_IMM): Immediate data in network byte order.
474 * When (wc_flags & IBV_WC_WITH_INV): Stores the invalidated rkey.
475 */
476 union {
477 __be32 imm_data;
478 uint32_t invalidated_rkey;
479 };
480 uint32_t qp_num;
481 uint32_t src_qp;
482 int wc_flags;
483 uint16_t pkey_index;
484 uint16_t slid;
485 uint8_t sl;
486 uint8_t dlid_path_bits;
487 };
488
489 enum ibv_access_flags {
490 IBV_ACCESS_LOCAL_WRITE = 1,
491 IBV_ACCESS_REMOTE_WRITE = (1<<1),
492 IBV_ACCESS_REMOTE_READ = (1<<2),
493 IBV_ACCESS_REMOTE_ATOMIC = (1<<3),
494 IBV_ACCESS_MW_BIND = (1<<4),
495 IBV_ACCESS_ZERO_BASED = (1<<5),
496 IBV_ACCESS_ON_DEMAND = (1<<6),
497 };
498
499 struct ibv_mw_bind_info {
500 struct ibv_mr *mr;
501 uint64_t addr;
502 uint64_t length;
503 int mw_access_flags; /* use ibv_access_flags */
504 };
505
506 struct ibv_pd {
507 struct ibv_context *context;
508 uint32_t handle;
509 };
510
511 enum ibv_xrcd_init_attr_mask {
512 IBV_XRCD_INIT_ATTR_FD = 1 << 0,
513 IBV_XRCD_INIT_ATTR_OFLAGS = 1 << 1,
514 IBV_XRCD_INIT_ATTR_RESERVED = 1 << 2
515 };
516
517 struct ibv_xrcd_init_attr {
518 uint32_t comp_mask;
519 int fd;
520 int oflags;
521 };
522
523 struct ibv_xrcd {
524 struct ibv_context *context;
525 };
526
527 enum ibv_rereg_mr_flags {
528 IBV_REREG_MR_CHANGE_TRANSLATION = (1 << 0),
529 IBV_REREG_MR_CHANGE_PD = (1 << 1),
530 IBV_REREG_MR_CHANGE_ACCESS = (1 << 2),
531 IBV_REREG_MR_KEEP_VALID = (1 << 3),
532 IBV_REREG_MR_FLAGS_SUPPORTED = ((IBV_REREG_MR_KEEP_VALID << 1) - 1)
533 };
534
535 struct ibv_mr {
536 struct ibv_context *context;
537 struct ibv_pd *pd;
538 void *addr;
539 size_t length;
540 uint32_t handle;
541 uint32_t lkey;
542 uint32_t rkey;
543 };
544
545 enum ibv_mw_type {
546 IBV_MW_TYPE_1 = 1,
547 IBV_MW_TYPE_2 = 2
548 };
549
550 struct ibv_mw {
551 struct ibv_context *context;
552 struct ibv_pd *pd;
553 uint32_t rkey;
554 uint32_t handle;
555 enum ibv_mw_type type;
556 };
557
558 struct ibv_global_route {
559 union ibv_gid dgid;
560 uint32_t flow_label;
561 uint8_t sgid_index;
562 uint8_t hop_limit;
563 uint8_t traffic_class;
564 };
565
566 struct ibv_grh {
567 __be32 version_tclass_flow;
568 __be16 paylen;
569 uint8_t next_hdr;
570 uint8_t hop_limit;
571 union ibv_gid sgid;
572 union ibv_gid dgid;
573 };
574
575 enum ibv_rate {
576 IBV_RATE_MAX = 0,
577 IBV_RATE_2_5_GBPS = 2,
578 IBV_RATE_5_GBPS = 5,
579 IBV_RATE_10_GBPS = 3,
580 IBV_RATE_20_GBPS = 6,
581 IBV_RATE_30_GBPS = 4,
582 IBV_RATE_40_GBPS = 7,
583 IBV_RATE_60_GBPS = 8,
584 IBV_RATE_80_GBPS = 9,
585 IBV_RATE_120_GBPS = 10,
586 IBV_RATE_14_GBPS = 11,
587 IBV_RATE_56_GBPS = 12,
588 IBV_RATE_112_GBPS = 13,
589 IBV_RATE_168_GBPS = 14,
590 IBV_RATE_25_GBPS = 15,
591 IBV_RATE_100_GBPS = 16,
592 IBV_RATE_200_GBPS = 17,
593 IBV_RATE_300_GBPS = 18,
594 IBV_RATE_28_GBPS = 19,
595 IBV_RATE_50_GBPS = 20,
596 IBV_RATE_400_GBPS = 21,
597 IBV_RATE_600_GBPS = 22,
598 IBV_RATE_800_GBPS = 23,
599 IBV_RATE_1200_GBPS = 24,
600 };
601
602 /**
603 * ibv_rate_to_mult - Convert the IB rate enum to a multiple of the
604 * base rate of 2.5 Gbit/sec. For example, IBV_RATE_5_GBPS will be
605 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
606 * @rate: rate to convert.
607 */
608 int __attribute_const ibv_rate_to_mult(enum ibv_rate rate);
609
610 /**
611 * mult_to_ibv_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate enum.
612 * @mult: multiple to convert.
613 */
614 enum ibv_rate __attribute_const mult_to_ibv_rate(int mult);
615
616 /**
617 * ibv_rate_to_mbps - Convert the IB rate enum to Mbit/sec.
618 * For example, IBV_RATE_5_GBPS will return the value 5000.
619 * @rate: rate to convert.
620 */
621 int __attribute_const ibv_rate_to_mbps(enum ibv_rate rate);
622
623 /**
624 * mbps_to_ibv_rate - Convert a Mbit/sec value to an IB rate enum.
625 * @mbps: value to convert.
626 */
627 enum ibv_rate __attribute_const mbps_to_ibv_rate(int mbps) __attribute_const;
628
629 struct ibv_ah_attr {
630 struct ibv_global_route grh;
631 uint16_t dlid;
632 uint8_t sl;
633 uint8_t src_path_bits;
634 uint8_t static_rate;
635 uint8_t is_global;
636 uint8_t port_num;
637 };
638
639 enum ibv_srq_attr_mask {
640 IBV_SRQ_MAX_WR = 1 << 0,
641 IBV_SRQ_LIMIT = 1 << 1
642 };
643
644 struct ibv_srq_attr {
645 uint32_t max_wr;
646 uint32_t max_sge;
647 uint32_t srq_limit;
648 };
649
650 struct ibv_srq_init_attr {
651 void *srq_context;
652 struct ibv_srq_attr attr;
653 };
654
655 enum ibv_srq_type {
656 IBV_SRQT_BASIC,
657 IBV_SRQT_XRC
658 };
659
660 enum ibv_srq_init_attr_mask {
661 IBV_SRQ_INIT_ATTR_TYPE = 1 << 0,
662 IBV_SRQ_INIT_ATTR_PD = 1 << 1,
663 IBV_SRQ_INIT_ATTR_XRCD = 1 << 2,
664 IBV_SRQ_INIT_ATTR_CQ = 1 << 3,
665 IBV_SRQ_INIT_ATTR_RESERVED = 1 << 4
666 };
667
668 struct ibv_srq_init_attr_ex {
669 void *srq_context;
670 struct ibv_srq_attr attr;
671
672 uint32_t comp_mask;
673 enum ibv_srq_type srq_type;
674 struct ibv_pd *pd;
675 struct ibv_xrcd *xrcd;
676 struct ibv_cq *cq;
677 };
678
679 enum ibv_wq_type {
680 IBV_WQT_RQ
681 };
682
683 enum ibv_wq_init_attr_mask {
684 IBV_WQ_INIT_ATTR_FLAGS = 1 << 0,
685 IBV_WQ_INIT_ATTR_RESERVED = 1 << 1,
686 };
687
688 enum ibv_wq_flags {
689 IBV_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0,
690 IBV_WQ_FLAGS_SCATTER_FCS = 1 << 1,
691 IBV_WQ_FLAGS_RESERVED = 1 << 2,
692 };
693
694 struct ibv_wq_init_attr {
695 void *wq_context;
696 enum ibv_wq_type wq_type;
697 uint32_t max_wr;
698 uint32_t max_sge;
699 struct ibv_pd *pd;
700 struct ibv_cq *cq;
701 uint32_t comp_mask; /* Use ibv_wq_init_attr_mask */
702 uint32_t create_flags; /* use ibv_wq_flags */
703 };
704
705 enum ibv_wq_state {
706 IBV_WQS_RESET,
707 IBV_WQS_RDY,
708 IBV_WQS_ERR,
709 IBV_WQS_UNKNOWN
710 };
711
712 enum ibv_wq_attr_mask {
713 IBV_WQ_ATTR_STATE = 1 << 0,
714 IBV_WQ_ATTR_CURR_STATE = 1 << 1,
715 IBV_WQ_ATTR_FLAGS = 1 << 2,
716 IBV_WQ_ATTR_RESERVED = 1 << 3,
717 };
718
719 struct ibv_wq_attr {
720 /* enum ibv_wq_attr_mask */
721 uint32_t attr_mask;
722 /* Move the WQ to this state */
723 enum ibv_wq_state wq_state;
724 /* Assume this is the current WQ state */
725 enum ibv_wq_state curr_wq_state;
726 uint32_t flags; /* Use ibv_wq_flags */
727 uint32_t flags_mask; /* Use ibv_wq_flags */
728 };
729
730 /*
731 * Receive Work Queue Indirection Table.
 * It is used to distribute incoming packets between different
 * Receive Work Queues. Associating Receive WQs with different CPU cores
 * allows the traffic load to be spread across those cores.
735 * The Indirection Table can contain only WQs of type IBV_WQT_RQ.
736 */
737 struct ibv_rwq_ind_table {
738 struct ibv_context *context;
739 int ind_tbl_handle;
740 int ind_tbl_num;
741 uint32_t comp_mask;
742 };
743
744 enum ibv_ind_table_init_attr_mask {
745 IBV_CREATE_IND_TABLE_RESERVED = (1 << 0)
746 };
747
748 /*
749 * Receive Work Queue Indirection Table attributes
750 */
751 struct ibv_rwq_ind_table_init_attr {
752 uint32_t log_ind_tbl_size;
753 /* Each entry is a pointer to a Receive Work Queue */
754 struct ibv_wq **ind_tbl;
755 uint32_t comp_mask;
756 };
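/*
 * Example (illustrative sketch): describing a 4-entry indirection table over
 * previously created WQs of type IBV_WQT_RQ; wq0..wq3 are placeholders:
 *
 *     struct ibv_wq *wqs[4] = { wq0, wq1, wq2, wq3 };
 *     struct ibv_rwq_ind_table_init_attr init_attr = {
 *             .log_ind_tbl_size = 2, // 1 << 2 = 4 entries
 *             .ind_tbl           = wqs,
 *             .comp_mask         = 0,
 *     };
 */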
757
758 enum ibv_qp_type {
759 IBV_QPT_RC = 2,
760 IBV_QPT_UC,
761 IBV_QPT_UD,
762 IBV_QPT_RAW_PACKET = 8,
763 IBV_QPT_XRC_SEND = 9,
764 IBV_QPT_XRC_RECV
765 };
766
767 struct ibv_qp_cap {
768 uint32_t max_send_wr;
769 uint32_t max_recv_wr;
770 uint32_t max_send_sge;
771 uint32_t max_recv_sge;
772 uint32_t max_inline_data;
773 };
774
775 struct ibv_qp_init_attr {
776 void *qp_context;
777 struct ibv_cq *send_cq;
778 struct ibv_cq *recv_cq;
779 struct ibv_srq *srq;
780 struct ibv_qp_cap cap;
781 enum ibv_qp_type qp_type;
782 int sq_sig_all;
783 };
784
785 enum ibv_qp_init_attr_mask {
786 IBV_QP_INIT_ATTR_PD = 1 << 0,
787 IBV_QP_INIT_ATTR_XRCD = 1 << 1,
788 IBV_QP_INIT_ATTR_CREATE_FLAGS = 1 << 2,
789 IBV_QP_INIT_ATTR_MAX_TSO_HEADER = 1 << 3,
790 IBV_QP_INIT_ATTR_IND_TABLE = 1 << 4,
791 IBV_QP_INIT_ATTR_RX_HASH = 1 << 5,
792 IBV_QP_INIT_ATTR_RESERVED = 1 << 6
793 };
794
795 enum ibv_qp_create_flags {
796 IBV_QP_CREATE_BLOCK_SELF_MCAST_LB = 1 << 1,
797 IBV_QP_CREATE_SCATTER_FCS = 1 << 8,
798 IBV_QP_CREATE_CVLAN_STRIPPING = 1 << 9,
799 };
800
801 struct ibv_rx_hash_conf {
802 /* enum ibv_rx_hash_function_flags */
803 uint8_t rx_hash_function;
804 uint8_t rx_hash_key_len;
805 uint8_t *rx_hash_key;
806 /* enum ibv_rx_hash_fields */
807 uint64_t rx_hash_fields_mask;
808 };
809
810 struct ibv_qp_init_attr_ex {
811 void *qp_context;
812 struct ibv_cq *send_cq;
813 struct ibv_cq *recv_cq;
814 struct ibv_srq *srq;
815 struct ibv_qp_cap cap;
816 enum ibv_qp_type qp_type;
817 int sq_sig_all;
818
819 uint32_t comp_mask;
820 struct ibv_pd *pd;
821 struct ibv_xrcd *xrcd;
822 uint32_t create_flags;
823 uint16_t max_tso_header;
824 struct ibv_rwq_ind_table *rwq_ind_tbl;
825 struct ibv_rx_hash_conf rx_hash_conf;
826 };
827
828 enum ibv_qp_open_attr_mask {
829 IBV_QP_OPEN_ATTR_NUM = 1 << 0,
830 IBV_QP_OPEN_ATTR_XRCD = 1 << 1,
831 IBV_QP_OPEN_ATTR_CONTEXT = 1 << 2,
832 IBV_QP_OPEN_ATTR_TYPE = 1 << 3,
833 IBV_QP_OPEN_ATTR_RESERVED = 1 << 4
834 };
835
836 struct ibv_qp_open_attr {
837 uint32_t comp_mask;
838 uint32_t qp_num;
839 struct ibv_xrcd *xrcd;
840 void *qp_context;
841 enum ibv_qp_type qp_type;
842 };
843
844 enum ibv_qp_attr_mask {
845 IBV_QP_STATE = 1 << 0,
846 IBV_QP_CUR_STATE = 1 << 1,
847 IBV_QP_EN_SQD_ASYNC_NOTIFY = 1 << 2,
848 IBV_QP_ACCESS_FLAGS = 1 << 3,
849 IBV_QP_PKEY_INDEX = 1 << 4,
850 IBV_QP_PORT = 1 << 5,
851 IBV_QP_QKEY = 1 << 6,
852 IBV_QP_AV = 1 << 7,
853 IBV_QP_PATH_MTU = 1 << 8,
854 IBV_QP_TIMEOUT = 1 << 9,
855 IBV_QP_RETRY_CNT = 1 << 10,
856 IBV_QP_RNR_RETRY = 1 << 11,
857 IBV_QP_RQ_PSN = 1 << 12,
858 IBV_QP_MAX_QP_RD_ATOMIC = 1 << 13,
859 IBV_QP_ALT_PATH = 1 << 14,
860 IBV_QP_MIN_RNR_TIMER = 1 << 15,
861 IBV_QP_SQ_PSN = 1 << 16,
862 IBV_QP_MAX_DEST_RD_ATOMIC = 1 << 17,
863 IBV_QP_PATH_MIG_STATE = 1 << 18,
864 IBV_QP_CAP = 1 << 19,
865 IBV_QP_DEST_QPN = 1 << 20,
866 IBV_QP_RATE_LIMIT = 1 << 25,
867 };
868
869 enum ibv_qp_state {
870 IBV_QPS_RESET,
871 IBV_QPS_INIT,
872 IBV_QPS_RTR,
873 IBV_QPS_RTS,
874 IBV_QPS_SQD,
875 IBV_QPS_SQE,
876 IBV_QPS_ERR,
877 IBV_QPS_UNKNOWN
878 };
879
880 enum ibv_mig_state {
881 IBV_MIG_MIGRATED,
882 IBV_MIG_REARM,
883 IBV_MIG_ARMED
884 };
885
886 struct ibv_qp_attr {
887 enum ibv_qp_state qp_state;
888 enum ibv_qp_state cur_qp_state;
889 enum ibv_mtu path_mtu;
890 enum ibv_mig_state path_mig_state;
891 uint32_t qkey;
892 uint32_t rq_psn;
893 uint32_t sq_psn;
894 uint32_t dest_qp_num;
895 int qp_access_flags;
896 struct ibv_qp_cap cap;
897 struct ibv_ah_attr ah_attr;
898 struct ibv_ah_attr alt_ah_attr;
899 uint16_t pkey_index;
900 uint16_t alt_pkey_index;
901 uint8_t en_sqd_async_notify;
902 uint8_t sq_draining;
903 uint8_t max_rd_atomic;
904 uint8_t max_dest_rd_atomic;
905 uint8_t min_rnr_timer;
906 uint8_t port_num;
907 uint8_t timeout;
908 uint8_t retry_cnt;
909 uint8_t rnr_retry;
910 uint8_t alt_port_num;
911 uint8_t alt_timeout;
912 uint32_t rate_limit;
913 };
914
915 enum ibv_wr_opcode {
916 IBV_WR_RDMA_WRITE,
917 IBV_WR_RDMA_WRITE_WITH_IMM,
918 IBV_WR_SEND,
919 IBV_WR_SEND_WITH_IMM,
920 IBV_WR_RDMA_READ,
921 IBV_WR_ATOMIC_CMP_AND_SWP,
922 IBV_WR_ATOMIC_FETCH_AND_ADD,
923 IBV_WR_LOCAL_INV,
924 IBV_WR_BIND_MW,
925 IBV_WR_SEND_WITH_INV,
926 IBV_WR_TSO,
927 };
928
929 enum ibv_send_flags {
930 IBV_SEND_FENCE = 1 << 0,
931 IBV_SEND_SIGNALED = 1 << 1,
932 IBV_SEND_SOLICITED = 1 << 2,
933 IBV_SEND_INLINE = 1 << 3,
934 IBV_SEND_IP_CSUM = 1 << 4
935 };
936
937 struct ibv_sge {
938 uint64_t addr;
939 uint32_t length;
940 uint32_t lkey;
941 };
942
943 struct ibv_send_wr {
944 uint64_t wr_id;
945 struct ibv_send_wr *next;
946 struct ibv_sge *sg_list;
947 int num_sge;
948 enum ibv_wr_opcode opcode;
949 int send_flags;
950 __be32 imm_data;
951 union {
952 struct {
953 uint64_t remote_addr;
954 uint32_t rkey;
955 } rdma;
956 struct {
957 uint64_t remote_addr;
958 uint64_t compare_add;
959 uint64_t swap;
960 uint32_t rkey;
961 } atomic;
962 struct {
963 struct ibv_ah *ah;
964 uint32_t remote_qpn;
965 uint32_t remote_qkey;
966 } ud;
967 } wr;
968 union {
969 struct {
970 uint32_t remote_srqn;
971 } xrc;
972 } qp_type;
973 union {
974 struct {
975 struct ibv_mw *mw;
976 uint32_t rkey;
977 struct ibv_mw_bind_info bind_info;
978 } bind_mw;
979 struct {
980 void *hdr;
981 uint16_t hdr_sz;
982 uint16_t mss;
983 } tso;
984 };
985 };
986
987 struct ibv_recv_wr {
988 uint64_t wr_id;
989 struct ibv_recv_wr *next;
990 struct ibv_sge *sg_list;
991 int num_sge;
992 };
993
994 struct ibv_mw_bind {
995 uint64_t wr_id;
996 int send_flags;
997 struct ibv_mw_bind_info bind_info;
998 };
999
1000 struct ibv_srq {
1001 struct ibv_context *context;
1002 void *srq_context;
1003 struct ibv_pd *pd;
1004 uint32_t handle;
1005
1006 pthread_mutex_t mutex;
1007 pthread_cond_t cond;
1008 uint32_t events_completed;
1009 };
1010
1011 /*
 * Work Queue. A QP can be created without internal WQs "packaged" inside it;
 * such a QP can then be configured to use an "external" WQ object as its
 * receive/send queue.
 * A WQ is associated (many to one) with a Completion Queue and owns its own
 * WQ properties (PD, WQ size, etc.).
 * A WQ of type IBV_WQT_RQ:
 * - Contains receive WQEs; in this case its PD is used for scatter as well.
 * - Exposes a post-receive function used to post a list of work
 *   requests (WRs) to its receive queue.
1021 */
1022 struct ibv_wq {
1023 struct ibv_context *context;
1024 void *wq_context;
1025 struct ibv_pd *pd;
1026 struct ibv_cq *cq;
1027 uint32_t wq_num;
1028 uint32_t handle;
1029 enum ibv_wq_state state;
1030 enum ibv_wq_type wq_type;
1031 int (*post_recv)(struct ibv_wq *current,
1032 struct ibv_recv_wr *recv_wr,
1033 struct ibv_recv_wr **bad_recv_wr);
1034 pthread_mutex_t mutex;
1035 pthread_cond_t cond;
1036 uint32_t events_completed;
1037 uint32_t comp_mask;
1038 };
1039
1040 struct ibv_qp {
1041 struct ibv_context *context;
1042 void *qp_context;
1043 struct ibv_pd *pd;
1044 struct ibv_cq *send_cq;
1045 struct ibv_cq *recv_cq;
1046 struct ibv_srq *srq;
1047 uint32_t handle;
1048 uint32_t qp_num;
1049 enum ibv_qp_state state;
1050 enum ibv_qp_type qp_type;
1051
1052 pthread_mutex_t mutex;
1053 pthread_cond_t cond;
1054 uint32_t events_completed;
1055 };
1056
1057 struct ibv_comp_channel {
1058 struct ibv_context *context;
1059 int fd;
1060 int refcnt;
1061 };
1062
1063 struct ibv_cq {
1064 struct ibv_context *context;
1065 struct ibv_comp_channel *channel;
1066 void *cq_context;
1067 uint32_t handle;
1068 int cqe;
1069
1070 pthread_mutex_t mutex;
1071 pthread_cond_t cond;
1072 uint32_t comp_events_completed;
1073 uint32_t async_events_completed;
1074 };
1075
1076 struct ibv_poll_cq_attr {
1077 uint32_t comp_mask;
1078 };
1079
1080 struct ibv_cq_ex {
1081 struct ibv_context *context;
1082 struct ibv_comp_channel *channel;
1083 void *cq_context;
1084 uint32_t handle;
1085 int cqe;
1086
1087 pthread_mutex_t mutex;
1088 pthread_cond_t cond;
1089 uint32_t comp_events_completed;
1090 uint32_t async_events_completed;
1091
1092 uint32_t comp_mask;
1093 enum ibv_wc_status status;
1094 uint64_t wr_id;
1095 int (*start_poll)(struct ibv_cq_ex *current,
1096 struct ibv_poll_cq_attr *attr);
1097 int (*next_poll)(struct ibv_cq_ex *current);
1098 void (*end_poll)(struct ibv_cq_ex *current);
1099 enum ibv_wc_opcode (*read_opcode)(struct ibv_cq_ex *current);
1100 uint32_t (*read_vendor_err)(struct ibv_cq_ex *current);
1101 uint32_t (*read_byte_len)(struct ibv_cq_ex *current);
1102 uint32_t (*read_imm_data)(struct ibv_cq_ex *current);
1103 uint32_t (*read_qp_num)(struct ibv_cq_ex *current);
1104 uint32_t (*read_src_qp)(struct ibv_cq_ex *current);
1105 int (*read_wc_flags)(struct ibv_cq_ex *current);
1106 uint32_t (*read_slid)(struct ibv_cq_ex *current);
1107 uint8_t (*read_sl)(struct ibv_cq_ex *current);
1108 uint8_t (*read_dlid_path_bits)(struct ibv_cq_ex *current);
1109 uint64_t (*read_completion_ts)(struct ibv_cq_ex *current);
1110 uint16_t (*read_cvlan)(struct ibv_cq_ex *current);
1111 uint32_t (*read_flow_tag)(struct ibv_cq_ex *current);
1112 };
1113
static inline struct ibv_cq *ibv_cq_ex_to_cq(struct ibv_cq_ex *cq)
1115 {
1116 return (struct ibv_cq *)cq;
1117 }
1118
static inline int ibv_start_poll(struct ibv_cq_ex *cq,
1120 struct ibv_poll_cq_attr *attr)
1121 {
1122 return cq->start_poll(cq, attr);
1123 }
1124
static inline int ibv_next_poll(struct ibv_cq_ex *cq)
1126 {
1127 return cq->next_poll(cq);
1128 }
1129
static inline void ibv_end_poll(struct ibv_cq_ex *cq)
1131 {
1132 cq->end_poll(cq);
1133 }
1134
static inline enum ibv_wc_opcode ibv_wc_read_opcode(struct ibv_cq_ex *cq)
1136 {
1137 return cq->read_opcode(cq);
1138 }
1139
static inline uint32_t ibv_wc_read_vendor_err(struct ibv_cq_ex *cq)
1141 {
1142 return cq->read_vendor_err(cq);
1143 }
1144
static inline uint32_t ibv_wc_read_byte_len(struct ibv_cq_ex *cq)
1146 {
1147 return cq->read_byte_len(cq);
1148 }
1149
static inline uint32_t ibv_wc_read_imm_data(struct ibv_cq_ex *cq)
1151 {
1152 return cq->read_imm_data(cq);
1153 }
1154
static inline uint32_t ibv_wc_read_qp_num(struct ibv_cq_ex *cq)
1156 {
1157 return cq->read_qp_num(cq);
1158 }
1159
static inline uint32_t ibv_wc_read_src_qp(struct ibv_cq_ex *cq)
1161 {
1162 return cq->read_src_qp(cq);
1163 }
1164
static inline int ibv_wc_read_wc_flags(struct ibv_cq_ex *cq)
1166 {
1167 return cq->read_wc_flags(cq);
1168 }
1169
static inline uint32_t ibv_wc_read_slid(struct ibv_cq_ex *cq)
1171 {
1172 return cq->read_slid(cq);
1173 }
1174
static inline uint8_t ibv_wc_read_sl(struct ibv_cq_ex *cq)
1176 {
1177 return cq->read_sl(cq);
1178 }
1179
static inline uint8_t ibv_wc_read_dlid_path_bits(struct ibv_cq_ex *cq)
1181 {
1182 return cq->read_dlid_path_bits(cq);
1183 }
1184
static inline uint64_t ibv_wc_read_completion_ts(struct ibv_cq_ex *cq)
1186 {
1187 return cq->read_completion_ts(cq);
1188 }
1189
static inline uint16_t ibv_wc_read_cvlan(struct ibv_cq_ex *cq)
1191 {
1192 return cq->read_cvlan(cq);
1193 }
1194
static inline uint32_t ibv_wc_read_flow_tag(struct ibv_cq_ex *cq)
1196 {
1197 return cq->read_flow_tag(cq);
1198 }
1199
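/*
 * Example (illustrative sketch): draining an extended CQ with the polling
 * interface above, assuming start_poll/next_poll return 0 on success and
 * ENOENT when no completion is available; process_wc() is a placeholder, cq
 * is a struct ibv_cq_ex * obtained from ibv_create_cq_ex(), and the fragment
 * sketches the body of a helper returning 0 on success or an errno value:
 *
 *     struct ibv_poll_cq_attr attr = {};
 *     int ret = ibv_start_poll(cq, &attr);
 *
 *     if (ret == ENOENT)
 *             return 0; // CQ is empty
 *     if (ret)
 *             return ret; // error
 *     do {
 *             process_wc(cq->wr_id, cq->status, ibv_wc_read_opcode(cq));
 *             ret = ibv_next_poll(cq);
 *     } while (!ret);
 *     ibv_end_poll(cq);
 *     return (ret == ENOENT) ? 0 : ret;
 */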
static inline int ibv_post_wq_recv(struct ibv_wq *wq,
1201 struct ibv_recv_wr *recv_wr,
1202 struct ibv_recv_wr **bad_recv_wr)
1203 {
1204 return wq->post_recv(wq, recv_wr, bad_recv_wr);
1205 }
1206
1207 struct ibv_ah {
1208 struct ibv_context *context;
1209 struct ibv_pd *pd;
1210 uint32_t handle;
1211 };
1212
1213 enum ibv_flow_flags {
1214 IBV_FLOW_ATTR_FLAGS_ALLOW_LOOP_BACK = 1 << 0,
1215 IBV_FLOW_ATTR_FLAGS_DONT_TRAP = 1 << 1,
1216 };
1217
1218 enum ibv_flow_attr_type {
1219 /* steering according to rule specifications */
1220 IBV_FLOW_ATTR_NORMAL = 0x0,
1221 /* default unicast and multicast rule -
1222 * receive all Eth traffic which isn't steered to any QP
1223 */
1224 IBV_FLOW_ATTR_ALL_DEFAULT = 0x1,
1225 /* default multicast rule -
1226 * receive all Eth multicast traffic which isn't steered to any QP
1227 */
1228 IBV_FLOW_ATTR_MC_DEFAULT = 0x2,
1229 /* sniffer rule - receive all port traffic */
1230 IBV_FLOW_ATTR_SNIFFER = 0x3,
1231 };
1232
1233 enum ibv_flow_spec_type {
1234 IBV_FLOW_SPEC_ETH = 0x20,
1235 IBV_FLOW_SPEC_IPV4 = 0x30,
1236 IBV_FLOW_SPEC_IPV6 = 0x31,
1237 IBV_FLOW_SPEC_IPV4_EXT = 0x32,
1238 IBV_FLOW_SPEC_TCP = 0x40,
1239 IBV_FLOW_SPEC_UDP = 0x41,
1240 IBV_FLOW_SPEC_VXLAN_TUNNEL = 0x50,
1241 IBV_FLOW_SPEC_INNER = 0x100,
1242 IBV_FLOW_SPEC_ACTION_TAG = 0x1000,
1243 IBV_FLOW_SPEC_ACTION_DROP = 0x1001,
1244 };
1245
1246 struct ibv_flow_eth_filter {
1247 uint8_t dst_mac[6];
1248 uint8_t src_mac[6];
1249 uint16_t ether_type;
1250 /*
1251 * same layout as 802.1q: prio 3, cfi 1, vlan id 12
1252 */
1253 uint16_t vlan_tag;
1254 };
1255
1256 struct ibv_flow_spec_eth {
1257 enum ibv_flow_spec_type type;
1258 uint16_t size;
1259 struct ibv_flow_eth_filter val;
1260 struct ibv_flow_eth_filter mask;
1261 };
1262
1263 struct ibv_flow_ipv4_filter {
1264 uint32_t src_ip;
1265 uint32_t dst_ip;
1266 };
1267
1268 struct ibv_flow_spec_ipv4 {
1269 enum ibv_flow_spec_type type;
1270 uint16_t size;
1271 struct ibv_flow_ipv4_filter val;
1272 struct ibv_flow_ipv4_filter mask;
1273 };
1274
1275 struct ibv_flow_ipv4_ext_filter {
1276 uint32_t src_ip;
1277 uint32_t dst_ip;
1278 uint8_t proto;
1279 uint8_t tos;
1280 uint8_t ttl;
1281 uint8_t flags;
1282 };
1283
1284 struct ibv_flow_spec_ipv4_ext {
1285 enum ibv_flow_spec_type type;
1286 uint16_t size;
1287 struct ibv_flow_ipv4_ext_filter val;
1288 struct ibv_flow_ipv4_ext_filter mask;
1289 };
1290
1291 struct ibv_flow_ipv6_filter {
1292 uint8_t src_ip[16];
1293 uint8_t dst_ip[16];
1294 uint32_t flow_label;
1295 uint8_t next_hdr;
1296 uint8_t traffic_class;
1297 uint8_t hop_limit;
1298 };
1299
1300 struct ibv_flow_spec_ipv6 {
1301 enum ibv_flow_spec_type type;
1302 uint16_t size;
1303 struct ibv_flow_ipv6_filter val;
1304 struct ibv_flow_ipv6_filter mask;
1305 };
1306
1307 struct ibv_flow_tcp_udp_filter {
1308 uint16_t dst_port;
1309 uint16_t src_port;
1310 };
1311
1312 struct ibv_flow_spec_tcp_udp {
1313 enum ibv_flow_spec_type type;
1314 uint16_t size;
1315 struct ibv_flow_tcp_udp_filter val;
1316 struct ibv_flow_tcp_udp_filter mask;
1317 };
1318
1319 struct ibv_flow_tunnel_filter {
1320 uint32_t tunnel_id;
1321 };
1322
1323 struct ibv_flow_spec_tunnel {
1324 enum ibv_flow_spec_type type;
1325 uint16_t size;
1326 struct ibv_flow_tunnel_filter val;
1327 struct ibv_flow_tunnel_filter mask;
1328 };
1329
1330 struct ibv_flow_spec_action_tag {
1331 enum ibv_flow_spec_type type;
1332 uint16_t size;
1333 uint32_t tag_id;
1334 };
1335
1336 struct ibv_flow_spec_action_drop {
1337 enum ibv_flow_spec_type type;
1338 uint16_t size;
1339 };
1340
1341 struct ibv_flow_spec {
1342 union {
1343 struct {
1344 enum ibv_flow_spec_type type;
1345 uint16_t size;
1346 } hdr;
1347 struct ibv_flow_spec_eth eth;
1348 struct ibv_flow_spec_ipv4 ipv4;
1349 struct ibv_flow_spec_tcp_udp tcp_udp;
1350 struct ibv_flow_spec_ipv4_ext ipv4_ext;
1351 struct ibv_flow_spec_ipv6 ipv6;
1352 struct ibv_flow_spec_tunnel tunnel;
1353 struct ibv_flow_spec_action_tag flow_tag;
1354 struct ibv_flow_spec_action_drop drop;
1355 };
1356 };
1357
1358 struct ibv_flow_attr {
1359 uint32_t comp_mask;
1360 enum ibv_flow_attr_type type;
1361 uint16_t size;
1362 uint16_t priority;
1363 uint8_t num_of_specs;
1364 uint8_t port;
1365 uint32_t flags;
1366 /* Following are the optional layers according to user request
1367 * struct ibv_flow_spec_xxx [L2]
1368 * struct ibv_flow_spec_yyy [L3/L4]
1369 */
1370 };
1371
1372 struct ibv_flow {
1373 uint32_t comp_mask;
1374 struct ibv_context *context;
1375 uint32_t handle;
1376 };
1377
1378 struct ibv_device;
1379 struct ibv_context;
1380
1381 /* Obsolete, never used, do not touch */
1382 struct _ibv_device_ops {
1383 struct ibv_context * (*_dummy1)(struct ibv_device *device, int cmd_fd);
1384 void (*_dummy2)(struct ibv_context *context);
1385 };
1386
1387 enum {
1388 IBV_SYSFS_NAME_MAX = 64,
1389 IBV_SYSFS_PATH_MAX = 256
1390 };
1391
1392 struct ibv_device {
1393 struct _ibv_device_ops _ops;
1394 enum ibv_node_type node_type;
1395 enum ibv_transport_type transport_type;
1396 /* Name of underlying kernel IB device, eg "mthca0" */
1397 char name[IBV_SYSFS_NAME_MAX];
1398 /* Name of uverbs device, eg "uverbs0" */
1399 char dev_name[IBV_SYSFS_NAME_MAX];
1400 /* Path to infiniband_verbs class device in sysfs */
1401 char dev_path[IBV_SYSFS_PATH_MAX];
1402 /* Path to infiniband class device in sysfs */
1403 char ibdev_path[IBV_SYSFS_PATH_MAX];
1404 };
1405
1406 struct ibv_context_ops {
1407 int (*query_device)(struct ibv_context *context,
1408 struct ibv_device_attr *device_attr);
1409 int (*query_port)(struct ibv_context *context, uint8_t port_num,
1410 struct ibv_port_attr *port_attr);
1411 struct ibv_pd * (*alloc_pd)(struct ibv_context *context);
1412 int (*dealloc_pd)(struct ibv_pd *pd);
1413 struct ibv_mr * (*reg_mr)(struct ibv_pd *pd, void *addr, size_t length,
1414 int access);
1415 int (*rereg_mr)(struct ibv_mr *mr,
1416 int flags,
1417 struct ibv_pd *pd, void *addr,
1418 size_t length,
1419 int access);
1420 int (*dereg_mr)(struct ibv_mr *mr);
1421 struct ibv_mw * (*alloc_mw)(struct ibv_pd *pd, enum ibv_mw_type type);
1422 int (*bind_mw)(struct ibv_qp *qp, struct ibv_mw *mw,
1423 struct ibv_mw_bind *mw_bind);
1424 int (*dealloc_mw)(struct ibv_mw *mw);
1425 struct ibv_cq * (*create_cq)(struct ibv_context *context, int cqe,
1426 struct ibv_comp_channel *channel,
1427 int comp_vector);
1428 int (*poll_cq)(struct ibv_cq *cq, int num_entries, struct ibv_wc *wc);
1429 int (*req_notify_cq)(struct ibv_cq *cq, int solicited_only);
1430 void (*cq_event)(struct ibv_cq *cq);
1431 int (*resize_cq)(struct ibv_cq *cq, int cqe);
1432 int (*destroy_cq)(struct ibv_cq *cq);
1433 struct ibv_srq * (*create_srq)(struct ibv_pd *pd,
1434 struct ibv_srq_init_attr *srq_init_attr);
1435 int (*modify_srq)(struct ibv_srq *srq,
1436 struct ibv_srq_attr *srq_attr,
1437 int srq_attr_mask);
1438 int (*query_srq)(struct ibv_srq *srq,
1439 struct ibv_srq_attr *srq_attr);
1440 int (*destroy_srq)(struct ibv_srq *srq);
1441 int (*post_srq_recv)(struct ibv_srq *srq,
1442 struct ibv_recv_wr *recv_wr,
1443 struct ibv_recv_wr **bad_recv_wr);
1444 struct ibv_qp * (*create_qp)(struct ibv_pd *pd, struct ibv_qp_init_attr *attr);
1445 int (*query_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr,
1446 int attr_mask,
1447 struct ibv_qp_init_attr *init_attr);
1448 int (*modify_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr,
1449 int attr_mask);
1450 int (*destroy_qp)(struct ibv_qp *qp);
1451 int (*post_send)(struct ibv_qp *qp, struct ibv_send_wr *wr,
1452 struct ibv_send_wr **bad_wr);
1453 int (*post_recv)(struct ibv_qp *qp, struct ibv_recv_wr *wr,
1454 struct ibv_recv_wr **bad_wr);
1455 struct ibv_ah * (*create_ah)(struct ibv_pd *pd, struct ibv_ah_attr *attr);
1456 int (*destroy_ah)(struct ibv_ah *ah);
1457 int (*attach_mcast)(struct ibv_qp *qp, const union ibv_gid *gid,
1458 uint16_t lid);
1459 int (*detach_mcast)(struct ibv_qp *qp, const union ibv_gid *gid,
1460 uint16_t lid);
1461 void (*async_event)(struct ibv_async_event *event);
1462 };
1463
1464 struct ibv_context {
1465 struct ibv_device *device;
1466 struct ibv_context_ops ops;
1467 int cmd_fd;
1468 int async_fd;
1469 int num_comp_vectors;
1470 pthread_mutex_t mutex;
1471 void *abi_compat;
1472 };
1473
1474 enum ibv_cq_init_attr_mask {
1475 IBV_CQ_INIT_ATTR_MASK_FLAGS = 1 << 0,
1476 IBV_CQ_INIT_ATTR_MASK_RESERVED = 1 << 1
1477 };
1478
1479 enum ibv_create_cq_attr_flags {
1480 IBV_CREATE_CQ_ATTR_SINGLE_THREADED = 1 << 0,
1481 IBV_CREATE_CQ_ATTR_RESERVED = 1 << 1,
1482 };
1483
1484 struct ibv_cq_init_attr_ex {
1485 /* Minimum number of entries required for CQ */
1486 uint32_t cqe;
1487 /* Consumer-supplied context returned for completion events */
1488 void *cq_context;
1489 /* Completion channel where completion events will be queued.
1490 * May be NULL if completion events will not be used.
1491 */
1492 struct ibv_comp_channel *channel;
1493 /* Completion vector used to signal completion events.
1494 * Must be < context->num_comp_vectors.
1495 */
1496 uint32_t comp_vector;
	/* Or'ed bits of enum ibv_create_cq_wc_flags. */
1498 uint64_t wc_flags;
1499 /* compatibility mask (extended verb). Or'd flags of
1500 * enum ibv_cq_init_attr_mask
1501 */
1502 uint32_t comp_mask;
1503 /* create cq attr flags - one or more flags from
1504 * enum ibv_create_cq_attr_flags
1505 */
1506 uint32_t flags;
1507 };
1508
1509 enum ibv_values_mask {
1510 IBV_VALUES_MASK_RAW_CLOCK = 1 << 0,
1511 IBV_VALUES_MASK_RESERVED = 1 << 1
1512 };
1513
1514 struct ibv_values_ex {
1515 uint32_t comp_mask;
1516 struct timespec raw_clock;
1517 };
1518
1519 enum verbs_context_mask {
1520 VERBS_CONTEXT_XRCD = 1 << 0,
1521 VERBS_CONTEXT_SRQ = 1 << 1,
1522 VERBS_CONTEXT_QP = 1 << 2,
1523 VERBS_CONTEXT_CREATE_FLOW = 1 << 3,
1524 VERBS_CONTEXT_DESTROY_FLOW = 1 << 4,
1525 VERBS_CONTEXT_RESERVED = 1 << 5
1526 };
1527
1528 struct verbs_context {
1529 /* "grows up" - new fields go here */
1530 int (*destroy_rwq_ind_table)(struct ibv_rwq_ind_table *rwq_ind_table);
1531 struct ibv_rwq_ind_table *(*create_rwq_ind_table)(struct ibv_context *context,
1532 struct ibv_rwq_ind_table_init_attr *init_attr);
1533 int (*destroy_wq)(struct ibv_wq *wq);
1534 int (*modify_wq)(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr);
1535 struct ibv_wq * (*create_wq)(struct ibv_context *context,
1536 struct ibv_wq_init_attr *wq_init_attr);
1537 int (*query_rt_values)(struct ibv_context *context,
1538 struct ibv_values_ex *values);
1539 struct ibv_cq_ex *(*create_cq_ex)(struct ibv_context *context,
1540 struct ibv_cq_init_attr_ex *init_attr);
1541 struct verbs_ex_private *priv;
1542 int (*query_device_ex)(struct ibv_context *context,
1543 const struct ibv_query_device_ex_input *input,
1544 struct ibv_device_attr_ex *attr,
1545 size_t attr_size);
1546 int (*ibv_destroy_flow) (struct ibv_flow *flow);
1547 void (*ABI_placeholder2) (void); /* DO NOT COPY THIS GARBAGE */
1548 struct ibv_flow * (*ibv_create_flow) (struct ibv_qp *qp,
1549 struct ibv_flow_attr *flow_attr);
1550 void (*ABI_placeholder1) (void); /* DO NOT COPY THIS GARBAGE */
1551 struct ibv_qp *(*open_qp)(struct ibv_context *context,
1552 struct ibv_qp_open_attr *attr);
1553 struct ibv_qp *(*create_qp_ex)(struct ibv_context *context,
1554 struct ibv_qp_init_attr_ex *qp_init_attr_ex);
1555 int (*get_srq_num)(struct ibv_srq *srq, uint32_t *srq_num);
1556 struct ibv_srq * (*create_srq_ex)(struct ibv_context *context,
1557 struct ibv_srq_init_attr_ex *srq_init_attr_ex);
1558 struct ibv_xrcd * (*open_xrcd)(struct ibv_context *context,
1559 struct ibv_xrcd_init_attr *xrcd_init_attr);
1560 int (*close_xrcd)(struct ibv_xrcd *xrcd);
1561 uint64_t has_comp_mask;
1562 size_t sz; /* Must be immediately before struct ibv_context */
1563 struct ibv_context context; /* Must be last field in the struct */
1564 };
1565
static inline struct verbs_context *verbs_get_ctx(struct ibv_context *ctx)
1567 {
1568 return (ctx->abi_compat != __VERBS_ABI_IS_EXTENDED) ?
1569 NULL : container_of(ctx, struct verbs_context, context);
1570 }
1571
1572 #define verbs_get_ctx_op(ctx, op) ({ \
1573 struct verbs_context *__vctx = verbs_get_ctx(ctx); \
1574 (!__vctx || (__vctx->sz < sizeof(*__vctx) - offsetof(struct verbs_context, op)) || \
1575 !__vctx->op) ? NULL : __vctx; })
1576
1577 #define verbs_set_ctx_op(_vctx, op, ptr) ({ \
1578 struct verbs_context *vctx = _vctx; \
1579 if (vctx && (vctx->sz >= sizeof(*vctx) - offsetof(struct verbs_context, op))) \
1580 vctx->op = ptr; })
1581
1582 /**
1583 * ibv_get_device_list - Get list of IB devices currently available
1584 * @num_devices: optional. if non-NULL, set to the number of devices
1585 * returned in the array.
1586 *
1587 * Return a NULL-terminated array of IB devices. The array can be
1588 * released with ibv_free_device_list().
1589 */
1590 struct ibv_device **ibv_get_device_list(int *num_devices);
1591
1592 /**
1593 * ibv_free_device_list - Free list from ibv_get_device_list()
1594 *
1595 * Free an array of devices returned from ibv_get_device_list(). Once
1596 * the array is freed, pointers to devices that were not opened with
1597 * ibv_open_device() are no longer valid. Client code must open all
1598 * devices it intends to use before calling ibv_free_device_list().
1599 */
1600 void ibv_free_device_list(struct ibv_device **list);
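/*
 * Example (illustrative sketch): opening the first available device; error
 * handling abbreviated. Note the device is opened before the list is freed,
 * as required above:
 *
 *     int num;
 *     struct ibv_device **list = ibv_get_device_list(&num);
 *     struct ibv_context *ctx = NULL;
 *
 *     if (list && num > 0)
 *             ctx = ibv_open_device(list[0]);
 *     ibv_free_device_list(list);
 */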
1601
1602 /**
1603 * ibv_get_device_name - Return kernel device name
1604 */
1605 const char *ibv_get_device_name(struct ibv_device *device);
1606
1607 /**
1608 * ibv_get_device_guid - Return device's node GUID
1609 */
1610 __be64 ibv_get_device_guid(struct ibv_device *device);
1611
1612 /**
1613 * ibv_open_device - Initialize device for use
1614 */
1615 struct ibv_context *ibv_open_device(struct ibv_device *device);
1616
1617 /**
1618 * ibv_close_device - Release device
1619 */
1620 int ibv_close_device(struct ibv_context *context);
1621
1622 /**
1623 * ibv_get_async_event - Get next async event
1624 * @event: Pointer to use to return async event
1625 *
1626 * All async events returned by ibv_get_async_event() must eventually
1627 * be acknowledged with ibv_ack_async_event().
1628 */
1629 int ibv_get_async_event(struct ibv_context *context,
1630 struct ibv_async_event *event);
1631
1632 /**
1633 * ibv_ack_async_event - Acknowledge an async event
1634 * @event: Event to be acknowledged.
1635 *
1636 * All async events which are returned by ibv_get_async_event() must
1637 * be acknowledged. To avoid races, destroying an object (CQ, SRQ or
1638 * QP) will wait for all affiliated events to be acknowledged, so
1639 * there should be a one-to-one correspondence between acks and
1640 * successful gets.
1641 */
1642 void ibv_ack_async_event(struct ibv_async_event *event);
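/*
 * Example (illustrative sketch): one iteration of an async event loop,
 * acknowledging every event that was successfully read; ctx is an open
 * struct ibv_context *:
 *
 *     struct ibv_async_event event;
 *
 *     if (!ibv_get_async_event(ctx, &event)) {
 *             // inspect event.event_type / event.element here
 *             ibv_ack_async_event(&event);
 *     }
 */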
1643
1644 /**
1645 * ibv_query_device - Get device properties
1646 */
1647 int ibv_query_device(struct ibv_context *context,
1648 struct ibv_device_attr *device_attr);
1649
1650 /**
1651 * ibv_query_port - Get port properties
1652 */
1653 int ibv_query_port(struct ibv_context *context, uint8_t port_num,
1654 struct ibv_port_attr *port_attr);
1655
static inline int ___ibv_query_port(struct ibv_context *context,
1657 uint8_t port_num,
1658 struct ibv_port_attr *port_attr)
1659 {
1660 /* For compatibility when running with old libibverbs */
1661 port_attr->link_layer = IBV_LINK_LAYER_UNSPECIFIED;
1662 port_attr->reserved = 0;
1663
1664 return ibv_query_port(context, port_num, port_attr);
1665 }
1666
1667 #define ibv_query_port(context, port_num, port_attr) \
1668 ___ibv_query_port(context, port_num, port_attr)
1669
1670 /**
1671 * ibv_query_gid - Get a GID table entry
1672 */
1673 int ibv_query_gid(struct ibv_context *context, uint8_t port_num,
1674 int index, union ibv_gid *gid);
1675
1676 /**
1677 * ibv_query_pkey - Get a P_Key table entry
1678 */
1679 int ibv_query_pkey(struct ibv_context *context, uint8_t port_num,
1680 int index, __be16 *pkey);
1681
1682 /**
1683 * ibv_alloc_pd - Allocate a protection domain
1684 */
1685 struct ibv_pd *ibv_alloc_pd(struct ibv_context *context);
1686
1687 /**
1688 * ibv_dealloc_pd - Free a protection domain
1689 */
1690 int ibv_dealloc_pd(struct ibv_pd *pd);
1691
static inline struct ibv_flow *ibv_create_flow(struct ibv_qp *qp,
1693 struct ibv_flow_attr *flow)
1694 {
1695 struct verbs_context *vctx = verbs_get_ctx_op(qp->context,
1696 ibv_create_flow);
1697 if (!vctx || !vctx->ibv_create_flow) {
1698 errno = ENOSYS;
1699 return NULL;
1700 }
1701
1702 return vctx->ibv_create_flow(qp, flow);
1703 }
1704
static inline int ibv_destroy_flow(struct ibv_flow *flow_id)
1706 {
1707 struct verbs_context *vctx = verbs_get_ctx_op(flow_id->context,
1708 ibv_destroy_flow);
1709 if (!vctx || !vctx->ibv_destroy_flow)
1710 return -ENOSYS;
1711 return vctx->ibv_destroy_flow(flow_id);
1712 }
1713
1714 /**
1715 * ibv_open_xrcd - Open an extended connection domain
1716 */
1717 static inline struct ibv_xrcd *
ibv_open_xrcd(struct ibv_context *context, struct ibv_xrcd_init_attr *xrcd_init_attr)
1719 {
1720 struct verbs_context *vctx = verbs_get_ctx_op(context, open_xrcd);
1721 if (!vctx) {
1722 errno = ENOSYS;
1723 return NULL;
1724 }
1725 return vctx->open_xrcd(context, xrcd_init_attr);
1726 }
1727
1728 /**
1729 * ibv_close_xrcd - Close an extended connection domain
1730 */
static inline int ibv_close_xrcd(struct ibv_xrcd *xrcd)
1732 {
1733 struct verbs_context *vctx = verbs_get_ctx(xrcd->context);
1734 return vctx->close_xrcd(xrcd);
1735 }
1736
1737 /**
1738 * ibv_reg_mr - Register a memory region
1739 */
1740 struct ibv_mr *ibv_reg_mr(struct ibv_pd *pd, void *addr,
1741 size_t length, int access);
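/*
 * Example (illustrative): registering a buffer for local and remote write
 * access; buf and len are placeholders:
 *
 *     struct ibv_mr *mr = ibv_reg_mr(pd, buf, len,
 *                                    IBV_ACCESS_LOCAL_WRITE |
 *                                    IBV_ACCESS_REMOTE_WRITE);
 */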
1742
1743
1744 enum ibv_rereg_mr_err_code {
1745 /* Old MR is valid, invalid input */
1746 IBV_REREG_MR_ERR_INPUT = -1,
	/* Old MR is valid, failed applying don't-fork on the new address range */
1748 IBV_REREG_MR_ERR_DONT_FORK_NEW = -2,
	/* New MR is valid, failed applying do-fork on the old address range */
1750 IBV_REREG_MR_ERR_DO_FORK_OLD = -3,
1751 /* MR shouldn't be used, command error */
1752 IBV_REREG_MR_ERR_CMD = -4,
1753 /* MR shouldn't be used, command error, invalid fork state on new address range */
1754 IBV_REREG_MR_ERR_CMD_AND_DO_FORK_NEW = -5,
1755 };
1756
1757 /**
1758 * ibv_rereg_mr - Re-Register a memory region
1759 */
1760 int ibv_rereg_mr(struct ibv_mr *mr, int flags,
1761 struct ibv_pd *pd, void *addr,
1762 size_t length, int access);
1763 /**
1764 * ibv_dereg_mr - Deregister a memory region
1765 */
1766 int ibv_dereg_mr(struct ibv_mr *mr);
1767
1768 /**
1769 * ibv_alloc_mw - Allocate a memory window
1770 */
static inline struct ibv_mw *ibv_alloc_mw(struct ibv_pd *pd,
1772 enum ibv_mw_type type)
1773 {
1774 struct ibv_mw *mw;
1775
1776 if (!pd->context->ops.alloc_mw) {
1777 errno = ENOSYS;
1778 return NULL;
1779 }
1780
1781 mw = pd->context->ops.alloc_mw(pd, type);
1782 return mw;
1783 }
1784
1785 /**
1786 * ibv_dealloc_mw - Free a memory window
1787 */
static inline int ibv_dealloc_mw(struct ibv_mw *mw)
1789 {
1790 return mw->context->ops.dealloc_mw(mw);
1791 }
1792
1793 /**
 * ibv_inc_rkey - Increase the 8 least significant bits of the given rkey
1795 */
static inline uint32_t ibv_inc_rkey(uint32_t rkey)
1797 {
1798 const uint32_t mask = 0x000000ff;
1799 uint8_t newtag = (uint8_t)((rkey + 1) & mask);
1800
1801 return (rkey & ~mask) | newtag;
1802 }
1803
1804 /**
1805 * ibv_bind_mw - Bind a memory window to a region
1806 */
static inline int ibv_bind_mw(struct ibv_qp *qp, struct ibv_mw *mw,
1808 struct ibv_mw_bind *mw_bind)
1809 {
1810 if (mw->type != IBV_MW_TYPE_1)
1811 return EINVAL;
1812
1813 return mw->context->ops.bind_mw(qp, mw, mw_bind);
1814 }
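/*
 * Example (illustrative sketch): binding a type 1 memory window to a
 * registered region, granting remote read access; assumes mr was registered
 * with IBV_ACCESS_MW_BIND:
 *
 *     struct ibv_mw_bind bind = {
 *             .wr_id      = 1,
 *             .send_flags = IBV_SEND_SIGNALED,
 *             .bind_info  = {
 *                     .mr              = mr,
 *                     .addr            = (uint64_t) (uintptr_t) mr->addr,
 *                     .length          = mr->length,
 *                     .mw_access_flags = IBV_ACCESS_REMOTE_READ,
 *             },
 *     };
 *
 *     ibv_bind_mw(qp, mw, &bind);
 */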
1815
1816 /**
1817 * ibv_create_comp_channel - Create a completion event channel
1818 */
1819 struct ibv_comp_channel *ibv_create_comp_channel(struct ibv_context *context);
1820
1821 /**
1822 * ibv_destroy_comp_channel - Destroy a completion event channel
1823 */
1824 int ibv_destroy_comp_channel(struct ibv_comp_channel *channel);
1825
1826 /**
1827 * ibv_create_cq - Create a completion queue
1828 * @context - Context CQ will be attached to
1829 * @cqe - Minimum number of entries required for CQ
1830 * @cq_context - Consumer-supplied context returned for completion events
1831 * @channel - Completion channel where completion events will be queued.
1832 * May be NULL if completion events will not be used.
1833 * @comp_vector - Completion vector used to signal completion events.
1834 * Must be >= 0 and < context->num_comp_vectors.
1835 */
1836 struct ibv_cq *ibv_create_cq(struct ibv_context *context, int cqe,
1837 void *cq_context,
1838 struct ibv_comp_channel *channel,
1839 int comp_vector);
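/*
 * Example (illustrative): a CQ with at least 256 entries, no consumer
 * context, no completion channel, completion vector 0:
 *
 *     struct ibv_cq *cq = ibv_create_cq(ctx, 256, NULL, NULL, 0);
 */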
1840
1841 /**
1842 * ibv_create_cq_ex - Create a completion queue
1843 * @context - Context CQ will be attached to
1844 * @cq_attr - Attributes to create the CQ with
1845 */
1846 static inline
struct ibv_cq_ex *ibv_create_cq_ex(struct ibv_context *context,
1848 struct ibv_cq_init_attr_ex *cq_attr)
1849 {
1850 struct verbs_context *vctx = verbs_get_ctx_op(context, create_cq_ex);
1851
1852 if (!vctx) {
1853 errno = ENOSYS;
1854 return NULL;
1855 }
1856
1857 if (cq_attr->comp_mask & ~(IBV_CQ_INIT_ATTR_MASK_RESERVED - 1)) {
1858 errno = EINVAL;
1859 return NULL;
1860 }
1861
1862 return vctx->create_cq_ex(context, cq_attr);
1863 }
1864
1865 /**
1866 * ibv_resize_cq - Modifies the capacity of the CQ.
1867 * @cq: The CQ to resize.
1868 * @cqe: The minimum size of the CQ.
1869 *
1870 * Users can examine the cq structure to determine the actual CQ size.
1871 */
1872 int ibv_resize_cq(struct ibv_cq *cq, int cqe);
1873
1874 /**
1875 * ibv_destroy_cq - Destroy a completion queue
1876 */
1877 int ibv_destroy_cq(struct ibv_cq *cq);
1878
1879 /**
1880 * ibv_get_cq_event - Read next CQ event
1881 * @channel: Channel to get next event from.
1882 * @cq: Used to return pointer to CQ.
1883 * @cq_context: Used to return consumer-supplied CQ context.
1884 *
1885 * All completion events returned by ibv_get_cq_event() must
1886 * eventually be acknowledged with ibv_ack_cq_events().
1887 */
1888 int ibv_get_cq_event(struct ibv_comp_channel *channel,
1889 struct ibv_cq **cq, void **cq_context);
1890
1891 /**
1892 * ibv_ack_cq_events - Acknowledge CQ completion events
1893 * @cq: CQ to acknowledge events for
1894 * @nevents: Number of events to acknowledge.
1895 *
1896 * All completion events which are returned by ibv_get_cq_event() must
1897 * be acknowledged. To avoid races, ibv_destroy_cq() will wait for
1898 * all completion events to be acknowledged, so there should be a
1899 * one-to-one correspondence between acks and successful gets. An
1900 * application may accumulate multiple completion events and
1901 * acknowledge them in a single call to ibv_ack_cq_events() by passing
1902 * the number of events to ack in @nevents.
1903 */
1904 void ibv_ack_cq_events(struct ibv_cq *cq, unsigned int nevents);
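/*
 * Example (illustrative sketch): waiting for a completion event on a channel
 * and acknowledging it; re-arming and draining the CQ are left as a comment:
 *
 *     struct ibv_cq *ev_cq;
 *     void *ev_ctx;
 *
 *     if (!ibv_get_cq_event(channel, &ev_cq, &ev_ctx)) {
 *             ibv_ack_cq_events(ev_cq, 1);
 *             // typically: ibv_req_notify_cq(ev_cq, 0), then poll ev_cq empty
 *     }
 */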
1905
1906 /**
1907 * ibv_poll_cq - Poll a CQ for work completions
1908 * @cq:the CQ being polled
1909 * @num_entries:maximum number of completions to return
1910 * @wc:array of at least @num_entries of &struct ibv_wc where completions
1911 * will be returned
1912 *
1913 * Poll a CQ for (possibly multiple) completions. If the return value
1914 * is < 0, an error occurred. If the return value is >= 0, it is the
1915 * number of completions returned. If the return value is
1916 * non-negative and strictly less than num_entries, then the CQ was
1917 * emptied.
1918 */
static inline int ibv_poll_cq(struct ibv_cq *cq, int num_entries, struct ibv_wc *wc)
1920 {
1921 return cq->context->ops.poll_cq(cq, num_entries, wc);
1922 }
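/*
 * Example (illustrative): draining a CQ, processing up to 16 completions per
 * call; process_wc() is a placeholder:
 *
 *     struct ibv_wc wc[16];
 *     int n, i;
 *
 *     while ((n = ibv_poll_cq(cq, 16, wc)) > 0)
 *             for (i = 0; i < n; ++i)
 *                     process_wc(&wc[i]);
 */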
1923
1924 /**
1925 * ibv_req_notify_cq - Request completion notification on a CQ. An
1926 * event will be added to the completion channel associated with the
1927 * CQ when an entry is added to the CQ.
1928 * @cq: The completion queue to request notification for.
1929 * @solicited_only: If non-zero, an event will be generated only for
1930 * the next solicited CQ entry. If zero, any CQ entry, solicited or
1931 * not, will generate an event.
1932 */
static inline int ibv_req_notify_cq(struct ibv_cq *cq, int solicited_only)
1934 {
1935 return cq->context->ops.req_notify_cq(cq, solicited_only);
1936 }

/**
 * ibv_create_srq - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ibv_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ibv_srq *ibv_create_srq(struct ibv_pd *pd,
                               struct ibv_srq_init_attr *srq_init_attr);
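
/*
 * An illustrative sketch of creating an SRQ, assuming "pd" is an allocated
 * protection domain.  max_wr/max_sge are example values and are updated to
 * the actually allocated sizes on success.
 *
 *      struct ibv_srq_init_attr srq_attr = {
 *              .attr = { .max_wr = 1024, .max_sge = 1 },
 *      };
 *      struct ibv_srq *srq = ibv_create_srq(pd, &srq_attr);
 *
 *      if (srq)
 *              printf("SRQ: %u WRs, %u SGEs\n",
 *                     srq_attr.attr.max_wr, srq_attr.attr.max_sge);
 */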

static inline struct ibv_srq *
ibv_create_srq_ex(struct ibv_context *context,
                  struct ibv_srq_init_attr_ex *srq_init_attr_ex)
{
        struct verbs_context *vctx;
        uint32_t mask = srq_init_attr_ex->comp_mask;

        if (!(mask & ~(IBV_SRQ_INIT_ATTR_PD | IBV_SRQ_INIT_ATTR_TYPE)) &&
            (mask & IBV_SRQ_INIT_ATTR_PD) &&
            (!(mask & IBV_SRQ_INIT_ATTR_TYPE) ||
             (srq_init_attr_ex->srq_type == IBV_SRQT_BASIC)))
                return ibv_create_srq(srq_init_attr_ex->pd,
                                      (struct ibv_srq_init_attr *)srq_init_attr_ex);

        vctx = verbs_get_ctx_op(context, create_srq_ex);
        if (!vctx) {
                errno = ENOSYS;
                return NULL;
        }
        return vctx->create_srq_ex(context, srq_init_attr_ex);
}

/**
 * ibv_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IBV_SRQ_MAX_WR to resize the SRQ and/or
 * IBV_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ibv_modify_srq(struct ibv_srq *srq,
                   struct ibv_srq_attr *srq_attr,
                   int srq_attr_mask);

/**
 * ibv_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ibv_query_srq(struct ibv_srq *srq, struct ibv_srq_attr *srq_attr);

static inline int ibv_get_srq_num(struct ibv_srq *srq, uint32_t *srq_num)
{
        struct verbs_context *vctx = verbs_get_ctx_op(srq->context, get_srq_num);

        if (!vctx)
                return ENOSYS;

        return vctx->get_srq_num(srq, srq_num);
}

/**
 * ibv_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ibv_destroy_srq(struct ibv_srq *srq);

/**
 * ibv_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the SRQ.
 */
static inline int ibv_post_srq_recv(struct ibv_srq *srq,
                                    struct ibv_recv_wr *recv_wr,
                                    struct ibv_recv_wr **bad_recv_wr)
{
        return srq->context->ops.post_srq_recv(srq, recv_wr, bad_recv_wr);
}
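
/*
 * An illustrative sketch of posting a single receive to an SRQ.  "srq",
 * "buf", "buf_len" and "mr" (a memory region covering buf) are assumed to
 * exist.
 *
 *      struct ibv_sge sge = {
 *              .addr   = (uintptr_t)buf,
 *              .length = buf_len,
 *              .lkey   = mr->lkey,
 *      };
 *      struct ibv_recv_wr wr = {
 *              .wr_id   = 1,
 *              .sg_list = &sge,
 *              .num_sge = 1,
 *      };
 *      struct ibv_recv_wr *bad_wr;
 *
 *      if (ibv_post_srq_recv(srq, &wr, &bad_wr))
 *              fprintf(stderr, "posting failed at wr_id %llu\n",
 *                      (unsigned long long)bad_wr->wr_id);
 */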

/**
 * ibv_create_qp - Create a queue pair.
 */
struct ibv_qp *ibv_create_qp(struct ibv_pd *pd,
                             struct ibv_qp_init_attr *qp_init_attr);

static inline struct ibv_qp *
ibv_create_qp_ex(struct ibv_context *context, struct ibv_qp_init_attr_ex *qp_init_attr_ex)
{
        struct verbs_context *vctx;
        uint32_t mask = qp_init_attr_ex->comp_mask;

        if (mask == IBV_QP_INIT_ATTR_PD)
                return ibv_create_qp(qp_init_attr_ex->pd,
                                     (struct ibv_qp_init_attr *)qp_init_attr_ex);

        vctx = verbs_get_ctx_op(context, create_qp_ex);
        if (!vctx) {
                errno = ENOSYS;
                return NULL;
        }
        return vctx->create_qp_ex(context, qp_init_attr_ex);
}
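
/*
 * An illustrative sketch of ibv_create_qp_ex() with only IBV_QP_INIT_ATTR_PD
 * set, which takes the legacy ibv_create_qp() path shown above.  "ctx", "pd"
 * and "cq" are assumed to exist; the capacities are example values.
 *
 *      struct ibv_qp_init_attr_ex qp_attr = {
 *              .send_cq   = cq,
 *              .recv_cq   = cq,
 *              .cap       = { .max_send_wr = 64, .max_recv_wr = 64,
 *                             .max_send_sge = 1, .max_recv_sge = 1 },
 *              .qp_type   = IBV_QPT_RC,
 *              .comp_mask = IBV_QP_INIT_ATTR_PD,
 *              .pd        = pd,
 *      };
 *      struct ibv_qp *qp = ibv_create_qp_ex(ctx, &qp_attr);
 */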

/**
 * ibv_query_rt_values_ex - Get current real time @values of a device.
 * @values: in/out - specifies the attributes to query (OR bits of
 *   enum ibv_values_mask into values->comp_mask); the queried values are
 *   returned in the corresponding fields.
 */
static inline int
ibv_query_rt_values_ex(struct ibv_context *context,
                       struct ibv_values_ex *values)
{
        struct verbs_context *vctx;

        vctx = verbs_get_ctx_op(context, query_rt_values);
        if (!vctx)
                return ENOSYS;

        if (values->comp_mask & ~(IBV_VALUES_MASK_RESERVED - 1))
                return EINVAL;

        return vctx->query_rt_values(context, values);
}
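
/*
 * An illustrative sketch of reading the device's raw HW clock, assuming the
 * provider supports IBV_VALUES_MASK_RAW_CLOCK and "ctx" is an opened context.
 *
 *      struct ibv_values_ex v = { .comp_mask = IBV_VALUES_MASK_RAW_CLOCK };
 *
 *      if (!ibv_query_rt_values_ex(ctx, &v))
 *              printf("raw clock: %lld.%09ld\n",
 *                     (long long)v.raw_clock.tv_sec, v.raw_clock.tv_nsec);
 */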

/**
 * ibv_query_device_ex - Get extended device properties
 */
static inline int
ibv_query_device_ex(struct ibv_context *context,
                    const struct ibv_query_device_ex_input *input,
                    struct ibv_device_attr_ex *attr)
{
        struct verbs_context *vctx;
        int ret;

        vctx = verbs_get_ctx_op(context, query_device_ex);
        if (!vctx)
                goto legacy;

        ret = vctx->query_device_ex(context, input, attr, sizeof(*attr));
        if (ret == ENOSYS)
                goto legacy;

        return ret;

legacy:
        memset(attr, 0, sizeof(*attr));
        ret = ibv_query_device(context, &attr->orig_attr);

        return ret;
}
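
/*
 * An illustrative sketch of querying extended device attributes with no
 * extra input (input may be NULL), assuming "ctx" is an opened context.
 *
 *      struct ibv_device_attr_ex dev_attr;
 *
 *      if (!ibv_query_device_ex(ctx, NULL, &dev_attr))
 *              printf("max_qp %d, max_cqe %d\n",
 *                     dev_attr.orig_attr.max_qp, dev_attr.orig_attr.max_cqe);
 */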

/**
 * ibv_open_qp - Open a shareable queue pair.
 */
static inline struct ibv_qp *
ibv_open_qp(struct ibv_context *context, struct ibv_qp_open_attr *qp_open_attr)
{
        struct verbs_context *vctx = verbs_get_ctx_op(context, open_qp);
        if (!vctx) {
                errno = ENOSYS;
                return NULL;
        }
        return vctx->open_qp(context, qp_open_attr);
}

/**
 * ibv_modify_qp - Modify a queue pair.
 */
int ibv_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
                  int attr_mask);

/**
 * ibv_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @attr: The attributes of the specified QP.
 * @attr_mask: A bit-mask used to select specific attributes to query.
 * @init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ibv_query_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
                 int attr_mask,
                 struct ibv_qp_init_attr *init_attr);
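
/*
 * An illustrative sketch of checking a QP's current state, assuming "qp" is
 * an existing queue pair.
 *
 *      struct ibv_qp_attr attr;
 *      struct ibv_qp_init_attr init_attr;
 *
 *      if (!ibv_query_qp(qp, &attr, IBV_QP_STATE, &init_attr) &&
 *          attr.qp_state == IBV_QPS_RTS)
 *              printf("QP %u is ready to send\n", qp->qp_num);
 */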

/**
 * ibv_destroy_qp - Destroy a queue pair.
 */
int ibv_destroy_qp(struct ibv_qp *qp);

/*
 * ibv_create_wq - Creates a WQ associated with the specified protection
 *   domain.
 * @context: ibv_context.
 * @wq_init_attr: A list of initial attributes required to create the
 *   WQ.  If WQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created WQ.
 *
 * wq_init_attr->max_wr and wq_init_attr->max_sge determine
 * the requested size of the WQ, and are set to the actual values allocated
 * on return.
 * If ibv_create_wq() succeeds, then max_wr and max_sge will always be
 * at least as large as the requested values.
 *
 * Return Value
 * ibv_create_wq() returns a pointer to the created WQ, or NULL if the request
 * fails.
 */
static inline struct ibv_wq *ibv_create_wq(struct ibv_context *context,
                                           struct ibv_wq_init_attr *wq_init_attr)
{
        struct verbs_context *vctx = verbs_get_ctx_op(context, create_wq);
        struct ibv_wq *wq;

        if (!vctx) {
                errno = ENOSYS;
                return NULL;
        }

        wq = vctx->create_wq(context, wq_init_attr);
        if (wq)
                wq->events_completed = 0;

        return wq;
}
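
/*
 * An illustrative sketch of creating a receive WQ, assuming "ctx", "pd" and
 * "cq" exist.  The sizes are example values and, as described above, are
 * updated to the actually allocated capacities on success.
 *
 *      struct ibv_wq_init_attr wq_attr = {
 *              .wq_type = IBV_WQT_RQ,
 *              .max_wr  = 256,
 *              .max_sge = 1,
 *              .pd      = pd,
 *              .cq      = cq,
 *      };
 *      struct ibv_wq *wq = ibv_create_wq(ctx, &wq_attr);
 */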

/*
 * ibv_modify_wq - Modifies the attributes for the specified WQ.
 * @wq: The WQ to modify.
 * @wq_attr: On input, specifies the WQ attributes to modify.
 *   wq_attr->attr_mask: A bit-mask used to specify which attributes of the WQ
 *   are being modified.
 *   On output, the current values of selected WQ attributes are returned.
 *
 * Return Value
 * ibv_modify_wq() returns 0 on success, or the value of errno
 * on failure (which indicates the failure reason).
 */
static inline int ibv_modify_wq(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr)
{
        struct verbs_context *vctx = verbs_get_ctx_op(wq->context, modify_wq);

        if (!vctx)
                return ENOSYS;

        return vctx->modify_wq(wq, wq_attr);
}
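
/*
 * An illustrative sketch of moving a newly created WQ to the ready state so
 * that it can receive, assuming "wq" was returned by ibv_create_wq().
 *
 *      struct ibv_wq_attr wq_attr = {
 *              .attr_mask = IBV_WQ_ATTR_STATE,
 *              .wq_state  = IBV_WQS_RDY,
 *      };
 *
 *      if (ibv_modify_wq(wq, &wq_attr))
 *              fprintf(stderr, "failed to move WQ to RDY\n");
 */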

/*
 * ibv_destroy_wq - Destroys the specified WQ.
 * @wq: The WQ to destroy.
 *
 * Return Value
 * ibv_destroy_wq() returns 0 on success, or the value of errno
 * on failure (which indicates the failure reason).
 */
static inline int ibv_destroy_wq(struct ibv_wq *wq)
{
        struct verbs_context *vctx;

        vctx = verbs_get_ctx_op(wq->context, destroy_wq);
        if (!vctx)
                return ENOSYS;

        return vctx->destroy_wq(wq);
}

/*
 * ibv_create_rwq_ind_table - Creates a receive work queue Indirection Table
 * @context: ibv_context.
 * @init_attr: A list of initial attributes required to create the
 *   Indirection Table.
 *
 * Return Value
 * ibv_create_rwq_ind_table() returns a pointer to the created
 * Indirection Table, or NULL if the request fails.
 */
static inline struct ibv_rwq_ind_table *ibv_create_rwq_ind_table(struct ibv_context *context,
                                                                 struct ibv_rwq_ind_table_init_attr *init_attr)
{
        struct verbs_context *vctx;

        vctx = verbs_get_ctx_op(context, create_rwq_ind_table);
        if (!vctx) {
                errno = ENOSYS;
                return NULL;
        }

        return vctx->create_rwq_ind_table(context, init_attr);
}
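
/*
 * An illustrative sketch of building a four-entry indirection table from
 * existing WQs ("wq0".."wq3" and "ctx" are assumed to exist).  The table
 * size is given as a log2 value.
 *
 *      struct ibv_wq *wqs[4] = { wq0, wq1, wq2, wq3 };
 *      struct ibv_rwq_ind_table_init_attr tbl_attr = {
 *              .log_ind_tbl_size = 2,          // 1 << 2 = 4 entries
 *              .ind_tbl          = wqs,
 *      };
 *      struct ibv_rwq_ind_table *tbl = ibv_create_rwq_ind_table(ctx, &tbl_attr);
 */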

/*
 * ibv_destroy_rwq_ind_table - Destroys the specified Indirection Table.
 * @rwq_ind_table: The Indirection Table to destroy.
 *
 * Return Value
 * ibv_destroy_rwq_ind_table() returns 0 on success, or the value of errno
 * on failure (which indicates the failure reason).
 */
static inline int ibv_destroy_rwq_ind_table(struct ibv_rwq_ind_table *rwq_ind_table)
{
        struct verbs_context *vctx;

        vctx = verbs_get_ctx_op(rwq_ind_table->context, destroy_rwq_ind_table);
        if (!vctx)
                return ENOSYS;

        return vctx->destroy_rwq_ind_table(rwq_ind_table);
}

/**
 * ibv_post_send - Post a list of work requests to a send queue.
 *
 * If IBV_SEND_INLINE flag is set, the data buffers can be reused
 * immediately after the call returns.
 */
static inline int ibv_post_send(struct ibv_qp *qp, struct ibv_send_wr *wr,
                                struct ibv_send_wr **bad_wr)
{
        return qp->context->ops.post_send(qp, wr, bad_wr);
}
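
/*
 * An illustrative sketch of posting a signalled SEND, assuming "qp", "buf",
 * "msg_len" and "mr" (a memory region covering buf) exist.
 *
 *      struct ibv_sge sge = {
 *              .addr   = (uintptr_t)buf,
 *              .length = msg_len,
 *              .lkey   = mr->lkey,
 *      };
 *      struct ibv_send_wr wr = {
 *              .wr_id      = 42,
 *              .sg_list    = &sge,
 *              .num_sge    = 1,
 *              .opcode     = IBV_WR_SEND,
 *              .send_flags = IBV_SEND_SIGNALED,
 *      };
 *      struct ibv_send_wr *bad_wr;
 *
 *      if (ibv_post_send(qp, &wr, &bad_wr))
 *              fprintf(stderr, "ibv_post_send failed\n");
 */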

/**
 * ibv_post_recv - Post a list of work requests to a receive queue.
 */
static inline int ibv_post_recv(struct ibv_qp *qp, struct ibv_recv_wr *wr,
                                struct ibv_recv_wr **bad_wr)
{
        return qp->context->ops.post_recv(qp, wr, bad_wr);
}

/**
 * ibv_create_ah - Create an address handle.
 */
struct ibv_ah *ibv_create_ah(struct ibv_pd *pd, struct ibv_ah_attr *attr);

/**
 * ibv_init_ah_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @context: Device context on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 */
int ibv_init_ah_from_wc(struct ibv_context *context, uint8_t port_num,
                        struct ibv_wc *wc, struct ibv_grh *grh,
                        struct ibv_ah_attr *ah_attr);

/**
 * ibv_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ibv_ah *ibv_create_ah_from_wc(struct ibv_pd *pd, struct ibv_wc *wc,
                                     struct ibv_grh *grh, uint8_t port_num);

/**
 * ibv_destroy_ah - Destroy an address handle.
 */
int ibv_destroy_ah(struct ibv_ah *ah);

/**
 * ibv_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be a UD QP.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to route multicast packets correctly, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ibv_attach_mcast(struct ibv_qp *qp, const union ibv_gid *gid, uint16_t lid);

/**
 * ibv_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ibv_detach_mcast(struct ibv_qp *qp, const union ibv_gid *gid, uint16_t lid);

/**
 * ibv_fork_init - Prepare data structures so that fork() may be used
 * safely.  If this function is not called or returns a non-zero
 * status, then libibverbs data structures are not fork()-safe and the
 * effect of an application calling fork() is undefined.
 */
int ibv_fork_init(void);

/**
 * ibv_node_type_str - Return string describing node_type enum value
 */
const char *ibv_node_type_str(enum ibv_node_type node_type);

/**
 * ibv_port_state_str - Return string describing port_state enum value
 */
const char *ibv_port_state_str(enum ibv_port_state port_state);

/**
 * ibv_event_type_str - Return string describing event_type enum value
 */
const char *ibv_event_type_str(enum ibv_event_type event);

#define ETHERNET_LL_SIZE 6
int ibv_resolve_eth_l2_from_gid(struct ibv_context *context,
                                struct ibv_ah_attr *attr,
                                uint8_t eth_mac[ETHERNET_LL_SIZE],
                                uint16_t *vid);

static inline int ibv_is_qpt_supported(uint32_t caps, enum ibv_qp_type qpt)
{
        return !!(caps & (1 << qpt));
}

END_C_DECLS

#  undef __attribute_const

#endif /* INFINIBAND_VERBS_H */