/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __PVRDMA_DEV_API_H__
#define __PVRDMA_DEV_API_H__

#include <linux/types.h>

#include "pvrdma_verbs.h"

#define PVRDMA_VERSION			17
#define PVRDMA_BOARD_ID			1
#define PVRDMA_REV_ID			1
/*
 * Masks and accessors for the page directory, which is a two-level lookup:
 * page directory -> page table -> page. Only one directory for now, but we
 * could expand that easily. Nine bits for tables and nine bits for pages
 * give one gigabyte (with 4K pages) for memory regions and so forth.
 */

#define PVRDMA_PDIR_SHIFT		18
#define PVRDMA_PTABLE_SHIFT		9
#define PVRDMA_PAGE_DIR_DIR(x)		(((x) >> PVRDMA_PDIR_SHIFT) & 0x1)
#define PVRDMA_PAGE_DIR_TABLE(x)	(((x) >> PVRDMA_PTABLE_SHIFT) & 0x1ff)
#define PVRDMA_PAGE_DIR_PAGE(x)		((x) & 0x1ff)
#define PVRDMA_PAGE_DIR_MAX_PAGES	(1 * 512 * 512)
#define PVRDMA_MAX_FAST_REG_PAGES	128
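
/*
 * Illustrative helper (a sketch only, not part of the device ABI or the
 * driver proper; the function name is made up): split a linear page index
 * into the directory/table/page coordinates used by the two-level lookup
 * above. For example, index 1000 decomposes to directory 0, table 1,
 * page 488.
 */
static inline void pvrdma_example_split_page_idx(u64 idx, u32 *dir, u32 *tbl,
						 u32 *page)
{
	*dir  = PVRDMA_PAGE_DIR_DIR(idx);	/* idx >> 18, single bit */
	*tbl  = PVRDMA_PAGE_DIR_TABLE(idx);	/* (idx >> 9) & 0x1ff */
	*page = PVRDMA_PAGE_DIR_PAGE(idx);	/* idx & 0x1ff */
}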

/*
 * Max MSI-X vectors.
 */

#define PVRDMA_MAX_INTERRUPTS	3

/* Register offsets within PCI resource on BAR1. */
#define PVRDMA_REG_VERSION	0x00	/* R: Version of device. */
#define PVRDMA_REG_DSRLOW	0x04	/* W: Device shared region low PA. */
#define PVRDMA_REG_DSRHIGH	0x08	/* W: Device shared region high PA. */
#define PVRDMA_REG_CTL		0x0c	/* W: PVRDMA_DEVICE_CTL */
#define PVRDMA_REG_REQUEST	0x10	/* W: Indicate device request. */
#define PVRDMA_REG_ERR		0x14	/* R: Device error. */
#define PVRDMA_REG_ICR		0x18	/* R: Interrupt cause. */
#define PVRDMA_REG_IMR		0x1c	/* R/W: Interrupt mask. */
#define PVRDMA_REG_MACL		0x20	/* R/W: MAC address low. */
#define PVRDMA_REG_MACH		0x24	/* R/W: MAC address high. */
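
/*
 * Usage sketch (illustrative only; "regs" is a hypothetical ioremapped
 * BAR1 base and "dsr_pa" a shared-region physical address): the 64-bit
 * address is split across the DSRLOW/DSRHIGH registers, and the device is
 * then activated through PVRDMA_REG_CTL:
 *
 *	writel((u32)dsr_pa, regs + PVRDMA_REG_DSRLOW);
 *	writel((u32)(dsr_pa >> 32), regs + PVRDMA_REG_DSRHIGH);
 *	writel(PVRDMA_DEVICE_CTL_ACTIVATE, regs + PVRDMA_REG_CTL);
 */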

/* Object flags. */
#define PVRDMA_CQ_FLAG_ARMED_SOL	BIT(0)	/* Armed for solicited-only. */
#define PVRDMA_CQ_FLAG_ARMED		BIT(1)	/* Armed. */
#define PVRDMA_MR_FLAG_DMA		BIT(0)	/* DMA region. */
#define PVRDMA_MR_FLAG_FRMR		BIT(1)	/* Fast reg memory region. */

/*
 * Atomic operation capability (masked versions are extended atomic
 * operations).
 */

#define PVRDMA_ATOMIC_OP_COMP_SWAP	BIT(0)	/* Compare and swap. */
#define PVRDMA_ATOMIC_OP_FETCH_ADD	BIT(1)	/* Fetch and add. */
#define PVRDMA_ATOMIC_OP_MASK_COMP_SWAP	BIT(2)	/* Masked compare and swap. */
#define PVRDMA_ATOMIC_OP_MASK_FETCH_ADD	BIT(3)	/* Masked fetch and add. */

/*
 * Base Memory Management Extension flags to support Fast Reg Memory Regions
 * and Fast Reg Work Requests. Each flag represents a verb operation and we
 * must support all of them to qualify for the BMME device cap.
 */

#define PVRDMA_BMME_FLAG_LOCAL_INV	BIT(0)	/* Local Invalidate. */
#define PVRDMA_BMME_FLAG_REMOTE_INV	BIT(1)	/* Remote Invalidate. */
#define PVRDMA_BMME_FLAG_FAST_REG_WR	BIT(2)	/* Fast Reg Work Request. */
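
/*
 * Qualification sketch (illustrative only; "caps" refers to the device
 * capabilities defined further down): per the comment above, the BMME
 * device capability is claimed only when every flag is present, e.g.:
 *
 *	u8 all = PVRDMA_BMME_FLAG_LOCAL_INV | PVRDMA_BMME_FLAG_REMOTE_INV |
 *		 PVRDMA_BMME_FLAG_FAST_REG_WR;
 *	bool bmme_ok = (caps->bmme_flags & all) == all;
 */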

/*
 * GID types. The interpretation of the gid_types bit field in the device
 * capabilities will depend on the device mode. For now, the device only
 * supports RoCE mode, so only the different GID types for RoCE are
 * defined.
 */

#define PVRDMA_GID_TYPE_FLAG_ROCE_V1	BIT(0)
#define PVRDMA_GID_TYPE_FLAG_ROCE_V2	BIT(1)

enum pvrdma_pci_resource {
	PVRDMA_PCI_RESOURCE_MSIX,	/* BAR0: MSI-X, MMIO. */
	PVRDMA_PCI_RESOURCE_REG,	/* BAR1: Registers, MMIO. */
	PVRDMA_PCI_RESOURCE_UAR,	/* BAR2: UAR pages, MMIO, 64-bit. */
	PVRDMA_PCI_RESOURCE_LAST,	/* Last. */
};

enum pvrdma_device_ctl {
	PVRDMA_DEVICE_CTL_ACTIVATE,	/* Activate device. */
	PVRDMA_DEVICE_CTL_UNQUIESCE,	/* Unquiesce device. */
	PVRDMA_DEVICE_CTL_RESET,	/* Reset device. */
};

enum pvrdma_intr_vector {
	PVRDMA_INTR_VECTOR_RESPONSE,	/* Command response. */
	PVRDMA_INTR_VECTOR_ASYNC,	/* Async events. */
	PVRDMA_INTR_VECTOR_CQ,		/* CQ notification. */
	/* Additional CQ notification vectors. */
};

enum pvrdma_intr_cause {
	PVRDMA_INTR_CAUSE_RESPONSE	= (1 << PVRDMA_INTR_VECTOR_RESPONSE),
	PVRDMA_INTR_CAUSE_ASYNC		= (1 << PVRDMA_INTR_VECTOR_ASYNC),
	PVRDMA_INTR_CAUSE_CQ		= (1 << PVRDMA_INTR_VECTOR_CQ),
};
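
/*
 * Illustrative mapping (a sketch only; the helper name is made up): each
 * interrupt cause above is simply its vector number used as a bit
 * position, so a cause mask can be derived directly from a vector index.
 */
static inline u32 pvrdma_example_vector_to_cause(u32 vector)
{
	/* e.g. PVRDMA_INTR_VECTOR_CQ -> PVRDMA_INTR_CAUSE_CQ */
	return 1U << vector;
}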

enum pvrdma_gos_bits {
	PVRDMA_GOS_BITS_UNK,		/* Unknown. */
	PVRDMA_GOS_BITS_32,		/* 32-bit. */
	PVRDMA_GOS_BITS_64,		/* 64-bit. */
};

enum pvrdma_gos_type {
	PVRDMA_GOS_TYPE_UNK,		/* Unknown. */
	PVRDMA_GOS_TYPE_LINUX,		/* Linux. */
};

enum pvrdma_device_mode {
	PVRDMA_DEVICE_MODE_ROCE,	/* RoCE. */
	PVRDMA_DEVICE_MODE_IWARP,	/* iWarp. */
	PVRDMA_DEVICE_MODE_IB,		/* InfiniBand. */
};

struct pvrdma_gos_info {
	u32 gos_bits:2;			/* W: PVRDMA_GOS_BITS_ */
	u32 gos_type:4;			/* W: PVRDMA_GOS_TYPE_ */
	u32 gos_ver:16;			/* W: Guest OS version. */
	u32 gos_misc:10;		/* W: Other. */
	u32 pad;			/* Pad to 8-byte alignment. */
};
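
/*
 * Illustrative initializer (a sketch only; the helper name and the version
 * value are made up): describe a 64-bit Linux guest to the device.
 */
static inline void pvrdma_example_fill_gos_info(struct pvrdma_gos_info *gos)
{
	gos->gos_bits = PVRDMA_GOS_BITS_64;	/* 64-bit guest. */
	gos->gos_type = PVRDMA_GOS_TYPE_LINUX;	/* Linux guest. */
	gos->gos_ver = 1;			/* Hypothetical guest OS version. */
	gos->gos_misc = 0;
	gos->pad = 0;
}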

struct pvrdma_device_caps {
	u64 fw_ver;				/* R: Query device. */
	__be64 node_guid;
	__be64 sys_image_guid;
	u64 max_mr_size;
	u64 page_size_cap;
	u64 atomic_arg_sizes;			/* EX verbs. */
	u32 ex_comp_mask;			/* EX verbs. */
	u32 device_cap_flags2;			/* EX verbs. */
	u32 max_fa_bit_boundary;		/* EX verbs. */
	u32 log_max_atomic_inline_arg;		/* EX verbs. */
	u32 vendor_id;
	u32 vendor_part_id;
	u32 hw_ver;
	u32 max_qp;
	u32 max_qp_wr;
	u32 device_cap_flags;
	u32 max_sge;
	u32 max_sge_rd;
	u32 max_cq;
	u32 max_cqe;
	u32 max_mr;
	u32 max_pd;
	u32 max_qp_rd_atom;
	u32 max_ee_rd_atom;
	u32 max_res_rd_atom;
	u32 max_qp_init_rd_atom;
	u32 max_ee_init_rd_atom;
	u32 max_ee;
	u32 max_rdd;
	u32 max_mw;
	u32 max_raw_ipv6_qp;
	u32 max_raw_ethy_qp;
	u32 max_mcast_grp;
	u32 max_mcast_qp_attach;
	u32 max_total_mcast_qp_attach;
	u32 max_ah;
	u32 max_fmr;
	u32 max_map_per_fmr;
	u32 max_srq;
	u32 max_srq_wr;
	u32 max_srq_sge;
	u32 max_uar;
	u32 gid_tbl_len;
	u16 max_pkeys;
	u8  local_ca_ack_delay;
	u8  phys_port_cnt;
	u8  mode;				/* PVRDMA_DEVICE_MODE_ */
	u8  atomic_ops;				/* PVRDMA_ATOMIC_OP_* bits */
	u8  bmme_flags;				/* FRWR Mem Mgmt Extensions */
	u8  gid_types;				/* PVRDMA_GID_TYPE_FLAG_ */
	u8  reserved[4];
};
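
/*
 * Capability-check sketch (illustrative only; the helper name is made up):
 * atomic_ops above is tested against the PVRDMA_ATOMIC_OP_* bits defined
 * earlier, here requiring both basic atomic operations.
 */
static inline bool pvrdma_example_caps_have_atomics(const struct pvrdma_device_caps *caps)
{
	u8 need = PVRDMA_ATOMIC_OP_COMP_SWAP | PVRDMA_ATOMIC_OP_FETCH_ADD;

	return (caps->atomic_ops & need) == need;
}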

struct pvrdma_ring_page_info {
	u32 num_pages;				/* Num pages incl. header. */
	u32 reserved;				/* Reserved. */
	u64 pdir_dma;				/* Page directory PA. */
};

#pragma pack(push, 1)

struct pvrdma_device_shared_region {
	u32 driver_version;			/* W: Driver version. */
	u32 pad;				/* Pad to 8-byte align. */
	struct pvrdma_gos_info gos_info;	/* W: Guest OS information. */
	u64 cmd_slot_dma;			/* W: Command slot address. */
	u64 resp_slot_dma;			/* W: Response slot address. */
	struct pvrdma_ring_page_info async_ring_pages;
						/* W: Async ring page info. */
	struct pvrdma_ring_page_info cq_ring_pages;
						/* W: CQ ring page info. */
	u32 uar_pfn;				/* W: UAR pageframe. */
	u32 pad2;				/* Pad to 8-byte align. */
	struct pvrdma_device_caps caps;		/* R: Device capabilities. */
};

#pragma pack(pop)
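
/*
 * Setup sketch (illustrative only; allocation, DMA mapping and the ring
 * page directories are driver-specific and omitted, and names such as
 * cmd_slot_pa are placeholders): the driver fills the writable fields and
 * then hands the region's physical address to the device through
 * PVRDMA_REG_DSRLOW/PVRDMA_REG_DSRHIGH:
 *
 *	dsr->driver_version = PVRDMA_VERSION;
 *	dsr->gos_info.gos_bits = PVRDMA_GOS_BITS_64;
 *	dsr->gos_info.gos_type = PVRDMA_GOS_TYPE_LINUX;
 *	dsr->cmd_slot_dma = cmd_slot_pa;
 *	dsr->resp_slot_dma = resp_slot_pa;
 *	dsr->uar_pfn = uar_pa >> PAGE_SHIFT;
 */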

/* Event types. Currently a 1:1 mapping with enum ib_event. */
enum pvrdma_eqe_type {
	PVRDMA_EVENT_CQ_ERR,
	PVRDMA_EVENT_QP_FATAL,
	PVRDMA_EVENT_QP_REQ_ERR,
	PVRDMA_EVENT_QP_ACCESS_ERR,
	PVRDMA_EVENT_COMM_EST,
	PVRDMA_EVENT_SQ_DRAINED,
	PVRDMA_EVENT_PATH_MIG,
	PVRDMA_EVENT_PATH_MIG_ERR,
	PVRDMA_EVENT_DEVICE_FATAL,
	PVRDMA_EVENT_PORT_ACTIVE,
	PVRDMA_EVENT_PORT_ERR,
	PVRDMA_EVENT_LID_CHANGE,
	PVRDMA_EVENT_PKEY_CHANGE,
	PVRDMA_EVENT_SM_CHANGE,
	PVRDMA_EVENT_SRQ_ERR,
	PVRDMA_EVENT_SRQ_LIMIT_REACHED,
	PVRDMA_EVENT_QP_LAST_WQE_REACHED,
	PVRDMA_EVENT_CLIENT_REREGISTER,
	PVRDMA_EVENT_GID_CHANGE,
};

/* Event queue element. */
struct pvrdma_eqe {
	u32 type;	/* Event type. */
	u32 info;	/* Handle, other. */
};

/* CQ notification queue element. */
struct pvrdma_cqne {
	u32 info;	/* Handle */
};

enum {
	PVRDMA_CMD_FIRST,
	PVRDMA_CMD_QUERY_PORT = PVRDMA_CMD_FIRST,
	PVRDMA_CMD_QUERY_PKEY,
	PVRDMA_CMD_CREATE_PD,
	PVRDMA_CMD_DESTROY_PD,
	PVRDMA_CMD_CREATE_MR,
	PVRDMA_CMD_DESTROY_MR,
	PVRDMA_CMD_CREATE_CQ,
	PVRDMA_CMD_RESIZE_CQ,
	PVRDMA_CMD_DESTROY_CQ,
	PVRDMA_CMD_CREATE_QP,
	PVRDMA_CMD_MODIFY_QP,
	PVRDMA_CMD_QUERY_QP,
	PVRDMA_CMD_DESTROY_QP,
	PVRDMA_CMD_CREATE_UC,
	PVRDMA_CMD_DESTROY_UC,
	PVRDMA_CMD_CREATE_BIND,
	PVRDMA_CMD_DESTROY_BIND,
	PVRDMA_CMD_MAX,
};

enum {
	PVRDMA_CMD_FIRST_RESP = (1 << 31),
	PVRDMA_CMD_QUERY_PORT_RESP = PVRDMA_CMD_FIRST_RESP,
	PVRDMA_CMD_QUERY_PKEY_RESP,
	PVRDMA_CMD_CREATE_PD_RESP,
	PVRDMA_CMD_DESTROY_PD_RESP_NOOP,
	PVRDMA_CMD_CREATE_MR_RESP,
	PVRDMA_CMD_DESTROY_MR_RESP_NOOP,
	PVRDMA_CMD_CREATE_CQ_RESP,
	PVRDMA_CMD_RESIZE_CQ_RESP,
	PVRDMA_CMD_DESTROY_CQ_RESP_NOOP,
	PVRDMA_CMD_CREATE_QP_RESP,
	PVRDMA_CMD_MODIFY_QP_RESP,
	PVRDMA_CMD_QUERY_QP_RESP,
	PVRDMA_CMD_DESTROY_QP_RESP,
	PVRDMA_CMD_CREATE_UC_RESP,
	PVRDMA_CMD_DESTROY_UC_RESP_NOOP,
	PVRDMA_CMD_CREATE_BIND_RESP_NOOP,
	PVRDMA_CMD_DESTROY_BIND_RESP_NOOP,
	PVRDMA_CMD_MAX_RESP,
};
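
/*
 * Illustrative relationship (a sketch only; the helper name is made up):
 * the two enums above parallel each other, so a response opcode is its
 * request opcode with the top bit set.
 */
static inline u32 pvrdma_example_cmd_to_resp(u32 cmd)
{
	/* e.g. PVRDMA_CMD_CREATE_PD -> PVRDMA_CMD_CREATE_PD_RESP */
	return cmd | PVRDMA_CMD_FIRST_RESP;
}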

struct pvrdma_cmd_hdr {
	u64 response;		/* Key for response lookup. */
	u32 cmd;		/* PVRDMA_CMD_ */
	u32 reserved;		/* Reserved. */
};

struct pvrdma_cmd_resp_hdr {
	u64 response;		/* From cmd hdr. */
	u32 ack;		/* PVRDMA_CMD_XXX_RESP */
	u8 err;			/* Error. */
	u8 reserved[3];		/* Reserved. */
};
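
/*
 * Completion-check sketch (illustrative only; the error handling policy is
 * a guess, not taken from this header): a response is matched by its ack
 * value and a zero err byte, e.g. for a PD creation:
 *
 *	if (resp->hdr.ack != PVRDMA_CMD_CREATE_PD_RESP || resp->hdr.err)
 *		return -EIO;
 */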

struct pvrdma_cmd_query_port {
	struct pvrdma_cmd_hdr hdr;
	u8 port_num;
	u8 reserved[7];
};

struct pvrdma_cmd_query_port_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	struct pvrdma_port_attr attrs;
};
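
/*
 * Request sketch (illustrative only; the helper name is made up and *req
 * is assumed to have been zero-initialized by the caller): a port query
 * carries only the common header plus the port number.
 */
static inline void pvrdma_example_init_query_port(struct pvrdma_cmd_query_port *req,
						  u8 port_num)
{
	req->hdr.cmd = PVRDMA_CMD_QUERY_PORT;
	req->port_num = port_num;
}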

struct pvrdma_cmd_query_pkey {
	struct pvrdma_cmd_hdr hdr;
	u8 port_num;
	u8 index;
	u8 reserved[6];
};

struct pvrdma_cmd_query_pkey_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u16 pkey;
	u8 reserved[6];
};

struct pvrdma_cmd_create_uc {
	struct pvrdma_cmd_hdr hdr;
	u32 pfn; /* UAR page frame number */
	u8 reserved[4];
};

struct pvrdma_cmd_create_uc_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 ctx_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_destroy_uc {
	struct pvrdma_cmd_hdr hdr;
	u32 ctx_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_create_pd {
	struct pvrdma_cmd_hdr hdr;
	u32 ctx_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_create_pd_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 pd_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_destroy_pd {
	struct pvrdma_cmd_hdr hdr;
	u32 pd_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_create_mr {
	struct pvrdma_cmd_hdr hdr;
	u64 start;
	u64 length;
	u64 pdir_dma;
	u32 pd_handle;
	u32 access_flags;
	u32 flags;
	u32 nchunks;
};

struct pvrdma_cmd_create_mr_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 mr_handle;
	u32 lkey;
	u32 rkey;
	u8 reserved[4];
};

struct pvrdma_cmd_destroy_mr {
	struct pvrdma_cmd_hdr hdr;
	u32 mr_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_create_cq {
	struct pvrdma_cmd_hdr hdr;
	u64 pdir_dma;
	u32 ctx_handle;
	u32 cqe;
	u32 nchunks;
	u8 reserved[4];
};

struct pvrdma_cmd_create_cq_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 cq_handle;
	u32 cqe;
};

struct pvrdma_cmd_resize_cq {
	struct pvrdma_cmd_hdr hdr;
	u32 cq_handle;
	u32 cqe;
};

struct pvrdma_cmd_resize_cq_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 cqe;
	u8 reserved[4];
};

struct pvrdma_cmd_destroy_cq {
	struct pvrdma_cmd_hdr hdr;
	u32 cq_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_create_qp {
	struct pvrdma_cmd_hdr hdr;
	u64 pdir_dma;
	u32 pd_handle;
	u32 send_cq_handle;
	u32 recv_cq_handle;
	u32 srq_handle;
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_send_sge;
	u32 max_recv_sge;
	u32 max_inline_data;
	u32 lkey;
	u32 access_flags;
	u16 total_chunks;
	u16 send_chunks;
	u16 max_atomic_arg;
	u8 sq_sig_all;
	u8 qp_type;
	u8 is_srq;
	u8 reserved[3];
};

struct pvrdma_cmd_create_qp_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 qpn;
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_send_sge;
	u32 max_recv_sge;
	u32 max_inline_data;
};

struct pvrdma_cmd_modify_qp {
	struct pvrdma_cmd_hdr hdr;
	u32 qp_handle;
	u32 attr_mask;
	struct pvrdma_qp_attr attrs;
};

struct pvrdma_cmd_query_qp {
	struct pvrdma_cmd_hdr hdr;
	u32 qp_handle;
	u32 attr_mask;
};

struct pvrdma_cmd_query_qp_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	struct pvrdma_qp_attr attrs;
};

struct pvrdma_cmd_destroy_qp {
	struct pvrdma_cmd_hdr hdr;
	u32 qp_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_destroy_qp_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 events_reported;
	u8 reserved[4];
};

struct pvrdma_cmd_create_bind {
	struct pvrdma_cmd_hdr hdr;
	u32 mtu;
	u32 vlan;
	u32 index;
	u8 new_gid[16];
	u8 gid_type;
	u8 reserved[3];
};
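
/*
 * Binding sketch (illustrative only; "gid" is a hypothetical 16-byte GID
 * buffer): gid_type is assumed to carry one of the PVRDMA_GID_TYPE_FLAG_
 * values defined above, e.g. for a RoCEv2 GID:
 *
 *	bind.gid_type = PVRDMA_GID_TYPE_FLAG_ROCE_V2;
 *	memcpy(bind.new_gid, gid, sizeof(bind.new_gid));
 */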

struct pvrdma_cmd_destroy_bind {
	struct pvrdma_cmd_hdr hdr;
	u32 index;
	u8 dest_gid[16];
	u8 reserved[4];
};

union pvrdma_cmd_req {
	struct pvrdma_cmd_hdr hdr;
	struct pvrdma_cmd_query_port query_port;
	struct pvrdma_cmd_query_pkey query_pkey;
	struct pvrdma_cmd_create_uc create_uc;
	struct pvrdma_cmd_destroy_uc destroy_uc;
	struct pvrdma_cmd_create_pd create_pd;
	struct pvrdma_cmd_destroy_pd destroy_pd;
	struct pvrdma_cmd_create_mr create_mr;
	struct pvrdma_cmd_destroy_mr destroy_mr;
	struct pvrdma_cmd_create_cq create_cq;
	struct pvrdma_cmd_resize_cq resize_cq;
	struct pvrdma_cmd_destroy_cq destroy_cq;
	struct pvrdma_cmd_create_qp create_qp;
	struct pvrdma_cmd_modify_qp modify_qp;
	struct pvrdma_cmd_query_qp query_qp;
	struct pvrdma_cmd_destroy_qp destroy_qp;
	struct pvrdma_cmd_create_bind create_bind;
	struct pvrdma_cmd_destroy_bind destroy_bind;
};

union pvrdma_cmd_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	struct pvrdma_cmd_query_port_resp query_port_resp;
	struct pvrdma_cmd_query_pkey_resp query_pkey_resp;
	struct pvrdma_cmd_create_uc_resp create_uc_resp;
	struct pvrdma_cmd_create_pd_resp create_pd_resp;
	struct pvrdma_cmd_create_mr_resp create_mr_resp;
	struct pvrdma_cmd_create_cq_resp create_cq_resp;
	struct pvrdma_cmd_resize_cq_resp resize_cq_resp;
	struct pvrdma_cmd_create_qp_resp create_qp_resp;
	struct pvrdma_cmd_query_qp_resp query_qp_resp;
	struct pvrdma_cmd_destroy_qp_resp destroy_qp_resp;
};
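
/*
 * Dispatch sketch (illustrative only; posting the request and waiting for
 * the response are handled by the command transport, which lives outside
 * this header, and ctx_handle/pd_handle are placeholders): callers fill one
 * request member and read the matching *_resp member back, e.g. for
 * protection-domain creation:
 *
 *	union pvrdma_cmd_req req = {};
 *	union pvrdma_cmd_resp rsp;
 *
 *	req.create_pd.hdr.cmd = PVRDMA_CMD_CREATE_PD;
 *	req.create_pd.ctx_handle = ctx_handle;
 *	... post req, wait for rsp ...
 *	pd_handle = rsp.create_pd_resp.pd_handle;
 */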

#endif /* __PVRDMA_DEV_API_H__ */