xref: /linux/include/uapi/rdma/vmw_pvrdma-abi.h (revision 24bce201d79807b668bf9d9e0aca801c5c0d5f78)
/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __VMW_PVRDMA_ABI_H__
#define __VMW_PVRDMA_ABI_H__

#include <linux/types.h>

#define PVRDMA_UVERBS_ABI_VERSION	3		/* ABI Version. */
#define PVRDMA_UAR_HANDLE_MASK		0x00FFFFFF	/* Bottom 24 bits. */
#define PVRDMA_UAR_QP_OFFSET		0		/* QP doorbell. */
#define PVRDMA_UAR_QP_SEND		(1 << 30)	/* Send bit. */
#define PVRDMA_UAR_QP_RECV		(1 << 31)	/* Recv bit. */
#define PVRDMA_UAR_CQ_OFFSET		4		/* CQ doorbell. */
#define PVRDMA_UAR_CQ_ARM_SOL		(1 << 29)	/* Arm solicited bit. */
#define PVRDMA_UAR_CQ_ARM		(1 << 30)	/* Arm bit. */
#define PVRDMA_UAR_CQ_POLL		(1 << 31)	/* Poll bit. */
#define PVRDMA_UAR_SRQ_OFFSET		8		/* SRQ doorbell. */
#define PVRDMA_UAR_SRQ_RECV		(1 << 30)	/* Recv bit. */

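/*
 * Illustrative sketch (not part of this ABI): a UAR doorbell value is a
 * handle in the bottom 24 bits (PVRDMA_UAR_HANDLE_MASK) ORed with the
 * relevant operation bit, written at the per-resource offset within the
 * mapped UAR page (here PVRDMA_UAR_QP_OFFSET for the QP doorbell).  The
 * helper name and calling convention below are hypothetical; how the
 * UAR page is mapped and written is provider-specific.
 */
static inline __u32 pvrdma_example_qp_send_db(__u32 qp_handle)
{
	return (qp_handle & PVRDMA_UAR_HANDLE_MASK) | PVRDMA_UAR_QP_SEND;
}
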
enum pvrdma_wr_opcode {
	PVRDMA_WR_RDMA_WRITE,
	PVRDMA_WR_RDMA_WRITE_WITH_IMM,
	PVRDMA_WR_SEND,
	PVRDMA_WR_SEND_WITH_IMM,
	PVRDMA_WR_RDMA_READ,
	PVRDMA_WR_ATOMIC_CMP_AND_SWP,
	PVRDMA_WR_ATOMIC_FETCH_AND_ADD,
	PVRDMA_WR_LSO,
	PVRDMA_WR_SEND_WITH_INV,
	PVRDMA_WR_RDMA_READ_WITH_INV,
	PVRDMA_WR_LOCAL_INV,
	PVRDMA_WR_FAST_REG_MR,
	PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP,
	PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD,
	PVRDMA_WR_BIND_MW,
	PVRDMA_WR_REG_SIG_MR,
	PVRDMA_WR_ERROR,
};

enum pvrdma_wc_status {
	PVRDMA_WC_SUCCESS,
	PVRDMA_WC_LOC_LEN_ERR,
	PVRDMA_WC_LOC_QP_OP_ERR,
	PVRDMA_WC_LOC_EEC_OP_ERR,
	PVRDMA_WC_LOC_PROT_ERR,
	PVRDMA_WC_WR_FLUSH_ERR,
	PVRDMA_WC_MW_BIND_ERR,
	PVRDMA_WC_BAD_RESP_ERR,
	PVRDMA_WC_LOC_ACCESS_ERR,
	PVRDMA_WC_REM_INV_REQ_ERR,
	PVRDMA_WC_REM_ACCESS_ERR,
	PVRDMA_WC_REM_OP_ERR,
	PVRDMA_WC_RETRY_EXC_ERR,
	PVRDMA_WC_RNR_RETRY_EXC_ERR,
	PVRDMA_WC_LOC_RDD_VIOL_ERR,
	PVRDMA_WC_REM_INV_RD_REQ_ERR,
	PVRDMA_WC_REM_ABORT_ERR,
	PVRDMA_WC_INV_EECN_ERR,
	PVRDMA_WC_INV_EEC_STATE_ERR,
	PVRDMA_WC_FATAL_ERR,
	PVRDMA_WC_RESP_TIMEOUT_ERR,
	PVRDMA_WC_GENERAL_ERR,
};

enum pvrdma_wc_opcode {
	PVRDMA_WC_SEND,
	PVRDMA_WC_RDMA_WRITE,
	PVRDMA_WC_RDMA_READ,
	PVRDMA_WC_COMP_SWAP,
	PVRDMA_WC_FETCH_ADD,
	PVRDMA_WC_BIND_MW,
	PVRDMA_WC_LSO,
	PVRDMA_WC_LOCAL_INV,
	PVRDMA_WC_FAST_REG_MR,
	PVRDMA_WC_MASKED_COMP_SWAP,
	PVRDMA_WC_MASKED_FETCH_ADD,
	PVRDMA_WC_RECV = 1 << 7,
	PVRDMA_WC_RECV_RDMA_WITH_IMM,
};

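/*
 * Illustrative sketch (not part of this ABI): because PVRDMA_WC_RECV is
 * defined as 1 << 7, receive-class completion opcodes (PVRDMA_WC_RECV
 * and PVRDMA_WC_RECV_RDMA_WITH_IMM) can be distinguished from send-side
 * opcodes with a simple bit test.  The helper name is hypothetical.
 */
static inline int pvrdma_example_wc_is_recv(__u32 wc_opcode)
{
	return (wc_opcode & PVRDMA_WC_RECV) != 0;
}
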
enum pvrdma_wc_flags {
	PVRDMA_WC_GRH			= 1 << 0,
	PVRDMA_WC_WITH_IMM		= 1 << 1,
	PVRDMA_WC_WITH_INVALIDATE	= 1 << 2,
	PVRDMA_WC_IP_CSUM_OK		= 1 << 3,
	PVRDMA_WC_WITH_SMAC		= 1 << 4,
	PVRDMA_WC_WITH_VLAN		= 1 << 5,
	PVRDMA_WC_WITH_NETWORK_HDR_TYPE	= 1 << 6,
	PVRDMA_WC_FLAGS_MAX		= PVRDMA_WC_WITH_NETWORK_HDR_TYPE,
};

enum pvrdma_network_type {
	PVRDMA_NETWORK_IB,
	PVRDMA_NETWORK_ROCE_V1 = PVRDMA_NETWORK_IB,
	PVRDMA_NETWORK_IPV4,
	PVRDMA_NETWORK_IPV6
};

/* Response for allocating a user context. */
struct pvrdma_alloc_ucontext_resp {
	__u32 qp_tab_size;
	__u32 reserved;
};

/* Response for allocating a protection domain. */
struct pvrdma_alloc_pd_resp {
	__u32 pdn;
	__u32 reserved;
};

/* Request for creating a completion queue. */
struct pvrdma_create_cq {
	__aligned_u64 buf_addr;
	__u32 buf_size;
	__u32 reserved;
};

/* Response for creating a completion queue. */
struct pvrdma_create_cq_resp {
	__u32 cqn;
	__u32 reserved;
};

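/*
 * Illustrative sketch (not part of this ABI): a user-space provider
 * would typically pass the address and size of its user-allocated CQ
 * ring buffer in struct pvrdma_create_cq as the driver-private part of
 * the create-CQ command, and read the assigned CQ number back from
 * struct pvrdma_create_cq_resp.  Only the request fill is shown; how
 * the buffer is allocated and how the command is issued are
 * provider-specific and assumed here, and the helper name is
 * hypothetical.
 */
static inline void pvrdma_example_fill_create_cq(struct pvrdma_create_cq *cmd,
						 __u64 buf_addr, __u32 buf_size)
{
	cmd->buf_addr = buf_addr;
	cmd->buf_size = buf_size;
	cmd->reserved = 0;
}
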
/* Request for resizing a completion queue. */
struct pvrdma_resize_cq {
	__aligned_u64 buf_addr;
	__u32 buf_size;
	__u32 reserved;
};

/* Request for creating a shared receive queue. */
struct pvrdma_create_srq {
	__aligned_u64 buf_addr;
	__u32 buf_size;
	__u32 reserved;
};

/* Response for creating a shared receive queue. */
struct pvrdma_create_srq_resp {
	__u32 srqn;
	__u32 reserved;
};

/* Request for creating a queue pair. */
struct pvrdma_create_qp {
	__aligned_u64 rbuf_addr;
	__aligned_u64 sbuf_addr;
	__u32 rbuf_size;
	__u32 sbuf_size;
	__aligned_u64 qp_addr;
};

/* Response for creating a queue pair. */
struct pvrdma_create_qp_resp {
	__u32 qpn;
	__u32 qp_handle;
};

/* PVRDMA masked atomic compare and swap */
struct pvrdma_ex_cmp_swap {
	__aligned_u64 swap_val;
	__aligned_u64 compare_val;
	__aligned_u64 swap_mask;
	__aligned_u64 compare_mask;
};

/* PVRDMA masked atomic fetch and add */
struct pvrdma_ex_fetch_add {
	__aligned_u64 add_val;
	__aligned_u64 field_boundary;
};

/* PVRDMA address vector. */
struct pvrdma_av {
	__u32 port_pd;
	__u32 sl_tclass_flowlabel;
	__u8 dgid[16];
	__u8 src_path_bits;
	__u8 gid_index;
	__u8 stat_rate;
	__u8 hop_limit;
	__u8 dmac[6];
	__u8 reserved[6];
};

/* PVRDMA scatter/gather entry */
struct pvrdma_sge {
	__aligned_u64 addr;
	__u32   length;
	__u32   lkey;
};

/* PVRDMA receive queue work request */
struct pvrdma_rq_wqe_hdr {
	__aligned_u64 wr_id;		/* wr id */
	__u32 num_sge;		/* size of s/g array */
	__u32 total_len;	/* reserved */
};
/* Use pvrdma_sge (ib_sge) for receive queue s/g array elements. */

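/*
 * Illustrative sketch (not part of this ABI): a receive WQE is the
 * header above followed by num_sge struct pvrdma_sge entries, per the
 * comment above.  The wrapper layout and helper below are hypothetical
 * and only show how the header and the in-line s/g array relate; ring
 * slot sizing and placement are provider-specific.
 */
struct pvrdma_example_rq_wqe {
	struct pvrdma_rq_wqe_hdr hdr;
	struct pvrdma_sge sg_list[];	/* hdr.num_sge entries follow */
};

static inline void pvrdma_example_init_rq_wqe(struct pvrdma_example_rq_wqe *wqe,
					      __u64 wr_id, __u32 num_sge)
{
	wqe->hdr.wr_id = wr_id;
	wqe->hdr.num_sge = num_sge;
	wqe->hdr.total_len = 0;		/* reserved */
}
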
/* PVRDMA send queue work request */
struct pvrdma_sq_wqe_hdr {
	__aligned_u64 wr_id;		/* wr id */
	__u32 num_sge;		/* size of s/g array */
	__u32 total_len;	/* reserved */
	__u32 opcode;		/* operation type */
	__u32 send_flags;	/* wr flags */
	union {
		__be32 imm_data;
		__u32 invalidate_rkey;
	} ex;
	__u32 reserved;
	union {
		struct {
			__aligned_u64 remote_addr;
			__u32 rkey;
			__u8 reserved[4];
		} rdma;
		struct {
			__aligned_u64 remote_addr;
			__aligned_u64 compare_add;
			__aligned_u64 swap;
			__u32 rkey;
			__u32 reserved;
		} atomic;
		struct {
			__aligned_u64 remote_addr;
			__u32 log_arg_sz;
			__u32 rkey;
			union {
				struct pvrdma_ex_cmp_swap  cmp_swap;
				struct pvrdma_ex_fetch_add fetch_add;
			} wr_data;
		} masked_atomics;
		struct {
			__aligned_u64 iova_start;
			__aligned_u64 pl_pdir_dma;
			__u32 page_shift;
			__u32 page_list_len;
			__u32 length;
			__u32 access_flags;
			__u32 rkey;
			__u32 reserved;
		} fast_reg;
		struct {
			__u32 remote_qpn;
			__u32 remote_qkey;
			struct pvrdma_av av;
		} ud;
	} wr;
};
/* Use pvrdma_sge (ib_sge) for send queue s/g array elements. */

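/*
 * Illustrative sketch (not part of this ABI): like the receive side, a
 * send WQE is the header above followed by num_sge struct pvrdma_sge
 * entries.  The helper below fills the header for an RDMA write using
 * the wr.rdma member of the union; the helper name, parameter set and
 * choice of send_flags are assumptions, and a real provider would
 * likely zero the whole WQE slot before filling it.
 */
static inline void pvrdma_example_init_rdma_write(struct pvrdma_sq_wqe_hdr *hdr,
						  __u64 wr_id, __u32 num_sge,
						  __u64 remote_addr, __u32 rkey)
{
	hdr->wr_id = wr_id;
	hdr->num_sge = num_sge;
	hdr->total_len = 0;			/* reserved */
	hdr->opcode = PVRDMA_WR_RDMA_WRITE;
	hdr->send_flags = 0;
	hdr->ex.imm_data = 0;
	hdr->reserved = 0;
	hdr->wr.rdma.remote_addr = remote_addr;
	hdr->wr.rdma.rkey = rkey;
}
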
/* Completion queue element. */
struct pvrdma_cqe {
	__aligned_u64 wr_id;
	__aligned_u64 qp;
	__u32 opcode;
	__u32 status;
	__u32 byte_len;
	__be32 imm_data;
	__u32 src_qp;
	__u32 wc_flags;
	__u32 vendor_err;
	__u16 pkey_index;
	__u16 slid;
	__u8 sl;
	__u8 dlid_path_bits;
	__u8 port_num;
	__u8 smac[6];
	__u8 network_hdr_type;
	__u8 reserved2[6]; /* Pad to next power of 2 (64). */
};

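/*
 * Illustrative sketch (not part of this ABI): minimal checks a consumer
 * might apply to a polled CQE.  status takes values from
 * enum pvrdma_wc_status and wc_flags is a bitmask of
 * enum pvrdma_wc_flags; ring indexing and how the CQE is fetched are
 * provider-specific and out of scope, and the helper names are
 * hypothetical.
 */
static inline int pvrdma_example_cqe_ok(const struct pvrdma_cqe *cqe)
{
	return cqe->status == PVRDMA_WC_SUCCESS;
}

static inline int pvrdma_example_cqe_has_imm(const struct pvrdma_cqe *cqe)
{
	return (cqe->wc_flags & PVRDMA_WC_WITH_IMM) != 0;
}
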
#endif /* __VMW_PVRDMA_ABI_H__ */