/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef __RDMA_COMMON__
#define __RDMA_COMMON__
/************************************************************************/
/* Common RDMA definitions, shared by the eCore and the protocol RDMA driver */
/************************************************************************/

#define RDMA_RESERVED_LKEY              (0)             // Reserved lkey
#define RDMA_RING_PAGE_SIZE             (0x1000)        // 4KB pages

#define RDMA_MAX_SGE_PER_SQ_WQE         (4)             // max number of SGEs in a single request
#define RDMA_MAX_SGE_PER_RQ_WQE         (4)             // max number of SGEs in a single request

#define RDMA_MAX_DATA_SIZE_IN_WQE       (0x80000000)    // max size of data in a single request

#define RDMA_REQ_RD_ATOMIC_ELM_SIZE     (0x50)
#define RDMA_RESP_RD_ATOMIC_ELM_SIZE    (0x20)

#define RDMA_MAX_CQS                    (64*1024)
#define RDMA_MAX_TIDS                   (128*1024-1)
#define RDMA_MAX_PDS                    (64*1024)
#define RDMA_MAX_XRC_SRQS               (1024)
#define RDMA_MAX_SRQS                   (32*1024)

#define RDMA_NUM_STATISTIC_COUNTERS     MAX_NUM_VPORTS
#define RDMA_NUM_STATISTIC_COUNTERS_K2  MAX_NUM_VPORTS_K2
#define RDMA_NUM_STATISTIC_COUNTERS_BB  MAX_NUM_VPORTS_BB

#define RDMA_TASK_TYPE (PROTOCOLID_ROCE)

struct rdma_srq_id
{
	__le16 srq_idx /* SRQ index */;
	__le16 opaque_fid;
};

struct rdma_srq_producers
{
	__le32 sge_prod /* Current produced SGE in SRQ */;
	__le32 wqe_prod /* Current produced WQE to SRQ */;
};

/*
 * rdma completion notification queue element
 */
struct rdma_cnqe
{
	struct regpair cq_handle;
};
77 
78 struct rdma_cqe_responder
79 {
80 	struct regpair srq_wr_id;
81 	struct regpair qp_handle;
82 	__le32 imm_data_or_inv_r_Key /* immediate data in case imm_flg is set, or invalidated r_key in case inv_flg is set */;
83 	__le32 length;
84 	__le32 imm_data_hi /* High bytes of immediate data in case imm_flg is set in iWARP only */;
85 	__le16 rq_cons /* Valid only when status is WORK_REQUEST_FLUSHED_ERR. Indicates an aggregative flush on all posted RQ WQEs until the reported rq_cons. */;
86 	u8 flags;
87 #define RDMA_CQE_RESPONDER_TOGGLE_BIT_MASK  0x1 /* indicates a valid completion written by FW. FW toggle this bit each time it finishes producing all PBL entries */
88 #define RDMA_CQE_RESPONDER_TOGGLE_BIT_SHIFT 0
89 #define RDMA_CQE_RESPONDER_TYPE_MASK        0x3 /*  (use enum rdma_cqe_type) */
90 #define RDMA_CQE_RESPONDER_TYPE_SHIFT       1
91 #define RDMA_CQE_RESPONDER_INV_FLG_MASK     0x1 /* r_key invalidated indicator */
92 #define RDMA_CQE_RESPONDER_INV_FLG_SHIFT    3
93 #define RDMA_CQE_RESPONDER_IMM_FLG_MASK     0x1 /* immediate data indicator */
94 #define RDMA_CQE_RESPONDER_IMM_FLG_SHIFT    4
95 #define RDMA_CQE_RESPONDER_RDMA_FLG_MASK    0x1 /* 1=this CQE relates to an RDMA Write. 0=Send. */
96 #define RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT   5
97 #define RDMA_CQE_RESPONDER_RESERVED2_MASK   0x3
98 #define RDMA_CQE_RESPONDER_RESERVED2_SHIFT  6
99 	u8 status;
100 };
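
/*
 * Illustrative sketch, not part of the vendor API: the MASK/SHIFT pairs
 * above are intended to be used as below to decode sub-fields of the
 * responder CQE flags byte. The helper names are hypothetical; the
 * driver's own GET/SET field macros live in its common HSI headers.
 */
static inline u8 rdma_cqe_responder_get_type(const struct rdma_cqe_responder *cqe)
{
	/* flags is a single byte, so no endianness conversion is needed */
	return (cqe->flags >> RDMA_CQE_RESPONDER_TYPE_SHIFT) &
	    RDMA_CQE_RESPONDER_TYPE_MASK;
}

static inline u8 rdma_cqe_responder_get_imm_flg(const struct rdma_cqe_responder *cqe)
{
	return (cqe->flags >> RDMA_CQE_RESPONDER_IMM_FLG_SHIFT) &
	    RDMA_CQE_RESPONDER_IMM_FLG_MASK;
}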

struct rdma_cqe_requester
{
	__le16 sq_cons;
	__le16 reserved0;
	__le32 reserved1;
	struct regpair qp_handle;
	struct regpair reserved2;
	__le32 reserved3;
	__le16 reserved4;
	u8 flags;
#define RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK  0x1 /* indicates a valid completion written by FW. FW toggles this bit each time it finishes producing all PBL entries */
#define RDMA_CQE_REQUESTER_TOGGLE_BIT_SHIFT 0
#define RDMA_CQE_REQUESTER_TYPE_MASK        0x3 /* (use enum rdma_cqe_type) */
#define RDMA_CQE_REQUESTER_TYPE_SHIFT       1
#define RDMA_CQE_REQUESTER_RESERVED5_MASK   0x1F
#define RDMA_CQE_REQUESTER_RESERVED5_SHIFT  3
	u8 status;
};

struct rdma_cqe_common
{
	struct regpair reserved0;
	struct regpair qp_handle;
	__le16 reserved1[7];
	u8 flags;
#define RDMA_CQE_COMMON_TOGGLE_BIT_MASK  0x1 /* indicates a valid completion written by FW. FW toggles this bit each time it finishes producing all PBL entries */
#define RDMA_CQE_COMMON_TOGGLE_BIT_SHIFT 0
#define RDMA_CQE_COMMON_TYPE_MASK        0x3 /* (use enum rdma_cqe_type) */
#define RDMA_CQE_COMMON_TYPE_SHIFT       1
#define RDMA_CQE_COMMON_RESERVED2_MASK   0x1F
#define RDMA_CQE_COMMON_RESERVED2_SHIFT  3
	u8 status;
};

/*
 * rdma completion queue element
 */
union rdma_cqe
{
	struct rdma_cqe_responder resp;
	struct rdma_cqe_requester req;
	struct rdma_cqe_common cmn;
};
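
/*
 * Illustrative sketch (hypothetical helper): a CQ consumer first compares
 * the toggle bit in the common flags byte against the expected phase of
 * the current pass over the CQ ring, and only then dispatches on the CQE
 * type (see enum rdma_cqe_type below).
 */
static inline int rdma_cqe_is_valid(const union rdma_cqe *cqe, u8 expected_toggle)
{
	u8 toggle = (cqe->cmn.flags >> RDMA_CQE_COMMON_TOGGLE_BIT_SHIFT) &
	    RDMA_CQE_COMMON_TOGGLE_BIT_MASK;

	/* The FW flips the toggle bit on every full pass over the CQ ring */
	return toggle == expected_toggle;
}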

/*
 * CQE requester status enumeration
 */
enum rdma_cqe_requester_status_enum
{
	RDMA_CQE_REQ_STS_OK,
	RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR,
	RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR,
	RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR,
	RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR,
	RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR,
	RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR,
	RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR,
	RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR,
	RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR,
	RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR,
	RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR,
	MAX_RDMA_CQE_REQUESTER_STATUS_ENUM
};

/*
 * CQE responder status enumeration
 */
enum rdma_cqe_responder_status_enum
{
	RDMA_CQE_RESP_STS_OK,
	RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR,
	RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR,
	RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR,
	RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR,
	RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR,
	RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR,
	RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR,
	MAX_RDMA_CQE_RESPONDER_STATUS_ENUM
};

/*
 * CQE type enumeration
 */
enum rdma_cqe_type
{
	RDMA_CQE_TYPE_REQUESTER,
	RDMA_CQE_TYPE_RESPONDER_RQ,
	RDMA_CQE_TYPE_RESPONDER_SRQ,
	RDMA_CQE_TYPE_INVALID,
	MAX_RDMA_CQE_TYPE
};

/*
 * DIF Block size options
 */
enum rdma_dif_block_size
{
	RDMA_DIF_BLOCK_512=0,
	RDMA_DIF_BLOCK_4096=1,
	MAX_RDMA_DIF_BLOCK_SIZE
};

/*
 * DIF CRC initial value
 */
enum rdma_dif_crc_seed
{
	RDMA_DIF_CRC_SEED_0000=0,
	RDMA_DIF_CRC_SEED_FFFF=1,
	MAX_RDMA_DIF_CRC_SEED
};

/*
 * RDMA DIF Error Result Structure
 */
struct rdma_dif_error_result
{
	__le32 error_intervals /* Total number of error intervals in the IO. */;
	__le32 dif_error_1st_interval /* Number of the first interval that contained an error. Set to 0xFFFFFFFF if the error occurred in the Runt Block. */;
	u8 flags;
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_MASK      0x1 /* CRC error occurred. */
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_SHIFT     0
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_MASK  0x1 /* App Tag error occurred. */
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_SHIFT 1
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_MASK  0x1 /* Ref Tag error occurred. */
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_SHIFT 2
#define RDMA_DIF_ERROR_RESULT_RESERVED0_MASK               0xF
#define RDMA_DIF_ERROR_RESULT_RESERVED0_SHIFT              3
#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_MASK              0x1 /* Used to indicate the structure is valid. Toggles each time an invalidate region is performed. */
#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_SHIFT             7
	u8 reserved1[55] /* Pad to 64 bytes to ensure efficient word line writing. */;
};
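
/*
 * Illustrative sketch (hypothetical helper): consuming the DIF error
 * result. The toggle bit says whether the structure has been rewritten
 * since the last invalidation; the error-type bits then say what failed.
 */
static inline u8 rdma_dif_error_is_crc(const struct rdma_dif_error_result *res)
{
	return (res->flags >> RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_SHIFT) &
	    RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_MASK;
}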

/*
 * DIF IO direction
 */
enum rdma_dif_io_direction_flg
{
	RDMA_DIF_DIR_RX=0,
	RDMA_DIF_DIR_TX=1,
	MAX_RDMA_DIF_IO_DIRECTION_FLG
};

/*
 * RDMA DIF Runt Result Structure
 */
struct rdma_dif_runt_result
{
	__le16 guard_tag /* CRC result of received IO. */;
	__le16 reserved[3];
};

/*
 * memory window type enumeration
 */
enum rdma_mw_type
{
	RDMA_MW_TYPE_1,
	RDMA_MW_TYPE_2A,
	MAX_RDMA_MW_TYPE
};

struct rdma_rq_sge
{
	struct regpair addr;
	__le32 length;
	__le32 flags;
#define RDMA_RQ_SGE_L_KEY_MASK      0x3FFFFFF /* key of the memory relating to this RQ */
#define RDMA_RQ_SGE_L_KEY_SHIFT     0
#define RDMA_RQ_SGE_NUM_SGES_MASK   0x7 /* first SGE: number of SGEs in this RQ WQE. Other SGEs: should be set to 0 */
#define RDMA_RQ_SGE_NUM_SGES_SHIFT  26
#define RDMA_RQ_SGE_RESERVED0_MASK  0x7
#define RDMA_RQ_SGE_RESERVED0_SHIFT 29
};
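
/*
 * Illustrative sketch (hypothetical helper): composing the 32-bit RQ SGE
 * flags word. The SGE count is carried only by the first SGE of the WQE
 * and must be 0 on the others. The result is in CPU byte order; the
 * conversion to __le32 (e.g. htole32) is left to the caller. 'u32' is
 * assumed to be typedef'd by the driver's common headers.
 */
static inline u32 rdma_rq_sge_make_flags(u32 l_key, u8 num_sges)
{
	return ((l_key & RDMA_RQ_SGE_L_KEY_MASK) << RDMA_RQ_SGE_L_KEY_SHIFT) |
	    (((u32)num_sges & RDMA_RQ_SGE_NUM_SGES_MASK) <<
	    RDMA_RQ_SGE_NUM_SGES_SHIFT);
}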

struct rdma_sq_atomic_wqe
{
	__le32 reserved1;
	__le32 length /* Total data length (8 bytes for Atomic) */;
	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_MASK         0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_SHIFT        0
#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_MASK     0x1 /* If set, all pending RDMA read or Atomic operations will be completed before this WQE is processed */
#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_SHIFT    1
#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_MASK    0x1 /* If set, all pending operations will be completed before this WQE is processed */
#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_SHIFT   2
#define RDMA_SQ_ATOMIC_WQE_SE_FLG_MASK           0x1 /* Don't care for atomic WQE */
#define RDMA_SQ_ATOMIC_WQE_SE_FLG_SHIFT          3
#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_MASK       0x1 /* Should be 0 for atomic WQE */
#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_SHIFT      4
#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_MASK  0x1 /* Should be 0 for atomic WQE */
#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_ATOMIC_WQE_RESERVED0_MASK        0x3
#define RDMA_SQ_ATOMIC_WQE_RESERVED0_SHIFT       6
	u8 wqe_size /* Size of WQE in 16B chunks including SGE */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
	struct regpair remote_va /* remote virtual address */;
	__le32 r_key /* Remote key */;
	__le32 reserved2;
	struct regpair cmp_data /* Data to compare in case of ATOMIC_CMP_AND_SWAP */;
	struct regpair swap_data /* Swap or add data */;
};

/*
 * First element (16 bytes) of atomic wqe
 */
struct rdma_sq_atomic_wqe_1st
{
	__le32 reserved1;
	__le32 length /* Total data length (8 bytes for Atomic) */;
	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_MASK       0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_SHIFT      0
#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_MASK   0x1 /* If set, all pending RDMA read or Atomic operations will be completed before this WQE is processed */
#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_SHIFT  1
#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_MASK  0x1 /* If set, all pending operations will be completed before this WQE is processed */
#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_MASK         0x1 /* Don't care for atomic WQE */
#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_SHIFT        3
#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_MASK     0x1 /* Should be 0 for atomic WQE */
#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_SHIFT    4
#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_MASK      0x7
#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_SHIFT     5
	u8 wqe_size /* Size of WQE in 16B chunks including all SGEs. Set to number of SGEs + 1. */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};

/*
 * Second element (16 bytes) of atomic wqe
 */
struct rdma_sq_atomic_wqe_2nd
{
	struct regpair remote_va /* remote virtual address */;
	__le32 r_key /* Remote key */;
	__le32 reserved2;
};

/*
 * Third element (16 bytes) of atomic wqe
 */
struct rdma_sq_atomic_wqe_3rd
{
	struct regpair cmp_data /* Data to compare in case of ATOMIC_CMP_AND_SWAP */;
	struct regpair swap_data /* Swap or add data */;
};

struct rdma_sq_bind_wqe
{
	struct regpair addr;
	__le32 l_key;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_BIND_WQE_COMP_FLG_MASK       0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_BIND_WQE_COMP_FLG_SHIFT      0
#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_MASK   0x1 /* If set, all pending RDMA read or Atomic operations will be completed before this WQE is processed */
#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_SHIFT  1
#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_MASK  0x1 /* If set, all pending operations will be completed before this WQE is processed */
#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_BIND_WQE_SE_FLG_MASK         0x1 /* Don't care for bind WQE */
#define RDMA_SQ_BIND_WQE_SE_FLG_SHIFT        3
#define RDMA_SQ_BIND_WQE_INLINE_FLG_MASK     0x1 /* Should be 0 for bind WQE */
#define RDMA_SQ_BIND_WQE_INLINE_FLG_SHIFT    4
#define RDMA_SQ_BIND_WQE_RESERVED0_MASK      0x7
#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT     5
	u8 wqe_size /* Size of WQE in 16B chunks */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
	u8 bind_ctrl;
#define RDMA_SQ_BIND_WQE_ZERO_BASED_MASK     0x1 /* zero-based indication */
#define RDMA_SQ_BIND_WQE_ZERO_BASED_SHIFT    0
#define RDMA_SQ_BIND_WQE_MW_TYPE_MASK        0x1 /* (use enum rdma_mw_type) */
#define RDMA_SQ_BIND_WQE_MW_TYPE_SHIFT       1
#define RDMA_SQ_BIND_WQE_RESERVED1_MASK      0x3F
#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT     2
	u8 access_ctrl;
#define RDMA_SQ_BIND_WQE_REMOTE_READ_MASK    0x1
#define RDMA_SQ_BIND_WQE_REMOTE_READ_SHIFT   0
#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_MASK   0x1
#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_SHIFT  1
#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_MASK  0x1
#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_SHIFT 2
#define RDMA_SQ_BIND_WQE_LOCAL_READ_MASK     0x1
#define RDMA_SQ_BIND_WQE_LOCAL_READ_SHIFT    3
#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_MASK    0x1
#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_SHIFT   4
#define RDMA_SQ_BIND_WQE_RESERVED2_MASK      0x7
#define RDMA_SQ_BIND_WQE_RESERVED2_SHIFT     5
	u8 reserved3;
	u8 length_hi /* upper 8 bits of the registered MW length */;
	__le32 length_lo /* lower 32 bits of the registered MW length */;
	__le32 parent_l_key /* l_key of the parent MR */;
	__le32 reserved4;
};

/*
 * First element (16 bytes) of bind wqe
 */
struct rdma_sq_bind_wqe_1st
{
	struct regpair addr;
	__le32 l_key;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_MASK       0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_SHIFT      0
#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_MASK   0x1 /* If set, all pending RDMA read or Atomic operations will be completed before this WQE is processed */
#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_SHIFT  1
#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_MASK  0x1 /* If set, all pending operations will be completed before this WQE is processed */
#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_MASK         0x1 /* Don't care for bind WQE */
#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_SHIFT        3
#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_MASK     0x1 /* Should be 0 for bind WQE */
#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_SHIFT    4
#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_MASK      0x7
#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_SHIFT     5
	u8 wqe_size /* Size of WQE in 16B chunks */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};

/*
 * Second element (16 bytes) of bind wqe
 */
struct rdma_sq_bind_wqe_2nd
{
	u8 bind_ctrl;
#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_MASK     0x1 /* zero-based indication */
#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_SHIFT    0
#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_MASK        0x1 /* (use enum rdma_mw_type) */
#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_SHIFT       1
#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK      0x3F
#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT     2
	u8 access_ctrl;
#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_MASK    0x1
#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_SHIFT   0
#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_MASK   0x1
#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_SHIFT  1
#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_MASK  0x1
#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_MASK     0x1
#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_SHIFT    3
#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_MASK    0x1
#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_SHIFT   4
#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_MASK      0x7
#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_SHIFT     5
	u8 reserved3;
	u8 length_hi /* upper 8 bits of the registered MW length */;
	__le32 length_lo /* lower 32 bits of the registered MW length */;
	__le32 parent_l_key /* l_key of the parent MR */;
	__le32 reserved4;
};

/*
 * Structure with only the SQ WQE common fields. Size is of one SQ element (16B)
 */
struct rdma_sq_common_wqe
{
	__le32 reserved1[3];
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_COMMON_WQE_COMP_FLG_MASK       0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_COMMON_WQE_COMP_FLG_SHIFT      0
#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_MASK   0x1 /* If set, all pending RDMA read or Atomic operations will be completed before this WQE is processed */
#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_SHIFT  1
#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_MASK  0x1 /* If set, all pending operations will be completed before this WQE is processed */
#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_COMMON_WQE_SE_FLG_MASK         0x1 /* If set, signal the responder to generate a solicited event on this WQE (only relevant in SENDs and RDMA write with Imm) */
#define RDMA_SQ_COMMON_WQE_SE_FLG_SHIFT        3
#define RDMA_SQ_COMMON_WQE_INLINE_FLG_MASK     0x1 /* If set, indicates inline data follows this WQE instead of SGEs (only relevant in SENDs and RDMA writes) */
#define RDMA_SQ_COMMON_WQE_INLINE_FLG_SHIFT    4
#define RDMA_SQ_COMMON_WQE_RESERVED0_MASK      0x7
#define RDMA_SQ_COMMON_WQE_RESERVED0_SHIFT     5
	u8 wqe_size /* Size of WQE in 16B chunks including all SGEs or inline data. In case there are SGEs: set to number of SGEs + 1. In case of inline data: set to the whole number of 16B chunks which contain the inline data, + 1. */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};
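
/*
 * Illustrative sketch (hypothetical helper): computing wqe_size in 16B
 * chunks as the comment on wqe_size above describes. SGE-based WQEs
 * occupy one 16B header element plus one element per SGE; inline WQEs
 * occupy the header plus however many 16B units the inline data spans.
 */
static inline u8 rdma_sq_wqe_size_16b(u8 num_sges, u32 inline_bytes, u8 inline_flg)
{
	if (inline_flg)
		return (u8)(1 + (inline_bytes + 15) / 16);
	return (u8)(1 + num_sges);
}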

struct rdma_sq_fmr_wqe
{
	struct regpair addr;
	__le32 l_key;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_FMR_WQE_COMP_FLG_MASK                0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_FMR_WQE_COMP_FLG_SHIFT               0
#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_MASK            0x1 /* If set, all pending RDMA read or Atomic operations will be completed before this WQE is processed */
#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_SHIFT           1
#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_MASK           0x1 /* If set, all pending operations will be completed before this WQE is processed */
#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_SHIFT          2
#define RDMA_SQ_FMR_WQE_SE_FLG_MASK                  0x1 /* Don't care for FMR WQE */
#define RDMA_SQ_FMR_WQE_SE_FLG_SHIFT                 3
#define RDMA_SQ_FMR_WQE_INLINE_FLG_MASK              0x1 /* Should be 0 for FMR WQE */
#define RDMA_SQ_FMR_WQE_INLINE_FLG_SHIFT             4
#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_MASK         0x1 /* If set, indicates the host memory of this WQE is DIF protected. */
#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_SHIFT        5
#define RDMA_SQ_FMR_WQE_RESERVED0_MASK               0x3
#define RDMA_SQ_FMR_WQE_RESERVED0_SHIFT              6
	u8 wqe_size /* Size of WQE in 16B chunks */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
	u8 fmr_ctrl;
#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_MASK           0x1F /* 0 is 4k, 1 is 8k... */
#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_SHIFT          0
#define RDMA_SQ_FMR_WQE_ZERO_BASED_MASK              0x1 /* zero-based indication */
#define RDMA_SQ_FMR_WQE_ZERO_BASED_SHIFT             5
#define RDMA_SQ_FMR_WQE_BIND_EN_MASK                 0x1 /* indication whether bind is enabled for this MR */
#define RDMA_SQ_FMR_WQE_BIND_EN_SHIFT                6
#define RDMA_SQ_FMR_WQE_RESERVED1_MASK               0x1
#define RDMA_SQ_FMR_WQE_RESERVED1_SHIFT              7
	u8 access_ctrl;
#define RDMA_SQ_FMR_WQE_REMOTE_READ_MASK             0x1
#define RDMA_SQ_FMR_WQE_REMOTE_READ_SHIFT            0
#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_MASK            0x1
#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_SHIFT           1
#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_MASK           0x1
#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_SHIFT          2
#define RDMA_SQ_FMR_WQE_LOCAL_READ_MASK              0x1
#define RDMA_SQ_FMR_WQE_LOCAL_READ_SHIFT             3
#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_MASK             0x1
#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_SHIFT            4
#define RDMA_SQ_FMR_WQE_RESERVED2_MASK               0x7
#define RDMA_SQ_FMR_WQE_RESERVED2_SHIFT              5
	u8 reserved3;
	u8 length_hi /* upper 8 bits of the registered MR length */;
	__le32 length_lo /* lower 32 bits of the registered MR length. In case of DIF the length is specified including the DIF guards. */;
	struct regpair pbl_addr /* Address of PBL */;
	__le32 dif_base_ref_tag /* Ref tag of the first DIF Block. */;
	__le16 dif_app_tag /* App tag of all DIF Blocks. */;
	__le16 dif_app_tag_mask /* Bitmask for verifying dif_app_tag. */;
	__le16 dif_runt_crc_value /* In TX IO, in case the runt_valid_flg is set, this value is used to validate the last Block in the IO. */;
	__le16 dif_flags;
#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK    0x1 /* 0=RX, 1=TX (use enum rdma_dif_io_direction_flg) */
#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT   0
#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK          0x1 /* DIF block size. 0=512B 1=4096B (use enum rdma_dif_block_size) */
#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT         1
#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK      0x1 /* In TX IO, indicates the runt_value field is valid. In RX IO, indicates the calculated runt value is to be placed on the host buffer. */
#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT     2
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK  0x1 /* In TX IO, indicates the CRC of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK    0x1 /* In TX IO, indicates the Ref tag of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT   4
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK    0x1 /* In TX IO, indicates the App tag of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT   5
#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK            0x1 /* DIF CRC Seed to use. 0=0x0000 1=0xFFFF (use enum rdma_dif_crc_seed) */
#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT           6
#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_MASK    0x1 /* In RX IO, the Ref Tag will remain at the constant value of dif_base_ref_tag */
#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_SHIFT   7
#define RDMA_SQ_FMR_WQE_RESERVED4_MASK               0xFF
#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT              8
	__le32 Reserved5;
};
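
/*
 * Illustrative sketch (hypothetical helper): composing the FMR WQE
 * dif_flags word for a TX IO over 512B blocks with CRC guard validation,
 * using the DIF enums defined earlier in this file. The result is in CPU
 * byte order; the __le16 conversion is left to the caller.
 */
static inline u16 rdma_sq_fmr_wqe_dif_flags_tx_crc(void)
{
	return (RDMA_DIF_DIR_TX << RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT) |
	    (RDMA_DIF_BLOCK_512 << RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT) |
	    (1 << RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT) |
	    (RDMA_DIF_CRC_SEED_0000 << RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT);
}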

/*
 * First element (16 bytes) of fmr wqe
 */
struct rdma_sq_fmr_wqe_1st
{
	struct regpair addr;
	__le32 l_key;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_MASK         0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_SHIFT        0
#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_MASK     0x1 /* If set, all pending RDMA read or Atomic operations will be completed before this WQE is processed */
#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_SHIFT    1
#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_MASK    0x1 /* If set, all pending operations will be completed before this WQE is processed */
#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_SHIFT   2
#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_MASK           0x1 /* Don't care for FMR WQE */
#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_SHIFT          3
#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_MASK       0x1 /* Should be 0 for FMR WQE */
#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_SHIFT      4
#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_MASK  0x1 /* If set, indicates the host memory of this WQE is DIF protected. */
#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_MASK        0x3
#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_SHIFT       6
	u8 wqe_size /* Size of WQE in 16B chunks */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};

/*
 * Second element (16 bytes) of fmr wqe
 */
struct rdma_sq_fmr_wqe_2nd
{
	u8 fmr_ctrl;
#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_MASK  0x1F /* 0 is 4k, 1 is 8k... */
#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_SHIFT 0
#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_MASK     0x1 /* zero-based indication */
#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_SHIFT    5
#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_MASK        0x1 /* indication whether bind is enabled for this MR */
#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_SHIFT       6
#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_MASK      0x1
#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_SHIFT     7
	u8 access_ctrl;
#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_MASK    0x1
#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_SHIFT   0
#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_MASK   0x1
#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_SHIFT  1
#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_MASK  0x1
#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_MASK     0x1
#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_SHIFT    3
#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_MASK    0x1
#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_SHIFT   4
#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_MASK      0x7
#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_SHIFT     5
	u8 reserved3;
	u8 length_hi /* upper 8 bits of the registered MR length */;
	__le32 length_lo /* lower 32 bits of the registered MR length. */;
	struct regpair pbl_addr /* Address of PBL */;
};

/*
 * Third element (16 bytes) of fmr wqe
 */
struct rdma_sq_fmr_wqe_3rd
{
	__le32 dif_base_ref_tag /* Ref tag of the first DIF Block. */;
	__le16 dif_app_tag /* App tag of all DIF Blocks. */;
	__le16 dif_app_tag_mask /* Bitmask for verifying dif_app_tag. */;
	__le16 dif_runt_crc_value /* In TX IO, in case the runt_valid_flg is set, this value is used to validate the last Block in the IO. */;
	__le16 dif_flags;
#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK    0x1 /* 0=RX, 1=TX (use enum rdma_dif_io_direction_flg) */
#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT   0
#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK          0x1 /* DIF block size. 0=512B 1=4096B (use enum rdma_dif_block_size) */
#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT         1
#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK      0x1 /* In TX IO, indicates the runt_value field is valid. In RX IO, indicates the calculated runt value is to be placed on the host buffer. */
#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT     2
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK  0x1 /* In TX IO, indicates the CRC of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK    0x1 /* In TX IO, indicates the Ref tag of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT   4
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK    0x1 /* In TX IO, indicates the App tag of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT   5
#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK            0x1 /* DIF CRC Seed to use. 0=0x0000 1=0xFFFF (use enum rdma_dif_crc_seed) */
#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT           6
#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK               0x1FF
#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT              7
	__le32 Reserved5;
};

struct rdma_sq_local_inv_wqe
{
	struct regpair reserved;
	__le32 inv_l_key /* The local key to invalidate */;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_MASK         0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_SHIFT        0
#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_MASK     0x1 /* If set, all pending RDMA read or Atomic operations will be completed before this WQE is processed */
#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_SHIFT    1
#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_MASK    0x1 /* If set, all pending operations will be completed before this WQE is processed */
#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_SHIFT   2
#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_MASK           0x1 /* Don't care for local invalidate WQE */
#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_SHIFT          3
#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_MASK       0x1 /* Should be 0 for local invalidate WQE */
#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_SHIFT      4
#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_MASK  0x1 /* If set, indicates the host memory of this WQE is DIF protected. */
#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_MASK        0x3
#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_SHIFT       6
	u8 wqe_size /* Size of WQE in 16B chunks */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};

struct rdma_sq_rdma_wqe
{
	__le32 imm_data /* The immediate data in case of RDMA_WITH_IMM */;
	__le32 length /* Total data length. If DIF on host is enabled, the length does NOT include the DIF guards. */;
	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK                  0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT                 0
#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK              0x1 /* If set, all pending RDMA read or Atomic operations will be completed before this WQE is processed */
#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT             1
#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK             0x1 /* If set, all pending operations will be completed before this WQE is processed */
#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT            2
#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK                    0x1 /* If set, signal the responder to generate a solicited event on this WQE */
#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT                   3
#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK                0x1 /* If set, indicates inline data follows this WQE instead of SGEs. Applicable for RDMA_WR or RDMA_WR_WITH_IMM. Should be 0 for RDMA_RD */
#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT               4
#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK           0x1 /* If set, indicates the host memory of this WQE is DIF protected. */
#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT          5
#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_MASK              0x1 /* If set, indicates a read with invalidate WQE. iWARP only */
#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_SHIFT             6
#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK                 0x1
#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT                7
	u8 wqe_size /* Size of WQE in 16B chunks including all SGEs or inline data. In case there are SGEs: set to number of SGEs + 1. In case of inline data: set to the whole number of 16B chunks which contain the inline data, + 1. */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
	struct regpair remote_va /* Remote virtual address */;
	__le32 r_key /* Remote key */;
	u8 dif_flags;
#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_MASK            0x1 /* if dif_on_host_flg set: DIF block size. 0=512B 1=4096B (use enum rdma_dif_block_size) */
#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_SHIFT           0
#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_MASK  0x1 /* if dif_on_host_flg set: WQE executes the first RDMA on the related IO. */
#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_SHIFT 1
#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_MASK   0x1 /* if dif_on_host_flg set: WQE executes the last RDMA on the related IO. */
#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_SHIFT  2
#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK                 0x1F
#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT                3
	u8 reserved2[3];
};

/*
 * First element (16 bytes) of rdma wqe
 */
struct rdma_sq_rdma_wqe_1st
{
	__le32 imm_data /* The immediate data in case of RDMA_WITH_IMM */;
	__le32 length /* Total data length */;
	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_MASK         0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_SHIFT        0
#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_MASK     0x1 /* If set, all pending RDMA read or Atomic operations will be completed before this WQE is processed */
#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_SHIFT    1
#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_MASK    0x1 /* If set, all pending operations will be completed before this WQE is processed */
#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_SHIFT   2
#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_MASK           0x1 /* If set, signal the responder to generate a solicited event on this WQE */
#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_SHIFT          3
#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_MASK       0x1 /* If set, indicates inline data follows this WQE instead of SGEs. Applicable for RDMA_WR or RDMA_WR_WITH_IMM. Should be 0 for RDMA_RD */
#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_SHIFT      4
#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_MASK  0x1 /* If set, indicates the host memory of this WQE is DIF protected. */
#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG_MASK     0x1 /* If set, indicates a read with invalidate WQE. iWARP only */
#define RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG_SHIFT    6
#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_MASK        0x1
#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_SHIFT       7
	u8 wqe_size /* Size of WQE in 16B chunks including all SGEs or inline data. In case there are SGEs: set to number of SGEs + 1. In case of inline data: set to the whole number of 16B chunks which contain the inline data, + 1. */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};

/*
 * Second element (16 bytes) of rdma wqe
 */
struct rdma_sq_rdma_wqe_2nd
{
	struct regpair remote_va /* Remote virtual address */;
	__le32 r_key /* Remote key */;
	u8 dif_flags;
#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_MASK         0x1 /* if dif_on_host_flg set: DIF block size. 0=512B 1=4096B (use enum rdma_dif_block_size) */
#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_SHIFT        0
#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_MASK  0x1 /* if dif_on_host_flg set: WQE executes the first DIF on the related MR. */
#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_SHIFT 1
#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_MASK   0x1 /* if dif_on_host_flg set: WQE executes the last DIF on the related MR. */
#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_SHIFT  2
#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_MASK              0x1F
#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_SHIFT             3
	u8 reserved2[3];
};

/*
 * SQ WQE req type enumeration
 */
enum rdma_sq_req_type
{
	RDMA_SQ_REQ_TYPE_SEND,
	RDMA_SQ_REQ_TYPE_SEND_WITH_IMM,
	RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE,
	RDMA_SQ_REQ_TYPE_RDMA_WR,
	RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM,
	RDMA_SQ_REQ_TYPE_RDMA_RD,
	RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP,
	RDMA_SQ_REQ_TYPE_ATOMIC_ADD,
	RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE,
	RDMA_SQ_REQ_TYPE_FAST_MR,
	RDMA_SQ_REQ_TYPE_BIND,
	RDMA_SQ_REQ_TYPE_INVALID,
	MAX_RDMA_SQ_REQ_TYPE
};

struct rdma_sq_send_wqe
{
	__le32 inv_key_or_imm_data /* the r_key to invalidate in case of SEND_WITH_INVALIDATE, or the immediate data in case of SEND_WITH_IMM */;
	__le32 length /* Total data length */;
	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_SEND_WQE_COMP_FLG_MASK         0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_SEND_WQE_COMP_FLG_SHIFT        0
#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_MASK     0x1 /* If set, all pending RDMA read or Atomic operations will be completed before this WQE is processed */
#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_SHIFT    1
#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_MASK    0x1 /* If set, all pending operations will be completed before this WQE is processed */
#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_SHIFT   2
#define RDMA_SQ_SEND_WQE_SE_FLG_MASK           0x1 /* If set, signal the responder to generate a solicited event on this WQE */
#define RDMA_SQ_SEND_WQE_SE_FLG_SHIFT          3
#define RDMA_SQ_SEND_WQE_INLINE_FLG_MASK       0x1 /* If set, indicates inline data follows this WQE instead of SGEs */
#define RDMA_SQ_SEND_WQE_INLINE_FLG_SHIFT      4
#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_MASK  0x1 /* Should be 0 for send WQE */
#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_SEND_WQE_RESERVED0_MASK        0x3
#define RDMA_SQ_SEND_WQE_RESERVED0_SHIFT       6
	u8 wqe_size /* Size of WQE in 16B chunks including all SGEs or inline data. In case there are SGEs: set to number of SGEs + 1. In case of inline data: set to the whole number of 16B chunks which contain the inline data, + 1. */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
	__le32 reserved1[4];
};

struct rdma_sq_send_wqe_1st
{
	__le32 inv_key_or_imm_data /* the r_key to invalidate in case of SEND_WITH_INVALIDATE, or the immediate data in case of SEND_WITH_IMM */;
	__le32 length /* Total data length */;
	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_MASK       0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_SHIFT      0
#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_MASK   0x1 /* If set, all pending RDMA read or Atomic operations will be completed before this WQE is processed */
#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_SHIFT  1
#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_MASK  0x1 /* If set, all pending operations will be completed before this WQE is processed */
#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_MASK         0x1 /* If set, signal the responder to generate a solicited event on this WQE */
#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_SHIFT        3
#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_MASK     0x1 /* If set, indicates inline data follows this WQE instead of SGEs */
#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_SHIFT    4
#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_MASK      0x7
#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_SHIFT     5
	u8 wqe_size /* Size of WQE in 16B chunks including all SGEs or inline data. In case there are SGEs: set to number of SGEs + 1. In case of inline data: set to the whole number of 16B chunks which contain the inline data, + 1. */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};

struct rdma_sq_send_wqe_2st
{
	__le32 reserved1[4];
};

struct rdma_sq_sge
{
	__le32 length /* Total length of the send. If DIF on host is enabled, SGE length includes the DIF guards. */;
	struct regpair addr;
	__le32 l_key;
};

struct rdma_srq_wqe_header
{
	struct regpair wr_id;
	u8 num_sges /* number of SGEs in WQE */;
	u8 reserved2[7];
};

struct rdma_srq_sge
{
	struct regpair addr;
	__le32 length;
	__le32 l_key;
};

/*
 * rdma srq element
 */
union rdma_srq_elm
{
	struct rdma_srq_wqe_header header;
	struct rdma_srq_sge sge;
};
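
/*
 * Illustrative sketch (hypothetical helper): an SRQ WQE is laid out as
 * one rdma_srq_wqe_header element followed by header.num_sges SGE
 * elements, each occupying one 16B union rdma_srq_elm slot in the ring.
 */
static inline void rdma_srq_wqe_set_num_sges(union rdma_srq_elm *wqe, u8 num_sges)
{
	/* The first 16B element of an SRQ WQE is always the header */
	wqe[0].header.num_sges = num_sges;
	/* wqe[1] .. wqe[num_sges] are then filled as rdma_srq_sge entries */
}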

/*
 * Rdma doorbell data for flags update
 */
struct rdma_pwm_flags_data
{
	__le16 icid /* internal CID */;
	u8 agg_flags /* aggregative flags */;
	u8 reserved;
};

/*
 * Rdma doorbell data for SQ and RQ
 */
struct rdma_pwm_val16_data
{
	__le16 icid /* internal CID */;
	__le16 value /* aggregated value to update */;
};

union rdma_pwm_val16_data_union
{
	struct rdma_pwm_val16_data as_struct /* Parameters field */;
	__le32 as_dword;
};

/*
 * Rdma doorbell data for CQ
 */
struct rdma_pwm_val32_data
{
	__le16 icid /* internal CID */;
	u8 agg_flags /* a bit for every DQ counter flag in the CM context that DQ can increment */;
	u8 params;
#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK             0x3 /* aggregative command to CM (use enum db_agg_cmd_sel) */
#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT            0
#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK           0x1 /* enable QM bypass */
#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT          2
#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_MASK  0x1 /* Connection type is iWARP */
#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_SHIFT 3
#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_MASK         0x1 /* Flag indicating that the 16b value should be updated. Should be used together with conn_type_is_iwarp */
#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_SHIFT        4
#define RDMA_PWM_VAL32_DATA_RESERVED_MASK            0x7
#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT           5
	__le32 value /* aggregated value to update */;
};

union rdma_pwm_val32_data_union
{
	struct rdma_pwm_val32_data as_struct /* Parameters field */;
	struct regpair as_repair;
};
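
/*
 * Illustrative sketch (hypothetical helper): building the params byte of
 * the 32-bit CQ doorbell. The aggregative command encoding (enum
 * db_agg_cmd_sel) lives in the common HSI headers, not in this file, so
 * it is taken here as a raw value.
 */
static inline u8 rdma_pwm_val32_make_params(u8 agg_cmd, u8 bypass_en)
{
	return ((agg_cmd & RDMA_PWM_VAL32_DATA_AGG_CMD_MASK) <<
	    RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT) |
	    ((bypass_en & RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK) <<
	    RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT);
}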

#endif /* __RDMA_COMMON__ */