/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef __RDMA_COMMON__
#define __RDMA_COMMON__
/************************************************************************/
/* Add include to common rdma target for both eCore and protocol rdma driver */
/************************************************************************/

#define RDMA_RESERVED_LKEY                      (0)             // Reserved lkey
#define RDMA_RING_PAGE_SIZE                     (0x1000)        // 4KB pages

#define RDMA_MAX_SGE_PER_SQ_WQE                 (4)             // max number of SGEs in a single request
#define RDMA_MAX_SGE_PER_RQ_WQE                 (4)             // max number of SGEs in a single request

#define RDMA_MAX_DATA_SIZE_IN_WQE               (0x80000000)    // max size of data in a single request

#define RDMA_REQ_RD_ATOMIC_ELM_SIZE             (0x50)
#define RDMA_RESP_RD_ATOMIC_ELM_SIZE            (0x20)

#define RDMA_MAX_CQS                            (64*1024)
#define RDMA_MAX_TIDS                           (128*1024-1)
#define RDMA_MAX_PDS                            (64*1024)
#define RDMA_MAX_XRC_SRQS                       (1024)
#define RDMA_MAX_SRQS                           (32*1024)

#define RDMA_NUM_STATISTIC_COUNTERS             MAX_NUM_VPORTS
#define RDMA_NUM_STATISTIC_COUNTERS_K2          MAX_NUM_VPORTS_K2
#define RDMA_NUM_STATISTIC_COUNTERS_BB          MAX_NUM_VPORTS_BB

#define RDMA_TASK_TYPE (PROTOCOLID_ROCE)

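/*
 * rdma SRQ identifier
 */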
struct rdma_srq_id
{
        __le16 srq_idx /* SRQ index */;
        __le16 opaque_fid;
};

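/*
 * rdma SRQ producer counters
 */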
struct rdma_srq_producers
{
        __le32 sge_prod /* Current produced sge in SRQ */;
        __le32 wqe_prod /* Current produced WQE to SRQ */;
};

/*
 * rdma completion notification queue element
 */
struct rdma_cnqe
{
	struct regpair cq_handle;
};

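/*
 * rdma responder completion queue element
 */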
struct rdma_cqe_responder
{
	struct regpair srq_wr_id;
	struct regpair qp_handle;
	__le32 imm_data_or_inv_r_Key /* immediate data in case imm_flg is set, or invalidated r_key in case inv_flg is set */;
	__le32 length;
	__le32 imm_data_hi /* High bytes of immediate data in case imm_flg is set in iWARP only */;
	__le16 rq_cons /* Valid only when status is WORK_REQUEST_FLUSHED_ERR. Indicates an aggregative flush on all posted RQ WQEs until the reported rq_cons. */;
	u8 flags;
#define RDMA_CQE_RESPONDER_TOGGLE_BIT_MASK  0x1 /* indicates a valid completion written by FW. FW toggles this bit each time it finishes producing all PBL entries */
#define RDMA_CQE_RESPONDER_TOGGLE_BIT_SHIFT 0
#define RDMA_CQE_RESPONDER_TYPE_MASK        0x3 /* (use enum rdma_cqe_type) */
#define RDMA_CQE_RESPONDER_TYPE_SHIFT       1
#define RDMA_CQE_RESPONDER_INV_FLG_MASK     0x1 /* r_key invalidated indicator */
#define RDMA_CQE_RESPONDER_INV_FLG_SHIFT    3
#define RDMA_CQE_RESPONDER_IMM_FLG_MASK     0x1 /* immediate data indicator */
#define RDMA_CQE_RESPONDER_IMM_FLG_SHIFT    4
#define RDMA_CQE_RESPONDER_RDMA_FLG_MASK    0x1 /* 1=this CQE relates to an RDMA Write. 0=Send. */
#define RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT   5
#define RDMA_CQE_RESPONDER_RESERVED2_MASK   0x3
#define RDMA_CQE_RESPONDER_RESERVED2_SHIFT  6
	u8 status;
};

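/*
 * rdma requester completion queue element
 */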
struct rdma_cqe_requester
{
	__le16 sq_cons;
	__le16 reserved0;
	__le32 reserved1;
	struct regpair qp_handle;
	struct regpair reserved2;
	__le32 reserved3;
	__le16 reserved4;
	u8 flags;
#define RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK  0x1 /* indicates a valid completion written by FW. FW toggles this bit each time it finishes producing all PBL entries */
#define RDMA_CQE_REQUESTER_TOGGLE_BIT_SHIFT 0
#define RDMA_CQE_REQUESTER_TYPE_MASK        0x3 /* (use enum rdma_cqe_type) */
#define RDMA_CQE_REQUESTER_TYPE_SHIFT       1
#define RDMA_CQE_REQUESTER_RESERVED5_MASK   0x1F
#define RDMA_CQE_REQUESTER_RESERVED5_SHIFT  3
	u8 status;
};

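/*
 * rdma completion queue element common fields
 */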
struct rdma_cqe_common
{
	struct regpair reserved0;
	struct regpair qp_handle;
	__le16 reserved1[7];
	u8 flags;
#define RDMA_CQE_COMMON_TOGGLE_BIT_MASK  0x1 /* indicates a valid completion written by FW. FW toggles this bit each time it finishes producing all PBL entries */
#define RDMA_CQE_COMMON_TOGGLE_BIT_SHIFT 0
#define RDMA_CQE_COMMON_TYPE_MASK        0x3 /* (use enum rdma_cqe_type) */
#define RDMA_CQE_COMMON_TYPE_SHIFT       1
#define RDMA_CQE_COMMON_RESERVED2_MASK   0x1F
#define RDMA_CQE_COMMON_RESERVED2_SHIFT  3
	u8 status;
};

/*
 * rdma completion queue element
 */
union rdma_cqe
{
	struct rdma_cqe_responder resp;
	struct rdma_cqe_requester req;
	struct rdma_cqe_common cmn;
};

/*
 * CQE requester status enumeration
 */
enum rdma_cqe_requester_status_enum
{
	RDMA_CQE_REQ_STS_OK,
	RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR,
	RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR,
	RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR,
	RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR,
	RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR,
	RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR,
	RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR,
	RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR,
	RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR,
	RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR,
	RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR,
	MAX_RDMA_CQE_REQUESTER_STATUS_ENUM
};

/*
 * CQE responder status enumeration
 */
enum rdma_cqe_responder_status_enum
{
	RDMA_CQE_RESP_STS_OK,
	RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR,
	RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR,
	RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR,
	RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR,
	RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR,
	RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR,
	RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR,
	MAX_RDMA_CQE_RESPONDER_STATUS_ENUM
};

/*
 * CQE type enumeration
 */
enum rdma_cqe_type
{
	RDMA_CQE_TYPE_REQUESTER,
	RDMA_CQE_TYPE_RESPONDER_RQ,
	RDMA_CQE_TYPE_RESPONDER_SRQ,
	RDMA_CQE_TYPE_INVALID,
	MAX_RDMA_CQE_TYPE
};

/*
 * DIF Block size options
 */
enum rdma_dif_block_size
{
	RDMA_DIF_BLOCK_512=0,
	RDMA_DIF_BLOCK_4096=1,
	MAX_RDMA_DIF_BLOCK_SIZE
};

/*
 * DIF CRC initial value
 */
enum rdma_dif_crc_seed
{
	RDMA_DIF_CRC_SEED_0000=0,
	RDMA_DIF_CRC_SEED_FFFF=1,
	MAX_RDMA_DIF_CRC_SEED
};

/*
 * RDMA DIF Error Result Structure
 */
struct rdma_dif_error_result
{
	__le32 error_intervals /* Total number of error intervals in the IO. */;
	__le32 dif_error_1st_interval /* Number of the first interval that contained an error. Set to 0xFFFFFFFF if the error occurred in the Runt Block. */;
	u8 flags;
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_MASK      0x1 /* CRC error occurred. */
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_SHIFT     0
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_MASK  0x1 /* App Tag error occurred. */
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_SHIFT 1
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_MASK  0x1 /* Ref Tag error occurred. */
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_SHIFT 2
#define RDMA_DIF_ERROR_RESULT_RESERVED0_MASK               0xF
#define RDMA_DIF_ERROR_RESULT_RESERVED0_SHIFT              3
#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_MASK              0x1 /* Used to indicate the structure is valid. Toggles each time an invalidate region is performed. */
#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_SHIFT             7
	u8 reserved1[55] /* Pad to 64 bytes to ensure efficient word line writing. */;
};

/*
 * DIF IO direction
 */
enum rdma_dif_io_direction_flg
{
	RDMA_DIF_DIR_RX=0,
	RDMA_DIF_DIR_TX=1,
	MAX_RDMA_DIF_IO_DIRECTION_FLG
};

/*
 * RDMA DIF Runt Result Structure
 */
struct rdma_dif_runt_result
{
	__le16 guard_tag /* CRC result of received IO. */;
	__le16 reserved[3];
};

/*
 * memory window type enumeration
 */
enum rdma_mw_type
{
	RDMA_MW_TYPE_1,
	RDMA_MW_TYPE_2A,
	MAX_RDMA_MW_TYPE
};

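/*
 * RQ scatter-gather entry
 */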
struct rdma_rq_sge
{
	struct regpair addr;
	__le32 length;
	__le32 flags;
#define RDMA_RQ_SGE_L_KEY_MASK      0x3FFFFFF /* key of memory relating to this RQ */
#define RDMA_RQ_SGE_L_KEY_SHIFT     0
#define RDMA_RQ_SGE_NUM_SGES_MASK   0x7 /* first SGE - number of SGEs in this RQ WQE. Other SGEs - should be set to 0 */
#define RDMA_RQ_SGE_NUM_SGES_SHIFT  26
#define RDMA_RQ_SGE_RESERVED0_MASK  0x7
#define RDMA_RQ_SGE_RESERVED0_SHIFT 29
};

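/*
 * SQ atomic WQE (compare-and-swap or fetch-and-add)
 */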
struct rdma_sq_atomic_wqe
{
	__le32 reserved1;
	__le32 length /* Total data length (8 bytes for Atomic) */;
	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_MASK         0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_SHIFT        0
#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_MASK     0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_SHIFT    1
#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_MASK    0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_SHIFT   2
#define RDMA_SQ_ATOMIC_WQE_SE_FLG_MASK           0x1 /* Don't care for atomic wqe */
#define RDMA_SQ_ATOMIC_WQE_SE_FLG_SHIFT          3
#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_MASK       0x1 /* Should be 0 for atomic wqe */
#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_SHIFT      4
#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_MASK  0x1 /* Should be 0 for atomic wqe */
#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_ATOMIC_WQE_RESERVED0_MASK        0x3
#define RDMA_SQ_ATOMIC_WQE_RESERVED0_SHIFT       6
	u8 wqe_size /* Size of WQE in 16B chunks including SGE */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
	struct regpair remote_va /* remote virtual address */;
	__le32 r_key /* Remote key */;
	__le32 reserved2;
	struct regpair cmp_data /* Data to compare in case of ATOMIC_CMP_AND_SWAP */;
	struct regpair swap_data /* Swap or add data */;
};

/*
 * First element (16 bytes) of atomic wqe
 */
struct rdma_sq_atomic_wqe_1st
{
	__le32 reserved1;
	__le32 length /* Total data length (8 bytes for Atomic) */;
	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_MASK       0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_SHIFT      0
#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_MASK   0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_SHIFT  1
#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_MASK  0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_MASK         0x1 /* Don't care for atomic wqe */
#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_SHIFT        3
#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_MASK     0x1 /* Should be 0 for atomic wqe */
#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_SHIFT    4
#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_MASK      0x7
#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_SHIFT     5
	u8 wqe_size /* Size of WQE in 16B chunks including all SGEs. Set to number of SGEs + 1. */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};

/*
 * Second element (16 bytes) of atomic wqe
 */
struct rdma_sq_atomic_wqe_2nd
{
	struct regpair remote_va /* remote virtual address */;
	__le32 r_key /* Remote key */;
	__le32 reserved2;
};

/*
 * Third element (16 bytes) of atomic wqe
 */
struct rdma_sq_atomic_wqe_3rd
{
	struct regpair cmp_data /* Data to compare in case of ATOMIC_CMP_AND_SWAP */;
	struct regpair swap_data /* Swap or add data */;
};

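/*
 * SQ memory window bind WQE
 */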
struct rdma_sq_bind_wqe
{
	struct regpair addr;
	__le32 l_key;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_BIND_WQE_COMP_FLG_MASK       0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_BIND_WQE_COMP_FLG_SHIFT      0
#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_MASK   0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_SHIFT  1
#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_MASK  0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_BIND_WQE_SE_FLG_MASK         0x1 /* Don't care for bind wqe */
#define RDMA_SQ_BIND_WQE_SE_FLG_SHIFT        3
#define RDMA_SQ_BIND_WQE_INLINE_FLG_MASK     0x1 /* Should be 0 for bind wqe */
#define RDMA_SQ_BIND_WQE_INLINE_FLG_SHIFT    4
#define RDMA_SQ_BIND_WQE_RESERVED0_MASK      0x7
#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT     5
	u8 wqe_size /* Size of WQE in 16B chunks */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
	u8 bind_ctrl;
#define RDMA_SQ_BIND_WQE_ZERO_BASED_MASK     0x1 /* zero based indication */
#define RDMA_SQ_BIND_WQE_ZERO_BASED_SHIFT    0
#define RDMA_SQ_BIND_WQE_MW_TYPE_MASK        0x1 /* (use enum rdma_mw_type) */
#define RDMA_SQ_BIND_WQE_MW_TYPE_SHIFT       1
#define RDMA_SQ_BIND_WQE_RESERVED1_MASK      0x3F
#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT     2
	u8 access_ctrl;
#define RDMA_SQ_BIND_WQE_REMOTE_READ_MASK    0x1
#define RDMA_SQ_BIND_WQE_REMOTE_READ_SHIFT   0
#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_MASK   0x1
#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_SHIFT  1
#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_MASK  0x1
#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_SHIFT 2
#define RDMA_SQ_BIND_WQE_LOCAL_READ_MASK     0x1
#define RDMA_SQ_BIND_WQE_LOCAL_READ_SHIFT    3
#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_MASK    0x1
#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_SHIFT   4
#define RDMA_SQ_BIND_WQE_RESERVED2_MASK      0x7
#define RDMA_SQ_BIND_WQE_RESERVED2_SHIFT     5
	u8 reserved3;
	u8 length_hi /* upper 8 bits of the registered MW length */;
	__le32 length_lo /* lower 32 bits of the registered MW length */;
	__le32 parent_l_key /* l_key of the parent MR */;
	__le32 reserved4;
};

/*
 * First element (16 bytes) of bind wqe
 */
struct rdma_sq_bind_wqe_1st
{
	struct regpair addr;
	__le32 l_key;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_MASK       0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_SHIFT      0
#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_MASK   0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_SHIFT  1
#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_MASK  0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_MASK         0x1 /* Don't care for bind wqe */
#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_SHIFT        3
#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_MASK     0x1 /* Should be 0 for bind wqe */
#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_SHIFT    4
#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_MASK      0x7
#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_SHIFT     5
	u8 wqe_size /* Size of WQE in 16B chunks */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};

/*
 * Second element (16 bytes) of bind wqe
 */
struct rdma_sq_bind_wqe_2nd
{
	u8 bind_ctrl;
#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_MASK     0x1 /* zero based indication */
#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_SHIFT    0
#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_MASK        0x1 /* (use enum rdma_mw_type) */
#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_SHIFT       1
#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK      0x3F
#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT     2
	u8 access_ctrl;
#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_MASK    0x1
#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_SHIFT   0
#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_MASK   0x1
#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_SHIFT  1
#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_MASK  0x1
#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_MASK     0x1
#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_SHIFT    3
#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_MASK    0x1
#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_SHIFT   4
#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_MASK      0x7
#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_SHIFT     5
	u8 reserved3;
	u8 length_hi /* upper 8 bits of the registered MW length */;
	__le32 length_lo /* lower 32 bits of the registered MW length */;
	__le32 parent_l_key /* l_key of the parent MR */;
	__le32 reserved4;
};

/*
 * Structure with only the SQ WQE common fields. Size is of one SQ element (16B)
 */
struct rdma_sq_common_wqe
{
	__le32 reserved1[3];
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_COMMON_WQE_COMP_FLG_MASK       0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_COMMON_WQE_COMP_FLG_SHIFT      0
#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_MASK   0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_SHIFT  1
#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_MASK  0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_COMMON_WQE_SE_FLG_MASK         0x1 /* If set, signal the responder to generate a solicited event on this WQE (only relevant in SENDs and RDMA write with Imm) */
#define RDMA_SQ_COMMON_WQE_SE_FLG_SHIFT        3
#define RDMA_SQ_COMMON_WQE_INLINE_FLG_MASK     0x1 /* if set, indicates inline data is following this WQE instead of SGEs (only relevant in SENDs and RDMA writes) */
#define RDMA_SQ_COMMON_WQE_INLINE_FLG_SHIFT    4
#define RDMA_SQ_COMMON_WQE_RESERVED0_MASK      0x7
#define RDMA_SQ_COMMON_WQE_RESERVED0_SHIFT     5
	u8 wqe_size /* Size of WQE in 16B chunks including all SGEs or inline data. In case there are SGEs: set to number of SGEs + 1. In case of inline data: set to the number of 16B chunks that contain the inline data, + 1. */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};

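/*
 * SQ fast memory registration (FMR) WQE
 */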
struct rdma_sq_fmr_wqe
{
	struct regpair addr;
	__le32 l_key;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_FMR_WQE_COMP_FLG_MASK                0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_FMR_WQE_COMP_FLG_SHIFT               0
#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_MASK            0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_SHIFT           1
#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_MASK           0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_SHIFT          2
#define RDMA_SQ_FMR_WQE_SE_FLG_MASK                  0x1 /* Don't care for FMR wqe */
#define RDMA_SQ_FMR_WQE_SE_FLG_SHIFT                 3
#define RDMA_SQ_FMR_WQE_INLINE_FLG_MASK              0x1 /* Should be 0 for FMR wqe */
#define RDMA_SQ_FMR_WQE_INLINE_FLG_SHIFT             4
#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_MASK         0x1 /* If set, indicates host memory of this WQE is DIF protected. */
#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_SHIFT        5
#define RDMA_SQ_FMR_WQE_RESERVED0_MASK               0x3
#define RDMA_SQ_FMR_WQE_RESERVED0_SHIFT              6
	u8 wqe_size /* Size of WQE in 16B chunks */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
	u8 fmr_ctrl;
#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_MASK           0x1F /* 0 is 4k, 1 is 8k... */
#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_SHIFT          0
#define RDMA_SQ_FMR_WQE_ZERO_BASED_MASK              0x1 /* zero based indication */
#define RDMA_SQ_FMR_WQE_ZERO_BASED_SHIFT             5
#define RDMA_SQ_FMR_WQE_BIND_EN_MASK                 0x1 /* indication whether bind is enabled for this MR */
#define RDMA_SQ_FMR_WQE_BIND_EN_SHIFT                6
#define RDMA_SQ_FMR_WQE_RESERVED1_MASK               0x1
#define RDMA_SQ_FMR_WQE_RESERVED1_SHIFT              7
	u8 access_ctrl;
#define RDMA_SQ_FMR_WQE_REMOTE_READ_MASK             0x1
#define RDMA_SQ_FMR_WQE_REMOTE_READ_SHIFT            0
#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_MASK            0x1
#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_SHIFT           1
#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_MASK           0x1
#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_SHIFT          2
#define RDMA_SQ_FMR_WQE_LOCAL_READ_MASK              0x1
#define RDMA_SQ_FMR_WQE_LOCAL_READ_SHIFT             3
#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_MASK             0x1
#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_SHIFT            4
#define RDMA_SQ_FMR_WQE_RESERVED2_MASK               0x7
#define RDMA_SQ_FMR_WQE_RESERVED2_SHIFT              5
	u8 reserved3;
	u8 length_hi /* upper 8 bits of the registered MR length */;
	__le32 length_lo /* lower 32 bits of the registered MR length. In case of DIF the length is specified including the DIF guards. */;
	struct regpair pbl_addr /* Address of PBL */;
	__le32 dif_base_ref_tag /* Ref tag of the first DIF Block. */;
	__le16 dif_app_tag /* App tag of all DIF Blocks. */;
	__le16 dif_app_tag_mask /* Bitmask for verifying dif_app_tag. */;
	__le16 dif_runt_crc_value /* In TX IO, in case the runt_valid_flg is set, this value is used to validate the last Block in the IO. */;
	__le16 dif_flags;
#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK    0x1 /* 0=RX, 1=TX (use enum rdma_dif_io_direction_flg) */
#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT   0
#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK          0x1 /* DIF block size. 0=512B 1=4096B (use enum rdma_dif_block_size) */
#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT         1
#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK      0x1 /* In TX IO, indicates the runt_value field is valid. In RX IO, indicates the calculated runt value is to be placed on the host buffer. */
#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT     2
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK  0x1 /* In TX IO, indicates the CRC of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK    0x1 /* In TX IO, indicates the Ref tag of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT   4
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK    0x1 /* In TX IO, indicates the App tag of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT   5
#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK            0x1 /* DIF CRC Seed to use. 0=0x0000 1=0xFFFF (use enum rdma_dif_crc_seed) */
#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT           6
#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_MASK    0x1 /* In RX IO, the Ref Tag will remain at the constant value of dif_base_ref_tag */
#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_SHIFT   7
#define RDMA_SQ_FMR_WQE_RESERVED4_MASK               0xFF
#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT              8
	__le32 Reserved5;
};

/*
 * First element (16 bytes) of fmr wqe
 */
struct rdma_sq_fmr_wqe_1st
{
	struct regpair addr;
	__le32 l_key;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_MASK         0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_SHIFT        0
#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_MASK     0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_SHIFT    1
#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_MASK    0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_SHIFT   2
#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_MASK           0x1 /* Don't care for FMR wqe */
#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_SHIFT          3
#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_MASK       0x1 /* Should be 0 for FMR wqe */
#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_SHIFT      4
#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_MASK  0x1 /* If set, indicates host memory of this WQE is DIF protected. */
#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_MASK        0x3
#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_SHIFT       6
	u8 wqe_size /* Size of WQE in 16B chunks */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};

/*
 * Second element (16 bytes) of fmr wqe
 */
struct rdma_sq_fmr_wqe_2nd
{
	u8 fmr_ctrl;
#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_MASK  0x1F /* 0 is 4k, 1 is 8k... */
#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_SHIFT 0
#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_MASK     0x1 /* zero based indication */
#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_SHIFT    5
#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_MASK        0x1 /* indication whether bind is enabled for this MR */
#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_SHIFT       6
#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_MASK      0x1
#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_SHIFT     7
	u8 access_ctrl;
#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_MASK    0x1
#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_SHIFT   0
#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_MASK   0x1
#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_SHIFT  1
#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_MASK  0x1
#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_MASK     0x1
#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_SHIFT    3
#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_MASK    0x1
#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_SHIFT   4
#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_MASK      0x7
#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_SHIFT     5
	u8 reserved3;
	u8 length_hi /* upper 8 bits of the registered MR length */;
	__le32 length_lo /* lower 32 bits of the registered MR length. */;
	struct regpair pbl_addr /* Address of PBL */;
};

/*
 * Third element (16 bytes) of fmr wqe
 */
struct rdma_sq_fmr_wqe_3rd
{
	__le32 dif_base_ref_tag /* Ref tag of the first DIF Block. */;
	__le16 dif_app_tag /* App tag of all DIF Blocks. */;
	__le16 dif_app_tag_mask /* Bitmask for verifying dif_app_tag. */;
	__le16 dif_runt_crc_value /* In TX IO, in case the runt_valid_flg is set, this value is used to validate the last Block in the IO. */;
	__le16 dif_flags;
#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK    0x1 /* 0=RX, 1=TX (use enum rdma_dif_io_direction_flg) */
#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT   0
#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK          0x1 /* DIF block size. 0=512B 1=4096B (use enum rdma_dif_block_size) */
#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT         1
#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK      0x1 /* In TX IO, indicates the runt_value field is valid. In RX IO, indicates the calculated runt value is to be placed on the host buffer. */
#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT     2
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK  0x1 /* In TX IO, indicates the CRC of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK    0x1 /* In TX IO, indicates the Ref tag of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT   4
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK    0x1 /* In TX IO, indicates the App tag of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT   5
#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK            0x1 /* DIF CRC Seed to use. 0=0x0000 1=0xFFFF (use enum rdma_dif_crc_seed) */
#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT           6
#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK               0x1FF
#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT              7
	__le32 Reserved5;
};

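/*
 * SQ local invalidate WQE
 */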
struct rdma_sq_local_inv_wqe
{
	struct regpair reserved;
	__le32 inv_l_key /* The local key to invalidate */;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_MASK         0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_SHIFT        0
#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_MASK     0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_SHIFT    1
#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_MASK    0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_SHIFT   2
#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_MASK           0x1 /* Don't care for local invalidate wqe */
#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_SHIFT          3
#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_MASK       0x1 /* Should be 0 for local invalidate wqe */
#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_SHIFT      4
#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_MASK  0x1 /* If set, indicates host memory of this WQE is DIF protected. */
#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_MASK        0x3
#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_SHIFT       6
	u8 wqe_size /* Size of WQE in 16B chunks */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};

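/*
 * SQ RDMA read/write WQE
 */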
667*217ec208SDavid C Somayajulu struct rdma_sq_rdma_wqe
668*217ec208SDavid C Somayajulu {
669*217ec208SDavid C Somayajulu 	__le32 imm_data /* The immediate data in case of RDMA_WITH_IMM */;
670*217ec208SDavid C Somayajulu 	__le32 length /* Total data length. If DIF on host is enabled, length does NOT include DIF guards. */;
671*217ec208SDavid C Somayajulu 	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
672*217ec208SDavid C Somayajulu 	u8 req_type /* Type of WQE */;
673*217ec208SDavid C Somayajulu 	u8 flags;
674*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK                  0x1 /* If set, completion will be generated when the WQE is completed */
675*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT                 0
676*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK              0x1 /* If set, all pending RDMA read or Atomic operations will be completed before start processing this WQE */
677*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT             1
678*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK             0x1 /* If set, all pending operations will be completed before start processing this WQE */
679*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT            2
680*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_SE_FLG_MASK                    0x1 /* If set, signal the responder to generate a solicited event on this WQE */
681*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT                   3
682*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK                0x1 /* if set, indicates inline data is following this WQE instead of SGEs. Applicable for RDMA_WR or RDMA_WR_WITH_IMM. Should be 0 for RDMA_RD */
683*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT               4
684*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK           0x1 /* If set, indicated host memory of this WQE is DIF protected. */
685*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT          5
686*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_READ_INV_FLG_MASK              0x1 /* If set, indicated read with invalidate WQE. iWARP only */
687*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_READ_INV_FLG_SHIFT             6
688*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_RESERVED0_MASK                 0x1
689*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT                7
690*217ec208SDavid C Somayajulu 	u8 wqe_size /* Size of WQE in 16B chunks including all SGEs or inline data. In case there are SGEs: set to number of SGEs + 1. In case of inline data: set to the whole number of 16B which contain the inline data + 1. */;
691*217ec208SDavid C Somayajulu 	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
692*217ec208SDavid C Somayajulu 	struct regpair remote_va /* Remote virtual address */;
693*217ec208SDavid C Somayajulu 	__le32 r_key /* Remote key */;
694*217ec208SDavid C Somayajulu 	u8 dif_flags;
695*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_MASK            0x1 /* if dif_on_host_flg set: DIF block size. 0=512B 1=4096B (use enum rdma_dif_block_size) */
696*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_SHIFT           0
697*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_MASK  0x1 /* if dif_on_host_flg set: WQE executes first RDMA on related IO. */
698*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_SHIFT 1
699*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_MASK   0x1 /* if dif_on_host_flg set: WQE executes last RDMA on related IO. */
700*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_SHIFT  2
701*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_RESERVED1_MASK                 0x1F
702*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT                3
703*217ec208SDavid C Somayajulu 	u8 reserved2[3];
704*217ec208SDavid C Somayajulu };
705*217ec208SDavid C Somayajulu 
706*217ec208SDavid C Somayajulu /*
707*217ec208SDavid C Somayajulu  * First element (16 bytes) of rdma wqe
708*217ec208SDavid C Somayajulu  */
709*217ec208SDavid C Somayajulu struct rdma_sq_rdma_wqe_1st
710*217ec208SDavid C Somayajulu {
711*217ec208SDavid C Somayajulu 	__le32 imm_data /* The immediate data in case of RDMA_WITH_IMM */;
712*217ec208SDavid C Somayajulu 	__le32 length /* Total data length */;
713*217ec208SDavid C Somayajulu 	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
714*217ec208SDavid C Somayajulu 	u8 req_type /* Type of WQE */;
715*217ec208SDavid C Somayajulu 	u8 flags;
716*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_MASK         0x1 /* If set, completion will be generated when the WQE is completed */
717*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_SHIFT        0
718*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_MASK     0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
719*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_SHIFT    1
720*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_MASK    0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
721*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_SHIFT   2
722*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_MASK           0x1 /* If set, signals the responder to generate a solicited event for this WQE */
723*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_SHIFT          3
724*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_MASK       0x1 /* If set, indicates that inline data follows this WQE instead of SGEs. Applicable to RDMA_WR or RDMA_WR_WITH_IMM; should be 0 for RDMA_RD */
725*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_SHIFT      4
726*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_MASK  0x1 /* If set, indicates that the host memory of this WQE is DIF protected. */
727*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
728*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG_MASK     0x1 /* If set, indicates a read-with-invalidate WQE. iWARP only */
729*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG_SHIFT    6
730*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_MASK        0x1
731*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_SHIFT       7
732*217ec208SDavid C Somayajulu 	u8 wqe_size /* Size of the WQE in 16B chunks, including all SGEs or inline data. If SGEs are used, set to the number of SGEs + 1. For inline data, set to the number of 16B chunks that hold the inline data, plus 1. */;
733*217ec208SDavid C Somayajulu 	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
734*217ec208SDavid C Somayajulu };
735*217ec208SDavid C Somayajulu 
736*217ec208SDavid C Somayajulu /*
737*217ec208SDavid C Somayajulu  * Second element (16 bytes) of rdma wqe
738*217ec208SDavid C Somayajulu  */
739*217ec208SDavid C Somayajulu struct rdma_sq_rdma_wqe_2nd
740*217ec208SDavid C Somayajulu {
741*217ec208SDavid C Somayajulu 	struct regpair remote_va /* Remote virtual address */;
742*217ec208SDavid C Somayajulu 	__le32 r_key /* Remote key */;
743*217ec208SDavid C Somayajulu 	u8 dif_flags;
744*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_MASK         0x1 /* if dif_on_host_flg set: DIF block size. 0=512B 1=4096B (use enum rdma_dif_block_size) */
745*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_SHIFT        0
746*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_MASK  0x1 /* if dif_on_host_flg set: WQE executes first DIF on related MR. */
747*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_SHIFT 1
748*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_MASK   0x1 /* if dif_on_host_flg set: WQE executes last DIF on related MR. */
749*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_SHIFT  2
750*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_MASK              0x1F
751*217ec208SDavid C Somayajulu #define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_SHIFT             3
752*217ec208SDavid C Somayajulu 	u8 reserved2[3];
753*217ec208SDavid C Somayajulu };
754*217ec208SDavid C Somayajulu 
755*217ec208SDavid C Somayajulu /*
756*217ec208SDavid C Somayajulu  * SQ WQE req type enumeration
757*217ec208SDavid C Somayajulu  */
758*217ec208SDavid C Somayajulu enum rdma_sq_req_type
759*217ec208SDavid C Somayajulu {
760*217ec208SDavid C Somayajulu 	RDMA_SQ_REQ_TYPE_SEND,
761*217ec208SDavid C Somayajulu 	RDMA_SQ_REQ_TYPE_SEND_WITH_IMM,
762*217ec208SDavid C Somayajulu 	RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE,
763*217ec208SDavid C Somayajulu 	RDMA_SQ_REQ_TYPE_RDMA_WR,
764*217ec208SDavid C Somayajulu 	RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM,
765*217ec208SDavid C Somayajulu 	RDMA_SQ_REQ_TYPE_RDMA_RD,
766*217ec208SDavid C Somayajulu 	RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP,
767*217ec208SDavid C Somayajulu 	RDMA_SQ_REQ_TYPE_ATOMIC_ADD,
768*217ec208SDavid C Somayajulu 	RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE,
769*217ec208SDavid C Somayajulu 	RDMA_SQ_REQ_TYPE_FAST_MR,
770*217ec208SDavid C Somayajulu 	RDMA_SQ_REQ_TYPE_BIND,
771*217ec208SDavid C Somayajulu 	RDMA_SQ_REQ_TYPE_INVALID,
772*217ec208SDavid C Somayajulu 	MAX_RDMA_SQ_REQ_TYPE
773*217ec208SDavid C Somayajulu };
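
/*
 * Illustrative sketch, not part of the firmware interface: how a driver might
 * populate the two 16B header elements of a simple RDMA write WQE using the
 * structures above. Assumptions: the hypothetical helper name, the uint*_t
 * types and htole16()/htole32() provided to the including code by
 * <sys/types.h>/<sys/endian.h>, the lo/hi regpair layout from common_hsi.h,
 * and a WQE size of two header elements plus one 16B chunk per SGE that the
 * caller appends.
 */
static inline void
rdma_example_prep_rdma_write(struct rdma_sq_rdma_wqe_1st *w1,
    struct rdma_sq_rdma_wqe_2nd *w2, uint64_t remote_va, uint32_t r_key,
    uint32_t length, uint8_t num_sges, uint8_t prev_wqe_size)
{
	*w1 = (struct rdma_sq_rdma_wqe_1st){ 0 };
	*w2 = (struct rdma_sq_rdma_wqe_2nd){ 0 };

	w1->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
	w1->length = htole32(length);
	/* Request a completion when this WQE finishes */
	w1->flags |= (1 & RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_MASK) <<
	    RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_SHIFT;
	/* Assumed layout: two 16B header elements plus one chunk per SGE */
	w1->wqe_size = 2 + num_sges;
	w1->prev_wqe_size = prev_wqe_size;

	/* Remote buffer described by the second element */
	w2->remote_va.lo = htole32((uint32_t)remote_va);
	w2->remote_va.hi = htole32((uint32_t)(remote_va >> 32));
	w2->r_key = htole32(r_key);
}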
774*217ec208SDavid C Somayajulu 
775*217ec208SDavid C Somayajulu struct rdma_sq_send_wqe
776*217ec208SDavid C Somayajulu {
777*217ec208SDavid C Somayajulu 	__le32 inv_key_or_imm_data /* the r_key to invalidate in case of SEND_WITH_INVALIDATE, or the immediate data in case of SEND_WITH_IMM */;
778*217ec208SDavid C Somayajulu 	__le32 length /* Total data length */;
779*217ec208SDavid C Somayajulu 	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
780*217ec208SDavid C Somayajulu 	u8 req_type /* Type of WQE */;
781*217ec208SDavid C Somayajulu 	u8 flags;
782*217ec208SDavid C Somayajulu #define RDMA_SQ_SEND_WQE_COMP_FLG_MASK         0x1 /* If set, completion will be generated when the WQE is completed */
783*217ec208SDavid C Somayajulu #define RDMA_SQ_SEND_WQE_COMP_FLG_SHIFT        0
784*217ec208SDavid C Somayajulu #define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_MASK     0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
785*217ec208SDavid C Somayajulu #define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_SHIFT    1
786*217ec208SDavid C Somayajulu #define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_MASK    0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
787*217ec208SDavid C Somayajulu #define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_SHIFT   2
788*217ec208SDavid C Somayajulu #define RDMA_SQ_SEND_WQE_SE_FLG_MASK           0x1 /* If set, signals the responder to generate a solicited event for this WQE */
789*217ec208SDavid C Somayajulu #define RDMA_SQ_SEND_WQE_SE_FLG_SHIFT          3
790*217ec208SDavid C Somayajulu #define RDMA_SQ_SEND_WQE_INLINE_FLG_MASK       0x1 /* If set, indicates that inline data follows this WQE instead of SGEs */
791*217ec208SDavid C Somayajulu #define RDMA_SQ_SEND_WQE_INLINE_FLG_SHIFT      4
792*217ec208SDavid C Somayajulu #define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_MASK  0x1 /* Should be 0 for a send WQE */
793*217ec208SDavid C Somayajulu #define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_SHIFT 5
794*217ec208SDavid C Somayajulu #define RDMA_SQ_SEND_WQE_RESERVED0_MASK        0x3
795*217ec208SDavid C Somayajulu #define RDMA_SQ_SEND_WQE_RESERVED0_SHIFT       6
796*217ec208SDavid C Somayajulu 	u8 wqe_size /* Size of the WQE in 16B chunks, including all SGEs or inline data. If SGEs are used, set to the number of SGEs + 1. For inline data, set to the number of 16B chunks that hold the inline data, plus 1. */;
797*217ec208SDavid C Somayajulu 	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
798*217ec208SDavid C Somayajulu 	__le32 reserved1[4];
799*217ec208SDavid C Somayajulu };
800*217ec208SDavid C Somayajulu 
801*217ec208SDavid C Somayajulu struct rdma_sq_send_wqe_1st
802*217ec208SDavid C Somayajulu {
803*217ec208SDavid C Somayajulu 	__le32 inv_key_or_imm_data /* the r_key to invalidate in case of SEND_WITH_INVALIDATE, or the immediate data in case of SEND_WITH_IMM */;
804*217ec208SDavid C Somayajulu 	__le32 length /* Total data length */;
805*217ec208SDavid C Somayajulu 	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
806*217ec208SDavid C Somayajulu 	u8 req_type /* Type of WQE */;
807*217ec208SDavid C Somayajulu 	u8 flags;
808*217ec208SDavid C Somayajulu #define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_MASK       0x1 /* If set, completion will be generated when the WQE is completed */
809*217ec208SDavid C Somayajulu #define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_SHIFT      0
810*217ec208SDavid C Somayajulu #define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_MASK   0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
811*217ec208SDavid C Somayajulu #define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_SHIFT  1
812*217ec208SDavid C Somayajulu #define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_MASK  0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
813*217ec208SDavid C Somayajulu #define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
814*217ec208SDavid C Somayajulu #define RDMA_SQ_SEND_WQE_1ST_SE_FLG_MASK         0x1 /* If set, signals the responder to generate a solicited event for this WQE */
815*217ec208SDavid C Somayajulu #define RDMA_SQ_SEND_WQE_1ST_SE_FLG_SHIFT        3
816*217ec208SDavid C Somayajulu #define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_MASK     0x1 /* If set, indicates that inline data follows this WQE instead of SGEs */
817*217ec208SDavid C Somayajulu #define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_SHIFT    4
818*217ec208SDavid C Somayajulu #define RDMA_SQ_SEND_WQE_1ST_RESERVED0_MASK      0x7
819*217ec208SDavid C Somayajulu #define RDMA_SQ_SEND_WQE_1ST_RESERVED0_SHIFT     5
820*217ec208SDavid C Somayajulu 	u8 wqe_size /* Size of the WQE in 16B chunks, including all SGEs or inline data. If SGEs are used, set to the number of SGEs + 1. For inline data, set to the number of 16B chunks that hold the inline data, plus 1. */;
821*217ec208SDavid C Somayajulu 	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
822*217ec208SDavid C Somayajulu };
823*217ec208SDavid C Somayajulu 
824*217ec208SDavid C Somayajulu struct rdma_sq_send_wqe_2st
825*217ec208SDavid C Somayajulu {
826*217ec208SDavid C Somayajulu 	__le32 reserved1[4];
827*217ec208SDavid C Somayajulu };
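
/*
 * Illustrative sketch, not part of the firmware interface: filling the first
 * 16B element of a plain SEND WQE that requests a completion and a solicited
 * event. The helper name, the uint*_t types and htole32() availability, and
 * the wqe_size accounting (two header elements plus one 16B chunk per SGE)
 * are assumptions, not requirements of this header.
 */
static inline void
rdma_example_prep_send(struct rdma_sq_send_wqe_1st *swqe, uint32_t length,
    uint8_t num_sges, uint8_t prev_wqe_size)
{
	*swqe = (struct rdma_sq_send_wqe_1st){ 0 };

	swqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
	swqe->length = htole32(length);
	/* Generate a CQE and signal a solicited event on completion */
	swqe->flags |= (1 & RDMA_SQ_SEND_WQE_1ST_COMP_FLG_MASK) <<
	    RDMA_SQ_SEND_WQE_1ST_COMP_FLG_SHIFT;
	swqe->flags |= (1 & RDMA_SQ_SEND_WQE_1ST_SE_FLG_MASK) <<
	    RDMA_SQ_SEND_WQE_1ST_SE_FLG_SHIFT;
	/* Assumed: the 1st/2st header elements plus one chunk per SGE */
	swqe->wqe_size = 2 + num_sges;
	swqe->prev_wqe_size = prev_wqe_size;
}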
828*217ec208SDavid C Somayajulu 
829*217ec208SDavid C Somayajulu struct rdma_sq_sge
830*217ec208SDavid C Somayajulu {
831*217ec208SDavid C Somayajulu 	__le32 length /* Total length of the send. If DIF on host is enabled, SGE length includes the DIF guards. */;
832*217ec208SDavid C Somayajulu 	struct regpair addr;
833*217ec208SDavid C Somayajulu 	__le32 l_key;
834*217ec208SDavid C Somayajulu };
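
/*
 * Illustrative sketch, not part of the firmware interface: filling one SQ SGE.
 * Assumes the lo/hi regpair layout from common_hsi.h and htole32() from
 * <sys/endian.h>; the helper name is hypothetical.
 */
static inline void
rdma_example_set_sq_sge(struct rdma_sq_sge *sge, uint64_t addr,
    uint32_t length, uint32_t l_key)
{
	sge->addr.lo = htole32((uint32_t)addr);
	sge->addr.hi = htole32((uint32_t)(addr >> 32));
	sge->length = htole32(length);
	sge->l_key = htole32(l_key);
}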
835*217ec208SDavid C Somayajulu 
836*217ec208SDavid C Somayajulu struct rdma_srq_wqe_header
837*217ec208SDavid C Somayajulu {
838*217ec208SDavid C Somayajulu 	struct regpair wr_id;
839*217ec208SDavid C Somayajulu 	u8 num_sges /* number of SGEs in WQE */;
840*217ec208SDavid C Somayajulu 	u8 reserved2[7];
841*217ec208SDavid C Somayajulu };
842*217ec208SDavid C Somayajulu 
843*217ec208SDavid C Somayajulu struct rdma_srq_sge
844*217ec208SDavid C Somayajulu {
845*217ec208SDavid C Somayajulu 	struct regpair addr;
846*217ec208SDavid C Somayajulu 	__le32 length;
847*217ec208SDavid C Somayajulu 	__le32 l_key;
848*217ec208SDavid C Somayajulu };
849*217ec208SDavid C Somayajulu 
850*217ec208SDavid C Somayajulu /*
851*217ec208SDavid C Somayajulu  * rdma srq element (WQE header or SGE)
852*217ec208SDavid C Somayajulu  */
853*217ec208SDavid C Somayajulu union rdma_srq_elm
854*217ec208SDavid C Somayajulu {
855*217ec208SDavid C Somayajulu 	struct rdma_srq_wqe_header header;
856*217ec208SDavid C Somayajulu 	struct rdma_srq_sge sge;
857*217ec208SDavid C Somayajulu };
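
/*
 * Illustrative sketch, not part of the firmware interface: an SRQ WQE occupies
 * consecutive 16B ring elements, one header element followed by one element
 * per SGE. The helper name and the uint*_t/htole32() usage are assumptions;
 * ring producer and doorbell handling are driver specific and omitted.
 */
static inline void
rdma_example_build_srq_wqe(union rdma_srq_elm *elems, uint64_t wr_id,
    const struct rdma_srq_sge *sges, uint8_t num_sges)
{
	uint8_t i;

	elems[0].header = (struct rdma_srq_wqe_header){ 0 };
	elems[0].header.wr_id.lo = htole32((uint32_t)wr_id);
	elems[0].header.wr_id.hi = htole32((uint32_t)(wr_id >> 32));
	elems[0].header.num_sges = num_sges;

	/* SGEs follow the header element in the ring */
	for (i = 0; i < num_sges; i++)
		elems[1 + i].sge = sges[i];
}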
858*217ec208SDavid C Somayajulu 
859*217ec208SDavid C Somayajulu /*
860*217ec208SDavid C Somayajulu  * Rdma doorbell data for flags update
861*217ec208SDavid C Somayajulu  */
862*217ec208SDavid C Somayajulu struct rdma_pwm_flags_data
863*217ec208SDavid C Somayajulu {
864*217ec208SDavid C Somayajulu 	__le16 icid /* internal CID */;
865*217ec208SDavid C Somayajulu 	u8 agg_flags /* aggregative flags */;
866*217ec208SDavid C Somayajulu 	u8 reserved;
867*217ec208SDavid C Somayajulu };
868*217ec208SDavid C Somayajulu 
869*217ec208SDavid C Somayajulu /*
870*217ec208SDavid C Somayajulu  * Rdma doorbell data for SQ and RQ
871*217ec208SDavid C Somayajulu  */
872*217ec208SDavid C Somayajulu struct rdma_pwm_val16_data
873*217ec208SDavid C Somayajulu {
874*217ec208SDavid C Somayajulu 	__le16 icid /* internal CID */;
875*217ec208SDavid C Somayajulu 	__le16 value /* aggregated value to update */;
876*217ec208SDavid C Somayajulu };
877*217ec208SDavid C Somayajulu 
878*217ec208SDavid C Somayajulu union rdma_pwm_val16_data_union
879*217ec208SDavid C Somayajulu {
880*217ec208SDavid C Somayajulu 	struct rdma_pwm_val16_data as_struct /* Parameters field */;
881*217ec208SDavid C Somayajulu 	__le32 as_dword;
882*217ec208SDavid C Somayajulu };
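
/*
 * Illustrative sketch, not part of the firmware interface: packing an SQ/RQ
 * doorbell record. The union above lets the two 16-bit fields be written to
 * the doorbell BAR as a single dword; the helper name and htole16() usage are
 * assumptions, and the actual doorbell write is driver specific.
 */
static inline __le32
rdma_example_val16_db(uint16_t icid, uint16_t prod)
{
	union rdma_pwm_val16_data_union db;

	db.as_struct.icid = htole16(icid);
	db.as_struct.value = htole16(prod);

	return (db.as_dword);
}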
883*217ec208SDavid C Somayajulu 
884*217ec208SDavid C Somayajulu /*
885*217ec208SDavid C Somayajulu  * Rdma doorbell data for CQ
886*217ec208SDavid C Somayajulu  */
887*217ec208SDavid C Somayajulu struct rdma_pwm_val32_data
888*217ec208SDavid C Somayajulu {
889*217ec208SDavid C Somayajulu 	__le16 icid /* internal CID */;
890*217ec208SDavid C Somayajulu 	u8 agg_flags /* one bit for each DQ counter flag in the CM context that the DQ can increment */;
891*217ec208SDavid C Somayajulu 	u8 params;
892*217ec208SDavid C Somayajulu #define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK             0x3 /* aggregative command to CM (use enum db_agg_cmd_sel) */
893*217ec208SDavid C Somayajulu #define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT            0
894*217ec208SDavid C Somayajulu #define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK           0x1 /* enable QM bypass */
895*217ec208SDavid C Somayajulu #define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT          2
896*217ec208SDavid C Somayajulu #define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_MASK  0x1 /* Connection type is iWARP */
897*217ec208SDavid C Somayajulu #define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_SHIFT 3
898*217ec208SDavid C Somayajulu #define RDMA_PWM_VAL32_DATA_SET_16B_VAL_MASK         0x1 /* Flag indicating that the 16-bit value should be updated. Should be set when conn_type_is_iwarp is used */
899*217ec208SDavid C Somayajulu #define RDMA_PWM_VAL32_DATA_SET_16B_VAL_SHIFT        4
900*217ec208SDavid C Somayajulu #define RDMA_PWM_VAL32_DATA_RESERVED_MASK            0x7
901*217ec208SDavid C Somayajulu #define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT           5
902*217ec208SDavid C Somayajulu 	__le32 value /* aggregated value to update */;
903*217ec208SDavid C Somayajulu };
904*217ec208SDavid C Somayajulu 
905*217ec208SDavid C Somayajulu union rdma_pwm_val32_data_union
906*217ec208SDavid C Somayajulu {
907*217ec208SDavid C Somayajulu 	struct rdma_pwm_val32_data as_struct /* Parameters field */;
908*217ec208SDavid C Somayajulu 	struct regpair as_repair;
909*217ec208SDavid C Somayajulu };
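
/*
 * Illustrative sketch, not part of the firmware interface: packing a CQ
 * doorbell (val32) record. The aggregative command encoding comes from enum
 * db_agg_cmd_sel defined elsewhere, so it is taken here as a raw parameter;
 * the helper name, the htole16()/htole32() usage, and the choice to enable
 * QM bypass are assumptions.
 */
static inline void
rdma_example_fill_cq_db(struct rdma_pwm_val32_data *db, uint16_t icid,
    uint8_t agg_cmd, uint32_t value)
{
	*db = (struct rdma_pwm_val32_data){ 0 };

	db->icid = htole16(icid);
	db->params |= (agg_cmd & RDMA_PWM_VAL32_DATA_AGG_CMD_MASK) <<
	    RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
	db->params |= (1 & RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK) <<
	    RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT;
	db->value = htole32(value);
}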
910*217ec208SDavid C Somayajulu 
91111e25f0dSDavid C Somayajulu #endif /* __RDMA_COMMON__ */
912