/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2015 - 2023 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IRDMA_USER_H
#define IRDMA_USER_H

#include "osdep.h"

#define irdma_handle void *
#define irdma_adapter_handle irdma_handle
#define irdma_qp_handle irdma_handle
#define irdma_cq_handle irdma_handle
#define irdma_pd_id irdma_handle
#define irdma_stag_handle irdma_handle
#define irdma_stag_index u32
#define irdma_stag u32
#define irdma_stag_key u8
#define irdma_tagged_offset u64
#define irdma_access_privileges u32
#define irdma_physical_fragment u64
#define irdma_address_list u64 *
#define irdma_sgl struct ibv_sge *

#define IRDMA_MAX_MR_SIZE	0x200000000000ULL

#define IRDMA_ACCESS_FLAGS_LOCALREAD		0x01
#define IRDMA_ACCESS_FLAGS_LOCALWRITE		0x02
#define IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY	0x04
#define IRDMA_ACCESS_FLAGS_REMOTEREAD		0x05
#define IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY	0x08
#define IRDMA_ACCESS_FLAGS_REMOTEWRITE		0x0a
#define IRDMA_ACCESS_FLAGS_BIND_WINDOW		0x10
#define IRDMA_ACCESS_FLAGS_ZERO_BASED		0x20
#define IRDMA_ACCESS_FLAGS_ALL			0x3f

#define IRDMA_OP_TYPE_RDMA_WRITE		0x00
#define IRDMA_OP_TYPE_RDMA_READ			0x01
#define IRDMA_OP_TYPE_SEND			0x03
#define IRDMA_OP_TYPE_SEND_INV			0x04
#define IRDMA_OP_TYPE_SEND_SOL			0x05
#define IRDMA_OP_TYPE_SEND_SOL_INV		0x06
#define IRDMA_OP_TYPE_RDMA_WRITE_SOL		0x0d
#define IRDMA_OP_TYPE_BIND_MW			0x08
#define IRDMA_OP_TYPE_FAST_REG_NSMR		0x09
#define IRDMA_OP_TYPE_INV_STAG			0x0a
#define IRDMA_OP_TYPE_RDMA_READ_INV_STAG	0x0b
#define IRDMA_OP_TYPE_NOP			0x0c
#define IRDMA_OP_TYPE_REC	0x3e
#define IRDMA_OP_TYPE_REC_IMM	0x3f

#define IRDMA_FLUSH_MAJOR_ERR 1

enum irdma_device_caps_const {
	IRDMA_WQE_SIZE =			4,
	IRDMA_CQP_WQE_SIZE =			8,
	IRDMA_CQE_SIZE =			4,
	IRDMA_EXTENDED_CQE_SIZE =		8,
	IRDMA_AEQE_SIZE =			2,
	IRDMA_CEQE_SIZE =			1,
	IRDMA_CQP_CTX_SIZE =			8,
	IRDMA_SHADOW_AREA_SIZE =		8,
	IRDMA_GATHER_STATS_BUF_SIZE =		1024,
	IRDMA_MIN_IW_QP_ID =			0,
	IRDMA_QUERY_FPM_BUF_SIZE =		176,
	IRDMA_COMMIT_FPM_BUF_SIZE =		176,
	IRDMA_MAX_IW_QP_ID =			262143,
	IRDMA_MIN_CEQID =			0,
	IRDMA_MAX_CEQID =			1023,
	IRDMA_CEQ_MAX_COUNT =			IRDMA_MAX_CEQID + 1,
	IRDMA_MIN_CQID =			0,
	IRDMA_MAX_CQID =			524287,
	IRDMA_MIN_AEQ_ENTRIES =			1,
	IRDMA_MAX_AEQ_ENTRIES =			524287,
	IRDMA_MIN_CEQ_ENTRIES =			1,
	IRDMA_MAX_CEQ_ENTRIES =			262143,
	IRDMA_MIN_CQ_SIZE =			1,
	IRDMA_MAX_CQ_SIZE =			1048575,
	IRDMA_DB_ID_ZERO =			0,
	/* 64K + 1 */
	IRDMA_MAX_OUTBOUND_MSG_SIZE =		65537,
	/* 64K + 1 */
	IRDMA_MAX_INBOUND_MSG_SIZE =		65537,
	IRDMA_MAX_PE_ENA_VF_COUNT =		32,
	IRDMA_MAX_VF_FPM_ID =			47,
	IRDMA_MAX_SQ_PAYLOAD_SIZE =		2145386496,
	IRDMA_MAX_INLINE_DATA_SIZE =		101,
	IRDMA_MAX_WQ_ENTRIES =			32768,
	IRDMA_Q2_BUF_SIZE =			256,
	IRDMA_QP_CTX_SIZE =			256,
	IRDMA_MAX_PDS =				262144,
};

enum irdma_addressing_type {
	IRDMA_ADDR_TYPE_ZERO_BASED = 0,
	IRDMA_ADDR_TYPE_VA_BASED   = 1,
};

enum irdma_flush_opcode {
	FLUSH_INVALID = 0,
	FLUSH_GENERAL_ERR,
	FLUSH_PROT_ERR,
	FLUSH_REM_ACCESS_ERR,
	FLUSH_LOC_QP_OP_ERR,
	FLUSH_REM_OP_ERR,
	FLUSH_LOC_LEN_ERR,
	FLUSH_FATAL_ERR,
	FLUSH_RETRY_EXC_ERR,
	FLUSH_MW_BIND_ERR,
	FLUSH_REM_INV_REQ_ERR,
	FLUSH_RNR_RETRY_EXC_ERR,
};

enum irdma_cmpl_status {
	IRDMA_COMPL_STATUS_SUCCESS = 0,
	IRDMA_COMPL_STATUS_FLUSHED,
	IRDMA_COMPL_STATUS_INVALID_WQE,
	IRDMA_COMPL_STATUS_QP_CATASTROPHIC,
	IRDMA_COMPL_STATUS_REMOTE_TERMINATION,
	IRDMA_COMPL_STATUS_INVALID_STAG,
	IRDMA_COMPL_STATUS_BASE_BOUND_VIOLATION,
	IRDMA_COMPL_STATUS_ACCESS_VIOLATION,
	IRDMA_COMPL_STATUS_INVALID_PD_ID,
	IRDMA_COMPL_STATUS_WRAP_ERROR,
	IRDMA_COMPL_STATUS_STAG_INVALID_PDID,
	IRDMA_COMPL_STATUS_RDMA_READ_ZERO_ORD,
	IRDMA_COMPL_STATUS_QP_NOT_PRIVLEDGED,
	IRDMA_COMPL_STATUS_STAG_NOT_INVALID,
	IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_SIZE,
	IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_ENTRY,
	IRDMA_COMPL_STATUS_INVALID_FBO,
	IRDMA_COMPL_STATUS_INVALID_LEN,
	IRDMA_COMPL_STATUS_INVALID_ACCESS,
	IRDMA_COMPL_STATUS_PHYS_BUF_LIST_TOO_LONG,
	IRDMA_COMPL_STATUS_INVALID_VIRT_ADDRESS,
	IRDMA_COMPL_STATUS_INVALID_REGION,
	IRDMA_COMPL_STATUS_INVALID_WINDOW,
	IRDMA_COMPL_STATUS_INVALID_TOTAL_LEN,
	IRDMA_COMPL_STATUS_UNKNOWN,
};

enum irdma_cmpl_notify {
	IRDMA_CQ_COMPL_EVENT     = 0,
	IRDMA_CQ_COMPL_SOLICITED = 1,
};

enum irdma_qp_caps {
	IRDMA_WRITE_WITH_IMM = 1,
	IRDMA_SEND_WITH_IMM  = 2,
	IRDMA_ROCE	     = 4,
	IRDMA_PUSH_MODE      = 8,
};

struct irdma_qp_uk;
struct irdma_cq_uk;
struct irdma_qp_uk_init_info;
struct irdma_cq_uk_init_info;

struct irdma_ring {
	volatile u32 head;
	volatile u32 tail;	/* effective tail */
	u32 size;
};
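
/*
 * Illustrative sketch only (not part of the driver API): head is the
 * producer index, tail the consumer index ("effective tail"), and size the
 * number of slots, so occupancy can be derived along the lines of
 *
 *	used = (ring.head + ring.size - ring.tail) % ring.size;
 *	free = ring.size - used;
 *
 * The authoritative ring accounting helpers, including any reserved-slot
 * adjustments, live in irdma_defs.h.
 */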

struct irdma_cqe {
	__le64 buf[IRDMA_CQE_SIZE];
};

struct irdma_extended_cqe {
	__le64 buf[IRDMA_EXTENDED_CQE_SIZE];
};

struct irdma_post_send {
	irdma_sgl sg_list;
	u32 num_sges;
	u32 qkey;
	u32 dest_qp;
	u32 ah_id;
};

struct irdma_post_rq_info {
	u64 wr_id;
	irdma_sgl sg_list;
	u32 num_sges;
};

struct irdma_rdma_write {
	irdma_sgl lo_sg_list;
	u32 num_lo_sges;
	struct ibv_sge rem_addr;
};

struct irdma_rdma_read {
	irdma_sgl lo_sg_list;
	u32 num_lo_sges;
	struct ibv_sge rem_addr;
};

struct irdma_bind_window {
	irdma_stag mr_stag;
	u64 bind_len;
	void *va;
	enum irdma_addressing_type addressing_type;
	bool ena_reads:1;
	bool ena_writes:1;
	irdma_stag mw_stag;
	bool mem_window_type_1:1;
};

struct irdma_inv_local_stag {
	irdma_stag target_stag;
};

struct irdma_post_sq_info {
	u64 wr_id;
	u8 op_type;
	u8 l4len;
	bool signaled:1;
	bool read_fence:1;
	bool local_fence:1;
	bool inline_data:1;
	bool imm_data_valid:1;
	bool push_wqe:1;
	bool report_rtt:1;
	bool udp_hdr:1;
	bool defer_flag:1;
	u32 imm_data;
	u32 stag_to_inv;
	union {
		struct irdma_post_send send;
		struct irdma_rdma_write rdma_write;
		struct irdma_rdma_read rdma_read;
		struct irdma_bind_window bind_window;
		struct irdma_inv_local_stag inv_local_stag;
	} op;
};
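
/*
 * Usage sketch (illustrative only; buf_va, buf_len, lkey, my_wr_id, qp and
 * err are placeholders, not names from this header): a caller describes a
 * work request by filling irdma_post_sq_info and handing it to one of the
 * irdma_uk_* posting helpers declared below, e.g.
 *
 *	struct ibv_sge sge = { .addr = buf_va, .length = buf_len, .lkey = lkey };
 *	struct irdma_post_sq_info info = {};
 *
 *	info.wr_id = my_wr_id;
 *	info.op_type = IRDMA_OP_TYPE_SEND;
 *	info.signaled = true;
 *	info.op.send.sg_list = &sge;
 *	info.op.send.num_sges = 1;
 *	err = irdma_uk_send(qp, &info, true);
 *
 * Passing post_sq = true asks the helper to ring the doorbell (see
 * irdma_uk_qp_post_wr()); the exact semantics are defined by irdma_uk.c.
 */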

struct irdma_cq_poll_info {
	u64 wr_id;
	irdma_qp_handle qp_handle;
	u32 bytes_xfered;
	u32 qp_id;
	u32 ud_src_qpn;
	u32 imm_data;
	irdma_stag inv_stag; /* or L_R_Key */
	enum irdma_cmpl_status comp_status;
	u16 major_err;
	u16 minor_err;
	u16 ud_vlan;
	u8 ud_smac[6];
	u8 op_type;
	u8 q_type;
	bool stag_invalid_set:1; /* or L_R_Key set */
	bool push_dropped:1;
	bool error:1;
	bool solicited_event:1;
	bool ipv4:1;
	bool ud_vlan_valid:1;
	bool ud_smac_valid:1;
	bool imm_valid:1;
	bool signaled:1;
	union {
		u32 tcp_sqn;
		u32 roce_psn;
		u32 rtt;
		u32 raw;
	} stat;
};
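
/*
 * Polling sketch (illustrative only; handle_error() and complete() are
 * hypothetical caller-side helpers). irdma_uk_cq_poll_cmpl() fills one
 * irdma_cq_poll_info per call and returns 0 on success or a nonzero
 * errno-style value when no completion is available; see irdma_uk.c for
 * the authoritative convention.
 *
 *	struct irdma_cq_poll_info cqe_info;
 *
 *	while (!irdma_uk_cq_poll_cmpl(cq, &cqe_info)) {
 *		if (cqe_info.comp_status != IRDMA_COMPL_STATUS_SUCCESS)
 *			handle_error(&cqe_info);
 *		else
 *			complete(cqe_info.wr_id, cqe_info.bytes_xfered);
 *	}
 */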

int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
			       struct irdma_post_sq_info *info, bool post_sq);
int irdma_uk_inline_send(struct irdma_qp_uk *qp,
			 struct irdma_post_sq_info *info, bool post_sq);
int irdma_uk_mw_bind(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		     bool post_sq);
int irdma_uk_post_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled,
		      bool post_sq);
int irdma_uk_post_receive(struct irdma_qp_uk *qp,
			  struct irdma_post_rq_info *info);
void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp);
int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		       bool inv_stag, bool post_sq);
int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
			bool post_sq);
int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		  bool post_sq);
int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
				   struct irdma_post_sq_info *info,
				   bool post_sq);

struct irdma_wqe_uk_ops {
	void (*iw_copy_inline_data)(u8 *dest, struct ibv_sge *sge_list, u32 num_sges, u8 polarity);
	u16 (*iw_inline_data_size_to_quanta)(u32 data_size);
	void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct ibv_sge *sge,
				u8 valid);
	void (*iw_set_mw_bind_wqe)(__le64 *wqe,
				   struct irdma_bind_window *op_info);
};

int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
			  struct irdma_cq_poll_info *info);
void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
				      enum irdma_cmpl_notify cq_notify);
void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int size);
void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *qp, u16 cnt);
int irdma_uk_cq_init(struct irdma_cq_uk *cq,
		     struct irdma_cq_uk_init_info *info);
int irdma_uk_qp_init(struct irdma_qp_uk *qp,
		     struct irdma_qp_uk_init_info *info);
int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
				 u32 *sq_depth, u8 *sq_shift);
int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
				 u32 *rq_depth, u8 *rq_shift);
struct irdma_sq_uk_wr_trk_info {
	u64 wrid;
	u32 wr_len;
	u16 quanta;
	u8 signaled;
	u8 reserved[1];
};

struct irdma_qp_quanta {
	__le64 elem[IRDMA_WQE_SIZE];
};

struct irdma_qp_uk {
	struct irdma_qp_quanta *sq_base;
	struct irdma_qp_quanta *rq_base;
	struct irdma_uk_attrs *uk_attrs;
	u32 IOMEM *wqe_alloc_db;
	struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
	struct irdma_sig_wr_trk_info *sq_sigwrtrk_array;
	u64 *rq_wrid_array;
	__le64 *shadow_area;
	__le32 *push_db;
	__le64 *push_wqe;
	struct irdma_ring sq_ring;
	struct irdma_ring sq_sig_ring;
	struct irdma_ring rq_ring;
	struct irdma_ring initial_ring;
	u32 qp_id;
	u32 qp_caps;
	u32 sq_size;
	u32 rq_size;
	u32 max_sq_frag_cnt;
	u32 max_rq_frag_cnt;
	u32 max_inline_data;
	u32 last_rx_cmpl_idx;
	u32 last_tx_cmpl_idx;
	struct irdma_wqe_uk_ops wqe_ops;
	u16 conn_wqes;
	u8 qp_type;
	u8 swqe_polarity;
	u8 swqe_polarity_deferred;
	u8 rwqe_polarity;
	u8 rq_wqe_size;
	u8 rq_wqe_size_multiplier;
	u8 start_wqe_idx;
	bool deferred_flag:1;
	bool push_mode:1; /* whether the last post wqe was pushed */
	bool push_dropped:1;
	bool first_sq_wq:1;
	bool sq_flush_complete:1; /* Indicates flush was seen and SQ was empty after the flush */
	bool rq_flush_complete:1; /* Indicates flush was seen and RQ was empty after the flush */
	bool destroy_pending:1; /* Indicates the QP is being destroyed */
	void *back_qp;
	pthread_spinlock_t *lock;
	u8 dbg_rq_flushed;
	u16 ord_cnt;
	u8 sq_flush_seen;
	u8 rq_flush_seen;
	u8 rd_fence_rate;
};

struct irdma_cq_uk {
	struct irdma_cqe *cq_base;
	u32 IOMEM *cqe_alloc_db;
	u32 IOMEM *cq_ack_db;
	__le64 *shadow_area;
	u32 cq_id;
	u32 cq_size;
	struct irdma_ring cq_ring;
	u8 polarity;
	bool avoid_mem_cflct:1;
};

struct irdma_qp_uk_init_info {
	struct irdma_qp_quanta *sq;
	struct irdma_qp_quanta *rq;
	struct irdma_uk_attrs *uk_attrs;
	u32 IOMEM *wqe_alloc_db;
	__le64 *shadow_area;
	struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
	struct irdma_sig_wr_trk_info *sq_sigwrtrk_array;
	u64 *rq_wrid_array;
	u32 qp_id;
	u32 qp_caps;
	u32 sq_size;
	u32 rq_size;
	u32 max_sq_frag_cnt;
	u32 max_rq_frag_cnt;
	u32 max_inline_data;
	u32 sq_depth;
	u32 rq_depth;
	u8 first_sq_wq;
	u8 start_wqe_idx;
	u8 type;
	u8 sq_shift;
	u8 rq_shift;
	u8 rd_fence_rate;
	int abi_ver;
	bool legacy_mode;
};

struct irdma_cq_uk_init_info {
	u32 IOMEM *cqe_alloc_db;
	u32 IOMEM *cq_ack_db;
	struct irdma_cqe *cq_base;
	__le64 *shadow_area;
	u32 cq_size;
	u32 cq_id;
	bool avoid_mem_cflct;
};

__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
				   u16 *quanta, u32 total_size,
				   struct irdma_post_sq_info *info);
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx);
int irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq);
int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq);
int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta);
int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size);
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
			 u32 inline_data, u8 *shift);
int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *sqdepth);
int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, u32 *rqdepth);
void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
		       u32 wqe_idx, bool post_sq);
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
#endif /* IRDMA_USER_H */