/*
 * Copyright (c) 2024, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef __BNXT_RE_MAIN_H__
#define __BNXT_RE_MAIN_H__

#include <sys/param.h>
#include <sys/queue.h>

#include <infiniband/driver.h>
#include <infiniband/endian.h>
#include <infiniband/udma_barrier.h>

#include <errno.h>
#include <inttypes.h>
#include <math.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#include "abi.h"
#include "list.h"
#include "memory.h"

#define DEV	"bnxt_re : "
#define BNXT_RE_UD_QP_STALL	0x400000

#define CHIP_NUM_57508		0x1750
#define CHIP_NUM_57504		0x1751
#define CHIP_NUM_57502		0x1752
#define CHIP_NUM_58818		0xd818
#define CHIP_NUM_57608		0x1760

#define BNXT_NSEC_PER_SEC	1000000000UL

struct bnxt_re_chip_ctx {
	__u16	chip_num;
	__u8	chip_rev;
	__u8	chip_metal;
	bool	chip_is_gen_p5_thor2;
};

#define BNXT_RE_MAP_WC	0x1000
#define BNXT_RE_DBR_PAGE 0x2000
#define BNXT_RE_DB_RECOVERY_PAGE 0x3000

#define BNXT_RE_DB_REPLAY_YIELD_CNT 256
#define BNXT_RE_DB_KEY_INVALID -1
#define BNXT_RE_MAX_DO_PACING 0xFFFF
#define bnxt_re_wm_barrier()		udma_to_device_barrier()
#define unlikely(x)	__builtin_expect(!!(x), 0)
#define likely(x)	__builtin_expect(!!(x), 1)

#define CNA(v, d)					\
	{	.vendor = PCI_VENDOR_ID_##v,		\
		.device = d }
#define BNXT_RE_DEFINE_CNA_TABLE(_name)			\
	static const struct {				\
		unsigned vendor;			\
		unsigned device;			\
	} _name[]

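/*
 * The table of supported NICs is built from these macros; a sketch
 * (the real table lives in the provider's main.c, and
 * PCI_VENDOR_ID_BROADCOM is assumed to be provided by the build):
 *
 *	BNXT_RE_DEFINE_CNA_TABLE(cna_tbl) = {
 *		CNA(BROADCOM, CHIP_NUM_57508),
 *		CNA(BROADCOM, CHIP_NUM_57504),
 *	};
 */
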
struct bnxt_re_dpi {
	__u32 dpindx;
	__u32 wcdpi;
	__u64 *dbpage;
	__u64 *wcdbpg;
};

struct bnxt_re_pd {
	struct ibv_pd ibvpd;
	uint32_t pdid;
};

struct xorshift32_state {
	uint32_t seed;
};

struct bnxt_re_cq {
	struct ibv_cq ibvcq;
	struct bnxt_re_list_head sfhead;
	struct bnxt_re_list_head rfhead;
	struct bnxt_re_list_head prev_cq_head;
	struct bnxt_re_context *cntx;
	struct bnxt_re_queue *cqq;
	struct bnxt_re_dpi *udpi;
	struct bnxt_re_mem *resize_mem;
	struct bnxt_re_mem *mem;
	struct bnxt_re_list_node dbnode;
	uint64_t shadow_db_key;
	uint32_t cqe_sz;
	uint32_t cqid;
	struct xorshift32_state rand;
	int deferred_arm_flags;
	bool first_arm;
	bool deferred_arm;
	bool phase;
	uint8_t dbr_lock;
	void *cq_page;
};

struct bnxt_re_push_buffer {
	__u64 *pbuf; /* push WC buffer */
	__u64 *wqe; /* HW WQE addresses */
	__u64 *ucdb;
	uint32_t st_idx;
	uint32_t qpid;
	uint16_t wcdpi;
	uint16_t nbit;
	uint32_t tail;
};

enum bnxt_re_push_info_mask {
	BNXT_RE_PUSH_SIZE_MASK  = 0x1FUL,
	BNXT_RE_PUSH_SIZE_SHIFT = 0x18UL
};

struct bnxt_re_db_ppp_hdr {
	struct bnxt_re_db_hdr db_hdr;
	__u64 rsv_psz_pidx;
};

struct bnxt_re_push_rec {
	struct bnxt_re_dpi *udpi;
	struct bnxt_re_push_buffer *pbuf;
	__u32 pbmap; /* only 16 bits in use */
};

struct bnxt_re_wrid {
	uint64_t wrid;
	int next_idx;
	uint32_t bytes;
	uint8_t sig;
	uint8_t slots;
	uint8_t wc_opcd;
};

struct bnxt_re_qpcap {
	uint32_t max_swr;
	uint32_t max_rwr;
	uint32_t max_ssge;
	uint32_t max_rsge;
	uint32_t max_inline;
	uint8_t	sqsig;
	uint8_t is_atomic_cap;
};

struct bnxt_re_srq {
	struct ibv_srq ibvsrq;
	struct ibv_srq_attr cap;
	uint32_t srqid;
	struct bnxt_re_context *uctx;
	struct bnxt_re_queue *srqq;
	struct bnxt_re_wrid *srwrid;
	struct bnxt_re_dpi *udpi;
	struct bnxt_re_mem *mem;
	int start_idx;
	int last_idx;
	struct bnxt_re_list_node dbnode;
	uint64_t shadow_db_key;
	struct xorshift32_state rand;
	uint8_t dbr_lock;
	bool arm_req;
};

struct bnxt_re_joint_queue {
	struct bnxt_re_context *cntx;
	struct bnxt_re_queue *hwque;
	struct bnxt_re_wrid *swque;
	uint32_t start_idx;
	uint32_t last_idx;
};

struct bnxt_re_qp {
	struct ibv_qp ibvqp;
	struct bnxt_re_qpcap cap;
	struct bnxt_re_context *cntx;
	struct bnxt_re_chip_ctx *cctx;
	struct bnxt_re_joint_queue *jsqq;
	struct bnxt_re_joint_queue *jrqq;
	struct bnxt_re_dpi *udpi;
	uint64_t wqe_cnt;
	uint16_t mtu;
	uint16_t qpst;
	uint8_t qptyp;
	uint8_t qpmode;
	uint8_t push_st_en;
	uint8_t ppp_idx;
	uint32_t sq_psn;
	uint32_t sq_msn;
	uint32_t qpid;
	uint16_t max_push_sz;
	uint8_t sq_dbr_lock;
	uint8_t rq_dbr_lock;
	struct xorshift32_state rand;
	struct bnxt_re_list_node snode;
	struct bnxt_re_list_node rnode;
	struct bnxt_re_srq *srq;
	struct bnxt_re_cq *rcq;
	struct bnxt_re_cq *scq;
	struct bnxt_re_mem *mem; /* at cache line 6 */
	struct bnxt_re_list_node dbnode;
	uint64_t sq_shadow_db_key;
	uint64_t rq_shadow_db_key;
};

struct bnxt_re_mr {
	struct ibv_mr vmr;
};

struct bnxt_re_ah {
	struct ibv_ah ibvah;
	struct bnxt_re_pd *pd;
	uint32_t avid;
};

struct bnxt_re_dev {
	struct verbs_device vdev;
	struct ibv_device_attr devattr;
	uint32_t pg_size;
	uint32_t cqe_size;
	uint32_t max_cq_depth;
	uint8_t abi_version;
};

struct bnxt_re_res_list {
	struct bnxt_re_list_head head;
	pthread_spinlock_t lock;
};

struct bnxt_re_context {
	struct ibv_context ibvctx;
	struct bnxt_re_dev *rdev;
	struct bnxt_re_chip_ctx *cctx;
	uint64_t comp_mask;
	struct bnxt_re_dpi udpi;
	uint32_t dev_id;
	uint32_t max_qp;
	uint32_t max_srq;
	uint32_t modes;
	void *shpg;
	pthread_mutex_t shlock;
	struct bnxt_re_push_rec *pbrec;
	void *dbr_page;
	void *bar_map;
	struct bnxt_re_res_list qp_dbr_res;
	struct bnxt_re_res_list cq_dbr_res;
	struct bnxt_re_res_list srq_dbr_res;
	void *db_recovery_page;
	struct ibv_comp_channel *dbr_ev_chan;
	struct ibv_cq *dbr_cq;
	pthread_t dbr_thread;
	uint64_t replay_cnt;
};

struct bnxt_re_pacing_data {
	uint32_t do_pacing;
	uint32_t pacing_th;
	uint32_t dev_err_state;
	uint32_t alarm_th;
};

/* Chip context related functions */
bool _is_chip_gen_p5(struct bnxt_re_chip_ctx *cctx);
bool _is_chip_a0(struct bnxt_re_chip_ctx *cctx);
bool _is_chip_thor2(struct bnxt_re_chip_ctx *cctx);
bool _is_chip_gen_p5_thor2(struct bnxt_re_chip_ctx *cctx);

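/*
 * Callers typically branch on the chip family rather than on individual
 * device IDs; a sketch (the doorbell detail is an assumption about how
 * these generation checks are commonly used, not a statement of this
 * driver's exact datapath):
 *
 *	if (_is_chip_gen_p5_thor2(cntx->cctx))
 *		... use the P5/Thor2 doorbell layout ...
 *	else
 *		... use the legacy layout ...
 */
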
/* DB ring functions used internally */
void bnxt_re_ring_rq_db(struct bnxt_re_qp *qp);
void bnxt_re_ring_sq_db(struct bnxt_re_qp *qp);
void bnxt_re_ring_srq_arm(struct bnxt_re_srq *srq);
void bnxt_re_ring_srq_db(struct bnxt_re_srq *srq);
void bnxt_re_ring_cq_db(struct bnxt_re_cq *cq);
void bnxt_re_ring_cq_arm_db(struct bnxt_re_cq *cq, uint8_t aflag);

void bnxt_re_ring_pstart_db(struct bnxt_re_qp *qp,
			    struct bnxt_re_push_buffer *pbuf);
void bnxt_re_ring_pend_db(struct bnxt_re_qp *qp,
			  struct bnxt_re_push_buffer *pbuf);
void bnxt_re_fill_push_wcb(struct bnxt_re_qp *qp,
			   struct bnxt_re_push_buffer *pbuf,
			   uint32_t idx);

void bnxt_re_fill_ppp(struct bnxt_re_push_buffer *pbuf,
		      struct bnxt_re_qp *qp, uint8_t len, uint32_t idx);
int bnxt_re_init_pbuf_list(struct bnxt_re_context *cntx);
void bnxt_re_destroy_pbuf_list(struct bnxt_re_context *cntx);
struct bnxt_re_push_buffer *bnxt_re_get_pbuf(uint8_t *push_st_en,
					     uint8_t ppp_idx,
					     struct bnxt_re_context *cntx);
void bnxt_re_put_pbuf(struct bnxt_re_context *cntx,
		      struct bnxt_re_push_buffer *pbuf);

void bnxt_re_db_recovery(struct bnxt_re_context *cntx);
void *bnxt_re_dbr_thread(void *arg);
bool _is_db_drop_recovery_enable(struct bnxt_re_context *cntx);
int bnxt_re_poll_kernel_cq(struct bnxt_re_cq *cq);
extern int bnxt_single_threaded;
extern int bnxt_dyn_debug;

#define bnxt_re_trace(fmt, ...)					\
do {								\
	if (bnxt_dyn_debug)					\
		fprintf(stderr, fmt, ##__VA_ARGS__);		\
} while (0)

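/*
 * Example (a sketch; tracing only fires when bnxt_dyn_debug is nonzero,
 * which the provider is assumed to set from its debug configuration):
 *
 *	bnxt_re_trace(DEV "qp 0x%x moved to error state\n", qp->qpid);
 */
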
/* pointer conversion functions */
static inline struct bnxt_re_dev *to_bnxt_re_dev(struct ibv_device *ibvdev)
{
	return container_of(ibvdev, struct bnxt_re_dev, vdev);
}

static inline struct bnxt_re_context *to_bnxt_re_context(
		struct ibv_context *ibvctx)
{
	return container_of(ibvctx, struct bnxt_re_context, ibvctx);
}

static inline struct bnxt_re_pd *to_bnxt_re_pd(struct ibv_pd *ibvpd)
{
	return container_of(ibvpd, struct bnxt_re_pd, ibvpd);
}

static inline struct bnxt_re_cq *to_bnxt_re_cq(struct ibv_cq *ibvcq)
{
	return container_of(ibvcq, struct bnxt_re_cq, ibvcq);
}

static inline struct bnxt_re_qp *to_bnxt_re_qp(struct ibv_qp *ibvqp)
{
	return container_of(ibvqp, struct bnxt_re_qp, ibvqp);
}

static inline struct bnxt_re_srq *to_bnxt_re_srq(struct ibv_srq *ibvsrq)
{
	return container_of(ibvsrq, struct bnxt_re_srq, ibvsrq);
}

static inline struct bnxt_re_ah *to_bnxt_re_ah(struct ibv_ah *ibvah)
{
	return container_of(ibvah, struct bnxt_re_ah, ibvah);
}

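/*
 * Each wrapper recovers the provider object from the uverbs object
 * embedded at its head, e.g. (sketch):
 *
 *	struct bnxt_re_qp *qp = to_bnxt_re_qp(ibvqp);
 */
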
/* CQE manipulations */
#define bnxt_re_get_cqe_sz()	(sizeof(struct bnxt_re_req_cqe) +	\
				 sizeof(struct bnxt_re_bcqe))
#define bnxt_re_get_sqe_hdr_sz()	(sizeof(struct bnxt_re_bsqe) +	\
					 sizeof(struct bnxt_re_send))
#define bnxt_re_get_srqe_hdr_sz()	(sizeof(struct bnxt_re_brqe) +	\
					 sizeof(struct bnxt_re_srqe))
#define bnxt_re_get_srqe_sz()		(sizeof(struct bnxt_re_brqe) +	\
					 sizeof(struct bnxt_re_srqe) +	\
					 BNXT_RE_MAX_INLINE_SIZE)
#define bnxt_re_is_cqe_valid(valid, phase)				\
				(((valid) & BNXT_RE_BCQE_PH_MASK) == (phase))

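/*
 * A minimal poll sketch (hypothetical caller; the flags field name is
 * an assumption from the ABI header): compare the CQE's phase bit with
 * the CQ's current phase to decide whether the entry is new, then flip
 * the phase on queue wrap-around.
 *
 *	uint8_t valid = le32toh(hdr->flg_st_typ_ph) & 0xff;
 *	if (!bnxt_re_is_cqe_valid(valid, cq->phase))
 *		return 0;	nothing new
 *	... consume CQE, advance cq->cqq->head ...
 *	bnxt_re_change_cq_phase(cq);
 */
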
static inline void bnxt_re_change_cq_phase(struct bnxt_re_cq *cq)
{
	if (!cq->cqq->head)
		cq->phase = !(cq->phase & BNXT_RE_BCQE_PH_MASK);
}

static inline void *bnxt_re_get_swqe(struct bnxt_re_joint_queue *jqq,
				     uint32_t *wqe_idx)
{
	if (wqe_idx)
		*wqe_idx = jqq->start_idx;
	return &jqq->swque[jqq->start_idx];
}

static inline void bnxt_re_jqq_mod_start(struct bnxt_re_joint_queue *jqq,
					 uint32_t idx)
{
	jqq->start_idx = jqq->swque[idx].next_idx;
}

static inline void bnxt_re_jqq_mod_last(struct bnxt_re_joint_queue *jqq,
					uint32_t idx)
{
	jqq->last_idx = jqq->swque[idx].next_idx;
}

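/*
 * The swque entries are linked through next_idx: start_idx is the
 * producer slot for the next post and last_idx the consumer slot for
 * the next completion. A post/complete cycle looks roughly like this
 * (a sketch, not the provider's literal datapath):
 *
 *	uint32_t idx;
 *	struct bnxt_re_wrid *wrid = bnxt_re_get_swqe(jqq, &idx);
 *	wrid->wrid = wr->wr_id;
 *	bnxt_re_jqq_mod_start(jqq, idx);	advance producer
 *	...
 *	bnxt_re_jqq_mod_last(jqq, idx);		advance consumer on CQE
 */
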
static inline uint32_t bnxt_re_init_depth(uint32_t ent, uint64_t cmask)
{
	return cmask & BNXT_RE_COMP_MASK_UCNTX_POW2_DISABLED ?
		ent : roundup_pow_of_two(ent);
}

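/*
 * For example, bnxt_re_init_depth(100, 0) sizes the queue to 128, while
 * a context reporting BNXT_RE_COMP_MASK_UCNTX_POW2_DISABLED keeps the
 * requested depth of 100 as-is.
 */
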
static inline uint32_t bnxt_re_get_diff(uint64_t cmask)
{
	return cmask & BNXT_RE_COMP_MASK_UCNTX_RSVD_WQE_DISABLED ?
		0 : BNXT_RE_FULL_FLAG_DELTA;
}

static inline int bnxt_re_calc_wqe_sz(int nsge)
{
	/* This is used for both SQ and RQ. If the header sizes ever
	 * diverge, split this into per-queue functions.
	 */
	return sizeof(struct bnxt_re_sge) * nsge + bnxt_re_get_sqe_hdr_sz();
}

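/*
 * For example, a two-SGE WQE occupies
 * 2 * sizeof(struct bnxt_re_sge) + bnxt_re_get_sqe_hdr_sz() bytes,
 * for both the send and receive queues.
 */
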
/* Helper function to copy WQEs to the write-combining push buffer */
static inline void bnxt_re_copy_data_to_pb(struct bnxt_re_push_buffer *pbuf,
					   uint8_t offset, uint32_t idx)
{
	__u64 *src;
	__u64 *dst;
	int indx;

	for (indx = 0; indx < idx; indx++) {
		/* Each WQE slot is two 64-bit words in the push buffer. */
		dst = (__u64 *)(pbuf->pbuf + 2 * (indx + offset));
		src = (__u64 *)pbuf->wqe[indx];
		iowrite64(dst, *src);

		dst++;
		src++;
		iowrite64(dst, *src);
	}
}

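/*
 * A caller sketch (hypothetical sizes): copying four slots starting at
 * offset 1 issues eight iowrite64() stores, landing in
 * pbuf->pbuf[2] through pbuf->pbuf[9].
 *
 *	bnxt_re_copy_data_to_pb(pbuf, 1, 4);
 */
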
static inline int bnxt_re_dp_spin_init(struct bnxt_spinlock *lock, int pshared, int need_lock)
{
	/* pshared is accepted for API symmetry; the lock is always
	 * initialized process-private.
	 */
	lock->in_use = 0;
	lock->need_lock = need_lock;
	return pthread_spin_init(&lock->lock, PTHREAD_PROCESS_PRIVATE);
}

static inline int bnxt_re_dp_spin_destroy(struct bnxt_spinlock *lock)
{
	return pthread_spin_destroy(&lock->lock);
}

static inline int bnxt_spin_lock(struct bnxt_spinlock *lock)
{
	if (lock->need_lock)
		return pthread_spin_lock(&lock->lock);

	if (unlikely(lock->in_use)) {
		fprintf(stderr, "*** ERROR: multithreading violation ***\n"
			"You are running a multithreaded application but\n"
			"you set BNXT_SINGLE_THREADED=1. Please unset it.\n");
		abort();
	} else {
		lock->in_use = 1;
		/* This fence is not at all correct, but it increases the
		 * chance that in_use is detected by another thread without
		 * much runtime cost.
		 */
		atomic_thread_fence(memory_order_acq_rel);
	}

	return 0;
}

static inline int bnxt_spin_unlock(struct bnxt_spinlock *lock)
{
	if (lock->need_lock)
		return pthread_spin_unlock(&lock->lock);

	lock->in_use = 0;
	return 0;
}

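/*
 * Typical datapath usage (a sketch; the lock name is illustrative):
 * need_lock is normally derived from !bnxt_single_threaded, so a
 * process that sets BNXT_SINGLE_THREADED=1 skips the pthread spinlock
 * on every post and poll.
 *
 *	bnxt_re_dp_spin_init(&sq_lock, PTHREAD_PROCESS_PRIVATE,
 *			     !bnxt_single_threaded);
 *	bnxt_re_dp_spin_lock(&sq_lock);
 *	... post WQE, ring doorbell ...
 *	bnxt_re_dp_spin_unlock(&sq_lock);
 */
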
static inline void timespec_sub(const struct timespec *a,
				const struct timespec *b,
				struct timespec *res)
{
	res->tv_sec = a->tv_sec - b->tv_sec;
	res->tv_nsec = a->tv_nsec - b->tv_nsec;
	if (res->tv_nsec < 0) {
		res->tv_sec--;
		res->tv_nsec += BNXT_NSEC_PER_SEC;
	}
}

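/*
 * Example of the nanosecond borrow: a = {1, 100} minus b = {0, 900000000}
 * yields res = {0, 100000100}.
 */
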
/*
 * Busy-wait for the given number of nanoseconds.
 * The maximum wait period allowed is less than one second.
 */
static inline void bnxt_re_sub_sec_busy_wait(uint32_t nsec)
{
	struct timespec start, cur, res;

	if (nsec >= BNXT_NSEC_PER_SEC)
		return;

	if (clock_gettime(CLOCK_REALTIME, &start)) {
		fprintf(stderr, "%s: failed to get time : %d\n",
			__func__, errno);
		return;
	}

	while (1) {
		if (clock_gettime(CLOCK_REALTIME, &cur)) {
			fprintf(stderr, "%s: failed to get time : %d\n",
				__func__, errno);
			return;
		}

		timespec_sub(&cur, &start, &res);
		/* res.tv_sec can go positive if the thread is preempted
		 * across a second boundary; treat that as expiry too.
		 */
		if (res.tv_sec > 0 || res.tv_nsec >= nsec)
			break;
	}
}

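/*
 * Example: bnxt_re_sub_sec_busy_wait(1000) spins for roughly one
 * microsecond; requests of one second or more return immediately.
 */
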
#define BNXT_RE_HW_RETX(a) ((a)->comp_mask & BNXT_RE_COMP_MASK_UCNTX_HW_RETX_ENABLED)
#define bnxt_re_dp_spin_lock(lock)     bnxt_spin_lock(lock)
#define bnxt_re_dp_spin_unlock(lock)   bnxt_spin_unlock(lock)

#endif