/*
 * Copyright (c) 2024, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Doorbell handling functions.
 */

#include <malloc.h>
#include <unistd.h>

#include "abi.h"
#include "main.h"

#define BNXT_RE_DB_FIFO_ROOM_MASK_P5	0x1FFF8000
#define BNXT_RE_MAX_FIFO_DEPTH_P5	0x2c00

#define BNXT_RE_DB_FIFO_ROOM_MASK_P7	0x3FFF8000
#define BNXT_RE_MAX_FIFO_DEPTH_P7	0x8000

#define BNXT_RE_DB_FIFO_ROOM_SHIFT      15
#define BNXT_RE_DB_THRESHOLD		20

#define BNXT_RE_DB_FIFO_ROOM_MASK(ctx)	\
	(_is_chip_thor2((ctx)) ? \
	 BNXT_RE_DB_FIFO_ROOM_MASK_P7 :\
	 BNXT_RE_DB_FIFO_ROOM_MASK_P5)
#define BNXT_RE_MAX_FIFO_DEPTH(ctx)	\
	(_is_chip_thor2((ctx)) ? \
	 BNXT_RE_MAX_FIFO_DEPTH_P7 :\
	 BNXT_RE_MAX_FIFO_DEPTH_P5)

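/*
 * A note on the masks above: the FIFO "room" (free-slot count) field sits
 * at bits [28:15] of the readback register on P5 chips (0x1FFF8000) and at
 * bits [29:15] on Thor2/P7 (0x3FFF8000), hence the shared shift of 15.
 * calculate_fifo_occupancy() below converts that free count into an
 * occupancy by subtracting it from the per-chip FIFO depth.
 */
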
static uint32_t xorshift32(struct xorshift32_state *state)
{
	/* Algorithm "xor" from p. 4 of Marsaglia, "Xorshift RNGs" */
	uint32_t x = state->seed;

	x ^= x << 13;
	x ^= x >> 17;
	x ^= x << 5;
	return state->seed = x;
}

static uint16_t rnd(struct xorshift32_state *state, uint16_t range)
{
	/* range must be 2^n - 1, i.e. an all-ones bit mask */
	return (xorshift32(state) & range);
}

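/*
 * rnd() depends on the mask trick: when range is 2^n - 1, the AND yields a
 * uniform value in [0, range]. Both call sites keep that invariant:
 * wait_time - 1 in bnxt_re_do_pacing() is always one less than a power of
 * two (wait_time doubles from 1), and BNXT_RE_MAX_DO_PACING (defined
 * elsewhere in the library) is assumed to be an all-ones mask as well.
 */
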
static int calculate_fifo_occupancy(struct bnxt_re_context *cntx)
{
	uint32_t *dbr_map = cntx->bar_map + 0x1a8;
	uint32_t read_val, fifo_occup;

	read_val = *dbr_map;
	fifo_occup = BNXT_RE_MAX_FIFO_DEPTH(cntx->cctx) -
		((read_val & BNXT_RE_DB_FIFO_ROOM_MASK(cntx->cctx)) >>
		 BNXT_RE_DB_FIFO_ROOM_SHIFT);

	return fifo_occup;
}

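/*
 * Worked example, assuming a P5 chip: if the room field read back through
 * the mapping at BAR offset 0x1a8 decodes to 0x2000 free slots, the
 * occupancy is 0x2c00 - 0x2000 = 0xc00 entries currently queued in the
 * doorbell FIFO. The pacing loop compares this against pacing_th to
 * decide whether to keep backing off.
 */
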
static inline uint32_t find_min(uint32_t x, uint32_t y)
{
	return (y > x ? x : y);
}

int bnxt_re_do_pacing(struct bnxt_re_context *cntx, struct xorshift32_state *state)
{
	/* The first 4 bytes of the shared page (pacing_info) contain the
	 * DBR pacing information. The second 4 bytes (pacing_th) contain
	 * the pacing threshold value used to decide whether to add a
	 * delay or not.
	 */
	struct bnxt_re_pacing_data *pacing_data =
		(struct bnxt_re_pacing_data *)cntx->dbr_page;
	uint32_t wait_time = 1;
	uint32_t fifo_occup;

	if (!pacing_data)
		return 0;
	/* If the device is in the error recovery state, return an error
	 * so that no new doorbells are rung in this state.
	 */
	if (pacing_data->dev_err_state)
		return -EFAULT;

	if (rnd(state, BNXT_RE_MAX_DO_PACING) < pacing_data->do_pacing) {
		while ((fifo_occup = calculate_fifo_occupancy(cntx))
			> pacing_data->pacing_th) {
			struct bnxt_re_cq *cq;
			uint32_t usec_wait;

			if (pacing_data->alarm_th && fifo_occup > pacing_data->alarm_th) {
				cq = container_of(cntx->dbr_cq, struct bnxt_re_cq, ibvcq);
				bnxt_re_poll_kernel_cq(cq);
			}
			usec_wait = rnd(state, wait_time - 1);
			if (usec_wait)
				bnxt_re_sub_sec_busy_wait(usec_wait * 1000);
			/* wait time capped at 128 us */
			wait_time = find_min(wait_time * 2, 128);
		}
	}
	return 0;
}

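/*
 * The loop above is a bounded exponential backoff with jitter: wait_time
 * doubles each pass (1, 2, 4, ... capped at 128) and the actual delay is
 * a random value below the current cap, scaled by 1000 (presumably to
 * nanoseconds) for bnxt_re_sub_sec_busy_wait(). The first pass never
 * sleeps, since rnd(state, 0) is always 0. When occupancy crosses
 * alarm_th, the kernel CQ is polled, presumably to help drain
 * outstanding doorbells.
 */
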
static inline void bnxt_re_ring_db(struct bnxt_re_dpi *dpi, __u64 key,
				   uint64_t *db_key, uint8_t *lock)
{
	while (1) {
		if (__sync_bool_compare_and_swap(lock, 0, 1)) {
			*db_key = key;
			bnxt_re_wm_barrier();
			iowrite64(dpi->dbpage, key);
			bnxt_re_wm_barrier();
			*lock = 0;
			break;
		}
	}
}

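/*
 * bnxt_re_ring_db() pairs every MMIO doorbell write with an update of the
 * queue's shadow key under a one-byte CAS spin-lock. The shadow key is
 * what bnxt_re_replay_db() re-rings after error recovery, and the
 * write-memory barriers order the shadow store, the doorbell write, and
 * the lock release against each other.
 */
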
static inline void bnxt_re_init_push_hdr(struct bnxt_re_db_hdr *hdr,
					 uint32_t indx, uint32_t qid,
					 uint32_t typ, uint32_t pidx)
{
	__u64 key_lo, key_hi;

	key_lo = (((pidx & BNXT_RE_DB_PILO_MASK) << BNXT_RE_DB_PILO_SHIFT) |
		  (indx & BNXT_RE_DB_INDX_MASK));
	key_hi = ((((pidx & BNXT_RE_DB_PIHI_MASK) << BNXT_RE_DB_PIHI_SHIFT) |
		   (qid & BNXT_RE_DB_QID_MASK)) |
		  ((typ & BNXT_RE_DB_TYP_MASK) << BNXT_RE_DB_TYP_SHIFT) |
		  (0x1UL << BNXT_RE_DB_VALID_SHIFT));
	hdr->typ_qid_indx = htole64((key_lo | (key_hi << 32)));
}

static inline void bnxt_re_init_db_hdr(struct bnxt_re_db_hdr *hdr,
				       uint32_t indx, uint32_t toggle,
				       uint32_t qid, uint32_t typ)
{
	__u64 key_lo, key_hi;

	key_lo = htole32(indx | toggle);
	key_hi = ((qid & BNXT_RE_DB_QID_MASK) |
		  ((typ & BNXT_RE_DB_TYP_MASK) << BNXT_RE_DB_TYP_SHIFT) |
		  (0x1UL << BNXT_RE_DB_VALID_SHIFT));
	hdr->typ_qid_indx = htole64((key_lo | (key_hi << 32)));
}

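/*
 * Both builders above assemble the 64-bit doorbell key: the low 32 bits
 * carry the producer index (plus the toggle bits for CQ doorbells), the
 * high 32 bits carry the queue id, the doorbell type, and the valid bit.
 * The exact field widths come from the BNXT_RE_DB_*_MASK/_SHIFT
 * definitions in abi.h. The push variant additionally spreads the push
 * buffer index across the two halves (PILO in the low word, PIHI in the
 * high word).
 */
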
static inline void __bnxt_re_ring_pend_db(__u64 *ucdb, __u64 key,
					  struct bnxt_re_qp *qp)
{
	struct bnxt_re_db_hdr hdr;

	bnxt_re_init_db_hdr(&hdr,
			    (*qp->jsqq->hwque->dbtail |
			     ((qp->jsqq->hwque->flags &
			       BNXT_RE_FLAG_EPOCH_TAIL_MASK) <<
			      BNXT_RE_DB_EPOCH_TAIL_SHIFT)), 0,
			    qp->qpid,
			    BNXT_RE_QUE_TYPE_SQ);

	while (1) {
		if (__sync_bool_compare_and_swap(&qp->sq_dbr_lock, 0, 1)) {
			qp->sq_shadow_db_key = hdr.typ_qid_indx;
			bnxt_re_wm_barrier();
			iowrite64(ucdb, key);
			bnxt_re_wm_barrier();
			qp->sq_dbr_lock = 0;
			break;
		}
	}
}

void bnxt_re_ring_rq_db(struct bnxt_re_qp *qp)
{
	struct bnxt_re_db_hdr hdr;

	if (bnxt_re_do_pacing(qp->cntx, &qp->rand))
		return;
	bnxt_re_init_db_hdr(&hdr,
			    (*qp->jrqq->hwque->dbtail |
			     ((qp->jrqq->hwque->flags &
			       BNXT_RE_FLAG_EPOCH_TAIL_MASK) <<
			      BNXT_RE_DB_EPOCH_TAIL_SHIFT)), 0,
			    qp->qpid,
			    BNXT_RE_QUE_TYPE_RQ);
	bnxt_re_ring_db(qp->udpi, hdr.typ_qid_indx, &qp->rq_shadow_db_key,
			&qp->rq_dbr_lock);
}

void bnxt_re_ring_sq_db(struct bnxt_re_qp *qp)
{
	struct bnxt_re_db_hdr hdr;

	if (bnxt_re_do_pacing(qp->cntx, &qp->rand))
		return;
	bnxt_re_init_db_hdr(&hdr,
			    (*qp->jsqq->hwque->dbtail |
			     ((qp->jsqq->hwque->flags &
			       BNXT_RE_FLAG_EPOCH_TAIL_MASK) <<
			      BNXT_RE_DB_EPOCH_TAIL_SHIFT)), 0,
			    qp->qpid,
			    BNXT_RE_QUE_TYPE_SQ);
	bnxt_re_ring_db(qp->udpi, hdr.typ_qid_indx, &qp->sq_shadow_db_key,
			&qp->sq_dbr_lock);
}

void bnxt_re_ring_srq_db(struct bnxt_re_srq *srq)
{
	struct bnxt_re_db_hdr hdr;

	if (bnxt_re_do_pacing(srq->uctx, &srq->rand))
		return;
	bnxt_re_init_db_hdr(&hdr,
			    (srq->srqq->tail |
			     ((srq->srqq->flags &
			       BNXT_RE_FLAG_EPOCH_TAIL_MASK) <<
			      BNXT_RE_DB_EPOCH_TAIL_SHIFT)), 0,
			    srq->srqid, BNXT_RE_QUE_TYPE_SRQ);
	bnxt_re_ring_db(srq->udpi, hdr.typ_qid_indx, &srq->shadow_db_key,
			&srq->dbr_lock);
}

void bnxt_re_ring_srq_arm(struct bnxt_re_srq *srq)
{
	struct bnxt_re_db_hdr hdr;

	if (bnxt_re_do_pacing(srq->uctx, &srq->rand))
		return;
	bnxt_re_init_db_hdr(&hdr, srq->cap.srq_limit, 0, srq->srqid,
			    BNXT_RE_QUE_TYPE_SRQ_ARM);
	bnxt_re_ring_db(srq->udpi, hdr.typ_qid_indx, &srq->shadow_db_key,
			&srq->dbr_lock);
}

void bnxt_re_ring_cq_db(struct bnxt_re_cq *cq)
{
	struct bnxt_re_db_hdr hdr;

	if (bnxt_re_do_pacing(cq->cntx, &cq->rand))
		return;
	bnxt_re_init_db_hdr(&hdr,
			    (cq->cqq->head |
			     ((cq->cqq->flags &
			       BNXT_RE_FLAG_EPOCH_HEAD_MASK) <<
			      BNXT_RE_DB_EPOCH_HEAD_SHIFT)), 0,
			    cq->cqid,
			    BNXT_RE_QUE_TYPE_CQ);
	bnxt_re_ring_db(cq->udpi, hdr.typ_qid_indx, &cq->shadow_db_key,
			&cq->dbr_lock);
}

void bnxt_re_ring_cq_arm_db(struct bnxt_re_cq *cq, uint8_t aflag)
{
	uint32_t *cq_page = cq->cq_page;
	struct bnxt_re_db_hdr hdr;
	uint32_t toggle = 0;

	if (cq_page)
		toggle = *cq_page;

	if (bnxt_re_do_pacing(cq->cntx, &cq->rand))
		return;
	bnxt_re_init_db_hdr(&hdr,
			    (cq->cqq->head |
			     ((cq->cqq->flags &
			       BNXT_RE_FLAG_EPOCH_HEAD_MASK) <<
			      BNXT_RE_DB_EPOCH_HEAD_SHIFT)),
			    toggle << BNXT_RE_DB_TOGGLE_SHIFT,
			    cq->cqid, aflag);
	bnxt_re_ring_db(cq->udpi, hdr.typ_qid_indx, &cq->shadow_db_key,
			&cq->dbr_lock);
}

void bnxt_re_ring_pstart_db(struct bnxt_re_qp *qp,
			    struct bnxt_re_push_buffer *pbuf)
{
	__u64 key;

	if (bnxt_re_do_pacing(qp->cntx, &qp->rand))
		return;
	key = ((((pbuf->wcdpi & BNXT_RE_DB_PIHI_MASK) <<
		  BNXT_RE_DB_PIHI_SHIFT) | (pbuf->qpid & BNXT_RE_DB_QID_MASK)) |
	       ((BNXT_RE_PUSH_TYPE_START & BNXT_RE_DB_TYP_MASK) <<
		 BNXT_RE_DB_TYP_SHIFT) | (0x1UL << BNXT_RE_DB_VALID_SHIFT));
	key <<= 32;
	key |= ((((__u32)pbuf->wcdpi & BNXT_RE_DB_PILO_MASK) <<
		  BNXT_RE_DB_PILO_SHIFT) | (pbuf->st_idx &
					    BNXT_RE_DB_INDX_MASK));
	bnxt_re_wm_barrier();
	iowrite64(pbuf->ucdb, key);
}

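/*
 * Unlike __bnxt_re_ring_pend_db(), the push-start doorbell above is
 * written straight to the user copy doorbell (ucdb) without taking the
 * SQ lock or recording a shadow key, so it is never replayed by the
 * recovery path; only the push-end path records a (regular SQ) shadow
 * key.
 */
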
void bnxt_re_ring_pend_db(struct bnxt_re_qp *qp,
			  struct bnxt_re_push_buffer *pbuf)
{
	__u64 key;

	if (bnxt_re_do_pacing(qp->cntx, &qp->rand))
		return;
	key = ((((pbuf->wcdpi & BNXT_RE_DB_PIHI_MASK) <<
		  BNXT_RE_DB_PIHI_SHIFT) | (pbuf->qpid & BNXT_RE_DB_QID_MASK)) |
	       ((BNXT_RE_PUSH_TYPE_END & BNXT_RE_DB_TYP_MASK) <<
		 BNXT_RE_DB_TYP_SHIFT) | (0x1UL << BNXT_RE_DB_VALID_SHIFT));
	key <<= 32;
	key |= ((((__u32)pbuf->wcdpi & BNXT_RE_DB_PILO_MASK) <<
		  BNXT_RE_DB_PILO_SHIFT) | (pbuf->tail &
					    BNXT_RE_DB_INDX_MASK));
	__bnxt_re_ring_pend_db(pbuf->ucdb, key, qp);
}

void bnxt_re_fill_ppp(struct bnxt_re_push_buffer *pbuf,
		      struct bnxt_re_qp *qp, uint8_t len, uint32_t idx)
{
	struct bnxt_re_db_ppp_hdr phdr = {};
	__u64 *dst, *src;
	__u8 plen;
	int indx;

	src = (__u64 *)&phdr;
	plen = len + sizeof(phdr) + bnxt_re_get_sqe_hdr_sz();

	bnxt_re_init_db_hdr(&phdr.db_hdr,
			    (*qp->jsqq->hwque->dbtail |
			     ((qp->jsqq->hwque->flags &
			       BNXT_RE_FLAG_EPOCH_TAIL_MASK) <<
			      BNXT_RE_DB_EPOCH_TAIL_SHIFT)), 0,
			    qp->qpid,
			    BNXT_RE_QUE_TYPE_SQ);

	phdr.rsv_psz_pidx = ((pbuf->st_idx & BNXT_RE_DB_INDX_MASK) |
			     (((plen % 8 ? (plen / 8) + 1 :
				plen / 8) & BNXT_RE_PUSH_SIZE_MASK) <<
			      BNXT_RE_PUSH_SIZE_SHIFT));

	bnxt_re_wm_barrier();
	for (indx = 0; indx < 2; indx++) {
		dst = (__u64 *)(pbuf->pbuf + indx);
		iowrite64(dst, *src);
		src++;
	}
	bnxt_re_copy_data_to_pb(pbuf, 1, idx);
	mmio_flush_writes();
}

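/*
 * bnxt_re_fill_ppp() stages a push: the two 64-bit words of the PPP
 * header are written to the write-combining buffer, then the WQE itself
 * follows via bnxt_re_copy_data_to_pb(). The (plen % 8 ? plen / 8 + 1 :
 * plen / 8) expression simply rounds the push length up to 8-byte units
 * before packing it into the push-size field of rsv_psz_pidx.
 */
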
void bnxt_re_fill_push_wcb(struct bnxt_re_qp *qp,
			   struct bnxt_re_push_buffer *pbuf, uint32_t idx)
{
	bnxt_re_ring_pstart_db(qp, pbuf);
	mmio_wc_start();
	bnxt_re_copy_data_to_pb(pbuf, 0, idx);
	/* Flush WQE write before push end db. */
	mmio_flush_writes();
	bnxt_re_ring_pend_db(qp, pbuf);
}

int bnxt_re_init_pbuf_list(struct bnxt_re_context *ucntx)
{
	struct bnxt_re_push_buffer *pbuf;
	int indx, wqesz;
	int size, offt;
	__u64 wcpage;
	__u64 dbpage;
	void *base;

	size = (sizeof(*ucntx->pbrec) +
		16 * (sizeof(*ucntx->pbrec->pbuf) +
		      sizeof(struct bnxt_re_push_wqe)));
	ucntx->pbrec = calloc(1, size);
	if (!ucntx->pbrec)
		goto out;

	offt = sizeof(*ucntx->pbrec);
	base = ucntx->pbrec;
	ucntx->pbrec->pbuf = (base + offt);
	ucntx->pbrec->pbmap = ~0x00;
	ucntx->pbrec->pbmap &= ~0x7fff; /* 15 bits */
	ucntx->pbrec->udpi = &ucntx->udpi;

	wqesz = sizeof(struct bnxt_re_push_wqe);
	wcpage = (__u64)ucntx->udpi.wcdbpg;
	dbpage = (__u64)ucntx->udpi.dbpage;
	offt = sizeof(*ucntx->pbrec->pbuf) * 16;
	base = (char *)ucntx->pbrec->pbuf + offt;
	for (indx = 0; indx < 16; indx++) {
		pbuf = &ucntx->pbrec->pbuf[indx];
		pbuf->wqe = base + indx * wqesz;
		pbuf->pbuf = (__u64 *)(wcpage + indx * wqesz);
		pbuf->ucdb = (__u64 *)(dbpage + (indx + 1) * sizeof(__u64));
		pbuf->wcdpi = ucntx->udpi.wcdpi;
	}

	return 0;
out:
	return -ENOMEM;
}

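/*
 * Layout of the single allocation made above: the bnxt_re_push_rec
 * header, then an array of 16 bnxt_re_push_buffer descriptors, then 16
 * push-WQE staging slots. pbmap starts with only bits 0-14 clear
 * (~0x00 then &= ~0x7fff), so the bitmap allocator in bnxt_re_get_pbuf()
 * has 15 claimable slots and every higher bit reads as permanently busy.
 */
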
struct bnxt_re_push_buffer *bnxt_re_get_pbuf(uint8_t *push_st_en,
					     uint8_t ppp_idx,
					     struct bnxt_re_context *cntx)
{
	struct bnxt_re_push_buffer *pbuf = NULL;
	uint8_t buf_state = 0;
	__u32 old;
	int bit;

	if (_is_chip_thor2(cntx->cctx)) {
		buf_state = !!(*push_st_en & BNXT_RE_PPP_STATE_MASK);
		pbuf = &cntx->pbrec->pbuf[(ppp_idx * 2) + buf_state];
		/* Flip */
		*push_st_en ^= 1UL << BNXT_RE_PPP_ST_SHIFT;
	} else {
		old = cntx->pbrec->pbmap;
		while ((bit = __builtin_ffs(~cntx->pbrec->pbmap)) != 0) {
			if (__sync_bool_compare_and_swap
						(&cntx->pbrec->pbmap,
						 old,
						 (old | 0x01 << (bit - 1))))
				break;
			old = cntx->pbrec->pbmap;
		}

		if (bit) {
			pbuf = &cntx->pbrec->pbuf[bit];
			pbuf->nbit = bit;
		}
	}

	return pbuf;
}

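/*
 * On pre-Thor2 chips, __builtin_ffs(~pbmap) returns the 1-based position
 * of the lowest clear bit and the CAS claims it; because the slot index
 * used is the 1-based bit number itself, pbuf[0] is never handed out and
 * effectively stays reserved. On Thor2 no bitmap is needed: each caller
 * alternates between a pair of buffers selected by ppp_idx (presumably
 * one pair per QP), flipping the state bit in *push_st_en on every call.
 */
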
void bnxt_re_put_pbuf(struct bnxt_re_context *cntx,
		      struct bnxt_re_push_buffer *pbuf)
{
	struct bnxt_re_push_rec *pbrec;
	__u32 old;
	int bit;

	if (_is_chip_thor2(cntx->cctx))
		return;

	pbrec = cntx->pbrec;

	if (pbuf->nbit) {
		bit = pbuf->nbit;
		pbuf->nbit = 0;
		old = pbrec->pbmap;
		while (!__sync_bool_compare_and_swap(&pbrec->pbmap, old,
						     (old & (~(0x01 <<
							       (bit - 1))))))
			old = pbrec->pbmap;
	}
}

void bnxt_re_destroy_pbuf_list(struct bnxt_re_context *cntx)
{
	free(cntx->pbrec);
}

void bnxt_re_replay_db(struct bnxt_re_context *cntx,
		       struct xorshift32_state *state, struct bnxt_re_dpi *dpi,
		       uint64_t *shadow_key, uint8_t *dbr_lock)
{
	if (bnxt_re_do_pacing(cntx, state))
		return;
	cntx->replay_cnt++;
	if (cntx->replay_cnt % BNXT_RE_DB_REPLAY_YIELD_CNT == 0)
		pthread_yield();
	if (__sync_bool_compare_and_swap(dbr_lock, 0, 1)) {
		bnxt_re_wm_barrier();
		if (*shadow_key == BNXT_RE_DB_KEY_INVALID) {
			*dbr_lock = 0;
			return;
		}
		iowrite64(dpi->dbpage, *shadow_key);
		bnxt_re_wm_barrier();
		*dbr_lock = 0;
	}
}

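/*
 * bnxt_re_replay_db() re-rings a doorbell only when it can take the lock
 * and the shadow key is valid; BNXT_RE_DB_KEY_INVALID means no doorbell
 * has been recorded for that queue yet, so there is nothing to replay.
 * The periodic pthread_yield() every BNXT_RE_DB_REPLAY_YIELD_CNT replays
 * gives other threads a chance to run during a long recovery pass.
 */
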
void bnxt_re_db_recovery(struct bnxt_re_context *cntx)
{
	struct bnxt_re_list_node *cur, *tmp;
	struct bnxt_re_qp *qp;
	struct bnxt_re_cq *cq;
	struct bnxt_re_srq *srq;

	pthread_spin_lock(&cntx->qp_dbr_res.lock);
	list_for_each_node_safe(cur, tmp, &cntx->qp_dbr_res.head) {
		qp = list_node(cur, struct bnxt_re_qp, dbnode);
		bnxt_re_replay_db(cntx, &qp->rand, qp->udpi,
				  &qp->sq_shadow_db_key, &qp->sq_dbr_lock);
		bnxt_re_replay_db(cntx, &qp->rand, qp->udpi,
				  &qp->rq_shadow_db_key, &qp->rq_dbr_lock);
	}
	pthread_spin_unlock(&cntx->qp_dbr_res.lock);
	pthread_spin_lock(&cntx->cq_dbr_res.lock);
	list_for_each_node_safe(cur, tmp, &cntx->cq_dbr_res.head) {
		cq = list_node(cur, struct bnxt_re_cq, dbnode);
		bnxt_re_replay_db(cntx, &cq->rand, cq->udpi,
				  &cq->shadow_db_key, &cq->dbr_lock);
	}
	pthread_spin_unlock(&cntx->cq_dbr_res.lock);
	pthread_spin_lock(&cntx->srq_dbr_res.lock);
	list_for_each_node_safe(cur, tmp, &cntx->srq_dbr_res.head) {
		srq = list_node(cur, struct bnxt_re_srq, dbnode);
		bnxt_re_replay_db(cntx, &srq->rand, srq->udpi,
				  &srq->shadow_db_key, &srq->dbr_lock);
	}
	pthread_spin_unlock(&cntx->srq_dbr_res.lock);
}

void *bnxt_re_dbr_thread(void *arg)
{
	uint32_t *epoch, *epoch_ack, usr_epoch;
	struct bnxt_re_context *cntx = arg;
	struct ibv_cq *ev_cq;
	void *ev_ctx;
	int ret;

	while (1) {
		ret = ibv_get_cq_event(cntx->dbr_ev_chan, &ev_cq, &ev_ctx);
		if (ret) {
			fprintf(stderr, "Failed to get cq_event\n");
			pthread_exit(NULL);
		}
		epoch = cntx->db_recovery_page;
		epoch_ack = epoch + 1;
		if (!epoch || !epoch_ack) {
			fprintf(stderr, "DB recovery page is NULL\n");
			pthread_exit(NULL);
		}
		if (*epoch == *epoch_ack) {
			ibv_ack_cq_events(ev_cq, 1);
			continue;
		}
		usr_epoch = *epoch;
		bnxt_re_db_recovery(cntx);
		*epoch_ack = usr_epoch;
		ibv_ack_cq_events(ev_cq, 1);
	}
}
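/*
 * Recovery handshake in bnxt_re_dbr_thread(): the first two 32-bit words
 * of db_recovery_page act as an epoch counter and its acknowledgement,
 * with the epoch presumably advanced on the driver side before dbr_cq is
 * signalled. The thread wakes via ibv_get_cq_event(), replays every
 * registered doorbell, and echoes the epoch into the ack word; a wakeup
 * with epoch == ack is treated as stale and simply re-acked.
 */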
567