xref: /freebsd/sys/dev/bnxt/bnxt_re/bnxt_re.h (revision baabb919345f05e9892c4048a1521e5da1403060)
1 /*
2  * Copyright (c) 2015-2024, Broadcom. All rights reserved.  The term
3  * Broadcom refers to Broadcom Limited and/or its subsidiaries.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in
13  *    the documentation and/or other materials provided with the
14  *    distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
18  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
23  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
25  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
26  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  *
28  * Description: main (header)
29  */
30 
31 #ifndef __BNXT_RE_H__
32 #define __BNXT_RE_H__
33 
34 #include <linux/module.h>
35 #include <linux/netdevice.h>
36 #include <linux/mutex.h>
37 #include <linux/list.h>
38 #include <linux/rculist.h>
39 #include <linux/spinlock.h>
40 #include <net/ipv6.h>
41 #include <linux/if_ether.h>
42 #include <linux/debugfs.h>
43 #include <linux/seq_file.h>
44 #include <linux/interrupt.h>
45 #include <linux/vmalloc.h>
46 #include <linux/delay.h>
47 #include <rdma/ib_verbs.h>
48 #include <rdma/ib_user_verbs.h>
49 #include <rdma/ib_umem.h>
50 #include <rdma/ib_addr.h>
51 #include <rdma/ib_mad.h>
52 #include <rdma/ib_cache.h>
53 #include <linux/pci.h>
54 
55 #include "bnxt.h"
56 #include "bnxt_ulp.h"
57 #include "hsi_struct_def.h"
58 #include "qplib_res.h"
59 #include "qplib_sp.h"
60 #include "qplib_fp.h"
61 #include "qplib_rcfw.h"
62 #include "ib_verbs.h"
63 #include "stats.h"
64 
65 #define ROCE_DRV_MODULE_NAME		"bnxt_re"
66 #define ROCE_DRV_MODULE_VERSION		"230.0.133.0"
67 #define ROCE_DRV_MODULE_RELDATE		"April 22, 2024"
68 
69 #define BNXT_RE_REF_WAIT_COUNT		20
70 #define BNXT_RE_ROCE_V1_ETH_TYPE	0x8915
71 #define BNXT_RE_ROCE_V2_PORT_NO		4791
72 #define BNXT_RE_RES_FREE_WAIT_COUNT	1000
73 
74 #define BNXT_RE_PAGE_SHIFT_4K		(12)
75 #define BNXT_RE_PAGE_SHIFT_8K		(13)
76 #define BNXT_RE_PAGE_SHIFT_64K		(16)
77 #define BNXT_RE_PAGE_SHIFT_2M		(21)
78 #define BNXT_RE_PAGE_SHIFT_8M		(23)
79 #define BNXT_RE_PAGE_SHIFT_1G		(30)
80 
81 #define BNXT_RE_PAGE_SIZE_4K		BIT(BNXT_RE_PAGE_SHIFT_4K)
82 #define BNXT_RE_PAGE_SIZE_8K		BIT(BNXT_RE_PAGE_SHIFT_8K)
83 #define BNXT_RE_PAGE_SIZE_64K		BIT(BNXT_RE_PAGE_SHIFT_64K)
84 #define BNXT_RE_PAGE_SIZE_2M		BIT(BNXT_RE_PAGE_SHIFT_2M)
85 #define BNXT_RE_PAGE_SIZE_8M		BIT(BNXT_RE_PAGE_SHIFT_8M)
86 #define BNXT_RE_PAGE_SIZE_1G		BIT(BNXT_RE_PAGE_SHIFT_1G)
87 
88 #define BNXT_RE_MAX_MR_SIZE_LOW		BIT(BNXT_RE_PAGE_SHIFT_1G)
89 #define BNXT_RE_MAX_MR_SIZE_HIGH	BIT(39)
90 #define BNXT_RE_MAX_MR_SIZE		BNXT_RE_MAX_MR_SIZE_HIGH
91 
92 /* Number of MRs to reserve for PF, leaving remainder for VFs */
93 #define BNXT_RE_RESVD_MR_FOR_PF		(32 * 1024)
94 #define BNXT_RE_MAX_GID_PER_VF		128
95 
96 #define BNXT_RE_MAX_VF_QPS_PER_PF	(6 * 1024)
97 
98 /**
99  * min_not_zero - return the minimum that is _not_ zero, unless both are zero
100  * @x: value1
101  * @y: value2
102  */
103 #define min_not_zero(x, y) ({			\
104 	typeof(x) __x = (x);			\
105 	typeof(y) __y = (y);			\
106 	__x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
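
/*
 * Illustrative behaviour of min_not_zero() (editor's example, not part of
 * the original header): when one argument is zero the other is returned,
 * otherwise the smaller value wins.
 *
 *	min_not_zero(0, 8)	evaluates to 8
 *	min_not_zero(4, 0)	evaluates to 4
 *	min_not_zero(4, 8)	evaluates to 4
 *	min_not_zero(0, 0)	evaluates to 0
 */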
107 
108 struct ib_mr_init_attr {
109 	int		max_reg_descriptors;
110 	u32		flags;
111 };
112 
113 struct bnxt_re_dev;
114 
115 int bnxt_re_register_netdevice_notifier(struct notifier_block *nb);
116 int bnxt_re_unregister_netdevice_notifier(struct notifier_block *nb);
117 int ib_register_device_compat(struct bnxt_re_dev *rdev);
118 
119 #ifndef __struct_group
120 #define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \
121 	union { \
122 		struct { MEMBERS } ATTRS; \
123 		struct TAG { MEMBERS } ATTRS NAME; \
124 	}
125 #endif /* __struct_group */
126 #ifndef struct_group_attr
127 #define struct_group_attr(NAME, ATTRS, MEMBERS...) \
128 	__struct_group(/* no tag */, NAME, ATTRS, MEMBERS)
129 #endif /* struct_group_attr */
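
/*
 * For illustration (editor's note, assuming the fallback above is used):
 * the struct_group_attr(cfg, __packed, ...) invocation in
 * struct bnxt_re_cos2bw_cfg further down expands to an anonymous union
 * that exposes the members both as plain fields of the enclosing struct
 * and as a packed sub-struct named 'cfg', so the whole group can be
 * copied or compared in one operation while individual members such as
 * queue_id remain directly addressable.
 */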
130 /*
131  * Percentage of resources of each type reserved for PF.
132  * Remaining resources are divided equally among VFs.
133  * [0, 100]
134  */
135 
136 #define BNXT_RE_RQ_WQE_THRESHOLD	32
137 #define BNXT_RE_UD_QP_HW_STALL		0x400000
138 
139 /*
140  * Set the default ACK delay value to 16, which gives a default
141  * timeout of approx. 260 ms (4 usec * 2^timeout = 4 usec * 2^16).
142  */
143 
144 #define BNXT_RE_DEFAULT_ACK_DELAY	16
145 #define BNXT_RE_BOND_PF_MAX		2
146 
147 #define BNXT_RE_STATS_CTX_UPDATE_TIMER	250
148 #define BNXT_RE_30SEC_MSEC		(30 * 1000)
149 
150 #define BNXT_RE_BOND_RESCHED_CNT	10
151 
152 #define BNXT_RE_CHIP_NUM_57454         0xC454
153 #define BNXT_RE_CHIP_NUM_57452         0xC452
154 
155 #define BNXT_RE_CHIP_NUM_5745X(chip_num)          \
156 	((chip_num) == BNXT_RE_CHIP_NUM_57454 ||       \
157 	 (chip_num) == BNXT_RE_CHIP_NUM_57452)
158 
159 #define BNXT_RE_MIN_KERNEL_QP_TX_DEPTH	4096
160 #define BNXT_RE_STOP_QPS_BUDGET		200
161 
162 #define BNXT_RE_HWRM_CMD_TIMEOUT(rdev) \
163 		((rdev)->chip_ctx->hwrm_cmd_max_timeout * 1000)
164 
165 extern unsigned int min_tx_depth;
166 extern struct mutex bnxt_re_dev_lock;
167 extern struct mutex bnxt_re_mutex;
168 extern struct list_head bnxt_re_dev_list;
169 
170 struct bnxt_re_ring_attr {
171 	dma_addr_t	*dma_arr;
172 	int		pages;
173 	int	 	type;
174 	u32		depth;
175 	u32		lrid; /* Logical ring id */
176 	u16		flags;
177 	u8		mode;
178 	u8		rsvd;
179 };
180 
181 #define BNXT_RE_MAX_DEVICES		256
182 #define BNXT_RE_MSIX_FROM_MOD_PARAM	-1
183 #define BNXT_RE_MIN_MSIX		2
184 #define BNXT_RE_MAX_MSIX_VF		2
185 #define BNXT_RE_MAX_MSIX_PF		9
186 #define BNXT_RE_MAX_MSIX_NPAR_PF	5
187 #define BNXT_RE_MAX_MSIX		64
188 #define BNXT_RE_MAX_MSIX_GEN_P5_PF	BNXT_RE_MAX_MSIX
189 #define BNXT_RE_GEN_P5_MAX_VF		64
190 
191 struct bnxt_re_nq_record {
192 	struct bnxt_msix_entry	msix_entries[BNXT_RE_MAX_MSIX];
193 	/* FP Notification Queue (CQ & SRQ) */
194 	struct bnxt_qplib_nq    nq[BNXT_RE_MAX_MSIX];
195 	int			num_msix;
196 	int			max_init;
197 	struct mutex		load_lock;
198 };
199 
200 struct bnxt_re_work {
201 	struct work_struct	work;
202 	unsigned long		event;
203 	struct bnxt_re_dev      *rdev;
204 	struct ifnet		*vlan_dev;
205 	bool do_lag;
206 
207 	/* netdev where we received the event */
208 	struct ifnet *netdev;
209 	struct auxiliary_device *adev;
210 };
211 
212 /*
213  * Data structure and defines to handle
214  * recovery
215  */
216 #define BNXT_RE_RECOVERY_IB_UNINIT_WAIT_RETRY   20
217 #define BNXT_RE_RECOVERY_IB_UNINIT_WAIT_TIME_MS 30000 /* 30sec timeout */
218 #define BNXT_RE_PRE_RECOVERY_REMOVE 0x1
219 #define BNXT_RE_COMPLETE_REMOVE 0x2
220 #define BNXT_RE_POST_RECOVERY_INIT 0x4
221 #define BNXT_RE_COMPLETE_INIT 0x8
222 #define BNXT_RE_COMPLETE_SHUTDOWN 0x10
223 
224 /* QP1 SQ entry data structure */
225 struct bnxt_re_sqp_entries {
226 	u64 wrid;
227 	struct bnxt_qplib_sge sge;
228 	/* For storing the actual qp1 cqe */
229 	struct bnxt_qplib_cqe cqe;
230 	struct bnxt_re_qp *qp1_qp;
231 };
232 
233 /* GSI QP mode enum */
234 enum bnxt_re_gsi_mode {
235 	BNXT_RE_GSI_MODE_INVALID = 0,
236 	BNXT_RE_GSI_MODE_ALL = 1,
237 	BNXT_RE_GSI_MODE_ROCE_V1,
238 	BNXT_RE_GSI_MODE_ROCE_V2_IPV4,
239 	BNXT_RE_GSI_MODE_ROCE_V2_IPV6,
240 	BNXT_RE_GSI_MODE_UD
241 };
242 
243 enum bnxt_re_roce_cap {
244 	BNXT_RE_FLAG_ROCEV1_CAP = 1,
245 	BNXT_RE_FLAG_ROCEV2_CAP,
246 	BNXT_RE_FLAG_ROCEV1_V2_CAP,
247 };
248 
249 #define BNXT_RE_MAX_GSI_SQP_ENTRIES	1024
250 struct bnxt_re_gsi_context {
251 	u8	gsi_qp_mode;
252 	bool	first_cq_created;
253 	/* Start: used only in gsi_mode_all */
254 	struct	bnxt_re_qp *gsi_qp;
255 	struct	bnxt_re_qp *gsi_sqp;
256 	struct	bnxt_re_ah *gsi_sah;
257 	struct	bnxt_re_sqp_entries *sqp_tbl;
258 	/* End: used only in gsi_mode_all */
259 };
260 
261 struct bnxt_re_tc_rec {
262 	u8 cos_id_roce;
263 	u8 tc_roce;
264 	u8 cos_id_cnp;
265 	u8 tc_cnp;
266 	u8 tc_def;
267 	u8 cos_id_def;
268 	u8 max_tc;
269 	u8 roce_prio;
270 	u8 cnp_prio;
271 	u8 roce_dscp;
272 	u8 cnp_dscp;
273 	u8 prio_valid;
274 	u8 dscp_valid;
275 	bool ecn_enabled;
276 	bool serv_type_enabled;
277 	u64 cnp_dscp_bv;
278 	u64 roce_dscp_bv;
279 };
280 
281 struct bnxt_re_dscp2pri {
282 	u8 dscp;
283 	u8 mask;
284 	u8 pri;
285 };
286 
287 struct bnxt_re_cos2bw_cfg {
288 	u8	pad[3];
289 	struct_group_attr(cfg, __packed,
290 		u8	queue_id;
291 		__le32	min_bw;
292 		__le32	max_bw;
293 		u8	tsa;
294 		u8	pri_lvl;
295 		u8	bw_weight;
296 	);
297 	u8	unused;
298 };
299 
300 #define BNXT_RE_AEQ_IDX			0
301 #define BNXT_RE_MAX_SGID_ENTRIES	256
302 
303 #define BNXT_RE_DBGFS_FILE_MEM          65536
304 enum {
305 	BNXT_RE_STATS_QUERY = 1,
306 	BNXT_RE_QP_QUERY = 2,
307 	BNXT_RE_SERVICE_FN_QUERY = 3,
308 };
309 
310 struct bnxt_re_dbg_file {
311 	struct bnxt_re_dev *rdev;
312 	u32 type;
313 	union {
314 		struct bnxt_qplib_query_stats_info sinfo;
315 		struct bnxt_qplib_query_fn_info fninfo;
316 	} params;
317 	char dbg_buf[BNXT_RE_DBGFS_FILE_MEM];
318 };
319 
320 struct bnxt_re_debug_entries {
321 	/* Dir entries */
322 	struct dentry *qpinfo_dir;
323 	struct dentry *service_fn_dir;
324 	/* file entries */
325 	struct dentry *stat_query;
326 	struct bnxt_re_dbg_file stat_file;
327 	struct dentry *qplist_query;
328 	struct bnxt_re_dbg_file qp_file;
329 	struct dentry *service_fn_query;
330 	struct bnxt_re_dbg_file service_fn_file;
331 };
332 
333 struct bnxt_re_en_dev_info {
334 	struct list_head en_list;
335 	struct bnxt_en_dev *en_dev;
336 	struct bnxt_re_dev *rdev;
337 	unsigned long flags;
338 #define BNXT_RE_FLAG_EN_DEV_NETDEV_REG		0
339 #define BNXT_RE_FLAG_EN_DEV_PRIMARY_DEV		1
340 #define BNXT_RE_FLAG_EN_DEV_SECONDARY_DEV	2
341 	u8 wqe_mode;
342 	u8 gsi_mode;
343 	bool te_bypass;
344 	bool ib_uninit_done;
345 	u32 num_msix_requested;
346 	wait_queue_head_t waitq;
347 };
348 
349 #define BNXT_RE_DB_FIFO_ROOM_MASK_P5	0x1FFF8000
350 #define BNXT_RE_MAX_FIFO_DEPTH_P5	0x2c00
351 #define BNXT_RE_DB_FIFO_ROOM_SHIFT	15
352 
353 #define BNXT_RE_DB_FIFO_ROOM_MASK_P7	0x3FFF8000
354 #define BNXT_RE_MAX_FIFO_DEPTH_P7	0x8000
355 
356 #define BNXT_RE_DB_FIFO_ROOM_MASK(ctx)	\
357 	(_is_chip_p7((ctx)) ? \
358 	 BNXT_RE_DB_FIFO_ROOM_MASK_P7 :\
359 	 BNXT_RE_DB_FIFO_ROOM_MASK_P5)
360 #define BNXT_RE_MAX_FIFO_DEPTH(ctx)	\
361 	(_is_chip_p7((ctx)) ? \
362 	 BNXT_RE_MAX_FIFO_DEPTH_P7 :\
363 	 BNXT_RE_MAX_FIFO_DEPTH_P5)
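
/*
 * A minimal sketch of how these masks are typically combined (editor's
 * illustration; 'val' stands for a read of the DB FIFO / throttling
 * register at rdev->dbr_db_fifo_reg_off):
 *
 *	room  = (val & BNXT_RE_DB_FIFO_ROOM_MASK(ctx)) >>
 *		BNXT_RE_DB_FIFO_ROOM_SHIFT;
 *	occup = BNXT_RE_MAX_FIFO_DEPTH(ctx) - room;
 *
 * i.e. the register reports free entries and the pacing logic works with
 * the resulting occupancy.
 */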
364 
365 struct bnxt_dbq_nq_list {
366 	int num_nql_entries;
367 	u16 nq_id[16];
368 };
369 
370 #define BNXT_RE_ASYNC_ERR_REP_BASE(_type)				\
371 	 (ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_##_type)
372 
373 #define BNXT_RE_ASYNC_ERR_DBR_TRESH(_type)				\
374 	 (ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_##_type)
375 
376 #define BNXT_RE_EVENT_DBR_EPOCH(data)					\
377 	(((data) &							\
378 	  BNXT_RE_ASYNC_ERR_DBR_TRESH(EVENT_DATA1_EPOCH_MASK)) >>	\
379 	 BNXT_RE_ASYNC_ERR_DBR_TRESH(EVENT_DATA1_EPOCH_SFT))
380 
381 #define BNXT_RE_EVENT_ERROR_REPORT_TYPE(data1)				\
382 	(((data1) &							\
383 	  BNXT_RE_ASYNC_ERR_REP_BASE(TYPE_MASK))  >>			\
384 	 BNXT_RE_ASYNC_ERR_REP_BASE(TYPE_SFT))
385 
386 #define BNXT_RE_DBR_LIST_ADD(_rdev, _res, _type)			\
387 {									\
388 	spin_lock(&(_rdev)->res_list[_type].lock);			\
389 	list_add_tail(&(_res)->dbr_list,				\
390 		      &(_rdev)->res_list[_type].head);			\
391 	spin_unlock(&(_rdev)->res_list[_type].lock);			\
392 }
393 
394 #define BNXT_RE_DBR_LIST_DEL(_rdev, _res, _type)			\
395 {									\
396 	spin_lock(&(_rdev)->res_list[_type].lock);			\
397 	list_del(&(_res)->dbr_list);					\
398 	spin_unlock(&(_rdev)->res_list[_type].lock);			\
399 }
400 
401 #define BNXT_RE_CQ_PAGE_LIST_ADD(_uctx, _cq)				\
402 {									\
403 	mutex_lock(&(_uctx)->cq_lock);					\
404 	list_add_tail(&(_cq)->cq_list, &(_uctx)->cq_list);		\
405 	mutex_unlock(&(_uctx)->cq_lock);				\
406 }
407 
408 #define BNXT_RE_CQ_PAGE_LIST_DEL(_uctx, _cq)				\
409 {									\
410 	mutex_lock(&(_uctx)->cq_lock);					\
411 	list_del(&(_cq)->cq_list);					\
412 	mutex_unlock(&(_uctx)->cq_lock);				\
413 }
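
/*
 * Example use of the helpers above (editor's sketch; the actual call
 * sites live in the .c files): a newly created CQ can be tracked for
 * doorbell-drop recovery and untracked on destroy with
 *
 *	BNXT_RE_DBR_LIST_ADD(rdev, cq, BNXT_RE_RES_TYPE_CQ);
 *	...
 *	BNXT_RE_DBR_LIST_DEL(rdev, cq, BNXT_RE_RES_TYPE_CQ);
 *
 * and similarly BNXT_RE_CQ_PAGE_LIST_ADD/DEL maintain the per-ucontext
 * CQ list under cq_lock.
 */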
414 
415 #define BNXT_RE_NETDEV_EVENT(event, x)					\
416 	do {								\
417 		if ((event) == (x))					\
418 			return #x;					\
419 	} while (0)
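
/*
 * The macro above is typically used inside an event-name helper of the
 * form below (hypothetical sketch for illustration; the real helper, if
 * any, lives elsewhere):
 *
 *	static const char *bnxt_re_netevent(unsigned long event)
 *	{
 *		BNXT_RE_NETDEV_EVENT(event, NETDEV_UP);
 *		BNXT_RE_NETDEV_EVENT(event, NETDEV_DOWN);
 *		BNXT_RE_NETDEV_EVENT(event, NETDEV_CHANGE);
 *		return "Unknown";
 *	}
 */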
420 
421 /* Do not change the order of this enum; the DBR recovery code relies on it */
422 enum {
423 	BNXT_RE_RES_TYPE_CQ = 0,
424 	BNXT_RE_RES_TYPE_UCTX,
425 	BNXT_RE_RES_TYPE_QP,
426 	BNXT_RE_RES_TYPE_SRQ,
427 	BNXT_RE_RES_TYPE_MAX
428 };
429 
430 struct bnxt_re_dbr_res_list {
431 	struct list_head head;
432 	spinlock_t lock;
433 };
434 
435 struct bnxt_re_dbr_drop_recov_work {
436 	struct work_struct work;
437 	struct bnxt_re_dev *rdev;
438 	u32 curr_epoch;
439 };
440 
441 struct bnxt_re_aer_work {
442 	struct work_struct work;
443 	struct bnxt_re_dev *rdev;
444 };
445 
446 struct bnxt_re_dbq_stats {
447 	u64 fifo_occup_slab_1;
448 	u64 fifo_occup_slab_2;
449 	u64 fifo_occup_slab_3;
450 	u64 fifo_occup_slab_4;
451 	u64 fifo_occup_water_mark;
452 	u64 do_pacing_slab_1;
453 	u64 do_pacing_slab_2;
454 	u64 do_pacing_slab_3;
455 	u64 do_pacing_slab_4;
456 	u64 do_pacing_slab_5;
457 	u64 do_pacing_water_mark;
458 };
459 
460 /* Device debug statistics */
461 struct bnxt_re_drv_dbg_stats {
462 	struct bnxt_re_dbq_stats dbq;
463 };
464 
465 /* DB pacing counters */
466 struct bnxt_re_dbr_sw_stats {
467 	u64 dbq_int_recv;
468 	u64 dbq_int_en;
469 	u64 dbq_pacing_resched;
470 	u64 dbq_pacing_complete;
471 	u64 dbq_pacing_alerts;
472 	u64 dbr_drop_recov_events;
473 	u64 dbr_drop_recov_timeouts;
474 	u64 dbr_drop_recov_timeout_users;
475 	u64 dbr_drop_recov_event_skips;
476 };
477 
478 struct bnxt_re_dev {
479 	struct ib_device		ibdev;
480 	struct list_head		list;
481 	atomic_t			ref_count;
482 	atomic_t			sched_count;
483 	unsigned long			flags;
484 #define BNXT_RE_FLAG_NETDEV_REGISTERED		0
485 #define BNXT_RE_FLAG_IBDEV_REGISTERED		1
486 #define BNXT_RE_FLAG_GOT_MSIX			2
487 #define BNXT_RE_FLAG_HAVE_L2_REF		3
488 #define BNXT_RE_FLAG_ALLOC_RCFW			4
489 #define BNXT_RE_FLAG_NET_RING_ALLOC		5
490 #define BNXT_RE_FLAG_RCFW_CHANNEL_EN		6
491 #define BNXT_RE_FLAG_ALLOC_CTX			7
492 #define BNXT_RE_FLAG_STATS_CTX_ALLOC		8
493 #define BNXT_RE_FLAG_STATS_CTX2_ALLOC		9
494 #define BNXT_RE_FLAG_RCFW_CHANNEL_INIT		10
495 #define BNXT_RE_FLAG_WORKER_REG			11
496 #define BNXT_RE_FLAG_TBLS_ALLOCINIT		12
497 #define BNXT_RE_FLAG_SETUP_NQ			13
498 #define BNXT_RE_FLAG_BOND_DEV_REGISTERED	14
499 #define BNXT_RE_FLAG_PER_PORT_DEBUG_INFO	15
500 #define BNXT_RE_FLAG_DEV_LIST_INITIALIZED	16
501 #define BNXT_RE_FLAG_ERR_DEVICE_DETACHED	17
502 #define BNXT_RE_FLAG_INIT_DCBX_CC_PARAM		18
503 #define BNXT_RE_FLAG_STOP_IN_PROGRESS		20
504 #define BNXT_RE_FLAG_ISSUE_ROCE_STATS		29
505 #define BNXT_RE_FLAG_ISSUE_CFA_FLOW_STATS	30
506 	struct ifnet			*netdev;
507 	struct auxiliary_device		*adev;
508 	struct bnxt_qplib_chip_ctx	*chip_ctx;
509 	struct bnxt_en_dev		*en_dev;
510 	struct bnxt_re_nq_record	nqr;
511 	int				id;
512 	struct delayed_work		worker;
513 	u16				worker_30s;
514 	struct bnxt_re_tc_rec		tc_rec[2];
515 	u8				cur_prio_map;
516 	/* RCFW Channel */
517 	struct bnxt_qplib_rcfw		rcfw;
518 	/* Device Resources */
519 	struct bnxt_qplib_dev_attr	*dev_attr;
520 	struct bnxt_qplib_res		qplib_res;
521 	struct bnxt_qplib_dpi		dpi_privileged;
522 	struct bnxt_qplib_cc_param	cc_param;
523 	struct mutex			cc_lock;
524 	struct mutex			qp_lock;
525 	struct list_head		qp_list;
526 	u8				roce_mode;
527 
528 	/* Max of 2 lossless traffic classes supported per port */
529 	u16				cosq[2];
530 	/* Start: QP for handling QP1 packets */
531 	struct bnxt_re_gsi_context	gsi_ctx;
532 	/* End: QP for handling QP1 packets */
533 	bool				is_virtfn;
534 	u32				num_vfs;
535 	u32				espeed;
536 	/*
537 	 * For storing the speed of slave interfaces.
538 	 * Same as espeed when bond is not configured
539 	 */
540 	u32				sl_espeed;
541 	/* Used for a workaround for the iSER stack */
542 	u32				min_tx_depth;
543 	/* To enable qp debug info. Disabled during driver load */
544 	u32				en_qp_dbg;
545 	/* Array to handle gid mapping */
546 	char				*gid_map;
547 
548 	struct bnxt_re_device_stats	stats;
549 	struct bnxt_re_drv_dbg_stats	*dbg_stats;
550 	/* debugfs to expose per-port information */
551 	struct dentry                   *port_debug_dir;
552 	struct dentry                   *info;
553 	struct dentry                   *drv_dbg_stats;
554 	struct dentry                   *sp_perf_stats;
555 	struct dentry                   *pdev_debug_dir;
556 	struct dentry                   *pdev_qpinfo_dir;
557 	struct bnxt_re_debug_entries	*dbg_ent;
558 	struct workqueue_struct		*resolve_wq;
559 	struct list_head		mac_wq_list;
560 	struct workqueue_struct		*dcb_wq;
561 	struct workqueue_struct		*aer_wq;
562 	u32				event_bitmap[3];
563 	bool unreg_sched;
564 	u64	dbr_throttling_reg_off;
565 	u64	dbr_aeq_arm_reg_off;
566 	u64	dbr_db_fifo_reg_off;
567 	void *dbr_page;
568 	u64 dbr_bar_addr;
569 	u32 pacing_algo_th;
570 	u32 pacing_en_int_th;
571 	u32 do_pacing_save;
572 	struct workqueue_struct		*dbq_wq;
573 	struct workqueue_struct		*dbr_drop_recov_wq;
574 	struct work_struct		dbq_fifo_check_work;
575 	struct delayed_work		dbq_pacing_work;
576 	/* protect DB pacing */
577 	struct mutex dbq_lock;
578 	/* Control DBR pacing feature. Set if enabled */
579 	bool dbr_pacing;
580 	/* Control DBR recovery feature. Set if enabled */
581 	bool dbr_drop_recov;
582 	bool user_dbr_drop_recov;
583 	/* DBR recovery feature. Set if running */
584 	bool dbr_recovery_on;
585 	u32 user_dbr_drop_recov_timeout;
586 	/*
587 	 * Value used for pacing algo when pacing is active
588 	 */
589 #define BNXT_RE_MAX_DBR_DO_PACING 0xFFFF
590 	u32 dbr_do_pacing;
591 	u32 dbq_watermark; /* Current watermark set in HW registers */
592 	u32 dbq_nq_id; /* Current NQ ID for DBQ events */
593 	u32 dbq_pacing_time; /* ms */
594 	u32 dbr_def_do_pacing; /* do_pacing when no congestion */
595 	u32 dbr_evt_curr_epoch;
596 	bool dbq_int_disable;
597 
598 	bool mod_exit;
599 	struct bnxt_re_dbr_sw_stats *dbr_sw_stats;
600 	struct bnxt_re_dbr_res_list res_list[BNXT_RE_RES_TYPE_MAX];
601 	struct bnxt_dbq_nq_list nq_list;
602 	char dev_name[IB_DEVICE_NAME_MAX];
603 	atomic_t dbq_intr_running;
604 	u32 num_msix_requested;
605 	unsigned char	*dev_addr; /* For netdev->dev_addr */
606 };
607 
608 #define BNXT_RE_RESOLVE_RETRY_COUNT_US	5000000 /* 5 sec */
609 struct bnxt_re_resolve_dmac_work {
610 	struct work_struct      work;
611 	struct list_head	list;
612 	struct bnxt_re_dev 	*rdev;
613 	struct ib_ah_attr	*ah_attr;
614 	struct bnxt_re_ah_info *ah_info;
615 	atomic_t		status_wait;
616 };
617 
618 static inline u8 bnxt_re_get_prio(u8 prio_map)
619 {
620 	u8 prio = 0xFF;
621 
622 	for (prio = 0; prio < 8; prio++)
623 		if (prio_map & (1UL << prio))
624 			break;
625 	return prio;
626 }
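
/*
 * Editor's note on the helper above: bnxt_re_get_prio() returns the
 * lowest priority whose bit is set in the map, e.g. a prio_map of 0x28
 * (bits 3 and 5) yields 3. An empty map falls through the loop and
 * returns 8, not the 0xFF the initializer might suggest.
 */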
627 
628 /* This should be called with bnxt_re_dev_lock mutex held */
629 static inline bool __bnxt_re_is_rdev_valid(struct bnxt_re_dev *rdev)
630 {
631 	struct bnxt_re_dev *tmp_rdev;
632 
633 	list_for_each_entry(tmp_rdev, &bnxt_re_dev_list, list) {
634 		if (rdev == tmp_rdev)
635 			return true;
636 	}
637 	return false;
638 }
639 
640 static inline bool bnxt_re_is_rdev_valid(struct bnxt_re_dev *rdev)
641 {
642 	struct bnxt_re_dev *tmp_rdev;
643 
644 	mutex_lock(&bnxt_re_dev_lock);
645 	list_for_each_entry(tmp_rdev, &bnxt_re_dev_list, list) {
646 		if (rdev == tmp_rdev) {
647 			mutex_unlock(&bnxt_re_dev_lock);
648 			return true;
649 		}
650 	}
651 	mutex_unlock(&bnxt_re_dev_lock);
652 
653 	pr_debug("bnxt_re: %s : Invalid rdev received rdev = %p\n",
654 		 __func__, rdev);
655 	return false;
656 }
657 
658 int bnxt_re_send_hwrm_cmd(struct bnxt_re_dev *rdev, void *cmd,
659 			  int cmdlen);
660 void bnxt_re_stopqps_and_ib_uninit(struct bnxt_re_dev *rdev);
661 int bnxt_re_set_hwrm_dscp2pri(struct bnxt_re_dev *rdev,
662 			      struct bnxt_re_dscp2pri *d2p, u16 count,
663 			      u16 target_id);
664 int bnxt_re_query_hwrm_dscp2pri(struct bnxt_re_dev *rdev,
665 				struct bnxt_re_dscp2pri *d2p, u16 *count,
666 				u16 target_id);
667 int bnxt_re_query_hwrm_qportcfg(struct bnxt_re_dev *rdev,
668 				struct bnxt_re_tc_rec *cnprec, u16 tid);
669 int bnxt_re_hwrm_cos2bw_qcfg(struct bnxt_re_dev *rdev, u16 target_id,
670 			     struct bnxt_re_cos2bw_cfg *cfg);
671 int bnxt_re_hwrm_cos2bw_cfg(struct bnxt_re_dev *rdev, u16 target_id,
672 			    struct bnxt_re_cos2bw_cfg *cfg);
673 int bnxt_re_hwrm_pri2cos_cfg(struct bnxt_re_dev *rdev,
674 			     u16 target_id, u16 port_id,
675 			     u8 *cos_id_map, u8 pri_map);
676 int bnxt_re_prio_vlan_tx_update(struct bnxt_re_dev *rdev);
677 int bnxt_re_get_slot_pf_count(struct bnxt_re_dev *rdev);
678 struct bnxt_re_dev *bnxt_re_get_peer_pf(struct bnxt_re_dev *rdev);
679 struct bnxt_re_dev *bnxt_re_from_netdev(struct ifnet *netdev);
680 u8 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev, u8 selector);
681 struct bnxt_qplib_nq *bnxt_re_get_nq(struct bnxt_re_dev *rdev);
682 void bnxt_re_put_nq(struct bnxt_re_dev *rdev, struct bnxt_qplib_nq *nq);
683 
684 #define to_bnxt_re(ptr, type, member)	\
685 	container_of(ptr, type, member)
686 
687 #define to_bnxt_re_dev(ptr, member)	\
688 	container_of((ptr), struct bnxt_re_dev, member)
689 
690 /* Even-numbered functions are on port 0 and odd-numbered functions on port 1 */
691 #define BNXT_RE_IS_PORT0(rdev) (!(rdev->en_dev->pdev->devfn & 1))
692 
693 #define BNXT_RE_ROCE_V1_PACKET		0
694 #define BNXT_RE_ROCEV2_IPV4_PACKET	2
695 #define BNXT_RE_ROCEV2_IPV6_PACKET	3
696 #define BNXT_RE_ACTIVE_MAP_PORT1    0x1  /* port-1 active */
697 #define BNXT_RE_ACTIVE_MAP_PORT2    0x2  /* port-2 active */
698 
699 #define BNXT_RE_MEMBER_PORT_MAP		(BNXT_RE_ACTIVE_MAP_PORT1 | \
700 					BNXT_RE_ACTIVE_MAP_PORT2)
701 
702 #define	rdev_to_dev(rdev)	((rdev) ? (&(rdev)->ibdev.dev) : NULL)
703 
704 void bnxt_re_set_dma_device(struct ib_device *ibdev, struct bnxt_re_dev *rdev);
705 bool bnxt_re_is_rdev_valid(struct bnxt_re_dev *rdev);
706 
707 #define bnxt_re_rdev_ready(rdev)	(bnxt_re_is_rdev_valid(rdev) && \
708 					 (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)))
709 #define BNXT_RE_SRIOV_CFG_TIMEOUT 6
710 
711 int bnxt_re_get_device_stats(struct bnxt_re_dev *rdev);
712 void bnxt_re_remove_device(struct bnxt_re_dev *rdev, u8 removal_type,
713 			   struct auxiliary_device *aux_dev);
714 void bnxt_re_destroy_lag(struct bnxt_re_dev **rdev);
715 int bnxt_re_add_device(struct bnxt_re_dev **rdev,
716 		       struct ifnet *netdev,
717 		       u8 qp_mode, u8 op_type, u8 wqe_mode, u32 num_msix_requested,
718 		       struct auxiliary_device *aux_dev);
719 void bnxt_re_create_base_interface(bool primary);
720 int bnxt_re_schedule_work(struct bnxt_re_dev *rdev, unsigned long event,
721 			  struct ifnet *vlan_dev,
722 			  struct ifnet *netdev,
723 			  struct auxiliary_device *aux_dev);
724 void bnxt_re_get_link_speed(struct bnxt_re_dev *rdev);
725 int _bnxt_re_ib_init(struct bnxt_re_dev *rdev);
726 int _bnxt_re_ib_init2(struct bnxt_re_dev *rdev);
727 void bnxt_re_init_resolve_wq(struct bnxt_re_dev *rdev);
728 void bnxt_re_uninit_resolve_wq(struct bnxt_re_dev *rdev);
729 
730 /* The rdev ref_count protects against premature removal of the device */
731 static inline void bnxt_re_hold(struct bnxt_re_dev *rdev)
732 {
733 	atomic_inc(&rdev->ref_count);
734 	dev_dbg(rdev_to_dev(rdev),
735 		"Hold ref_count = 0x%x", atomic_read(&rdev->ref_count));
736 }
737 
738 static inline void bnxt_re_put(struct bnxt_re_dev *rdev)
739 {
740 	atomic_dec(&rdev->ref_count);
741 	dev_dbg(rdev_to_dev(rdev),
742 		"Put ref_count = 0x%x", atomic_read(&rdev->ref_count));
743 }
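
/*
 * Typical pattern (editor's illustration): take a reference before
 * handing the device to deferred work and have the worker drop it once
 * it can no longer dereference rdev.
 *
 *	bnxt_re_hold(rdev);
 *	queue_work(wq, &re_work->work);
 *	... worker runs, then calls bnxt_re_put(rdev) ...
 */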
744 
745 /*
746  * Responder error reason codes.
747  * FIXME: Remove these when the definitions are properly included in
748  * the hsi header.
749  */
750 enum res_err_state_reason {
751 	/* No error. */
752 	CFCQ_RES_ERR_STATE_REASON_NO_ERROR = 0,
753 	/*
754 	 * Incoming Send, RDMA write, or RDMA read exceeds the maximum
755 	 * transfer length. Detected on RX first and only packets for
756 	 * write. Detected on RX request for read. This is an RX
757 	 * Detected Error.
758 	 */
759 	CFCQ_RES_ERR_STATE_REASON_RES_EXCEED_MAX,
760 	/*
761 	 * RDMA write payload size does not match write length. Detected
762 	 * when total write payload is not equal to the RDMA write
763 	 * length that was given in the first or only packet of the
764 	 * request. This is an RX Detected Error.
765 	 */
766 	CFCQ_RES_ERR_STATE_REASON_RES_PAYLOAD_LENGTH_MISMATCH,
767 	/*
768 	 * Send payload exceeds RQ/SRQ WQE buffer capacity. The total
769 	 * send payload that arrived is more than the size of the WQE
770 	 * buffer that was fetched from the RQ/SRQ. This is an RX
771 	 * Detected Error.
772 	 */
773 	CFCQ_RES_ERR_STATE_REASON_RES_EXCEEDS_WQE,
774 	/*
775 	 * Responder detected opcode error. * First, only, middle, last
776 	 * for incoming requests are improperly ordered with respect to
777 	 * previous (PSN) packet. * First or middle packet is not full
778 	 * MTU size. This is an RX Detected Error.
779 	 */
780 	CFCQ_RES_ERR_STATE_REASON_RES_OPCODE_ERROR,
781 	/*
782 	 * PSN sequence error retry limit exceeded. The responder
783 	 * encountered a PSN sequence error for the same PSN too many
784 	 * times. This can occur via implicit or explicit NAK. This is
785 	 * an RX Detected Error.
786 	 */
787 	CFCQ_RES_ERR_STATE_REASON_RES_PSN_SEQ_ERROR_RETRY_LIMIT,
788 	/*
789 	 * Invalid R_Key. An incoming request contained an R_Key that
790 	 * did not reference a valid MR/MW. This error may be detected
791 	 * by the RX engine for RDMA write or by the TX engine for RDMA
792 	 * read (detected while servicing IRRQ). This is an RX Detected
793 	 * Error.
794 	 */
795 	CFCQ_RES_ERR_STATE_REASON_RES_RX_INVALID_R_KEY,
796 	/*
797 	 * Domain error. An incoming request specified an R_Key which
798 	 * referenced a MR/MW that was not in the same PD as the QP on
799 	 * which the request arrived. This is an RX Detected Error.
800 	 */
801 	CFCQ_RES_ERR_STATE_REASON_RES_RX_DOMAIN_ERROR,
802 	/*
803 	 * No permission. An incoming request contained an R_Key that
804 	 * referenced a MR/MW which did not have the access permission
805 	 * needed for the operation. This is an RX Detected Error.
806 	 */
807 	CFCQ_RES_ERR_STATE_REASON_RES_RX_NO_PERMISSION,
808 	/*
809 	 * Range error. An incoming request had a combination of R_Key,
810 	 * VA, and length that was out of bounds of the associated
811 	 * MR/MW. This is an RX Detected Error.
812 	 */
813 	CFCQ_RES_ERR_STATE_REASON_RES_RX_RANGE_ERROR,
814 	/*
815 	 * Invalid R_Key. An incoming request contained an R_Key that
816 	 * did not reference a valid MR/MW. This error may be detected
817 	 * by the RX engine for RDMA write or by the TX engine for RDMA
818 	 * read (detected while servicing IRRQ). This is a TX Detected
819 	 * Error.
820 	 */
821 	CFCQ_RES_ERR_STATE_REASON_RES_TX_INVALID_R_KEY,
822 	/*
823 	 * Domain error. An incoming request specified an R_Key which
824 	 * referenced a MR/MW that was not in the same PD as the QP on
825 	 * which the request arrived. This is a TX Detected Error.
826 	 */
827 	CFCQ_RES_ERR_STATE_REASON_RES_TX_DOMAIN_ERROR,
828 	/*
829 	 * No permission. An incoming request contained an R_Key that
830 	 * referenced a MR/MW which did not have the access permission
831 	 * needed for the operation. This is a TX Detected Error.
832 	 */
833 	CFCQ_RES_ERR_STATE_REASON_RES_TX_NO_PERMISSION,
834 	/*
835 	 * Range error. An incoming request had a combination of R_Key,
836 	 * VA, and length that was out of bounds of the associated
837 	 * MR/MW. This is a TX Detected Error.
838 	 */
839 	CFCQ_RES_ERR_STATE_REASON_RES_TX_RANGE_ERROR,
840 	/*
841 	 * IRRQ overflow. The peer sent us more RDMA read or atomic
842 	 * requests than the negotiated maximum. This is an RX Detected
843 	 * Error.
844 	 */
845 	CFCQ_RES_ERR_STATE_REASON_RES_IRRQ_OFLOW,
846 	/*
847 	 * Unsupported opcode. The peer sent us a request with an opcode
848 	 * for a request type that is not supported on this QP. This is
849 	 * an RX Detected Error.
850 	 */
851 	CFCQ_RES_ERR_STATE_REASON_RES_UNSUPPORTED_OPCODE,
852 	/*
853 	 * Unaligned atomic operation. The VA of an atomic request is on
854 	 * a memory boundary that prevents atomic execution. This is an
855 	 * RX Detected Error.
856 	 */
857 	CFCQ_RES_ERR_STATE_REASON_RES_UNALIGN_ATOMIC,
858 	/*
859 	 * Remote invalidate error. A send with invalidate request
860 	 * arrived in which the R_Key to invalidate did not describe a
861 	 * MR/MW which could be invalidated. RQ WQE completes with error
862 	 * status. This error is only reported if the send operation did
863 	 * not fail. If the send operation failed then the remote
864 	 * invalidate error is not reported. This is an RX Detected
865 	 * Error.
866 	 */
867 	CFCQ_RES_ERR_STATE_REASON_RES_REM_INVALIDATE,
868 	/*
869 	 * Local memory error. An RQ/SRQ SGE described an inaccessible
870 	 * memory. This is an RX Detected Error.
871 	 */
872 	CFCQ_RES_ERR_STATE_REASON_RES_MEMORY_ERROR,
873 	/*
874 	 * SRQ in error. The QP is moving to the error state because the
875 	 * SRQ it uses is in error. This is an RX Detected Error.
876 	 */
877 	CFCQ_RES_ERR_STATE_REASON_RES_SRQ_ERROR,
878 	/*
879 	 * Completion error. No CQE space available on queue or CQ not
880 	 * in VALID state. This is a Completion Detected Error.
881 	 */
882 	CFCQ_RES_ERR_STATE_REASON_RES_CMP_ERROR,
883 	/*
884 	 * Invalid R_Key while resending responses to duplicate request.
885 	 * This is a TX Detected Error.
886 	 */
887 	CFCQ_RES_ERR_STATE_REASON_RES_IVALID_DUP_RKEY,
888 	/*
889 	 * A problem was found in the format of a WQE in the RQ/SRQ. This
890 	 * is an RX Detected Error.
891 	 */
892 	CFCQ_RES_ERR_STATE_REASON_RES_WQE_FORMAT_ERROR,
893 	/*
894 	 * A load error occurred on an attempt to load the CQ Context.
895 	 * This is a Completion Detected Error.
896 	 */
897 	CFCQ_RES_ERR_STATE_REASON_RES_CQ_LOAD_ERROR = 0x18,
898 	/*
899 	 * A load error occurred on an attempt to load the SRQ Context.
900 	 * This is an RX Detected Error.
901 	 */
902 	CFCQ_RES_ERR_STATE_REASON_RES_SRQ_LOAD_ERROR,
903 	/*
904 	 * A fatal error was detected on an attempt to read from or
905 	 * write to PCIe on the transmit side. This error is detected by
906 	 * the TX side, but has the priority of a Completion Detected
907 	 * Error.
908 	 */
909 	CFCQ_RES_ERR_STATE_REASON_RES_TX_PCI_ERROR = 0x1b,
910 	/*
911 	 * A fatal error was detected on an attempt to read from or
912 	 * write to PCIe on the receive side. This error is detected by
913 	 * the RX side (or CAGR), but has the priority of a Completion
914 	 * Detected Error.
915 	 */
916 	CFCQ_RES_ERR_STATE_REASON_RES_RX_PCI_ERROR = 0x1c
917 };
918 
919 int bnxt_re_host_pf_id_query(struct bnxt_re_dev *rdev,
920 			     struct bnxt_qplib_query_fn_info *fn_info,
921 			     u32 *pf_mask, u32 *first_pf);
922 
923 /* Default DCBx and CC values */
924 #define BNXT_RE_DEFAULT_CNP_DSCP	48
925 #define BNXT_RE_DEFAULT_CNP_PRI		7
926 #define BNXT_RE_DEFAULT_ROCE_DSCP	26
927 #define BNXT_RE_DEFAULT_ROCE_PRI	3
928 
929 #define BNXT_RE_DEFAULT_L2_BW		50
930 #define BNXT_RE_DEFAULT_ROCE_BW		50
931 
932 #define ROCE_PRIO_VALID	0x0
933 #define CNP_PRIO_VALID	0x1
934 #define ROCE_DSCP_VALID	0x0
935 #define CNP_DSCP_VALID	0x1
936 
937 int bnxt_re_get_pri_dscp_settings(struct bnxt_re_dev *rdev,
938 				  u16 target_id,
939 				  struct bnxt_re_tc_rec *tc_rec);
940 
941 int bnxt_re_setup_dscp(struct bnxt_re_dev *rdev);
942 int bnxt_re_clear_dscp(struct bnxt_re_dev *rdev);
943 int bnxt_re_setup_cnp_cos(struct bnxt_re_dev *rdev, bool reset);
944 
945 static inline enum ib_port_state bnxt_re_get_link_state(struct bnxt_re_dev *rdev)
946 {
947 	if (rdev->netdev->if_drv_flags & IFF_DRV_RUNNING &&
948 	    rdev->netdev->if_link_state == LINK_STATE_UP)
949 		return IB_PORT_ACTIVE;
950 	return IB_PORT_DOWN;
951 }
952 
953 static inline int bnxt_re_link_state(struct bnxt_re_dev *rdev)
954 {
955 	return bnxt_re_get_link_state(rdev) == IB_PORT_ACTIVE ? 1 : 0;
956 }
957 
958 static inline int is_cc_enabled(struct bnxt_re_dev *rdev)
959 {
960 	return rdev->cc_param.enable;
961 }
962 
963 static inline void bnxt_re_init_hwrm_hdr(struct bnxt_re_dev *rdev,
964 					 struct input *hdr, u16 opcd,
965 					 u16 crid, u16 trid)
966 {
967 	hdr->req_type = cpu_to_le16(opcd);
968 	hdr->cmpl_ring = cpu_to_le16(crid);
969 	hdr->target_id = cpu_to_le16(trid);
970 }
971 
972 static inline void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg,
973 				       void *msg, int msg_len, void *resp,
974 				       int resp_max_len, int timeout)
975 {
976 	fw_msg->msg = msg;
977 	fw_msg->msg_len = msg_len;
978 	fw_msg->resp = resp;
979 	fw_msg->resp_max_len = resp_max_len;
980 	fw_msg->timeout = timeout;
981 }
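
/*
 * A minimal sketch of how a HWRM command is composed with the helpers
 * above (editor's illustration; HWRM_FUNC_QCFG, the request/response
 * structs, and bnxt_send_msg() are assumed from the L2/hsi headers, and
 * the -1 completion ring and 0xffff target id follow the usual
 * convention):
 *
 *	struct hwrm_func_qcfg_input req = {};
 *	struct hwrm_func_qcfg_output resp = {};
 *	struct bnxt_fw_msg fw_msg = {};
 *
 *	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_FUNC_QCFG, -1, 0xffff);
 *	bnxt_re_fill_fw_msg(&fw_msg, &req, sizeof(req), &resp, sizeof(resp),
 *			    BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
 *	rc = bnxt_send_msg(rdev->en_dev, &fw_msg);
 */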
982 
983 static inline bool is_qport_service_type_supported(struct bnxt_re_dev *rdev)
984 {
985 	return rdev->tc_rec[0].serv_type_enabled;
986 }
987 
988 static inline bool is_bnxt_roce_queue(struct bnxt_re_dev *rdev, u8 ser_prof, u8 prof_type)
989 {
990 	if (is_qport_service_type_supported(rdev))
991 		return (prof_type & HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_TYPE_ROCE);
992 	else
993 		return (ser_prof == HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE);
994 }
995 
996 static inline bool is_bnxt_cnp_queue(struct bnxt_re_dev *rdev, u8 ser_prof, u8 prof_type)
997 {
998 	if (is_qport_service_type_supported(rdev))
999 		return (prof_type & HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_TYPE_CNP);
1000 	else
1001 		return (ser_prof == HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP);
1002 }
1003 
1004 #define BNXT_RE_MAP_SH_PAGE		0x0
1005 #define BNXT_RE_MAP_WC			0x1
1006 #define BNXT_RE_DBR_PAGE		0x2
1007 #define BNXT_RE_MAP_DB_RECOVERY_PAGE	0x3
1008 
1009 #define BNXT_RE_DBR_RECOV_USERLAND_TIMEOUT (20)  /*  20 ms */
1010 #define BNXT_RE_DBR_INT_TIME 5 /* ms */
1011 #define BNXT_RE_PACING_EN_INT_THRESHOLD 50 /* Entries in DB FIFO */
1012 #define BNXT_RE_PACING_ALGO_THRESHOLD 250 /* Entries in DB FIFO */
1013 /* Percentage of DB FIFO depth */
1014 #define BNXT_RE_PACING_DBQ_THRESHOLD BNXT_RE_PACING_DBQ_HIGH_WATERMARK
1015 
1016 #define BNXT_RE_PACING_ALARM_TH_MULTIPLE(ctx) (_is_chip_p7(ctx) ? 0 : 2)
1017 
1018 /*
1019  * Maximum percentage of the DB FIFO depth that is configurable. The FIFO
1020  * is 0x2c00 (11264) entries deep, but DBR_REG_DB_THROTTLING has only 12
1021  * bits for the high watermark, so the user can set at most ~36% (4095/11264).
1022  */
1023 #define BNXT_RE_PACING_DBQ_HIGH_WATERMARK 36
1024 
1025 /* Default do_pacing value when there is no congestion */
1026 #define BNXT_RE_DBR_DO_PACING_NO_CONGESTION 0x7F /* 1 in 512 probability */
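
/*
 * Editor's arithmetic, assuming do_pacing scales linearly over the 16-bit
 * range above (BNXT_RE_MAX_DBR_DO_PACING = 0xFFFF): a value of 0x7F
 * corresponds to (0x7F + 1) / 0x10000 = 128 / 65536 = 1 in 512, matching
 * the probability noted in the comment above.
 */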
1027 
1028 enum {
1029 	BNXT_RE_DBQ_EVENT_SCHED = 0,
1030 	BNXT_RE_DBR_PACING_EVENT = 1,
1031 	BNXT_RE_DBR_NQ_PACING_NOTIFICATION = 2,
1032 };
1033 
1034 struct bnxt_re_dbq_work {
1035 	struct work_struct work;
1036 	struct bnxt_re_dev *rdev;
1037 	struct hwrm_async_event_cmpl cmpl;
1038 	u32 event;
1039 };
1040 
1041 int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev);
1042 int bnxt_re_enable_dbr_pacing(struct bnxt_re_dev *rdev);
1043 int bnxt_re_disable_dbr_pacing(struct bnxt_re_dev *rdev);
1044 int bnxt_re_set_dbq_throttling_reg(struct bnxt_re_dev *rdev,
1045 				   u16 nq_id, u32 throttle);
1046 void bnxt_re_pacing_alert(struct bnxt_re_dev *rdev);
1047 int bnxt_re_hwrm_pri2cos_qcfg(struct bnxt_re_dev *rdev, struct bnxt_re_tc_rec *tc_rec,
1048 			      u16 target_id);
1049 void writel_fbsd(struct bnxt_softc *bp, u32, u8, u32);
1050 u32 readl_fbsd(struct bnxt_softc *bp, u32, u8);
1051 
1052 static inline unsigned int bnxt_re_get_total_mr_mw_count(struct bnxt_re_dev *rdev)
1053 {
1054 	return (atomic_read(&rdev->stats.rsors.mr_count) +
1055 		atomic_read(&rdev->stats.rsors.mw_count));
1056 }
1057 
1058 static inline void bnxt_re_set_def_pacing_threshold(struct bnxt_re_dev *rdev)
1059 {
1060 	rdev->qplib_res.pacing_data->pacing_th = rdev->pacing_algo_th;
1061 	rdev->qplib_res.pacing_data->alarm_th =
1062 		rdev->pacing_algo_th * BNXT_RE_PACING_ALARM_TH_MULTIPLE(rdev->chip_ctx);
1063 }
1064 
1065 static inline void bnxt_re_set_def_do_pacing(struct bnxt_re_dev *rdev)
1066 {
1067 	rdev->qplib_res.pacing_data->do_pacing = rdev->dbr_def_do_pacing;
1068 }
1069 
1070 static inline void bnxt_re_set_pacing_dev_state(struct bnxt_re_dev *rdev)
1071 {
1072 	rdev->qplib_res.pacing_data->dev_err_state =
1073 		test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
1074 }
1075 #endif
1076