/*
 * Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: main (header)
 */

#ifndef __BNXT_RE_H__
#define __BNXT_RE_H__

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>
#include <linux/pci.h>

#include "bnxt.h"
#include "bnxt_ulp.h"
#include "hsi_struct_def.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"
#include "ib_verbs.h"
#include "stats.h"

#define ROCE_DRV_MODULE_NAME "bnxt_re"
#define ROCE_DRV_MODULE_VERSION "230.0.133.0"
#define ROCE_DRV_MODULE_RELDATE "April 22, 2024"

#define BNXT_RE_REF_WAIT_COUNT 20
#define BNXT_RE_ROCE_V1_ETH_TYPE 0x8915
#define BNXT_RE_ROCE_V2_PORT_NO 4791
#define BNXT_RE_RES_FREE_WAIT_COUNT 1000

#define BNXT_RE_PAGE_SHIFT_4K (12)
#define BNXT_RE_PAGE_SHIFT_8K (13)
#define BNXT_RE_PAGE_SHIFT_64K (16)
#define BNXT_RE_PAGE_SHIFT_2M (21)
#define BNXT_RE_PAGE_SHIFT_8M (23)
#define BNXT_RE_PAGE_SHIFT_1G (30)

#define BNXT_RE_PAGE_SIZE_4K BIT(BNXT_RE_PAGE_SHIFT_4K)
#define BNXT_RE_PAGE_SIZE_8K BIT(BNXT_RE_PAGE_SHIFT_8K)
#define BNXT_RE_PAGE_SIZE_64K BIT(BNXT_RE_PAGE_SHIFT_64K)
#define BNXT_RE_PAGE_SIZE_2M BIT(BNXT_RE_PAGE_SHIFT_2M)
#define BNXT_RE_PAGE_SIZE_8M BIT(BNXT_RE_PAGE_SHIFT_8M)
#define BNXT_RE_PAGE_SIZE_1G BIT(BNXT_RE_PAGE_SHIFT_1G)

#define BNXT_RE_MAX_MR_SIZE_LOW BIT(BNXT_RE_PAGE_SHIFT_1G)
#define BNXT_RE_MAX_MR_SIZE_HIGH BIT(39)
#define BNXT_RE_MAX_MR_SIZE BNXT_RE_MAX_MR_SIZE_HIGH
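
/*
 * Derived sizes for reference: BNXT_RE_MAX_MR_SIZE_LOW is 2^30 bytes
 * (1 GB) and BNXT_RE_MAX_MR_SIZE_HIGH is 2^39 bytes (512 GB); the
 * driver advertises the higher limit.
 */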

/* Number of MRs to reserve for PF, leaving remainder for VFs */
#define BNXT_RE_RESVD_MR_FOR_PF (32 * 1024)
#define BNXT_RE_MAX_GID_PER_VF 128

#define BNXT_RE_MAX_VF_QPS_PER_PF (6 * 1024)

/**
 * min_not_zero - return the minimum that is _not_ zero, unless both are zero
 * @x: value1
 * @y: value2
 */
#ifndef min_not_zero
#define min_not_zero(x, y) ({ \
        typeof(x) __x = (x); \
        typeof(y) __y = (y); \
        __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
#endif
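
/*
 * Examples (illustrative): min_not_zero(0, 8) == 8,
 * min_not_zero(4, 8) == 4, and min_not_zero(0, 0) == 0.
 */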

struct ib_mr_init_attr {
        int max_reg_descriptors;
        u32 flags;
};

struct bnxt_re_dev;

int bnxt_re_register_netdevice_notifier(struct notifier_block *nb);
int bnxt_re_unregister_netdevice_notifier(struct notifier_block *nb);
int ib_register_device_compat(struct bnxt_re_dev *rdev);

#ifndef __struct_group
#define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \
        union { \
                struct { MEMBERS } ATTRS; \
                struct TAG { MEMBERS } ATTRS NAME; \
        }
#endif /* __struct_group */
#ifndef struct_group_attr
#define struct_group_attr(NAME, ATTRS, MEMBERS...) \
        __struct_group(/* no tag */, NAME, ATTRS, MEMBERS)
#endif /* struct_group_attr */
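
/*
 * Illustrative use of the struct_group_attr() fallback above, against the
 * bnxt_re_cos2bw_cfg structure defined later in this file: the grouped
 * members stay directly accessible, and the whole group can also be
 * addressed as one unit.
 *
 *        cfg->queue_id = 5;                              (direct access)
 *        memcpy(&dst->cfg, &src->cfg, sizeof(dst->cfg)); (whole group)
 */
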
/*
 * Percentage of resources of each type reserved for PF.
 * Remaining resources are divided equally among VFs.
 * [0, 100]
 */

#define BNXT_RE_RQ_WQE_THRESHOLD 32
#define BNXT_RE_UD_QP_HW_STALL 0x400000

/*
 * Set the default ACK delay value to 16, which corresponds to a
 * default timeout of approx. 260 ms (4 usec * 2^timeout).
 */

#define BNXT_RE_DEFAULT_ACK_DELAY 16
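
/*
 * Worked check for the ACK delay above: 4 usec * 2^16 = 262144 usec,
 * i.e. roughly 260 ms.
 */
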
#define BNXT_RE_BOND_PF_MAX 2

#define BNXT_RE_STATS_CTX_UPDATE_TIMER 250
#define BNXT_RE_30SEC_MSEC (30 * 1000)

#define BNXT_RE_BOND_RESCHED_CNT 10

#define BNXT_RE_CHIP_NUM_57454 0xC454
#define BNXT_RE_CHIP_NUM_57452 0xC452

#define BNXT_RE_CHIP_NUM_5745X(chip_num) \
        ((chip_num) == BNXT_RE_CHIP_NUM_57454 || \
         (chip_num) == BNXT_RE_CHIP_NUM_57452)

#define BNXT_RE_MIN_KERNEL_QP_TX_DEPTH 4096
#define BNXT_RE_STOP_QPS_BUDGET 200

#define BNXT_RE_HWRM_CMD_TIMEOUT(rdev) \
        ((rdev)->chip_ctx->hwrm_cmd_max_timeout * 1000)

extern unsigned int min_tx_depth;
extern struct mutex bnxt_re_dev_lock;
extern struct mutex bnxt_re_mutex;
extern struct list_head bnxt_re_dev_list;

struct bnxt_re_ring_attr {
        dma_addr_t *dma_arr;
        int pages;
        int type;
        u32 depth;
        u32 lrid; /* Logical ring id */
        u16 flags;
        u8 mode;
        u8 rsvd;
};

#define BNXT_RE_MAX_DEVICES 256
#define BNXT_RE_MSIX_FROM_MOD_PARAM -1
#define BNXT_RE_MIN_MSIX 2
#define BNXT_RE_MAX_MSIX_VF 2
#define BNXT_RE_MAX_MSIX_PF 9
#define BNXT_RE_MAX_MSIX_NPAR_PF 5
#define BNXT_RE_MAX_MSIX 64
#define BNXT_RE_MAX_MSIX_GEN_P5_PF BNXT_RE_MAX_MSIX
#define BNXT_RE_GEN_P5_MAX_VF 64

struct bnxt_re_nq_record {
        struct bnxt_msix_entry msix_entries[BNXT_RE_MAX_MSIX];
        /* FP Notification Queue (CQ & SRQ) */
        struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX];
        int num_msix;
        int max_init;
        struct mutex load_lock;
};

struct bnxt_re_work {
        struct work_struct work;
        unsigned long event;
        struct bnxt_re_dev *rdev;
        struct ifnet *vlan_dev;
        bool do_lag;

        /* netdev where we received the event */
        struct ifnet *netdev;
        struct auxiliary_device *adev;
};

/*
 * Data structure and defines to handle
 * recovery
 */
#define BNXT_RE_RECOVERY_IB_UNINIT_WAIT_RETRY 20
#define BNXT_RE_RECOVERY_IB_UNINIT_WAIT_TIME_MS 30000 /* 30 sec timeout */
#define BNXT_RE_PRE_RECOVERY_REMOVE 0x1
#define BNXT_RE_COMPLETE_REMOVE 0x2
#define BNXT_RE_POST_RECOVERY_INIT 0x4
#define BNXT_RE_COMPLETE_INIT 0x8
#define BNXT_RE_COMPLETE_SHUTDOWN 0x10
/* QP1 SQ entry data structure */
struct bnxt_re_sqp_entries {
        u64 wrid;
        struct bnxt_qplib_sge sge;
        /* For storing the actual qp1 cqe */
        struct bnxt_qplib_cqe cqe;
        struct bnxt_re_qp *qp1_qp;
};

/* GSI QP mode enum */
enum bnxt_re_gsi_mode {
        BNXT_RE_GSI_MODE_INVALID = 0,
        BNXT_RE_GSI_MODE_ALL = 1,
        BNXT_RE_GSI_MODE_ROCE_V1,
        BNXT_RE_GSI_MODE_ROCE_V2_IPV4,
        BNXT_RE_GSI_MODE_ROCE_V2_IPV6,
        BNXT_RE_GSI_MODE_UD
};

enum bnxt_re_roce_cap {
        BNXT_RE_FLAG_ROCEV1_CAP = 1,
        BNXT_RE_FLAG_ROCEV2_CAP,
        BNXT_RE_FLAG_ROCEV1_V2_CAP,
};

#define BNXT_RE_MAX_GSI_SQP_ENTRIES 1024
struct bnxt_re_gsi_context {
        u8 gsi_qp_mode;
        bool first_cq_created;
        /* Start: used only in gsi_mode_all */
        struct bnxt_re_qp *gsi_qp;
        struct bnxt_re_qp *gsi_sqp;
        struct bnxt_re_ah *gsi_sah;
        struct bnxt_re_sqp_entries *sqp_tbl;
        /* End: used only in gsi_mode_all */
};

struct bnxt_re_tc_rec {
        u8 cos_id_roce;
        u8 tc_roce;
        u8 cos_id_cnp;
        u8 tc_cnp;
        u8 tc_def;
        u8 cos_id_def;
        u8 max_tc;
        u8 roce_prio;
        u8 cnp_prio;
        u8 roce_dscp;
        u8 cnp_dscp;
        u8 prio_valid;
        u8 dscp_valid;
        bool ecn_enabled;
        bool serv_type_enabled;
        u64 cnp_dscp_bv;
        u64 roce_dscp_bv;
};

struct bnxt_re_dscp2pri {
        u8 dscp;
        u8 mask;
        u8 pri;
};

struct bnxt_re_cos2bw_cfg {
        u8 pad[3];
        struct_group_attr(cfg, __packed,
                u8 queue_id;
                __le32 min_bw;
                __le32 max_bw;
                u8 tsa;
                u8 pri_lvl;
                u8 bw_weight;
        );
        u8 unused;
};

#define BNXT_RE_AEQ_IDX 0
#define BNXT_RE_MAX_SGID_ENTRIES 256

#define BNXT_RE_DBGFS_FILE_MEM 65536
enum {
        BNXT_RE_STATS_QUERY = 1,
        BNXT_RE_QP_QUERY = 2,
        BNXT_RE_SERVICE_FN_QUERY = 3,
};

struct bnxt_re_dbg_file {
        struct bnxt_re_dev *rdev;
        u32 type;
        union {
                struct bnxt_qplib_query_stats_info sinfo;
                struct bnxt_qplib_query_fn_info fninfo;
        } params;
        char dbg_buf[BNXT_RE_DBGFS_FILE_MEM];
};

struct bnxt_re_debug_entries {
        /* Dir entries */
        struct dentry *qpinfo_dir;
        struct dentry *service_fn_dir;
        /* file entries */
        struct dentry *stat_query;
        struct bnxt_re_dbg_file stat_file;
        struct dentry *qplist_query;
        struct bnxt_re_dbg_file qp_file;
        struct dentry *service_fn_query;
        struct bnxt_re_dbg_file service_fn_file;
};

struct bnxt_re_en_dev_info {
        struct list_head en_list;
        struct bnxt_en_dev *en_dev;
        struct bnxt_re_dev *rdev;
        unsigned long flags;
#define BNXT_RE_FLAG_EN_DEV_NETDEV_REG 0
#define BNXT_RE_FLAG_EN_DEV_PRIMARY_DEV 1
#define BNXT_RE_FLAG_EN_DEV_SECONDARY_DEV 2
        u8 wqe_mode;
        u8 gsi_mode;
        bool te_bypass;
        bool ib_uninit_done;
        u32 num_msix_requested;
        wait_queue_head_t waitq;
};

#define BNXT_RE_DB_FIFO_ROOM_MASK_P5 0x1FFF8000
#define BNXT_RE_MAX_FIFO_DEPTH_P5 0x2c00
#define BNXT_RE_DB_FIFO_ROOM_SHIFT 15

#define BNXT_RE_DB_FIFO_ROOM_MASK_P7 0x3FFF8000
#define BNXT_RE_MAX_FIFO_DEPTH_P7 0x8000

#define BNXT_RE_DB_FIFO_ROOM_MASK(ctx) \
        (_is_chip_p7((ctx)) ? \
         BNXT_RE_DB_FIFO_ROOM_MASK_P7 : \
         BNXT_RE_DB_FIFO_ROOM_MASK_P5)
#define BNXT_RE_MAX_FIFO_DEPTH(ctx) \
        (_is_chip_p7((ctx)) ? \
         BNXT_RE_MAX_FIFO_DEPTH_P7 : \
         BNXT_RE_MAX_FIFO_DEPTH_P5)
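
/*
 * A minimal sketch (not a helper the driver defines) of how the room
 * mask/shift and depth above combine: the register reports free room,
 * so occupancy is depth minus room. It assumes the en_dev->softc member
 * and the readl_fbsd() accessor declared later in this header.
 *
 *        u32 val = readl_fbsd(rdev->en_dev->softc,
 *                             rdev->dbr_db_fifo_reg_off, 0);
 *        u32 occup = BNXT_RE_MAX_FIFO_DEPTH(ctx) -
 *                    ((val & BNXT_RE_DB_FIFO_ROOM_MASK(ctx)) >>
 *                     BNXT_RE_DB_FIFO_ROOM_SHIFT);
 */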

struct bnxt_dbq_nq_list {
        int num_nql_entries;
        u16 nq_id[16];
};

#define BNXT_RE_ASYNC_ERR_REP_BASE(_type) \
        (ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_##_type)

#define BNXT_RE_ASYNC_ERR_DBR_TRESH(_type) \
        (ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_##_type)

#define BNXT_RE_EVENT_DBR_EPOCH(data) \
        (((data) & \
          BNXT_RE_ASYNC_ERR_DBR_TRESH(EVENT_DATA1_EPOCH_MASK)) >> \
         BNXT_RE_ASYNC_ERR_DBR_TRESH(EVENT_DATA1_EPOCH_SFT))

#define BNXT_RE_EVENT_ERROR_REPORT_TYPE(data1) \
        (((data1) & \
          BNXT_RE_ASYNC_ERR_REP_BASE(TYPE_MASK)) >> \
         BNXT_RE_ASYNC_ERR_REP_BASE(TYPE_SFT))
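
/*
 * Illustrative decode of an async error-report completion (cmpl here is
 * a struct hwrm_async_event_cmpl, as carried by bnxt_re_dbq_work below):
 *
 *        u32 data1 = le32_to_cpu(cmpl->event_data1);
 *        u32 err_type = BNXT_RE_EVENT_ERROR_REPORT_TYPE(data1);
 *        u32 epoch = BNXT_RE_EVENT_DBR_EPOCH(data1);
 */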

#define BNXT_RE_DBR_LIST_ADD(_rdev, _res, _type) \
{ \
        spin_lock(&(_rdev)->res_list[_type].lock); \
        list_add_tail(&(_res)->dbr_list, \
                      &(_rdev)->res_list[_type].head); \
        spin_unlock(&(_rdev)->res_list[_type].lock); \
}

#define BNXT_RE_DBR_LIST_DEL(_rdev, _res, _type) \
{ \
        spin_lock(&(_rdev)->res_list[_type].lock); \
        list_del(&(_res)->dbr_list); \
        spin_unlock(&(_rdev)->res_list[_type].lock); \
}
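
/*
 * Hedged usage sketch: a resource that embeds a dbr_list list_head is
 * tracked for doorbell-drop recovery under the matching lock, e.g. for
 * a CQ (resource types come from the BNXT_RE_RES_TYPE_* enum below):
 *
 *        BNXT_RE_DBR_LIST_ADD(rdev, cq, BNXT_RE_RES_TYPE_CQ);
 *        ...
 *        BNXT_RE_DBR_LIST_DEL(rdev, cq, BNXT_RE_RES_TYPE_CQ);
 */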

#define BNXT_RE_CQ_PAGE_LIST_ADD(_uctx, _cq) \
{ \
        mutex_lock(&(_uctx)->cq_lock); \
        list_add_tail(&(_cq)->cq_list, &(_uctx)->cq_list); \
        mutex_unlock(&(_uctx)->cq_lock); \
}

#define BNXT_RE_CQ_PAGE_LIST_DEL(_uctx, _cq) \
{ \
        mutex_lock(&(_uctx)->cq_lock); \
        list_del(&(_cq)->cq_list); \
        mutex_unlock(&(_uctx)->cq_lock); \
}

#define BNXT_RE_NETDEV_EVENT(event, x) \
        do { \
                if ((event) == (x)) \
                        return #x; \
        } while (0)
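
/*
 * BNXT_RE_NETDEV_EVENT() expands to an early return of the event name,
 * so it is meant to be stacked inside a name-lookup helper; a minimal
 * sketch (the helper name here is illustrative):
 *
 *        static const char *bnxt_re_netevent(unsigned long event)
 *        {
 *                BNXT_RE_NETDEV_EVENT(event, NETDEV_UP);
 *                BNXT_RE_NETDEV_EVENT(event, NETDEV_DOWN);
 *                return "Unknown";
 *        }
 */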

/* Do not change the order of this enum; the DBR recovery code depends on it */
enum {
        BNXT_RE_RES_TYPE_CQ = 0,
        BNXT_RE_RES_TYPE_UCTX,
        BNXT_RE_RES_TYPE_QP,
        BNXT_RE_RES_TYPE_SRQ,
        BNXT_RE_RES_TYPE_MAX
};

struct bnxt_re_dbr_res_list {
        struct list_head head;
        spinlock_t lock;
};

struct bnxt_re_dbr_drop_recov_work {
        struct work_struct work;
        struct bnxt_re_dev *rdev;
        u32 curr_epoch;
};

struct bnxt_re_aer_work {
        struct work_struct work;
        struct bnxt_re_dev *rdev;
};

struct bnxt_re_dbq_stats {
        u64 fifo_occup_slab_1;
        u64 fifo_occup_slab_2;
        u64 fifo_occup_slab_3;
        u64 fifo_occup_slab_4;
        u64 fifo_occup_water_mark;
        u64 do_pacing_slab_1;
        u64 do_pacing_slab_2;
        u64 do_pacing_slab_3;
        u64 do_pacing_slab_4;
        u64 do_pacing_slab_5;
        u64 do_pacing_water_mark;
};

/* Device debug statistics */
struct bnxt_re_drv_dbg_stats {
        struct bnxt_re_dbq_stats dbq;
};

/* DB pacing counters */
struct bnxt_re_dbr_sw_stats {
        u64 dbq_int_recv;
        u64 dbq_int_en;
        u64 dbq_pacing_resched;
        u64 dbq_pacing_complete;
        u64 dbq_pacing_alerts;
        u64 dbr_drop_recov_events;
        u64 dbr_drop_recov_timeouts;
        u64 dbr_drop_recov_timeout_users;
        u64 dbr_drop_recov_event_skips;
};

struct bnxt_re_dev {
        struct ib_device ibdev;
        struct list_head list;
        atomic_t ref_count;
        atomic_t sched_count;
        unsigned long flags;
#define BNXT_RE_FLAG_NETDEV_REGISTERED 0
#define BNXT_RE_FLAG_IBDEV_REGISTERED 1
#define BNXT_RE_FLAG_GOT_MSIX 2
#define BNXT_RE_FLAG_HAVE_L2_REF 3
#define BNXT_RE_FLAG_ALLOC_RCFW 4
#define BNXT_RE_FLAG_NET_RING_ALLOC 5
#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 6
#define BNXT_RE_FLAG_ALLOC_CTX 7
#define BNXT_RE_FLAG_STATS_CTX_ALLOC 8
#define BNXT_RE_FLAG_STATS_CTX2_ALLOC 9
#define BNXT_RE_FLAG_RCFW_CHANNEL_INIT 10
#define BNXT_RE_FLAG_WORKER_REG 11
#define BNXT_RE_FLAG_TBLS_ALLOCINIT 12
#define BNXT_RE_FLAG_SETUP_NQ 13
#define BNXT_RE_FLAG_BOND_DEV_REGISTERED 14
#define BNXT_RE_FLAG_PER_PORT_DEBUG_INFO 15
#define BNXT_RE_FLAG_DEV_LIST_INITIALIZED 16
#define BNXT_RE_FLAG_ERR_DEVICE_DETACHED 17
#define BNXT_RE_FLAG_INIT_DCBX_CC_PARAM 18
#define BNXT_RE_FLAG_STOP_IN_PROGRESS 20
#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
#define BNXT_RE_FLAG_ISSUE_CFA_FLOW_STATS 30
        struct ifnet *netdev;
        struct auxiliary_device *adev;
        struct bnxt_qplib_chip_ctx *chip_ctx;
        struct bnxt_en_dev *en_dev;
        struct bnxt_re_nq_record nqr;
        int id;
        struct delayed_work worker;
        u16 worker_30s;
        struct bnxt_re_tc_rec tc_rec[2];
        u8 cur_prio_map;
        /* RCFW Channel */
        struct bnxt_qplib_rcfw rcfw;
        /* Device Resources */
        struct bnxt_qplib_dev_attr *dev_attr;
        struct bnxt_qplib_res qplib_res;
        struct bnxt_qplib_dpi dpi_privileged;
        struct bnxt_qplib_cc_param cc_param;
        struct mutex cc_lock;
        struct mutex qp_lock;
        struct list_head qp_list;
        u8 roce_mode;

        /* Max of 2 lossless traffic classes supported per port */
        u16 cosq[2];
        /* Start: QP for handling QP1 packets */
        struct bnxt_re_gsi_context gsi_ctx;
        /* End: QP for handling QP1 packets */
        bool is_virtfn;
        u32 num_vfs;
        u32 espeed;
        u8 lanes;
        /*
         * For storing the speed of slave interfaces.
         * Same as espeed when bond is not configured
         */
        u32 sl_espeed;
        /* To be used for a workaround for the iSER stack */
        u32 min_tx_depth;
        /* To enable qp debug info. Disabled during driver load */
        u32 en_qp_dbg;
        /* Array to handle gid mapping */
        char *gid_map;

        struct bnxt_re_device_stats stats;
        struct bnxt_re_drv_dbg_stats *dbg_stats;
        /* debugfs to expose per port information */
        struct dentry *port_debug_dir;
        struct dentry *info;
        struct dentry *drv_dbg_stats;
        struct dentry *sp_perf_stats;
        struct dentry *pdev_debug_dir;
        struct dentry *pdev_qpinfo_dir;
        struct bnxt_re_debug_entries *dbg_ent;
        struct workqueue_struct *resolve_wq;
        struct list_head mac_wq_list;
        struct workqueue_struct *dcb_wq;
        struct workqueue_struct *aer_wq;
        u32 event_bitmap[3];
        bool unreg_sched;
        u64 dbr_throttling_reg_off;
        u64 dbr_aeq_arm_reg_off;
        u64 dbr_db_fifo_reg_off;
        void *dbr_page;
        u64 dbr_bar_addr;
        u32 pacing_algo_th;
        u32 pacing_en_int_th;
        u32 do_pacing_save;
        struct workqueue_struct *dbq_wq;
        struct workqueue_struct *dbr_drop_recov_wq;
        struct work_struct dbq_fifo_check_work;
        struct delayed_work dbq_pacing_work;
        /* protect DB pacing */
        struct mutex dbq_lock;
        /* Control DBR pacing feature. Set if enabled */
        bool dbr_pacing;
        /* Control DBR recovery feature. Set if enabled */
        bool dbr_drop_recov;
        bool user_dbr_drop_recov;
        /* DBR recovery feature. Set if running */
        bool dbr_recovery_on;
        u32 user_dbr_drop_recov_timeout;
        /*
         * Value used for pacing algo when pacing is active
         */
#define BNXT_RE_MAX_DBR_DO_PACING 0xFFFF
        u32 dbr_do_pacing;
        u32 dbq_watermark; /* Current watermark set in HW registers */
        u32 dbq_nq_id; /* Current NQ ID for DBQ events */
        u32 dbq_pacing_time; /* ms */
        u32 dbr_def_do_pacing; /* do_pacing when no congestion */
        u32 dbr_evt_curr_epoch;
        bool dbq_int_disable;

        bool mod_exit;
        struct bnxt_re_dbr_sw_stats *dbr_sw_stats;
        struct bnxt_re_dbr_res_list res_list[BNXT_RE_RES_TYPE_MAX];
        struct bnxt_dbq_nq_list nq_list;
        char dev_name[IB_DEVICE_NAME_MAX];
        atomic_t dbq_intr_running;
        u32 num_msix_requested;
        unsigned char *dev_addr; /* For netdev->dev_addr */
};

#define BNXT_RE_RESOLVE_RETRY_COUNT_US 5000000 /* 5 sec */
struct bnxt_re_resolve_dmac_work {
        struct work_struct work;
        struct list_head list;
        struct bnxt_re_dev *rdev;
        struct ib_ah_attr *ah_attr;
        struct bnxt_re_ah_info *ah_info;
        atomic_t status_wait;
};

/* Return the lowest priority set in prio_map, or 0xFF if no bit is set */
static inline u8 bnxt_re_get_prio(u8 prio_map)
{
        u8 prio;

        for (prio = 0; prio < 8; prio++)
                if (prio_map & (1UL << prio))
                        return prio;

        return 0xFF;
}

/* This should be called with the bnxt_re_dev_lock mutex held */
static inline bool __bnxt_re_is_rdev_valid(struct bnxt_re_dev *rdev)
{
        struct bnxt_re_dev *tmp_rdev;

        list_for_each_entry(tmp_rdev, &bnxt_re_dev_list, list) {
                if (rdev == tmp_rdev)
                        return true;
        }
        return false;
}

static inline bool bnxt_re_is_rdev_valid(struct bnxt_re_dev *rdev)
{
        struct bnxt_re_dev *tmp_rdev;

        mutex_lock(&bnxt_re_dev_lock);
        list_for_each_entry(tmp_rdev, &bnxt_re_dev_list, list) {
                if (rdev == tmp_rdev) {
                        mutex_unlock(&bnxt_re_dev_lock);
                        return true;
                }
        }
        mutex_unlock(&bnxt_re_dev_lock);

        pr_debug("bnxt_re: %s : Invalid rdev received rdev = %p\n",
                 __func__, rdev);
        return false;
}

int bnxt_re_send_hwrm_cmd(struct bnxt_re_dev *rdev, void *cmd,
                          int cmdlen);
void bnxt_re_stopqps_and_ib_uninit(struct bnxt_re_dev *rdev);
int bnxt_re_set_hwrm_dscp2pri(struct bnxt_re_dev *rdev,
                              struct bnxt_re_dscp2pri *d2p, u16 count,
                              u16 target_id);
int bnxt_re_query_hwrm_dscp2pri(struct bnxt_re_dev *rdev,
                                struct bnxt_re_dscp2pri *d2p, u16 *count,
                                u16 target_id);
int bnxt_re_query_hwrm_qportcfg(struct bnxt_re_dev *rdev,
                                struct bnxt_re_tc_rec *cnprec, u16 tid);
int bnxt_re_hwrm_cos2bw_qcfg(struct bnxt_re_dev *rdev, u16 target_id,
                             struct bnxt_re_cos2bw_cfg *cfg);
int bnxt_re_hwrm_cos2bw_cfg(struct bnxt_re_dev *rdev, u16 target_id,
                            struct bnxt_re_cos2bw_cfg *cfg);
int bnxt_re_hwrm_pri2cos_cfg(struct bnxt_re_dev *rdev,
                             u16 target_id, u16 port_id,
                             u8 *cos_id_map, u8 pri_map);
int bnxt_re_prio_vlan_tx_update(struct bnxt_re_dev *rdev);
int bnxt_re_get_slot_pf_count(struct bnxt_re_dev *rdev);
struct bnxt_re_dev *bnxt_re_get_peer_pf(struct bnxt_re_dev *rdev);
struct bnxt_re_dev *bnxt_re_from_netdev(struct ifnet *netdev);
u8 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev, u8 selector);
struct bnxt_qplib_nq *bnxt_re_get_nq(struct bnxt_re_dev *rdev);
void bnxt_re_put_nq(struct bnxt_re_dev *rdev, struct bnxt_qplib_nq *nq);

#define to_bnxt_re(ptr, type, member) \
        container_of(ptr, type, member)

#define to_bnxt_re_dev(ptr, member) \
        container_of((ptr), struct bnxt_re_dev, member)

/* Even-numbered functions are on port 0, odd-numbered functions on port 1 */
#define BNXT_RE_IS_PORT0(rdev) (!(rdev->en_dev->pdev->devfn & 1))

#define BNXT_RE_ROCE_V1_PACKET 0
#define BNXT_RE_ROCEV2_IPV4_PACKET 2
#define BNXT_RE_ROCEV2_IPV6_PACKET 3
#define BNXT_RE_ACTIVE_MAP_PORT1 0x1 /* port-1 active */
#define BNXT_RE_ACTIVE_MAP_PORT2 0x2 /* port-2 active */

#define BNXT_RE_MEMBER_PORT_MAP (BNXT_RE_ACTIVE_MAP_PORT1 | \
                                 BNXT_RE_ACTIVE_MAP_PORT2)

#define rdev_to_dev(rdev) ((rdev) ? (&(rdev)->ibdev.dev) : NULL)

void bnxt_re_set_dma_device(struct ib_device *ibdev, struct bnxt_re_dev *rdev);
bool bnxt_re_is_rdev_valid(struct bnxt_re_dev *rdev);

#define bnxt_re_rdev_ready(rdev) (bnxt_re_is_rdev_valid(rdev) && \
                                  (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)))
#define BNXT_RE_SRIOV_CFG_TIMEOUT 6

int bnxt_re_get_device_stats(struct bnxt_re_dev *rdev);
void bnxt_re_remove_device(struct bnxt_re_dev *rdev, u8 removal_type,
                           struct auxiliary_device *aux_dev);
void bnxt_re_destroy_lag(struct bnxt_re_dev **rdev);
int bnxt_re_add_device(struct bnxt_re_dev **rdev,
                       struct ifnet *netdev,
                       u8 qp_mode, u8 op_type, u32 num_msix_requested,
                       struct auxiliary_device *aux_dev);
void bnxt_re_create_base_interface(bool primary);
int bnxt_re_schedule_work(struct bnxt_re_dev *rdev, unsigned long event,
                          struct ifnet *vlan_dev,
                          struct ifnet *netdev,
                          struct auxiliary_device *aux_dev);
void bnxt_re_get_link_speed(struct bnxt_re_dev *rdev);
int _bnxt_re_ib_init(struct bnxt_re_dev *rdev);
int _bnxt_re_ib_init2(struct bnxt_re_dev *rdev);
void bnxt_re_init_resolve_wq(struct bnxt_re_dev *rdev);
void bnxt_re_uninit_resolve_wq(struct bnxt_re_dev *rdev);

/* The rdev ref_count protects against premature removal of the device */
static inline void bnxt_re_hold(struct bnxt_re_dev *rdev)
{
        atomic_inc(&rdev->ref_count);
        dev_dbg(rdev_to_dev(rdev),
                "Hold ref_count = 0x%x", atomic_read(&rdev->ref_count));
}

static inline void bnxt_re_put(struct bnxt_re_dev *rdev)
{
        atomic_dec(&rdev->ref_count);
        dev_dbg(rdev_to_dev(rdev),
                "Put ref_count = 0x%x", atomic_read(&rdev->ref_count));
}
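
/*
 * Hedged usage sketch: hold a reference across any window where the
 * device must not be torn down, then drop it.
 *
 *        bnxt_re_hold(rdev);
 *        (... safe to dereference rdev here ...)
 *        bnxt_re_put(rdev);
 */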

/*
 * Responder Error reason codes
 * FIXME: Remove these when the defs
 * are properly included in the hsi header
 */
enum res_err_state_reason {
        /* No error. */
        CFCQ_RES_ERR_STATE_REASON_NO_ERROR = 0,
        /*
         * Incoming Send, RDMA write, or RDMA read exceeds the maximum
         * transfer length. Detected on RX first and only packets for
         * write. Detected on RX request for read. This is an RX
         * Detected Error.
         */
        CFCQ_RES_ERR_STATE_REASON_RES_EXCEED_MAX,
        /*
         * RDMA write payload size does not match write length. Detected
         * when total write payload is not equal to the RDMA write
         * length that was given in the first or only packet of the
         * request. This is an RX Detected Error.
         */
        CFCQ_RES_ERR_STATE_REASON_RES_PAYLOAD_LENGTH_MISMATCH,
        /*
         * Send payload exceeds RQ/SRQ WQE buffer capacity. The total
         * send payload that arrived is more than the size of the WQE
         * buffer that was fetched from the RQ/SRQ. This is an RX
         * Detected Error.
         */
        CFCQ_RES_ERR_STATE_REASON_RES_EXCEEDS_WQE,
        /*
         * Responder detected opcode error. * First, only, middle, last
         * for incoming requests are improperly ordered with respect to
         * previous (PSN) packet. * First or middle packet is not full
         * MTU size. This is an RX Detected Error.
         */
        CFCQ_RES_ERR_STATE_REASON_RES_OPCODE_ERROR,
        /*
         * PSN sequence error retry limit exceeded. The responder
         * encountered a PSN sequence error for the same PSN too many
         * times. This can occur via implicit or explicit NAK. This is
         * an RX Detected Error.
         */
        CFCQ_RES_ERR_STATE_REASON_RES_PSN_SEQ_ERROR_RETRY_LIMIT,
        /*
         * Invalid R_Key. An incoming request contained an R_Key that
         * did not reference a valid MR/MW. This error may be detected
         * by the RX engine for RDMA write or by the TX engine for RDMA
         * read (detected while servicing IRRQ). This is an RX Detected
         * Error.
         */
        CFCQ_RES_ERR_STATE_REASON_RES_RX_INVALID_R_KEY,
        /*
         * Domain error. An incoming request specified an R_Key which
         * referenced a MR/MW that was not in the same PD as the QP on
         * which the request arrived. This is an RX Detected Error.
         */
        CFCQ_RES_ERR_STATE_REASON_RES_RX_DOMAIN_ERROR,
        /*
         * No permission. An incoming request contained an R_Key that
         * referenced a MR/MW which did not have the access permission
         * needed for the operation. This is an RX Detected Error.
         */
        CFCQ_RES_ERR_STATE_REASON_RES_RX_NO_PERMISSION,
        /*
         * Range error. An incoming request had a combination of R_Key,
         * VA, and length that was out of bounds of the associated
         * MR/MW. This is an RX Detected Error.
         */
        CFCQ_RES_ERR_STATE_REASON_RES_RX_RANGE_ERROR,
        /*
         * Invalid R_Key. An incoming request contained an R_Key that
         * did not reference a valid MR/MW. This error may be detected
         * by the RX engine for RDMA write or by the TX engine for RDMA
         * read (detected while servicing IRRQ). This is a TX Detected
         * Error.
         */
        CFCQ_RES_ERR_STATE_REASON_RES_TX_INVALID_R_KEY,
        /*
         * Domain error. An incoming request specified an R_Key which
         * referenced a MR/MW that was not in the same PD as the QP on
         * which the request arrived. This is a TX Detected Error.
         */
        CFCQ_RES_ERR_STATE_REASON_RES_TX_DOMAIN_ERROR,
        /*
         * No permission. An incoming request contained an R_Key that
         * referenced a MR/MW which did not have the access permission
         * needed for the operation. This is a TX Detected Error.
         */
        CFCQ_RES_ERR_STATE_REASON_RES_TX_NO_PERMISSION,
        /*
         * Range error. An incoming request had a combination of R_Key,
         * VA, and length that was out of bounds of the associated
         * MR/MW. This is a TX Detected Error.
         */
        CFCQ_RES_ERR_STATE_REASON_RES_TX_RANGE_ERROR,
        /*
         * IRRQ overflow. The peer sent us more RDMA read or atomic
         * requests than the negotiated maximum. This is an RX Detected
         * Error.
         */
        CFCQ_RES_ERR_STATE_REASON_RES_IRRQ_OFLOW,
        /*
         * Unsupported opcode. The peer sent us a request with an opcode
         * for a request type that is not supported on this QP. This is
         * an RX Detected Error.
         */
        CFCQ_RES_ERR_STATE_REASON_RES_UNSUPPORTED_OPCODE,
        /*
         * Unaligned atomic operation. The VA of an atomic request is on
         * a memory boundary that prevents atomic execution. This is an
         * RX Detected Error.
         */
        CFCQ_RES_ERR_STATE_REASON_RES_UNALIGN_ATOMIC,
        /*
         * Remote invalidate error. A send with invalidate request
         * arrived in which the R_Key to invalidate did not describe a
         * MR/MW which could be invalidated. RQ WQE completes with error
         * status. This error is only reported if the send operation did
         * not fail. If the send operation failed then the remote
         * invalidate error is not reported. This is an RX Detected
         * Error.
         */
        CFCQ_RES_ERR_STATE_REASON_RES_REM_INVALIDATE,
        /*
         * Local memory error. An RQ/SRQ SGE described inaccessible
         * memory. This is an RX Detected Error.
         */
        CFCQ_RES_ERR_STATE_REASON_RES_MEMORY_ERROR,
        /*
         * SRQ in error. The QP is moving to error state because it
         * found the SRQ it uses in error. This is an RX Detected Error.
         */
        CFCQ_RES_ERR_STATE_REASON_RES_SRQ_ERROR,
        /*
         * Completion error. No CQE space available on queue or CQ not
         * in VALID state. This is a Completion Detected Error.
         */
        CFCQ_RES_ERR_STATE_REASON_RES_CMP_ERROR,
        /*
         * Invalid R_Key while resending responses to duplicate request.
         * This is a TX Detected Error.
         */
        CFCQ_RES_ERR_STATE_REASON_RES_IVALID_DUP_RKEY,
        /*
         * A problem was found in the format of a WQE in the RQ/SRQ.
         * This is an RX Detected Error.
         */
        CFCQ_RES_ERR_STATE_REASON_RES_WQE_FORMAT_ERROR,
        /*
         * A load error occurred on an attempt to load the CQ Context.
         * This is a Completion Detected Error.
         */
        CFCQ_RES_ERR_STATE_REASON_RES_CQ_LOAD_ERROR = 0x18,
        /*
         * A load error occurred on an attempt to load the SRQ Context.
         * This is an RX Detected Error.
         */
        CFCQ_RES_ERR_STATE_REASON_RES_SRQ_LOAD_ERROR,
        /*
         * A fatal error was detected on an attempt to read from or
         * write to PCIe on the transmit side. This error is detected by
         * the TX side, but has the priority of a Completion Detected
         * Error.
         */
        CFCQ_RES_ERR_STATE_REASON_RES_TX_PCI_ERROR = 0x1b,
        /*
         * A fatal error was detected on an attempt to read from or
         * write to PCIe on the receive side. This error is detected by
         * the RX side (or CAGR), but has the priority of a Completion
         * Detected Error.
         */
        CFCQ_RES_ERR_STATE_REASON_RES_RX_PCI_ERROR = 0x1c
};

int bnxt_re_host_pf_id_query(struct bnxt_re_dev *rdev,
                             struct bnxt_qplib_query_fn_info *fn_info,
                             u32 *pf_mask, u32 *first_pf);

/* Default DCBx and CC values */
#define BNXT_RE_DEFAULT_CNP_DSCP 48
#define BNXT_RE_DEFAULT_CNP_PRI 7
#define BNXT_RE_DEFAULT_ROCE_DSCP 26
#define BNXT_RE_DEFAULT_ROCE_PRI 3

#define BNXT_RE_DEFAULT_L2_BW 50
#define BNXT_RE_DEFAULT_ROCE_BW 50

#define ROCE_PRIO_VALID 0x0
#define CNP_PRIO_VALID 0x1
#define ROCE_DSCP_VALID 0x0
#define CNP_DSCP_VALID 0x1

int bnxt_re_get_pri_dscp_settings(struct bnxt_re_dev *rdev,
                                  u16 target_id,
                                  struct bnxt_re_tc_rec *tc_rec);

int bnxt_re_setup_dscp(struct bnxt_re_dev *rdev);
int bnxt_re_clear_dscp(struct bnxt_re_dev *rdev);
int bnxt_re_setup_cnp_cos(struct bnxt_re_dev *rdev, bool reset);

static inline enum ib_port_state bnxt_re_get_link_state(struct bnxt_re_dev *rdev)
{
        if (if_getdrvflags(rdev->netdev) & IFF_DRV_RUNNING &&
            if_getlinkstate(rdev->netdev) == LINK_STATE_UP)
                return IB_PORT_ACTIVE;
        return IB_PORT_DOWN;
}

static inline int bnxt_re_link_state(struct bnxt_re_dev *rdev)
{
        return bnxt_re_get_link_state(rdev) == IB_PORT_ACTIVE ? 1 : 0;
}

static inline int is_cc_enabled(struct bnxt_re_dev *rdev)
{
        return rdev->cc_param.enable;
}

static inline void bnxt_re_init_hwrm_hdr(struct bnxt_re_dev *rdev,
                                         struct input *hdr, u16 opcd,
                                         u16 crid, u16 trid)
{
        hdr->req_type = cpu_to_le16(opcd);
        hdr->cmpl_ring = cpu_to_le16(crid);
        hdr->target_id = cpu_to_le16(trid);
}

static inline void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg,
                                       void *msg, int msg_len, void *resp,
                                       int resp_max_len, int timeout)
{
        fw_msg->msg = msg;
        fw_msg->msg_len = msg_len;
        fw_msg->resp = resp;
        fw_msg->resp_max_len = resp_max_len;
        fw_msg->timeout = timeout;
}
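
/*
 * A minimal sketch of the usual HWRM command flow built from the two
 * helpers above. HWRM_VER_GET and its request/response structures come
 * from the HSI header; bnxt_send_msg() is assumed to be the L2 driver's
 * transport hook from bnxt_ulp.h.
 *
 *        struct hwrm_ver_get_input req = {};
 *        struct hwrm_ver_get_output resp = {};
 *        struct bnxt_fw_msg fw_msg = {};
 *        int rc;
 *
 *        bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_VER_GET, -1, -1);
 *        bnxt_re_fill_fw_msg(&fw_msg, &req, sizeof(req), &resp,
 *                            sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
 *        rc = bnxt_send_msg(rdev->en_dev, &fw_msg);
 */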

static inline bool is_qport_service_type_supported(struct bnxt_re_dev *rdev)
{
        return rdev->tc_rec[0].serv_type_enabled;
}

static inline bool is_bnxt_roce_queue(struct bnxt_re_dev *rdev, u8 ser_prof, u8 prof_type)
{
        if (is_qport_service_type_supported(rdev))
                return (prof_type & HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_TYPE_ROCE);
        else
                return (ser_prof == HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE);
}

static inline bool is_bnxt_cnp_queue(struct bnxt_re_dev *rdev, u8 ser_prof, u8 prof_type)
{
        if (is_qport_service_type_supported(rdev))
                return (prof_type & HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_TYPE_CNP);
        else
                return (ser_prof == HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP);
}

#define BNXT_RE_MAP_SH_PAGE 0x0
#define BNXT_RE_MAP_WC 0x1
#define BNXT_RE_DBR_PAGE 0x2
#define BNXT_RE_MAP_DB_RECOVERY_PAGE 0x3

#define BNXT_RE_DBR_RECOV_USERLAND_TIMEOUT (20) /* 20 ms */
#define BNXT_RE_DBR_INT_TIME 5 /* ms */
#define BNXT_RE_PACING_EN_INT_THRESHOLD 50 /* Entries in DB FIFO */
#define BNXT_RE_PACING_ALGO_THRESHOLD 250 /* Entries in DB FIFO */
/* Percentage of DB FIFO depth */
#define BNXT_RE_PACING_DBQ_THRESHOLD BNXT_RE_PACING_DBQ_HIGH_WATERMARK

#define BNXT_RE_PACING_ALARM_TH_MULTIPLE(ctx) (_is_chip_p7(ctx) ? 0 : 2)
/*
 * Maximum percentage of the DB FIFO depth that can be configured.
 * The doorbell FIFO depth is 0x2c00 (11264 entries), but the
 * DBR_REG_DB_THROTTLING register has only 12 bits to program the high
 * watermark, so at most 36% (4095/11264) can be configured.
 */
#define BNXT_RE_PACING_DBQ_HIGH_WATERMARK 36

/* Default do_pacing value when there is no congestion */
#define BNXT_RE_DBR_DO_PACING_NO_CONGESTION 0x7F /* 1 in 512 probability */
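
/*
 * Worked example: on P5 chips the FIFO holds 0x2c00 = 11264 entries, so
 * the programmed high watermark is 11264 * 36 / 100 = 4055 entries,
 * which fits in the 12-bit register field (max 4095).
 */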

enum {
        BNXT_RE_DBQ_EVENT_SCHED = 0,
        BNXT_RE_DBR_PACING_EVENT = 1,
        BNXT_RE_DBR_NQ_PACING_NOTIFICATION = 2,
};

struct bnxt_re_dbq_work {
        struct work_struct work;
        struct bnxt_re_dev *rdev;
        struct hwrm_async_event_cmpl cmpl;
        u32 event;
};

int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev);
int bnxt_re_enable_dbr_pacing(struct bnxt_re_dev *rdev);
int bnxt_re_disable_dbr_pacing(struct bnxt_re_dev *rdev);
int bnxt_re_set_dbq_throttling_reg(struct bnxt_re_dev *rdev,
                                   u16 nq_id, u32 throttle);
void bnxt_re_pacing_alert(struct bnxt_re_dev *rdev);
int bnxt_re_hwrm_pri2cos_qcfg(struct bnxt_re_dev *rdev, struct bnxt_re_tc_rec *tc_rec,
                              u16 target_id);
void writel_fbsd(struct bnxt_softc *bp, u32, u8, u32);
u32 readl_fbsd(struct bnxt_softc *bp, u32, u8);

static inline unsigned int bnxt_re_get_total_mr_mw_count(struct bnxt_re_dev *rdev)
{
        return (atomic_read(&rdev->stats.rsors.mr_count) +
                atomic_read(&rdev->stats.rsors.mw_count));
}

static inline void bnxt_re_set_def_pacing_threshold(struct bnxt_re_dev *rdev)
{
        rdev->qplib_res.pacing_data->pacing_th = rdev->pacing_algo_th;
        rdev->qplib_res.pacing_data->alarm_th =
                rdev->pacing_algo_th * BNXT_RE_PACING_ALARM_TH_MULTIPLE(rdev->chip_ctx);
}
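
/*
 * Worked example: with the default BNXT_RE_PACING_ALGO_THRESHOLD of 250
 * entries, alarm_th becomes 500 on P5 chips (multiplier 2) and 0 on P7
 * chips, where the alarm path is effectively disabled.
 */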

static inline void bnxt_re_set_def_do_pacing(struct bnxt_re_dev *rdev)
{
        rdev->qplib_res.pacing_data->do_pacing = rdev->dbr_def_do_pacing;
}

static inline bool bnxt_re_is_var_size_supported(struct bnxt_re_dev *rdev,
                                                 struct bnxt_re_ucontext *uctx)
{
        if (uctx)
                return uctx->cmask & BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED;
        else
                return rdev->chip_ctx->modes.wqe_mode;
}

static inline void bnxt_re_set_pacing_dev_state(struct bnxt_re_dev *rdev)
{
        rdev->qplib_res.pacing_data->dev_err_state =
                test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
}
#endif /* __BNXT_RE_H__ */