/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (c) 2025 Broadcom.

#ifndef __BNG_RES_H__
#define __BNG_RES_H__

#include "roce_hsi.h"

#define BNG_ROCE_FW_MAX_TIMEOUT 60

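/*
 * PTR_PG()/PTR_IDX() split a flat index into a pointer array that is spread
 * across page-sized chunks into a (page, slot) pair.  For example, assuming
 * 4 KiB pages and 8-byte pointers (512 pointers per page), index 515
 * resolves to page 1, slot 3.
 */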
#define PTR_CNT_PER_PG (PAGE_SIZE / sizeof(void *))
#define PTR_MAX_IDX_PER_PG (PTR_CNT_PER_PG - 1)
#define PTR_PG(x) (((x) & ~PTR_MAX_IDX_PER_PG) / PTR_CNT_PER_PG)
#define PTR_IDX(x) ((x) & PTR_MAX_IDX_PER_PG)

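/*
 * The hardware queues are power-of-two sized rings: HWQ_CMP() wraps an
 * unbounded producer/consumer index to a ring slot, and HWQ_FREE_SLOTS()
 * returns how many entries are currently unused.
 */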
#define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1))
#define HWQ_FREE_SLOTS(hwq) (hwq->max_elements - \
			     ((HWQ_CMP(hwq->prod, hwq) \
			     - HWQ_CMP(hwq->cons, hwq)) \
			     & (hwq->max_elements - 1)))

#define MAX_PBL_LVL_0_PGS 1
#define MAX_PBL_LVL_1_PGS 512
#define MAX_PBL_LVL_1_PGS_SHIFT 9
#define MAX_PBL_LVL_1_PGS_FOR_LVL_2 256
#define MAX_PBL_LVL_2_PGS (256 * 512)
#define MAX_PDL_LVL_SHIFT 9

#define BNG_RE_DBR_VALID (0x1UL << 26)
#define BNG_RE_DBR_EPOCH_SHIFT 24
#define BNG_RE_DBR_TOGGLE_SHIFT 25

#define BNG_MAX_TQM_ALLOC_REQ 48

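/* Describes one mapped register region of a PCI BAR. */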
struct bng_re_reg_desc {
	u8 bar_id;
	resource_size_t bar_base;
	unsigned long offset;
	void __iomem *bar_reg;
	size_t len;
};

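/*
 * Doorbell context for a hardware queue: the mapped doorbell (and, where
 * used, the privileged doorbell) address, the queue it serves, the xid
 * written into the doorbell, and the epoch/toggle state carried in 'flags'
 * and 'toggle'.
 */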
struct bng_re_db_info {
	void __iomem *db;
	void __iomem *priv_db;
	struct bng_re_hwq *hwq;
	u32 xid;
	u32 max_slot;
	u32 flags;
	u8 toggle;
};

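/*
 * Epoch bits for the consumer and producer indices live in bits 0 and 1 of
 * bng_re_db_info::flags.  The producer shift below is one less than the
 * consumer shift so that, after masking, both epoch bits land on
 * BNG_RE_DBR_EPOCH_SHIFT in the doorbell index word.
 */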
enum bng_re_db_info_flags_mask {
	BNG_RE_FLAG_EPOCH_CONS_SHIFT = 0x0UL,
	BNG_RE_FLAG_EPOCH_PROD_SHIFT = 0x1UL,
	BNG_RE_FLAG_EPOCH_CONS_MASK = 0x1UL,
	BNG_RE_FLAG_EPOCH_PROD_MASK = 0x2UL,
};

enum bng_re_db_epoch_flag_shift {
	BNG_RE_DB_EPOCH_CONS_SHIFT = BNG_RE_DBR_EPOCH_SHIFT,
	BNG_RE_DB_EPOCH_PROD_SHIFT = (BNG_RE_DBR_EPOCH_SHIFT - 1),
};

struct bng_re_chip_ctx {
	u16 chip_num;
	u16 hw_stats_size;
	u64 hwrm_intf_ver;
	u16 hwrm_cmd_max_timeout;
};

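/*
 * One level of the PBL hierarchy: the count and size of the backing pages
 * plus their kernel virtual and DMA addresses.
 */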
struct bng_re_pbl {
	u32 pg_count;
	u32 pg_size;
	void **pg_arr;
	dma_addr_t *pg_map_arr;
};

enum bng_re_pbl_lvl {
	BNG_PBL_LVL_0,
	BNG_PBL_LVL_1,
	BNG_PBL_LVL_2,
	BNG_PBL_LVL_MAX
};

enum bng_re_hwq_type {
	BNG_HWQ_TYPE_CTX,
	BNG_HWQ_TYPE_QUEUE
};

struct bng_re_sg_info {
	u32 npages;
	u32 pgshft;
	u32 pgsize;
	bool nopte;
};

struct bng_re_hwq_attr {
	struct bng_re_res *res;
	struct bng_re_sg_info *sginfo;
	enum bng_re_hwq_type type;
	u32 depth;
	u32 stride;
	u32 aux_stride;
	u32 aux_depth;
};

struct bng_re_hwq {
	struct pci_dev *pdev;
	/* lock to protect hwq */
	spinlock_t lock;
	struct bng_re_pbl pbl[BNG_PBL_LVL_MAX + 1];
	/* Valid values: 0, 1, 2 */
	enum bng_re_pbl_lvl level;
	/* PBL entries */
	void **pbl_ptr;
	/* PBL dma_addr */
	dma_addr_t *pbl_dma_ptr;
	u32 max_elements;
	u32 depth;
	u16 element_size;
	u32 prod;
	u32 cons;
	/* queue entries per page */
	u16 qe_ppg;
};

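/*
 * Statistics memory shared with the device: DMA mapping, CPU address, size,
 * and the firmware id of the statistics context.
 */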
struct bng_re_stats {
	dma_addr_t dma_map;
	void *dma;
	u32 size;
	u32 fw_id;
};

struct bng_re_res {
	struct pci_dev *pdev;
	struct bng_re_chip_ctx *cctx;
	struct bng_re_dev_attr *dattr;
};

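/*
 * Return a pointer to queue element 'indx' of a hardware queue whose
 * elements are spread over page-sized chunks.  If 'pg' is non-NULL it is
 * set to the address of the pbl_ptr[] slot holding that page.
 */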
static inline void *bng_re_get_qe(struct bng_re_hwq *hwq,
				  u32 indx, u64 *pg)
{
	u32 pg_num, pg_idx;

	pg_num = (indx / hwq->qe_ppg);
	pg_idx = (indx % hwq->qe_ppg);
	if (pg)
		*pg = (u64)&hwq->pbl_ptr[pg_num];
	return (void *)(hwq->pbl_ptr[pg_num] + hwq->element_size * pg_idx);
}

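/*
 * Build the 64-bit doorbell value: the upper 32 bits carry the xid, path,
 * doorbell type and valid bit; the lower 32 bits carry the ring index along
 * with the toggle (and, via 'indx', the epoch) bits.
 */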
#define BNG_RE_INIT_DBHDR(xid, type, indx, toggle) \
	(((u64)(((xid) & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | \
	 (type) | BNG_RE_DBR_VALID) << 32) | (indx) | \
	 (((u32)(toggle)) << (BNG_RE_DBR_TOGGLE_SHIFT)))

static inline void bng_re_ring_db(struct bng_re_db_info *info,
				  u32 type)
{
	u64 key = 0;
	u32 indx;
	u8 toggle = 0;

	if (type == DBC_DBC_TYPE_CQ_ARMALL ||
	    type == DBC_DBC_TYPE_CQ_ARMSE)
		toggle = info->toggle;

	indx = (info->hwq->cons & DBC_DBC_INDEX_MASK) |
	       ((info->flags & BNG_RE_FLAG_EPOCH_CONS_MASK) <<
		BNG_RE_DB_EPOCH_CONS_SHIFT);

	key = BNG_RE_INIT_DBHDR(info->xid, type, indx, toggle);
	writeq(key, info->db);
}

static inline void bng_re_ring_nq_db(struct bng_re_db_info *info,
				     struct bng_re_chip_ctx *cctx,
				     bool arm)
{
	u32 type;

	type = arm ? DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ;
	bng_re_ring_db(info, type);
}

static inline void bng_re_hwq_incr_cons(u32 max_elements, u32 *cons, u32 cnt,
					u32 *dbinfo_flags)
{
	/* move cons and update toggle/epoch if wrap around */
	*cons += cnt;
	if (*cons >= max_elements) {
		*cons %= max_elements;
		*dbinfo_flags ^= 1UL << BNG_RE_FLAG_EPOCH_CONS_SHIFT;
	}
}

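/*
 * Typical consumer-side flow (a sketch; 'nq_db', 'cctx' and 'budget' are
 * hypothetical names): after handling 'budget' notification queue entries,
 * advance the consumer index and re-arm the queue:
 *
 *	bng_re_hwq_incr_cons(nq_db->hwq->max_elements, &nq_db->hwq->cons,
 *			     budget, &nq_db->flags);
 *	bng_re_ring_nq_db(nq_db, cctx, true);
 */
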
static inline bool _is_max_srq_ext_supported(u16 dev_cap_ext_flags_2)
{
	return !!(dev_cap_ext_flags_2 & CREQ_QUERY_FUNC_RESP_SB_MAX_SRQ_EXTENDED);
}

void bng_re_free_hwq(struct bng_re_res *res,
		     struct bng_re_hwq *hwq);

int bng_re_alloc_init_hwq(struct bng_re_hwq *hwq,
			  struct bng_re_hwq_attr *hwq_attr);

void bng_re_free_stats_ctx_mem(struct pci_dev *pdev,
			       struct bng_re_stats *stats);

int bng_re_alloc_stats_ctx_mem(struct pci_dev *pdev,
			       struct bng_re_chip_ctx *cctx,
			       struct bng_re_stats *stats);
#endif /* __BNG_RES_H__ */