xref: /linux/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h (revision c17ee635fd3a482b2ad2bf5e269755c2eae5f25e)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) 2025 Broadcom */
3 
4 #ifndef _BNGE_NETDEV_H_
5 #define _BNGE_NETDEV_H_
6 
7 #include <linux/bnge/hsi.h>
8 #include <linux/io-64-nonatomic-lo-hi.h>
9 #include <linux/refcount.h>
10 #include "bnge_db.h"
11 #include "bnge_hw_def.h"
12 
/*
 * TX buffer descriptor shared with the hardware (multi-byte fields are
 * little-endian).  The TX_BD_* masks below describe the bit layout of
 * tx_bd_len_flags_type.
 */
struct tx_bd {
	__le32 tx_bd_len_flags_type;
	/* bits 0-5: BD type */
	#define TX_BD_TYPE					(0x3f << 0)
	#define TX_BD_TYPE_SHORT_TX_BD				(0x00 << 0)
	#define TX_BD_TYPE_LONG_TX_BD				(0x10 << 0)
	/* bits 6-7: per-BD flags */
	#define TX_BD_FLAGS_PACKET_END				(1 << 6)
	#define TX_BD_FLAGS_NO_CMPL				(1 << 7)
	/* bits 8-12: number of BDs used by this packet */
	#define TX_BD_FLAGS_BD_CNT				(0x1f << 8)
	#define TX_BD_FLAGS_BD_CNT_SHIFT			8
	/* bits 13-14: packet-length hint for the hardware */
	#define TX_BD_FLAGS_LHINT				(3 << 13)
	#define TX_BD_FLAGS_LHINT_SHIFT				13
	#define TX_BD_FLAGS_LHINT_512_AND_SMALLER		(0 << 13)
	#define TX_BD_FLAGS_LHINT_512_TO_1023			(1 << 13)
	#define TX_BD_FLAGS_LHINT_1024_TO_2047			(2 << 13)
	#define TX_BD_FLAGS_LHINT_2048_AND_LARGER		(3 << 13)
	#define TX_BD_FLAGS_COAL_NOW				(1 << 15)
	/* bits 16-31: buffer length in bytes */
	#define TX_BD_LEN					(0xffff << 16)
	#define TX_BD_LEN_SHIFT					16
	u32 tx_bd_opaque;	/* driver cookie; presumably echoed in tx_cmp_opaque — confirm in TX path */
	__le64 tx_bd_haddr;	/* DMA address of the buffer */
} __packed;
34 
/*
 * RX buffer descriptor shared with the hardware (multi-byte fields are
 * little-endian).  Bit layout of rx_bd_len_flags_type is described by
 * the RX_BD_* masks below.
 */
struct rx_bd {
	__le32 rx_bd_len_flags_type;
	/* bits 0-3 (within the 6-bit type field): BD type */
	#define RX_BD_TYPE					(0x3f << 0)
	#define RX_BD_TYPE_RX_PACKET_BD				0x4
	#define RX_BD_TYPE_RX_BUFFER_BD				0x5
	#define RX_BD_TYPE_RX_AGG_BD				0x6
	/* bits 4-5: BD size encoding */
	#define RX_BD_TYPE_16B_BD_SIZE				(0 << 4)
	#define RX_BD_TYPE_32B_BD_SIZE				(1 << 4)
	#define RX_BD_TYPE_48B_BD_SIZE				(2 << 4)
	#define RX_BD_TYPE_64B_BD_SIZE				(3 << 4)
	/* bits 6-7: start/end of packet markers */
	#define RX_BD_FLAGS_SOP					(1 << 6)
	#define RX_BD_FLAGS_EOP					(1 << 7)
	/* bits 8-9: buffers-per-packet encoding */
	#define RX_BD_FLAGS_BUFFERS				(3 << 8)
	#define RX_BD_FLAGS_1_BUFFER_PACKET			(0 << 8)
	#define RX_BD_FLAGS_2_BUFFER_PACKET			(1 << 8)
	#define RX_BD_FLAGS_3_BUFFER_PACKET			(2 << 8)
	#define RX_BD_FLAGS_4_BUFFER_PACKET			(3 << 8)
	/* bits 16-31: buffer length in bytes */
	#define RX_BD_LEN					(0xffff << 16)
	#define RX_BD_LEN_SHIFT					16
	u32 rx_bd_opaque;	/* driver cookie; not interpreted by hardware */
	__le64 rx_bd_haddr;	/* DMA address of the buffer */
};
57 
/*
 * Completion ring entry.  Despite the tx_ prefix, the CMP_TYPE values
 * below show this layout is used to decode all completion types (TX,
 * RX, TPA, async events); bits 0-5 of tx_cmp_flags_type select the
 * actual record type.
 */
struct tx_cmp {
	__le32 tx_cmp_flags_type;
	/* bits 0-5: completion type */
	#define CMP_TYPE					(0x3f << 0)
	#define CMP_TYPE_TX_L2_CMP				0
	#define CMP_TYPE_TX_L2_COAL_CMP				2
	#define CMP_TYPE_TX_L2_PKT_TS_CMP			4
	#define CMP_TYPE_RX_L2_CMP				17
	#define CMP_TYPE_RX_AGG_CMP				18
	#define CMP_TYPE_RX_L2_TPA_START_CMP			19
	#define CMP_TYPE_RX_L2_TPA_END_CMP			21
	#define CMP_TYPE_RX_TPA_AGG_CMP				22
	#define CMP_TYPE_RX_L2_V3_CMP				23
	#define CMP_TYPE_RX_L2_TPA_START_V3_CMP			25
	#define CMP_TYPE_STATUS_CMP				32
	#define CMP_TYPE_REMOTE_DRIVER_REQ			34
	#define CMP_TYPE_REMOTE_DRIVER_RESP			36
	#define CMP_TYPE_ERROR_STATUS				48
	/* firmware (HWRM) completion base types */
	#define CMPL_BASE_TYPE_STAT_EJECT			0x1aUL
	#define CMPL_BASE_TYPE_HWRM_DONE			0x20UL
	#define CMPL_BASE_TYPE_HWRM_FWD_REQ			0x22UL
	#define CMPL_BASE_TYPE_HWRM_FWD_RESP			0x24UL
	#define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT			0x2eUL
	/* short alias to keep users of the async-event type within 80 cols */
	#define CMPL_BA_TY_HWRM_ASY_EVT	CMPL_BASE_TYPE_HWRM_ASYNC_EVENT
	#define TX_CMP_FLAGS_ERROR				(1 << 6)
	#define TX_CMP_FLAGS_PUSH				(1 << 7)
	u32 tx_cmp_opaque;	/* cookie copied back from the originating BD */
	__le32 tx_cmp_errors_v;
	/* bit 0: completion valid bit */
	#define TX_CMP_V					(1 << 0)
	/* bits 1-3: buffer error code */
	#define TX_CMP_ERRORS_BUFFER_ERROR			(7 << 1)
	#define TX_CMP_ERRORS_BUFFER_ERROR_NO_ERROR		0
	#define TX_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT		2
	#define TX_CMP_ERRORS_BUFFER_ERROR_INVALID_STAG		4
	#define TX_CMP_ERRORS_BUFFER_ERROR_STAG_BOUNDS		5
	#define TX_CMP_ERRORS_ZERO_LENGTH_PKT			(1 << 4)
	#define TX_CMP_ERRORS_EXCESSIVE_BD_LEN			(1 << 5)
	#define TX_CMP_ERRORS_DMA_ERROR				(1 << 6)
	#define TX_CMP_ERRORS_HINT_TOO_SHORT			(1 << 7)
	__le32 sq_cons_idx;
	/* bits 0-23: send-queue consumer index */
	#define TX_CMP_SQ_CONS_IDX_MASK				0x00ffffff
};
98 
/*
 * Software (driver-side) state for one TX descriptor; parallels the
 * hardware tx_bd ring and is never seen by the device.
 */
struct bnge_sw_tx_bd {
	struct sk_buff		*skb;			/* packet owning this BD (first BD of a packet) */
	DEFINE_DMA_UNMAP_ADDR(mapping);			/* DMA address to unmap on completion */
	DEFINE_DMA_UNMAP_LEN(len);			/* mapped length for the unmap */
	struct page		*page;
	u8			is_ts_pkt;		/* NOTE(review): name suggests a timestamped packet — confirm in TX path */
	u8			is_push;
	u8			action;
	unsigned short		nr_frags;		/* number of fragment BDs following this one */
	union {						/* meaning depends on BD role */
		u16		rx_prod;
		u16		txts_prod;
	};
};
113 
/* Software state for one RX descriptor on the main RX ring. */
struct bnge_sw_rx_bd {
	void			*data;		/* buffer as allocated */
	u8			*data_ptr;	/* start of packet data within the buffer */
	dma_addr_t		mapping;	/* DMA address of the buffer */
};
119 
/* Software state for one RX aggregation-ring descriptor (page based). */
struct bnge_sw_rx_agg_bd {
	netmem_ref		netmem;		/* page-pool backed network memory */
	unsigned int		offset;		/* data offset within the netmem */
	dma_addr_t		mapping;	/* DMA address of the fragment */
};
125 
/* Ring-type selector flags for firmware (HWRM) ring allocation. */
#define HWRM_RING_ALLOC_TX	0x1
#define HWRM_RING_ALLOC_RX	0x2
#define HWRM_RING_ALLOC_AGG	0x4
#define HWRM_RING_ALLOC_CMPL	0x8
#define HWRM_RING_ALLOC_NQ	0x10
131 
/* Firmware identifiers for one ring group (indexed by napi/NQ index). */
struct bnge_ring_grp_info {
	u16	fw_stats_ctx;		/* firmware statistics context id */
	u16	fw_grp_id;		/* firmware ring group id */
	u16	rx_fw_ring_id;		/* firmware id of the RX ring */
	u16	agg_fw_ring_id;		/* firmware id of the RX aggregation ring */
	u16	nq_fw_ring_id;		/* firmware id of the notification queue */
};
139 
/* Default and upper bound for the rx_copybreak tunable (bytes). */
#define BNGE_DEFAULT_RX_COPYBREAK	256
#define BNGE_MAX_RX_COPYBREAK		1024

/* All RX/TX VLAN offload feature bits (CTAG + STAG). */
#define BNGE_HW_FEATURE_VLAN_ALL_RX	\
		(NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)
#define BNGE_HW_FEATURE_VLAN_ALL_TX	\
		(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX)

/* Feature-enable bits (see bnge_net::priv_flags users for exact scope). */
enum {
	BNGE_NET_EN_GRO		= BIT(0),
	BNGE_NET_EN_LRO		= BIT(1),
	BNGE_NET_EN_JUMBO	= BIT(2),
};

/* TPA (hardware aggregation) is on when either GRO or LRO is enabled. */
#define BNGE_NET_EN_TPA		(BNGE_NET_EN_GRO | BNGE_NET_EN_LRO)

/* Firmware is unreachable once the PCI channel has gone offline. */
#define BNGE_NO_FW_ACCESS(bd)	(pci_channel_offline((bd)->pdev))

/* Maximum TPA aggregation contexts (power of two for the mask). */
#define MAX_TPA		256
#define MAX_TPA_MASK	(MAX_TPA - 1)
#define MAX_TPA_SEGS	0x3f

/*
 * Header offsets packed into the TPA-start completion's hdr_info word:
 * bits 18-26 inner L3, bits 9-17 inner L2, bits 0-8 outer L3.
 */
#define BNGE_TPA_INNER_L3_OFF(hdr_info)	\
	(((hdr_info) >> 18) & 0x1ff)

#define BNGE_TPA_INNER_L2_OFF(hdr_info)	\
	(((hdr_info) >> 9) & 0x1ff)

#define BNGE_TPA_OUTER_L3_OFF(hdr_info)	\
	((hdr_info) & 0x1ff)
170 
/* Maps hardware aggregation ids to driver TPA slots, plus a free bitmap. */
struct bnge_tpa_idx_map {
	u16		agg_id_tbl[1024];		/* hw agg_id -> driver TPA index */
	DECLARE_BITMAP(agg_idx_bmap, MAX_TPA);		/* in-use TPA slots */
};
175 
/* Per-aggregation state for one in-flight TPA (HW GRO/LRO) flow. */
struct bnge_tpa_info {
	void			*data;		/* head buffer */
	u8			*data_ptr;	/* start of packet data in the buffer */
	dma_addr_t		mapping;	/* DMA address of the head buffer */
	u16			len;		/* length accumulated so far */
	unsigned short		gso_type;	/* gso_type to set on the final skb */
	u32			flags2;
	u32			metadata;
	enum pkt_hash_types	hash_type;	/* RSS hash type for skb_set_hash() */
	u32			rss_hash;	/* RSS hash value */
	u32			hdr_info;	/* packed header offsets (BNGE_TPA_*_OFF) */

	u16			cfa_code; /* cfa_code in TPA start compl */
	u8			agg_count;	/* entries queued in agg_arr */
	bool			vlan_valid;	/* metadata carries a valid VLAN tag */
	bool			cfa_code_valid;	/* cfa_code field is valid */
	struct rx_agg_cmp	*agg_arr;	/* buffered aggregation completions */
};
194 
/* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS + 1. We need one extra
 * BD because the first TX BD is always a long BD.
 */
#define BNGE_MIN_TX_DESC_CNT	(MAX_SKB_FRAGS + 2)

/*
 * Descriptor rings are arrays of pages.  Since each BD is 16 bytes
 * (1 << 4), shifting a masked ring index right by (BNGE_PAGE_SHIFT - 4)
 * yields the page number, and the *_IDX macros give the offset within
 * that page.
 */
#define RX_RING(bn, x)	(((x) & (bn)->rx_ring_mask) >> (BNGE_PAGE_SHIFT - 4))
#define RX_AGG_RING(bn, x)	(((x) & (bn)->rx_agg_ring_mask) >>	\
				 (BNGE_PAGE_SHIFT - 4))
#define RX_IDX(x)	((x) & (RX_DESC_CNT - 1))

#define TX_RING(bn, x)	(((x) & (bn)->tx_ring_mask) >> (BNGE_PAGE_SHIFT - 4))
#define TX_IDX(x)	((x) & (TX_DESC_CNT - 1))

#define CP_RING(x)	(((x) & ~(CP_DESC_CNT - 1)) >> (BNGE_PAGE_SHIFT - 4))
#define CP_IDX(x)	((x) & (CP_DESC_CNT - 1))

/* Wrap a producer/consumer index back into the ring... */
#define RING_RX(bn, idx)	((idx) & (bn)->rx_ring_mask)
/* ...and advance it (callers re-mask with RING_RX when indexing). */
#define NEXT_RX(idx)		((idx) + 1)

#define RING_RX_AGG(bn, idx)	((idx) & (bn)->rx_agg_ring_mask)
#define NEXT_RX_AGG(idx)	((idx) + 1)

/*
 * NQ handle encoding: ring type in the top 8 bits, completion-ring
 * index in the low 24 bits.
 */
#define BNGE_NQ_HDL_IDX_MASK	0x00ffffff
#define BNGE_NQ_HDL_TYPE_MASK	0xff000000
#define BNGE_NQ_HDL_TYPE_SHIFT	24
#define BNGE_NQ_HDL_TYPE_RX	0x00
#define BNGE_NQ_HDL_TYPE_TX	0x01

#define BNGE_NQ_HDL_IDX(hdl)	((hdl) & BNGE_NQ_HDL_IDX_MASK)
#define BNGE_NQ_HDL_TYPE(hdl)	(((hdl) & BNGE_NQ_HDL_TYPE_MASK) >>	\
				 BNGE_NQ_HDL_TYPE_SHIFT)
226 
/*
 * Per-netdev driver state: ring geometry, NAPI contexts, VNIC and
 * filter bookkeeping.  Allocated alongside the net_device.
 */
struct bnge_net {
	struct bnge_dev		*bd;		/* owning device */
	struct net_device	*netdev;

	u32			priv_flags;

	/* RX ring geometry (sizes in descriptors, masks = size-dependent) */
	u32			rx_ring_size;
	u32			rx_buf_size;
	u32			rx_buf_use_size; /* usable size */
	u32			rx_agg_ring_size;
	u32			rx_copybreak;	/* copy small packets below this size */
	u32			rx_ring_mask;
	u32			rx_agg_ring_mask;
	u16			rx_nr_pages;	/* descriptor pages per RX ring */
	u16			rx_agg_nr_pages;

	/* TX ring geometry */
	u32			tx_ring_size;
	u32			tx_ring_mask;
	u16			tx_nr_pages;

	/* NQs and Completion rings */
	u32			cp_ring_size;
	u32			cp_ring_mask;
	u32			cp_bit;		/* valid-bit position for the current ring pass */
	u16			cp_nr_pages;

	/* L2 (MAC) filter hash table */
#define BNGE_L2_FLTR_HASH_SIZE	32
#define BNGE_L2_FLTR_HASH_MASK	(BNGE_L2_FLTR_HASH_SIZE - 1)
	struct hlist_head	l2_fltr_hash_tbl[BNGE_L2_FLTR_HASH_SIZE];
	u32			hash_seed;
	u64			toeplitz_prefix;

	struct bnge_napi		**bnapi;	/* one per NQ/IRQ */

	struct bnge_rx_ring_info	*rx_ring;
	struct bnge_tx_ring_info	*tx_ring;

	u16				*tx_ring_map;	/* txq index -> TX ring index */
	enum dma_data_direction		rx_dir;		/* DMA direction for RX buffers */

	/* grp_info indexed by napi/nq index */
	struct bnge_ring_grp_info	*grp_info;
	struct bnge_vnic_info		*vnic_info;
	int				nr_vnics;
	int				total_irqs;

	u32			tx_wake_thresh;	/* free BDs needed to restart a stopped queue */
	u16			rx_offset;	/* packet data offset in RX buffers */
	u16			rx_dma_offset;	/* DMA mapping offset in RX buffers */

	/* RSS configuration */
	u8			rss_hash_key[HW_HASH_KEY_SIZE];
	u8			rss_hash_key_valid:1;
	u8			rss_hash_key_updated:1;
	int			rsscos_nr_ctxs;
	u32			stats_coal_ticks;

	unsigned long		state;
#define BNGE_STATE_NAPI_DISABLED	0

	u32			msg_enable;	/* netif_msg bitmask */
	u16			max_tpa;
	/* UDP tunnel destination ports currently programmed */
	__be16			vxlan_port;
	__be16			nge_port;
	__be16			vxlan_gpe_port;
};
292 
/* Default ring sizes in descriptors (2^n - 1 so masks work out). */
#define BNGE_DEFAULT_RX_RING_SIZE	511
#define BNGE_DEFAULT_TX_RING_SIZE	511

int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs);	/* allocate and register the netdev */
void bnge_netdev_free(struct bnge_dev *bd);			/* tear down the netdev */
void bnge_set_ring_params(struct bnge_dev *bd);			/* derive ring geometry from current config */
299 
/*
 * Maximum descriptor pages per ring.  With 64K pages each page holds
 * more descriptors, so fewer pages are needed.
 */
#if (BNGE_PAGE_SHIFT == 16)
#define MAX_RX_PAGES_AGG_ENA	1
#define MAX_RX_PAGES		4
#define MAX_RX_AGG_PAGES	4
#define MAX_TX_PAGES		1
#define MAX_CP_PAGES		16
#else
#define MAX_RX_PAGES_AGG_ENA	8
#define MAX_RX_PAGES		32
#define MAX_RX_AGG_PAGES	32
#define MAX_TX_PAGES		8
#define MAX_CP_PAGES		128
#endif

#define BNGE_RX_PAGE_SIZE		(1 << BNGE_RX_PAGE_SHIFT)

/* Descriptors per page, and byte sizes of the HW/SW shadow rings. */
#define RX_DESC_CNT			(BNGE_PAGE_SIZE / sizeof(struct rx_bd))
#define TX_DESC_CNT			(BNGE_PAGE_SIZE / sizeof(struct tx_bd))
#define CP_DESC_CNT			(BNGE_PAGE_SIZE / sizeof(struct tx_cmp))
#define SW_RXBD_RING_SIZE		(sizeof(struct bnge_sw_rx_bd) * RX_DESC_CNT)
#define HW_RXBD_RING_SIZE		(sizeof(struct rx_bd) * RX_DESC_CNT)
#define SW_RXBD_AGG_RING_SIZE		(sizeof(struct bnge_sw_rx_agg_bd) * RX_DESC_CNT)
#define SW_TXBD_RING_SIZE		(sizeof(struct bnge_sw_tx_bd) * TX_DESC_CNT)
#define HW_TXBD_RING_SIZE		(sizeof(struct tx_bd) * TX_DESC_CNT)
#define HW_CMPD_RING_SIZE		(sizeof(struct tx_cmp) * CP_DESC_CNT)
/* Largest supported ring sizes (2^n - 1) */
#define BNGE_MAX_RX_DESC_CNT		(RX_DESC_CNT * MAX_RX_PAGES - 1)
#define BNGE_MAX_RX_DESC_CNT_JUM_ENA	(RX_DESC_CNT * MAX_RX_PAGES_AGG_ENA - 1)
#define BNGE_MAX_RX_JUM_DESC_CNT	(RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
#define BNGE_MAX_TX_DESC_CNT		(TX_DESC_CNT * MAX_TX_PAGES - 1)

/* Upper bound on TX rings sharing one NAPI context. */
#define BNGE_MAX_TXR_PER_NAPI	8
331 
/*
 * Iterate over the TX rings of a NAPI context; stops at the first NULL
 * slot or after BNGE_MAX_TXR_PER_NAPI entries.
 */
#define bnge_for_each_napi_tx(iter, bnapi, txr)		\
	for (iter = 0, txr = (bnapi)->tx_ring[0]; txr;	\
	     txr = (iter < BNGE_MAX_TXR_PER_NAPI - 1) ?	\
	     (bnapi)->tx_ring[++iter] : NULL)

/*
 * Doorbell index encoding: the ring index in the low bits plus an
 * epoch bit derived from how many times the index has wrapped.
 */
#define DB_EPOCH(db, idx)	(((idx) & (db)->db_epoch_mask) <<	\
				 ((db)->db_epoch_shift))

#define DB_TOGGLE(tgl)		((tgl) << DBR_TOGGLE_SFT)

#define DB_RING_IDX(db, idx)	(((idx) & (db)->db_ring_mask) |		\
				 DB_EPOCH(db, idx))

/* Build an NQ handle from a completion ring (see BNGE_NQ_HDL_* above). */
#define BNGE_SET_NQ_HDL(cpr)						\
	(((cpr)->cp_ring_type << BNGE_NQ_HDL_TYPE_SHIFT) | (cpr)->cp_idx)

/* Ring the NQ doorbell (64-bit write), without/with re-arming the NQ. */
#define BNGE_DB_NQ(bd, db, idx)						\
	bnge_writeq(bd, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
		    (db)->doorbell)

#define BNGE_DB_NQ_ARM(bd, db, idx)					\
	bnge_writeq(bd, (db)->db_key64 | DBR_TYPE_NQ_ARM |	\
		    DB_RING_IDX(db, idx), (db)->doorbell)
355 
/* DMA-coherent statistics block shared with hardware, plus SW shadow. */
struct bnge_stats_mem {
	u64		*sw_stats;	/* software copy of the counters */
	u64		*hw_masks;	/* per-counter validity/width masks */
	void		*hw_stats;	/* DMA buffer written by hardware */
	dma_addr_t	hw_stats_map;	/* DMA address of hw_stats */
	int		len;		/* size of hw_stats in bytes */
};
363 
/* Notification-queue entry: a CQ notification (little-endian fields). */
struct nqe_cn {
	__le16	type;
	/* bits 0-5: entry type */
	#define NQ_CN_TYPE_MASK			0x3fUL
	#define NQ_CN_TYPE_SFT			0
	#define NQ_CN_TYPE_CQ_NOTIFICATION	0x30UL
	#define NQ_CN_TYPE_LAST			NQ_CN_TYPE_CQ_NOTIFICATION
	/* bits 6-7: toggle, must match the driver's expected value */
	#define NQ_CN_TOGGLE_MASK		0xc0UL
	#define NQ_CN_TOGGLE_SFT		6
	__le16	reserved16;
	__le32	cq_handle_low;		/* low 32 bits of the CQ handle */
	__le32	v;
	/* bit 0: entry valid bit */
	#define NQ_CN_V				0x1UL
	__le32	cq_handle_high;		/* high 32 bits of the CQ handle */
};

/* Extract the type / toggle fields from an nqe_cn type word. */
#define NQE_CN_TYPE(type)	((type) & NQ_CN_TYPE_MASK)
#define NQE_CN_TOGGLE(type)	(((type) & NQ_CN_TOGGLE_MASK) >>	\
				 NQ_CN_TOGGLE_SFT)
382 
/* One completion ring, hanging off an NQ (see bnge_nq_ring_info). */
struct bnge_cp_ring_info {
	struct bnge_napi	*bnapi;		/* owning NAPI context */
	dma_addr_t		*desc_mapping;	/* DMA address of each descriptor page */
	struct tx_cmp		**desc_ring;	/* descriptor pages */
	struct bnge_ring_struct	ring_struct;
	u8			cp_ring_type;	/* BNGE_NQ_HDL_TYPE_RX or _TX */
	u8			cp_idx;		/* index within the NQ's cp_ring_arr */
	u32			cp_raw_cons;	/* raw (unwrapped) consumer index */
	struct bnge_db_info	cp_db;		/* doorbell for this ring */
	bool			had_work_done;
	bool			has_more_work;	/* budget exhausted, poll again */
	bool			had_nqe_notify;
	u8			toggle;		/* expected NQE toggle value */
};
397 
/* Notification queue: one per IRQ, fans out to completion rings. */
struct bnge_nq_ring_info {
	struct bnge_napi	*bnapi;		/* owning NAPI context */
	dma_addr_t		*desc_mapping;	/* DMA address of each descriptor page */
	struct nqe_cn		**desc_ring;	/* descriptor pages */
	struct bnge_ring_struct	ring_struct;
	u32			nq_raw_cons;	/* raw (unwrapped) consumer index */
	struct bnge_db_info	nq_db;		/* doorbell for this NQ */

	struct bnge_stats_mem	stats;		/* per-NQ hardware stats block */
	u32			hw_stats_ctx_id;
	bool			has_more_work;	/* budget exhausted, poll again */

	/* completion rings serviced through this NQ */
	u16				cp_ring_count;
	struct bnge_cp_ring_info	*cp_ring_arr;
};
413 
/* One RX ring plus its aggregation ring and TPA state. */
struct bnge_rx_ring_info {
	struct bnge_napi	*bnapi;		/* owning NAPI context */
	struct bnge_cp_ring_info	*rx_cpr;	/* completion ring for this RX ring */
	u16			rx_prod;	/* RX producer index */
	u16			rx_agg_prod;	/* aggregation-ring producer index */
	u16			rx_sw_agg_prod;	/* software aggregation producer index */
	u16			rx_next_cons;	/* next expected consumer index */
	struct bnge_db_info	rx_db;
	struct bnge_db_info	rx_agg_db;

	struct rx_bd		*rx_desc_ring[MAX_RX_PAGES];	/* HW descriptor pages */
	struct bnge_sw_rx_bd	*rx_buf_ring;			/* SW shadow ring */

	struct rx_bd			*rx_agg_desc_ring[MAX_RX_AGG_PAGES];
	struct bnge_sw_rx_agg_bd	*rx_agg_buf_ring;

	unsigned long		*rx_agg_bmap;	/* in-use bitmap for agg buffers */
	u16			rx_agg_bmap_size;

	dma_addr_t		rx_desc_mapping[MAX_RX_PAGES];
	dma_addr_t		rx_agg_desc_mapping[MAX_RX_AGG_PAGES];

	struct bnge_tpa_info	*rx_tpa;	/* per-aggregation TPA state */
	struct bnge_tpa_idx_map *rx_tpa_idx_map;

	struct bnge_ring_struct	rx_ring_struct;
	struct bnge_ring_struct	rx_agg_ring_struct;
	struct page_pool	*page_pool;	/* pool backing RX buffers */
	struct page_pool	*head_pool;	/* separate pool for head buffers */
	bool			need_head_pool;
};
445 
/* One TX ring and its software bookkeeping. */
struct bnge_tx_ring_info {
	struct bnge_napi	*bnapi;		/* owning NAPI context */
	struct bnge_cp_ring_info	*tx_cpr;	/* completion ring for this TX ring */
	u16			tx_prod;	/* producer index */
	u16			tx_cons;	/* consumer index (driver) */
	u16			tx_hw_cons;	/* consumer index reported by hardware */
	u16			txq_index;	/* netdev TX queue index */
	u8			tx_napi_idx;	/* slot in bnapi->tx_ring[] */
	u8			kick_pending;	/* doorbell write deferred */
	struct bnge_db_info	tx_db;

	struct tx_bd		*tx_desc_ring[MAX_TX_PAGES];	/* HW descriptor pages */
	struct bnge_sw_tx_bd	*tx_buf_ring;			/* SW shadow ring */

	dma_addr_t		tx_desc_mapping[MAX_TX_PAGES];

	u32			dev_state;
#define BNGE_DEV_STATE_CLOSING	0x1
	struct bnge_ring_struct	tx_ring_struct;
};
467 
/* Per-IRQ NAPI context tying together an NQ, RX ring and TX rings. */
struct bnge_napi {
	struct napi_struct		napi;
	struct bnge_net			*bn;
	int				index;		/* NQ / IRQ index */

	struct bnge_nq_ring_info	nq_ring;
	struct bnge_rx_ring_info	*rx_ring;
	struct bnge_tx_ring_info	*tx_ring[BNGE_MAX_TXR_PER_NAPI];
	/* event bits accumulated during a poll pass */
	u8				events;
#define BNGE_RX_EVENT			1
#define BNGE_AGG_EVENT			2
#define BNGE_TX_EVENT			4
#define BNGE_REDIRECT_EVENT		8
#define BNGE_TX_CMP_EVENT		0x10
	bool				in_reset;
	bool				tx_fault;
};
485 
#define INVALID_STATS_CTX_ID	-1
#define BNGE_VNIC_DEFAULT	0	/* index of the default VNIC */
#define BNGE_MAX_UC_ADDRS	4	/* unicast filter slots per VNIC */

/* One virtual NIC: RSS context, unicast/multicast filter state. */
struct bnge_vnic_info {
	u16		fw_vnic_id;	/* firmware id of this VNIC */
#define BNGE_MAX_CTX_PER_VNIC	8
	u16		fw_rss_cos_lb_ctx[BNGE_MAX_CTX_PER_VNIC];
	u16		mru;		/* max receive unit programmed in firmware */
	/* index 0 always dev_addr */
	struct bnge_l2_filter *l2_filters[BNGE_MAX_UC_ADDRS];
	u16		uc_filter_count;
	u8		*uc_list;	/* raw unicast MAC list */
	/* RSS indirection table and hash key (DMA-visible) */
	dma_addr_t	rss_table_dma_addr;
	__le16		*rss_table;
	dma_addr_t	rss_hash_key_dma_addr;
	u64		*rss_hash_key;
	int		rss_table_size;
#define BNGE_RSS_TABLE_ENTRIES		64
#define BNGE_RSS_TABLE_SIZE		(BNGE_RSS_TABLE_ENTRIES * 4)
#define BNGE_RSS_TABLE_MAX_TBL		8
#define BNGE_MAX_RSS_TABLE_SIZE			\
	(BNGE_RSS_TABLE_SIZE * BNGE_RSS_TABLE_MAX_TBL)
	u32		rx_mask;	/* RX filter mode mask sent to firmware */

	/* multicast MAC list (DMA-visible) */
	u8		*mc_list;
	int		mc_list_size;
	int		mc_list_count;
	dma_addr_t	mc_list_mapping;
#define BNGE_MAX_MC_ADDRS	16

	u32		flags;
#define BNGE_VNIC_RSS_FLAG	1
#define BNGE_VNIC_MCAST_FLAG	4
#define BNGE_VNIC_UCAST_FLAG	8
	u32		vnic_id;	/* driver-side VNIC index */
};
523 
/* Common head shared by all filter types (must be the first member). */
struct bnge_filter_base {
	struct hlist_node	hash;		/* linkage in the per-type hash table */
	struct list_head	list;
	__le64			filter_id;	/* id assigned by firmware */
	u8			type;
#define BNGE_FLTR_TYPE_L2	2
	u8			flags;
	u16			rxq;		/* destination RX queue */
	u16			fw_vnic_id;
	u16			vf_idx;
	unsigned long		state;
#define BNGE_FLTR_VALID		0
#define BNGE_FLTR_FW_DELETED	2

	struct rcu_head         rcu;		/* filters are freed via RCU */
};
540 
/*
 * L2 filter key: destination MAC + VLAN, with filter_key overlaying the
 * first 4 bytes for fast hashing/compare.  NOTE(review): filter_key is
 * u32 while the struct arm is 8 bytes — confirm only the first word is
 * meant to be aliased.
 */
struct bnge_l2_key {
	union {
		struct {
			u8	dst_mac_addr[ETH_ALEN];
			u16	vlan;
		};
		u32	filter_key;
	};
};
550 
/* Key size in 32-bit words, for word-wise hashing of the key. */
#define BNGE_L2_KEY_SIZE	(sizeof(struct bnge_l2_key) / 4)
/* Refcounted L2 (MAC/VLAN) filter. */
struct bnge_l2_filter {
	/* base filter must be the first member */
	struct bnge_filter_base	base;
	struct bnge_l2_key	l2_key;
	refcount_t		refcnt;		/* shared between VNIC slots */
};
558 
/* Firmware completion-ring id for an RX / TX ring. */
u16 bnge_cp_ring_for_rx(struct bnge_rx_ring_info *rxr);
u16 bnge_cp_ring_for_tx(struct bnge_tx_ring_info *txr);
/* Program a VNIC's RSS indirection table into hardware format. */
void bnge_fill_hw_rss_tbl(struct bnge_net *bn, struct bnge_vnic_info *vnic);
/* Allocate and map an RX buffer at producer index @prod. */
int bnge_alloc_rx_data(struct bnge_net *bn, struct bnge_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp);
/* Find the next free TPA aggregation slot at or after @idx. */
u16 bnge_find_next_agg_idx(struct bnge_rx_ring_info *rxr, u16 idx);
/* Allocate an RX data fragment; returns data_ptr, fills *mapping. */
u8 *__bnge_alloc_rx_frag(struct bnge_net *bn, dma_addr_t *mapping,
			 struct bnge_rx_ring_info *rxr, gfp_t gfp);
/* Allocate and map page-pool netmem for the aggregation ring. */
int bnge_alloc_rx_netmem(struct bnge_net *bn, struct bnge_rx_ring_info *rxr,
			 u16 prod, gfp_t gfp);
569 #endif /* _BNGE_NETDEV_H_ */
570