/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_H_
#define _ICE_H_

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/netdevice.h>
#include <linux/compiler.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/cpumask.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/interrupt.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/bitmap.h>
#include <linux/log2.h>
#include <linux/ip.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/pkt_sched.h>
#include <linux/if_bridge.h>
#include <linux/ctype.h>
#include <linux/linkmode.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/auxiliary_bus.h>
#include <linux/avf/virtchnl.h>
#include <linux/cpu_rmap.h>
#include <linux/dim.h>
#include <linux/gnss.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_gact.h>
#include <net/ip.h>
#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock.h>
#include <net/xdp_sock_drv.h>
#include <net/geneve.h>
#include <net/gre.h>
#include <net/udp_tunnel.h>
#include <net/vxlan.h>
#include <net/gtp.h>
#include <linux/ppp_defs.h>
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
#include "ice_dcb.h"
#include "ice_switch.h"
#include "ice_common.h"
#include "ice_flow.h"
#include "ice_sched.h"
#include "ice_idc_int.h"
#include "ice_sriov.h"
#include "ice_vf_mbx.h"
#include "ice_ptp.h"
#include "ice_fdir.h"
#include "ice_xsk.h"
#include "ice_arfs.h"
#include "ice_repr.h"
#include "ice_eswitch.h"
#include "ice_lag.h"
#include "ice_vsi_vlan_ops.h"
#include "ice_gnss.h"
#include "ice_irq.h"
#include "ice_dpll.h"

#define ICE_BAR0		0
#define ICE_REQ_DESC_MULTIPLE	32
#define ICE_MIN_NUM_DESC	64
#define ICE_MAX_NUM_DESC	8160
#define ICE_DFLT_MIN_RX_DESC	512
#define ICE_DFLT_NUM_TX_DESC	256
#define ICE_DFLT_NUM_RX_DESC	2048

#define ICE_DFLT_TRAFFIC_CLASS	BIT(0)
#define ICE_INT_NAME_STR_LEN	(IFNAMSIZ + 16)
#define ICE_AQ_LEN		192
#define ICE_MBXSQ_LEN		64
#define ICE_SBQ_LEN		64
#define ICE_MIN_LAN_TXRX_MSIX	1
#define ICE_MIN_LAN_OICR_MSIX	1
#define ICE_MIN_MSIX		(ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
#define ICE_FDIR_MSIX		2
#define ICE_RDMA_NUM_AEQ_MSIX	4
#define ICE_MIN_RDMA_MSIX	2
#define ICE_ESWITCH_MSIX	1
#define ICE_NO_VSI		0xffff
#define ICE_VSI_MAP_CONTIG	0
#define ICE_VSI_MAP_SCATTER	1
#define ICE_MAX_SCATTER_TXQS	16
#define ICE_MAX_SCATTER_RXQS	16
#define ICE_Q_WAIT_RETRY_LIMIT	10
#define ICE_Q_WAIT_MAX_RETRY	(5 * ICE_Q_WAIT_RETRY_LIMIT)
#define ICE_MAX_LG_RSS_QS	256
#define ICE_INVAL_Q_INDEX	0xffff

#define ICE_MAX_RXQS_PER_TC		256	/* Used when setting VSI context per TC Rx queues */

#define ICE_CHNL_START_TC		1

#define ICE_MAX_RESET_WAIT		20

#define ICE_VSIQF_HKEY_ARRAY_SIZE	((VSIQF_HKEY_MAX_INDEX + 1) * 4)

#define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

#define ICE_MAX_MTU	(ICE_AQ_SET_MAC_FRAME_SIZE_MAX - ICE_ETH_PKT_HDR_PAD)

#define ICE_MAX_TSO_SIZE 131072

#define ICE_UP_TABLE_TRANSLATE(val, i) \
		(((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
		  ICE_AQ_VSI_UP_TABLE_UP##i##_M)
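
/* The second argument is pasted into the register field names, so it must
 * be a literal table index. A hypothetical use, placing user priority 3
 * into the UP0 and UP1 fields:
 *
 *	u32 table = ICE_UP_TABLE_TRANSLATE(3, 0) |
 *		    ICE_UP_TABLE_TRANSLATE(3, 1);
 */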

#define ICE_TX_DESC(R, i) (&(((struct ice_tx_desc *)((R)->desc))[i]))
#define ICE_RX_DESC(R, i) (&(((union ice_32b_rx_flex_desc *)((R)->desc))[i]))
#define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i]))
#define ICE_TX_FDIRDESC(R, i) (&(((struct ice_fltr_desc *)((R)->desc))[i]))
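
/* These accessors cast a ring's raw descriptor memory to the descriptor
 * type it actually holds. A minimal sketch, assuming tx_ring is a valid
 * struct ice_tx_ring:
 *
 *	struct ice_tx_desc *desc = ICE_TX_DESC(tx_ring, tx_ring->next_to_use);
 */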

/* Minimum BW limit is 500 Kbps for any scheduler node */
#define ICE_MIN_BW_LIMIT		500
/* The user may specify BW in Kbit/Mbit/Gbit and the OS converts it to
 * bytes/s; use this divisor to convert the user-specified BW limit into Kbps.
 */
#define ICE_BW_KBPS_DIVISOR		125
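
/* Worked example: a user-supplied rate of 1 Gbit/s arrives from the stack
 * as 125000000 bytes/s ('bytes_per_sec' below is a hypothetical local):
 *
 *	u64 rate_kbps = div_u64(bytes_per_sec, ICE_BW_KBPS_DIVISOR);
 *
 * 125000000 / 125 == 1000000 Kbps, i.e. 1 Gbit/s again.
 */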

/* Default recipes have priority 4 and below, so priority values 5..7 can be
 * used as the filter priority of advanced switch filters. (Advanced switch
 * filters need a new recipe to be created for a specified extraction
 * sequence, because the default recipe's extraction sequence does not
 * represent custom extractions.)
 */
#define ICE_SWITCH_FLTR_PRIO_QUEUE	7
/* prio 6 is reserved for future use (e.g. switch filter with L3 fields +
 * (optional: IP TOS/TTL) + L4 fields + (optional: TCP fields such as
 * SYN/FIN/RST))
 */
#define ICE_SWITCH_FLTR_PRIO_RSVD	6
#define ICE_SWITCH_FLTR_PRIO_VSI	5
#define ICE_SWITCH_FLTR_PRIO_QGRP	ICE_SWITCH_FLTR_PRIO_VSI

/* Macro for each VSI in a PF */
#define ice_for_each_vsi(pf, i) \
	for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)

/* Macros for each Tx/XDP/Rx ring in a VSI */
#define ice_for_each_txq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_txq; (i)++)

#define ice_for_each_xdp_txq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_xdp_txq; (i)++)

#define ice_for_each_rxq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)

/* Macros for each allocated Tx/Rx ring in a VSI, whether used or not */
#define ice_for_each_alloc_txq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)

#define ice_for_each_alloc_rxq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++)

#define ice_for_each_q_vector(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)

#define ice_for_each_chnl_tc(i)	\
	for ((i) = ICE_CHNL_START_TC; (i) < ICE_CHNL_MAX_TC; (i)++)
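
/* A minimal sketch of walking every active Tx ring in a PF with these
 * macros; do_something() is a hypothetical callback, and the caller is
 * assumed to hold whatever lock keeps pf->vsi[] stable:
 *
 *	int v, q;
 *
 *	ice_for_each_vsi(pf, v) {
 *		struct ice_vsi *vsi = pf->vsi[v];
 *
 *		if (!vsi)
 *			continue;
 *		ice_for_each_txq(vsi, q)
 *			do_something(vsi->tx_rings[q]);
 *	}
 */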

#define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_UCAST_RX)

#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_TX | \
				     ICE_PROMISC_UCAST_RX | \
				     ICE_PROMISC_VLAN_TX  | \
				     ICE_PROMISC_VLAN_RX)

#define ICE_MCAST_PROMISC_BITS (ICE_PROMISC_MCAST_TX | ICE_PROMISC_MCAST_RX)

#define ICE_MCAST_VLAN_PROMISC_BITS (ICE_PROMISC_MCAST_TX | \
				     ICE_PROMISC_MCAST_RX | \
				     ICE_PROMISC_VLAN_TX  | \
				     ICE_PROMISC_VLAN_RX)

#define ice_pf_to_dev(pf) (&((pf)->pdev->dev))

#define ice_pf_src_tmr_owned(pf) ((pf)->hw.func_caps.ts_func_info.src_tmr_owned)

enum ice_feature {
	ICE_F_DSCP,
	ICE_F_PHY_RCLK,
	ICE_F_SMA_CTRL,
	ICE_F_CGU,
	ICE_F_GNSS,
	ICE_F_ROCE_LAG,
	ICE_F_SRIOV_LAG,
	ICE_F_MAX
};

DECLARE_STATIC_KEY_FALSE(ice_xdp_locking_key);

struct ice_channel {
	struct list_head list;
	u8 type;
	u16 sw_id;
	u16 base_q;
	u16 num_rxq;
	u16 num_txq;
	u16 vsi_num;
	u8 ena_tc;
	struct ice_aqc_vsi_props info;
	u64 max_tx_rate;
	u64 min_tx_rate;
	atomic_t num_sb_fltr;
	struct ice_vsi *ch_vsi;
};

struct ice_txq_meta {
	u32 q_teid;	/* Tx-scheduler element identifier */
	u16 q_id;	/* Entry in VSI's txq_map bitmap */
	u16 q_handle;	/* Relative index of Tx queue within TC */
	u16 vsi_idx;	/* VSI index that Tx queue belongs to */
	u8 tc;		/* TC number that Tx queue belongs to */
};

struct ice_tc_info {
	u16 qoffset;
	u16 qcount_tx;
	u16 qcount_rx;
	u8 netdev_tc;
};

struct ice_tc_cfg {
	u8 numtc; /* Total number of enabled TCs */
	u16 ena_tc; /* Tx map */
	struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];
};

struct ice_qs_cfg {
	struct mutex *qs_mutex;  /* will be assigned to &pf->avail_q_mutex */
	unsigned long *pf_map;
	unsigned long pf_map_size;
	unsigned int q_count;
	unsigned int scatter_count;
	u16 *vsi_map;
	u16 vsi_map_offset;
	u8 mapping_mode;
};

struct ice_sw {
	struct ice_pf *pf;
	u16 sw_id;		/* switch ID for this switch */
	u16 bridge_mode;	/* VEB/VEPA/Port Virtualizer */
};

enum ice_pf_state {
	ICE_TESTING,
	ICE_DOWN,
	ICE_NEEDS_RESTART,
	ICE_PREPARED_FOR_RESET,	/* set by driver when prepared */
	ICE_RESET_OICR_RECV,	/* set by driver after rcv reset OICR */
	ICE_PFR_REQ,		/* set by driver */
	ICE_CORER_REQ,		/* set by driver */
	ICE_GLOBR_REQ,		/* set by driver */
	ICE_CORER_RECV,		/* set by OICR handler */
	ICE_GLOBR_RECV,		/* set by OICR handler */
	ICE_EMPR_RECV,		/* set by OICR handler */
	ICE_SUSPENDED,		/* set on module remove path */
	ICE_RESET_FAILED,	/* set by reset/rebuild */
	/* When checking whether the PF is in a nominal operating state, only
	 * the bits occurring before ICE_STATE_NOMINAL_CHECK_BITS are
	 * checked. If a new bit needs to be considered for the nominal
	 * operating state, it must be added before
	 * ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position
	 * without appropriate consideration.
	 */
	ICE_STATE_NOMINAL_CHECK_BITS,
	ICE_ADMINQ_EVENT_PENDING,
	ICE_MAILBOXQ_EVENT_PENDING,
	ICE_SIDEBANDQ_EVENT_PENDING,
	ICE_MDD_EVENT_PENDING,
	ICE_VFLR_EVENT_PENDING,
	ICE_FLTR_OVERFLOW_PROMISC,
	ICE_VF_DIS,
	ICE_CFG_BUSY,
	ICE_SERVICE_SCHED,
	ICE_SERVICE_DIS,
	ICE_FD_FLUSH_REQ,
	ICE_OICR_INTR_DIS,		/* Global OICR interrupt disabled */
	ICE_MDD_VF_PRINT_PENDING,	/* set while an MDD event is handled */
	ICE_VF_RESETS_DISABLED,		/* disable resets during ice_remove */
	ICE_LINK_DEFAULT_OVERRIDE_PENDING,
	ICE_PHY_INIT_COMPLETE,
	ICE_FD_VF_FLUSH_CTX,		/* set at FD Rx IRQ or timeout */
	ICE_AUX_ERR_PENDING,
	ICE_STATE_NBITS			/* must be last */
};
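
/* A sketch of the nominal-state test described above: the PF is nominal
 * only while none of the bits below ICE_STATE_NOMINAL_CHECK_BITS are set
 * (the driver's actual helper may differ in detail):
 *
 *	static bool pf_state_is_nominal(struct ice_pf *pf)
 *	{
 *		DECLARE_BITMAP(check_bits, ICE_STATE_NBITS) = { 0 };
 *
 *		bitmap_set(check_bits, 0, ICE_STATE_NOMINAL_CHECK_BITS);
 *		return !bitmap_intersects(pf->state, check_bits,
 *					  ICE_STATE_NBITS);
 *	}
 */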

enum ice_vsi_state {
	ICE_VSI_DOWN,
	ICE_VSI_NEEDS_RESTART,
	ICE_VSI_NETDEV_ALLOCD,
	ICE_VSI_NETDEV_REGISTERED,
	ICE_VSI_UMAC_FLTR_CHANGED,
	ICE_VSI_MMAC_FLTR_CHANGED,
	ICE_VSI_PROMISC_CHANGED,
	ICE_VSI_STATE_NBITS		/* must be last */
};

struct ice_vsi_stats {
	struct ice_ring_stats **tx_ring_stats;  /* Tx ring stats array */
	struct ice_ring_stats **rx_ring_stats;  /* Rx ring stats array */
};

/* struct that defines a VSI, associated with a dev */
struct ice_vsi {
	struct net_device *netdev;
	struct ice_sw *vsw;		 /* switch this VSI is on */
	struct ice_pf *back;		 /* back pointer to PF */
	struct ice_port_info *port_info; /* back pointer to port_info */
	struct ice_rx_ring **rx_rings;	 /* Rx ring array */
	struct ice_tx_ring **tx_rings;	 /* Tx ring array */
	struct ice_q_vector **q_vectors; /* q_vector array */

	irqreturn_t (*irq_handler)(int irq, void *data);

	u64 tx_linearize;
	DECLARE_BITMAP(state, ICE_VSI_STATE_NBITS);
	unsigned int current_netdev_flags;
	u32 tx_restart;
	u32 tx_busy;
	u32 rx_buf_failed;
	u32 rx_page_failed;
	u16 num_q_vectors;
	/* tell if only dynamic irq allocation is allowed */
	bool irq_dyn_alloc;

	enum ice_vsi_type type;
	u16 vsi_num;			/* HW (absolute) index of this VSI */
	u16 idx;			/* software index in pf->vsi[] */

	struct ice_vf *vf;		/* VF associated with this VSI */

	u16 num_gfltr;
	u16 num_bfltr;

	/* RSS config */
	u16 rss_table_size;	/* HW RSS table size */
	u16 rss_size;		/* Allocated RSS queues */
	u8 rss_hfunc;		/* User configured hash type */
	u8 *rss_hkey_user;	/* User configured hash keys */
	u8 *rss_lut_user;	/* User configured lookup table entries */
	u8 rss_lut_type;	/* used to configure Get/Set RSS LUT AQ call */

	/* aRFS members only allocated for the PF VSI */
#define ICE_MAX_ARFS_LIST	1024
#define ICE_ARFS_LST_MASK	(ICE_MAX_ARFS_LIST - 1)
	struct hlist_head *arfs_fltr_list;
	struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs;
	spinlock_t arfs_lock;	/* protects aRFS hash table and filter state */
	atomic_t *arfs_last_fltr_id;

	u16 max_frame;
	u16 rx_buf_len;

	struct ice_aqc_vsi_props info;	 /* VSI properties */
	struct ice_vsi_vlan_info vlan_info;	/* vlan config to be restored */

	/* VSI stats */
	struct rtnl_link_stats64 net_stats;
	struct rtnl_link_stats64 net_stats_prev;
	struct ice_eth_stats eth_stats;
	struct ice_eth_stats eth_stats_prev;

	struct list_head tmp_sync_list;		/* MAC filters to be synced */
	struct list_head tmp_unsync_list;	/* MAC filters to be unsynced */

	u8 irqs_ready:1;
	u8 current_isup:1;		 /* Sync 'link up' logging */
	u8 stat_offsets_loaded:1;
	struct ice_vsi_vlan_ops inner_vlan_ops;
	struct ice_vsi_vlan_ops outer_vlan_ops;
	u16 num_vlan;

	/* queue information */
	u8 tx_mapping_mode;		 /* ICE_MAP_MODE_[CONTIG|SCATTER] */
	u8 rx_mapping_mode;		 /* ICE_MAP_MODE_[CONTIG|SCATTER] */
	u16 *txq_map;			 /* index in pf->avail_txqs */
	u16 *rxq_map;			 /* index in pf->avail_rxqs */
	u16 alloc_txq;			 /* Allocated Tx queues */
	u16 num_txq;			 /* Used Tx queues */
	u16 alloc_rxq;			 /* Allocated Rx queues */
	u16 num_rxq;			 /* Used Rx queues */
	u16 req_txq;			 /* User requested Tx queues */
	u16 req_rxq;			 /* User requested Rx queues */
	u16 num_rx_desc;
	u16 num_tx_desc;
	u16 qset_handle[ICE_MAX_TRAFFIC_CLASS];
	struct ice_tc_cfg tc_cfg;
	struct bpf_prog *xdp_prog;
	struct ice_tx_ring **xdp_rings;	 /* XDP ring array */
	unsigned long *af_xdp_zc_qps;	 /* tracks AF_XDP ZC enabled qps */
	u16 num_xdp_txq;		 /* Used XDP queues */
	u8 xdp_mapping_mode;		 /* ICE_MAP_MODE_[CONTIG|SCATTER] */

	struct net_device **target_netdevs;

	struct tc_mqprio_qopt_offload mqprio_qopt; /* queue parameters */

	/* Channel Specific Fields */
	struct ice_vsi *tc_map_vsi[ICE_CHNL_MAX_TC];
	u16 cnt_q_avail;
	u16 next_base_q;	/* next queue to be used for channel setup */
	struct list_head ch_list;
	u16 num_chnl_rxq;
	u16 num_chnl_txq;
	u16 ch_rss_size;
	u16 num_chnl_fltr;
	/* store away the RSS size before configuring ADQ channels so that
	 * it can be used after tc-qdisc delete to restore the RSS settings
	 * as they were before
	 */
	u16 orig_rss_size;
	/* this keeps track of all enabled TCs, with and without DCB and
	 * inclusive of ADQ; vsi->mqprio_qopt keeps track of the queue
	 * information
	 */
	u8 all_numtc;
	u16 all_enatc;

	/* store away TC info, to be used for rebuild logic */
	u8 old_numtc;
	u16 old_ena_tc;

	struct ice_channel *ch;

	/* back reference to the aggregator node this VSI corresponds to */
	struct ice_agg_node *agg_node;
} ____cacheline_internodealigned_in_smp;

/* struct that defines an interrupt vector */
struct ice_q_vector {
	struct ice_vsi *vsi;

	u16 v_idx;			/* index in the vsi->q_vector array. */
	u16 reg_idx;
	u8 num_ring_rx;			/* total number of Rx rings in vector */
	u8 num_ring_tx;			/* total number of Tx rings in vector */
	u8 wb_on_itr:1;			/* if true, WB on ITR is enabled */
	/* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
	 * value to the device
	 */
	u8 intrl;

	struct napi_struct napi;

	struct ice_ring_container rx;
	struct ice_ring_container tx;

	cpumask_t affinity_mask;
	struct irq_affinity_notify affinity_notify;

	struct ice_channel *ch;

	char name[ICE_INT_NAME_STR_LEN];

	u16 total_events;	/* net_dim(): number of interrupts processed */
	struct msi_map irq;
} ____cacheline_internodealigned_in_smp;
485 
486 enum ice_pf_flags {
487 	ICE_FLAG_FLTR_SYNC,
488 	ICE_FLAG_RDMA_ENA,
489 	ICE_FLAG_RSS_ENA,
490 	ICE_FLAG_SRIOV_ENA,
491 	ICE_FLAG_SRIOV_CAPABLE,
492 	ICE_FLAG_DCB_CAPABLE,
493 	ICE_FLAG_DCB_ENA,
494 	ICE_FLAG_FD_ENA,
495 	ICE_FLAG_PTP_SUPPORTED,		/* PTP is supported by NVM */
496 	ICE_FLAG_ADV_FEATURES,
497 	ICE_FLAG_TC_MQPRIO,		/* support for Multi queue TC */
498 	ICE_FLAG_CLS_FLOWER,
499 	ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
500 	ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
501 	ICE_FLAG_NO_MEDIA,
502 	ICE_FLAG_FW_LLDP_AGENT,
503 	ICE_FLAG_MOD_POWER_UNSUPPORTED,
504 	ICE_FLAG_PHY_FW_LOAD_FAILED,
505 	ICE_FLAG_ETHTOOL_CTXT,		/* set when ethtool holds RTNL lock */
506 	ICE_FLAG_LEGACY_RX,
507 	ICE_FLAG_VF_TRUE_PROMISC_ENA,
508 	ICE_FLAG_MDD_AUTO_RESET_VF,
509 	ICE_FLAG_VF_VLAN_PRUNING,
510 	ICE_FLAG_LINK_LENIENT_MODE_ENA,
511 	ICE_FLAG_PLUG_AUX_DEV,
512 	ICE_FLAG_UNPLUG_AUX_DEV,
513 	ICE_FLAG_MTU_CHANGED,
514 	ICE_FLAG_GNSS,			/* GNSS successfully initialized */
515 	ICE_FLAG_DPLL,			/* SyncE/PTP dplls initialized */
516 	ICE_PF_FLAGS_NBITS		/* must be last */
517 };
518 
519 enum ice_misc_thread_tasks {
520 	ICE_MISC_THREAD_TX_TSTAMP,
521 	ICE_MISC_THREAD_NBITS		/* must be last */
522 };
523 
524 struct ice_eswitch {
525 	struct ice_vsi *uplink_vsi;
526 	struct ice_esw_br_offloads *br_offloads;
527 	struct xarray reprs;
528 	bool is_running;
529 };
530 
531 struct ice_agg_node {
532 	u32 agg_id;
533 #define ICE_MAX_VSIS_IN_AGG_NODE	64
534 	u32 num_vsis;
535 	u8 valid;
536 };
537 
538 struct ice_pf {
539 	struct pci_dev *pdev;
540 
541 	struct devlink_region *nvm_region;
542 	struct devlink_region *sram_region;
543 	struct devlink_region *devcaps_region;
544 
545 	/* devlink port data */
546 	struct devlink_port devlink_port;
547 
548 	/* OS reserved IRQ details */
549 	struct msix_entry *msix_entries;
550 	struct ice_irq_tracker irq_tracker;
551 	/* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the
552 	 * number of MSIX vectors needed for all SR-IOV VFs from the number of
553 	 * MSIX vectors allowed on this PF.
554 	 */
555 	u16 sriov_base_vector;
556 	unsigned long *sriov_irq_bm;	/* bitmap to track irq usage */
557 	u16 sriov_irq_size;		/* size of the irq_bm bitmap */
558 
559 	u16 ctrl_vsi_idx;		/* control VSI index in pf->vsi array */
560 
561 	struct ice_vsi **vsi;		/* VSIs created by the driver */
562 	struct ice_vsi_stats **vsi_stats;
563 	struct ice_sw *first_sw;	/* first switch created by firmware */
564 	u16 eswitch_mode;		/* current mode of eswitch */
565 	struct dentry *ice_debugfs_pf;
566 	struct dentry *ice_debugfs_pf_fwlog;
567 	/* keep track of all the dentrys for FW log modules */
568 	struct dentry **ice_debugfs_pf_fwlog_modules;
569 	struct ice_vfs vfs;
570 	DECLARE_BITMAP(features, ICE_F_MAX);
571 	DECLARE_BITMAP(state, ICE_STATE_NBITS);
572 	DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
573 	DECLARE_BITMAP(misc_thread, ICE_MISC_THREAD_NBITS);
574 	unsigned long *avail_txqs;	/* bitmap to track PF Tx queue usage */
575 	unsigned long *avail_rxqs;	/* bitmap to track PF Rx queue usage */
576 	unsigned long serv_tmr_period;
577 	unsigned long serv_tmr_prev;
578 	struct timer_list serv_tmr;
579 	struct work_struct serv_task;
580 	struct mutex avail_q_mutex;	/* protects access to avail_[rx|tx]qs */
581 	struct mutex sw_mutex;		/* lock for protecting VSI alloc flow */
582 	struct mutex tc_mutex;		/* lock to protect TC changes */
583 	struct mutex adev_mutex;	/* lock to protect aux device access */
584 	struct mutex lag_mutex;		/* protect ice_lag struct in PF */
585 	u32 msg_enable;
586 	struct ice_ptp ptp;
587 	struct gnss_serial *gnss_serial;
588 	struct gnss_device *gnss_dev;
589 	u16 num_rdma_msix;		/* Total MSIX vectors for RDMA driver */
590 	u16 rdma_base_vector;
591 
592 	/* spinlock to protect the AdminQ wait list */
593 	spinlock_t aq_wait_lock;
594 	struct hlist_head aq_wait_list;
595 	wait_queue_head_t aq_wait_queue;
596 	bool fw_emp_reset_disabled;
597 
598 	wait_queue_head_t reset_wait_queue;
599 
600 	u32 hw_csum_rx_error;
601 	u32 hw_rx_eipe_error;
602 	u32 oicr_err_reg;
603 	struct msi_map oicr_irq;	/* Other interrupt cause MSIX vector */
604 	struct msi_map ll_ts_irq;	/* LL_TS interrupt MSIX vector */
605 	u16 max_pf_txqs;	/* Total Tx queues PF wide */
606 	u16 max_pf_rxqs;	/* Total Rx queues PF wide */
607 	u16 num_lan_msix;	/* Total MSIX vectors for base driver */
608 	u16 num_lan_tx;		/* num LAN Tx queues setup */
609 	u16 num_lan_rx;		/* num LAN Rx queues setup */
610 	u16 next_vsi;		/* Next free slot in pf->vsi[] - 0-based! */
611 	u16 num_alloc_vsi;
612 	u16 corer_count;	/* Core reset count */
613 	u16 globr_count;	/* Global reset count */
614 	u16 empr_count;		/* EMP reset count */
615 	u16 pfr_count;		/* PF reset count */
616 
617 	u8 wol_ena : 1;		/* software state of WoL */
618 	u32 wakeup_reason;	/* last wakeup reason */
619 	struct ice_hw_port_stats stats;
620 	struct ice_hw_port_stats stats_prev;
621 	struct ice_hw hw;
622 	u8 stat_prev_loaded:1; /* has previous stats been loaded */
623 	u8 rdma_mode;
624 	u16 dcbx_cap;
625 	u32 tx_timeout_count;
626 	unsigned long tx_timeout_last_recovery;
627 	u32 tx_timeout_recovery_level;
628 	char int_name[ICE_INT_NAME_STR_LEN];
629 	char int_name_ll_ts[ICE_INT_NAME_STR_LEN];
630 	struct auxiliary_device *adev;
631 	int aux_idx;
632 	u32 sw_int_count;
633 	/* count of tc_flower filters specific to channel (aka where filter
634 	 * action is "hw_tc <tc_num>")
635 	 */
636 	u16 num_dmac_chnl_fltrs;
637 	struct hlist_head tc_flower_fltr_list;
638 
639 	u64 supported_rxdids;
640 
641 	__le64 nvm_phy_type_lo; /* NVM PHY type low */
642 	__le64 nvm_phy_type_hi; /* NVM PHY type high */
643 	struct ice_link_default_override_tlv link_dflt_override;
644 	struct ice_lag *lag; /* Link Aggregation information */
645 
646 	struct ice_eswitch eswitch;
647 	struct ice_esw_br_port *br_port;
648 
649 #define ICE_INVALID_AGG_NODE_ID		0
650 #define ICE_PF_AGG_NODE_ID_START	1
651 #define ICE_MAX_PF_AGG_NODES		32
652 	struct ice_agg_node pf_agg_node[ICE_MAX_PF_AGG_NODES];
653 #define ICE_VF_AGG_NODE_ID_START	65
654 #define ICE_MAX_VF_AGG_NODES		32
655 	struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
656 	struct ice_dplls dplls;
657 	struct device *hwmon_dev;
658 };

extern struct workqueue_struct *ice_lag_wq;

struct ice_netdev_priv {
	struct ice_vsi *vsi;
	struct ice_repr *repr;
	/* indirect block callbacks on registered higher level devices
	 * (e.g. tunnel devices)
	 *
	 * tc_indr_block_priv_list is used to look up indirect callback
	 * private data
	 */
	struct list_head tc_indr_block_priv_list;
};
/**
 * ice_vector_ch_enabled - check if the vector is channel enabled
 * @qv: pointer to q_vector
 *
 * Return true if the vector is channel enabled, false otherwise.
 */
static inline bool ice_vector_ch_enabled(struct ice_q_vector *qv)
{
	return !!qv->ch; /* set only when the vector serves an ADQ channel */
}

/**
 * ice_ptp_pf_handles_tx_interrupt - Check if PF handles Tx interrupt
 * @pf: Board private structure
 *
 * Return true if this PF should respond to the Tx timestamp interrupt
 * indication in the miscellaneous OICR interrupt handler.
 */
static inline bool ice_ptp_pf_handles_tx_interrupt(struct ice_pf *pf)
{
	return pf->ptp.tx_interrupt_mode != ICE_PTP_TX_INTERRUPT_NONE;
}

/**
 * ice_irq_dynamic_ena - Enable default interrupt generation settings
 * @hw: pointer to HW struct
 * @vsi: pointer to VSI struct, can be NULL
 * @q_vector: pointer to q_vector, can be NULL
 */
static inline void
ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
		    struct ice_q_vector *q_vector)
{
	u32 vector = (vsi && q_vector) ? q_vector->reg_idx :
				((struct ice_pf *)hw->back)->oicr_irq.index;
	int itr = ICE_ITR_NONE;
	u32 val;

	/* clear the PBA here, as this function is meant to clean out all
	 * previous interrupts and enable the interrupt
	 */
	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
	      (itr << GLINT_DYN_CTL_ITR_INDX_S);
	if (vsi && test_bit(ICE_VSI_DOWN, vsi->state))
		return;
	wr32(hw, GLINT_DYN_CTL(vector), val);
}
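
/* A typical call site (sketch) passes a VSI together with one of its
 * q_vectors to re-arm that vector:
 *
 *	ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector);
 *
 * Passing NULL for @vsi and @q_vector instead targets the PF's "other
 * interrupt cause" (OICR) vector.
 */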

/**
 * ice_netdev_to_pf - Retrieve the PF struct associated with a netdev
 * @netdev: pointer to the netdev struct
 */
static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return np->vsi->back;
}

static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
{
	return !!READ_ONCE(vsi->xdp_prog);
}

static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
{
	ring->flags |= ICE_TX_FLAGS_RING_XDP;
}

/**
 * ice_xsk_pool - get XSK buffer pool bound to a ring
 * @ring: Rx ring to use
 *
 * Returns a pointer to xsk_buff_pool structure if there is a buffer pool
 * present, NULL otherwise.
 */
static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
{
	struct ice_vsi *vsi = ring->vsi;
	u16 qid = ring->q_index;

	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
		return NULL;

	return xsk_get_pool_from_qid(vsi->netdev, qid);
}

/**
 * ice_tx_xsk_pool - assign XSK buff pool to XDP ring
 * @vsi: pointer to VSI
 * @qid: index of a queue to look at XSK buff pool presence
 *
 * Sets the XSK buff pool pointer on the XDP ring.
 *
 * The XDP ring is picked from the Rx ring, whereas the Rx ring is picked
 * based on the provided queue id. The reason for doing so is that queue
 * vectors might have been assigned more than one XDP ring, e.g. when the
 * user reduced the queue count on the netdev; the Rx ring carries a pointer
 * to one of these XDP rings for its own purposes, such as handling the
 * XDP_TX action, so we can piggyback here on the rx_ring->xdp_ring
 * assignment that was done during XDP rings initialization.
 */
static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
{
	struct ice_tx_ring *ring;

	ring = vsi->rx_rings[qid]->xdp_ring;
	if (!ring)
		return;

	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
		ring->xsk_pool = NULL;
		return;
	}

	ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
}
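
/* A sketch of how this is typically driven when (re)configuring zero-copy
 * state: refresh the pool pointer for every queue of the VSI.
 *
 *	int i;
 *
 *	ice_for_each_rxq(vsi, i)
 *		ice_tx_xsk_pool(vsi, i);
 */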

/**
 * ice_get_main_vsi - Get the PF VSI
 * @pf: PF instance
 *
 * returns pf->vsi[0], which by definition is the PF VSI
 */
static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf)
{
	if (pf->vsi)
		return pf->vsi[0];

	return NULL;
}

/**
 * ice_get_netdev_priv_vsi - return VSI associated with netdev priv.
 * @np: private netdev structure
 */
static inline struct ice_vsi *ice_get_netdev_priv_vsi(struct ice_netdev_priv *np)
{
	/* In case of port representor return source port VSI. */
	if (np->repr)
		return np->repr->src_vsi;

	return np->vsi;
}

/**
 * ice_get_ctrl_vsi - Get the control VSI
 * @pf: PF instance
 */
static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf)
{
	/* if pf->ctrl_vsi_idx is ICE_NO_VSI, control VSI was not set up */
	if (!pf->vsi || pf->ctrl_vsi_idx == ICE_NO_VSI)
		return NULL;

	return pf->vsi[pf->ctrl_vsi_idx];
}

/**
 * ice_find_vsi - Find the VSI from VSI ID
 * @pf: The PF pointer to search in
 * @vsi_num: The VSI ID to search for
 */
static inline struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
{
	int i;

	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)
			return pf->vsi[i];
	return NULL;
}

/**
 * ice_is_switchdev_running - check if switchdev is configured
 * @pf: pointer to PF structure
 *
 * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV
 * and switchdev is configured, false otherwise.
 */
static inline bool ice_is_switchdev_running(struct ice_pf *pf)
{
	return pf->eswitch.is_running;
}

#define ICE_FD_STAT_CTR_BLOCK_COUNT	256
#define ICE_FD_STAT_PF_IDX(base_idx) \
			((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT)
#define ICE_FD_SB_STAT_IDX(base_idx) ICE_FD_STAT_PF_IDX(base_idx)
#define ICE_FD_STAT_CH			1
#define ICE_FD_CH_STAT_IDX(base_idx) \
			(ICE_FD_STAT_PF_IDX(base_idx) + ICE_FD_STAT_CH)
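
/* Worked example: each PF owns a block of ICE_FD_STAT_CTR_BLOCK_COUNT (256)
 * counters, so for base_idx 2, ICE_FD_SB_STAT_IDX(2) == 512 (the sideband
 * slot) and ICE_FD_CH_STAT_IDX(2) == 513 (the ADQ channel slot).
 */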

/**
 * ice_is_adq_active - check for any active ADQs
 * @pf: pointer to PF
 *
 * Return true if any ADQs are configured, which is determined by looking at
 * the VSI type (which should be VSI_PF), numtc, and the TC_MQPRIO flag;
 * return false otherwise.
 */
static inline bool ice_is_adq_active(struct ice_pf *pf)
{
	struct ice_vsi *vsi;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return false;

	/* is ADQ configured */
	if (vsi->tc_cfg.numtc > ICE_CHNL_START_TC &&
	    test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
		return true;

	return false;
}

void ice_debugfs_fwlog_init(struct ice_pf *pf);
void ice_debugfs_pf_deinit(struct ice_pf *pf);
void ice_debugfs_init(void);
void ice_debugfs_exit(void);
void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module);

bool netif_is_ice(const struct net_device *dev);
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
int ice_vsi_open_ctrl(struct ice_vsi *vsi);
int ice_vsi_open(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev);
void ice_set_ethtool_repr_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
u16 ice_get_avail_rxq_count(struct ice_pf *pf);
int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked);
void ice_update_vsi_stats(struct ice_vsi *vsi);
void ice_update_pf_stats(struct ice_pf *pf);
void
ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
			     struct ice_q_stats stats, u64 *pkts, u64 *bytes);
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
int ice_down_up(struct ice_vsi *vsi);
int ice_vsi_cfg_lan(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
int ice_destroy_xdp_rings(struct ice_vsi *vsi);
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags);
int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed);
int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed);
int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
int ice_plug_aux_dev(struct ice_pf *pf);
void ice_unplug_aux_dev(struct ice_pf *pf);
int ice_init_rdma(struct ice_pf *pf);
void ice_deinit_rdma(struct ice_pf *pf);
const char *ice_aq_str(enum ice_aq_err aq_err);
bool ice_is_wol_supported(struct ice_hw *hw);
void ice_fdir_del_all_fltrs(struct ice_vsi *vsi);
int
ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
		    bool is_tun);
void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena);
int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd);
int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd);
int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd);
int
ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,
		      u32 *rule_locs);
void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx);
void ice_fdir_release_flows(struct ice_hw *hw);
void ice_fdir_replay_flows(struct ice_hw *hw);
void ice_fdir_replay_fltrs(struct ice_pf *pf);
int ice_fdir_create_dflt_rules(struct ice_pf *pf);

enum ice_aq_task_state {
	ICE_AQ_TASK_NOT_PREPARED,
	ICE_AQ_TASK_WAITING,
	ICE_AQ_TASK_COMPLETE,
	ICE_AQ_TASK_CANCELED,
};

struct ice_aq_task {
	struct hlist_node entry;
	struct ice_rq_event_info event;
	enum ice_aq_task_state state;
	u16 opcode;
};

void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			   u16 opcode);
int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			  unsigned long timeout);
int ice_open(struct net_device *netdev);
int ice_open_internal(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
void ice_service_task_schedule(struct ice_pf *pf);
int ice_load(struct ice_pf *pf);
void ice_unload(struct ice_pf *pf);
void ice_adv_lnk_speed_maps_init(void);
int ice_init_dev(struct ice_pf *pf);
void ice_deinit_dev(struct ice_pf *pf);

/**
 * ice_set_rdma_cap - enable RDMA support
 * @pf: PF struct
 */
static inline void ice_set_rdma_cap(struct ice_pf *pf)
{
	if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
		set_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
	}
}

/**
 * ice_clear_rdma_cap - disable RDMA support
 * @pf: PF struct
 */
static inline void ice_clear_rdma_cap(struct ice_pf *pf)
{
	/* defer unplug to service task to avoid RTNL lock and
	 * clear PLUG bit so that pending plugs don't interfere
	 */
	clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
	set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags);
	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}

extern const struct xdp_metadata_ops ice_xdp_md_ops;
#endif /* _ICE_H_ */