1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #ifndef _ICE_H_
5 #define _ICE_H_
6 
7 #include <linux/types.h>
8 #include <linux/errno.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/firmware.h>
12 #include <linux/netdevice.h>
13 #include <linux/compiler.h>
14 #include <linux/etherdevice.h>
15 #include <linux/skbuff.h>
16 #include <linux/cpumask.h>
17 #include <linux/rtnetlink.h>
18 #include <linux/if_vlan.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/pci.h>
21 #include <linux/workqueue.h>
22 #include <linux/wait.h>
23 #include <linux/interrupt.h>
24 #include <linux/ethtool.h>
25 #include <linux/timer.h>
26 #include <linux/delay.h>
27 #include <linux/bitmap.h>
28 #include <linux/log2.h>
29 #include <linux/ip.h>
30 #include <linux/sctp.h>
31 #include <linux/ipv6.h>
32 #include <linux/pkt_sched.h>
33 #include <linux/if_bridge.h>
34 #include <linux/ctype.h>
35 #include <linux/linkmode.h>
36 #include <linux/bpf.h>
37 #include <linux/btf.h>
38 #include <linux/auxiliary_bus.h>
39 #include <linux/avf/virtchnl.h>
40 #include <linux/cpu_rmap.h>
41 #include <linux/dim.h>
42 #include <linux/gnss.h>
43 #include <net/pkt_cls.h>
44 #include <net/pkt_sched.h>
45 #include <net/tc_act/tc_mirred.h>
46 #include <net/tc_act/tc_gact.h>
47 #include <net/ip.h>
48 #include <net/devlink.h>
49 #include <net/ipv6.h>
50 #include <net/xdp_sock.h>
51 #include <net/xdp_sock_drv.h>
52 #include <net/geneve.h>
53 #include <net/gre.h>
54 #include <net/udp_tunnel.h>
55 #include <net/vxlan.h>
56 #include <net/gtp.h>
57 #include <linux/ppp_defs.h>
58 #include "ice_devids.h"
59 #include "ice_type.h"
60 #include "ice_txrx.h"
61 #include "ice_dcb.h"
62 #include "ice_switch.h"
63 #include "ice_common.h"
64 #include "ice_flow.h"
65 #include "ice_sched.h"
66 #include "ice_idc_int.h"
67 #include "ice_sriov.h"
68 #include "ice_vf_mbx.h"
69 #include "ice_ptp.h"
70 #include "ice_fdir.h"
71 #include "ice_xsk.h"
72 #include "ice_arfs.h"
73 #include "ice_repr.h"
74 #include "ice_eswitch.h"
75 #include "ice_lag.h"
76 #include "ice_vsi_vlan_ops.h"
77 #include "ice_gnss.h"
78 #include "ice_irq.h"
79 #include "ice_dpll.h"
80 #include "ice_adapter.h"
81 #include "devlink/health.h"
82 
83 #define ICE_BAR0		0
84 #define ICE_REQ_DESC_MULTIPLE	32
85 #define ICE_MIN_NUM_DESC	64
86 #define ICE_MAX_NUM_DESC	8160
87 #define ICE_DFLT_MIN_RX_DESC	512
88 #define ICE_DFLT_NUM_TX_DESC	256
89 #define ICE_DFLT_NUM_RX_DESC	2048
90 
91 #define ICE_DFLT_TRAFFIC_CLASS	BIT(0)
92 #define ICE_INT_NAME_STR_LEN	(IFNAMSIZ + 16)
93 #define ICE_AQ_LEN		192
94 #define ICE_MBXSQ_LEN		64
95 #define ICE_SBQ_LEN		64
96 #define ICE_MIN_LAN_TXRX_MSIX	1
97 #define ICE_MIN_LAN_OICR_MSIX	1
98 #define ICE_MIN_MSIX		(ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
99 #define ICE_FDIR_MSIX		2
100 #define ICE_NO_VSI		0xffff
101 #define ICE_VSI_MAP_CONTIG	0
102 #define ICE_VSI_MAP_SCATTER	1
103 #define ICE_MAX_SCATTER_TXQS	16
104 #define ICE_MAX_SCATTER_RXQS	16
105 #define ICE_Q_WAIT_RETRY_LIMIT	10
106 #define ICE_Q_WAIT_MAX_RETRY	(5 * ICE_Q_WAIT_RETRY_LIMIT)
107 #define ICE_MAX_LG_RSS_QS	256
108 #define ICE_INVAL_Q_INDEX	0xffff
109 
110 #define ICE_MAX_RXQS_PER_TC		256	/* Used when setting VSI context per TC Rx queues */
111 
112 #define ICE_CHNL_START_TC		1
113 
114 #define ICE_MAX_RESET_WAIT		20
115 
116 #define ICE_VSIQF_HKEY_ARRAY_SIZE	((VSIQF_HKEY_MAX_INDEX + 1) * 4)
117 
118 #define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
119 
120 #define ICE_MAX_MTU	(ICE_AQ_SET_MAC_FRAME_SIZE_MAX - ICE_ETH_PKT_HDR_PAD)
121 
122 #define ICE_MAX_TSO_SIZE 131072
123 
124 #define ICE_UP_TABLE_TRANSLATE(val, i) \
125 		(((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
126 		  ICE_AQ_VSI_UP_TABLE_UP##i##_M)
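
/* For illustration (hypothetical caller): ICE_UP_TABLE_TRANSLATE(3, 0)
 * expands to (((3) << ICE_AQ_VSI_UP_TABLE_UP0_S) & ICE_AQ_VSI_UP_TABLE_UP0_M),
 * i.e. it places the value 3 in the UP0 field of a VSI UP translation
 * table word.
 */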
127 
128 #define ICE_TX_DESC(R, i) (&(((struct ice_tx_desc *)((R)->desc))[i]))
129 #define ICE_RX_DESC(R, i) (&(((union ice_32b_rx_flex_desc *)((R)->desc))[i]))
130 #define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i]))
131 #define ICE_TX_FDIRDESC(R, i) (&(((struct ice_fltr_desc *)((R)->desc))[i]))
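
/* These accessors return typed pointers into a ring's shared descriptor
 * memory; e.g. ICE_TX_DESC(ring, 5) (illustrative) is the address of the
 * sixth Tx descriptor of "ring", with the cast selecting how the raw
 * descriptor memory is interpreted.
 */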
132 
133 /* Minimum BW limit is 500 Kbps for any scheduler node */
134 #define ICE_MIN_BW_LIMIT		500
135 /* The user can specify BW in Kbit/Mbit/Gbit and the OS converts it to bytes.
136  * Use this divisor to convert the user-specified BW limit into Kbps.
137  */
138 #define ICE_BW_KBPS_DIVISOR		125
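
/* Illustrative arithmetic: a user limit of 1 Gbit/s is handed over by the
 * OS as 125,000,000 bytes per second; dividing by ICE_BW_KBPS_DIVISOR (125)
 * recovers 1,000,000 Kbps for the scheduler.
 */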
139 
140 /* Default recipes have priority 4 and below, hence priority values between
141  * 5..7 can be used as the filter priority for advanced switch filters
142  * (advanced switch filters need a new recipe to be created for the
143  * specified extraction sequence because the default recipe extraction
144  * sequence does not represent custom extractions)
145  */
146 #define ICE_SWITCH_FLTR_PRIO_QUEUE	7
147 /* prio 6 is reserved for future use (e.g. a switch filter with L3 fields +
148  * (optional: IP TOS/TTL) + L4 fields + (optional: TCP fields such as
149  * SYN/FIN/RST))
150  */
151 #define ICE_SWITCH_FLTR_PRIO_RSVD	6
152 #define ICE_SWITCH_FLTR_PRIO_VSI	5
153 #define ICE_SWITCH_FLTR_PRIO_QGRP	ICE_SWITCH_FLTR_PRIO_VSI
154 
155 /* Macro for each VSI in a PF */
156 #define ice_for_each_vsi(pf, i) \
157 	for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)
158 
159 /* Macros for each Tx/Xdp/Rx ring in a VSI */
160 #define ice_for_each_txq(vsi, i) \
161 	for ((i) = 0; (i) < (vsi)->num_txq; (i)++)
162 
163 #define ice_for_each_xdp_txq(vsi, i) \
164 	for ((i) = 0; (i) < (vsi)->num_xdp_txq; (i)++)
165 
166 #define ice_for_each_rxq(vsi, i) \
167 	for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
168 
169 /* Macros for each allocated Tx/Rx ring whether used or not in a VSI */
170 #define ice_for_each_alloc_txq(vsi, i) \
171 	for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)
172 
173 #define ice_for_each_alloc_rxq(vsi, i) \
174 	for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++)
175 
176 #define ice_for_each_q_vector(vsi, i) \
177 	for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)
178 
179 #define ice_for_each_chnl_tc(i)	\
180 	for ((i) = ICE_CHNL_START_TC; (i) < ICE_CHNL_MAX_TC; (i)++)
181 
182 #define ICE_UCAST_PROMISC_BITS ICE_PROMISC_UCAST_RX
183 
184 #define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_RX | \
185 				     ICE_PROMISC_VLAN_RX)
186 
187 #define ICE_MCAST_PROMISC_BITS (ICE_PROMISC_MCAST_TX | ICE_PROMISC_MCAST_RX)
188 
189 #define ICE_MCAST_VLAN_PROMISC_BITS (ICE_PROMISC_MCAST_TX | \
190 				     ICE_PROMISC_MCAST_RX | \
191 				     ICE_PROMISC_VLAN_TX  | \
192 				     ICE_PROMISC_VLAN_RX)
193 
194 #define ice_pf_to_dev(pf) (&((pf)->pdev->dev))
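
/* Illustrative use: dev_dbg(ice_pf_to_dev(pf), "..."); the macro resolves
 * to the underlying PCI device's struct device, suitable for the dev_*()
 * logging and DMA mapping APIs.
 */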
195 
196 #define ice_pf_src_tmr_owned(pf) ((pf)->hw.func_caps.ts_func_info.src_tmr_owned)
197 
198 enum ice_feature {
199 	ICE_F_DSCP,
200 	ICE_F_PHY_RCLK,
201 	ICE_F_SMA_CTRL,
202 	ICE_F_CGU,
203 	ICE_F_GNSS,
204 	ICE_F_ROCE_LAG,
205 	ICE_F_SRIOV_LAG,
206 	ICE_F_MBX_LIMIT,
207 	ICE_F_MAX
208 };
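
/* Feature bits are tracked in the pf->features bitmap; e.g. (illustrative)
 * test_bit(ICE_F_GNSS, pf->features) tells whether GNSS support was
 * detected for this device.
 */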
209 
210 DECLARE_STATIC_KEY_FALSE(ice_xdp_locking_key);
211 
212 struct ice_channel {
213 	struct list_head list;
214 	u8 type;
215 	u16 sw_id;
216 	u16 base_q;
217 	u16 num_rxq;
218 	u16 num_txq;
219 	u16 vsi_num;
220 	u8 ena_tc;
221 	struct ice_aqc_vsi_props info;
222 	u64 max_tx_rate;
223 	u64 min_tx_rate;
224 	atomic_t num_sb_fltr;
225 	struct ice_vsi *ch_vsi;
226 };
227 
228 struct ice_txq_meta {
229 	u32 q_teid;	/* Tx-scheduler element identifier */
230 	u16 q_id;	/* Entry in VSI's txq_map bitmap */
231 	u16 q_handle;	/* Relative index of Tx queue within TC */
232 	u16 vsi_idx;	/* VSI index that Tx queue belongs to */
233 	u8 tc;		/* TC number that Tx queue belongs to */
234 };
235 
236 struct ice_tc_info {
237 	u16 qoffset;
238 	u16 qcount_tx;
239 	u16 qcount_rx;
240 	u8 netdev_tc;
241 };
242 
243 struct ice_tc_cfg {
244 	u8 numtc; /* Total number of enabled TCs */
245 	u16 ena_tc; /* Tx map */
246 	struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];
247 };
248 
249 struct ice_qs_cfg {
250 	struct mutex *qs_mutex;  /* will be assigned to &pf->avail_q_mutex */
251 	unsigned long *pf_map;
252 	unsigned long pf_map_size;
253 	unsigned int q_count;
254 	unsigned int scatter_count;
255 	u16 *vsi_map;
256 	u16 vsi_map_offset;
257 	u8 mapping_mode;
258 };
259 
260 struct ice_sw {
261 	struct ice_pf *pf;
262 	u16 sw_id;		/* switch ID for this switch */
263 	u16 bridge_mode;	/* VEB/VEPA/Port Virtualizer */
264 };
265 
266 enum ice_pf_state {
267 	ICE_TESTING,
268 	ICE_DOWN,
269 	ICE_NEEDS_RESTART,
270 	ICE_PREPARED_FOR_RESET,	/* set by driver when prepared */
271 	ICE_RESET_OICR_RECV,		/* set by driver after rcv reset OICR */
272 	ICE_PFR_REQ,		/* set by driver */
273 	ICE_CORER_REQ,		/* set by driver */
274 	ICE_GLOBR_REQ,		/* set by driver */
275 	ICE_CORER_RECV,		/* set by OICR handler */
276 	ICE_GLOBR_RECV,		/* set by OICR handler */
277 	ICE_EMPR_RECV,		/* set by OICR handler */
278 	ICE_SUSPENDED,		/* set on module remove path */
279 	ICE_RESET_FAILED,		/* set by reset/rebuild */
280 	/* When checking whether the PF is in a nominal operating state, only
281 	 * the bits grouped at the beginning of the list need to be checked:
282 	 * that is, the bits occurring before ICE_STATE_NOMINAL_CHECK_BITS.
283 	 * If you need a bit to be considered for the nominal operating
284 	 * state, it must be added before ICE_STATE_NOMINAL_CHECK_BITS. Do
285 	 * not move this entry's position without appropriate
286 	 * consideration.
287 	 */
288 	ICE_STATE_NOMINAL_CHECK_BITS,
289 	ICE_ADMINQ_EVENT_PENDING,
290 	ICE_MAILBOXQ_EVENT_PENDING,
291 	ICE_SIDEBANDQ_EVENT_PENDING,
292 	ICE_MDD_EVENT_PENDING,
293 	ICE_VFLR_EVENT_PENDING,
294 	ICE_FLTR_OVERFLOW_PROMISC,
295 	ICE_VF_DIS,
296 	ICE_CFG_BUSY,
297 	ICE_SERVICE_SCHED,
298 	ICE_SERVICE_DIS,
299 	ICE_FD_FLUSH_REQ,
300 	ICE_OICR_INTR_DIS,		/* Global OICR interrupt disabled */
301 	ICE_MDD_VF_PRINT_PENDING,	/* set while an MDD event is handled */
302 	ICE_VF_RESETS_DISABLED,	/* disable resets during ice_remove */
303 	ICE_LINK_DEFAULT_OVERRIDE_PENDING,
304 	ICE_PHY_INIT_COMPLETE,
305 	ICE_FD_VF_FLUSH_CTX,		/* set at FD Rx IRQ or timeout */
306 	ICE_AUX_ERR_PENDING,
307 	ICE_STATE_NBITS		/* must be last */
308 };
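
/* A nominal-state check can rely on the bit grouping described above; a
 * minimal sketch (the driver's actual helper may differ):
 *
 *	DECLARE_BITMAP(check_bits, ICE_STATE_NBITS) = { 0 };
 *
 *	bitmap_set(check_bits, 0, ICE_STATE_NOMINAL_CHECK_BITS);
 *	return !bitmap_intersects(pf->state, check_bits, ICE_STATE_NBITS);
 */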
309 
310 enum ice_vsi_state {
311 	ICE_VSI_DOWN,
312 	ICE_VSI_NEEDS_RESTART,
313 	ICE_VSI_NETDEV_ALLOCD,
314 	ICE_VSI_NETDEV_REGISTERED,
315 	ICE_VSI_UMAC_FLTR_CHANGED,
316 	ICE_VSI_MMAC_FLTR_CHANGED,
317 	ICE_VSI_PROMISC_CHANGED,
318 	ICE_VSI_REBUILD_PENDING,
319 	ICE_VSI_STATE_NBITS		/* must be last */
320 };
321 
322 struct ice_vsi_stats {
323 	struct ice_ring_stats **tx_ring_stats;  /* Tx ring stats array */
324 	struct ice_ring_stats **rx_ring_stats;  /* Rx ring stats array */
325 };
326 
327 /* struct that defines a VSI, associated with a dev */
328 struct ice_vsi {
329 	struct net_device *netdev;
330 	struct ice_sw *vsw;		 /* switch this VSI is on */
331 	struct ice_pf *back;		 /* back pointer to PF */
332 	struct ice_rx_ring **rx_rings;	 /* Rx ring array */
333 	struct ice_tx_ring **tx_rings;	 /* Tx ring array */
334 	struct ice_q_vector **q_vectors; /* q_vector array */
335 
336 	irqreturn_t (*irq_handler)(int irq, void *data);
337 
338 	u64 tx_linearize;
339 	DECLARE_BITMAP(state, ICE_VSI_STATE_NBITS);
340 	unsigned int current_netdev_flags;
341 	u32 tx_restart;
342 	u32 tx_busy;
343 	u32 rx_buf_failed;
344 	u32 rx_page_failed;
345 	u16 num_q_vectors;
346 	/* true if only dynamic IRQ allocation is allowed */
347 	bool irq_dyn_alloc;
348 
349 	u16 vsi_num;			/* HW (absolute) index of this VSI */
350 	u16 idx;			/* software index in pf->vsi[] */
351 
352 	u16 num_gfltr;
353 	u16 num_bfltr;
354 
355 	/* RSS config */
356 	u16 rss_table_size;	/* HW RSS table size */
357 	u16 rss_size;		/* Allocated RSS queues */
358 	u8 rss_hfunc;		/* User configured hash type */
359 	u8 *rss_hkey_user;	/* User configured hash keys */
360 	u8 *rss_lut_user;	/* User configured lookup table entries */
361 	u8 rss_lut_type;	/* used to configure Get/Set RSS LUT AQ call */
362 
363 	/* aRFS members only allocated for the PF VSI */
364 #define ICE_MAX_ARFS_LIST	1024
365 #define ICE_ARFS_LST_MASK	(ICE_MAX_ARFS_LIST - 1)
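	/* ICE_MAX_ARFS_LIST is a power of two, so a flow hash can be reduced
	 * to a valid list index with, e.g., (hash & ICE_ARFS_LST_MASK)
	 * (illustrative).
	 */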
366 	struct hlist_head *arfs_fltr_list;
367 	struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs;
368 	spinlock_t arfs_lock;	/* protects aRFS hash table and filter state */
369 	atomic_t *arfs_last_fltr_id;
370 
371 	struct ice_aqc_vsi_props info;	 /* VSI properties */
372 	struct ice_vsi_vlan_info vlan_info;	/* vlan config to be restored */
373 
374 	/* VSI stats */
375 	struct rtnl_link_stats64 net_stats;
376 	struct rtnl_link_stats64 net_stats_prev;
377 	struct ice_eth_stats eth_stats;
378 	struct ice_eth_stats eth_stats_prev;
379 
380 	struct list_head tmp_sync_list;		/* MAC filters to be synced */
381 	struct list_head tmp_unsync_list;	/* MAC filters to be unsynced */
382 
383 	u8 irqs_ready:1;
384 	u8 current_isup:1;		 /* Sync 'link up' logging */
385 	u8 stat_offsets_loaded:1;
386 	struct ice_vsi_vlan_ops inner_vlan_ops;
387 	struct ice_vsi_vlan_ops outer_vlan_ops;
388 	u16 num_vlan;
389 
390 	/* queue information */
391 	u8 tx_mapping_mode;		 /* ICE_MAP_MODE_[CONTIG|SCATTER] */
392 	u8 rx_mapping_mode;		 /* ICE_MAP_MODE_[CONTIG|SCATTER] */
393 	u16 *txq_map;			 /* index in pf->avail_txqs */
394 	u16 *rxq_map;			 /* index in pf->avail_rxqs */
395 	u16 alloc_txq;			 /* Allocated Tx queues */
396 	u16 num_txq;			 /* Used Tx queues */
397 	u16 alloc_rxq;			 /* Allocated Rx queues */
398 	u16 num_rxq;			 /* Used Rx queues */
399 	u16 req_txq;			 /* User requested Tx queues */
400 	u16 req_rxq;			 /* User requested Rx queues */
401 	u16 num_rx_desc;
402 	u16 num_tx_desc;
403 	u16 qset_handle[ICE_MAX_TRAFFIC_CLASS];
404 	struct ice_tc_cfg tc_cfg;
405 	struct bpf_prog *xdp_prog;
406 	struct ice_tx_ring **xdp_rings;	 /* XDP ring array */
407 	u16 num_xdp_txq;		 /* Used XDP queues */
408 	u8 xdp_mapping_mode;		 /* ICE_MAP_MODE_[CONTIG|SCATTER] */
409 	struct mutex xdp_state_lock;
410 
411 	struct net_device **target_netdevs;
412 
413 	struct tc_mqprio_qopt_offload mqprio_qopt; /* queue parameters */
414 
415 	/* Channel Specific Fields */
416 	struct ice_vsi *tc_map_vsi[ICE_CHNL_MAX_TC];
417 	u16 cnt_q_avail;
418 	u16 next_base_q;	/* next queue to be used for channel setup */
419 	struct list_head ch_list;
420 	u16 num_chnl_rxq;
421 	u16 num_chnl_txq;
422 	u16 ch_rss_size;
423 	u16 num_chnl_fltr;
424 	/* store away RSS size info before configuring ADQ channels so that
425 	 * it can be used after tc-qdisc delete to restore the RSS settings
426 	 * as they were before
427 	 */
428 	u16 orig_rss_size;
429 	/* this keeps track of all enabled TCs with and without DCB
430 	 * and inclusive of ADQ; vsi->mqprio_qopt keeps track of queue
431 	 * information
432 	 */
433 	u8 all_numtc;
434 	u16 all_enatc;
435 
436 	/* store away TC info, to be used for rebuild logic */
437 	u8 old_numtc;
438 	u16 old_ena_tc;
439 
440 	/* back reference to the aggregator node to which this VSI
441 	 * corresponds
442 	 */
443 	struct ice_agg_node *agg_node;
444 
445 	struct_group_tagged(ice_vsi_cfg_params, params,
446 		struct ice_port_info *port_info; /* back pointer to port_info */
447 		struct ice_channel *ch; /* VSI's channel structure, may be NULL */
448 		union {
449 			/* VF associated with this VSI, may be NULL */
450 			struct ice_vf *vf;
451 			/* SF associated with this VSI, may be NULL */
452 			struct ice_dynamic_port *sf;
453 		};
454 		u32 flags; /* VSI flags used for rebuild and configuration */
455 		enum ice_vsi_type type; /* the type of the VSI */
456 	);
457 } ____cacheline_internodealigned_in_smp;
458 
459 /* struct that defines an interrupt vector */
460 struct ice_q_vector {
461 	struct ice_vsi *vsi;
462 
463 	u16 v_idx;			/* index in the vsi->q_vector array. */
464 	u16 reg_idx;			/* PF relative register index */
465 	u8 num_ring_rx;			/* total number of Rx rings in vector */
466 	u8 num_ring_tx;			/* total number of Tx rings in vector */
467 	u8 wb_on_itr:1;			/* if true, WB on ITR is enabled */
468 	/* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
469 	 * value to the device
470 	 */
471 	u8 intrl;
472 
473 	struct napi_struct napi;
474 
475 	struct ice_ring_container rx;
476 	struct ice_ring_container tx;
477 
478 	cpumask_t affinity_mask;
479 	struct irq_affinity_notify affinity_notify;
480 
481 	struct ice_channel *ch;
482 
483 	char name[ICE_INT_NAME_STR_LEN];
484 
485 	u16 total_events;	/* net_dim(): number of interrupts processed */
486 	u16 vf_reg_idx;		/* VF relative register index */
487 	struct msi_map irq;
488 } ____cacheline_internodealigned_in_smp;
489 
490 enum ice_pf_flags {
491 	ICE_FLAG_FLTR_SYNC,
492 	ICE_FLAG_RDMA_ENA,
493 	ICE_FLAG_RSS_ENA,
494 	ICE_FLAG_SRIOV_ENA,
495 	ICE_FLAG_SRIOV_CAPABLE,
496 	ICE_FLAG_DCB_CAPABLE,
497 	ICE_FLAG_DCB_ENA,
498 	ICE_FLAG_FD_ENA,
499 	ICE_FLAG_PTP_SUPPORTED,		/* PTP is supported by NVM */
500 	ICE_FLAG_ADV_FEATURES,
501 	ICE_FLAG_TC_MQPRIO,		/* support for Multi queue TC */
502 	ICE_FLAG_CLS_FLOWER,
503 	ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
504 	ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
505 	ICE_FLAG_NO_MEDIA,
506 	ICE_FLAG_FW_LLDP_AGENT,
507 	ICE_FLAG_MOD_POWER_UNSUPPORTED,
508 	ICE_FLAG_PHY_FW_LOAD_FAILED,
509 	ICE_FLAG_ETHTOOL_CTXT,		/* set when ethtool holds RTNL lock */
510 	ICE_FLAG_LEGACY_RX,
511 	ICE_FLAG_VF_TRUE_PROMISC_ENA,
512 	ICE_FLAG_MDD_AUTO_RESET_VF,
513 	ICE_FLAG_VF_VLAN_PRUNING,
514 	ICE_FLAG_LINK_LENIENT_MODE_ENA,
515 	ICE_FLAG_PLUG_AUX_DEV,
516 	ICE_FLAG_UNPLUG_AUX_DEV,
517 	ICE_FLAG_MTU_CHANGED,
518 	ICE_FLAG_GNSS,			/* GNSS successfully initialized */
519 	ICE_FLAG_DPLL,			/* SyncE/PTP dplls initialized */
520 	ICE_PF_FLAGS_NBITS		/* must be last */
521 };
522 
523 enum ice_misc_thread_tasks {
524 	ICE_MISC_THREAD_TX_TSTAMP,
525 	ICE_MISC_THREAD_NBITS		/* must be last */
526 };
527 
528 struct ice_eswitch {
529 	struct ice_vsi *uplink_vsi;
530 	struct ice_esw_br_offloads *br_offloads;
531 	struct xarray reprs;
532 	bool is_running;
533 };
534 
535 struct ice_agg_node {
536 	u32 agg_id;
537 #define ICE_MAX_VSIS_IN_AGG_NODE	64
538 	u32 num_vsis;
539 	u8 valid;
540 };
541 
542 struct ice_pf_msix {
543 	u32 cur;
544 	u32 min;
545 	u32 max;
546 	u32 total;
547 	u32 rest;
548 };
549 
550 struct ice_pf {
551 	struct pci_dev *pdev;
552 	struct ice_adapter *adapter;
553 
554 	struct devlink_region *nvm_region;
555 	struct devlink_region *sram_region;
556 	struct devlink_region *devcaps_region;
557 
558 	/* devlink port data */
559 	struct devlink_port devlink_port;
560 
561 	/* OS reserved IRQ details */
562 	struct msix_entry *msix_entries;
563 	struct ice_irq_tracker irq_tracker;
564 	struct ice_virt_irq_tracker virt_irq_tracker;
565 
566 	u16 ctrl_vsi_idx;		/* control VSI index in pf->vsi array */
567 
568 	struct ice_vsi **vsi;		/* VSIs created by the driver */
569 	struct ice_vsi_stats **vsi_stats;
570 	struct ice_sw *first_sw;	/* first switch created by firmware */
571 	u16 eswitch_mode;		/* current mode of eswitch */
572 	struct dentry *ice_debugfs_pf;
573 	struct dentry *ice_debugfs_pf_fwlog;
574 	/* keep track of all the dentries for FW log modules */
575 	struct dentry **ice_debugfs_pf_fwlog_modules;
576 	struct ice_vfs vfs;
577 	DECLARE_BITMAP(features, ICE_F_MAX);
578 	DECLARE_BITMAP(state, ICE_STATE_NBITS);
579 	DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
580 	DECLARE_BITMAP(misc_thread, ICE_MISC_THREAD_NBITS);
581 	unsigned long *avail_txqs;	/* bitmap to track PF Tx queue usage */
582 	unsigned long *avail_rxqs;	/* bitmap to track PF Rx queue usage */
583 	unsigned long serv_tmr_period;
584 	unsigned long serv_tmr_prev;
585 	struct timer_list serv_tmr;
586 	struct work_struct serv_task;
587 	struct mutex avail_q_mutex;	/* protects access to avail_[rx|tx]qs */
588 	struct mutex sw_mutex;		/* lock for protecting VSI alloc flow */
589 	struct mutex tc_mutex;		/* lock to protect TC changes */
590 	struct mutex adev_mutex;	/* lock to protect aux device access */
591 	struct mutex lag_mutex;		/* protect ice_lag struct in PF */
592 	u32 msg_enable;
593 	struct ice_ptp ptp;
594 	struct gnss_serial *gnss_serial;
595 	struct gnss_device *gnss_dev;
596 	u16 num_rdma_msix;		/* Total MSIX vectors for RDMA driver */
597 	u16 rdma_base_vector;
598 
599 	/* spinlock to protect the AdminQ wait list */
600 	spinlock_t aq_wait_lock;
601 	struct hlist_head aq_wait_list;
602 	wait_queue_head_t aq_wait_queue;
603 	bool fw_emp_reset_disabled;
604 
605 	wait_queue_head_t reset_wait_queue;
606 
607 	u32 hw_csum_rx_error;
608 	u32 hw_rx_eipe_error;
609 	u32 oicr_err_reg;
610 	struct msi_map oicr_irq;	/* Other interrupt cause MSIX vector */
611 	struct msi_map ll_ts_irq;	/* LL_TS interrupt MSIX vector */
612 	u16 max_pf_txqs;	/* Total Tx queues PF wide */
613 	u16 max_pf_rxqs;	/* Total Rx queues PF wide */
614 	struct ice_pf_msix msix;
615 	u16 num_lan_tx;		/* num LAN Tx queues setup */
616 	u16 num_lan_rx;		/* num LAN Rx queues setup */
617 	u16 next_vsi;		/* Next free slot in pf->vsi[] - 0-based! */
618 	u16 num_alloc_vsi;
619 	u16 corer_count;	/* Core reset count */
620 	u16 globr_count;	/* Global reset count */
621 	u16 empr_count;		/* EMP reset count */
622 	u16 pfr_count;		/* PF reset count */
623 
624 	u8 wol_ena : 1;		/* software state of WoL */
625 	u32 wakeup_reason;	/* last wakeup reason */
626 	struct ice_hw_port_stats stats;
627 	struct ice_hw_port_stats stats_prev;
628 	struct ice_hw hw;
629 	u8 stat_prev_loaded:1; /* has previous stats been loaded */
630 	u8 rdma_mode;
631 	u16 dcbx_cap;
632 	u32 tx_timeout_count;
633 	unsigned long tx_timeout_last_recovery;
634 	u32 tx_timeout_recovery_level;
635 	char int_name[ICE_INT_NAME_STR_LEN];
636 	char int_name_ll_ts[ICE_INT_NAME_STR_LEN];
637 	struct auxiliary_device *adev;
638 	int aux_idx;
639 	u32 sw_int_count;
640 	/* count of tc_flower filters specific to a channel (i.e. where the
641 	 * filter action is "hw_tc <tc_num>")
642 	 */
643 	u16 num_dmac_chnl_fltrs;
644 	struct hlist_head tc_flower_fltr_list;
645 
646 	u64 supported_rxdids;
647 
648 	__le64 nvm_phy_type_lo; /* NVM PHY type low */
649 	__le64 nvm_phy_type_hi; /* NVM PHY type high */
650 	struct ice_link_default_override_tlv link_dflt_override;
651 	struct ice_lag *lag; /* Link Aggregation information */
652 
653 	struct ice_eswitch eswitch;
654 	struct ice_esw_br_port *br_port;
655 
656 	struct xarray dyn_ports;
657 	struct xarray sf_nums;
658 
659 #define ICE_INVALID_AGG_NODE_ID		0
660 #define ICE_PF_AGG_NODE_ID_START	1
661 #define ICE_MAX_PF_AGG_NODES		32
662 	struct ice_agg_node pf_agg_node[ICE_MAX_PF_AGG_NODES];
663 #define ICE_VF_AGG_NODE_ID_START	65
664 #define ICE_MAX_VF_AGG_NODES		32
665 	struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
666 	struct ice_dplls dplls;
667 	struct device *hwmon_dev;
668 	struct ice_health health_reporters;
669 
670 	u8 num_quanta_prof_used;
671 };
672 
673 extern struct workqueue_struct *ice_lag_wq;
674 
675 struct ice_netdev_priv {
676 	struct ice_vsi *vsi;
677 	struct ice_repr *repr;
678 	/* indirect block callbacks on registered higher level devices
679 	 * (e.g. tunnel devices)
680 	 *
681 	 * tc_indr_block_priv_list is used to look up indirect callback
682 	 * private data
683 	 */
684 	struct list_head tc_indr_block_priv_list;
685 };
686 
687 /**
688  * ice_vector_ch_enabled - check if vector is channel enabled
689  * @qv: pointer to q_vector; must not be NULL, since it is dereferenced
690  *
691  * Return: true if the vector is channel enabled, false otherwise.
692  */
693 static inline bool ice_vector_ch_enabled(struct ice_q_vector *qv)
694 {
695 	return !!qv->ch; /* Enable it to run with TC */
696 }
697 
698 /**
699  * ice_ptp_pf_handles_tx_interrupt - Check if PF handles Tx interrupt
700  * @pf: Board private structure
701  *
702  * Return true if this PF should respond to the Tx timestamp interrupt
703  * indication in the miscellaneous OICR interrupt handler.
704  */
705 static inline bool ice_ptp_pf_handles_tx_interrupt(struct ice_pf *pf)
706 {
707 	return pf->ptp.tx_interrupt_mode != ICE_PTP_TX_INTERRUPT_NONE;
708 }
709 
710 /**
711  * ice_irq_dynamic_ena - Enable default interrupt generation settings
712  * @hw: pointer to HW struct
713  * @vsi: pointer to VSI struct, can be NULL
714  * @q_vector: pointer to q_vector, can be NULL
715  */
716 static inline void
717 ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
718 		    struct ice_q_vector *q_vector)
719 {
720 	u32 vector = (vsi && q_vector) ? q_vector->reg_idx :
721 				((struct ice_pf *)hw->back)->oicr_irq.index;
722 	int itr = ICE_ITR_NONE;
723 	u32 val;
724 
725 	/* clear the PBA here, as this function is meant to clean out all
726 	 * previous interrupts and enable the interrupt
727 	 */
728 	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
729 	      (itr << GLINT_DYN_CTL_ITR_INDX_S);
730 	if (vsi && test_bit(ICE_VSI_DOWN, vsi->state))
731 		return;
733 	wr32(hw, GLINT_DYN_CTL(vector), val);
734 }
735 
736 /**
737  * ice_netdev_to_pf - Retrieve the PF struct associated with a netdev
738  * @netdev: pointer to the netdev struct
739  */
740 static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
741 {
742 	struct ice_netdev_priv *np = netdev_priv(netdev);
743 
744 	return np->vsi->back;
745 }
746 
747 static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
748 {
749 	return !!READ_ONCE(vsi->xdp_prog);
750 }
751 
752 static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
753 {
754 	ring->flags |= ICE_TX_FLAGS_RING_XDP;
755 }
756 
757 /**
758  * ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID
759  * @vsi: pointer to VSI
760  * @qid: index of the queue to check for XSK buff pool presence
761  *
762  * Return: A pointer to xsk_buff_pool structure if there is a buffer pool
763  * attached and configured as zero-copy, NULL otherwise.
764  */
765 static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi,
766 							u16 qid)
767 {
768 	struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);
769 
770 	if (!ice_is_xdp_ena_vsi(vsi))
771 		return NULL;
772 
773 	return (pool && pool->dev) ? pool : NULL;
774 }
775 
776 /**
777  * ice_rx_xsk_pool - assign XSK buff pool to Rx ring
778  * @ring: Rx ring to use
779  *
780  * Sets XSK buff pool pointer on Rx ring.
781  */
782 static inline void ice_rx_xsk_pool(struct ice_rx_ring *ring)
783 {
784 	struct ice_vsi *vsi = ring->vsi;
785 	u16 qid = ring->q_index;
786 
787 	WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
788 }
789 
790 /**
791  * ice_tx_xsk_pool - assign XSK buff pool to XDP ring
792  * @vsi: pointer to VSI
793  * @qid: index of the queue to check for XSK buff pool presence
794  *
795  * Sets the XSK buff pool pointer on the XDP ring.
796  *
797  * The XDP ring is picked from the Rx ring, and the Rx ring is picked based
798  * on the provided queue ID. The reason is that a queue vector might have
799  * more than one XDP ring assigned, e.g. when the user reduced the queue
800  * count on the netdev; the Rx ring carries a pointer to one of these XDP
801  * rings for its own purposes, such as handling the XDP_TX action, so we can
802  * piggyback on the rx_ring->xdp_ring assignment done during initialization.
803  */
804 static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
805 {
806 	struct ice_tx_ring *ring;
807 
808 	ring = vsi->rx_rings[qid]->xdp_ring;
809 	if (!ring)
810 		return;
811 
812 	WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
813 }
814 
815 /**
816  * ice_get_main_vsi - Get the PF VSI
817  * @pf: PF instance
818  *
819  * returns pf->vsi[0], which by definition is the PF VSI
820  */
821 static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf)
822 {
823 	if (pf->vsi)
824 		return pf->vsi[0];
825 
826 	return NULL;
827 }
828 
829 /**
830  * ice_get_netdev_priv_vsi - return VSI associated with netdev priv.
831  * @np: private netdev structure
832  */
833 static inline struct ice_vsi *ice_get_netdev_priv_vsi(struct ice_netdev_priv *np)
834 {
835 	/* In case of a port representor, return the source port VSI. */
836 	if (np->repr)
837 		return np->repr->src_vsi;
838 
839 	return np->vsi;
840 }
841 
842 /**
843  * ice_get_ctrl_vsi - Get the control VSI
844  * @pf: PF instance
845  */
846 static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf)
847 {
848 	/* if pf->ctrl_vsi_idx is ICE_NO_VSI, control VSI was not set up */
849 	if (!pf->vsi || pf->ctrl_vsi_idx == ICE_NO_VSI)
850 		return NULL;
851 
852 	return pf->vsi[pf->ctrl_vsi_idx];
853 }
854 
855 /**
856  * ice_find_vsi - Find the VSI from VSI ID
857  * @pf: The PF pointer to search in
858  * @vsi_num: The VSI ID to search for
859  */
860 static inline struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
861 {
862 	int i;
863 
864 	ice_for_each_vsi(pf, i)
865 		if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)
865 			return pf->vsi[i];
867 	return NULL;
868 }
869 
870 /**
871  * ice_is_switchdev_running - check if switchdev is configured
872  * @pf: pointer to PF structure
873  *
874  * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV
875  * and switchdev is configured, false otherwise.
876  */
877 static inline bool ice_is_switchdev_running(struct ice_pf *pf)
878 {
879 	return pf->eswitch.is_running;
880 }
881 
882 #define ICE_FD_STAT_CTR_BLOCK_COUNT	256
883 #define ICE_FD_STAT_PF_IDX(base_idx) \
884 			((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT)
885 #define ICE_FD_SB_STAT_IDX(base_idx) ICE_FD_STAT_PF_IDX(base_idx)
886 #define ICE_FD_STAT_CH			1
887 #define ICE_FD_CH_STAT_IDX(base_idx) \
888 			(ICE_FD_STAT_PF_IDX(base_idx) + ICE_FD_STAT_CH)
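
/* Worked example (illustrative): for base_idx 2, ICE_FD_SB_STAT_IDX(2) is
 * 2 * 256 = 512 and ICE_FD_CH_STAT_IDX(2) is 512 + 1 = 513, i.e. each PF
 * owns a block of 256 counters, with sideband and channel filters using
 * adjacent slots at the start of the block.
 */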
889 
890 /**
891  * ice_is_adq_active - check for any active ADQs
892  * @pf: pointer to PF
893  *
894  * Return: true if any ADQ is configured, which is determined by looking
895  * at the main (PF) VSI's numtc value and the ICE_FLAG_TC_MQPRIO flag;
896  * false otherwise.
897  */
898 static inline bool ice_is_adq_active(struct ice_pf *pf)
899 {
900 	struct ice_vsi *vsi;
901 
902 	vsi = ice_get_main_vsi(pf);
903 	if (!vsi)
904 		return false;
905 
906 	/* is ADQ configured */
907 	if (vsi->tc_cfg.numtc > ICE_CHNL_START_TC &&
908 	    test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
909 		return true;
910 
911 	return false;
912 }
913 
914 void ice_debugfs_fwlog_init(struct ice_pf *pf);
915 void ice_debugfs_pf_deinit(struct ice_pf *pf);
916 void ice_debugfs_init(void);
917 void ice_debugfs_exit(void);
918 void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module);
919 
920 bool netif_is_ice(const struct net_device *dev);
921 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
922 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
923 int ice_vsi_open_ctrl(struct ice_vsi *vsi);
924 int ice_vsi_open(struct ice_vsi *vsi);
925 void ice_set_ethtool_ops(struct net_device *netdev);
926 void ice_set_ethtool_repr_ops(struct net_device *netdev);
927 void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
928 void ice_set_ethtool_sf_ops(struct net_device *netdev);
929 u16 ice_get_avail_txq_count(struct ice_pf *pf);
930 u16 ice_get_avail_rxq_count(struct ice_pf *pf);
931 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked);
932 void ice_update_vsi_stats(struct ice_vsi *vsi);
933 void ice_update_pf_stats(struct ice_pf *pf);
934 void
935 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
936 			     struct ice_q_stats stats, u64 *pkts, u64 *bytes);
937 int ice_up(struct ice_vsi *vsi);
938 int ice_down(struct ice_vsi *vsi);
939 int ice_down_up(struct ice_vsi *vsi);
940 int ice_vsi_cfg_lan(struct ice_vsi *vsi);
941 struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
942 
943 enum ice_xdp_cfg {
944 	ICE_XDP_CFG_FULL,	/* Fully apply new config in .ndo_bpf() */
945 	ICE_XDP_CFG_PART,	/* Save/use part of config in VSI rebuild */
946 };
947 
948 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
949 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
950 			  enum ice_xdp_cfg cfg_type);
951 int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type);
952 void ice_map_xdp_rings(struct ice_vsi *vsi);
953 int
954 ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
955 	     u32 flags);
956 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
957 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
958 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed);
959 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed);
960 int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc);
961 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
962 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
963 void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
964 int ice_plug_aux_dev(struct ice_pf *pf);
965 void ice_unplug_aux_dev(struct ice_pf *pf);
966 int ice_init_rdma(struct ice_pf *pf);
967 void ice_deinit_rdma(struct ice_pf *pf);
968 const char *ice_aq_str(enum ice_aq_err aq_err);
969 bool ice_is_wol_supported(struct ice_hw *hw);
970 void ice_fdir_del_all_fltrs(struct ice_vsi *vsi);
971 int
972 ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
973 		    bool is_tun);
974 void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena);
975 int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd);
976 int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd);
977 int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd);
978 int
979 ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,
980 		      u32 *rule_locs);
981 void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx);
982 void ice_fdir_release_flows(struct ice_hw *hw);
983 void ice_fdir_replay_flows(struct ice_hw *hw);
984 void ice_fdir_replay_fltrs(struct ice_pf *pf);
985 int ice_fdir_create_dflt_rules(struct ice_pf *pf);
986 
987 enum ice_aq_task_state {
988 	ICE_AQ_TASK_NOT_PREPARED,
989 	ICE_AQ_TASK_WAITING,
990 	ICE_AQ_TASK_COMPLETE,
991 	ICE_AQ_TASK_CANCELED,
992 };
993 
994 struct ice_aq_task {
995 	struct hlist_node entry;
996 	struct ice_rq_event_info event;
997 	enum ice_aq_task_state state;
998 	u16 opcode;
999 };
1000 
1001 void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
1002 			   u16 opcode);
1003 int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
1004 			  unsigned long timeout);
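
/* Illustrative (hypothetical) flow: prepare the task before sending the AQ
 * command so its completion event cannot be missed, then wait for it:
 *
 *	struct ice_aq_task task;
 *	int err;
 *
 *	ice_aq_prep_for_event(pf, &task, opcode);
 *	err = <send the AQ command that fires the event>;
 *	if (!err)
 *		err = ice_aq_wait_for_event(pf, &task, HZ);
 */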
1005 int ice_open(struct net_device *netdev);
1006 int ice_open_internal(struct net_device *netdev);
1007 int ice_stop(struct net_device *netdev);
1008 void ice_service_task_schedule(struct ice_pf *pf);
1009 int ice_load(struct ice_pf *pf);
1010 void ice_unload(struct ice_pf *pf);
1011 void ice_adv_lnk_speed_maps_init(void);
1012 int ice_init_dev(struct ice_pf *pf);
1013 void ice_deinit_dev(struct ice_pf *pf);
1014 int ice_change_mtu(struct net_device *netdev, int new_mtu);
1015 void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue);
1016 int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp);
1017 void ice_set_netdev_features(struct net_device *netdev);
1018 int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
1019 int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
1020 void ice_get_stats64(struct net_device *netdev,
1021 		     struct rtnl_link_stats64 *stats);
1022 
1023 /**
1024  * ice_set_rdma_cap - enable RDMA support
1025  * @pf: PF struct
1026  */
1027 static inline void ice_set_rdma_cap(struct ice_pf *pf)
1028 {
1029 	if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
1030 		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
1031 		set_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
1032 	}
1033 }
1034 
1035 /**
1036  * ice_clear_rdma_cap - disable RDMA support
1037  * @pf: PF struct
1038  */
1039 static inline void ice_clear_rdma_cap(struct ice_pf *pf)
1040 {
1041 	/* defer the unplug to the service task to avoid the RTNL lock, and
1042 	 * clear the PLUG bit so that pending plugs don't interfere
1043 	 */
1044 	clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
1045 	set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags);
1046 	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
1047 }
1048 
1049 extern const struct xdp_metadata_ops ice_xdp_md_ops;
1050 #endif /* _ICE_H_ */
1051