xref: /linux/drivers/net/ethernet/intel/idpf/idpf.h (revision ff7e082ea40d70b7613e8db2cb11e3555ebcc546)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /* Copyright (C) 2023 Intel Corporation */
3 
4 #ifndef _IDPF_H_
5 #define _IDPF_H_
6 
7 /* Forward declaration */
8 struct idpf_adapter;
9 struct idpf_vport;
10 struct idpf_vport_max_q;
11 
12 #include <net/pkt_sched.h>
13 #include <linux/aer.h>
14 #include <linux/etherdevice.h>
15 #include <linux/ioport.h>
16 #include <linux/pci.h>
17 #include <linux/bitfield.h>
18 #include <linux/sctp.h>
19 #include <linux/ethtool_netlink.h>
20 #include <net/gro.h>
21 
22 #include <linux/net/intel/iidc_rdma.h>
23 #include <linux/net/intel/iidc_rdma_idpf.h>
24 
25 #include "virtchnl2.h"
26 #include "idpf_txrx.h"
27 #include "idpf_controlq.h"
28 
29 #define GETMAXVAL(num_bits)		GENMASK((num_bits) - 1, 0)
30 
31 #define IDPF_NO_FREE_SLOT		0xffff
32 
33 /* Default Mailbox settings */
34 #define IDPF_NUM_FILTERS_PER_MSG	20
35 #define IDPF_NUM_DFLT_MBX_Q		2	/* includes both TX and RX */
36 #define IDPF_DFLT_MBX_Q_LEN		64
37 #define IDPF_DFLT_MBX_ID		-1
38 /* maximum number of times to try before resetting mailbox */
39 #define IDPF_MB_MAX_ERR			20
40 #define IDPF_NUM_CHUNKS_PER_MSG(struct_sz, chunk_sz)	\
41 	((IDPF_CTLQ_MAX_BUF_LEN - (struct_sz)) / (chunk_sz))
42 
43 #define IDPF_WAIT_FOR_MARKER_TIMEO	500
44 #define IDPF_MAX_WAIT			500
45 
46 /* available message levels */
47 #define IDPF_AVAIL_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
48 
49 #define IDPF_DIM_PROFILE_SLOTS  5
50 
51 #define IDPF_VIRTCHNL_VERSION_MAJOR VIRTCHNL2_VERSION_MAJOR_2
52 #define IDPF_VIRTCHNL_VERSION_MINOR VIRTCHNL2_VERSION_MINOR_0
53 
54 /**
55  * struct idpf_mac_filter
56  * @list: list member field
57  * @macaddr: MAC address
58  * @remove: filter should be removed (virtchnl)
59  * @add: filter should be added (virtchnl)
60  */
61 struct idpf_mac_filter {
62 	struct list_head list;
63 	u8 macaddr[ETH_ALEN];
64 	bool remove;
65 	bool add;
66 };
67 
68 /**
69  * enum idpf_state - State machine to handle bring up
70  * @__IDPF_VER_CHECK: Negotiate virtchnl version
71  * @__IDPF_GET_CAPS: Negotiate capabilities
72  * @__IDPF_INIT_SW: Init based on given capabilities
73  * @__IDPF_STATE_LAST: Must be last, used to determine size
74  */
75 enum idpf_state {
76 	__IDPF_VER_CHECK,
77 	__IDPF_GET_CAPS,
78 	__IDPF_INIT_SW,
79 	__IDPF_STATE_LAST,
80 };
81 
82 /**
83  * enum idpf_flags - Hard reset causes.
84  * @IDPF_HR_FUNC_RESET: Hard reset due to a TX/RX timeout
85  * @IDPF_HR_DRV_LOAD: Set on driver load for a clean HW
86  * @IDPF_HR_RESET_IN_PROG: Reset in progress
87  * @IDPF_REMOVE_IN_PROG: Driver remove in progress
88  * @IDPF_MB_INTR_MODE: Mailbox in interrupt mode
89  * @IDPF_VC_CORE_INIT: virtchnl core has been initialized
90  * @IDPF_FLAGS_NBITS: Must be last
91  */
92 enum idpf_flags {
93 	IDPF_HR_FUNC_RESET,
94 	IDPF_HR_DRV_LOAD,
95 	IDPF_HR_RESET_IN_PROG,
96 	IDPF_REMOVE_IN_PROG,
97 	IDPF_MB_INTR_MODE,
98 	IDPF_VC_CORE_INIT,
99 	IDPF_FLAGS_NBITS,
100 };
101 
102 /**
103  * enum idpf_cap_field - Offsets into capabilities struct for specific caps
104  * @IDPF_BASE_CAPS: generic base capabilities
105  * @IDPF_CSUM_CAPS: checksum offload capabilities
106  * @IDPF_SEG_CAPS: segmentation offload capabilities
107  * @IDPF_RSS_CAPS: RSS offload capabilities
108  * @IDPF_HSPLIT_CAPS: Header split capabilities
109  * @IDPF_RSC_CAPS: RSC offload capabilities
110  * @IDPF_OTHER_CAPS: miscellaneous offloads
111  *
112  * Used when checking for a specific capability flag. Since different
113  * capability sets are not mutually exclusive numerically, the caller must
114  * specify which type of capability they are checking for.
115  */
116 enum idpf_cap_field {
117 	IDPF_BASE_CAPS		= -1,
118 	IDPF_CSUM_CAPS		= offsetof(struct virtchnl2_get_capabilities,
119 					   csum_caps),
120 	IDPF_SEG_CAPS		= offsetof(struct virtchnl2_get_capabilities,
121 					   seg_caps),
122 	IDPF_RSS_CAPS		= offsetof(struct virtchnl2_get_capabilities,
123 					   rss_caps),
124 	IDPF_HSPLIT_CAPS	= offsetof(struct virtchnl2_get_capabilities,
125 					   hsplit_caps),
126 	IDPF_RSC_CAPS		= offsetof(struct virtchnl2_get_capabilities,
127 					   rsc_caps),
128 	IDPF_OTHER_CAPS		= offsetof(struct virtchnl2_get_capabilities,
129 					   other_caps),
130 };
131 
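/* Illustrative usage sketch, not part of the driver: because bits from
 * different capability fields can share numeric values, a capability check
 * is always qualified with the field it belongs to.  Using the
 * idpf_is_cap_ena() wrapper declared later in this header:
 *
 *	if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS,
 *			    VIRTCHNL2_CAP_RX_CSUM_L3_IPV4))
 *		netdev->features |= NETIF_F_RXCSUM;
 */
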
132 /**
133  * enum idpf_vport_state - Current vport state
134  * @IDPF_VPORT_UP: Vport is up
135  * @IDPF_VPORT_STATE_NBITS: Must be last, number of states
136  */
137 enum idpf_vport_state {
138 	IDPF_VPORT_UP,
139 	IDPF_VPORT_STATE_NBITS
140 };
141 
142 /**
143  * struct idpf_netdev_priv - Struct to store vport back pointer
144  * @adapter: Adapter back pointer
145  * @vport: Vport back pointer
146  * @vport_id: Vport identifier
147  * @link_speed_mbps: Link speed in mbps
148  * @vport_idx: Relative vport index
149  * @max_tx_hdr_size: Max header length hardware can support
150  * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
151  * @state: See enum idpf_vport_state
152  * @netstats: Packet and byte stats
153  * @stats_lock: Lock to protect stats update
154  */
155 struct idpf_netdev_priv {
156 	struct idpf_adapter *adapter;
157 	struct idpf_vport *vport;
158 	u32 vport_id;
159 	u32 link_speed_mbps;
160 	u16 vport_idx;
161 	u16 max_tx_hdr_size;
162 	u16 tx_max_bufs;
163 	DECLARE_BITMAP(state, IDPF_VPORT_STATE_NBITS);
164 	struct rtnl_link_stats64 netstats;
165 	spinlock_t stats_lock;
166 };
167 
168 /**
169  * struct idpf_reset_reg - Reset register offsets/masks
170  * @rstat: Reset status register
171  * @rstat_m: Reset status mask
172  */
173 struct idpf_reset_reg {
174 	void __iomem *rstat;
175 	u32 rstat_m;
176 };
177 
178 /**
179  * struct idpf_vport_max_q - Queue limits
180  * @max_rxq: Maximum number of RX queues supported
181  * @max_txq: Maximum number of TX queues supported
182  * @max_bufq: In splitq, maximum number of buffer queues supported
183  * @max_complq: In splitq, maximum number of completion queues supported
184  */
185 struct idpf_vport_max_q {
186 	u16 max_rxq;
187 	u16 max_txq;
188 	u16 max_bufq;
189 	u16 max_complq;
190 };
191 
192 /**
193  * struct idpf_reg_ops - Device specific register operation function pointers
194  * @ctlq_reg_init: Mailbox control queue register initialization
195  * @intr_reg_init: Traffic interrupt register initialization
196  * @mb_intr_reg_init: Mailbox interrupt register initialization
197  * @reset_reg_init: Reset register initialization
198  * @trigger_reset: Trigger a reset to occur
199  * @ptp_reg_init: PTP register initialization
200  */
201 struct idpf_reg_ops {
202 	void (*ctlq_reg_init)(struct idpf_adapter *adapter,
203 			      struct idpf_ctlq_create_info *cq);
204 	int (*intr_reg_init)(struct idpf_vport *vport);
205 	void (*mb_intr_reg_init)(struct idpf_adapter *adapter);
206 	void (*reset_reg_init)(struct idpf_adapter *adapter);
207 	void (*trigger_reset)(struct idpf_adapter *adapter,
208 			      enum idpf_flags trig_cause);
209 	void (*ptp_reg_init)(const struct idpf_adapter *adapter);
210 };
211 
212 #define IDPF_MMIO_REG_NUM_STATIC	2
213 #define IDPF_PF_MBX_REGION_SZ		4096
214 #define IDPF_PF_RSTAT_REGION_SZ		2048
215 #define IDPF_VF_MBX_REGION_SZ		10240
216 #define IDPF_VF_RSTAT_REGION_SZ		2048
217 
218 /**
219  * struct idpf_dev_ops - Device specific operations
220  * @reg_ops: Register operations
221  * @idc_init: IDC initialization
222  * @static_reg_info: array of mailbox and rstat register info
223  */
224 struct idpf_dev_ops {
225 	struct idpf_reg_ops reg_ops;
226 
227 	int (*idc_init)(struct idpf_adapter *adapter);
228 
229 	/* static_reg_info[0] is mailbox region, static_reg_info[1] is rstat */
230 	struct resource static_reg_info[IDPF_MMIO_REG_NUM_STATIC];
231 };
232 
233 /**
234  * enum idpf_vport_reset_cause - Vport soft reset causes
235  * @IDPF_SR_Q_CHANGE: Soft reset queue change
236  * @IDPF_SR_Q_DESC_CHANGE: Soft reset descriptor change
237  * @IDPF_SR_MTU_CHANGE: Soft reset MTU change
238  * @IDPF_SR_RSC_CHANGE: Soft reset RSC change
239  */
240 enum idpf_vport_reset_cause {
241 	IDPF_SR_Q_CHANGE,
242 	IDPF_SR_Q_DESC_CHANGE,
243 	IDPF_SR_MTU_CHANGE,
244 	IDPF_SR_RSC_CHANGE,
245 };
246 
247 /**
248  * enum idpf_vport_flags - Vport flags
249  * @IDPF_VPORT_DEL_QUEUES: To send delete queues message
250  * @IDPF_VPORT_FLAGS_NBITS: Must be last
251  */
252 enum idpf_vport_flags {
253 	IDPF_VPORT_DEL_QUEUES,
254 	IDPF_VPORT_FLAGS_NBITS,
255 };
256 
257 /**
258  * struct idpf_tstamp_stats - Tx timestamp statistics
259  * @stats_sync: See struct u64_stats_sync
260  * @packets: Number of packets successfully timestamped by the hardware
261  * @discarded: Number of Tx skbs discarded due to cached PHC
262  *	       being too old to correctly extend timestamp
263  * @flushed: Number of Tx skbs flushed because the interface was closed
264  */
265 struct idpf_tstamp_stats {
266 	struct u64_stats_sync stats_sync;
267 	u64_stats_t packets;
268 	u64_stats_t discarded;
269 	u64_stats_t flushed;
270 };
271 
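/* Illustrative sketch of the usual u64_stats update pattern these counters
 * follow (writer side; assumes a struct idpf_tstamp_stats *stats whose
 * stats_sync was set up with u64_stats_init()):
 *
 *	u64_stats_update_begin(&stats->stats_sync);
 *	u64_stats_inc(&stats->packets);
 *	u64_stats_update_end(&stats->stats_sync);
 *
 * Readers pair u64_stats_fetch_begin()/u64_stats_fetch_retry() on
 * stats_sync with u64_stats_read() on the individual counters.
 */
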
272 struct idpf_port_stats {
273 	struct u64_stats_sync stats_sync;
274 	u64_stats_t rx_hw_csum_err;
275 	u64_stats_t rx_hsplit;
276 	u64_stats_t rx_hsplit_hbo;
277 	u64_stats_t rx_bad_descs;
278 	u64_stats_t tx_linearize;
279 	u64_stats_t tx_busy;
280 	u64_stats_t tx_drops;
281 	u64_stats_t tx_dma_map_errs;
282 	struct virtchnl2_vport_stats vport_stats;
283 };
284 
285 struct idpf_fsteer_fltr {
286 	struct list_head list;
287 	u32 loc;
288 	u32 q_index;
289 };
290 
291 /**
292  * struct idpf_vport - Handle for netdevices and queue resources
293  * @num_txq: Number of allocated TX queues
294  * @num_complq: Number of allocated completion queues
295  * @txq_desc_count: TX queue descriptor count
296  * @complq_desc_count: Completion queue descriptor count
297  * @compln_clean_budget: Work budget for completion clean
298  * @num_txq_grp: Number of TX queue groups
299  * @txq_grps: Array of TX queue groups
300  * @txq_model: Split queue or single queue queuing model
301  * @txqs: Used only in hotpath to get to the right queue very fast
302  * @crc_enable: Enable CRC insertion offload
303  * @xdpsq_share: whether XDPSQ sharing is enabled
304  * @num_xdp_txq: number of XDPSQs
305  * @xdp_txq_offset: index of the first XDPSQ (== number of regular SQs)
306  * @xdp_prog: installed XDP program
307  * @num_rxq: Number of allocated RX queues
308  * @num_bufq: Number of allocated buffer queues
309  * @rxq_desc_count: RX queue descriptor count. *MUST* have enough descriptors
310  *		    to complete all buffer descriptors for all buffer queues in
311  *		    the worst case.
312  * @num_bufqs_per_qgrp: Buffer queues per RX queue in a given grouping
313  * @bufq_desc_count: Buffer queue descriptor count
314  * @num_rxq_grp: Number of RX queue groups
315  * @rxq_grps: Array of RX queue groups. Number of groups * number of RX
316  *	      queues per group yields the total number of RX queues.
317  * @rxq_model: Split queue or single queue queuing model
318  * @rx_ptype_lkup: Lookup table for ptypes on RX
319  * @vdev_info: IDC vport device info pointer
320  * @adapter: back pointer to associated adapter
321  * @netdev: Associated net_device. Each vport should have one and only one
322  *	    associated netdev.
323  * @flags: See enum idpf_vport_flags
324  * @vport_type: Default SRIOV, SIOV, etc.
325  * @vport_id: Device given vport identifier
326  * @idx: Software index in adapter vports struct
327  * @default_vport: Use this vport if one isn't specified
328  * @base_rxd: True if the driver should use base descriptors instead of flex
329  * @num_q_vectors: Number of IRQ vectors allocated
330  * @q_vectors: Array of queue vectors
331  * @q_vector_idxs: Starting index of queue vectors
332  * @noirq_dyn_ctl: register to enable/disable the vector for NOIRQ queues
333  * @noirq_dyn_ctl_ena: value to write to the above to enable it
334  * @noirq_v_idx: ID of the NOIRQ vector
335  * @max_mtu: device given max possible MTU
336  * @default_mac_addr: device will give a default MAC to use
337  * @rx_itr_profile: RX profiles for Dynamic Interrupt Moderation
338  * @tx_itr_profile: TX profiles for Dynamic Interrupt Moderation
339  * @port_stats: per port csum, header split, and other offload stats
340  * @link_up: True if link is up
341  * @tx_tstamp_caps: Capabilities negotiated for Tx timestamping
342  * @tstamp_config: The Tx tstamp config
343  * @tstamp_task: Tx timestamping task
344  * @tstamp_stats: Tx timestamping statistics
345  */
346 struct idpf_vport {
347 	u16 num_txq;
348 	u16 num_complq;
349 	u32 txq_desc_count;
350 	u32 complq_desc_count;
351 	u32 compln_clean_budget;
352 	u16 num_txq_grp;
353 	struct idpf_txq_group *txq_grps;
354 	u32 txq_model;
355 	struct idpf_tx_queue **txqs;
356 	bool crc_enable;
357 
358 	bool xdpsq_share;
359 	u16 num_xdp_txq;
360 	u16 xdp_txq_offset;
361 	struct bpf_prog *xdp_prog;
362 
363 	u16 num_rxq;
364 	u16 num_bufq;
365 	u32 rxq_desc_count;
366 	u8 num_bufqs_per_qgrp;
367 	u32 bufq_desc_count[IDPF_MAX_BUFQS_PER_RXQ_GRP];
368 	u16 num_rxq_grp;
369 	struct idpf_rxq_group *rxq_grps;
370 	u32 rxq_model;
371 	struct libeth_rx_pt *rx_ptype_lkup;
372 
373 	struct iidc_rdma_vport_dev_info *vdev_info;
374 
375 	struct idpf_adapter *adapter;
376 	struct net_device *netdev;
377 	DECLARE_BITMAP(flags, IDPF_VPORT_FLAGS_NBITS);
378 	u16 vport_type;
379 	u32 vport_id;
380 	u16 idx;
381 	bool default_vport;
382 	bool base_rxd;
383 
384 	u16 num_q_vectors;
385 	struct idpf_q_vector *q_vectors;
386 	u16 *q_vector_idxs;
387 
388 	void __iomem *noirq_dyn_ctl;
389 	u32 noirq_dyn_ctl_ena;
390 	u16 noirq_v_idx;
391 
392 	u16 max_mtu;
393 	u8 default_mac_addr[ETH_ALEN];
394 	u16 rx_itr_profile[IDPF_DIM_PROFILE_SLOTS];
395 	u16 tx_itr_profile[IDPF_DIM_PROFILE_SLOTS];
396 	struct idpf_port_stats port_stats;
397 
398 	bool link_up;
399 
400 	struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
401 	struct kernel_hwtstamp_config tstamp_config;
402 	struct work_struct tstamp_task;
403 	struct idpf_tstamp_stats tstamp_stats;
404 };
405 
406 /**
407  * enum idpf_user_flags
408  * @__IDPF_USER_FLAG_HSPLIT: header split state
409  * @__IDPF_PROMISC_UC: Unicast promiscuous mode
410  * @__IDPF_PROMISC_MC: Multicast promiscuous mode
411  * @__IDPF_USER_FLAGS_NBITS: Must be last
412  */
413 enum idpf_user_flags {
414 	__IDPF_USER_FLAG_HSPLIT = 0U,
415 	__IDPF_PROMISC_UC = 32,
416 	__IDPF_PROMISC_MC,
417 
418 	__IDPF_USER_FLAGS_NBITS,
419 };
420 
421 /**
422  * struct idpf_rss_data - Associated RSS data
423  * @rss_key_size: Size of RSS hash key
424  * @rss_key: RSS hash key
425  * @rss_lut_size: Size of RSS lookup table
426  * @rss_lut: RSS lookup table
427  * @cached_lut: Used to restore previously init RSS lut
428  */
429 struct idpf_rss_data {
430 	u16 rss_key_size;
431 	u8 *rss_key;
432 	u16 rss_lut_size;
433 	u32 *rss_lut;
434 	u32 *cached_lut;
435 };
436 
437 /**
438  * struct idpf_q_coalesce - User defined coalescing configuration values for
439  *			   a single queue.
440  * @tx_intr_mode: Dynamic TX ITR or not
441  * @rx_intr_mode: Dynamic RX ITR or not
442  * @tx_coalesce_usecs: TX interrupt throttling rate
443  * @rx_coalesce_usecs: RX interrupt throttling rate
444  *
445  * Used to restore user coalescing configuration after a reset.
446  */
447 struct idpf_q_coalesce {
448 	u32 tx_intr_mode;
449 	u32 rx_intr_mode;
450 	u32 tx_coalesce_usecs;
451 	u32 rx_coalesce_usecs;
452 };
453 
454 /**
455  * struct idpf_vport_user_config_data - User defined configuration values for
456  *					each vport.
457  * @rss_data: See struct idpf_rss_data
458  * @q_coalesce: Array of per queue coalescing data
459  * @num_req_tx_qs: Number of user requested TX queues through ethtool
460  * @num_req_rx_qs: Number of user requested RX queues through ethtool
461  * @num_req_txq_desc: Number of user requested TX queue descriptors through
462  *		      ethtool
463  * @num_req_rxq_desc: Number of user requested RX queue descriptors through
464  *		      ethtool
465  * @xdp_prog: requested XDP program to install
466  * @user_flags: User toggled config flags
467  * @mac_filter_list: List of MAC filters
468  * @num_fsteer_fltrs: number of flow steering filters
469  * @flow_steer_list: list of flow steering filters
470  *
471  * Used to restore configuration after a reset as the vport will get wiped.
472  */
473 struct idpf_vport_user_config_data {
474 	struct idpf_rss_data rss_data;
475 	struct idpf_q_coalesce *q_coalesce;
476 	u16 num_req_tx_qs;
477 	u16 num_req_rx_qs;
478 	u32 num_req_txq_desc;
479 	u32 num_req_rxq_desc;
480 	struct bpf_prog *xdp_prog;
481 	DECLARE_BITMAP(user_flags, __IDPF_USER_FLAGS_NBITS);
482 	struct list_head mac_filter_list;
483 	u32 num_fsteer_fltrs;
484 	struct list_head flow_steer_list;
485 };
486 
487 /**
488  * enum idpf_vport_config_flags - Vport config flags
489  * @IDPF_VPORT_REG_NETDEV: Register netdev
490  * @IDPF_VPORT_UP_REQUESTED: Set if interface up is requested on core reset
491  * @IDPF_VPORT_CONFIG_FLAGS_NBITS: Must be last
492  */
493 enum idpf_vport_config_flags {
494 	IDPF_VPORT_REG_NETDEV,
495 	IDPF_VPORT_UP_REQUESTED,
496 	IDPF_VPORT_CONFIG_FLAGS_NBITS,
497 };
498 
499 /**
500  * struct idpf_avail_queue_info
501  * @avail_rxq: Available RX queues
502  * @avail_txq: Available TX queues
503  * @avail_bufq: Available buffer queues
504  * @avail_complq: Available completion queues
505  *
506  * Maintain total queues available after allocating max queues to each vport.
507  */
508 struct idpf_avail_queue_info {
509 	u16 avail_rxq;
510 	u16 avail_txq;
511 	u16 avail_bufq;
512 	u16 avail_complq;
513 };
514 
515 /**
516  * struct idpf_vector_info - Utility structure to pass function arguments as a
517  *			     structure
518  * @num_req_vecs: Vectors required based on the number of queues updated by the
519  *		  user via ethtool
520  * @num_curr_vecs: Current number of vectors, must be >= @num_req_vecs
521  * @index: Relative starting index for vectors
522  * @default_vport: Vectors are for default vport
523  */
524 struct idpf_vector_info {
525 	u16 num_req_vecs;
526 	u16 num_curr_vecs;
527 	u16 index;
528 	bool default_vport;
529 };
530 
531 /**
532  * struct idpf_vector_lifo - Stack to maintain vector indexes used for vector
533  *			     distribution algorithm
534  * @top: Points to stack top i.e. next available vector index
535  * @base: Always points to start of the free pool
536  * @size: Total size of the vector stack
537  * @vec_idx: Array to store all the vector indexes
538  *
539  * The vector stack maintains all the relative vector indexes at the *adapter*
540  * level. It is divided into two parts: the 'default pool' and the 'free
541  * pool'. The vector distribution algorithm gives priority to default vports
542  * such that at least IDPF_MIN_Q_VEC vectors are allocated per default vport,
543  * and the relative vector indexes for those are maintained in the default
544  * pool. The free pool contains all the unallocated vector indexes, which can
545  * be allocated on demand. The mailbox vector index is maintained in the
546  * default pool of the stack.
547  */
548 struct idpf_vector_lifo {
549 	u16 top;
550 	u16 base;
551 	u16 size;
552 	u16 *vec_idx;
553 };
554 
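/* Conceptual layout implied by the field descriptions above (illustrative
 * only):
 *
 *	vec_idx[0 .. base - 1]     default pool (mailbox vector and the
 *	                           per-default-vport vectors)
 *	vec_idx[base .. size - 1]  free pool; top is the next available
 *	                           index handed out on demand
 */
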
555 /**
556  * struct idpf_vport_config - Vport configuration data
557  * @user_config: see struct idpf_vport_user_config_data
558  * @max_q: Maximum possible queues
559  * @req_qs_chunks: Queue chunk data for requested queues
560  * @mac_filter_list_lock: Lock to protect mac filters
561  * @flags: See enum idpf_vport_config_flags
562  */
563 struct idpf_vport_config {
564 	struct idpf_vport_user_config_data user_config;
565 	struct idpf_vport_max_q max_q;
566 	struct virtchnl2_add_queues *req_qs_chunks;
567 	spinlock_t mac_filter_list_lock;
568 	DECLARE_BITMAP(flags, IDPF_VPORT_CONFIG_FLAGS_NBITS);
569 };
570 
571 struct idpf_vc_xn_manager;
572 
573 #define idpf_for_each_vport(adapter, iter) \
574 	for (struct idpf_vport **__##iter = &(adapter)->vports[0], \
575 	     *iter = (adapter)->max_vports ? *__##iter : NULL; \
576 	     iter; \
577 	     iter = (++__##iter) < &(adapter)->vports[(adapter)->max_vports] ? \
578 	     *__##iter : NULL)
579 
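/* Illustrative usage sketch (hypothetical caller, not part of the driver):
 * the macro declares the iterator itself and walks adapter->vports[] in
 * order, stopping at max_vports or at the first NULL slot.  For example,
 * counting vports with link up:
 *
 *	u16 up = 0;
 *
 *	idpf_for_each_vport(adapter, vport)
 *		if (vport->link_up)
 *			up++;
 */
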
580 /**
581  * struct idpf_adapter - Device data struct generated on probe
582  * @pdev: PCI device struct given on probe
583  * @virt_ver_maj: Virtchnl version major
584  * @virt_ver_min: Virtchnl version minor
585  * @msg_enable: Debug message level enabled
586  * @mb_wait_count: Number of times mailbox initialization has been attempted
587  * @state: Init state machine
588  * @flags: See enum idpf_flags
589  * @reset_reg: See struct idpf_reset_reg
590  * @hw: Device access data
591  * @num_avail_msix: Available number of MSIX vectors
592  * @num_msix_entries: Number of entries in MSIX table
593  * @msix_entries: MSIX table
594  * @num_rdma_msix_entries: Available number of MSIX vectors for RDMA
595  * @rdma_msix_entries: RDMA MSIX table
596  * @req_vec_chunks: Requested vector chunk data
597  * @mb_vector: Mailbox vector data
598  * @vector_stack: Stack to store the msix vector indexes
599  * @irq_mb_handler: Handler for hard interrupt for mailbox
600  * @tx_timeout_count: Number of TX timeouts that have occurred
601  * @avail_queues: Device given queue limits
602  * @vports: Array to store vports created by the driver
603  * @netdevs: Associated Vport netdevs
604  * @vport_params_reqd: Vport params requested
605  * @vport_params_recvd: Vport params received
606  * @vport_ids: Array of device given vport identifiers
607  * @vport_config: Vport config parameters
608  * @max_vports: Maximum vports that can be allocated
609  * @num_alloc_vports: Current number of vports allocated
610  * @next_vport: Next free slot in pf->vport[] - 0-based!
611  * @init_task: Initialization task
612  * @init_wq: Workqueue for initialization task
613  * @serv_task: Periodically recurring maintenance task
614  * @serv_wq: Workqueue for service task
615  * @mbx_task: Task to handle mailbox interrupts
616  * @mbx_wq: Workqueue for mailbox responses
617  * @vc_event_task: Task to handle out of band virtchnl event notifications
618  * @vc_event_wq: Workqueue for virtchnl events
619  * @stats_task: Periodic statistics retrieval task
620  * @stats_wq: Workqueue for statistics task
621  * @caps: Negotiated capabilities with device
622  * @vcxn_mngr: Virtchnl transaction manager
623  * @dev_ops: See idpf_dev_ops
624  * @cdev_info: IDC core device info pointer
625  * @num_vfs: Number of VFs allocated through sysfs. The PF does not directly
626  *	     talk to the VFs, but this count is used to initialize them
627  * @crc_enable: Enable CRC insertion offload
628  * @req_tx_splitq: TX split or single queue model to request
629  * @req_rx_splitq: RX split or single queue model to request
630  * @vport_ctrl_lock: Lock to protect the vport control flow
631  * @vector_lock: Lock to protect vector distribution
632  * @queue_lock: Lock to protect queue distribution
633  * @vc_buf_lock: Lock to protect virtchnl buffer
634  * @ptp: Storage for PTP-related data
635  */
636 struct idpf_adapter {
637 	struct pci_dev *pdev;
638 	u32 virt_ver_maj;
639 	u32 virt_ver_min;
640 
641 	u32 msg_enable;
642 	u32 mb_wait_count;
643 	enum idpf_state state;
644 	DECLARE_BITMAP(flags, IDPF_FLAGS_NBITS);
645 	struct idpf_reset_reg reset_reg;
646 	struct idpf_hw hw;
647 	u16 num_avail_msix;
648 	u16 num_msix_entries;
649 	struct msix_entry *msix_entries;
650 	u16 num_rdma_msix_entries;
651 	struct msix_entry *rdma_msix_entries;
652 	struct virtchnl2_alloc_vectors *req_vec_chunks;
653 	struct idpf_q_vector mb_vector;
654 	struct idpf_vector_lifo vector_stack;
655 	irqreturn_t (*irq_mb_handler)(int irq, void *data);
656 
657 	u32 tx_timeout_count;
658 	struct idpf_avail_queue_info avail_queues;
659 	struct idpf_vport **vports;
660 	struct net_device **netdevs;
661 	struct virtchnl2_create_vport **vport_params_reqd;
662 	struct virtchnl2_create_vport **vport_params_recvd;
663 	u32 *vport_ids;
664 
665 	struct idpf_vport_config **vport_config;
666 	u16 max_vports;
667 	u16 num_alloc_vports;
668 	u16 next_vport;
669 
670 	struct delayed_work init_task;
671 	struct workqueue_struct *init_wq;
672 	struct delayed_work serv_task;
673 	struct workqueue_struct *serv_wq;
674 	struct delayed_work mbx_task;
675 	struct workqueue_struct *mbx_wq;
676 	struct delayed_work vc_event_task;
677 	struct workqueue_struct *vc_event_wq;
678 	struct delayed_work stats_task;
679 	struct workqueue_struct *stats_wq;
680 	struct virtchnl2_get_capabilities caps;
681 	struct idpf_vc_xn_manager *vcxn_mngr;
682 
683 	struct idpf_dev_ops dev_ops;
684 	struct iidc_rdma_core_dev_info *cdev_info;
685 	int num_vfs;
686 	bool crc_enable;
687 	bool req_tx_splitq;
688 	bool req_rx_splitq;
689 
690 	struct mutex vport_ctrl_lock;
691 	struct mutex vector_lock;
692 	struct mutex queue_lock;
693 	struct mutex vc_buf_lock;
694 
695 	struct idpf_ptp *ptp;
696 };
697 
698 /**
699  * idpf_is_queue_model_split - check if queue model is split
700  * @q_model: queue model single or split
701  *
702  * Returns true if the queue model is split, false otherwise
703  */
704 static inline int idpf_is_queue_model_split(u16 q_model)
705 {
706 	return !IS_ENABLED(CONFIG_IDPF_SINGLEQ) ||
707 	       q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT;
708 }
709 
710 static inline bool idpf_xdp_enabled(const struct idpf_vport *vport)
711 {
712 	return vport->adapter && vport->xdp_prog;
713 }
714 
715 #define idpf_is_cap_ena(adapter, field, flag) \
716 	idpf_is_capability_ena(adapter, false, field, flag)
717 #define idpf_is_cap_ena_all(adapter, field, flag) \
718 	idpf_is_capability_ena(adapter, true, field, flag)
719 
720 bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
721 			    enum idpf_cap_field field, u64 flag);
722 
723 /**
724  * idpf_is_rdma_cap_ena - Determine if RDMA is supported
725  * @adapter: private data struct
726  *
727  * Return: true if RDMA capability is enabled, false otherwise
728  */
729 static inline bool idpf_is_rdma_cap_ena(struct idpf_adapter *adapter)
730 {
731 	return idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_RDMA);
732 }
733 
734 #define IDPF_CAP_RSS (\
735 	VIRTCHNL2_FLOW_IPV4_TCP		|\
736 	VIRTCHNL2_FLOW_IPV4_UDP		|\
737 	VIRTCHNL2_FLOW_IPV4_SCTP	|\
738 	VIRTCHNL2_FLOW_IPV4_OTHER	|\
739 	VIRTCHNL2_FLOW_IPV6_TCP		|\
740 	VIRTCHNL2_FLOW_IPV6_UDP		|\
741 	VIRTCHNL2_FLOW_IPV6_SCTP	|\
742 	VIRTCHNL2_FLOW_IPV6_OTHER)
743 
744 #define IDPF_CAP_RSC (\
745 	VIRTCHNL2_CAP_RSC_IPV4_TCP	|\
746 	VIRTCHNL2_CAP_RSC_IPV6_TCP)
747 
748 #define IDPF_CAP_HSPLIT	(\
749 	VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4	|\
750 	VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6)
751 
752 #define IDPF_CAP_TX_CSUM_L4V4 (\
753 	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP	|\
754 	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP)
755 
756 #define IDPF_CAP_TX_CSUM_L4V6 (\
757 	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP	|\
758 	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP)
759 
760 #define IDPF_CAP_RX_CSUM (\
761 	VIRTCHNL2_CAP_RX_CSUM_L3_IPV4		|\
762 	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP	|\
763 	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP	|\
764 	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP	|\
765 	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP)
766 
767 #define IDPF_CAP_TX_SCTP_CSUM (\
768 	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP	|\
769 	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP)
770 
771 #define IDPF_CAP_TUNNEL_TX_CSUM (\
772 	VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL	|\
773 	VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL)
774 
775 /**
776  * idpf_get_reserved_vecs - Get reserved vectors
777  * @adapter: private data struct
778  */
779 static inline u16 idpf_get_reserved_vecs(struct idpf_adapter *adapter)
780 {
781 	return le16_to_cpu(adapter->caps.num_allocated_vectors);
782 }
783 
784 /**
785  * idpf_get_reserved_rdma_vecs - Get reserved RDMA vectors
786  * @adapter: private data struct
787  *
788  * Return: number of vectors reserved for RDMA
789  */
790 static inline u16 idpf_get_reserved_rdma_vecs(struct idpf_adapter *adapter)
791 {
792 	return le16_to_cpu(adapter->caps.num_rdma_allocated_vectors);
793 }
794 
795 /**
796  * idpf_get_default_vports - Get default number of vports
797  * @adapter: private data struct
798  */
799 static inline u16 idpf_get_default_vports(struct idpf_adapter *adapter)
800 {
801 	return le16_to_cpu(adapter->caps.default_num_vports);
802 }
803 
804 /**
805  * idpf_get_max_vports - Get max number of vports
806  * @adapter: private data struct
807  */
808 static inline u16 idpf_get_max_vports(struct idpf_adapter *adapter)
809 {
810 	return le16_to_cpu(adapter->caps.max_vports);
811 }
812 
813 /**
814  * idpf_get_max_tx_bufs - Get max scatter-gather buffers supported by the device
815  * @adapter: private data struct
816  */
817 static inline unsigned int idpf_get_max_tx_bufs(struct idpf_adapter *adapter)
818 {
819 	return adapter->caps.max_sg_bufs_per_tx_pkt;
820 }
821 
822 /**
823  * idpf_get_min_tx_pkt_len - Get min packet length supported by the device
824  * @adapter: private data struct
825  */
826 static inline u8 idpf_get_min_tx_pkt_len(struct idpf_adapter *adapter)
827 {
828 	u8 pkt_len = adapter->caps.min_sso_packet_len;
829 
830 	return pkt_len ? pkt_len : IDPF_TX_MIN_PKT_LEN;
831 }
832 
833 /**
834  * idpf_get_mbx_reg_addr - Get BAR0 mailbox register address
835  * @adapter: private data struct
836  * @reg_offset: register offset value
837  *
838  * Return: BAR0 mailbox register address based on register offset.
839  */
840 static inline void __iomem *idpf_get_mbx_reg_addr(struct idpf_adapter *adapter,
841 						  resource_size_t reg_offset)
842 {
843 	return adapter->hw.mbx.vaddr + reg_offset;
844 }
845 
846 /**
847  * idpf_get_rstat_reg_addr - Get BAR0 rstat register address
848  * @adapter: private data struct
849  * @reg_offset: register offset value
850  *
851  * Return: BAR0 rstat register address based on register offset.
852  */
853 static inline void __iomem *idpf_get_rstat_reg_addr(struct idpf_adapter *adapter,
854 						    resource_size_t reg_offset)
855 {
856 	reg_offset -= adapter->dev_ops.static_reg_info[1].start;
857 
858 	return adapter->hw.rstat.vaddr + reg_offset;
859 }
860 
861 /**
862  * idpf_get_reg_addr - Get BAR0 register address
863  * @adapter: private data struct
864  * @reg_offset: register offset value
865  *
866  * Based on the register offset, return the actual BAR0 register address
867  */
868 static inline void __iomem *idpf_get_reg_addr(struct idpf_adapter *adapter,
869 					      resource_size_t reg_offset)
870 {
871 	struct idpf_hw *hw = &adapter->hw;
872 
873 	for (int i = 0; i < hw->num_lan_regs; i++) {
874 		struct idpf_mmio_reg *region = &hw->lan_regs[i];
875 
876 		if (reg_offset >= region->addr_start &&
877 		    reg_offset < (region->addr_start + region->addr_len)) {
878 			/* Convert the offset so that it is relative to the
879 			 * start of the region.  Then add the base address of
880 			 * the region to get the final address.
881 			 */
882 			reg_offset -= region->addr_start;
883 
884 			return region->vaddr + reg_offset;
885 		}
886 	}
887 
888 	/* It's impossible to hit this case with offsets from the CP. But if we
889 	 * do for any other reason, the kernel will panic on that register
890 	 * access. Might as well do it here to make it clear what's happening.
891 	 */
892 	BUG();
893 
894 	return NULL;
895 }
896 
897 /**
898  * idpf_is_reset_detected - check if we were reset at some point
899  * @adapter: driver specific private structure
900  *
901  * Returns true if we are either in reset currently or were previously reset.
902  */
903 static inline bool idpf_is_reset_detected(struct idpf_adapter *adapter)
904 {
905 	if (!adapter->hw.arq)
906 		return true;
907 
908 	return !(readl(idpf_get_mbx_reg_addr(adapter, adapter->hw.arq->reg.len)) &
909 		 adapter->hw.arq->reg.len_mask);
910 }
911 
912 /**
913  * idpf_is_reset_in_prog - check if reset is in progress
914  * @adapter: driver specific private structure
915  *
916  * Returns true if hard reset is in progress, false otherwise
917  */
918 static inline bool idpf_is_reset_in_prog(struct idpf_adapter *adapter)
919 {
920 	return (test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags) ||
921 		test_bit(IDPF_HR_FUNC_RESET, adapter->flags) ||
922 		test_bit(IDPF_HR_DRV_LOAD, adapter->flags));
923 }
924 
925 /**
926  * idpf_netdev_to_vport - get a vport handle from a netdev
927  * @netdev: network interface device structure
928  */
929 static inline struct idpf_vport *idpf_netdev_to_vport(struct net_device *netdev)
930 {
931 	struct idpf_netdev_priv *np = netdev_priv(netdev);
932 
933 	return np->vport;
934 }
935 
936 /**
937  * idpf_netdev_to_adapter - Get adapter handle from a netdev
938  * @netdev: Network interface device structure
939  */
940 static inline struct idpf_adapter *idpf_netdev_to_adapter(struct net_device *netdev)
941 {
942 	struct idpf_netdev_priv *np = netdev_priv(netdev);
943 
944 	return np->adapter;
945 }
946 
947 /**
948  * idpf_is_feature_ena - Determine if a particular feature is enabled
949  * @vport: Vport to check
950  * @feature: Netdev flag to check
951  *
952  * Returns true if the given feature is enabled, false otherwise.
953  */
954 static inline bool idpf_is_feature_ena(const struct idpf_vport *vport,
955 				       netdev_features_t feature)
956 {
957 	return vport->netdev->features & feature;
958 }
959 
960 /**
961  * idpf_get_max_tx_hdr_size - Get the max TX header size
962  * @adapter: Driver specific private structure
963  */
964 static inline u16 idpf_get_max_tx_hdr_size(struct idpf_adapter *adapter)
965 {
966 	return le16_to_cpu(adapter->caps.max_tx_hdr_size);
967 }
968 
969 /**
970  * idpf_vport_ctrl_lock - Acquire the vport control lock
971  * @netdev: Network interface device structure
972  *
973  * This lock should be used by non-datapath code to protect against vport
974  * destruction.
975  */
976 static inline void idpf_vport_ctrl_lock(struct net_device *netdev)
977 {
978 	struct idpf_netdev_priv *np = netdev_priv(netdev);
979 
980 	mutex_lock(&np->adapter->vport_ctrl_lock);
981 }
982 
983 /**
984  * idpf_vport_ctrl_unlock - Release the vport control lock
985  * @netdev: Network interface device structure
986  */
987 static inline void idpf_vport_ctrl_unlock(struct net_device *netdev)
988 {
989 	struct idpf_netdev_priv *np = netdev_priv(netdev);
990 
991 	mutex_unlock(&np->adapter->vport_ctrl_lock);
992 }
993 
994 static inline bool idpf_vport_ctrl_is_locked(struct net_device *netdev)
995 {
996 	struct idpf_netdev_priv *np = netdev_priv(netdev);
997 
998 	return mutex_is_locked(&np->adapter->vport_ctrl_lock);
999 }
1000 
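/* Illustrative sketch of the intended pattern for non-datapath callers
 * (hypothetical helper, not part of the driver): hold the vport control
 * lock around any vport access that could race with vport destruction,
 * e.g. from an ethtool or ndo callback:
 *
 *	static u32 example_read_vport_id(struct net_device *netdev)
 *	{
 *		u32 id;
 *
 *		idpf_vport_ctrl_lock(netdev);
 *		id = idpf_netdev_to_vport(netdev)->vport_id;
 *		idpf_vport_ctrl_unlock(netdev);
 *
 *		return id;
 *	}
 */
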
1001 void idpf_statistics_task(struct work_struct *work);
1002 void idpf_init_task(struct work_struct *work);
1003 void idpf_service_task(struct work_struct *work);
1004 void idpf_mbx_task(struct work_struct *work);
1005 void idpf_vc_event_task(struct work_struct *work);
1006 void idpf_dev_ops_init(struct idpf_adapter *adapter);
1007 void idpf_vf_dev_ops_init(struct idpf_adapter *adapter);
1008 int idpf_intr_req(struct idpf_adapter *adapter);
1009 void idpf_intr_rel(struct idpf_adapter *adapter);
1010 u16 idpf_get_max_tx_hdr_size(struct idpf_adapter *adapter);
1011 int idpf_initiate_soft_reset(struct idpf_vport *vport,
1012 			     enum idpf_vport_reset_cause reset_cause);
1013 void idpf_deinit_task(struct idpf_adapter *adapter);
1014 int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter,
1015 				u16 *q_vector_idxs,
1016 				struct idpf_vector_info *vec_info);
1017 void idpf_set_ethtool_ops(struct net_device *netdev);
1018 void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector,
1019 			       u16 itr, bool tx);
1020 int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs);
1021 
1022 u8 idpf_vport_get_hsplit(const struct idpf_vport *vport);
1023 bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val);
1024 int idpf_idc_init(struct idpf_adapter *adapter);
1025 int idpf_idc_init_aux_core_dev(struct idpf_adapter *adapter,
1026 			       enum iidc_function_type ftype);
1027 void idpf_idc_deinit_core_aux_device(struct iidc_rdma_core_dev_info *cdev_info);
1028 void idpf_idc_deinit_vport_aux_device(struct iidc_rdma_vport_dev_info *vdev_info);
1029 void idpf_idc_issue_reset_event(struct iidc_rdma_core_dev_info *cdev_info);
1030 void idpf_idc_vdev_mtu_event(struct iidc_rdma_vport_dev_info *vdev_info,
1031 			     enum iidc_rdma_event_type event_type);
1032 
1033 int idpf_add_del_fsteer_filters(struct idpf_adapter *adapter,
1034 				struct virtchnl2_flow_rule_add_del *rule,
1035 				enum virtchnl2_op opcode);
1036 #endif /* !_IDPF_H_ */
1037