/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2023 Intel Corporation */

#ifndef _IDPF_H_
#define _IDPF_H_

/* Forward declaration */
struct idpf_adapter;
struct idpf_vport;
struct idpf_vport_max_q;
struct idpf_q_vec_rsrc;
struct idpf_rss_data;

#include <net/pkt_sched.h>
#include <linux/aer.h>
#include <linux/etherdevice.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/bitfield.h>
#include <linux/sctp.h>
#include <linux/ethtool_netlink.h>
#include <net/gro.h>

#include <linux/net/intel/iidc_rdma.h>
#include <linux/net/intel/iidc_rdma_idpf.h>

#include "virtchnl2.h"
#include "idpf_txrx.h"
#include "idpf_controlq.h"

#define GETMAXVAL(num_bits)		GENMASK((num_bits) - 1, 0)

#define IDPF_NO_FREE_SLOT		0xffff

/* Default Mailbox settings */
#define IDPF_NUM_FILTERS_PER_MSG	20
#define IDPF_NUM_DFLT_MBX_Q		2	/* includes both TX and RX */
#define IDPF_DFLT_MBX_Q_LEN		64
#define IDPF_DFLT_MBX_ID		-1
/* maximum number of times to try before resetting mailbox */
#define IDPF_MB_MAX_ERR			20
#define IDPF_NUM_CHUNKS_PER_MSG(struct_sz, chunk_sz)	\
	((IDPF_CTLQ_MAX_BUF_LEN - (struct_sz)) / (chunk_sz))
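/* Worked example (illustrative numbers only): with the 4096-byte
 * IDPF_CTLQ_MAX_BUF_LEN, a hypothetical 40-byte message header and 32-byte
 * chunks give IDPF_NUM_CHUNKS_PER_MSG(40, 32) == (4096 - 40) / 32 == 126
 * chunks per mailbox message.
 */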

#define IDPF_WAIT_FOR_MARKER_TIMEO	500
#define IDPF_MAX_WAIT			500

/* available message levels */
#define IDPF_AVAIL_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

#define IDPF_DIM_PROFILE_SLOTS  5

#define IDPF_VIRTCHNL_VERSION_MAJOR VIRTCHNL2_VERSION_MAJOR_2
#define IDPF_VIRTCHNL_VERSION_MINOR VIRTCHNL2_VERSION_MINOR_0

/**
 * struct idpf_mac_filter
 * @list: list member field
 * @macaddr: MAC address
 * @remove: filter should be removed (virtchnl)
 * @add: filter should be added (virtchnl)
 */
struct idpf_mac_filter {
	struct list_head list;
	u8 macaddr[ETH_ALEN];
	bool remove;
	bool add;
};

/**
 * enum idpf_state - State machine to handle bring up
 * @__IDPF_VER_CHECK: Negotiate virtchnl version
 * @__IDPF_GET_CAPS: Negotiate capabilities
 * @__IDPF_INIT_SW: Init based on given capabilities
 * @__IDPF_STATE_LAST: Must be last, used to determine size
 */
enum idpf_state {
	__IDPF_VER_CHECK,
	__IDPF_GET_CAPS,
	__IDPF_INIT_SW,
	__IDPF_STATE_LAST,
};

/**
 * enum idpf_flags - Hard reset causes.
 * @IDPF_HR_FUNC_RESET: Hard reset due to a TX/RX timeout
 * @IDPF_HR_DRV_LOAD: Set on driver load for a clean HW
 * @IDPF_HR_RESET_IN_PROG: Reset in progress
 * @IDPF_REMOVE_IN_PROG: Driver remove in progress
 * @IDPF_MB_INTR_MODE: Mailbox in interrupt mode
 * @IDPF_VC_CORE_INIT: virtchnl core has been initialized
 * @IDPF_FLAGS_NBITS: Must be last
 */
enum idpf_flags {
	IDPF_HR_FUNC_RESET,
	IDPF_HR_DRV_LOAD,
	IDPF_HR_RESET_IN_PROG,
	IDPF_REMOVE_IN_PROG,
	IDPF_MB_INTR_MODE,
	IDPF_VC_CORE_INIT,
	IDPF_FLAGS_NBITS,
};

/**
 * enum idpf_cap_field - Offsets into capabilities struct for specific caps
 * @IDPF_BASE_CAPS: generic base capabilities
 * @IDPF_CSUM_CAPS: checksum offload capabilities
 * @IDPF_SEG_CAPS: segmentation offload capabilities
 * @IDPF_RSS_CAPS: RSS offload capabilities
 * @IDPF_HSPLIT_CAPS: Header split capabilities
 * @IDPF_RSC_CAPS: RSC offload capabilities
 * @IDPF_OTHER_CAPS: miscellaneous offloads
 *
 * Used when checking for a specific capability flag. Since the different
 * capability sets are not mutually exclusive numerically, the caller must
 * specify which type of capability they are checking for (see the usage
 * sketch after the enum definition).
 */
enum idpf_cap_field {
	IDPF_BASE_CAPS		= -1,
	IDPF_CSUM_CAPS		= offsetof(struct virtchnl2_get_capabilities,
					   csum_caps),
	IDPF_SEG_CAPS		= offsetof(struct virtchnl2_get_capabilities,
					   seg_caps),
	IDPF_RSS_CAPS		= offsetof(struct virtchnl2_get_capabilities,
					   rss_caps),
	IDPF_HSPLIT_CAPS	= offsetof(struct virtchnl2_get_capabilities,
					   hsplit_caps),
	IDPF_RSC_CAPS		= offsetof(struct virtchnl2_get_capabilities,
					   rsc_caps),
	IDPF_OTHER_CAPS		= offsetof(struct virtchnl2_get_capabilities,
					   other_caps),
};
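
/* Illustrative usage (sketch only, not lifted verbatim from the driver): the
 * field argument selects which capability word of
 * struct virtchnl2_get_capabilities is consulted and the flag is interpreted
 * within that word, e.g.:
 *
 *	if (idpf_is_cap_ena(adapter, IDPF_RSS_CAPS, VIRTCHNL2_FLOW_IPV4_TCP))
 *		// a single RSS flow type is supported
 *	if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
 *		// every RSS flow type in IDPF_CAP_RSS is supported
 *
 * idpf_is_cap_ena()/idpf_is_cap_ena_all() and IDPF_CAP_RSS are defined later
 * in this header.
 */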

/**
 * enum idpf_vport_state - Current vport state
 * @IDPF_VPORT_UP: Vport is up
 * @IDPF_VPORT_STATE_NBITS: Must be last, number of states
 */
enum idpf_vport_state {
	IDPF_VPORT_UP,
	IDPF_VPORT_STATE_NBITS
};

/**
 * struct idpf_netdev_priv - Struct to store vport back pointer
 * @adapter: Adapter back pointer
 * @vport: Vport back pointer
 * @vport_id: Vport identifier
 * @link_speed_mbps: Link speed in mbps
 * @vport_idx: Relative vport index
 * @max_tx_hdr_size: Max header length hardware can support
 * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
 * @state: See enum idpf_vport_state
 * @netstats: Packet and byte stats
 * @stats_lock: Lock to protect stats update
 */
struct idpf_netdev_priv {
	struct idpf_adapter *adapter;
	struct idpf_vport *vport;
	u32 vport_id;
	u32 link_speed_mbps;
	u16 vport_idx;
	u16 max_tx_hdr_size;
	u16 tx_max_bufs;
	DECLARE_BITMAP(state, IDPF_VPORT_STATE_NBITS);
	struct rtnl_link_stats64 netstats;
	spinlock_t stats_lock;
};

/**
 * struct idpf_reset_reg - Reset register offsets/masks
 * @rstat: Reset status register
 * @rstat_m: Reset status mask
 */
struct idpf_reset_reg {
	void __iomem *rstat;
	u32 rstat_m;
};

/**
 * struct idpf_vport_max_q - Queue limits
 * @max_rxq: Maximum number of RX queues supported
 * @max_txq: Maximum number of TX queues supported
 * @max_bufq: In splitq, maximum number of buffer queues supported
 * @max_complq: In splitq, maximum number of completion queues supported
 */
struct idpf_vport_max_q {
	u16 max_rxq;
	u16 max_txq;
	u16 max_bufq;
	u16 max_complq;
};

/**
 * struct idpf_reg_ops - Device specific register operation function pointers
 * @ctlq_reg_init: Mailbox control queue register initialization
 * @intr_reg_init: Traffic interrupt register initialization
 * @mb_intr_reg_init: Mailbox interrupt register initialization
 * @reset_reg_init: Reset register initialization
 * @trigger_reset: Trigger a reset to occur
 * @ptp_reg_init: PTP register initialization
 */
struct idpf_reg_ops {
	void (*ctlq_reg_init)(struct idpf_adapter *adapter,
			      struct idpf_ctlq_create_info *cq);
	int (*intr_reg_init)(struct idpf_vport *vport,
			     struct idpf_q_vec_rsrc *rsrc);
	void (*mb_intr_reg_init)(struct idpf_adapter *adapter);
	void (*reset_reg_init)(struct idpf_adapter *adapter);
	void (*trigger_reset)(struct idpf_adapter *adapter,
			      enum idpf_flags trig_cause);
	void (*ptp_reg_init)(const struct idpf_adapter *adapter);
};

#define IDPF_MMIO_REG_NUM_STATIC	2
#define IDPF_PF_MBX_REGION_SZ		4096
#define IDPF_PF_RSTAT_REGION_SZ		2048
#define IDPF_VF_MBX_REGION_SZ		10240
#define IDPF_VF_RSTAT_REGION_SZ		2048

/**
 * struct idpf_dev_ops - Device specific operations
 * @reg_ops: Register operations
 * @idc_init: IDC initialization
 * @static_reg_info: array of mailbox and rstat register info
 */
struct idpf_dev_ops {
	struct idpf_reg_ops reg_ops;

	int (*idc_init)(struct idpf_adapter *adapter);

	/* static_reg_info[0] is mailbox region, static_reg_info[1] is rstat */
	struct resource static_reg_info[IDPF_MMIO_REG_NUM_STATIC];
};

/**
 * enum idpf_vport_reset_cause - Vport soft reset causes
 * @IDPF_SR_Q_CHANGE: Soft reset queue change
 * @IDPF_SR_Q_DESC_CHANGE: Soft reset descriptor change
 * @IDPF_SR_MTU_CHANGE: Soft reset MTU change
 * @IDPF_SR_RSC_CHANGE: Soft reset RSC change
 */
enum idpf_vport_reset_cause {
	IDPF_SR_Q_CHANGE,
	IDPF_SR_Q_DESC_CHANGE,
	IDPF_SR_MTU_CHANGE,
	IDPF_SR_RSC_CHANGE,
};

/**
 * enum idpf_vport_flags - Vport flags
 * @IDPF_VPORT_DEL_QUEUES: To send delete queues message
 * @IDPF_VPORT_FLAGS_NBITS: Must be last
 */
enum idpf_vport_flags {
	IDPF_VPORT_DEL_QUEUES,
	IDPF_VPORT_FLAGS_NBITS,
};

/**
 * struct idpf_tstamp_stats - Tx timestamp statistics
 * @stats_sync: See struct u64_stats_sync
 * @packets: Number of packets successfully timestamped by the hardware
 * @discarded: Number of Tx skbs discarded due to cached PHC
 *	       being too old to correctly extend timestamp
 * @flushed: Number of Tx skbs flushed because the interface was closed
 */
struct idpf_tstamp_stats {
	struct u64_stats_sync stats_sync;
	u64_stats_t packets;
	u64_stats_t discarded;
	u64_stats_t flushed;
};

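/**
 * struct idpf_port_stats - Per-port offload and error statistics
 * @stats_sync: See struct u64_stats_sync
 * @rx_hw_csum_err: Packets with a hardware-reported checksum error
 * @rx_hsplit: Packets received with header split
 * @rx_hsplit_hbo: Header split packets with a header buffer overflow
 * @rx_bad_descs: Bad RX descriptors seen
 * @tx_linearize: Packets linearized before transmit
 * @tx_busy: Times transmit returned busy
 * @tx_drops: Packets dropped on transmit
 * @tx_dma_map_errs: TX DMA mapping failures
 * @vport_stats: Vport statistics reported by the device
 *
 * The member descriptions above are editorial additions inferred from the
 * field names and their use in the driver; treat them as descriptive rather
 * than normative.
 */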
struct idpf_port_stats {
	struct u64_stats_sync stats_sync;
	u64_stats_t rx_hw_csum_err;
	u64_stats_t rx_hsplit;
	u64_stats_t rx_hsplit_hbo;
	u64_stats_t rx_bad_descs;
	u64_stats_t tx_linearize;
	u64_stats_t tx_busy;
	u64_stats_t tx_drops;
	u64_stats_t tx_dma_map_errs;
	struct virtchnl2_vport_stats vport_stats;
};

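/**
 * struct idpf_fsteer_fltr - Flow steering filter entry
 * @list: list member field
 * @fs: ethtool RX flow spec describing the filter
 */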
struct idpf_fsteer_fltr {
	struct list_head list;
	struct ethtool_rx_flow_spec fs;
};

/**
 * struct idpf_q_vec_rsrc - handle for queue and vector resources
 * @dev: device pointer for DMA mapping
 * @q_vectors: array of queue vectors
 * @q_vector_idxs: starting index of queue vectors
 * @num_q_vectors: number of IRQ vectors allocated
 * @noirq_v_idx: ID of the NOIRQ vector
 * @noirq_dyn_ctl_ena: value to write to @noirq_dyn_ctl to enable it
 * @noirq_dyn_ctl: register to enable/disable the vector for NOIRQ queues
 * @txq_grps: array of TX queue groups
 * @txq_desc_count: TX queue descriptor count
 * @complq_desc_count: completion queue descriptor count
 * @txq_model: split queue or single queue queuing model
 * @num_txq: number of allocated TX queues
 * @num_complq: number of allocated completion queues
 * @num_txq_grp: number of TX queue groups
 * @xdp_txq_offset: index of the first XDPSQ (== number of regular SQs)
 * @num_rxq_grp: number of RX queue groups
 * @rxq_model: splitq queue or single queue queuing model
 * @rxq_grps: array of RX queue groups. Number of groups * number of RX queues
 *	      per group yields the total number of RX queues.
 * @num_rxq: number of allocated RX queues
 * @num_bufq: number of allocated buffer queues
 * @rxq_desc_count: RX queue descriptor count. *MUST* have enough descriptors
 *		    to complete all buffer descriptors for all buffer queues in
 *		    the worst case.
 * @bufq_desc_count: buffer queue descriptor count
 * @num_bufqs_per_qgrp: buffer queues per RX queue in a given grouping
 * @base_rxd: true if the driver should use base descriptors instead of flex
 */
struct idpf_q_vec_rsrc {
	struct device		*dev;
	struct idpf_q_vector	*q_vectors;
	u16			*q_vector_idxs;
	u16			num_q_vectors;
	u16			noirq_v_idx;
	u32			noirq_dyn_ctl_ena;
	void __iomem		*noirq_dyn_ctl;

	struct idpf_txq_group	*txq_grps;
	u32			txq_desc_count;
	u32			complq_desc_count;
	u32			txq_model;
	u16			num_txq;
	u16			num_complq;
	u16			num_txq_grp;
	u16			xdp_txq_offset;

	u16			num_rxq_grp;
	u32			rxq_model;
	struct idpf_rxq_group	*rxq_grps;
	u16			num_rxq;
	u16			num_bufq;
	u32			rxq_desc_count;
	u32			bufq_desc_count[IDPF_MAX_BUFQS_PER_RXQ_GRP];
	u8			num_bufqs_per_qgrp;
	bool			base_rxd;
};

/**
 * struct idpf_vport - Handle for netdevices and queue resources
 * @dflt_qv_rsrc: contains default queue and vector resources
 * @txqs: Used only in hotpath to get to the right queue very fast
 * @num_txq: Number of allocated TX queues
 * @num_xdp_txq: number of XDPSQs
 * @xdpsq_share: whether XDPSQ sharing is enabled
 * @xdp_prog: installed XDP program
 * @vdev_info: IDC vport device info pointer
 * @adapter: back pointer to associated adapter
 * @netdev: Associated net_device. Each vport should have one and only one
 *	    associated netdev.
 * @flags: See enum idpf_vport_flags
 * @compln_clean_budget: Work budget for completion clean
 * @vport_id: Device given vport identifier
 * @vport_type: Default SRIOV, SIOV, etc.
 * @idx: Software index in adapter vports struct
 * @max_mtu: device given max possible MTU
 * @default_mac_addr: device will give a default MAC to use
 * @rx_itr_profile: RX profiles for Dynamic Interrupt Moderation
 * @tx_itr_profile: TX profiles for Dynamic Interrupt Moderation
 * @port_stats: per port csum, header split, and other offload stats
 * @default_vport: Use this vport if one isn't specified
 * @crc_enable: Enable CRC insertion offload
 * @link_up: True if link is up
 * @tx_tstamp_caps: Capabilities negotiated for Tx timestamping
 * @tstamp_config: The Tx tstamp config
 * @tstamp_task: Tx timestamping task
 * @tstamp_stats: Tx timestamping statistics
 */
struct idpf_vport {
	struct idpf_q_vec_rsrc dflt_qv_rsrc;
	struct idpf_tx_queue **txqs;
	u16 num_txq;
	u16 num_xdp_txq;
	bool xdpsq_share;
	struct bpf_prog *xdp_prog;

	struct iidc_rdma_vport_dev_info *vdev_info;

	struct idpf_adapter *adapter;
	struct net_device *netdev;
	DECLARE_BITMAP(flags, IDPF_VPORT_FLAGS_NBITS);
	u32 compln_clean_budget;
	u32 vport_id;
	u16 vport_type;
	u16 idx;

	u16 max_mtu;
	u8 default_mac_addr[ETH_ALEN];
	u16 rx_itr_profile[IDPF_DIM_PROFILE_SLOTS];
	u16 tx_itr_profile[IDPF_DIM_PROFILE_SLOTS];

	struct idpf_port_stats port_stats;
	bool default_vport;
	bool crc_enable;
	bool link_up;

	struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
	struct kernel_hwtstamp_config tstamp_config;
	struct work_struct tstamp_task;
	struct idpf_tstamp_stats tstamp_stats;
};

/**
 * enum idpf_user_flags
 * @__IDPF_USER_FLAG_HSPLIT: header split state
 * @__IDPF_PROMISC_UC: Unicast promiscuous mode
 * @__IDPF_PROMISC_MC: Multicast promiscuous mode
 * @__IDPF_USER_FLAGS_NBITS: Must be last
 */
enum idpf_user_flags {
	__IDPF_USER_FLAG_HSPLIT = 0U,
	__IDPF_PROMISC_UC = 32,
	__IDPF_PROMISC_MC,

	__IDPF_USER_FLAGS_NBITS,
};

/**
 * struct idpf_rss_data - Associated RSS data
 * @rss_key_size: Size of RSS hash key
 * @rss_key: RSS hash key
 * @rss_lut_size: Size of RSS lookup table
 * @rss_lut: RSS lookup table
 */
struct idpf_rss_data {
	u16 rss_key_size;
	u8 *rss_key;
	u16 rss_lut_size;
	u32 *rss_lut;
};

/**
 * struct idpf_q_coalesce - User defined coalescing configuration values for
 *			   a single queue.
 * @tx_intr_mode: Dynamic TX ITR or not
 * @rx_intr_mode: Dynamic RX ITR or not
 * @tx_coalesce_usecs: TX interrupt throttling rate
 * @rx_coalesce_usecs: RX interrupt throttling rate
 *
 * Used to restore user coalescing configuration after a reset.
 */
struct idpf_q_coalesce {
	u32 tx_intr_mode;
	u32 rx_intr_mode;
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;
};

/**
 * struct idpf_vport_user_config_data - User defined configuration values for
 *					each vport.
 * @rss_data: See struct idpf_rss_data
 * @q_coalesce: Array of per queue coalescing data
 * @num_req_tx_qs: Number of user requested TX queues through ethtool
 * @num_req_rx_qs: Number of user requested RX queues through ethtool
 * @num_req_txq_desc: Number of user requested TX queue descriptors through
 *		      ethtool
 * @num_req_rxq_desc: Number of user requested RX queue descriptors through
 *		      ethtool
 * @xdp_prog: requested XDP program to install
 * @user_flags: User toggled config flags
 * @mac_filter_list: List of MAC filters
 * @num_fsteer_fltrs: number of flow steering filters
 * @flow_steer_list: list of flow steering filters
 *
 * Used to restore configuration after a reset as the vport will get wiped.
 */
struct idpf_vport_user_config_data {
	struct idpf_rss_data rss_data;
	struct idpf_q_coalesce *q_coalesce;
	u16 num_req_tx_qs;
	u16 num_req_rx_qs;
	u32 num_req_txq_desc;
	u32 num_req_rxq_desc;
	struct bpf_prog *xdp_prog;
	DECLARE_BITMAP(user_flags, __IDPF_USER_FLAGS_NBITS);
	struct list_head mac_filter_list;
	u32 num_fsteer_fltrs;
	struct list_head flow_steer_list;
};

/**
 * enum idpf_vport_config_flags - Vport config flags
 * @IDPF_VPORT_REG_NETDEV: Register netdev
 * @IDPF_VPORT_UP_REQUESTED: Set if interface up is requested on core reset
 * @IDPF_VPORT_CONFIG_FLAGS_NBITS: Must be last
 */
enum idpf_vport_config_flags {
	IDPF_VPORT_REG_NETDEV,
	IDPF_VPORT_UP_REQUESTED,
	IDPF_VPORT_CONFIG_FLAGS_NBITS,
};

/**
 * struct idpf_avail_queue_info
 * @avail_rxq: Available RX queues
 * @avail_txq: Available TX queues
 * @avail_bufq: Available buffer queues
 * @avail_complq: Available completion queues
 *
 * Maintain total queues available after allocating max queues to each vport.
 */
struct idpf_avail_queue_info {
	u16 avail_rxq;
	u16 avail_txq;
	u16 avail_bufq;
	u16 avail_complq;
};

/**
 * struct idpf_vector_info - Utility structure to pass function arguments as a
 *			     structure
 * @num_req_vecs: Vectors required based on the number of queues updated by the
 *		  user via ethtool
 * @num_curr_vecs: Current number of vectors, must be >= @num_req_vecs
 * @index: Relative starting index for vectors
 * @default_vport: Vectors are for default vport
 */
struct idpf_vector_info {
	u16 num_req_vecs;
	u16 num_curr_vecs;
	u16 index;
	bool default_vport;
};

/**
 * struct idpf_vector_lifo - Stack to maintain vector indexes used for vector
 *			     distribution algorithm
 * @top: Points to stack top i.e. next available vector index
 * @base: Always points to start of the free pool
 * @size: Total size of the vector stack
 * @vec_idx: Array to store all the vector indexes
 *
 * The vector stack maintains all the relative vector indexes at the *adapter*
 * level. It is divided into two parts: the 'default pool' and the 'free
 * pool'. The vector distribution algorithm gives priority to default vports
 * such that at least IDPF_MIN_Q_VEC vectors are allocated per default vport,
 * and the relative vector indexes for those are maintained in the default
 * pool. The free pool contains all of the unallocated vector indexes, which
 * can be allocated on demand. The mailbox vector index is maintained in the
 * default pool of the stack (see the layout sketch below the struct).
 */
struct idpf_vector_lifo {
	u16 top;
	u16 base;
	u16 size;
	u16 *vec_idx;
};
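
/* Illustrative layout sketch, derived only from the description above (not a
 * normative statement of the implementation):
 *
 *	vec_idx[0 .. base - 1]    default pool (mailbox + default vport vectors)
 *	vec_idx[base .. top - 1]  free-pool entries currently handed out
 *	vec_idx[top .. size - 1]  free-pool entries still available
 */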

/**
 * struct idpf_queue_id_reg_chunk - individual queue ID and register chunk
 * @qtail_reg_start: queue tail register offset
 * @qtail_reg_spacing: queue tail register spacing
 * @type: queue type of the queues in the chunk
 * @start_queue_id: starting queue ID in the chunk
 * @num_queues: number of queues in the chunk
 */
struct idpf_queue_id_reg_chunk {
	u64 qtail_reg_start;
	u32 qtail_reg_spacing;
	u32 type;
	u32 start_queue_id;
	u32 num_queues;
};

/**
 * struct idpf_queue_id_reg_info - queue ID and register chunk info received
 *				   over the mailbox
 * @num_chunks: number of chunks
 * @queue_chunks: array of chunks
 */
struct idpf_queue_id_reg_info {
	u16 num_chunks;
	struct idpf_queue_id_reg_chunk *queue_chunks;
};

/**
 * struct idpf_vport_config - Vport configuration data
 * @user_config: see struct idpf_vport_user_config_data
 * @max_q: Maximum possible queues
 * @qid_reg_info: Struct to store the queue ID and register info
 * @mac_filter_list_lock: Lock to protect mac filters
 * @flow_steer_list_lock: Lock to protect fsteer filters
 * @flags: See enum idpf_vport_config_flags
 */
struct idpf_vport_config {
	struct idpf_vport_user_config_data user_config;
	struct idpf_vport_max_q max_q;
	struct idpf_queue_id_reg_info qid_reg_info;
	spinlock_t mac_filter_list_lock;
	spinlock_t flow_steer_list_lock;
	DECLARE_BITMAP(flags, IDPF_VPORT_CONFIG_FLAGS_NBITS);
};

struct idpf_vc_xn_manager;

#define idpf_for_each_vport(adapter, iter) \
	for (struct idpf_vport **__##iter = &(adapter)->vports[0], \
	     *iter = (adapter)->max_vports ? *__##iter : NULL; \
	     iter; \
	     iter = (++__##iter) < &(adapter)->vports[(adapter)->max_vports] ? \
	     *__##iter : NULL)
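
/* Illustrative usage (sketch only; do_something() is a hypothetical callee):
 * the macro declares the iterator variable itself and terminates at the first
 * NULL entry or once max_vports entries have been walked:
 *
 *	idpf_for_each_vport(adapter, vport)
 *		do_something(vport);
 */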

/**
 * struct idpf_adapter - Device data struct generated on probe
 * @pdev: PCI device struct given on probe
 * @virt_ver_maj: Virtchnl version major
 * @virt_ver_min: Virtchnl version minor
 * @msg_enable: Debug message level enabled
 * @mb_wait_count: Number of times mailbox initialization has been attempted
 * @state: Init state machine
 * @flags: See enum idpf_flags
 * @reset_reg: See struct idpf_reset_reg
 * @hw: Device access data
 * @num_avail_msix: Available number of MSIX vectors
 * @num_msix_entries: Number of entries in MSIX table
 * @msix_entries: MSIX table
 * @num_rdma_msix_entries: Available number of MSIX vectors for RDMA
 * @rdma_msix_entries: RDMA MSIX table
 * @req_vec_chunks: Requested vector chunk data
 * @mb_vector: Mailbox vector data
 * @vector_stack: Stack to store the msix vector indexes
 * @irq_mb_handler: Handler for hard interrupt for mailbox
 * @tx_timeout_count: Number of TX timeouts that have occurred
 * @avail_queues: Device given queue limits
 * @vports: Array to store vports created by the driver
 * @netdevs: Associated Vport netdevs
 * @vport_params_reqd: Vport params requested
 * @vport_params_recvd: Vport params received
 * @vport_ids: Array of device given vport identifiers
 * @singleq_pt_lkup: Lookup table for singleq RX ptypes
 * @splitq_pt_lkup: Lookup table for splitq RX ptypes
 * @vport_config: Vport config parameters
 * @max_vports: Maximum vports that can be allocated
 * @num_alloc_vports: Current number of vports allocated
 * @next_vport: Next free slot in pf->vport[] - 0-based!
 * @init_task: Initialization task
 * @init_wq: Workqueue for initialization task
 * @serv_task: Periodically recurring maintenance task
 * @serv_wq: Workqueue for service task
 * @mbx_task: Task to handle mailbox interrupts
 * @mbx_wq: Workqueue for mailbox responses
 * @vc_event_task: Task to handle out of band virtchnl event notifications
 * @vc_event_wq: Workqueue for virtchnl events
 * @stats_task: Periodic statistics retrieval task
 * @stats_wq: Workqueue for statistics task
 * @caps: Negotiated capabilities with device
 * @vcxn_mngr: Virtchnl transaction manager
 * @dev_ops: See idpf_dev_ops
 * @cdev_info: IDC core device info pointer
 * @num_vfs: Number of VFs allocated through sysfs. The PF does not talk to
 *	     the VFs directly; the count is only used to initialize them
 * @crc_enable: Enable CRC insertion offload
 * @req_tx_splitq: TX split or single queue model to request
 * @req_rx_splitq: RX split or single queue model to request
 * @vport_ctrl_lock: Lock to protect the vport control flow
 * @vector_lock: Lock to protect vector distribution
 * @queue_lock: Lock to protect queue distribution
 * @vc_buf_lock: Lock to protect virtchnl buffer
 * @ptp: Storage for PTP-related data
 */
struct idpf_adapter {
	struct pci_dev *pdev;
	u32 virt_ver_maj;
	u32 virt_ver_min;

	u32 msg_enable;
	u32 mb_wait_count;
	enum idpf_state state;
	DECLARE_BITMAP(flags, IDPF_FLAGS_NBITS);
	struct idpf_reset_reg reset_reg;
	struct idpf_hw hw;
	u16 num_avail_msix;
	u16 num_msix_entries;
	struct msix_entry *msix_entries;
	u16 num_rdma_msix_entries;
	struct msix_entry *rdma_msix_entries;
	struct virtchnl2_alloc_vectors *req_vec_chunks;
	struct idpf_q_vector mb_vector;
	struct idpf_vector_lifo vector_stack;
	irqreturn_t (*irq_mb_handler)(int irq, void *data);

	u32 tx_timeout_count;
	struct idpf_avail_queue_info avail_queues;
	struct idpf_vport **vports;
	struct net_device **netdevs;
	struct virtchnl2_create_vport **vport_params_reqd;
	struct virtchnl2_create_vport **vport_params_recvd;
	u32 *vport_ids;

	struct libeth_rx_pt *singleq_pt_lkup;
	struct libeth_rx_pt *splitq_pt_lkup;

	struct idpf_vport_config **vport_config;
	u16 max_vports;
	u16 num_alloc_vports;
	u16 next_vport;

	struct delayed_work init_task;
	struct workqueue_struct *init_wq;
	struct delayed_work serv_task;
	struct workqueue_struct *serv_wq;
	struct delayed_work mbx_task;
	struct workqueue_struct *mbx_wq;
	struct delayed_work vc_event_task;
	struct workqueue_struct *vc_event_wq;
	struct delayed_work stats_task;
	struct workqueue_struct *stats_wq;
	struct virtchnl2_get_capabilities caps;
	struct idpf_vc_xn_manager *vcxn_mngr;

	struct idpf_dev_ops dev_ops;
	struct iidc_rdma_core_dev_info *cdev_info;
	int num_vfs;
	bool crc_enable;
	bool req_tx_splitq;
	bool req_rx_splitq;

	struct mutex vport_ctrl_lock;
	struct mutex vector_lock;
	struct mutex queue_lock;
	struct mutex vc_buf_lock;

	struct idpf_ptp *ptp;
};

/**
 * idpf_is_queue_model_split - check if queue model is split
 * @q_model: queue model single or split
 *
 * Returns true if queue model is split else false
 */
static inline int idpf_is_queue_model_split(u16 q_model)
{
	return !IS_ENABLED(CONFIG_IDPF_SINGLEQ) ||
	       q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT;
}

static inline bool idpf_xdp_enabled(const struct idpf_vport *vport)
{
	return vport->adapter && vport->xdp_prog;
}

#define idpf_is_cap_ena(adapter, field, flag) \
	idpf_is_capability_ena(adapter, false, field, flag)
#define idpf_is_cap_ena_all(adapter, field, flag) \
	idpf_is_capability_ena(adapter, true, field, flag)

bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
			    enum idpf_cap_field field, u64 flag);

/**
 * idpf_is_rdma_cap_ena - Determine if RDMA is supported
 * @adapter: private data struct
 *
 * Return: true if RDMA capability is enabled, false otherwise
 */
static inline bool idpf_is_rdma_cap_ena(struct idpf_adapter *adapter)
{
	return idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_RDMA);
}

#define IDPF_CAP_RSS (\
	VIRTCHNL2_FLOW_IPV4_TCP		|\
	VIRTCHNL2_FLOW_IPV4_UDP		|\
	VIRTCHNL2_FLOW_IPV4_SCTP	|\
	VIRTCHNL2_FLOW_IPV4_OTHER	|\
	VIRTCHNL2_FLOW_IPV6_TCP		|\
	VIRTCHNL2_FLOW_IPV6_UDP		|\
	VIRTCHNL2_FLOW_IPV6_SCTP	|\
	VIRTCHNL2_FLOW_IPV6_OTHER)

#define IDPF_CAP_RSC (\
	VIRTCHNL2_CAP_RSC_IPV4_TCP	|\
	VIRTCHNL2_CAP_RSC_IPV6_TCP)

#define IDPF_CAP_HSPLIT	(\
	VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4	|\
	VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6)

#define IDPF_CAP_TX_CSUM_L4V4 (\
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP	|\
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP)

#define IDPF_CAP_TX_CSUM_L4V6 (\
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP	|\
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP)

#define IDPF_CAP_RX_CSUM (\
	VIRTCHNL2_CAP_RX_CSUM_L3_IPV4		|\
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP	|\
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP	|\
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP	|\
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP)

#define IDPF_CAP_TX_SCTP_CSUM (\
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP	|\
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP)

#define IDPF_CAP_TUNNEL_TX_CSUM (\
	VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL	|\
	VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL)

/**
 * idpf_get_reserved_vecs - Get reserved vectors
 * @adapter: private data struct
 */
static inline u16 idpf_get_reserved_vecs(struct idpf_adapter *adapter)
{
	return le16_to_cpu(adapter->caps.num_allocated_vectors);
}

/**
 * idpf_get_reserved_rdma_vecs - Get reserved RDMA vectors
 * @adapter: private data struct
 *
 * Return: number of vectors reserved for RDMA
 */
static inline u16 idpf_get_reserved_rdma_vecs(struct idpf_adapter *adapter)
{
	return le16_to_cpu(adapter->caps.num_rdma_allocated_vectors);
}

/**
 * idpf_get_default_vports - Get default number of vports
 * @adapter: private data struct
 */
static inline u16 idpf_get_default_vports(struct idpf_adapter *adapter)
{
	return le16_to_cpu(adapter->caps.default_num_vports);
}

/**
 * idpf_get_max_vports - Get max number of vports
 * @adapter: private data struct
 */
static inline u16 idpf_get_max_vports(struct idpf_adapter *adapter)
{
	return le16_to_cpu(adapter->caps.max_vports);
}

/**
 * idpf_get_max_tx_bufs - Get max scatter-gather buffers supported by the device
 * @adapter: private data struct
 */
static inline unsigned int idpf_get_max_tx_bufs(struct idpf_adapter *adapter)
{
	return adapter->caps.max_sg_bufs_per_tx_pkt;
}

/**
 * idpf_get_min_tx_pkt_len - Get min packet length supported by the device
 * @adapter: private data struct
 */
static inline u8 idpf_get_min_tx_pkt_len(struct idpf_adapter *adapter)
{
	u8 pkt_len = adapter->caps.min_sso_packet_len;

	return pkt_len ? pkt_len : IDPF_TX_MIN_PKT_LEN;
}

/**
 * idpf_get_mbx_reg_addr - Get BAR0 mailbox register address
 * @adapter: private data struct
 * @reg_offset: register offset value
 *
 * Return: BAR0 mailbox register address based on register offset.
 */
static inline void __iomem *idpf_get_mbx_reg_addr(struct idpf_adapter *adapter,
						  resource_size_t reg_offset)
{
	return adapter->hw.mbx.vaddr + reg_offset;
}

/**
 * idpf_get_rstat_reg_addr - Get BAR0 rstat register address
 * @adapter: private data struct
 * @reg_offset: register offset value
 *
 * Return: BAR0 rstat register address based on register offset.
 */
static inline void __iomem *idpf_get_rstat_reg_addr(struct idpf_adapter *adapter,
						    resource_size_t reg_offset)
{
	reg_offset -= adapter->dev_ops.static_reg_info[1].start;

	return adapter->hw.rstat.vaddr + reg_offset;
}

/**
 * idpf_get_reg_addr - Get BAR0 register address
 * @adapter: private data struct
 * @reg_offset: register offset value
 *
 * Based on the register offset, return the actual BAR0 register address
 */
static inline void __iomem *idpf_get_reg_addr(struct idpf_adapter *adapter,
					      resource_size_t reg_offset)
{
	struct idpf_hw *hw = &adapter->hw;

	for (int i = 0; i < hw->num_lan_regs; i++) {
		struct idpf_mmio_reg *region = &hw->lan_regs[i];

		if (reg_offset >= region->addr_start &&
		    reg_offset < (region->addr_start + region->addr_len)) {
			/* Convert the offset so that it is relative to the
			 * start of the region.  Then add the base address of
			 * the region to get the final address.
			 */
			reg_offset -= region->addr_start;

			return region->vaddr + reg_offset;
		}
	}

	/* It's impossible to hit this case with offsets from the CP. But if we
	 * do for any other reason, the kernel will panic on that register
	 * access. Might as well do it here to make it clear what's happening.
	 */
	BUG();

	return NULL;
}

/**
 * idpf_is_reset_detected - check if we were reset at some point
 * @adapter: driver specific private structure
 *
 * Returns true if we are either in reset currently or were previously reset.
 */
static inline bool idpf_is_reset_detected(struct idpf_adapter *adapter)
{
	if (!adapter->hw.arq)
		return true;

	return !(readl(idpf_get_mbx_reg_addr(adapter, adapter->hw.arq->reg.len)) &
		 adapter->hw.arq->reg.len_mask);
}

/**
 * idpf_is_reset_in_prog - check if reset is in progress
 * @adapter: driver specific private structure
 *
 * Returns true if hard reset is in progress, false otherwise
 */
static inline bool idpf_is_reset_in_prog(struct idpf_adapter *adapter)
{
	return (test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags) ||
		test_bit(IDPF_HR_FUNC_RESET, adapter->flags) ||
		test_bit(IDPF_HR_DRV_LOAD, adapter->flags));
}

/**
 * idpf_netdev_to_vport - get a vport handle from a netdev
 * @netdev: network interface device structure
 */
static inline struct idpf_vport *idpf_netdev_to_vport(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);

	return np->vport;
}

/**
 * idpf_netdev_to_adapter - Get adapter handle from a netdev
 * @netdev: Network interface device structure
 */
static inline struct idpf_adapter *idpf_netdev_to_adapter(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);

	return np->adapter;
}

/**
 * idpf_is_feature_ena - Determine if a particular feature is enabled
 * @vport: Vport to check
 * @feature: Netdev flag to check
 *
 * Returns true if the given feature is enabled, false otherwise.
 */
static inline bool idpf_is_feature_ena(const struct idpf_vport *vport,
				       netdev_features_t feature)
{
	return vport->netdev->features & feature;
}

/**
 * idpf_get_max_tx_hdr_size - Get the maximum TX header size
 * @adapter: Driver specific private structure
 */
static inline u16 idpf_get_max_tx_hdr_size(struct idpf_adapter *adapter)
{
	return le16_to_cpu(adapter->caps.max_tx_hdr_size);
}

/**
 * idpf_vport_ctrl_lock - Acquire the vport control lock
 * @netdev: Network interface device structure
 *
 * This lock should be used by non-datapath code to protect against vport
 * destruction; an illustrative usage sketch follows idpf_vport_ctrl_unlock().
 */
static inline void idpf_vport_ctrl_lock(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);

	mutex_lock(&np->adapter->vport_ctrl_lock);
}

/**
 * idpf_vport_ctrl_unlock - Release the vport control lock
 * @netdev: Network interface device structure
 */
static inline void idpf_vport_ctrl_unlock(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);

	mutex_unlock(&np->adapter->vport_ctrl_lock);
}
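
/* Illustrative control-path usage (sketch only; the body is hypothetical):
 *
 *	idpf_vport_ctrl_lock(netdev);
 *	vport = idpf_netdev_to_vport(netdev);
 *	...configure or query the vport...
 *	idpf_vport_ctrl_unlock(netdev);
 *
 * Holding the lock prevents the vport from being destroyed underneath the
 * caller.
 */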

static inline bool idpf_vport_ctrl_is_locked(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);

	return mutex_is_locked(&np->adapter->vport_ctrl_lock);
}

void idpf_statistics_task(struct work_struct *work);
void idpf_init_task(struct work_struct *work);
void idpf_service_task(struct work_struct *work);
void idpf_mbx_task(struct work_struct *work);
void idpf_vc_event_task(struct work_struct *work);
void idpf_dev_ops_init(struct idpf_adapter *adapter);
void idpf_vf_dev_ops_init(struct idpf_adapter *adapter);
int idpf_intr_req(struct idpf_adapter *adapter);
void idpf_intr_rel(struct idpf_adapter *adapter);
u16 idpf_get_max_tx_hdr_size(struct idpf_adapter *adapter);
int idpf_initiate_soft_reset(struct idpf_vport *vport,
			     enum idpf_vport_reset_cause reset_cause);
void idpf_deinit_task(struct idpf_adapter *adapter);
int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter,
				u16 *q_vector_idxs,
				struct idpf_vector_info *vec_info);
void idpf_set_ethtool_ops(struct net_device *netdev);
void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector,
			       u16 itr, bool tx);
int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs);

u8 idpf_vport_get_hsplit(const struct idpf_vport *vport);
bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val);
int idpf_idc_init(struct idpf_adapter *adapter);
int idpf_idc_init_aux_core_dev(struct idpf_adapter *adapter,
			       enum iidc_function_type ftype);
void idpf_idc_deinit_core_aux_device(struct iidc_rdma_core_dev_info *cdev_info);
void idpf_idc_deinit_vport_aux_device(struct iidc_rdma_vport_dev_info *vdev_info);
void idpf_idc_issue_reset_event(struct iidc_rdma_core_dev_info *cdev_info);
void idpf_idc_vdev_mtu_event(struct iidc_rdma_vport_dev_info *vdev_info,
			     enum iidc_rdma_event_type event_type);

int idpf_add_del_fsteer_filters(struct idpf_adapter *adapter,
				struct virtchnl2_flow_rule_add_del *rule,
				enum virtchnl2_op opcode);
#endif /* !_IDPF_H_ */