/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2023 Intel Corporation */

#ifndef _IDPF_H_
#define _IDPF_H_

/* Forward declarations */
struct idpf_adapter;
struct idpf_vport;
struct idpf_vport_max_q;

#include <net/pkt_sched.h>
#include <linux/aer.h>
#include <linux/etherdevice.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/bitfield.h>
#include <linux/sctp.h>
#include <linux/ethtool_netlink.h>
#include <net/gro.h>

#include <linux/net/intel/iidc_rdma.h>
#include <linux/net/intel/iidc_rdma_idpf.h>

#include "virtchnl2.h"
#include "idpf_txrx.h"
#include "idpf_controlq.h"

#define GETMAXVAL(num_bits)		GENMASK((num_bits) - 1, 0)

#define IDPF_NO_FREE_SLOT		0xffff

/* Default Mailbox settings */
#define IDPF_NUM_FILTERS_PER_MSG	20
#define IDPF_NUM_DFLT_MBX_Q		2	/* includes both TX and RX */
#define IDPF_DFLT_MBX_Q_LEN		64
#define IDPF_DFLT_MBX_ID		-1
/* maximum number of times to try before resetting mailbox */
#define IDPF_MB_MAX_ERR			20
#define IDPF_NUM_CHUNKS_PER_MSG(struct_sz, chunk_sz)	\
	((IDPF_CTLQ_MAX_BUF_LEN - (struct_sz)) / (chunk_sz))

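/* Illustrative sketch (not used by the driver): how many queue-register
 * chunks would fit in one mailbox buffer when the message header is a
 * struct virtchnl2_add_queues. The helper name is hypothetical; both
 * structures come from virtchnl2.h.
 */
static inline u32 idpf_example_add_queues_chunks(void)
{
	return IDPF_NUM_CHUNKS_PER_MSG(sizeof(struct virtchnl2_add_queues),
				       sizeof(struct virtchnl2_queue_reg_chunk));
}
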
#define IDPF_MAX_WAIT			500

/* available message levels */
#define IDPF_AVAIL_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

#define IDPF_DIM_PROFILE_SLOTS  5

#define IDPF_VIRTCHNL_VERSION_MAJOR VIRTCHNL2_VERSION_MAJOR_2
#define IDPF_VIRTCHNL_VERSION_MINOR VIRTCHNL2_VERSION_MINOR_0

/**
 * struct idpf_mac_filter - MAC filter list entry
 * @list: list member field
 * @macaddr: MAC address
 * @remove: filter should be removed (virtchnl)
 * @add: filter should be added (virtchnl)
 */
struct idpf_mac_filter {
	struct list_head list;
	u8 macaddr[ETH_ALEN];
	bool remove;
	bool add;
};

/**
 * enum idpf_state - State machine to handle bring up
 * @__IDPF_VER_CHECK: Negotiate virtchnl version
 * @__IDPF_GET_CAPS: Negotiate capabilities
 * @__IDPF_INIT_SW: Init based on given capabilities
 * @__IDPF_STATE_LAST: Must be last, used to determine size
 */
enum idpf_state {
	__IDPF_VER_CHECK,
	__IDPF_GET_CAPS,
	__IDPF_INIT_SW,
	__IDPF_STATE_LAST,
};

/**
 * enum idpf_flags - Hard reset causes.
 * @IDPF_HR_FUNC_RESET: Hard reset due to a TX/RX timeout
 * @IDPF_HR_DRV_LOAD: Set on driver load for a clean HW
 * @IDPF_HR_RESET_IN_PROG: Reset in progress
 * @IDPF_REMOVE_IN_PROG: Driver remove in progress
 * @IDPF_MB_INTR_MODE: Mailbox in interrupt mode
 * @IDPF_VC_CORE_INIT: virtchnl core has been initialized
 * @IDPF_FLAGS_NBITS: Must be last
 */
enum idpf_flags {
	IDPF_HR_FUNC_RESET,
	IDPF_HR_DRV_LOAD,
	IDPF_HR_RESET_IN_PROG,
	IDPF_REMOVE_IN_PROG,
	IDPF_MB_INTR_MODE,
	IDPF_VC_CORE_INIT,
	IDPF_FLAGS_NBITS,
};

/**
 * enum idpf_cap_field - Offsets into capabilities struct for specific caps
 * @IDPF_BASE_CAPS: generic base capabilities
 * @IDPF_CSUM_CAPS: checksum offload capabilities
 * @IDPF_SEG_CAPS: segmentation offload capabilities
 * @IDPF_RSS_CAPS: RSS offload capabilities
 * @IDPF_HSPLIT_CAPS: Header split capabilities
 * @IDPF_RSC_CAPS: RSC offload capabilities
 * @IDPF_OTHER_CAPS: miscellaneous offloads
 *
 * Used when checking for a specific capability flag. Since different
 * capability sets are not mutually exclusive numerically, the caller must
 * specify which type of capability they are checking for.
 */
enum idpf_cap_field {
	IDPF_BASE_CAPS		= -1,
	IDPF_CSUM_CAPS		= offsetof(struct virtchnl2_get_capabilities,
					   csum_caps),
	IDPF_SEG_CAPS		= offsetof(struct virtchnl2_get_capabilities,
					   seg_caps),
	IDPF_RSS_CAPS		= offsetof(struct virtchnl2_get_capabilities,
					   rss_caps),
	IDPF_HSPLIT_CAPS	= offsetof(struct virtchnl2_get_capabilities,
					   hsplit_caps),
	IDPF_RSC_CAPS		= offsetof(struct virtchnl2_get_capabilities,
					   rsc_caps),
	IDPF_OTHER_CAPS		= offsetof(struct virtchnl2_get_capabilities,
					   other_caps),
};

/**
 * enum idpf_vport_state - Current vport state
 * @__IDPF_VPORT_DOWN: Vport is down
 * @__IDPF_VPORT_UP: Vport is up
 * @__IDPF_VPORT_STATE_LAST: Must be last, number of states
 */
enum idpf_vport_state {
	__IDPF_VPORT_DOWN,
	__IDPF_VPORT_UP,
	__IDPF_VPORT_STATE_LAST,
};

/**
 * struct idpf_netdev_priv - Struct to store vport back pointer
 * @adapter: Adapter back pointer
 * @vport: Vport back pointer
 * @vport_id: Vport identifier
 * @link_speed_mbps: Link speed in Mbps
 * @vport_idx: Relative vport index
 * @max_tx_hdr_size: Max header length hardware can support
 * @state: See enum idpf_vport_state
 * @netstats: Packet and byte stats
 * @stats_lock: Lock to protect stats update
 */
struct idpf_netdev_priv {
	struct idpf_adapter *adapter;
	struct idpf_vport *vport;
	u32 vport_id;
	u32 link_speed_mbps;
	u16 vport_idx;
	u16 max_tx_hdr_size;
	enum idpf_vport_state state;
	struct rtnl_link_stats64 netstats;
	spinlock_t stats_lock;
};

/**
 * struct idpf_reset_reg - Reset register offsets/masks
 * @rstat: Reset status register
 * @rstat_m: Reset status mask
 */
struct idpf_reset_reg {
	void __iomem *rstat;
	u32 rstat_m;
};

/**
 * struct idpf_vport_max_q - Queue limits
 * @max_rxq: Maximum number of RX queues supported
 * @max_txq: Maximum number of TX queues supported
 * @max_bufq: In splitq, maximum number of buffer queues supported
 * @max_complq: In splitq, maximum number of completion queues supported
 */
struct idpf_vport_max_q {
	u16 max_rxq;
	u16 max_txq;
	u16 max_bufq;
	u16 max_complq;
};

/**
 * struct idpf_reg_ops - Device specific register operation function pointers
 * @ctlq_reg_init: Mailbox control queue register initialization
 * @intr_reg_init: Traffic interrupt register initialization
 * @mb_intr_reg_init: Mailbox interrupt register initialization
 * @reset_reg_init: Reset register initialization
 * @trigger_reset: Trigger a reset
 * @ptp_reg_init: PTP register initialization
 */
struct idpf_reg_ops {
	void (*ctlq_reg_init)(struct idpf_adapter *adapter,
			      struct idpf_ctlq_create_info *cq);
	int (*intr_reg_init)(struct idpf_vport *vport);
	void (*mb_intr_reg_init)(struct idpf_adapter *adapter);
	void (*reset_reg_init)(struct idpf_adapter *adapter);
	void (*trigger_reset)(struct idpf_adapter *adapter,
			      enum idpf_flags trig_cause);
	void (*ptp_reg_init)(const struct idpf_adapter *adapter);
};

#define IDPF_MMIO_REG_NUM_STATIC	2
#define IDPF_PF_MBX_REGION_SZ		4096
#define IDPF_PF_RSTAT_REGION_SZ		2048
#define IDPF_VF_MBX_REGION_SZ		10240
#define IDPF_VF_RSTAT_REGION_SZ		2048

/**
 * struct idpf_dev_ops - Device specific operations
 * @reg_ops: Register operations
 * @idc_init: IDC initialization
 * @static_reg_info: array of mailbox and rstat register info
 */
struct idpf_dev_ops {
	struct idpf_reg_ops reg_ops;

	int (*idc_init)(struct idpf_adapter *adapter);

	/* static_reg_info[0] is mailbox region, static_reg_info[1] is rstat */
	struct resource static_reg_info[IDPF_MMIO_REG_NUM_STATIC];
};

/**
 * enum idpf_vport_reset_cause - Vport soft reset causes
 * @IDPF_SR_Q_CHANGE: Soft reset queue change
 * @IDPF_SR_Q_DESC_CHANGE: Soft reset descriptor change
 * @IDPF_SR_MTU_CHANGE: Soft reset MTU change
 * @IDPF_SR_RSC_CHANGE: Soft reset RSC change
 */
enum idpf_vport_reset_cause {
	IDPF_SR_Q_CHANGE,
	IDPF_SR_Q_DESC_CHANGE,
	IDPF_SR_MTU_CHANGE,
	IDPF_SR_RSC_CHANGE,
};

/**
 * enum idpf_vport_flags - Vport flags
 * @IDPF_VPORT_DEL_QUEUES: To send delete queues message
 * @IDPF_VPORT_SW_MARKER: Indicates that processing of software marker packets
 *			  for a TX pipe drain is complete
 * @IDPF_VPORT_FLAGS_NBITS: Must be last
 */
enum idpf_vport_flags {
	IDPF_VPORT_DEL_QUEUES,
	IDPF_VPORT_SW_MARKER,
	IDPF_VPORT_FLAGS_NBITS,
};

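/**
 * struct idpf_port_stats - Per-port offload statistics
 * @stats_sync: Synchronization for 64-bit stats on 32-bit architectures
 * @rx_hw_csum_err: Packets received with a checksum error reported by HW
 * @rx_hsplit: Packets received with header split
 * @rx_hsplit_hbo: Header split packets with header buffer overflow
 * @rx_bad_descs: RX descriptors with errors
 * @tx_linearize: TX packets that needed to be linearized
 * @tx_busy: TX busy events
 * @tx_drops: TX packets dropped
 * @tx_dma_map_errs: TX DMA mapping errors
 * @vport_stats: Stats reported by the device over virtchnl
 */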
struct idpf_port_stats {
	struct u64_stats_sync stats_sync;
	u64_stats_t rx_hw_csum_err;
	u64_stats_t rx_hsplit;
	u64_stats_t rx_hsplit_hbo;
	u64_stats_t rx_bad_descs;
	u64_stats_t tx_linearize;
	u64_stats_t tx_busy;
	u64_stats_t tx_drops;
	u64_stats_t tx_dma_map_errs;
	struct virtchnl2_vport_stats vport_stats;
};

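/* Writer-side sketch (illustrative helper, not part of the driver): the
 * u64_stats_t counters above must be modified inside a
 * u64_stats_update_begin()/u64_stats_update_end() section so that readers
 * on 32-bit systems see consistent 64-bit values.
 */
static inline void idpf_example_count_tx_drop(struct idpf_port_stats *pstats)
{
	u64_stats_update_begin(&pstats->stats_sync);
	u64_stats_inc(&pstats->tx_drops);
	u64_stats_update_end(&pstats->stats_sync);
}
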
/**
 * struct idpf_vport - Handle for netdevices and queue resources
 * @num_txq: Number of allocated TX queues
 * @num_complq: Number of allocated completion queues
 * @txq_desc_count: TX queue descriptor count
 * @complq_desc_count: Completion queue descriptor count
 * @compln_clean_budget: Work budget for completion clean
 * @num_txq_grp: Number of TX queue groups
 * @txq_grps: Array of TX queue groups
 * @txq_model: Split queue or single queue queuing model
 * @txqs: Used only in hotpath to get to the right queue very fast
 * @crc_enable: Enable CRC insertion offload
 * @num_rxq: Number of allocated RX queues
 * @num_bufq: Number of allocated buffer queues
 * @rxq_desc_count: RX queue descriptor count. *MUST* have enough descriptors
 *		    to complete all buffer descriptors for all buffer queues in
 *		    the worst case.
 * @num_bufqs_per_qgrp: Buffer queues per RX queue in a given grouping
 * @bufq_desc_count: Buffer queue descriptor count
 * @num_rxq_grp: Number of RX queue groups
 * @rxq_grps: Array of RX queue groups. The number of groups times the number
 *	      of RX queues per group yields the total number of RX queues.
 * @rxq_model: Split queue or single queue queuing model
 * @rx_ptype_lkup: Lookup table for ptypes on RX
 * @vdev_info: IDC vport device info pointer
 * @adapter: back pointer to associated adapter
 * @netdev: Associated net_device. Each vport should have one and only one
 *	    associated netdev.
 * @flags: See enum idpf_vport_flags
 * @vport_type: Default SRIOV, SIOV, etc.
 * @vport_id: Device given vport identifier
 * @idx: Software index in adapter vports struct
 * @default_vport: Use this vport if one isn't specified
 * @base_rxd: True if the driver should use base descriptors instead of flex
 * @num_q_vectors: Number of IRQ vectors allocated
 * @q_vectors: Array of queue vectors
 * @q_vector_idxs: Starting index of queue vectors
 * @max_mtu: device given max possible MTU
 * @default_mac_addr: device will give a default MAC to use
 * @rx_itr_profile: RX profiles for Dynamic Interrupt Moderation
 * @tx_itr_profile: TX profiles for Dynamic Interrupt Moderation
 * @port_stats: per port csum, header split, and other offload stats
 * @link_up: True if link is up
 * @sw_marker_wq: Wait queue for marker packets
 * @tx_tstamp_caps: Capabilities negotiated for Tx timestamping
 * @tstamp_config: The Tx tstamp config
 * @tstamp_task: Tx timestamping task
 */
struct idpf_vport {
	u16 num_txq;
	u16 num_complq;
	u32 txq_desc_count;
	u32 complq_desc_count;
	u32 compln_clean_budget;
	u16 num_txq_grp;
	struct idpf_txq_group *txq_grps;
	u32 txq_model;
	struct idpf_tx_queue **txqs;
	bool crc_enable;

	u16 num_rxq;
	u16 num_bufq;
	u32 rxq_desc_count;
	u8 num_bufqs_per_qgrp;
	u32 bufq_desc_count[IDPF_MAX_BUFQS_PER_RXQ_GRP];
	u16 num_rxq_grp;
	struct idpf_rxq_group *rxq_grps;
	u32 rxq_model;
	struct libeth_rx_pt *rx_ptype_lkup;

	struct iidc_rdma_vport_dev_info *vdev_info;

	struct idpf_adapter *adapter;
	struct net_device *netdev;
	DECLARE_BITMAP(flags, IDPF_VPORT_FLAGS_NBITS);
	u16 vport_type;
	u32 vport_id;
	u16 idx;
	bool default_vport;
	bool base_rxd;

	u16 num_q_vectors;
	struct idpf_q_vector *q_vectors;
	u16 *q_vector_idxs;
	u16 max_mtu;
	u8 default_mac_addr[ETH_ALEN];
	u16 rx_itr_profile[IDPF_DIM_PROFILE_SLOTS];
	u16 tx_itr_profile[IDPF_DIM_PROFILE_SLOTS];
	struct idpf_port_stats port_stats;

	bool link_up;

	wait_queue_head_t sw_marker_wq;

	struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
	struct kernel_hwtstamp_config tstamp_config;
	struct work_struct tstamp_task;
};

/**
 * enum idpf_user_flags - User toggled configuration flags
 * @__IDPF_USER_FLAG_HSPLIT: header split state
 * @__IDPF_PROMISC_UC: Unicast promiscuous mode
 * @__IDPF_PROMISC_MC: Multicast promiscuous mode
 * @__IDPF_USER_FLAGS_NBITS: Must be last
 */
enum idpf_user_flags {
	__IDPF_USER_FLAG_HSPLIT = 0U,
	__IDPF_PROMISC_UC = 32,
	__IDPF_PROMISC_MC,

	__IDPF_USER_FLAGS_NBITS,
};

/**
 * struct idpf_rss_data - Associated RSS data
 * @rss_key_size: Size of RSS hash key
 * @rss_key: RSS hash key
 * @rss_lut_size: Size of RSS lookup table
 * @rss_lut: RSS lookup table
 * @cached_lut: Used to restore the previously initialized RSS LUT
 */
struct idpf_rss_data {
	u16 rss_key_size;
	u8 *rss_key;
	u16 rss_lut_size;
	u32 *rss_lut;
	u32 *cached_lut;
};

/**
 * struct idpf_vport_user_config_data - User defined configuration values for
 *					each vport.
 * @rss_data: See struct idpf_rss_data
 * @num_req_tx_qs: Number of user requested TX queues through ethtool
 * @num_req_rx_qs: Number of user requested RX queues through ethtool
 * @num_req_txq_desc: Number of user requested TX queue descriptors through
 *		      ethtool
 * @num_req_rxq_desc: Number of user requested RX queue descriptors through
 *		      ethtool
 * @user_flags: User toggled config flags
 * @mac_filter_list: List of MAC filters
 *
 * Used to restore configuration after a reset as the vport will get wiped.
 */
struct idpf_vport_user_config_data {
	struct idpf_rss_data rss_data;
	u16 num_req_tx_qs;
	u16 num_req_rx_qs;
	u32 num_req_txq_desc;
	u32 num_req_rxq_desc;
	DECLARE_BITMAP(user_flags, __IDPF_USER_FLAGS_NBITS);
	struct list_head mac_filter_list;
};

/**
 * enum idpf_vport_config_flags - Vport config flags
 * @IDPF_VPORT_REG_NETDEV: Register netdev
 * @IDPF_VPORT_UP_REQUESTED: Set if interface up is requested on core reset
 * @IDPF_VPORT_CONFIG_FLAGS_NBITS: Must be last
 */
enum idpf_vport_config_flags {
	IDPF_VPORT_REG_NETDEV,
	IDPF_VPORT_UP_REQUESTED,
	IDPF_VPORT_CONFIG_FLAGS_NBITS,
};

/**
 * struct idpf_avail_queue_info - Available queue counts
 * @avail_rxq: Available RX queues
 * @avail_txq: Available TX queues
 * @avail_bufq: Available buffer queues
 * @avail_complq: Available completion queues
 *
 * Maintain total queues available after allocating max queues to each vport.
 */
struct idpf_avail_queue_info {
	u16 avail_rxq;
	u16 avail_txq;
	u16 avail_bufq;
	u16 avail_complq;
};

/**
 * struct idpf_vector_info - Utility structure used to pass vector allocation
 *			     parameters as a group
 * @num_req_vecs: Vectors required based on the number of queues updated by the
 *		  user via ethtool
 * @num_curr_vecs: Current number of vectors, must be >= @num_req_vecs
 * @index: Relative starting index for vectors
 * @default_vport: Vectors are for default vport
 */
struct idpf_vector_info {
	u16 num_req_vecs;
	u16 num_curr_vecs;
	u16 index;
	bool default_vport;
};

/**
 * struct idpf_vector_lifo - Stack to maintain vector indexes used for vector
 *			     distribution algorithm
 * @top: Points to stack top i.e. next available vector index
 * @base: Always points to start of the free pool
 * @size: Total size of the vector stack
 * @vec_idx: Array to store all the vector indexes
 *
 * The vector stack maintains all the relative vector indexes at the *adapter*
 * level. It is divided into two parts: the 'default pool' and the 'free
 * pool'. The vector distribution algorithm prioritizes default vports, so
 * that at least IDPF_MIN_Q_VEC vectors are allocated per default vport; the
 * relative vector indexes for those vectors are maintained in the default
 * pool. The free pool contains all the unallocated vector indexes, which can
 * be allocated on demand. The mailbox vector index is maintained in the
 * default pool of the stack.
 */
struct idpf_vector_lifo {
	u16 top;
	u16 base;
	u16 size;
	u16 *vec_idx;
};

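/* Illustrative sketch of the LIFO semantics described above (hypothetical
 * helper; the driver's real push/pop implementations live in the .c files):
 * popping hands out the index at @top, and the stack is exhausted once @top
 * reaches @size.
 */
static inline int idpf_example_vec_lifo_pop(struct idpf_vector_lifo *stack)
{
	if (stack->top == stack->size)
		return -EINVAL;

	return stack->vec_idx[stack->top++];
}
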
/**
 * struct idpf_vport_config - Vport configuration data
 * @user_config: see struct idpf_vport_user_config_data
 * @max_q: Maximum possible queues
 * @req_qs_chunks: Queue chunk data for requested queues
 * @mac_filter_list_lock: Lock to protect mac filters
 * @flags: See enum idpf_vport_config_flags
 */
struct idpf_vport_config {
	struct idpf_vport_user_config_data user_config;
	struct idpf_vport_max_q max_q;
	struct virtchnl2_add_queues *req_qs_chunks;
	spinlock_t mac_filter_list_lock;
	DECLARE_BITMAP(flags, IDPF_VPORT_CONFIG_FLAGS_NBITS);
};

struct idpf_vc_xn_manager;

#define idpf_for_each_vport(adapter, iter) \
	for (struct idpf_vport **__##iter = &(adapter)->vports[0], \
	     *iter = (adapter)->max_vports ? *__##iter : NULL; \
	     iter; \
	     iter = (++__##iter) < &(adapter)->vports[(adapter)->max_vports] ? \
	     *__##iter : NULL)

/**
 * struct idpf_adapter - Device data struct generated on probe
 * @pdev: PCI device struct given on probe
 * @virt_ver_maj: Virtchnl version major
 * @virt_ver_min: Virtchnl version minor
 * @msg_enable: Debug message level enabled
 * @mb_wait_count: Number of mailbox initialization attempts
 * @state: Init state machine
 * @flags: See enum idpf_flags
 * @reset_reg: See struct idpf_reset_reg
 * @hw: Device access data
 * @num_avail_msix: Available number of MSIX vectors
 * @num_msix_entries: Number of entries in MSIX table
 * @msix_entries: MSIX table
 * @num_rdma_msix_entries: Available number of MSIX vectors for RDMA
 * @rdma_msix_entries: RDMA MSIX table
 * @req_vec_chunks: Requested vector chunk data
 * @mb_vector: Mailbox vector data
 * @vector_stack: Stack to store the msix vector indexes
 * @irq_mb_handler: Handler for hard interrupt for mailbox
 * @tx_timeout_count: Number of TX timeouts that have occurred
 * @avail_queues: Device given queue limits
 * @vports: Array to store vports created by the driver
 * @netdevs: Associated Vport netdevs
 * @vport_params_reqd: Vport params requested
 * @vport_params_recvd: Vport params received
 * @vport_ids: Array of device given vport identifiers
 * @vport_config: Vport config parameters
 * @max_vports: Maximum vports that can be allocated
 * @num_alloc_vports: Current number of vports allocated
 * @next_vport: Next free slot in adapter->vports[], 0-based
 * @init_task: Initialization task
 * @init_wq: Workqueue for initialization task
 * @serv_task: Periodically recurring maintenance task
 * @serv_wq: Workqueue for service task
 * @mbx_task: Task to handle mailbox interrupts
 * @mbx_wq: Workqueue for mailbox responses
 * @vc_event_task: Task to handle out of band virtchnl event notifications
 * @vc_event_wq: Workqueue for virtchnl events
 * @stats_task: Periodic statistics retrieval task
 * @stats_wq: Workqueue for statistics task
 * @caps: Negotiated capabilities with device
 * @vcxn_mngr: Virtchnl transaction manager
 * @dev_ops: See idpf_dev_ops
 * @cdev_info: IDC core device info pointer
 * @num_vfs: Number of VFs allocated through sysfs. The PF does not talk to
 *	     VFs directly, but the count is used to initialize them
 * @crc_enable: Enable CRC insertion offload
 * @req_tx_splitq: TX split or single queue model to request
 * @req_rx_splitq: RX split or single queue model to request
 * @vport_ctrl_lock: Lock to protect the vport control flow
 * @vector_lock: Lock to protect vector distribution
 * @queue_lock: Lock to protect queue distribution
 * @vc_buf_lock: Lock to protect virtchnl buffer
 * @ptp: Storage for PTP-related data
 */
struct idpf_adapter {
	struct pci_dev *pdev;
	u32 virt_ver_maj;
	u32 virt_ver_min;

	u32 msg_enable;
	u32 mb_wait_count;
	enum idpf_state state;
	DECLARE_BITMAP(flags, IDPF_FLAGS_NBITS);
	struct idpf_reset_reg reset_reg;
	struct idpf_hw hw;
	u16 num_avail_msix;
	u16 num_msix_entries;
	struct msix_entry *msix_entries;
	u16 num_rdma_msix_entries;
	struct msix_entry *rdma_msix_entries;
	struct virtchnl2_alloc_vectors *req_vec_chunks;
	struct idpf_q_vector mb_vector;
	struct idpf_vector_lifo vector_stack;
	irqreturn_t (*irq_mb_handler)(int irq, void *data);

	u32 tx_timeout_count;
	struct idpf_avail_queue_info avail_queues;
	struct idpf_vport **vports;
	struct net_device **netdevs;
	struct virtchnl2_create_vport **vport_params_reqd;
	struct virtchnl2_create_vport **vport_params_recvd;
	u32 *vport_ids;

	struct idpf_vport_config **vport_config;
	u16 max_vports;
	u16 num_alloc_vports;
	u16 next_vport;

	struct delayed_work init_task;
	struct workqueue_struct *init_wq;
	struct delayed_work serv_task;
	struct workqueue_struct *serv_wq;
	struct delayed_work mbx_task;
	struct workqueue_struct *mbx_wq;
	struct delayed_work vc_event_task;
	struct workqueue_struct *vc_event_wq;
	struct delayed_work stats_task;
	struct workqueue_struct *stats_wq;
	struct virtchnl2_get_capabilities caps;
	struct idpf_vc_xn_manager *vcxn_mngr;

	struct idpf_dev_ops dev_ops;
	struct iidc_rdma_core_dev_info *cdev_info;
	int num_vfs;
	bool crc_enable;
	bool req_tx_splitq;
	bool req_rx_splitq;

	struct mutex vport_ctrl_lock;
	struct mutex vector_lock;
	struct mutex queue_lock;
	struct mutex vc_buf_lock;

	struct idpf_ptp *ptp;
};

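/* Usage sketch for idpf_for_each_vport() (illustrative helper, not part of
 * the driver): the macro declares the iteration variable itself and walks
 * adapter->vports[] in order, stopping at the end of the array or at the
 * first empty slot.
 */
static inline u16 idpf_example_count_vports_up(struct idpf_adapter *adapter)
{
	u16 up = 0;

	idpf_for_each_vport(adapter, vport) {
		if (vport->link_up)
			up++;
	}

	return up;
}
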
/**
 * idpf_is_queue_model_split - check if queue model is split
 * @q_model: queue model single or split
 *
 * Returns true if the queue model is split, false otherwise.
 */
static inline int idpf_is_queue_model_split(u16 q_model)
{
	return !IS_ENABLED(CONFIG_IDPF_SINGLEQ) ||
	       q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT;
}

#define idpf_is_cap_ena(adapter, field, flag) \
	idpf_is_capability_ena(adapter, false, field, flag)
#define idpf_is_cap_ena_all(adapter, field, flag) \
	idpf_is_capability_ena(adapter, true, field, flag)

bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
			    enum idpf_cap_field field, u64 flag);

/**
 * idpf_is_rdma_cap_ena - Determine if RDMA is supported
 * @adapter: private data struct
 *
 * Return: true if RDMA capability is enabled, false otherwise
 */
static inline bool idpf_is_rdma_cap_ena(struct idpf_adapter *adapter)
{
	return idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_RDMA);
}

#define IDPF_CAP_RSS (\
	VIRTCHNL2_CAP_RSS_IPV4_TCP	|\
	VIRTCHNL2_CAP_RSS_IPV4_UDP	|\
	VIRTCHNL2_CAP_RSS_IPV4_SCTP	|\
	VIRTCHNL2_CAP_RSS_IPV4_OTHER	|\
	VIRTCHNL2_CAP_RSS_IPV6_TCP	|\
	VIRTCHNL2_CAP_RSS_IPV6_UDP	|\
	VIRTCHNL2_CAP_RSS_IPV6_SCTP	|\
	VIRTCHNL2_CAP_RSS_IPV6_OTHER)

#define IDPF_CAP_RSC (\
	VIRTCHNL2_CAP_RSC_IPV4_TCP	|\
	VIRTCHNL2_CAP_RSC_IPV6_TCP)

#define IDPF_CAP_HSPLIT	(\
	VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4	|\
	VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6)

#define IDPF_CAP_TX_CSUM_L4V4 (\
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP	|\
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP)

#define IDPF_CAP_TX_CSUM_L4V6 (\
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP	|\
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP)

#define IDPF_CAP_RX_CSUM (\
	VIRTCHNL2_CAP_RX_CSUM_L3_IPV4		|\
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP	|\
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP	|\
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP	|\
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP)

#define IDPF_CAP_TX_SCTP_CSUM (\
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP	|\
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP)

#define IDPF_CAP_TUNNEL_TX_CSUM (\
	VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL	|\
	VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL)

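/* Usage sketch (illustrative helper, not part of the driver): capability
 * flags from different fields can overlap numerically, so checks name the
 * field explicitly. As the macro names suggest, idpf_is_cap_ena() tests
 * whether any of the given flags were negotiated, while
 * idpf_is_cap_ena_all() requires all of them.
 */
static inline bool idpf_example_full_rx_csum(struct idpf_adapter *adapter)
{
	return idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM);
}
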
/**
 * idpf_get_reserved_vecs - Get reserved vectors
 * @adapter: private data struct
 */
static inline u16 idpf_get_reserved_vecs(struct idpf_adapter *adapter)
{
	return le16_to_cpu(adapter->caps.num_allocated_vectors);
}

/**
 * idpf_get_reserved_rdma_vecs - Get reserved RDMA vectors
 * @adapter: private data struct
 *
 * Return: number of vectors reserved for RDMA
 */
static inline u16 idpf_get_reserved_rdma_vecs(struct idpf_adapter *adapter)
{
	return le16_to_cpu(adapter->caps.num_rdma_allocated_vectors);
}

/**
 * idpf_get_default_vports - Get default number of vports
 * @adapter: private data struct
 */
static inline u16 idpf_get_default_vports(struct idpf_adapter *adapter)
{
	return le16_to_cpu(adapter->caps.default_num_vports);
}

/**
 * idpf_get_max_vports - Get max number of vports
 * @adapter: private data struct
 */
static inline u16 idpf_get_max_vports(struct idpf_adapter *adapter)
{
	return le16_to_cpu(adapter->caps.max_vports);
}

/**
 * idpf_get_max_tx_bufs - Get max scatter-gather buffers supported by the device
 * @adapter: private data struct
 */
static inline unsigned int idpf_get_max_tx_bufs(struct idpf_adapter *adapter)
{
	return adapter->caps.max_sg_bufs_per_tx_pkt;
}

/**
 * idpf_get_min_tx_pkt_len - Get min packet length supported by the device
 * @adapter: private data struct
 */
static inline u8 idpf_get_min_tx_pkt_len(struct idpf_adapter *adapter)
{
	u8 pkt_len = adapter->caps.min_sso_packet_len;

	return pkt_len ? pkt_len : IDPF_TX_MIN_PKT_LEN;
}

/**
 * idpf_get_mbx_reg_addr - Get BAR0 mailbox register address
 * @adapter: private data struct
 * @reg_offset: register offset value
 *
 * Return: BAR0 mailbox register address based on register offset.
 */
static inline void __iomem *idpf_get_mbx_reg_addr(struct idpf_adapter *adapter,
						  resource_size_t reg_offset)
{
	return adapter->hw.mbx.vaddr + reg_offset;
}

/**
 * idpf_get_rstat_reg_addr - Get BAR0 rstat register address
 * @adapter: private data struct
 * @reg_offset: register offset value
 *
 * Return: BAR0 rstat register address based on register offset.
 */
static inline void __iomem *idpf_get_rstat_reg_addr(struct idpf_adapter *adapter,
						    resource_size_t reg_offset)
{
	reg_offset -= adapter->dev_ops.static_reg_info[1].start;

	return adapter->hw.rstat.vaddr + reg_offset;
}

/**
 * idpf_get_reg_addr - Get BAR0 register address
 * @adapter: private data struct
 * @reg_offset: register offset value
 *
 * Based on the register offset, return the actual BAR0 register address
 */
static inline void __iomem *idpf_get_reg_addr(struct idpf_adapter *adapter,
					      resource_size_t reg_offset)
{
	struct idpf_hw *hw = &adapter->hw;

	for (int i = 0; i < hw->num_lan_regs; i++) {
		struct idpf_mmio_reg *region = &hw->lan_regs[i];

		if (reg_offset >= region->addr_start &&
		    reg_offset < (region->addr_start + region->addr_len)) {
			/* Convert the offset so that it is relative to the
			 * start of the region.  Then add the base address of
			 * the region to get the final address.
			 */
			reg_offset -= region->addr_start;

			return region->vaddr + reg_offset;
		}
	}

	/* It's impossible to hit this case with offsets from the CP. But if we
	 * do for any other reason, the kernel will panic on that register
	 * access. Might as well do it here to make it clear what's happening.
	 */
	BUG();

	return NULL;
}

/**
 * idpf_is_reset_detected - check if we were reset at some point
 * @adapter: driver specific private structure
 *
 * Returns true if we are either in reset currently or were previously reset.
 */
static inline bool idpf_is_reset_detected(struct idpf_adapter *adapter)
{
	if (!adapter->hw.arq)
		return true;

	return !(readl(idpf_get_mbx_reg_addr(adapter, adapter->hw.arq->reg.len)) &
		 adapter->hw.arq->reg.len_mask);
}

/**
 * idpf_is_reset_in_prog - check if reset is in progress
 * @adapter: driver specific private structure
 *
 * Returns true if hard reset is in progress, false otherwise
 */
static inline bool idpf_is_reset_in_prog(struct idpf_adapter *adapter)
{
	return (test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags) ||
		test_bit(IDPF_HR_FUNC_RESET, adapter->flags) ||
		test_bit(IDPF_HR_DRV_LOAD, adapter->flags));
}

/**
 * idpf_netdev_to_vport - get a vport handle from a netdev
 * @netdev: network interface device structure
 */
static inline struct idpf_vport *idpf_netdev_to_vport(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);

	return np->vport;
}

/**
 * idpf_netdev_to_adapter - Get adapter handle from a netdev
 * @netdev: Network interface device structure
 */
static inline struct idpf_adapter *idpf_netdev_to_adapter(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);

	return np->adapter;
}

/**
 * idpf_is_feature_ena - Determine if a particular feature is enabled
 * @vport: Vport to check
 * @feature: Netdev flag to check
 *
 * Returns true if the given feature is enabled, false otherwise.
 */
static inline bool idpf_is_feature_ena(const struct idpf_vport *vport,
				       netdev_features_t feature)
{
	return vport->netdev->features & feature;
}

/**
 * idpf_get_max_tx_hdr_size - Get the max TX header size supported by the device
 * @adapter: Driver specific private structure
 */
static inline u16 idpf_get_max_tx_hdr_size(struct idpf_adapter *adapter)
{
	return le16_to_cpu(adapter->caps.max_tx_hdr_size);
}

/**
 * idpf_vport_ctrl_lock - Acquire the vport control lock
 * @netdev: Network interface device structure
 *
 * This lock should be used by non-datapath code to protect against vport
 * destruction.
 */
static inline void idpf_vport_ctrl_lock(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);

	mutex_lock(&np->adapter->vport_ctrl_lock);
}

/**
 * idpf_vport_ctrl_unlock - Release the vport control lock
 * @netdev: Network interface device structure
 */
static inline void idpf_vport_ctrl_unlock(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);

	mutex_unlock(&np->adapter->vport_ctrl_lock);
}

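/* Usage sketch (illustrative helper, not part of the driver): non-datapath
 * code should hold the vport control lock while dereferencing the vport,
 * since the vport can be destroyed and recreated across resets.
 */
static inline u32 idpf_example_read_vport_id(struct net_device *netdev)
{
	struct idpf_vport *vport;
	u32 vport_id = 0;

	idpf_vport_ctrl_lock(netdev);
	vport = idpf_netdev_to_vport(netdev);
	if (vport)
		vport_id = vport->vport_id;
	idpf_vport_ctrl_unlock(netdev);

	return vport_id;
}
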
void idpf_statistics_task(struct work_struct *work);
void idpf_init_task(struct work_struct *work);
void idpf_service_task(struct work_struct *work);
void idpf_mbx_task(struct work_struct *work);
void idpf_vc_event_task(struct work_struct *work);
void idpf_dev_ops_init(struct idpf_adapter *adapter);
void idpf_vf_dev_ops_init(struct idpf_adapter *adapter);
int idpf_intr_req(struct idpf_adapter *adapter);
void idpf_intr_rel(struct idpf_adapter *adapter);
int idpf_initiate_soft_reset(struct idpf_vport *vport,
			     enum idpf_vport_reset_cause reset_cause);
void idpf_deinit_task(struct idpf_adapter *adapter);
int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter,
				u16 *q_vector_idxs,
				struct idpf_vector_info *vec_info);
void idpf_set_ethtool_ops(struct net_device *netdev);
void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector,
			       u16 itr, bool tx);
int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs);

u8 idpf_vport_get_hsplit(const struct idpf_vport *vport);
bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val);
int idpf_idc_init(struct idpf_adapter *adapter);
int idpf_idc_init_aux_core_dev(struct idpf_adapter *adapter,
			       enum iidc_function_type ftype);
void idpf_idc_deinit_core_aux_device(struct iidc_rdma_core_dev_info *cdev_info);
void idpf_idc_deinit_vport_aux_device(struct iidc_rdma_vport_dev_info *vdev_info);
void idpf_idc_issue_reset_event(struct iidc_rdma_core_dev_info *cdev_info);
void idpf_idc_vdev_mtu_event(struct iidc_rdma_vport_dev_info *vdev_info,
			     enum iidc_rdma_event_type event_type);

#endif /* !_IDPF_H_ */