/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2021, Intel Corporation. */

#ifndef _ICE_VF_LIB_H_
#define _ICE_VF_LIB_H_

#include <linux/types.h>
#include <linux/hashtable.h>
#include <linux/bitmap.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <net/devlink.h>
#include <linux/avf/virtchnl.h>
#include "ice_type.h"
#include "ice_flow.h"
#include "ice_virtchnl_fdir.h"
#include "ice_vsi_vlan_ops.h"

#define ICE_MAX_SRIOV_VFS		256

/* VF resource constraints */
#define ICE_MAX_RSS_QS_PER_VF	16

struct ice_pf;
struct ice_vf;
struct ice_virtchnl_ops;

/* VF capabilities */
enum ice_virtchnl_cap {
	ICE_VIRTCHNL_VF_CAP_PRIVILEGE = 0,
};

/* Specific VF states */
enum ice_vf_states {
	ICE_VF_STATE_INIT = 0,		/* PF is initializing VF */
	ICE_VF_STATE_ACTIVE,		/* VF resources are allocated for use */
	ICE_VF_STATE_QS_ENA,		/* VF queue(s) enabled */
	ICE_VF_STATE_DIS,
	ICE_VF_STATE_MC_PROMISC,
	ICE_VF_STATE_UC_PROMISC,
	ICE_VF_STATES_NBITS
};

struct ice_time_mac {
	unsigned long time_modified;
	u8 addr[ETH_ALEN];
};

/* VF MDD events print structure */
struct ice_mdd_vf_events {
	u16 count;			/* total count of Rx|Tx events */
	/* count value at the time of the last printed event */
	u16 last_printed;
};

/* Structure to store fdir field vector (fv) entry */
struct ice_fdir_prof_info {
	struct ice_parser_profile prof;
	u64 fdir_active_cnt;
};

struct ice_vf_qs_bw {
	u32 committed;
	u32 peak;
	u16 queue_id;
	u8 tc;
};

/* VF operations */
struct ice_vf_ops {
	enum ice_disq_rst_src reset_type;
	void (*free)(struct ice_vf *vf);
	void (*clear_reset_state)(struct ice_vf *vf);
	void (*clear_mbx_register)(struct ice_vf *vf);
	void (*trigger_reset_register)(struct ice_vf *vf, bool is_vflr);
	bool (*poll_reset_status)(struct ice_vf *vf);
	void (*clear_reset_trigger)(struct ice_vf *vf);
	void (*irq_close)(struct ice_vf *vf);
	void (*post_vsi_rebuild)(struct ice_vf *vf);
};
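
/* A minimal sketch of how a VF backing implementation might populate
 * struct ice_vf_ops. The callback names below are illustrative placeholders
 * only, and the ICE_VF_RESET reset source is assumed, not declared here:
 *
 *	static const struct ice_vf_ops example_vf_ops = {
 *		.reset_type		= ICE_VF_RESET,
 *		.free			= example_free_vf,
 *		.clear_reset_state	= example_clear_reset_state,
 *		.clear_mbx_register	= example_clear_mbx_register,
 *		.trigger_reset_register	= example_trigger_reset_register,
 *		.poll_reset_status	= example_poll_reset_status,
 *		.clear_reset_trigger	= example_clear_reset_trigger,
 *		.irq_close		= example_irq_close,
 *		.post_vsi_rebuild	= example_post_vsi_rebuild,
 *	};
 */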

/* Virtchnl/SR-IOV config info */
struct ice_vfs {
	DECLARE_HASHTABLE(table, 8);	/* table of VF entries */
	struct mutex table_lock;	/* Lock for protecting the hash table */
	u16 num_supported;		/* max supported VFs on this PF */
	u16 num_qps_per;		/* number of queue pairs per VF */
	u16 num_msix_per;		/* default MSI-X vectors per VF */
	unsigned long last_printed_mdd_jiffies;	/* MDD message rate limit */
};

/* VF information structure */
struct ice_vf {
	struct hlist_node entry;
	struct rcu_head rcu;
	struct kref refcnt;
	struct ice_pf *pf;
	struct pci_dev *vfdev;
	/* Used during virtchnl message handling and NDO ops against the VF
	 * that will trigger a VFR
	 */
	struct mutex cfg_lock;

	u16 vf_id;			/* VF ID in the PF space */
	u16 lan_vsi_idx;		/* index into PF struct */
	u16 ctrl_vsi_idx;
	struct ice_vf_fdir fdir;
	struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
	/* first vector index of this VF in the PF space */
	int first_vector_idx;
	struct ice_sw *vf_sw_id;	/* switch ID the VF VSIs connect to */
	struct virtchnl_version_info vf_ver;
	u32 driver_caps;		/* reported by VF driver */
	u8 dev_lan_addr[ETH_ALEN];
	u8 hw_lan_addr[ETH_ALEN];
	struct ice_time_mac legacy_last_added_umac;
	DECLARE_BITMAP(txq_ena, ICE_MAX_RSS_QS_PER_VF);
	DECLARE_BITMAP(rxq_ena, ICE_MAX_RSS_QS_PER_VF);
	struct ice_vlan port_vlan_info;	/* Port VLAN ID, QoS, and TPID */
	struct virtchnl_vlan_caps vlan_v2_caps;
	struct ice_mbx_vf_info mbx_info;
	u8 pf_set_mac:1;		/* VF MAC address set by VMM admin */
	u8 trusted:1;
	u8 spoofchk:1;
	u8 link_forced:1;
	u8 link_up:1;			/* only valid if VF link is forced */
	unsigned int min_tx_rate;	/* Minimum Tx bandwidth limit in Mbps */
	unsigned int max_tx_rate;	/* Maximum Tx bandwidth limit in Mbps */
	DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS);	/* VF runtime states */

	unsigned long vf_caps;		/* VF's adv. capabilities */
	u8 num_req_qs;			/* num of queue pairs requested by VF */
	u16 num_mac;
	u16 num_vf_qs;			/* num of queues configured per VF */
	u8 vlan_strip_ena;		/* Outer and Inner VLAN strip enable */
#define ICE_INNER_VLAN_STRIP_ENA	BIT(0)
#define ICE_OUTER_VLAN_STRIP_ENA	BIT(1)
	struct ice_mdd_vf_events mdd_rx_events;
	struct ice_mdd_vf_events mdd_tx_events;
	DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX);

	unsigned long repr_id;
	const struct ice_virtchnl_ops *virtchnl_ops;
	const struct ice_vf_ops *vf_ops;

	/* devlink port data */
	struct devlink_port devlink_port;

	u16 num_msix;			/* num of MSI-X configured on this VF */
	struct ice_vf_qs_bw qs_bw[ICE_MAX_RSS_QS_PER_VF];
};

/* Flags for controlling behavior of ice_reset_vf */
enum ice_vf_reset_flags {
	ICE_VF_RESET_VFLR = BIT(0), /* Indicate a VFLR reset */
	ICE_VF_RESET_NOTIFY = BIT(1), /* Notify VF prior to reset */
	ICE_VF_RESET_LOCK = BIT(2), /* Acquire the VF cfg_lock */
};
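
/* Illustrative sketch only: a caller would typically combine the flags above
 * when invoking ice_reset_vf(), which is declared later in this header under
 * CONFIG_PCI_IOV. For example:
 *
 *	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
 *
 * ICE_VF_RESET_LOCK asks ice_reset_vf() to acquire vf->cfg_lock itself,
 * which presumably means the caller must not already hold that lock.
 */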

static inline u16 ice_vf_get_port_vlan_id(struct ice_vf *vf)
{
	return vf->port_vlan_info.vid;
}

static inline u8 ice_vf_get_port_vlan_prio(struct ice_vf *vf)
{
	return vf->port_vlan_info.prio;
}

static inline bool ice_vf_is_port_vlan_ena(struct ice_vf *vf)
{
	return (ice_vf_get_port_vlan_id(vf) || ice_vf_get_port_vlan_prio(vf));
}

static inline u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf)
{
	return vf->port_vlan_info.tpid;
}

/* VF Hash Table access functions
 *
 * These functions provide abstraction for interacting with the VF hash table.
 * In general, direct access to the hash table should be avoided outside of
 * these functions where possible.
 *
 * The VF entries in the hash table are protected by reference counting to
 * track lifetime of accesses from the table. The ice_get_vf_by_id() function
 * obtains a reference to the VF structure which must be dropped by using
 * ice_put_vf().
 */
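
/* A minimal usage sketch of the reference-counted lookup described above.
 * The error handling shown is illustrative, not prescribed by this header:
 *
 *	struct ice_vf *vf;
 *
 *	vf = ice_get_vf_by_id(pf, vf_id);
 *	if (!vf)
 *		return -EINVAL;
 *
 *	... operate on the VF while holding the reference ...
 *
 *	ice_put_vf(vf);
 */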

/**
 * ice_for_each_vf - Iterate over each VF entry
 * @pf: pointer to the PF private structure
 * @bkt: bucket index used for iteration
 * @vf: pointer to the VF entry currently being processed in the loop
 *
 * The bkt variable is an unsigned integer iterator used to traverse the VF
 * entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is.
 * Use vf->vf_id to get the id number if needed.
 *
 * The caller is expected to be under the table_lock mutex for the entire
 * loop. Use this iterator if your loop is long or if it might sleep.
 */
#define ice_for_each_vf(pf, bkt, vf) \
	hash_for_each((pf)->vfs.table, (bkt), (vf), entry)
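
/* Illustrative sketch of iterating with ice_for_each_vf() while holding
 * table_lock, as required above. The loop body (counting active VFs via the
 * vf_states bitmap) is only an example:
 *
 *	struct ice_vf *vf;
 *	unsigned int bkt;
 *	u16 active = 0;
 *
 *	mutex_lock(&pf->vfs.table_lock);
 *	ice_for_each_vf(pf, bkt, vf) {
 *		if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
 *			active++;
 *	}
 *	mutex_unlock(&pf->vfs.table_lock);
 */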

/**
 * ice_for_each_vf_rcu - Iterate over each VF entry protected by RCU
 * @pf: pointer to the PF private structure
 * @bkt: bucket index used for iteration
 * @vf: pointer to the VF entry currently being processed in the loop
 *
 * The bkt variable is an unsigned integer iterator used to traverse the VF
 * entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is.
 * Use vf->vf_id to get the id number if needed.
 *
 * The caller is expected to be under rcu_read_lock() for the entire loop.
 * Only use this iterator if your loop is short and you can guarantee it does
 * not sleep.
 */
#define ice_for_each_vf_rcu(pf, bkt, vf) \
	hash_for_each_rcu((pf)->vfs.table, (bkt), (vf), entry)
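
/* Illustrative sketch of the RCU variant: the read-side critical section
 * must not sleep, so keep the loop body short. The check below (looking for
 * any VF with enabled queues) is only an example:
 *
 *	struct ice_vf *vf;
 *	unsigned int bkt;
 *	bool any_qs_ena = false;
 *
 *	rcu_read_lock();
 *	ice_for_each_vf_rcu(pf, bkt, vf) {
 *		if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
 *			any_qs_ena = true;
 *			break;
 *		}
 *	}
 *	rcu_read_unlock();
 */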

#ifdef CONFIG_PCI_IOV
struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id);
void ice_put_vf(struct ice_vf *vf);
bool ice_has_vfs(struct ice_pf *pf);
u16 ice_get_num_vfs(struct ice_pf *pf);
struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
bool ice_is_vf_disabled(struct ice_vf *vf);
int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
void ice_set_vf_state_dis(struct ice_vf *vf);
bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf);
void
ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
			 u8 *ucast_m, u8 *mcast_m);
int
ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m);
int
ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m);
int ice_reset_vf(struct ice_vf *vf, u32 flags);
void ice_reset_all_vfs(struct ice_pf *pf);
struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi);
#else /* CONFIG_PCI_IOV */
static inline struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
{
	return NULL;
}

static inline void ice_put_vf(struct ice_vf *vf)
{
}

static inline bool ice_has_vfs(struct ice_pf *pf)
{
	return false;
}

static inline u16 ice_get_num_vfs(struct ice_pf *pf)
{
	return 0;
}

static inline struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
{
	return NULL;
}

static inline bool ice_is_vf_disabled(struct ice_vf *vf)
{
	return true;
}

static inline int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
{
	return -EOPNOTSUPP;
}

static inline void ice_set_vf_state_dis(struct ice_vf *vf)
{
}

static inline bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
{
	return false;
}

static inline int
ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
{
	return -EOPNOTSUPP;
}

static inline int
ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
{
	return -EOPNOTSUPP;
}

static inline int ice_reset_vf(struct ice_vf *vf, u32 flags)
{
	return 0;
}

static inline void ice_reset_all_vfs(struct ice_pf *pf)
{
}

static inline struct ice_vsi *
ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi)
{
	return NULL;
}
#endif /* !CONFIG_PCI_IOV */

#endif /* _ICE_VF_LIB_H_ */