1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (C) 2018-2021, Intel Corporation. */
3
4 #ifndef _ICE_VF_LIB_H_
5 #define _ICE_VF_LIB_H_
6
7 #include <linux/types.h>
8 #include <linux/hashtable.h>
9 #include <linux/bitmap.h>
10 #include <linux/mutex.h>
11 #include <linux/pci.h>
12 #include <net/devlink.h>
13 #include <linux/avf/virtchnl.h>
14 #include "ice_type.h"
15 #include "ice_flow.h"
16 #include "ice_virtchnl_fdir.h"
17 #include "ice_vsi_vlan_ops.h"
18
19 #define ICE_MAX_SRIOV_VFS 256
20
21 /* VF resource constraints */
22 #define ICE_MAX_RSS_QS_PER_VF 16
23
24 struct ice_pf;
25 struct ice_vf;
26 struct ice_virtchnl_ops;
27
28 /* VF capabilities */
/* VF capabilities
 *
 * Bit indices; presumably tested/set in the vf_caps bitmap of struct ice_vf
 * (see vf_caps below) — confirm against users of ICE_VIRTCHNL_VF_CAP_PRIVILEGE.
 */
enum ice_virtchnl_cap {
	ICE_VIRTCHNL_VF_CAP_PRIVILEGE = 0,	/* VF granted privileged (trusted) capability */
};
32
33 /* Specific VF states */
/* Specific VF states
 *
 * Bit indices for the vf_states bitmap in struct ice_vf; multiple states may
 * be set at once (e.g. ACTIVE together with QS_ENA).
 */
enum ice_vf_states {
	ICE_VF_STATE_INIT = 0,		/* PF is initializing VF */
	ICE_VF_STATE_ACTIVE,		/* VF resources are allocated for use */
	ICE_VF_STATE_QS_ENA,		/* VF queue(s) enabled */
	ICE_VF_STATE_DIS,		/* VF is disabled */
	ICE_VF_STATE_MC_PROMISC,	/* VF multicast promiscuous mode */
	ICE_VF_STATE_UC_PROMISC,	/* VF unicast promiscuous mode */
	ICE_VF_STATES_NBITS		/* bitmap size; must be last */
};
43
/* A MAC address paired with the time (jiffies) it was last modified; used
 * for the legacy_last_added_umac tracking in struct ice_vf.
 */
struct ice_time_mac {
	unsigned long time_modified;	/* timestamp of last change */
	u8 addr[ETH_ALEN];		/* the tracked MAC address */
};
48
/* VF MDD events print structure
 *
 * Tracks Malicious Driver Detection events per direction so that log output
 * can be rate limited: a new message is only worth printing when count has
 * advanced past last_printed.
 */
struct ice_mdd_vf_events {
	u16 count;		/* total count of Rx|Tx events */
	/* count number of the last printed event */
	u16 last_printed;
};
55
/* Structure to store fdir fv entry
 *
 * One Flow Director parser profile plus a count of active filters that
 * reference it; the VF keeps one entry per PTG (fdir_prof_info[ICE_MAX_PTGS]).
 */
struct ice_fdir_prof_info {
	struct ice_parser_profile prof;	/* the parser profile itself */
	u64 fdir_active_cnt;		/* number of active FDIR rules using it */
};
61
/* Per-queue bandwidth configuration for a VF queue.
 *
 * NOTE(review): units of committed/peak are not visible in this header —
 * presumably rate limits consistent with min_tx_rate/max_tx_rate (Mbps);
 * confirm against the code that programs qs_bw.
 */
struct ice_vf_qs_bw {
	u32 committed;	/* committed (guaranteed) bandwidth */
	u32 peak;	/* peak (maximum) bandwidth */
	u16 queue_id;	/* queue this setting applies to */
	u8 tc;		/* traffic class */
};
68
/* VF operations
 *
 * Implementation-specific callbacks bound via struct ice_vf::vf_ops, allowing
 * different VF backends to share the common VF lifecycle code in this module.
 */
struct ice_vf_ops {
	enum ice_disq_rst_src reset_type;		/* reset source used when disabling queues */
	void (*free)(struct ice_vf *vf);		/* release backend-specific VF resources */
	void (*clear_reset_state)(struct ice_vf *vf);	/* clear HW reset-in-progress indication */
	void (*clear_mbx_register)(struct ice_vf *vf);	/* clear the VF's mailbox register(s) */
	void (*trigger_reset_register)(struct ice_vf *vf, bool is_vflr);	/* start a HW reset */
	bool (*poll_reset_status)(struct ice_vf *vf);	/* true once HW reports reset complete */
	void (*clear_reset_trigger)(struct ice_vf *vf);	/* de-assert the reset trigger */
	void (*irq_close)(struct ice_vf *vf);		/* tear down VF interrupt resources */
	void (*post_vsi_rebuild)(struct ice_vf *vf);	/* fixups after the VF VSI is rebuilt */
};
81
/* Virtchnl/SR-IOV config info
 *
 * Per-PF container for all VF entries. Iterate with ice_for_each_vf() (under
 * table_lock) or ice_for_each_vf_rcu() (under rcu_read_lock()).
 */
struct ice_vfs {
	DECLARE_HASHTABLE(table, 8);	/* table of VF entries */
	struct mutex table_lock;	/* Lock for protecting the hash table */
	u16 num_supported;		/* max supported VFs on this PF */
	u16 num_qps_per;		/* number of queue pairs per VF */
	u16 num_msix_per;		/* default MSI-X vectors per VF */
	unsigned long last_printed_mdd_jiffies;	/* MDD message rate limit */
};
91
/* VF information structure
 *
 * One entry per VF, stored in pf->vfs.table. Lifetime is managed by refcnt
 * plus RCU: obtain a reference with ice_get_vf_by_id() and drop it with
 * ice_put_vf().
 */
struct ice_vf {
	struct hlist_node entry;	/* node in pf->vfs.table */
	struct rcu_head rcu;		/* deferred free of this entry */
	struct kref refcnt;		/* lifetime reference count */
	struct ice_pf *pf;		/* owning PF */
	struct pci_dev *vfdev;		/* the VF's PCI device */
	/* Used during virtchnl message handling and NDO ops against the VF
	 * that will trigger a VFR
	 */
	struct mutex cfg_lock;

	u16 vf_id;			/* VF ID in the PF space */
	u16 lan_vsi_idx;		/* index into PF struct */
	u16 ctrl_vsi_idx;		/* index of the VF's control VSI */
	struct ice_vf_fdir fdir;	/* Flow Director state for this VF */
	struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];	/* FDIR profiles per PTG */
	u64 rss_hashcfg;		/* RSS hash configuration */
	struct ice_sw *vf_sw_id;	/* switch ID the VF VSIs connect to */
	struct virtchnl_version_info vf_ver;	/* virtchnl version reported by VF */
	u32 driver_caps;		/* reported by VF driver */
	u8 dev_lan_addr[ETH_ALEN];	/* current device MAC address */
	u8 hw_lan_addr[ETH_ALEN];	/* MAC programmed in HW; distinction from
					 * dev_lan_addr kept in ice_vf_lib.c —
					 * TODO confirm
					 */
	struct ice_time_mac legacy_last_added_umac;	/* last unicast MAC added by a
							 * legacy VF driver, with time
							 */
	DECLARE_BITMAP(txq_ena, ICE_MAX_RSS_QS_PER_VF);	/* per-queue Tx enable state */
	DECLARE_BITMAP(rxq_ena, ICE_MAX_RSS_QS_PER_VF);	/* per-queue Rx enable state */
	struct ice_vlan port_vlan_info;	/* Port VLAN ID, QoS, and TPID */
	struct virtchnl_vlan_caps vlan_v2_caps;	/* VLAN v2 capabilities offered to VF */
	struct ice_mbx_vf_info mbx_info;	/* per-VF mailbox state */
	u8 pf_set_mac:1;		/* VF MAC address set by VMM admin */
	u8 trusted:1;			/* VF is trusted (privileged) */
	u8 spoofchk:1;			/* anti-spoof checking enabled */
	u8 link_forced:1;		/* link state forced by admin */
	u8 link_up:1;			/* only valid if VF link is forced */
	u8 lldp_tx_ena:1;		/* LLDP Tx enabled for this VF */

	u16 num_msix;			/* num of MSI-X configured on this VF */

	u32 ptp_caps;			/* PTP capabilities — exact bit semantics
					 * not visible here; confirm in virtchnl
					 */

	unsigned int min_tx_rate;	/* Minimum Tx bandwidth limit in Mbps */
	unsigned int max_tx_rate;	/* Maximum Tx bandwidth limit in Mbps */
	/* first vector index of this VF in the PF space */
	int first_vector_idx;
	DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS);	/* VF runtime states */

	unsigned long vf_caps;		/* VF's adv. capabilities */
	u8 num_req_qs;			/* num of queue pairs requested by VF */
	u16 num_mac;			/* number of MAC filters on this VF */
	u16 num_mac_lldp;		/* number of LLDP MAC filters; see
					 * ice_vf_is_lldp_ena()
					 */
	u16 num_vf_qs;			/* num of queue configured per VF */
	u8 vlan_strip_ena;		/* Outer and Inner VLAN strip enable */
#define ICE_INNER_VLAN_STRIP_ENA	BIT(0)
#define ICE_OUTER_VLAN_STRIP_ENA	BIT(1)
	struct ice_mdd_vf_events mdd_rx_events;	/* Rx MDD event accounting */
	struct ice_mdd_vf_events mdd_tx_events;	/* Tx MDD event accounting */
	DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX);	/* virtchnl opcodes the VF may send */

	unsigned long repr_id;		/* port representor identifier */
	const struct ice_virtchnl_ops *virtchnl_ops;	/* virtchnl message handlers */
	const struct ice_vf_ops *vf_ops;	/* backend-specific VF operations */

	/* devlink port data */
	struct devlink_port devlink_port;

	u16 lldp_recipe_id;		/* switch recipe for the LLDP rule */
	u16 lldp_rule_id;		/* switch rule ID for LLDP handling */

	struct ice_vf_qs_bw qs_bw[ICE_MAX_RSS_QS_PER_VF];	/* per-queue BW settings */
};
162
/* Flags for controlling behavior of ice_reset_vf
 *
 * Single-bit values; callers may OR them together into the flags argument
 * of ice_reset_vf().
 */
enum ice_vf_reset_flags {
	ICE_VF_RESET_VFLR = BIT(0),	/* Indicate a VFLR reset */
	ICE_VF_RESET_NOTIFY = BIT(1),	/* Notify VF prior to reset */
	ICE_VF_RESET_LOCK = BIT(2),	/* Acquire the VF cfg_lock */
};
169
ice_vf_get_port_vlan_id(struct ice_vf * vf)170 static inline u16 ice_vf_get_port_vlan_id(struct ice_vf *vf)
171 {
172 return vf->port_vlan_info.vid;
173 }
174
ice_vf_get_port_vlan_prio(struct ice_vf * vf)175 static inline u8 ice_vf_get_port_vlan_prio(struct ice_vf *vf)
176 {
177 return vf->port_vlan_info.prio;
178 }
179
ice_vf_is_port_vlan_ena(struct ice_vf * vf)180 static inline bool ice_vf_is_port_vlan_ena(struct ice_vf *vf)
181 {
182 return (ice_vf_get_port_vlan_id(vf) || ice_vf_get_port_vlan_prio(vf));
183 }
184
ice_vf_get_port_vlan_tpid(struct ice_vf * vf)185 static inline u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf)
186 {
187 return vf->port_vlan_info.tpid;
188 }
189
ice_vf_is_lldp_ena(struct ice_vf * vf)190 static inline bool ice_vf_is_lldp_ena(struct ice_vf *vf)
191 {
192 return vf->num_mac_lldp && vf->trusted;
193 }
194
195 /* VF Hash Table access functions
196 *
197 * These functions provide abstraction for interacting with the VF hash table.
198 * In general, direct access to the hash table should be avoided outside of
199 * these functions where possible.
200 *
201 * The VF entries in the hash table are protected by reference counting to
202 * track lifetime of accesses from the table. The ice_get_vf_by_id() function
203 * obtains a reference to the VF structure which must be dropped by using
204 * ice_put_vf().
205 */
206
/**
 * ice_for_each_vf - Iterate over each VF entry
 * @pf: pointer to the PF private structure
 * @bkt: bucket index used for iteration
 * @vf: pointer to the VF entry currently being processed in the loop
 *
 * The bkt variable is an unsigned integer iterator used to traverse the VF
 * entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is.
 * Use vf->vf_id to get the id number if needed.
 *
 * The caller is expected to be under the table_lock mutex for the entire
 * loop. Use this iterator if your loop is long or if it might sleep.
 *
 * Note: this does not take references on entries; holding table_lock is
 * what keeps them alive for the duration of the loop.
 */
#define ice_for_each_vf(pf, bkt, vf) \
	hash_for_each((pf)->vfs.table, (bkt), (vf), entry)
222
/**
 * ice_for_each_vf_rcu - Iterate over each VF entry protected by RCU
 * @pf: pointer to the PF private structure
 * @bkt: bucket index used for iteration
 * @vf: pointer to the VF entry currently being processed in the loop
 *
 * The bkt variable is an unsigned integer iterator used to traverse the VF
 * entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is.
 * Use vf->vf_id to get the id number if needed.
 *
 * The caller is expected to be under rcu_read_lock() for the entire loop.
 * Only use this iterator if your loop is short and you can guarantee it does
 * not sleep.
 *
 * To use a VF entry past the end of the loop, take a reference with
 * kref/ice_get_vf_by_id() instead of relying on the RCU read section.
 */
#define ice_for_each_vf_rcu(pf, bkt, vf) \
	hash_for_each_rcu((pf)->vfs.table, (bkt), (vf), entry)
239
240 #ifdef CONFIG_PCI_IOV
241 struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id);
242
ice_get_vf_by_dev(struct ice_pf * pf,struct pci_dev * vf_dev)243 static inline struct ice_vf *ice_get_vf_by_dev(struct ice_pf *pf,
244 struct pci_dev *vf_dev)
245 {
246 int vf_id = pci_iov_vf_id(vf_dev);
247
248 if (vf_id < 0)
249 return NULL;
250
251 return ice_get_vf_by_id(pf, pci_iov_vf_id(vf_dev));
252 }
253
254 void ice_put_vf(struct ice_vf *vf);
255 bool ice_has_vfs(struct ice_pf *pf);
256 u16 ice_get_num_vfs(struct ice_pf *pf);
257 struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
258 bool ice_is_vf_disabled(struct ice_vf *vf);
259 int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
260 void ice_set_vf_state_dis(struct ice_vf *vf);
261 bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf);
262 void
263 ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
264 u8 *ucast_m, u8 *mcast_m);
265 int
266 ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m);
267 int
268 ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m);
269 int ice_reset_vf(struct ice_vf *vf, u32 flags);
270 void ice_reset_all_vfs(struct ice_pf *pf);
271 struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi);
272 void ice_vf_update_mac_lldp_num(struct ice_vf *vf, struct ice_vsi *vsi,
273 bool incr);
#else /* CONFIG_PCI_IOV */

/* !CONFIG_PCI_IOV stubs: with SR-IOV compiled out there are never any VFs,
 * so lookups return NULL, predicates report "no VFs / disabled", and
 * configuration entry points fail with -EOPNOTSUPP or act as no-ops.
 *
 * NOTE(review): ice_vf_get_promisc_masks() and ice_vf_update_mac_lldp_num()
 * are declared only in the CONFIG_PCI_IOV branch with no stubs here —
 * presumably all their callers are built under CONFIG_PCI_IOV; confirm
 * before adding new call sites.
 */

static inline struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
{
	return NULL;
}

static inline struct ice_vf *ice_get_vf_by_dev(struct ice_pf *pf,
					       struct pci_dev *vf_dev)
{
	return NULL;
}

static inline void ice_put_vf(struct ice_vf *vf)
{
}

static inline bool ice_has_vfs(struct ice_pf *pf)
{
	return false;
}

static inline u16 ice_get_num_vfs(struct ice_pf *pf)
{
	return 0;
}

static inline struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
{
	return NULL;
}

static inline bool ice_is_vf_disabled(struct ice_vf *vf)
{
	return true;
}

static inline int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
{
	return -EOPNOTSUPP;
}

static inline void ice_set_vf_state_dis(struct ice_vf *vf)
{
}

static inline bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
{
	return false;
}

static inline int
ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
{
	return -EOPNOTSUPP;
}

static inline int
ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
{
	return -EOPNOTSUPP;
}

/* Resetting a VF is a successful no-op when SR-IOV is compiled out */
static inline int ice_reset_vf(struct ice_vf *vf, u32 flags)
{
	return 0;
}

static inline void ice_reset_all_vfs(struct ice_pf *pf)
{
}

static inline struct ice_vsi *
ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi)
{
	return NULL;
}
#endif /* !CONFIG_PCI_IOV */
351
352 #endif /* _ICE_VF_LIB_H_ */
353