/*
 * Copyright (c) 2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_ESWITCH_H__
#define __MLX5_ESWITCH_H__

#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/xarray.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
#include "lib/fs_chains.h"
#include "sf/sf.h"
#include "en/tc_ct.h"
#include "en/tc/sample.h"

enum mlx5_mapped_obj_type {
	MLX5_MAPPED_OBJ_CHAIN,
	MLX5_MAPPED_OBJ_SAMPLE,
	MLX5_MAPPED_OBJ_INT_PORT_METADATA,
	MLX5_MAPPED_OBJ_ACT_MISS,
};

struct mlx5_mapped_obj {
	enum mlx5_mapped_obj_type type;
	union {
		u32 chain;
		u64 act_miss_cookie;
		struct {
			u32 group_id;
			u32 rate;
			u32 trunc_size;
			u32 tunnel_id;
		} sample;
		u32 int_port_metadata;
	};
};
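
/* Illustrative sketch (not part of this header's API): mapped objects are
 * stored in the offloads reg_c0_obj_pool so that the reg_c0 value carried by
 * a packet can be translated back to the chain/sample/metadata it stands for
 * on the slow path. Assuming the mlx5 mapping helpers from lib/mapping.h
 * (mapping_add()/mapping_find()), encoding a chain miss could look like:
 *
 *	struct mlx5_mapped_obj obj = {
 *		.type  = MLX5_MAPPED_OBJ_CHAIN,
 *		.chain = chain_id,
 *	};
 *	u32 obj_id;
 *	int err;
 *
 *	err = mapping_add(esw->offloads.reg_c0_obj_pool, &obj, &obj_id);
 *
 * On success, obj_id is what gets programmed into reg_c0; a later
 * mapping_find() recovers obj and the caller dispatches on obj.type.
 */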

#ifdef CONFIG_MLX5_ESWITCH

#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15

#define MLX5_MAX_UC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))

#define MLX5_MAX_MC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))

#define mlx5_esw_has_fwd_fdb(dev) \
	MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)

#define esw_chains(esw) \
	((esw)->fdb_table.offloads.esw_chains_priv)

enum {
	MAPPING_TYPE_CHAIN,
	MAPPING_TYPE_TUNNEL,
	MAPPING_TYPE_TUNNEL_ENC_OPTS,
	MAPPING_TYPE_LABELS,
	MAPPING_TYPE_ZONE,
	MAPPING_TYPE_INT_PORT,
};

struct vport_ingress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allow_rule;
	struct {
		struct mlx5_flow_group *allow_spoofchk_only_grp;
		struct mlx5_flow_group *allow_untagged_spoofchk_grp;
		struct mlx5_flow_group *allow_untagged_only_grp;
		struct mlx5_flow_group *drop_grp;
		struct mlx5_flow_handle *drop_rule;
		struct mlx5_fc *drop_counter;
	} legacy;
	struct {
		/* Optional group to add an FTE to do internal priority
		 * tagging on ingress packets.
		 */
		struct mlx5_flow_group *metadata_prio_tag_grp;
		/* Group for the default match-all FTE entry that tags
		 * ingress packets with metadata.
		 */
		struct mlx5_flow_group *metadata_allmatch_grp;
		/* Optional group for a drop-all rule */
		struct mlx5_flow_group *drop_grp;
		struct mlx5_modify_hdr *modify_metadata;
		struct mlx5_flow_handle *modify_metadata_rule;
		struct mlx5_flow_handle *drop_rule;
	} offloads;
};

enum vport_egress_acl_type {
	VPORT_EGRESS_ACL_TYPE_DEFAULT,
	VPORT_EGRESS_ACL_TYPE_SHARED_FDB,
};

struct vport_egress {
	struct mlx5_flow_table *acl;
	enum vport_egress_acl_type type;
	struct mlx5_flow_handle  *allowed_vlan;
	struct mlx5_flow_group *vlan_grp;
	union {
		struct {
			struct mlx5_flow_group *drop_grp;
			struct mlx5_flow_handle *drop_rule;
			struct mlx5_fc *drop_counter;
		} legacy;
		struct {
			struct mlx5_flow_group *fwd_grp;
			struct mlx5_flow_handle *fwd_rule;
			struct xarray bounce_rules;
			struct mlx5_flow_group *bounce_grp;
		} offloads;
	};
};

struct mlx5_vport_drop_stats {
	u64 rx_dropped;
	u64 tx_dropped;
};

struct mlx5_vport_info {
	u8                      mac[ETH_ALEN];
	u16                     vlan;
	u64                     node_guid;
	int                     link_state;
	u8                      qos;
	u8                      spoofchk: 1;
	u8                      trusted: 1;
	u8                      roce_enabled: 1;
	u8                      mig_enabled: 1;
	u8                      ipsec_crypto_enabled: 1;
	u8                      ipsec_packet_enabled: 1;
};

/* Vport context events */
enum mlx5_eswitch_vport_event {
	MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
	MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
	MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};

struct mlx5_vport;

struct mlx5_devlink_port {
	struct devlink_port dl_port;
	struct mlx5_vport *vport;
};

static inline void mlx5_devlink_port_init(struct mlx5_devlink_port *dl_port,
					  struct mlx5_vport *vport)
{
	dl_port->vport = vport;
}

static inline struct mlx5_devlink_port *mlx5_devlink_port_get(struct devlink_port *dl_port)
{
	return container_of(dl_port, struct mlx5_devlink_port, dl_port);
}

static inline struct mlx5_vport *mlx5_devlink_port_vport_get(struct devlink_port *dl_port)
{
	return mlx5_devlink_port_get(dl_port)->vport;
}
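
/* Usage sketch: inside a devlink port callback, these accessors recover the
 * mlx5 objects from the generic devlink_port embedded in mlx5_devlink_port
 * (the callback name below is illustrative, not a real entry point):
 *
 *	static int some_port_fn_get(struct devlink_port *port)
 *	{
 *		struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
 *
 *		return vport->vport;
 *	}
 *
 * This relies on the devlink_port handed back by devlink being the one
 * registered from a struct mlx5_devlink_port, so container_of() is valid.
 */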

#define MLX5_VHCA_ID_INVALID (-1)

#define MLX5_VPORT_INVAL_VHCA_ID(vport) \
	((vport)->vhca_id == MLX5_VHCA_ID_INVALID)

struct mlx5_vport {
	struct mlx5_core_dev    *dev;
	struct hlist_head       uc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct hlist_head       mc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct mlx5_flow_handle *promisc_rule;
	struct mlx5_flow_handle *allmulti_rule;
	struct work_struct      vport_change_handler;

	struct vport_ingress    ingress;
	struct vport_egress     egress;
	u32                     default_metadata;
	u32                     metadata;
	int                     vhca_id;

	bool adjacent; /* delegated vhca from adjacent function */
	struct {
		u16 parent_pci_devfn; /* Adjacent parent PCI device function */
		u16 function_id; /* Function ID of the delegated VPort */
	} adj_info;

	struct mlx5_vport_info  info;

	/* Protected with the E-Switch qos domain lock. The Vport QoS can
	 * either be disabled (sched_node is NULL) or in one of three states:
	 * 1. Regular QoS (sched_node is a vport node).
	 * 2. TC QoS enabled on the vport (sched_node is a TC arbiter).
	 * 3. TC QoS enabled on the vport's parent node
	 *    (sched_node is a rate limit node).
	 * When TC is enabled in either mode, the vport owns vport TC scheduling
	 * nodes.
	 */
	struct {
		/* Vport scheduling node. */
		struct mlx5_esw_sched_node *sched_node;
		/* Array of vport traffic class scheduling nodes. */
		struct mlx5_esw_sched_node **sched_nodes;
	} qos;

	u16 vport;
	bool                    enabled;
	bool max_eqs_set;
	enum mlx5_eswitch_vport_event enabled_events;
	int index;
	struct mlx5_devlink_port *dl_port;
};

struct mlx5_esw_indir_table;

struct mlx5_eswitch_fdb {
	union {
		struct legacy_fdb {
			struct mlx5_flow_table *fdb;
			struct mlx5_flow_group *addr_grp;
			struct mlx5_flow_group *allmulti_grp;
			struct mlx5_flow_group *promisc_grp;
			struct mlx5_flow_table *vepa_fdb;
			struct mlx5_flow_handle *vepa_uplink_rule;
			struct mlx5_flow_handle *vepa_star_rule;
		} legacy;

		struct offloads_fdb {
			struct mlx5_flow_namespace *ns;
			struct mlx5_flow_table *tc_miss_table;
			struct mlx5_flow_table *slow_fdb;
			struct mlx5_flow_group *send_to_vport_grp;
			struct mlx5_flow_group *send_to_vport_meta_grp;
			struct mlx5_flow_group *peer_miss_grp;
			struct mlx5_flow_handle **peer_miss_rules[MLX5_MAX_PORTS];
			struct mlx5_flow_group *miss_grp;
			struct mlx5_flow_handle **send_to_vport_meta_rules;
			struct mlx5_flow_handle *miss_rule_uni;
			struct mlx5_flow_handle *miss_rule_multi;

			struct mlx5_fs_chains *esw_chains_priv;
			struct {
				DECLARE_HASHTABLE(table, 8);
				/* Protects vports.table */
				struct mutex lock;
			} vports;

			struct mlx5_esw_indir_table *indir;

		} offloads;
	};
	u32 flags;
};

struct mlx5_esw_offload {
	struct mlx5_flow_table *ft_offloads_restore;
	struct mlx5_flow_group *restore_group;
	struct mlx5_modify_hdr *restore_copy_hdr_id;
	struct mapping_ctx *reg_c0_obj_pool;

	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_group *vport_rx_group;
	struct mlx5_flow_group *vport_rx_drop_group;
	struct mlx5_flow_handle *vport_rx_drop_rule;
	struct mlx5_flow_table *ft_ipsec_tx_pol;
	struct xarray vport_reps;
	struct list_head peer_flows[MLX5_MAX_PORTS];
	struct mutex peer_mutex;
	struct mutex encap_tbl_lock; /* protects encap_tbl */
	DECLARE_HASHTABLE(encap_tbl, 8);
	struct mutex decap_tbl_lock; /* protects decap_tbl */
	DECLARE_HASHTABLE(decap_tbl, 8);
	struct mod_hdr_tbl mod_hdr;
	DECLARE_HASHTABLE(termtbl_tbl, 8);
	struct mutex termtbl_mutex; /* protects termtbl hash */
	struct xarray vhca_map;
	const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
	u8 inline_mode;
	atomic64_t num_flows;
	u64 num_block_encap;
	u64 num_block_mode;
	enum devlink_eswitch_encap_mode encap;
	struct ida vport_metadata_ida;
	unsigned int host_number; /* ECPF supports one external host */
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node     node;
	struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
	u32                    refcnt;
};

struct mlx5_host_work {
	struct work_struct	work;
	struct mlx5_eswitch	*esw;
};

struct mlx5_esw_functions {
	struct mlx5_nb		nb;
	bool			host_funcs_disabled;
	u16			num_vfs;
	u16			num_ec_vfs;
};

enum {
	MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
	MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
	MLX5_ESWITCH_VPORT_ACL_NS_CREATED = BIT(2),
};

struct mlx5_esw_bridge_offloads;

enum {
	MLX5_ESW_FDB_CREATED = BIT(0),
};

struct dentry;
struct mlx5_qos_domain;

struct mlx5_eswitch {
	struct mlx5_core_dev    *dev;
	struct mlx5_nb          nb;
	struct mlx5_eswitch_fdb fdb_table;
	/* legacy data structures */
	struct hlist_head       mc_table[MLX5_L2_ADDR_HASH_SIZE];
	struct esw_mc_addr mc_promisc;
	/* end of legacy */
	struct dentry *debugfs_root;
	struct workqueue_struct *work_queue;
	struct xarray vports;
	u32 flags;
	int                     total_vports;
	int                     enabled_vports;
	/* Synchronize between vport change events
	 * and async SRIOV admin state changes
	 */
	struct mutex            state_lock;

	/* Protects eswitch mode change that occurs via one or more
	 * user commands, i.e. sriov state change, devlink commands.
	 */
	struct rw_semaphore mode_lock;
	atomic64_t user_count;

	/* Protected with the E-Switch qos domain lock. */
	struct {
		/* Initially 0, meaning no QoS users and QoS is disabled. */
		refcount_t refcnt;
		u32 root_tsar_ix;
		struct mlx5_qos_domain *domain;
	} qos;

	struct mlx5_esw_bridge_offloads *br_offloads;
	struct mlx5_esw_offload offloads;
	u32 last_vport_idx;
	int                     mode;
	u16                     manager_vport;
	u16                     first_host_vport;
	u8			num_peers;
	struct mlx5_esw_functions esw_funcs;
	struct {
		u32             large_group_num;
	}  params;
	struct blocking_notifier_head n_head;
	struct xarray paired;
	struct mlx5_devcom_comp_dev *devcom;
	u16 enabled_ipsec_vf_count;
	bool eswitch_operation_in_progress;
};

void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup(struct mlx5_eswitch *esw);
int esw_offloads_init(struct mlx5_eswitch *esw);

struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule);

bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw);
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);

int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps);

/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw, int index, u16 vport_num);
void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw,
				   const struct mlx5_devcom_match_attr *attr);
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw);
bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 u16 vport, int link_state);
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				u16 vport, u16 vlan, u8 qos);
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    u16 vport, bool spoofchk);
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 u16 vport_num, bool setting);
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate);
int mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *node,
				     struct netlink_ext_ack *extack);
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  u16 vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 u16 vport,
				 struct ifla_vf_stats *vf_stats);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);

int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport, void *in);

struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
struct mlx5_termtbl_handle;

bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
			      struct mlx5_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_esw_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_destination *dest,
			      int num_dest);

void
mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
			 struct mlx5_termtbl_handle *tt);

void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr);

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest);

enum {
	SET_VLAN_STRIP	= BIT(0),
	SET_VLAN_INSERT	= BIT(1)
};

enum mlx5_flow_match_level {
	MLX5_MATCH_NONE	= MLX5_INLINE_MODE_NONE,
	MLX5_MATCH_L2	= MLX5_INLINE_MODE_L2,
	MLX5_MATCH_L3	= MLX5_INLINE_MODE_IP,
	MLX5_MATCH_L4	= MLX5_INLINE_MODE_TCP_UDP,
};

/* current maximum for flow-based vport multicasting */
#define MLX5_MAX_FLOW_FWD_VPORTS 32

enum {
	MLX5_ESW_DEST_ENCAP         = BIT(0),
	MLX5_ESW_DEST_ENCAP_VALID   = BIT(1),
	MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE  = BIT(2),
};

struct mlx5_esw_flow_attr {
	struct mlx5_eswitch_rep *in_rep;
	struct mlx5_core_dev	*in_mdev;
	struct mlx5_core_dev    *counter_dev;
	struct mlx5e_tc_int_port *dest_int_port;
	struct mlx5e_tc_int_port *int_port;

	int split_count;
	int out_count;

	__be16	vlan_proto[MLX5_FS_VLAN_DEPTH];
	u16	vlan_vid[MLX5_FS_VLAN_DEPTH];
	u8	vlan_prio[MLX5_FS_VLAN_DEPTH];
	u8	total_vlan;
	struct {
		u32 flags;
		bool vport_valid;
		u16 vport;
		struct mlx5_pkt_reformat *pkt_reformat;
		struct mlx5_core_dev *mdev;
		struct mlx5_termtbl_handle *termtbl;
		int src_port_rewrite_act_id;
	} dests[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5_rx_tun_attr *rx_tun_attr;
	struct ethhdr eth;
	struct mlx5_pkt_reformat *decap_pkt_reformat;
};

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap);
int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
				     u8 *hw_addr, int *hw_addr_len,
				     struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port,
				     const u8 *hw_addr, int hw_addr_len,
				     struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled,
					struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
					struct netlink_ext_ack *extack);
#ifdef CONFIG_XFRM_OFFLOAD
int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled,
					  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_ipsec_crypto_set(struct devlink_port *port, bool enable,
					  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled,
					  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port, bool enable,
					  struct netlink_ext_ack *extack);
#endif /* CONFIG_XFRM_OFFLOAD */
int mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port,
					u32 *max_io_eqs,
					struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port,
					u32 max_io_eqs,
					struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_max_io_eqs_set_sf_default(struct devlink_port *port,
						   struct netlink_ext_ack *extack);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);

int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  u16 vport, u16 vlan, u8 qos, u8 set_flags);

static inline bool esw_vst_mode_is_steering(struct mlx5_eswitch *esw)
{
	return (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, pop_vlan) &&
		MLX5_CAP_ESW_INGRESS_ACL(esw->dev, push_vlan));
}

static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
						       u8 vlan_depth)
{
	bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
		   MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);

	if (vlan_depth == 1)
		return ret;

	return  ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) &&
		MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}
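
/* Usage sketch: callers gate VLAN push/pop offload on the FDB capabilities,
 * with vlan_depth expressing how many tags a rule manipulates. E.g. a rule
 * touching two VLAN tags is only offloadable when the *_vlan_2 caps are set:
 *
 *	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 2))
 *		return -EOPNOTSUPP;
 */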

bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
			       struct mlx5_core_dev *dev1);

const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);

void mlx5_esw_adjacent_vhcas_setup(struct mlx5_eswitch *esw);
void mlx5_esw_adjacent_vhcas_cleanup(struct mlx5_eswitch *esw);

#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

#define esw_info(__dev, format, ...)			\
	dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_warn(__dev, format, ...)			\
	dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_debug(dev, format, ...)				\
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
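
/* Usage sketch for the logging helpers above; all three take the
 * mlx5_core_dev, and esw_debug() is only emitted when
 * MLX5_DEBUG_ESWITCH_MASK is set in the driver debug mask:
 *
 *	esw_info(esw->dev, "Total vports %d\n", esw->total_vports);
 *	esw_warn(esw->dev, "Failed to enable vport %d\n", vport->vport);
 *	esw_debug(esw->dev, "vport %d context changed\n", vport->vport);
 */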

static inline bool mlx5_esw_allowed(const struct mlx5_eswitch *esw)
{
	return esw && MLX5_ESWITCH_MANAGER(esw->dev);
}

static inline bool
mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return esw->manager_vport == vport_num;
}

static inline bool mlx5_esw_is_owner(struct mlx5_eswitch *esw, u16 vport_num,
				     u16 esw_owner_vhca_id)
{
	return esw_owner_vhca_id == MLX5_CAP_GEN(esw->dev, vhca_id) ||
		(vport_num == MLX5_VPORT_UPLINK && mlx5_lag_is_master(esw->dev));
}

static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}

static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev);
}

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
}

static inline u16
mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
{
	return dl_port_index & 0xffff;
}
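
/* The two helpers above are inverses over the low 16 bits: the devlink port
 * index packs the device vhca_id in the upper bits and the vport number in
 * the lower 16 bits, so the round trip holds (values illustrative):
 *
 *	unsigned int idx = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
 *	u16 vnum = mlx5_esw_devlink_port_index_to_vport_num(idx);
 *
 * Here vnum == vport_num and idx >> 16 == MLX5_CAP_GEN(dev, vhca_id).
 */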

static inline bool mlx5_esw_is_fdb_created(struct mlx5_eswitch *esw)
{
	return esw->fdb_table.flags & MLX5_ESW_FDB_CREATED;
}

/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);

/* Each mark identifies an eswitch vport type.
 * MLX5_ESW_VPT_HOST_FN is used to identify both PF and VF ports using
 * a single mark.
 * MLX5_ESW_VPT_VF identifies an SRIOV VF vport.
 * MLX5_ESW_VPT_SF identifies an SF vport.
 */
#define MLX5_ESW_VPT_HOST_FN XA_MARK_0
#define MLX5_ESW_VPT_VF XA_MARK_1
#define MLX5_ESW_VPT_SF XA_MARK_2

/* The vport iterators below are valid only after the vports are initialized
 * in mlx5_eswitch_init. The idea is borrowed from xa_for_each_marked(), with
 * added support for a desired last element; see the usage sketch after these
 * macros.
 */

#define mlx5_esw_for_each_vport(esw, index, vport) \
	xa_for_each(&((esw)->vports), index, vport)

#define mlx5_esw_for_each_entry_marked(xa, index, entry, last, filter)	\
	for (index = 0, entry = xa_find(xa, &index, last, filter); \
	     entry; entry = xa_find_after(xa, &index, last, filter))

#define mlx5_esw_for_each_vport_marked(esw, index, vport, last, filter)	\
	mlx5_esw_for_each_entry_marked(&((esw)->vports), index, vport, last, filter)

#define mlx5_esw_for_each_vf_vport(esw, index, vport, last)	\
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_VF)

#define mlx5_esw_for_each_host_func_vport(esw, index, vport, last)	\
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_HOST_FN)

/* This macro should only be used if EC SRIOV is enabled.
 *
 * Because there were no more marks available on the xarray, this uses a
 * for_each_range approach. The range is only valid while EC SRIOV is
 * enabled.
 */
#define mlx5_esw_for_each_ec_vf_vport(esw, index, vport, last)		\
	xa_for_each_range(&((esw)->vports),				\
			  index,					\
			  vport,					\
			  MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base),	\
			  MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\
			  (last) - 1)

#define mlx5_esw_for_each_rep(esw, i, rep) \
	xa_for_each(&((esw)->offloads.vport_reps), i, rep)
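
/* Usage sketch for the iterators above: index is an unsigned long cursor and
 * last bounds the walk, e.g. the current VF count:
 *
 *	struct mlx5_vport *vport;
 *	unsigned long i;
 *
 *	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
 *		if (!vport->enabled)
 *			continue;
 *		esw_debug(esw->dev, "VF vport %d\n", vport->vport);
 *	}
 */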

struct mlx5_eswitch *__must_check
mlx5_devlink_eswitch_get(struct devlink *devlink);

struct mlx5_eswitch *mlx5_devlink_eswitch_nocheck_get(struct devlink *devlink);

struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);

bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_eswitch_is_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);

int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);

int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
			  enum mlx5_eswitch_vport_event enabled_events);
void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport);
void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport);

struct esw_vport_tbl_namespace {
	int max_fte;
	int max_num_groups;
	u32 flags;
};

struct mlx5_vport_tbl_attr {
	u32 chain;
	u16 prio;
	u16 vport;
	struct esw_vport_tbl_namespace *vport_ns;
};

struct mlx5_flow_table *
mlx5_esw_vporttbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);
void
mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);

struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);

void mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					 u32 *flow_group_in,
					 int match_params);

void mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
				   u16 vport,
				   struct mlx5_flow_spec *spec);

int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_init_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
				  struct mlx5_devlink_port *dl_port,
				  u32 controller, u32 sfnum);
void mlx5_esw_offloads_cleanup_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_eswitch_load_sf_vport(struct mlx5_eswitch *esw, u16 vport_num,
			       enum mlx5_eswitch_vport_event enabled_events,
			       struct mlx5_devlink_port *dl_port, u32 controller, u32 sfnum);
void mlx5_eswitch_unload_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);

int mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport);
void mlx5_esw_offloads_pf_vf_devlink_port_cleanup(struct mlx5_eswitch *esw,
						  struct mlx5_vport *vport);

int mlx5_esw_offloads_sf_devlink_port_init(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					   struct mlx5_devlink_port *dl_port,
					   u32 controller, u32 sfnum);
void mlx5_esw_offloads_sf_devlink_port_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_vport *vport);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);

int mlx5_esw_vport_vhca_id_map(struct mlx5_eswitch *esw,
			       struct mlx5_vport *vport);
void mlx5_esw_vport_vhca_id_unmap(struct mlx5_eswitch *esw,
				  struct mlx5_vport *vport);
int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num);
bool mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id);

void mlx5_esw_offloads_rep_remove(struct mlx5_eswitch *esw,
				  const struct mlx5_vport *vport);
int mlx5_esw_offloads_rep_add(struct mlx5_eswitch *esw,
			      const struct mlx5_vport *vport);

/**
 * struct mlx5_esw_event_info - Indicates eswitch mode changed/changing.
 *
 * @new_mode: New mode of eswitch.
 */
struct mlx5_esw_event_info {
	u16 new_mode;
};

int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n);
void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n);

bool mlx5_esw_hold(struct mlx5_core_dev *dev);
void mlx5_esw_release(struct mlx5_core_dev *dev);
void mlx5_esw_get(struct mlx5_core_dev *dev);
void mlx5_esw_put(struct mlx5_core_dev *dev);
int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
int mlx5_esw_lock(struct mlx5_eswitch *esw);
void mlx5_esw_unlock(struct mlx5_eswitch *esw);

void esw_vport_change_handle_locked(struct mlx5_vport *vport);

bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller);

int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
					     struct mlx5_eswitch *slave_esw, int max_slaves);
void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
					      struct mlx5_eswitch *slave_esw);
int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw);

bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev, bool from_fdb);
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);

int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev);

static inline int mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw)
{
	if (mlx5_esw_allowed(esw))
		return esw->esw_funcs.num_vfs;

	return 0;
}

static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw)
{
	if (mlx5_esw_allowed(esw))
		return esw->num_peers;
	return 0;
}

static inline struct mlx5_flow_table *
mlx5_eswitch_get_slow_fdb(struct mlx5_eswitch *esw)
{
	return esw->fdb_table.offloads.slow_fdb;
}

int mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule,
				    struct mlx5_esw_flow_attr *esw_attr, int attr_idx);
bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev);
bool mlx5_esw_ipsec_vf_offload_supported(struct mlx5_core_dev *dev);
int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev,
				  struct mlx5_vport *vport);
int mlx5_esw_ipsec_vf_crypto_offload_supported(struct mlx5_core_dev *dev,
					       u16 vport_num);
int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					 bool enable);
int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					 bool enable);
int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
					       u16 vport_num);
bool mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev);
#else  /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int  mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
static inline void
mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw,
			      const struct mlx5_devcom_match_attr *attr) {}
static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {}
static inline bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw) { return false; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return vport_num;
}

static inline int
mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
					 struct mlx5_eswitch *slave_esw, int max_slaves)
{
	return 0;
}

static inline void
mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
					 struct mlx5_eswitch *slave_esw) {}

static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw) { return 0; }

static inline int
mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
{
	return 0;
}

static inline bool
mlx5_eswitch_block_encap(struct mlx5_core_dev *dev, bool from_fdb)
{
	return true;
}

static inline void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
{
}

static inline int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev) {}
static inline bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev)
{
	return false;
}

static inline void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev) {}

static inline bool
mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev)
{
	return true;
}

static inline bool
mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id)
{
	/* Without eswitch support there is no vhca_id mapping, so the lookup
	 * never succeeds. Note this function returns bool; returning an
	 * errno such as -EOPNOTSUPP here would silently evaluate to true.
	 */
	return false;
}

#endif /* CONFIG_MLX5_ESWITCH */

#endif /* __MLX5_ESWITCH_H__ */