/*
 * Copyright (c) 2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_ESWITCH_H__
#define __MLX5_ESWITCH_H__

#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/xarray.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
#include "lib/fs_chains.h"
#include "sf/sf.h"
#include "en/tc_ct.h"
#include "en/tc/sample.h"

enum mlx5_mapped_obj_type {
	MLX5_MAPPED_OBJ_CHAIN,
	MLX5_MAPPED_OBJ_SAMPLE,
	MLX5_MAPPED_OBJ_INT_PORT_METADATA,
	MLX5_MAPPED_OBJ_ACT_MISS,
};

struct mlx5_mapped_obj {
	enum mlx5_mapped_obj_type type;
	union {
		u32 chain;
		u64 act_miss_cookie;
		struct {
			u32 group_id;
			u32 rate;
			u32 trunc_size;
			u32 tunnel_id;
		} sample;
		u32 int_port_metadata;
	};
};
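
/* Illustrative sketch (hypothetical caller, not part of this header):
 * restore data is packed into a struct mlx5_mapped_obj keyed by type,
 * e.g. for a chain restore entry:
 *
 *	struct mlx5_mapped_obj obj = {
 *		.type = MLX5_MAPPED_OBJ_CHAIN,
 *		.chain = chain_id,
 *	};
 */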

#ifdef CONFIG_MLX5_ESWITCH

#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15

#define MLX5_MAX_UC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))

#define MLX5_MAX_MC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))
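
/* For example, with a firmware-reported log_max_current_uc_list of 7
 * (illustrative value), MLX5_MAX_UC_PER_VPORT(dev) evaluates to
 * 1 << 7 == 128 unicast addresses per vport; MLX5_MAX_MC_PER_VPORT()
 * behaves the same way for multicast.
 */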

#define mlx5_esw_has_fwd_fdb(dev) \
	MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)

#define esw_chains(esw) \
	((esw)->fdb_table.offloads.esw_chains_priv)

enum {
	MAPPING_TYPE_CHAIN,
	MAPPING_TYPE_TUNNEL,
	MAPPING_TYPE_TUNNEL_ENC_OPTS,
	MAPPING_TYPE_LABELS,
	MAPPING_TYPE_ZONE,
	MAPPING_TYPE_INT_PORT,
};

struct vport_ingress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allow_rule;
	struct {
		struct mlx5_flow_group *allow_spoofchk_only_grp;
		struct mlx5_flow_group *allow_untagged_spoofchk_grp;
		struct mlx5_flow_group *allow_untagged_only_grp;
		struct mlx5_flow_group *drop_grp;
		struct mlx5_flow_handle *drop_rule;
		struct mlx5_fc *drop_counter;
	} legacy;
	struct {
		/* Optional group to add an FTE to do internal priority
		 * tagging on ingress packets.
		 */
		struct mlx5_flow_group *metadata_prio_tag_grp;
		/* Group to add a default match-all FTE entry to tag
		 * ingress packets with metadata.
		 */
		struct mlx5_flow_group *metadata_allmatch_grp;
		/* Optional group to add a drop-all rule */
		struct mlx5_flow_group *drop_grp;
		struct mlx5_modify_hdr *modify_metadata;
		struct mlx5_flow_handle *modify_metadata_rule;
		struct mlx5_flow_handle *drop_rule;
	} offloads;
};

enum vport_egress_acl_type {
	VPORT_EGRESS_ACL_TYPE_DEFAULT,
	VPORT_EGRESS_ACL_TYPE_SHARED_FDB,
};

struct vport_egress {
	struct mlx5_flow_table *acl;
	enum vport_egress_acl_type type;
	struct mlx5_flow_handle  *allowed_vlan;
	struct mlx5_flow_group *vlan_grp;
	union {
		struct {
			struct mlx5_flow_group *drop_grp;
			struct mlx5_flow_handle *drop_rule;
			struct mlx5_fc *drop_counter;
		} legacy;
		struct {
			struct mlx5_flow_group *fwd_grp;
			struct mlx5_flow_handle *fwd_rule;
			struct xarray bounce_rules;
			struct mlx5_flow_group *bounce_grp;
		} offloads;
	};
};

struct mlx5_vport_drop_stats {
	u64 rx_dropped;
	u64 tx_dropped;
};

struct mlx5_vport_info {
	u8                      mac[ETH_ALEN];
	u16                     vlan;
	u64                     node_guid;
	int                     link_state;
	u8                      qos;
	u8                      spoofchk: 1;
	u8                      trusted: 1;
	u8                      roce_enabled: 1;
	u8                      mig_enabled: 1;
	u8                      ipsec_crypto_enabled: 1;
	u8                      ipsec_packet_enabled: 1;
};

/* Vport context events */
enum mlx5_eswitch_vport_event {
	MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
	MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
	MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};

struct mlx5_vport;

struct mlx5_devlink_port {
	struct devlink_port dl_port;
	struct mlx5_vport *vport;
};

static inline void mlx5_devlink_port_init(struct mlx5_devlink_port *dl_port,
					  struct mlx5_vport *vport)
{
	dl_port->vport = vport;
}

static inline struct mlx5_devlink_port *mlx5_devlink_port_get(struct devlink_port *dl_port)
{
	return container_of(dl_port, struct mlx5_devlink_port, dl_port);
}

static inline struct mlx5_vport *mlx5_devlink_port_vport_get(struct devlink_port *dl_port)
{
	return mlx5_devlink_port_get(dl_port)->vport;
}
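
/* Illustrative sketch (hypothetical function, not part of this header):
 * a devlink port callback can recover the vport attached in
 * mlx5_devlink_port_init() via the container_of() helpers above:
 *
 *	static int example_port_fn_state_get(struct devlink_port *port)
 *	{
 *		struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
 *
 *		return vport->enabled ? 0 : -EOPNOTSUPP;
 *	}
 */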

struct mlx5_vport {
	struct mlx5_core_dev    *dev;
	struct hlist_head       uc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct hlist_head       mc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct mlx5_flow_handle *promisc_rule;
	struct mlx5_flow_handle *allmulti_rule;
	struct work_struct      vport_change_handler;

	struct vport_ingress    ingress;
	struct vport_egress     egress;
	u32                     default_metadata;
	u32                     metadata;

	struct mlx5_vport_info  info;

	/* Protected with the E-Switch qos domain lock. The vport QoS can
	 * either be disabled (sched_node is NULL) or in one of three states:
	 * 1. Regular QoS (sched_node is a vport node).
	 * 2. TC QoS enabled on the vport (sched_node is a TC arbiter).
	 * 3. TC QoS enabled on the vport's parent node
	 *    (sched_node is a rate limit node).
	 * When TC is enabled in either mode, the vport owns its vport TC
	 * scheduling nodes.
	 */
	struct {
		/* Vport scheduling node. */
		struct mlx5_esw_sched_node *sched_node;
		/* Array of vport traffic class scheduling nodes. */
		struct mlx5_esw_sched_node **sched_nodes;
	} qos;

	u16 vport;
	bool                    enabled;
	bool max_eqs_set;
	enum mlx5_eswitch_vport_event enabled_events;
	int index;
	struct mlx5_devlink_port *dl_port;
};

struct mlx5_esw_indir_table;

struct mlx5_eswitch_fdb {
	union {
		struct legacy_fdb {
			struct mlx5_flow_table *fdb;
			struct mlx5_flow_group *addr_grp;
			struct mlx5_flow_group *allmulti_grp;
			struct mlx5_flow_group *promisc_grp;
			struct mlx5_flow_table *vepa_fdb;
			struct mlx5_flow_handle *vepa_uplink_rule;
			struct mlx5_flow_handle *vepa_star_rule;
		} legacy;

		struct offloads_fdb {
			struct mlx5_flow_namespace *ns;
			struct mlx5_flow_table *tc_miss_table;
			struct mlx5_flow_table *slow_fdb;
			struct mlx5_flow_group *send_to_vport_grp;
			struct mlx5_flow_group *send_to_vport_meta_grp;
			struct mlx5_flow_group *peer_miss_grp;
			struct mlx5_flow_handle **peer_miss_rules[MLX5_MAX_PORTS];
			struct mlx5_flow_group *miss_grp;
			struct mlx5_flow_handle **send_to_vport_meta_rules;
			struct mlx5_flow_handle *miss_rule_uni;
			struct mlx5_flow_handle *miss_rule_multi;

			struct mlx5_fs_chains *esw_chains_priv;
			struct {
				DECLARE_HASHTABLE(table, 8);
				/* Protects vports.table */
				struct mutex lock;
			} vports;

			struct mlx5_esw_indir_table *indir;

		} offloads;
	};
	u32 flags;
};

struct mlx5_esw_offload {
	struct mlx5_flow_table *ft_offloads_restore;
	struct mlx5_flow_group *restore_group;
	struct mlx5_modify_hdr *restore_copy_hdr_id;
	struct mapping_ctx *reg_c0_obj_pool;

	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_group *vport_rx_group;
	struct mlx5_flow_group *vport_rx_drop_group;
	struct mlx5_flow_handle *vport_rx_drop_rule;
	struct mlx5_flow_table *ft_ipsec_tx_pol;
	struct xarray vport_reps;
	struct list_head peer_flows[MLX5_MAX_PORTS];
	struct mutex peer_mutex;
	struct mutex encap_tbl_lock; /* protects encap_tbl */
	DECLARE_HASHTABLE(encap_tbl, 8);
	struct mutex decap_tbl_lock; /* protects decap_tbl */
	DECLARE_HASHTABLE(decap_tbl, 8);
	struct mod_hdr_tbl mod_hdr;
	DECLARE_HASHTABLE(termtbl_tbl, 8);
	struct mutex termtbl_mutex; /* protects termtbl hash */
	struct xarray vhca_map;
	const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
	u8 inline_mode;
	atomic64_t num_flows;
	u64 num_block_encap;
	u64 num_block_mode;
	enum devlink_eswitch_encap_mode encap;
	struct ida vport_metadata_ida;
	unsigned int host_number; /* ECPF supports one external host */
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node     node;
	struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
	u32                    refcnt;
};

struct mlx5_host_work {
	struct work_struct	work;
	struct mlx5_eswitch	*esw;
};

struct mlx5_esw_functions {
	struct mlx5_nb		nb;
	u16			num_vfs;
	u16			num_ec_vfs;
};

enum {
	MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
	MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
	MLX5_ESWITCH_VPORT_ACL_NS_CREATED = BIT(2),
};

struct mlx5_esw_bridge_offloads;

enum {
	MLX5_ESW_FDB_CREATED = BIT(0),
};

struct dentry;
struct mlx5_qos_domain;

struct mlx5_eswitch {
	struct mlx5_core_dev    *dev;
	struct mlx5_nb          nb;
	struct mlx5_eswitch_fdb fdb_table;
	/* legacy data structures */
	struct hlist_head       mc_table[MLX5_L2_ADDR_HASH_SIZE];
	struct esw_mc_addr mc_promisc;
	/* end of legacy */
	struct dentry *debugfs_root;
	struct workqueue_struct *work_queue;
	struct xarray vports;
	u32 flags;
	int                     total_vports;
	int                     enabled_vports;
	/* Synchronize between vport change events
	 * and async SRIOV admin state changes
	 */
	struct mutex            state_lock;

	/* Protects eswitch mode changes that occur via one or more
	 * user commands, e.g. SRIOV state change, devlink commands.
	 */
	struct rw_semaphore mode_lock;
	atomic64_t user_count;

	/* Protected with the E-Switch qos domain lock. */
	struct {
		/* Initially 0, meaning no QoS users and QoS is disabled. */
		refcount_t refcnt;
		u32 root_tsar_ix;
		struct mlx5_qos_domain *domain;
	} qos;

	struct mlx5_esw_bridge_offloads *br_offloads;
	struct mlx5_esw_offload offloads;
	int                     mode;
	u16                     manager_vport;
	u16                     first_host_vport;
	u8			num_peers;
	struct mlx5_esw_functions esw_funcs;
	struct {
		u32             large_group_num;
	}  params;
	struct blocking_notifier_head n_head;
	struct xarray paired;
	struct mlx5_devcom_comp_dev *devcom;
	u16 enabled_ipsec_vf_count;
	bool eswitch_operation_in_progress;
};

void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup(struct mlx5_eswitch *esw);
int esw_offloads_init(struct mlx5_eswitch *esw);

struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule);

bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw);
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);

int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps);

/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);

#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key);
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw);
bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 u16 vport, int link_state);
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				u16 vport, u16 vlan, u8 qos);
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    u16 vport, bool spoofchk);
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 u16 vport_num, bool setting);
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate);
int mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *node,
				     struct netlink_ext_ack *extack);
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  u16 vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 u16 vport,
				 struct ifla_vf_stats *vf_stats);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);

int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport, void *in);

struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
struct mlx5_termtbl_handle;

bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
			      struct mlx5_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_esw_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_destination *dest,
			      int num_dest);

void
mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
			 struct mlx5_termtbl_handle *tt);

void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr);

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest);

enum {
	SET_VLAN_STRIP	= BIT(0),
	SET_VLAN_INSERT	= BIT(1)
};

enum mlx5_flow_match_level {
	MLX5_MATCH_NONE	= MLX5_INLINE_MODE_NONE,
	MLX5_MATCH_L2	= MLX5_INLINE_MODE_L2,
	MLX5_MATCH_L3	= MLX5_INLINE_MODE_IP,
	MLX5_MATCH_L4	= MLX5_INLINE_MODE_TCP_UDP,
};

/* current maximum for flow-based vport multicasting */
#define MLX5_MAX_FLOW_FWD_VPORTS 32

enum {
	MLX5_ESW_DEST_ENCAP         = BIT(0),
	MLX5_ESW_DEST_ENCAP_VALID   = BIT(1),
	MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE  = BIT(2),
};

struct mlx5_esw_flow_attr {
	struct mlx5_eswitch_rep *in_rep;
	struct mlx5_core_dev	*in_mdev;
	struct mlx5_core_dev    *counter_dev;
	struct mlx5e_tc_int_port *dest_int_port;
	struct mlx5e_tc_int_port *int_port;

	int split_count;
	int out_count;

	__be16	vlan_proto[MLX5_FS_VLAN_DEPTH];
	u16	vlan_vid[MLX5_FS_VLAN_DEPTH];
	u8	vlan_prio[MLX5_FS_VLAN_DEPTH];
	u8	total_vlan;
	struct {
		u32 flags;
		bool vport_valid;
		u16 vport;
		struct mlx5_pkt_reformat *pkt_reformat;
		struct mlx5_core_dev *mdev;
		struct mlx5_termtbl_handle *termtbl;
		int src_port_rewrite_act_id;
	} dests[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5_rx_tun_attr *rx_tun_attr;
	struct ethhdr eth;
	struct mlx5_pkt_reformat *decap_pkt_reformat;
};

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap);
int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
				     u8 *hw_addr, int *hw_addr_len,
				     struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port,
				     const u8 *hw_addr, int hw_addr_len,
				     struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled,
					struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
					struct netlink_ext_ack *extack);
#ifdef CONFIG_XFRM_OFFLOAD
int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled,
					  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_ipsec_crypto_set(struct devlink_port *port, bool enable,
					  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled,
					  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port, bool enable,
					  struct netlink_ext_ack *extack);
#endif /* CONFIG_XFRM_OFFLOAD */
int mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port,
					u32 *max_io_eqs,
					struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port,
					u32 max_io_eqs,
					struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_max_io_eqs_set_sf_default(struct devlink_port *port,
						   struct netlink_ext_ack *extack);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);

int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  u16 vport, u16 vlan, u8 qos, u8 set_flags);

static inline bool esw_vst_mode_is_steering(struct mlx5_eswitch *esw)
{
	return (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, pop_vlan) &&
		MLX5_CAP_ESW_INGRESS_ACL(esw->dev, push_vlan));
}

static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
						       u8 vlan_depth)
{
	bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
		   MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);

	if (vlan_depth == 1)
		return ret;

	return  ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) &&
		MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}
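
/* Illustrative usage (hypothetical caller): gate a rule that pushes two
 * VLAN headers on double push/pop support:
 *
 *	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 2))
 *		return -EOPNOTSUPP;
 */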

bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
			       struct mlx5_core_dev *dev1);

const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);

#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

#define esw_info(__dev, format, ...)			\
	dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_warn(__dev, format, ...)			\
	dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_debug(dev, format, ...)				\
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)

static inline bool mlx5_esw_allowed(const struct mlx5_eswitch *esw)
{
	return esw && MLX5_ESWITCH_MANAGER(esw->dev);
}

static inline bool
mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return esw->manager_vport == vport_num;
}

static inline bool mlx5_esw_is_owner(struct mlx5_eswitch *esw, u16 vport_num,
				     u16 esw_owner_vhca_id)
{
	return esw_owner_vhca_id == MLX5_CAP_GEN(esw->dev, vhca_id) ||
		(vport_num == MLX5_VPORT_UPLINK && mlx5_lag_is_master(esw->dev));
}

static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}

static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev);
}

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
}

static inline u16
mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
{
	return dl_port_index & 0xffff;
}
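
/* Worked example (illustrative numbers): vhca_id 0x12 and vport 5 encode
 * to devlink port index (0x12 << 16) | 5 == 0x00120005; the inverse helper
 * masks off the low 16 bits and recovers vport 5.
 */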

static inline bool mlx5_esw_is_fdb_created(struct mlx5_eswitch *esw)
{
	return esw->fdb_table.flags & MLX5_ESW_FDB_CREATED;
}

/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);

/* Each mark identifies an eswitch vport type.
 * MLX5_ESW_VPT_HOST_FN is used to identify both PF and VF ports using
 * a single mark.
 * MLX5_ESW_VPT_VF identifies an SRIOV VF vport.
 * MLX5_ESW_VPT_SF identifies an SF vport.
 */
#define MLX5_ESW_VPT_HOST_FN XA_MARK_0
#define MLX5_ESW_VPT_VF XA_MARK_1
#define MLX5_ESW_VPT_SF XA_MARK_2

/* The vport iterators are valid only after the vports are initialized in
 * mlx5_eswitch_init. The idea is borrowed from xa_for_each_marked(), with
 * added support for a desired last element.
 */

#define mlx5_esw_for_each_vport(esw, index, vport) \
	xa_for_each(&((esw)->vports), index, vport)

#define mlx5_esw_for_each_entry_marked(xa, index, entry, last, filter)	\
	for (index = 0, entry = xa_find(xa, &index, last, filter); \
	     entry; entry = xa_find_after(xa, &index, last, filter))

#define mlx5_esw_for_each_vport_marked(esw, index, vport, last, filter)	\
	mlx5_esw_for_each_entry_marked(&((esw)->vports), index, vport, last, filter)

#define mlx5_esw_for_each_vf_vport(esw, index, vport, last)	\
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_VF)

#define mlx5_esw_for_each_host_func_vport(esw, index, vport, last)	\
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_HOST_FN)
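
/* Illustrative sketch (hypothetical snippet): walk all VF vports up to an
 * inclusive last index, the idiom the "last" parameter is designed for:
 *
 *	unsigned long i;
 *	struct mlx5_vport *vport;
 *
 *	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
 *		if (vport->enabled)
 *			... operate on vport ...
 *	}
 */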

/* This macro should only be used if EC SRIOV is enabled.
 *
 * Because there were no more marks available on the xarray, this uses a
 * for_each_range approach. The range is only valid when EC SRIOV is enabled.
 */
#define mlx5_esw_for_each_ec_vf_vport(esw, index, vport, last)		\
	xa_for_each_range(&((esw)->vports),				\
			  index,					\
			  vport,					\
			  MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base),	\
			  MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\
			  (last) - 1)

#define mlx5_esw_for_each_rep(esw, i, rep) \
	xa_for_each(&((esw)->offloads.vport_reps), i, rep)

struct mlx5_eswitch *__must_check
mlx5_devlink_eswitch_get(struct devlink *devlink);

struct mlx5_eswitch *mlx5_devlink_eswitch_nocheck_get(struct devlink *devlink);

struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);

bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_eswitch_is_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);

int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);

int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
			  enum mlx5_eswitch_vport_event enabled_events);
void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport);
void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport);

struct esw_vport_tbl_namespace {
	int max_fte;
	int max_num_groups;
	u32 flags;
};

struct mlx5_vport_tbl_attr {
	u32 chain;
	u16 prio;
	u16 vport;
	struct esw_vport_tbl_namespace *vport_ns;
};

struct mlx5_flow_table *
mlx5_esw_vporttbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);
void
mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);

struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);

void mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					 u32 *flow_group_in,
					 int match_params);

void mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
				   u16 vport,
				   struct mlx5_flow_spec *spec);

int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_init_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
				  struct mlx5_devlink_port *dl_port,
				  u32 controller, u32 sfnum);
void mlx5_esw_offloads_cleanup_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_eswitch_load_sf_vport(struct mlx5_eswitch *esw, u16 vport_num,
			       enum mlx5_eswitch_vport_event enabled_events,
			       struct mlx5_devlink_port *dl_port, u32 controller, u32 sfnum);
void mlx5_eswitch_unload_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);

int mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport);
void mlx5_esw_offloads_pf_vf_devlink_port_cleanup(struct mlx5_eswitch *esw,
						  struct mlx5_vport *vport);

int mlx5_esw_offloads_sf_devlink_port_init(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					   struct mlx5_devlink_port *dl_port,
					   u32 controller, u32 sfnum);
void mlx5_esw_offloads_sf_devlink_port_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_vport *vport);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);

int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num);

/**
 * struct mlx5_esw_event_info - Indicates that the eswitch mode has changed
 * or is changing.
 *
 * @new_mode: New mode of the eswitch.
 */
struct mlx5_esw_event_info {
	u16 new_mode;
};

int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n);
void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n);

bool mlx5_esw_hold(struct mlx5_core_dev *dev);
void mlx5_esw_release(struct mlx5_core_dev *dev);
void mlx5_esw_get(struct mlx5_core_dev *dev);
void mlx5_esw_put(struct mlx5_core_dev *dev);
int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
int mlx5_esw_lock(struct mlx5_eswitch *esw);
void mlx5_esw_unlock(struct mlx5_eswitch *esw);

void esw_vport_change_handle_locked(struct mlx5_vport *vport);

bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller);

int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
					     struct mlx5_eswitch *slave_esw, int max_slaves);
void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
					      struct mlx5_eswitch *slave_esw);
int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw);

bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);

int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev);

static inline int mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw)
{
	if (mlx5_esw_allowed(esw))
		return esw->esw_funcs.num_vfs;

	return 0;
}

static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw)
{
	if (mlx5_esw_allowed(esw))
		return esw->num_peers;
	return 0;
}

static inline struct mlx5_flow_table *
mlx5_eswitch_get_slow_fdb(struct mlx5_eswitch *esw)
{
	return esw->fdb_table.offloads.slow_fdb;
}

int mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule,
				    struct mlx5_esw_flow_attr *esw_attr, int attr_idx);
bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev);
bool mlx5_esw_ipsec_vf_offload_supported(struct mlx5_core_dev *dev);
int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev,
				  struct mlx5_vport *vport);
int mlx5_esw_ipsec_vf_crypto_offload_supported(struct mlx5_core_dev *dev,
					       u16 vport_num);
int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					 bool enable);
int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					 bool enable);
int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
					       u16 vport_num);
#else  /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int  mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key) {}
static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {}
static inline bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw) { return false; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return vport_num;
}

static inline int
mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
					 struct mlx5_eswitch *slave_esw, int max_slaves)
{
	return 0;
}

static inline void
mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
					 struct mlx5_eswitch *slave_esw) {}

static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw) { return 0; }

static inline int
mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
{
	return 0;
}

static inline bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
{
	return true;
}

static inline void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
{
}

static inline int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev) {}
static inline bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev)
{
	return false;
}

static inline void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev) {}
#endif /* CONFIG_MLX5_ESWITCH */

#endif /* __MLX5_ESWITCH_H__ */