/*
 * Copyright (c) 2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_ESWITCH_H__
#define __MLX5_ESWITCH_H__

#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/xarray.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
#include "lib/fs_chains.h"
#include "sf/sf.h"
#include "en/tc_ct.h"
#include "en/tc/sample.h"

enum mlx5_mapped_obj_type {
	MLX5_MAPPED_OBJ_CHAIN,
	MLX5_MAPPED_OBJ_SAMPLE,
	MLX5_MAPPED_OBJ_INT_PORT_METADATA,
	MLX5_MAPPED_OBJ_ACT_MISS,
};

struct mlx5_mapped_obj {
	enum mlx5_mapped_obj_type type;
	union {
		u32 chain;
		u64 act_miss_cookie;
		struct {
			u32 group_id;
			u32 rate;
			u32 trunc_size;
			u32 tunnel_id;
		} sample;
		u32 int_port_metadata;
	};
};

#ifdef CONFIG_MLX5_ESWITCH

#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15

#define MLX5_MAX_UC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))

#define MLX5_MAX_MC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))

#define mlx5_esw_has_fwd_fdb(dev) \
	MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)

#define esw_chains(esw) \
	((esw)->fdb_table.offloads.esw_chains_priv)

enum {
	MAPPING_TYPE_CHAIN,
	MAPPING_TYPE_TUNNEL,
	MAPPING_TYPE_TUNNEL_ENC_OPTS,
	MAPPING_TYPE_LABELS,
	MAPPING_TYPE_ZONE,
	MAPPING_TYPE_INT_PORT,
};

struct vport_ingress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allow_rule;
	struct {
		struct mlx5_flow_group *allow_spoofchk_only_grp;
		struct mlx5_flow_group *allow_untagged_spoofchk_grp;
		struct mlx5_flow_group *allow_untagged_only_grp;
		struct mlx5_flow_group *drop_grp;
		struct mlx5_flow_handle *drop_rule;
		struct mlx5_fc *drop_counter;
	} legacy;
	struct {
		/* Optional group to add an FTE to do internal priority
		 * tagging on ingress packets.
		 */
		struct mlx5_flow_group *metadata_prio_tag_grp;
		/* Group to add default match-all FTE entry to tag ingress
		 * packet with metadata.
		 */
		struct mlx5_flow_group *metadata_allmatch_grp;
		/* Optional group to add a drop all rule */
		struct mlx5_flow_group *drop_grp;
		struct mlx5_modify_hdr *modify_metadata;
		struct mlx5_flow_handle *modify_metadata_rule;
		struct mlx5_flow_handle *drop_rule;
	} offloads;
};

enum vport_egress_acl_type {
	VPORT_EGRESS_ACL_TYPE_DEFAULT,
	VPORT_EGRESS_ACL_TYPE_SHARED_FDB,
};

struct vport_egress {
	struct mlx5_flow_table *acl;
	enum vport_egress_acl_type type;
	struct mlx5_flow_handle *allowed_vlan;
	struct mlx5_flow_group *vlan_grp;
	union {
		struct {
			struct mlx5_flow_group *drop_grp;
			struct mlx5_flow_handle *drop_rule;
			struct mlx5_fc *drop_counter;
		} legacy;
		struct {
			struct mlx5_flow_group *fwd_grp;
			struct mlx5_flow_handle *fwd_rule;
			struct xarray bounce_rules;
			struct mlx5_flow_group *bounce_grp;
		} offloads;
	};
};

struct mlx5_vport_drop_stats {
	u64 rx_dropped;
	u64 tx_dropped;
};

struct mlx5_vport_info {
	u8 mac[ETH_ALEN];
	u16 vlan;
	u64 node_guid;
	int link_state;
	u8 qos;
	u8 spoofchk: 1;
	u8 trusted: 1;
	u8 roce_enabled: 1;
	u8 mig_enabled: 1;
	u8 ipsec_crypto_enabled: 1;
	u8 ipsec_packet_enabled: 1;
};

/* Vport context events */
enum mlx5_eswitch_vport_event {
	MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
	MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
	MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};

struct mlx5_vport;

struct mlx5_devlink_port {
	struct devlink_port dl_port;
	struct mlx5_vport *vport;
};

static inline void mlx5_devlink_port_init(struct mlx5_devlink_port *dl_port,
					  struct mlx5_vport *vport)
{
	dl_port->vport = vport;
}

static inline struct mlx5_devlink_port *mlx5_devlink_port_get(struct devlink_port *dl_port)
{
	return container_of(dl_port, struct mlx5_devlink_port, dl_port);
}

static inline struct mlx5_vport *mlx5_devlink_port_vport_get(struct devlink_port *dl_port)
{
	return mlx5_devlink_port_get(dl_port)->vport;
}
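
/* Illustrative sketch (not part of the original header): devlink port
 * callbacks receive a struct devlink_port that is embedded in
 * struct mlx5_devlink_port, so the owning vport can be recovered with the
 * helpers above. The callback name below is hypothetical.
 *
 *	static int example_port_fn_cb(struct devlink_port *port)
 *	{
 *		struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
 *
 *		return vport->enabled ? 0 : -EOPNOTSUPP;
 *	}
 */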

#define MLX5_VHCA_ID_INVALID (-1)

#define MLX5_VPORT_INVAL_VHCA_ID(vport) \
	((vport)->vhca_id == MLX5_VHCA_ID_INVALID)

struct mlx5_vport {
	struct mlx5_core_dev *dev;
	struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct mlx5_flow_handle *promisc_rule;
	struct mlx5_flow_handle *allmulti_rule;
	struct work_struct vport_change_handler;

	struct vport_ingress ingress;
	struct vport_egress egress;
	u32 default_metadata;
	u32 metadata;
	int vhca_id;

	bool adjacent; /* delegated vhca from adjacent function */
	struct {
		u16 parent_pci_devfn; /* Adjacent parent PCI device function */
		u16 function_id; /* Function ID of the delegated VPort */
	} adj_info;

	struct mlx5_vport_info info;

	/* Protected with the E-Switch qos domain lock. The Vport QoS can
	 * either be disabled (sched_node is NULL) or in one of three states:
	 * 1. Regular QoS (sched_node is a vport node).
	 * 2. TC QoS enabled on the vport (sched_node is a TC arbiter).
	 * 3. TC QoS enabled on the vport's parent node
	 *    (sched_node is a rate limit node).
	 * When TC is enabled in either mode, the vport owns vport TC scheduling
	 * nodes.
	 */
	struct {
		/* Vport scheduling node. */
		struct mlx5_esw_sched_node *sched_node;
		/* Array of vport traffic class scheduling nodes. */
		struct mlx5_esw_sched_node **sched_nodes;
	} qos;

	u16 vport;
	bool enabled;
	bool max_eqs_set;
	bool pf_activated;
	enum mlx5_eswitch_vport_event enabled_events;
	int index;
	struct mlx5_devlink_port *dl_port;
};
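
/* Usage sketch for the vport QoS states documented above (illustrative only,
 * not from the original source): QoS is enabled on a vport exactly when its
 * scheduling node is set. Callers are assumed to hold the E-Switch qos
 * domain lock; the helper name is hypothetical.
 *
 *	static bool example_vport_qos_enabled(const struct mlx5_vport *vport)
 *	{
 *		return vport->qos.sched_node != NULL;
 *	}
 */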

struct mlx5_esw_indir_table;

struct mlx5_eswitch_fdb {
	union {
		struct legacy_fdb {
			struct mlx5_flow_table *fdb;
			struct mlx5_flow_group *addr_grp;
			struct mlx5_flow_group *allmulti_grp;
			struct mlx5_flow_group *promisc_grp;
			struct mlx5_flow_table *vepa_fdb;
			struct mlx5_flow_handle *vepa_uplink_rule;
			struct mlx5_flow_handle *vepa_star_rule;
		} legacy;

		struct offloads_fdb {
			struct mlx5_flow_namespace *ns;
			struct mlx5_flow_table *drop_root;
			struct mlx5_flow_handle *drop_root_rule;
			struct mlx5_fc *drop_root_fc;
			struct mlx5_flow_table *tc_miss_table;
			struct mlx5_flow_table *slow_fdb;
			struct mlx5_flow_group *send_to_vport_grp;
			struct mlx5_flow_group *send_to_vport_meta_grp;
			struct mlx5_flow_group *peer_miss_grp;
			struct mlx5_flow_handle **peer_miss_rules[MLX5_MAX_PORTS];
			struct mlx5_flow_group *miss_grp;
			struct mlx5_flow_handle **send_to_vport_meta_rules;
			struct mlx5_flow_handle *miss_rule_uni;
			struct mlx5_flow_handle *miss_rule_multi;

			struct mlx5_fs_chains *esw_chains_priv;
			struct {
				DECLARE_HASHTABLE(table, 8);
				/* Protects vports.table */
				struct mutex lock;
			} vports;

			struct mlx5_esw_indir_table *indir;

		} offloads;
	};
	u32 flags;
};

struct mlx5_esw_offload {
	struct mlx5_flow_table *ft_offloads_restore;
	struct mlx5_flow_group *restore_group;
	struct mlx5_modify_hdr *restore_copy_hdr_id;
	struct mapping_ctx *reg_c0_obj_pool;

	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_group *vport_rx_group;
	struct mlx5_flow_group *vport_rx_drop_group;
	struct mlx5_flow_handle *vport_rx_drop_rule;
	struct mlx5_flow_table *ft_ipsec_tx_pol;
	struct xarray vport_reps;
	struct list_head peer_flows[MLX5_MAX_PORTS];
	struct mutex peer_mutex;
	struct mutex encap_tbl_lock; /* protects encap_tbl */
	DECLARE_HASHTABLE(encap_tbl, 8);
	struct mutex decap_tbl_lock; /* protects decap_tbl */
	DECLARE_HASHTABLE(decap_tbl, 8);
	struct mod_hdr_tbl mod_hdr;
	DECLARE_HASHTABLE(termtbl_tbl, 8);
	struct mutex termtbl_mutex; /* protects termtbl hash */
	struct xarray vhca_map;
	const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
	u8 inline_mode;
	atomic64_t num_flows;
	u64 num_block_encap;
	u64 num_block_mode;
	enum devlink_eswitch_encap_mode encap;
	struct ida vport_metadata_ida;
	unsigned int host_number; /* ECPF supports one external host */
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node node;
	struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
	u32 refcnt;
};

struct mlx5_host_work {
	struct work_struct work;
	struct mlx5_eswitch *esw;
	int work_gen;
};

struct mlx5_esw_functions {
	struct mlx5_nb nb;
	atomic_t generation;
	bool host_funcs_disabled;
	u16 num_vfs;
	u16 num_ec_vfs;
};

enum {
	MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
	MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
	MLX5_ESWITCH_VPORT_ACL_NS_CREATED = BIT(2),
};

struct mlx5_esw_bridge_offloads;

enum {
	MLX5_ESW_FDB_CREATED = BIT(0),
};

struct dentry;
struct mlx5_qos_domain;

struct mlx5_eswitch {
	struct mlx5_core_dev *dev;
	struct mlx5_nb nb;
	struct mlx5_eswitch_fdb fdb_table;
	/* legacy data structures */
	struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
	struct esw_mc_addr mc_promisc;
	/* end of legacy */
	struct dentry *debugfs_root;
	struct workqueue_struct *work_queue;
	struct xarray vports;
	u32 flags;
	int total_vports;
	int enabled_vports;
	/* Synchronize between vport change events
	 * and async SRIOV admin state changes
	 */
	struct mutex state_lock;

	/* Protects eswitch mode change that occurs via one or more
	 * user commands, i.e. sriov state change, devlink commands.
	 */
	struct rw_semaphore mode_lock;
	atomic64_t user_count;

	/* Protected with the E-Switch qos domain lock. */
	struct {
		/* Initially 0, meaning no QoS users and QoS is disabled. */
		refcount_t refcnt;
		u32 root_tsar_ix;
		struct mlx5_qos_domain *domain;
	} qos;

	struct mlx5_esw_bridge_offloads *br_offloads;
	struct mlx5_esw_offload offloads;
	u32 last_vport_idx;
	int mode;
	bool offloads_inactive;
	u16 manager_vport;
	u16 first_host_vport;
	u8 num_peers;
	struct mlx5_esw_functions esw_funcs;
	struct {
		u32 large_group_num;
	} params;
	struct xarray paired;
	struct mlx5_devcom_comp_dev *devcom;
	u16 enabled_ipsec_vf_count;
	bool eswitch_operation_in_progress;
};

void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup(struct mlx5_eswitch *esw);
int esw_offloads_init(struct mlx5_eswitch *esw);

struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule);

bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw);
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);

int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps);

/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw, int index, u16 vport_num);
void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw,
				   const struct mlx5_devcom_match_attr *attr);
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw);
bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 u16 vport, int link_state);
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				u16 vport, u16 vlan, u8 qos);
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    u16 vport, bool spoofchk);
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 u16 vport_num, bool setting);
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate);
int mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *node,
				     struct netlink_ext_ack *extack);
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  u16 vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 u16 vport,
				 struct ifla_vf_stats *vf_stats);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);

int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport, void *in);

struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
struct mlx5_termtbl_handle;

bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
			      struct mlx5_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_esw_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_destination *dest,
			      int num_dest);

void
mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
			 struct mlx5_termtbl_handle *tt);

void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr);

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest);

enum {
	SET_VLAN_STRIP = BIT(0),
	SET_VLAN_INSERT = BIT(1)
};

enum mlx5_flow_match_level {
	MLX5_MATCH_NONE = MLX5_INLINE_MODE_NONE,
	MLX5_MATCH_L2 = MLX5_INLINE_MODE_L2,
	MLX5_MATCH_L3 = MLX5_INLINE_MODE_IP,
	MLX5_MATCH_L4 = MLX5_INLINE_MODE_TCP_UDP,
};

/* current maximum for flow based vport multicasting */
#define MLX5_MAX_FLOW_FWD_VPORTS 32

enum {
	MLX5_ESW_DEST_ENCAP = BIT(0),
	MLX5_ESW_DEST_ENCAP_VALID = BIT(1),
	MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE = BIT(2),
};

struct mlx5_esw_flow_attr {
	struct mlx5_eswitch_rep *in_rep;
	struct mlx5_core_dev *in_mdev;
	struct mlx5_core_dev *counter_dev;
	struct mlx5e_tc_int_port *dest_int_port;
	struct mlx5e_tc_int_port *int_port;

	int split_count;
	int out_count;

	__be16 vlan_proto[MLX5_FS_VLAN_DEPTH];
	u16 vlan_vid[MLX5_FS_VLAN_DEPTH];
	u8 vlan_prio[MLX5_FS_VLAN_DEPTH];
	u8 total_vlan;
	struct {
		u32 flags;
		bool vport_valid;
		u16 vport;
		struct mlx5_pkt_reformat *pkt_reformat;
		struct mlx5_core_dev *mdev;
		struct mlx5_termtbl_handle *termtbl;
		int src_port_rewrite_act_id;
	} dests[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5_rx_tun_attr *rx_tun_attr;
	struct ethhdr eth;
	struct mlx5_pkt_reformat *decap_pkt_reformat;
};

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap);
int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
				     u8 *hw_addr, int *hw_addr_len,
				     struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port,
				     const u8 *hw_addr, int hw_addr_len,
				     struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled,
					struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
					struct netlink_ext_ack *extack);
int mlx5_devlink_pf_port_fn_state_get(struct devlink_port *port,
				      enum devlink_port_fn_state *state,
				      enum devlink_port_fn_opstate *opstate,
				      struct netlink_ext_ack *extack);
int mlx5_devlink_pf_port_fn_state_set(struct devlink_port *port,
				      enum devlink_port_fn_state state,
				      struct netlink_ext_ack *extack);
#ifdef CONFIG_XFRM_OFFLOAD
int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled,
					  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_ipsec_crypto_set(struct devlink_port *port, bool enable,
					  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled,
					  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port, bool enable,
					  struct netlink_ext_ack *extack);
#endif /* CONFIG_XFRM_OFFLOAD */
int mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port,
					u32 *max_io_eqs,
					struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port,
					u32 max_io_eqs,
					struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_max_io_eqs_set_sf_default(struct devlink_port *port,
						   struct netlink_ext_ack *extack);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);

int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  u16 vport, u16 vlan, u8 qos, u8 set_flags);

static inline bool esw_vst_mode_is_steering(struct mlx5_eswitch *esw)
{
	return (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, pop_vlan) &&
		MLX5_CAP_ESW_INGRESS_ACL(esw->dev, push_vlan));
}

static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
						       u8 vlan_depth)
{
	bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
		   MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);

	if (vlan_depth == 1)
		return ret;

	return ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) &&
		MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}

bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
			       struct mlx5_core_dev *dev1);

const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);
int mlx5_esw_host_pf_enable_hca(struct mlx5_core_dev *dev);
int mlx5_esw_host_pf_disable_hca(struct mlx5_core_dev *dev);

void mlx5_esw_adjacent_vhcas_setup(struct mlx5_eswitch *esw);
void mlx5_esw_adjacent_vhcas_cleanup(struct mlx5_eswitch *esw);
int mlx5_esw_adj_vport_modify(struct mlx5_core_dev *dev, u16 vport,
			      bool connect);

#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

#define esw_info(__dev, format, ...) \
	dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_warn(__dev, format, ...) \
	dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_debug(dev, format, ...) \
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)

static inline bool mlx5_esw_allowed(const struct mlx5_eswitch *esw)
{
	return esw && MLX5_ESWITCH_MANAGER(esw->dev);
}

static inline bool
mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return esw->manager_vport == vport_num;
}

static inline bool mlx5_esw_is_owner(struct mlx5_eswitch *esw, u16 vport_num,
				     u16 esw_owner_vhca_id)
{
	return esw_owner_vhca_id == MLX5_CAP_GEN(esw->dev, vhca_id) ||
		(vport_num == MLX5_VPORT_UPLINK && mlx5_lag_is_master(esw->dev));
}

static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}

static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev);
}

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
}

static inline u16
mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
{
	return dl_port_index & 0xffff;
}
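
/* Illustrative note (not part of the original header): the devlink port
 * index packs the vhca_id into the upper 16 bits and the vport number into
 * the lower 16 bits, so the round trip is a simple mask:
 *
 *	unsigned int idx = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
 *	u16 back = mlx5_esw_devlink_port_index_to_vport_num(idx);
 *
 * Here "back" equals the original vport_num.
 */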

static inline bool mlx5_esw_is_fdb_created(struct mlx5_eswitch *esw)
{
	return esw->fdb_table.flags & MLX5_ESW_FDB_CREATED;
}

/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);

/* Each mark identifies an eswitch vport type.
 * MLX5_ESW_VPT_HOST_FN is used to identify both PF and VF ports using
 * a single mark.
 * MLX5_ESW_VPT_VF identifies a SRIOV VF vport.
 * MLX5_ESW_VPT_SF identifies an SF vport.
 */
#define MLX5_ESW_VPT_HOST_FN XA_MARK_0
#define MLX5_ESW_VPT_VF XA_MARK_1
#define MLX5_ESW_VPT_SF XA_MARK_2

/* The vport iterator is valid only after vports are initialized in mlx5_eswitch_init.
 * Borrowed the idea from xa_for_each_marked() but with support for desired last element.
 */

#define mlx5_esw_for_each_vport(esw, index, vport) \
	xa_for_each(&((esw)->vports), index, vport)

#define mlx5_esw_for_each_entry_marked(xa, index, entry, last, filter) \
	for (index = 0, entry = xa_find(xa, &index, last, filter); \
	     entry; entry = xa_find_after(xa, &index, last, filter))

#define mlx5_esw_for_each_vport_marked(esw, index, vport, last, filter) \
	mlx5_esw_for_each_entry_marked(&((esw)->vports), index, vport, last, filter)

#define mlx5_esw_for_each_vf_vport(esw, index, vport, last) \
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_VF)

#define mlx5_esw_for_each_host_func_vport(esw, index, vport, last) \
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_HOST_FN)

/* This macro should only be used if EC SRIOV is enabled.
 *
 * Because there were no more marks available on the xarray, this uses a
 * for_each_range approach. The range is only valid when EC SRIOV is enabled.
 */
#define mlx5_esw_for_each_ec_vf_vport(esw, index, vport, last) \
	xa_for_each_range(&((esw)->vports), \
			  index, \
			  vport, \
			  MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base), \
			  MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) + \
			  (last) - 1)

#define mlx5_esw_for_each_rep(esw, i, rep) \
	xa_for_each(&((esw)->offloads.vport_reps), i, rep)
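
/* Illustrative sketch (not part of the original header): iterating all
 * currently initialized vports with the generic iterator above. The index
 * variable is the xarray index and must be an unsigned long.
 *
 *	struct mlx5_vport *vport;
 *	unsigned long i;
 *
 *	mlx5_esw_for_each_vport(esw, i, vport) {
 *		if (!vport->enabled)
 *			continue;
 *		esw_debug(esw->dev, "vport %d is enabled\n", vport->vport);
 *	}
 */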

struct mlx5_eswitch *__must_check
mlx5_devlink_eswitch_get(struct devlink *devlink);

struct mlx5_eswitch *mlx5_devlink_eswitch_nocheck_get(struct devlink *devlink);

struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);

bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_eswitch_is_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);

int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);

int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
			  enum mlx5_eswitch_vport_event enabled_events);
void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport);
void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport);

struct esw_vport_tbl_namespace {
	int max_fte;
	int max_num_groups;
	u32 flags;
};

struct mlx5_vport_tbl_attr {
	u32 chain;
	u16 prio;
	u16 vport;
	struct esw_vport_tbl_namespace *vport_ns;
};

struct mlx5_flow_table *
mlx5_esw_vporttbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);
void
mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);

struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);

void mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					 u32 *flow_group_in,
					 int match_params);

void mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
				   u16 vport,
				   struct mlx5_flow_spec *spec);

int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_init_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
				  struct mlx5_devlink_port *dl_port,
				  u32 controller, u32 sfnum);
void mlx5_esw_offloads_cleanup_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_eswitch_load_sf_vport(struct mlx5_eswitch *esw, u16 vport_num,
			       enum mlx5_eswitch_vport_event enabled_events,
			       struct mlx5_devlink_port *dl_port, u32 controller, u32 sfnum);
void mlx5_eswitch_unload_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);

int mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport);
void mlx5_esw_offloads_pf_vf_devlink_port_cleanup(struct mlx5_eswitch *esw,
						  struct mlx5_vport *vport);

int mlx5_esw_offloads_sf_devlink_port_init(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					   struct mlx5_devlink_port *dl_port,
					   u32 controller, u32 sfnum);
void mlx5_esw_offloads_sf_devlink_port_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_vport *vport);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);

int mlx5_esw_vport_vhca_id_map(struct mlx5_eswitch *esw,
			       struct mlx5_vport *vport);
void mlx5_esw_vport_vhca_id_unmap(struct mlx5_eswitch *esw,
				  struct mlx5_vport *vport);
int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num);
bool mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id);

void mlx5_esw_offloads_rep_remove(struct mlx5_eswitch *esw,
				  const struct mlx5_vport *vport);
int mlx5_esw_offloads_rep_add(struct mlx5_eswitch *esw,
			      const struct mlx5_vport *vport);

/**
 * struct mlx5_esw_event_info - Indicates eswitch mode changed/changing.
 *
 * @new_mode: New mode of eswitch.
 */
struct mlx5_esw_event_info {
	u16 new_mode;
};

int mlx5_esw_event_notifier_register(struct mlx5_core_dev *dev,
				     struct notifier_block *n);
void mlx5_esw_event_notifier_unregister(struct mlx5_core_dev *dev,
					struct notifier_block *n);
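
/* Illustrative sketch (not part of the original header), assuming the
 * notifier data pointer carries a struct mlx5_esw_event_info as documented
 * above; the callback name is hypothetical.
 *
 *	static int example_esw_mode_cb(struct notifier_block *nb,
 *				       unsigned long event, void *data)
 *	{
 *		struct mlx5_esw_event_info *info = data;
 *
 *		pr_debug("eswitch mode changing to %u\n", info->new_mode);
 *		return NOTIFY_OK;
 *	}
 */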

bool mlx5_esw_hold(struct mlx5_core_dev *dev);
void mlx5_esw_release(struct mlx5_core_dev *dev);
void mlx5_esw_get(struct mlx5_core_dev *dev);
void mlx5_esw_put(struct mlx5_core_dev *dev);
int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
int mlx5_esw_lock(struct mlx5_eswitch *esw);
void mlx5_esw_unlock(struct mlx5_eswitch *esw);

void esw_vport_change_handle_locked(struct mlx5_vport *vport);

bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller);

int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
					     struct mlx5_eswitch *slave_esw, int max_slaves);
void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
					      struct mlx5_eswitch *slave_esw);
int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw);

bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev, bool from_fdb);
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);

int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev);

static inline int mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw)
{
	if (mlx5_esw_allowed(esw))
		return esw->esw_funcs.num_vfs;

	return 0;
}

static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw)
{
	if (mlx5_esw_allowed(esw))
		return esw->num_peers;
	return 0;
}

static inline struct mlx5_flow_table *
mlx5_eswitch_get_slow_fdb(struct mlx5_eswitch *esw)
{
	return esw->fdb_table.offloads.slow_fdb;
}

int mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule,
				    struct mlx5_esw_flow_attr *esw_attr, int attr_idx);
bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev);
bool mlx5_esw_ipsec_vf_offload_supported(struct mlx5_core_dev *dev);
int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev,
				  struct mlx5_vport *vport);
int mlx5_esw_ipsec_vf_crypto_offload_supported(struct mlx5_core_dev *dev,
					       u16 vport_num);
int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					 bool enable);
int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					 bool enable);
int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
					       u16 vport_num);
bool mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev);
void mlx5_eswitch_safe_aux_devs_remove(struct mlx5_core_dev *dev);
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
static inline void
mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw,
			      const struct mlx5_devcom_match_attr *attr) {}
static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {}
static inline bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw) { return false; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return vport_num;
}

static inline int
mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
					 struct mlx5_eswitch *slave_esw, int max_slaves)
{
	return 0;
}

static inline void
mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
					 struct mlx5_eswitch *slave_esw) {}

static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw) { return 0; }

static inline int
mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
{
	return 0;
}

static inline bool
mlx5_eswitch_block_encap(struct mlx5_core_dev *dev, bool from_fdb)
{
	return true;
}

static inline void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
{
}

static inline int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev) {}
static inline bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev)
{
	return false;
}

static inline void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev) {}

static inline bool
mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev)
{
	return true;
}

static inline bool
mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id)
{
	return false;
}

static inline void
mlx5_eswitch_safe_aux_devs_remove(struct mlx5_core_dev *dev) {}

#endif /* CONFIG_MLX5_ESWITCH */

#endif /* __MLX5_ESWITCH_H__ */