/*
 * Copyright (c) 2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_ESWITCH_H__
#define __MLX5_ESWITCH_H__

#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/xarray.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
#include "lib/fs_chains.h"
#include "sf/sf.h"
#include "en/tc_ct.h"
#include "en/tc/sample.h"

enum mlx5_mapped_obj_type {
	MLX5_MAPPED_OBJ_CHAIN,
	MLX5_MAPPED_OBJ_SAMPLE,
	MLX5_MAPPED_OBJ_INT_PORT_METADATA,
	MLX5_MAPPED_OBJ_ACT_MISS,
};

struct mlx5_mapped_obj {
	enum mlx5_mapped_obj_type type;
	union {
		u32 chain;
		u64 act_miss_cookie;
		struct {
			u32 group_id;
			u32 rate;
			u32 trunc_size;
			u32 tunnel_id;
		} sample;
		u32 int_port_metadata;
	};
};
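
/*
 * Illustrative sketch only: mapped objects are stored in the offloads reg_c0
 * mapping pool (esw->offloads.reg_c0_obj_pool) and recovered from a packet's
 * reg_c0 value. Assuming the driver's lib/mapping.h helpers, decoding might
 * look like this (restore_chain() is a hypothetical consumer):
 *
 *	struct mlx5_mapped_obj obj;
 *	int err;
 *
 *	err = mapping_find(esw->offloads.reg_c0_obj_pool, reg_c0_id, &obj);
 *	if (!err && obj.type == MLX5_MAPPED_OBJ_CHAIN)
 *		restore_chain(obj.chain);
 */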

#ifdef CONFIG_MLX5_ESWITCH

#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15

#define MLX5_MAX_UC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))

#define MLX5_MAX_MC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))

#define mlx5_esw_has_fwd_fdb(dev) \
	MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)

#define esw_chains(esw) \
	((esw)->fdb_table.offloads.esw_chains_priv)

enum {
	MAPPING_TYPE_CHAIN,
	MAPPING_TYPE_TUNNEL,
	MAPPING_TYPE_TUNNEL_ENC_OPTS,
	MAPPING_TYPE_LABELS,
	MAPPING_TYPE_ZONE,
	MAPPING_TYPE_INT_PORT,
};

struct vport_ingress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allow_rule;
	struct {
		struct mlx5_flow_group *allow_spoofchk_only_grp;
		struct mlx5_flow_group *allow_untagged_spoofchk_grp;
		struct mlx5_flow_group *allow_untagged_only_grp;
		struct mlx5_flow_group *drop_grp;
		struct mlx5_flow_handle *drop_rule;
		struct mlx5_fc *drop_counter;
	} legacy;
	struct {
		/* Optional group to add an FTE to do internal priority
		 * tagging on ingress packets.
		 */
		struct mlx5_flow_group *metadata_prio_tag_grp;
		/* Group to add default match-all FTE entry to tag ingress
		 * packet with metadata.
		 */
		struct mlx5_flow_group *metadata_allmatch_grp;
		/* Optional group to add a drop all rule */
		struct mlx5_flow_group *drop_grp;
		struct mlx5_modify_hdr *modify_metadata;
		struct mlx5_flow_handle *modify_metadata_rule;
		struct mlx5_flow_handle *drop_rule;
	} offloads;
};

enum vport_egress_acl_type {
	VPORT_EGRESS_ACL_TYPE_DEFAULT,
	VPORT_EGRESS_ACL_TYPE_SHARED_FDB,
};

struct vport_egress {
	struct mlx5_flow_table *acl;
	enum vport_egress_acl_type type;
	struct mlx5_flow_handle *allowed_vlan;
	struct mlx5_flow_group *vlan_grp;
	union {
		struct {
			struct mlx5_flow_group *drop_grp;
			struct mlx5_flow_handle *drop_rule;
			struct mlx5_fc *drop_counter;
		} legacy;
		struct {
			struct mlx5_flow_group *fwd_grp;
			struct mlx5_flow_handle *fwd_rule;
			struct xarray bounce_rules;
			struct mlx5_flow_group *bounce_grp;
		} offloads;
	};
};

struct mlx5_vport_drop_stats {
	u64 rx_dropped;
	u64 tx_dropped;
};

struct mlx5_vport_info {
	u8 mac[ETH_ALEN];
	u16 vlan;
	u64 node_guid;
	int link_state;
	u8 qos;
	u8 spoofchk: 1;
	u8 trusted: 1;
	u8 roce_enabled: 1;
	u8 mig_enabled: 1;
	u8 ipsec_crypto_enabled: 1;
	u8 ipsec_packet_enabled: 1;
};

/* Vport context events */
enum mlx5_eswitch_vport_event {
	MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
	MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
	MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};

struct mlx5_vport;

struct mlx5_devlink_port {
	struct devlink_port dl_port;
	struct mlx5_vport *vport;
};

static inline void mlx5_devlink_port_init(struct mlx5_devlink_port *dl_port,
					  struct mlx5_vport *vport)
{
	dl_port->vport = vport;
}

static inline struct mlx5_devlink_port *mlx5_devlink_port_get(struct devlink_port *dl_port)
{
	return container_of(dl_port, struct mlx5_devlink_port, dl_port);
}

static inline struct mlx5_vport *mlx5_devlink_port_vport_get(struct devlink_port *dl_port)
{
	return mlx5_devlink_port_get(dl_port)->vport;
}

#define MLX5_VHCA_ID_INVALID (-1)

#define MLX5_VPORT_INVAL_VHCA_ID(vport) \
	((vport)->vhca_id == MLX5_VHCA_ID_INVALID)

struct mlx5_vport {
	struct mlx5_core_dev *dev;
	struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct mlx5_flow_handle *promisc_rule;
	struct mlx5_flow_handle *allmulti_rule;
	struct work_struct vport_change_handler;

	struct vport_ingress ingress;
	struct vport_egress egress;
	u32 default_metadata;
	u32 metadata;
	int vhca_id;

	bool adjacent; /* delegated vhca from adjacent function */
	struct {
		u16 parent_pci_devfn; /* Adjacent parent PCI device function */
		u16 function_id; /* Function ID of the delegated VPort */
	} adj_info;

	struct mlx5_vport_info info;

	/* Protected with the E-Switch qos domain lock. The Vport QoS can
	 * either be disabled (sched_node is NULL) or in one of three states:
	 * 1. Regular QoS (sched_node is a vport node).
	 * 2. TC QoS enabled on the vport (sched_node is a TC arbiter).
	 * 3. TC QoS enabled on the vport's parent node
	 *    (sched_node is a rate limit node).
	 * When TC is enabled in either mode, the vport owns vport TC
	 * scheduling nodes.
	 */
	struct {
		/* Vport scheduling node. */
		struct mlx5_esw_sched_node *sched_node;
		/* Array of vport traffic class scheduling nodes. */
		struct mlx5_esw_sched_node **sched_nodes;
	} qos;

	u16 vport;
	bool enabled;
	bool max_eqs_set;
	enum mlx5_eswitch_vport_event enabled_events;
	int index;
	struct mlx5_devlink_port *dl_port;
};

struct mlx5_esw_indir_table;

struct mlx5_eswitch_fdb {
	union {
		struct legacy_fdb {
			struct mlx5_flow_table *fdb;
			struct mlx5_flow_group *addr_grp;
			struct mlx5_flow_group *allmulti_grp;
			struct mlx5_flow_group *promisc_grp;
			struct mlx5_flow_table *vepa_fdb;
			struct mlx5_flow_handle *vepa_uplink_rule;
			struct mlx5_flow_handle *vepa_star_rule;
		} legacy;

		struct offloads_fdb {
			struct mlx5_flow_namespace *ns;
			struct mlx5_flow_table *drop_root;
			struct mlx5_flow_handle *drop_root_rule;
			struct mlx5_fc *drop_root_fc;
			struct mlx5_flow_table *tc_miss_table;
			struct mlx5_flow_table *slow_fdb;
			struct mlx5_flow_group *send_to_vport_grp;
			struct mlx5_flow_group *send_to_vport_meta_grp;
			struct mlx5_flow_group *peer_miss_grp;
			struct mlx5_flow_handle **peer_miss_rules[MLX5_MAX_PORTS];
			struct mlx5_flow_group *miss_grp;
			struct mlx5_flow_handle **send_to_vport_meta_rules;
			struct mlx5_flow_handle *miss_rule_uni;
			struct mlx5_flow_handle *miss_rule_multi;

			struct mlx5_fs_chains *esw_chains_priv;
			struct {
				DECLARE_HASHTABLE(table, 8);
				/* Protects vports.table */
				struct mutex lock;
			} vports;

			struct mlx5_esw_indir_table *indir;

		} offloads;
	};
	u32 flags;
};

struct mlx5_esw_offload {
	struct mlx5_flow_table *ft_offloads_restore;
	struct mlx5_flow_group *restore_group;
	struct mlx5_modify_hdr *restore_copy_hdr_id;
	struct mapping_ctx *reg_c0_obj_pool;

	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_group *vport_rx_group;
	struct mlx5_flow_group *vport_rx_drop_group;
	struct mlx5_flow_handle *vport_rx_drop_rule;
	struct mlx5_flow_table *ft_ipsec_tx_pol;
	struct xarray vport_reps;
	struct list_head peer_flows[MLX5_MAX_PORTS];
	struct mutex peer_mutex;
	struct mutex encap_tbl_lock; /* protects encap_tbl */
	DECLARE_HASHTABLE(encap_tbl, 8);
	struct mutex decap_tbl_lock; /* protects decap_tbl */
	DECLARE_HASHTABLE(decap_tbl, 8);
	struct mod_hdr_tbl mod_hdr;
	DECLARE_HASHTABLE(termtbl_tbl, 8);
	struct mutex termtbl_mutex; /* protects termtbl hash */
	struct xarray vhca_map;
	const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
	u8 inline_mode;
	atomic64_t num_flows;
	u64 num_block_encap;
	u64 num_block_mode;
	enum devlink_eswitch_encap_mode encap;
	struct ida vport_metadata_ida;
	unsigned int host_number; /* ECPF supports one external host */
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node node;
	struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
	u32 refcnt;
};

struct mlx5_host_work {
	struct work_struct work;
	struct mlx5_eswitch *esw;
};

struct mlx5_esw_functions {
	struct mlx5_nb nb;
	bool host_funcs_disabled;
	u16 num_vfs;
	u16 num_ec_vfs;
};

enum {
	MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
	MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
	MLX5_ESWITCH_VPORT_ACL_NS_CREATED = BIT(2),
};

struct mlx5_esw_bridge_offloads;

enum {
	MLX5_ESW_FDB_CREATED = BIT(0),
};

struct dentry;
struct mlx5_qos_domain;

struct mlx5_eswitch {
	struct mlx5_core_dev *dev;
	struct mlx5_nb nb;
	struct mlx5_eswitch_fdb fdb_table;
	/* legacy data structures */
	struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
	struct esw_mc_addr mc_promisc;
	/* end of legacy */
	struct dentry *debugfs_root;
	struct workqueue_struct *work_queue;
	struct xarray vports;
	u32 flags;
	int total_vports;
	int enabled_vports;
	/* Synchronize between vport change events
	 * and async SRIOV admin state changes
	 */
	struct mutex state_lock;

	/* Protects eswitch mode change that occurs via one or more
	 * user commands, i.e. sriov state change, devlink commands.
	 */
	struct rw_semaphore mode_lock;
	atomic64_t user_count;

	/* Protected with the E-Switch qos domain lock. */
	struct {
		/* Initially 0, meaning no QoS users and QoS is disabled. */
		refcount_t refcnt;
		u32 root_tsar_ix;
		struct mlx5_qos_domain *domain;
	} qos;

	struct mlx5_esw_bridge_offloads *br_offloads;
	struct mlx5_esw_offload offloads;
	u32 last_vport_idx;
	int mode;
	bool offloads_inactive;
	u16 manager_vport;
	u16 first_host_vport;
	u8 num_peers;
	struct mlx5_esw_functions esw_funcs;
	struct {
		u32 large_group_num;
	} params;
	struct xarray paired;
	struct mlx5_devcom_comp_dev *devcom;
	u16 enabled_ipsec_vf_count;
	bool eswitch_operation_in_progress;
};

void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup(struct mlx5_eswitch *esw);
int esw_offloads_init(struct mlx5_eswitch *esw);

struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule);

bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw);
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);

int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps);

/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw, int index, u16 vport_num);
void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw,
				   const struct mlx5_devcom_match_attr *attr);
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw);
bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 u16 vport, int link_state);
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				u16 vport, u16 vlan, u8 qos);
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    u16 vport, bool spoofchk);
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 u16 vport_num, bool setting);
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate);
int mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *node,
				     struct netlink_ext_ack *extack);
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  u16 vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 u16 vport,
				 struct ifla_vf_stats *vf_stats);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);

int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport, void *in);

struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
struct mlx5_termtbl_handle;

bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
			      struct mlx5_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_esw_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_destination *dest,
			      int num_dest);

void
mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
			 struct mlx5_termtbl_handle *tt);

void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr);
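
/*
 * Illustrative sketch only: the add/del calls above pair up, and the attr
 * used to add a rule is passed back when deleting it. Errors follow the
 * usual ERR_PTR() convention:
 *
 *	struct mlx5_flow_handle *rule;
 *
 *	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
 *	if (IS_ERR(rule))
 *		return PTR_ERR(rule);
 *	...
 *	mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
 */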

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest);

enum {
	SET_VLAN_STRIP = BIT(0),
	SET_VLAN_INSERT = BIT(1)
};

enum mlx5_flow_match_level {
	MLX5_MATCH_NONE = MLX5_INLINE_MODE_NONE,
	MLX5_MATCH_L2 = MLX5_INLINE_MODE_L2,
	MLX5_MATCH_L3 = MLX5_INLINE_MODE_IP,
	MLX5_MATCH_L4 = MLX5_INLINE_MODE_TCP_UDP,
};

/* current maximum for flow based vport multicasting */
#define MLX5_MAX_FLOW_FWD_VPORTS 32

enum {
	MLX5_ESW_DEST_ENCAP = BIT(0),
	MLX5_ESW_DEST_ENCAP_VALID = BIT(1),
	MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE = BIT(2),
};

struct mlx5_esw_flow_attr {
	struct mlx5_eswitch_rep *in_rep;
	struct mlx5_core_dev *in_mdev;
	struct mlx5_core_dev *counter_dev;
	struct mlx5e_tc_int_port *dest_int_port;
	struct mlx5e_tc_int_port *int_port;

	int split_count;
	int out_count;

	__be16 vlan_proto[MLX5_FS_VLAN_DEPTH];
	u16 vlan_vid[MLX5_FS_VLAN_DEPTH];
	u8 vlan_prio[MLX5_FS_VLAN_DEPTH];
	u8 total_vlan;
	struct {
		u32 flags;
		bool vport_valid;
		u16 vport;
		struct mlx5_pkt_reformat *pkt_reformat;
		struct mlx5_core_dev *mdev;
		struct mlx5_termtbl_handle *termtbl;
		int src_port_rewrite_act_id;
	} dests[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5_rx_tun_attr *rx_tun_attr;
	struct ethhdr eth;
	struct mlx5_pkt_reformat *decap_pkt_reformat;
};

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap);
int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
				     u8 *hw_addr, int *hw_addr_len,
				     struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port,
				     const u8 *hw_addr, int hw_addr_len,
				     struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled,
					struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
					struct netlink_ext_ack *extack);
#ifdef CONFIG_XFRM_OFFLOAD
int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled,
					  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_ipsec_crypto_set(struct devlink_port *port, bool enable,
					  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled,
					  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port, bool enable,
					  struct netlink_ext_ack *extack);
#endif /* CONFIG_XFRM_OFFLOAD */
int mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port,
					u32 *max_io_eqs,
					struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port,
					u32 max_io_eqs,
					struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_max_io_eqs_set_sf_default(struct devlink_port *port,
						   struct netlink_ext_ack *extack);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);

int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  u16 vport, u16 vlan, u8 qos, u8 set_flags);

static inline bool esw_vst_mode_is_steering(struct mlx5_eswitch *esw)
{
	return (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, pop_vlan) &&
		MLX5_CAP_ESW_INGRESS_ACL(esw->dev, push_vlan));
}

static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
						       u8 vlan_depth)
{
	bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
		   MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);

	if (vlan_depth == 1)
		return ret;

	return ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) &&
		MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}

bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
			       struct mlx5_core_dev *dev1);

const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);

void mlx5_esw_adjacent_vhcas_setup(struct mlx5_eswitch *esw);
void mlx5_esw_adjacent_vhcas_cleanup(struct mlx5_eswitch *esw);
int mlx5_esw_adj_vport_modify(struct mlx5_core_dev *dev, u16 vport,
			      bool connect);

#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

#define esw_info(__dev, format, ...) \
	dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_warn(__dev, format, ...) \
	dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_debug(dev, format, ...) \
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)

static inline bool mlx5_esw_allowed(const struct mlx5_eswitch *esw)
{
	return esw && MLX5_ESWITCH_MANAGER(esw->dev);
}

static inline bool
mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return esw->manager_vport == vport_num;
}

static inline bool mlx5_esw_is_owner(struct mlx5_eswitch *esw, u16 vport_num,
				     u16 esw_owner_vhca_id)
{
	return esw_owner_vhca_id == MLX5_CAP_GEN(esw->dev, vhca_id) ||
		(vport_num == MLX5_VPORT_UPLINK && mlx5_lag_is_master(esw->dev));
}

static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}

static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev);
}

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
}

static inline u16
mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
{
	return dl_port_index & 0xffff;
}

static inline bool mlx5_esw_is_fdb_created(struct mlx5_eswitch *esw)
{
	return esw->fdb_table.flags & MLX5_ESW_FDB_CREATED;
}

/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);

/* Each mark identifies an eswitch vport type.
 * MLX5_ESW_VPT_HOST_FN is used to identify both PF and VF ports using
 * a single mark.
 * MLX5_ESW_VPT_VF identifies an SRIOV VF vport.
 * MLX5_ESW_VPT_SF identifies an SF vport.
 */
#define MLX5_ESW_VPT_HOST_FN XA_MARK_0
#define MLX5_ESW_VPT_VF XA_MARK_1
#define MLX5_ESW_VPT_SF XA_MARK_2

/* The vport iterators are valid only after vports are initialized in
 * mlx5_eswitch_init. Borrowed the idea from xa_for_each_marked() but with
 * support for a desired last element.
 */

#define mlx5_esw_for_each_vport(esw, index, vport) \
	xa_for_each(&((esw)->vports), index, vport)

#define mlx5_esw_for_each_entry_marked(xa, index, entry, last, filter)	\
	for (index = 0, entry = xa_find(xa, &index, last, filter); \
	     entry; entry = xa_find_after(xa, &index, last, filter))

#define mlx5_esw_for_each_vport_marked(esw, index, vport, last, filter)	\
	mlx5_esw_for_each_entry_marked(&((esw)->vports), index, vport, last, filter)

#define mlx5_esw_for_each_vf_vport(esw, index, vport, last)	\
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_VF)

#define mlx5_esw_for_each_host_func_vport(esw, index, vport, last)	\
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_HOST_FN)
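
/*
 * Illustrative sketch only: walking every VF vport up to the number of
 * enabled VFs. The index variable must be an unsigned long, as required by
 * the underlying xarray iterators:
 *
 *	struct mlx5_vport *vport;
 *	unsigned long i;
 *
 *	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
 *		esw_debug(esw->dev, "VF vport 0x%x\n", vport->vport);
 */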

/* This macro should only be used if EC SRIOV is enabled.
 *
 * Because there were no more marks available on the xarray, this uses a
 * for_each_range approach. The range is only valid when EC SRIOV is enabled.
 */
#define mlx5_esw_for_each_ec_vf_vport(esw, index, vport, last)	\
	xa_for_each_range(&((esw)->vports),			\
			  index,				\
			  vport,				\
			  MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base),	\
			  MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\
			  (last) - 1)

#define mlx5_esw_for_each_rep(esw, i, rep) \
	xa_for_each(&((esw)->offloads.vport_reps), i, rep)

struct mlx5_eswitch *__must_check
mlx5_devlink_eswitch_get(struct devlink *devlink);

struct mlx5_eswitch *mlx5_devlink_eswitch_nocheck_get(struct devlink *devlink);

struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);

bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_eswitch_is_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);
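
/*
 * Illustrative sketch only: mlx5_eswitch_get_vport() is __must_check and
 * reports failure through an ERR_PTR() rather than NULL, so callers are
 * expected to test the result with IS_ERR():
 *
 *	struct mlx5_vport *vport;
 *
 *	vport = mlx5_eswitch_get_vport(esw, vport_num);
 *	if (IS_ERR(vport))
 *		return PTR_ERR(vport);
 */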

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);

int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);

int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
			  enum mlx5_eswitch_vport_event enabled_events);
void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport);
void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport);

struct esw_vport_tbl_namespace {
	int max_fte;
	int max_num_groups;
	u32 flags;
};

struct mlx5_vport_tbl_attr {
	u32 chain;
	u16 prio;
	u16 vport;
	struct esw_vport_tbl_namespace *vport_ns;
};

struct mlx5_flow_table *
mlx5_esw_vporttbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);
void
mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);

struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);

void mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					 u32 *flow_group_in,
					 int match_params);

void mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
				   u16 vport,
				   struct mlx5_flow_spec *spec);

int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_init_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
				  struct mlx5_devlink_port *dl_port,
				  u32 controller, u32 sfnum);
void mlx5_esw_offloads_cleanup_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_eswitch_load_sf_vport(struct mlx5_eswitch *esw, u16 vport_num,
			       enum mlx5_eswitch_vport_event enabled_events,
			       struct mlx5_devlink_port *dl_port, u32 controller, u32 sfnum);
void mlx5_eswitch_unload_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);

int mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport);
void mlx5_esw_offloads_pf_vf_devlink_port_cleanup(struct mlx5_eswitch *esw,
						  struct mlx5_vport *vport);

int mlx5_esw_offloads_sf_devlink_port_init(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					   struct mlx5_devlink_port *dl_port,
					   u32 controller, u32 sfnum);
void mlx5_esw_offloads_sf_devlink_port_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_vport *vport);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);

int mlx5_esw_vport_vhca_id_map(struct mlx5_eswitch *esw,
			       struct mlx5_vport *vport);
void mlx5_esw_vport_vhca_id_unmap(struct mlx5_eswitch *esw,
				  struct mlx5_vport *vport);
int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num);
bool mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id);

void mlx5_esw_offloads_rep_remove(struct mlx5_eswitch *esw,
				  const struct mlx5_vport *vport);
int mlx5_esw_offloads_rep_add(struct mlx5_eswitch *esw,
			      const struct mlx5_vport *vport);

/**
 * struct mlx5_esw_event_info - Indicates eswitch mode changed/changing.
 *
 * @new_mode: New mode of eswitch.
 */
struct mlx5_esw_event_info {
	u16 new_mode;
};

int mlx5_esw_event_notifier_register(struct mlx5_core_dev *dev,
				     struct notifier_block *n);
void mlx5_esw_event_notifier_unregister(struct mlx5_core_dev *dev,
					struct notifier_block *n);
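
/*
 * Illustrative sketch only, assuming the notifier delivers a
 * struct mlx5_esw_event_info as its data argument when the eswitch mode
 * changes (my_esw_event_cb() and react_to_switchdev() are hypothetical):
 *
 *	static int my_esw_event_cb(struct notifier_block *nb,
 *				   unsigned long event, void *data)
 *	{
 *		struct mlx5_esw_event_info *info = data;
 *
 *		if (info->new_mode == MLX5_ESWITCH_OFFLOADS)
 *			react_to_switchdev();
 *		return NOTIFY_OK;
 *	}
 */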

bool mlx5_esw_hold(struct mlx5_core_dev *dev);
void mlx5_esw_release(struct mlx5_core_dev *dev);
void mlx5_esw_get(struct mlx5_core_dev *dev);
void mlx5_esw_put(struct mlx5_core_dev *dev);
int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
int mlx5_esw_lock(struct mlx5_eswitch *esw);
void mlx5_esw_unlock(struct mlx5_eswitch *esw);

void esw_vport_change_handle_locked(struct mlx5_vport *vport);

bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller);

int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
					     struct mlx5_eswitch *slave_esw, int max_slaves);
void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
					      struct mlx5_eswitch *slave_esw);
int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw);

bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev, bool from_fdb);
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);

int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev);

static inline int mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw)
{
	if (mlx5_esw_allowed(esw))
		return esw->esw_funcs.num_vfs;

	return 0;
}

static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw)
{
	if (mlx5_esw_allowed(esw))
		return esw->num_peers;
	return 0;
}

static inline struct mlx5_flow_table *
mlx5_eswitch_get_slow_fdb(struct mlx5_eswitch *esw)
{
	return esw->fdb_table.offloads.slow_fdb;
}

int mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule,
				    struct mlx5_esw_flow_attr *esw_attr, int attr_idx);
bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev);
bool mlx5_esw_ipsec_vf_offload_supported(struct mlx5_core_dev *dev);
int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev,
				  struct mlx5_vport *vport);
int mlx5_esw_ipsec_vf_crypto_offload_supported(struct mlx5_core_dev *dev,
					       u16 vport_num);
int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					 bool enable);
int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					 bool enable);
int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
					       u16 vport_num);
bool mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev);
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
static inline void
mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw,
			      const struct mlx5_devcom_match_attr *attr) {}
static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {}
static inline
bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw) { return false; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return vport_num;
}

static inline int
mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
					 struct mlx5_eswitch *slave_esw, int max_slaves)
{
	return 0;
}

static inline void
mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
					 struct mlx5_eswitch *slave_esw) {}

static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw) { return 0; }

static inline int
mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
{
	return 0;
}

static inline bool
mlx5_eswitch_block_encap(struct mlx5_core_dev *dev, bool from_fdb)
{
	return true;
}

static inline void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
{
}

static inline int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev) {}
static inline bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev)
{
	return false;
}

static inline void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev) {}

static inline bool
mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev)
{
	return true;
}

static inline bool
mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id)
{
	/* No vhca_id is available without eswitch support. */
	return false;
}

#endif /* CONFIG_MLX5_ESWITCH */

#endif /* __MLX5_ESWITCH_H__ */