| /linux/drivers/net/ethernet/mellanox/mlx5/core/ |
| eswitch.c |
     40  #include "esw/acl/lgcy.h"
     41  #include "esw/legacy.h"
     42  #include "esw/qos.h"
    107  mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)  [in mlx5_eswitch_get_vport(), argument]
    111  if (!esw)  [in mlx5_eswitch_get_vport()]
    114  vport = xa_load(&esw->vports, vport_num);  [in mlx5_eswitch_get_vport()]
    116  esw_debug(esw->dev, "vport out of range: num(0x%x)\n", vport_num);  [in mlx5_eswitch_get_vport()]
    207  __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule,  [in __esw_fdb_set_vport_rule(), argument]
    250  esw_debug(esw->dev,  [in __esw_fdb_set_vport_rule()]
    256  mlx5_add_flow_rules(esw->fdb_table.legacy.fdb, spec,  [in __esw_fdb_set_vport_rule()]
    [all …]
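The mlx5_eswitch_get_vport() hits above show the vport lookup pattern: vports live in an xarray keyed by vport number and are fetched with xa_load(), with a debug message when the number is out of range. Below is a minimal sketch of that pattern; the my_eswitch/my_vport types and the exact error codes are simplified assumptions, only the xa_load() lookup and the ERR_PTR return convention are taken from the listing.

```c
/* Minimal sketch of the xarray-based vport lookup seen in
 * mlx5_eswitch_get_vport().  my_eswitch/my_vport are simplified,
 * hypothetical stand-ins; the error codes are assumptions.
 */
#include <linux/xarray.h>
#include <linux/err.h>
#include <linux/printk.h>
#include <linux/types.h>

struct my_vport {
	u16 vport;		/* vport number, also the xarray index */
};

struct my_eswitch {
	struct xarray vports;	/* vport number -> struct my_vport * */
};

static struct my_vport *my_eswitch_get_vport(struct my_eswitch *esw, u16 vport_num)
{
	struct my_vport *vport;

	if (!esw)
		return ERR_PTR(-EPERM);		/* no eswitch instance */

	vport = xa_load(&esw->vports, vport_num);
	if (!vport) {
		pr_debug("vport out of range: num(0x%x)\n", vport_num);
		return ERR_PTR(-EINVAL);
	}
	return vport;
}
```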
|
| eswitch_offloads.c |
     41  #include "esw/indir_table.h"
     42  #include "esw/acl/ofld.h"
     76  static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,  [in mlx5_eswitch_get_rep(), argument]
     79  return xa_load(&esw->offloads.vport_reps, vport_num);  [in mlx5_eswitch_get_rep()]
     83  mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,  [in mlx5_eswitch_set_rule_flow_source(), argument]
     87  if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) || !attr || !attr->in_rep)  [in mlx5_eswitch_set_rule_flow_source()]
    105  mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec)  [in mlx5_eswitch_clear_rule_source_port(), argument]
    107  if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {  [in mlx5_eswitch_clear_rule_source_port()]
    122  mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,  [in mlx5_eswitch_set_rule_source_port(), argument]
    136  if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {  [in mlx5_eswitch_set_rule_source_port()]
    [all …]
|
| eswitch.h |
     86  #define esw_chains(esw) \  [argument]
     87  ((esw)->fdb_table.offloads.esw_chains_priv)
    336  struct mlx5_eswitch *esw;
    412  void esw_offloads_disable(struct mlx5_eswitch *esw);
    413  int esw_offloads_enable(struct mlx5_eswitch *esw);
    414  void esw_offloads_cleanup(struct mlx5_eswitch *esw);
    415  int esw_offloads_init(struct mlx5_eswitch *esw);
    418  mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num);
    421  bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
    422  u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw);
    [all …]
    333  struct mlx5_eswitch *esw;  [member]
    611  esw_vst_mode_is_steering(struct mlx5_eswitch *esw)  [in esw_vst_mode_is_steering(), argument]
    649  mlx5_esw_allowed(const struct mlx5_eswitch *esw)  [in mlx5_esw_allowed(), argument]
    655  mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)  [in mlx5_esw_is_manager_vport(), argument]
    660  mlx5_esw_is_owner(struct mlx5_eswitch *esw, u16 vport_num, u16 esw_owner_vhca_id)  [in mlx5_esw_is_owner(), argument]
    691  mlx5_esw_is_fdb_created(struct mlx5_eswitch *esw)  [in mlx5_esw_is_fdb_created(), argument]
    713  mlx5_esw_for_each_vport(esw, index, vport)  [argument]
    720  mlx5_esw_for_each_vport_marked(esw, index, vport, last, filter)  [argument]
    723  mlx5_esw_for_each_vf_vport(esw, index, vport, last)  [argument]
    726  mlx5_esw_for_each_host_func_vport(esw, index, vport, last)  [argument]
    734  mlx5_esw_for_each_ec_vf_vport(esw, index, vport, last)  [argument]
    742  mlx5_esw_for_each_rep(esw, i, rep)  [argument]
    888  mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw)  [in mlx5_eswitch_num_vfs(), argument]
    896  mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw)  [in mlx5_eswitch_get_npeers(), argument]
    904  mlx5_eswitch_get_slow_fdb(struct mlx5_eswitch *esw)  [in mlx5_eswitch_get_slow_fdb(), argument]
    928  mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)  [in mlx5_eswitch_cleanup(), argument]
    929  mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)  [in mlx5_eswitch_enable(), argument]
    930  mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)  [in mlx5_eswitch_disable_sriov(), argument]
    931  mlx5_eswitch_disable(struct mlx5_eswitch *esw)  [in mlx5_eswitch_disable(), argument]
    933  mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, const struct mlx5_devcom_match_attr *attr)  [in mlx5_esw_offloads_devcom_init(), argument]
    935  mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)  [in mlx5_esw_offloads_devcom_cleanup(), argument]
    936  mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw)  [in mlx5_esw_offloads_devcom_is_ready(), argument]
    939  mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state)  [in mlx5_eswitch_set_vport_state(), argument]
    946  esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)  [in esw_add_restore_rule(), argument]
    969  mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw)  [in mlx5_eswitch_get_npeers(), argument]
    972  mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)  [in mlx5_eswitch_reload_ib_reps(), argument]
   1003  mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id)  [in mlx5_esw_vport_vhca_id(), argument]
    [all …]
| eswitch_offloads_termtbl.c |
    112  mlx5_eswitch_termtbl_get_create(struct mlx5_eswitch *esw,  [in mlx5_eswitch_termtbl_get_create(), argument]
    122  mutex_lock(&esw->offloads.termtbl_mutex);  [in mlx5_eswitch_termtbl_get_create()]
    124  hash_for_each_possible(esw->offloads.termtbl_tbl, tt,  [in mlx5_eswitch_termtbl_get_create()]
    147  err = mlx5_eswitch_termtbl_create(esw->dev, tt, flow_act);  [in mlx5_eswitch_termtbl_get_create()]
    151  hash_add(esw->offloads.termtbl_tbl, &tt->termtbl_hlist, hash_key);  [in mlx5_eswitch_termtbl_get_create()]
    154  mutex_unlock(&esw->offloads.termtbl_mutex);  [in mlx5_eswitch_termtbl_get_create()]
    158  mutex_unlock(&esw->offloads.termtbl_mutex);  [in mlx5_eswitch_termtbl_get_create()]
    163  mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,  [in mlx5_eswitch_termtbl_put(), argument]
    166  mutex_lock(&esw->offloads.termtbl_mutex);  [in mlx5_eswitch_termtbl_put()]
    169  mutex_unlock(&esw->offloads.termtbl_mutex);  [in mlx5_eswitch_termtbl_put()]
    [all …]
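The termination-table hits show a classic lookup-or-create pattern: a hashtable of entries protected by a mutex, searched with hash_for_each_possible() and extended with hash_add(), with a matching put path under the same lock. Here is a minimal sketch of that pattern using the standard kernel hashtable and mutex APIs; the my_termtbl type, the key comparison, and the reference counting details are simplified assumptions, not the driver's actual structures.

```c
/* Sketch of the mutex-protected hashtable lookup-or-create pattern
 * visible in mlx5_eswitch_termtbl_get_create().  Types are simplified,
 * hypothetical stand-ins.
 */
#include <linux/hashtable.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct my_termtbl {
	struct hlist_node hlist;
	u32 hash_key;
	int ref_count;
};

static DEFINE_MUTEX(termtbl_mutex);		/* serializes table access */
static DEFINE_HASHTABLE(termtbl_tbl, 8);	/* 2^8 buckets */

static struct my_termtbl *termtbl_get_create(u32 hash_key)
{
	struct my_termtbl *tt;

	mutex_lock(&termtbl_mutex);
	hash_for_each_possible(termtbl_tbl, tt, hlist, hash_key) {
		if (tt->hash_key == hash_key) {	/* reuse an existing entry */
			tt->ref_count++;
			mutex_unlock(&termtbl_mutex);
			return tt;
		}
	}

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt) {
		mutex_unlock(&termtbl_mutex);
		return NULL;
	}
	tt->hash_key = hash_key;
	tt->ref_count = 1;
	hash_add(termtbl_tbl, &tt->hlist, hash_key);
	mutex_unlock(&termtbl_mutex);
	return tt;
}
```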
|
| en_rep.c |
    205  struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;  [in MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(), local]
    215  err = mlx5_core_query_vport_counter(esw->dev, 1, rep->vport - 1, 0, out);  [in MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS()]
    439  static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,  [in mlx5e_sqs2vport_stop(), argument]
    447  if (esw->mode != MLX5_ESWITCH_OFFLOADS)  [in mlx5e_sqs2vport_stop()]
    467  static int mlx5e_sqs2vport_add_peers_rules(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep,  [in mlx5e_sqs2vport_add_peers_rules(), argument]
    474  mlx5_devcom_for_each_peer_entry(esw->devcom, peer_esw, tmp) {  [in mlx5e_sqs2vport_add_peers_rules()]
    483  flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw,  [in mlx5e_sqs2vport_add_peers_rules()]
    503  static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,  [in mlx5e_sqs2vport_start(), argument]
    514  if (esw->mode != MLX5_ESWITCH_OFFLOADS)  [in mlx5e_sqs2vport_start()]
    519  if (mlx5_devcom_comp_is_ready(esw …  [in mlx5e_sqs2vport_start()]
    573  struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;  [in mlx5e_add_sqs_fwd_rules(), local]
    628  struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;  [in mlx5e_remove_sqs_fwd_rules(), local]
    638  struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;  [in mlx5e_rep_add_meta_tunnel_rule(), local]
   1041  struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;  [in mlx5e_create_rep_vport_rx_rule(), local]
   1705  mlx5e_vport_rep_event_pair(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep, struct mlx5_eswitch *peer_esw)  [in mlx5e_vport_rep_event_pair(), argument]
   1757  mlx5e_vport_rep_event(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep, enum mlx5_switchdev_event event, void *data)  [in mlx5e_vport_rep_event(), argument]
   1784  struct mlx5_eswitch *esw;  [in mlx5e_rep_probe(), local]
   1795  struct mlx5_eswitch *esw;  [in mlx5e_rep_remove(), local]
    [all …]
| /linux/drivers/net/ethernet/mellanox/mlx5/core/esw/ |
| legacy.c |
      9  #include "esw/acl/lgcy.h"
     10  #include "esw/legacy.h"
     15  #include "esw/qos.h"
     22  static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)  [in esw_create_legacy_vepa_table(), argument]
     25  struct mlx5_core_dev *dev = esw->dev;  [in esw_create_legacy_vepa_table()]
     46  esw->fdb_table.legacy.vepa_fdb = fdb;  [in esw_create_legacy_vepa_table()]
     51  static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)  [in esw_destroy_legacy_fdb_table(), argument]
     53  esw_debug(esw->dev, "Destroy FDB Table\n");  [in esw_destroy_legacy_fdb_table()]
     54  if (!esw->fdb_table.legacy.fdb)  [in esw_destroy_legacy_fdb_table()]
     57  if (esw->fdb_table.legacy.promisc_grp)  [in esw_destroy_legacy_fdb_table()]
    [all …]
|
| qos.c |
      6  #include "esw/qos.h"
     22  static void esw_qos_lock(struct mlx5_eswitch *esw)  [in esw_qos_lock(), argument]
     24  mutex_lock(&esw->qos.domain->lock);  [in esw_qos_lock()]
     27  static void esw_qos_unlock(struct mlx5_eswitch *esw)  [in esw_qos_unlock(), argument]
     29  mutex_unlock(&esw->qos.domain->lock);  [in esw_qos_unlock()]
     32  static void esw_assert_qos_lock_held(struct mlx5_eswitch *esw)  [in esw_assert_qos_lock_held(), argument]
     34  lockdep_assert_held(&esw->qos.domain->lock);  [in esw_assert_qos_lock_held()]
     51  static int esw_qos_domain_init(struct mlx5_eswitch *esw)  [in esw_qos_domain_init(), argument]
     53  esw->qos.domain = esw_qos_domain_alloc();  [in esw_qos_domain_init()]
     55  return esw …  [in esw_qos_domain_init()]
     58  esw_qos_domain_release(struct mlx5_eswitch *esw)  [in esw_qos_domain_release(), argument]
     96  struct mlx5_eswitch *esw;  [member]
    324  esw_qos_calculate_min_rate_divider(struct mlx5_eswitch *esw, struct mlx5_esw_sched_node *parent)  [in esw_qos_calculate_min_rate_divider(), argument]
    378  esw_qos_normalize_min_rate(struct mlx5_eswitch *esw, struct mlx5_esw_sched_node *parent, struct netlink_ext_ack *extack)  [in esw_qos_normalize_min_rate(), argument]
    422  struct mlx5_eswitch *esw = node->esw;  [in esw_qos_set_node_min_rate(), local]
    523  __esw_qos_alloc_node(struct mlx5_eswitch *esw, u32 tsar_ix, enum sched_node_type type, struct mlx5_esw_sched_node *parent)  [in __esw_qos_alloc_node(), argument]
    621  struct mlx5_eswitch *esw = tc_arbiter_node->esw;  [in esw_qos_set_tc_arbiter_bw_shares(), local]
    654  struct mlx5_eswitch *esw = tc_arbiter_node->esw;  [in esw_qos_create_vports_tc_nodes(), local]
    704  __esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct mlx5_esw_sched_node *parent, struct netlink_ext_ack *extack)  [in __esw_qos_create_vports_sched_node(), argument]
    743  esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)  [in esw_qos_create_vports_sched_node(), argument]
    765  struct mlx5_eswitch *esw = node->esw;  [in __esw_qos_destroy_node(), local]
    775  esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)  [in esw_qos_create(), argument]
    795  esw_qos_destroy(struct mlx5_eswitch *esw)  [in esw_qos_destroy(), argument]
    806  esw_qos_get(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)  [in esw_qos_get(), argument]
    821  esw_qos_put(struct mlx5_eswitch *esw)  [in esw_qos_put(), argument]
   1101  struct mlx5_eswitch *esw = vport->dev->priv.eswitch;  [in mlx5_esw_qos_vport_enable(), local]
   1136  struct mlx5_eswitch *esw = vport->dev->priv.eswitch;  [in mlx5_esw_qos_vport_disable_locked(), local]
   1149  struct mlx5_eswitch *esw = vport->dev->priv.eswitch;  [in mlx5_esw_qos_vport_disable(), local]
   1198  struct mlx5_eswitch *esw = vport->dev->priv.eswitch;  [in mlx5_esw_qos_set_vport_rate(), local]
   1211  struct mlx5_eswitch *esw = vport->dev->priv.eswitch;  [in mlx5_esw_qos_get_vport_rate(), local]
   1286  struct mlx5_eswitch *esw = vport->dev->priv.eswitch;  [in esw_qos_vport_update_parent(), local]
   1565  mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps)  [in mlx5_esw_qos_modify_vport_rate(), argument]
   1626  esw_qos_validate_unsupported_tc_bw(struct mlx5_eswitch *esw, u32 *tc_bw)  [in esw_qos_validate_unsupported_tc_bw(), argument]
   1643  struct mlx5_eswitch *esw = vport->dev->priv.eswitch;  [in esw_qos_vport_validate_unsupported_tc_bw(), local]
   1677  mlx5_esw_qos_init(struct mlx5_eswitch *esw)  [in mlx5_esw_qos_init(), argument]
   1685  mlx5_esw_qos_cleanup(struct mlx5_eswitch *esw)  [in mlx5_esw_qos_cleanup(), argument]
   1697  struct mlx5_eswitch *esw;  [in mlx5_esw_devlink_rate_leaf_tx_share_set(), local]
   1722  struct mlx5_eswitch *esw;  [in mlx5_esw_devlink_rate_leaf_tx_max_set(), local]
   1750  struct mlx5_eswitch *esw;  [in mlx5_esw_devlink_rate_leaf_tc_bw_set(), local]
   1803  struct mlx5_eswitch *esw = node->esw;  [in mlx5_esw_devlink_rate_node_tc_bw_set(), local]
   1832  struct mlx5_eswitch *esw = node->esw;  [in mlx5_esw_devlink_rate_node_tx_share_set(), local]
   1849  struct mlx5_eswitch *esw = node->esw;  [in mlx5_esw_devlink_rate_node_tx_max_set(), local]
   1866  struct mlx5_eswitch *esw;  [in mlx5_esw_devlink_rate_node_new(), local]
   1897  struct mlx5_eswitch *esw = node->esw;  [in mlx5_esw_devlink_rate_node_del(), local]
   1909  struct mlx5_eswitch *esw = vport->dev->priv.eswitch;  [in mlx5_esw_qos_vport_update_parent(), local]
   1943  struct mlx5_eswitch *esw = vport->dev->priv.eswitch;  [in mlx5_esw_devlink_rate_leaf_parent_set(), local]
   2018  struct mlx5_eswitch *esw = node->esw;  [in esw_qos_tc_arbiter_node_update_parent(), local]
   2042  struct mlx5_eswitch *esw = node->esw;  [in esw_qos_vports_node_update_parent(), local]
   2074  struct mlx5_eswitch *esw = node->esw;  [in mlx5_esw_qos_node_update_parent(), local]
    [all …]
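The first qos.c hits show the locking helpers used around the QoS domain: a per-domain mutex wrapped in small lock/unlock/assert functions so that call sites can document the locking requirement with esw_assert_qos_lock_held(). A minimal sketch of that helper pattern follows; the my_qos_domain and my_eswitch types are simplified, hypothetical stand-ins, only the mutex plus lockdep_assert_held() shape is taken from the listing.

```c
/* Sketch of the QoS locking helpers visible in esw/qos.c: a per-domain
 * mutex wrapped in lock/unlock/assert helpers.  Types are simplified,
 * hypothetical stand-ins.
 */
#include <linux/mutex.h>
#include <linux/lockdep.h>

struct my_qos_domain {
	struct mutex lock;	/* serializes scheduling-tree changes */
};

struct my_eswitch {
	struct {
		struct my_qos_domain *domain;
	} qos;
};

static void esw_qos_lock(struct my_eswitch *esw)
{
	mutex_lock(&esw->qos.domain->lock);
}

static void esw_qos_unlock(struct my_eswitch *esw)
{
	mutex_unlock(&esw->qos.domain->lock);
}

/* Callers that require the lock can assert it instead of retaking it. */
static void esw_assert_qos_lock_held(struct my_eswitch *esw)
{
	lockdep_assert_held(&esw->qos.domain->lock);
}
```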
| indir_table.c |
     15  #include "esw/indir_table.h"
     67  mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,  [in mlx5_esw_indir_table_needed(), argument]
     75  vf_sf_vport = mlx5_eswitch_is_vf_vport(esw, vport_num) ||  [in mlx5_esw_indir_table_needed()]
     76  mlx5_esw_is_sf_vport(esw, vport_num);  [in mlx5_esw_indir_table_needed()]
     83  esw->dev == dest_mdev &&  [in mlx5_esw_indir_table_needed()]
     95  static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,  [in mlx5_esw_indir_table_rule_get(), argument]
    100  struct mlx5_fs_chains *chains = esw_chains(esw);  [in mlx5_esw_indir_table_rule_get()]
    119  data = mlx5_eswitch_get_vport_metadata_for_set(esw, esw_attr->rx_tun_attr->decap_vport);  [in mlx5_esw_indir_table_rule_get()]
    120  err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,  [in mlx5_esw_indir_table_rule_get()]
    125  err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,  [in mlx5_esw_indir_table_rule_get()]
    [all …]
|
| bridge_mcast.c |
    200  esw_warn(bridge->br_offloads->esw->dev, "MDB attach entry is already attached to port (MAC=%pM,vid…  [in mlx5_esw_bridge_port_mdb_attach()]
    208  esw_warn(bridge->br_offloads->esw->dev, "MDB attach failed to init entry (MAC=%pM,vid=%u,vport=%u,…  [in mlx5_esw_bridge_port_mdb_attach()]
    218  esw_warn(bridge->br_offloads->esw->dev,  [in mlx5_esw_bridge_port_mdb_attach()]
    229  esw_warn(bridge->br_offloads->esw->dev, "MDB attach failed to offload (MAC=%pM,vid=%u,vport=%u,err…  [in mlx5_esw_bridge_port_mdb_attach()]
    253  esw_warn(bridge->br_offloads->esw->dev, "MDB detach failed to offload (MAC=%pM,vid=%u,vport=%u)\n",  [in mlx5_esw_bridge_port_mdb_entry_detach()]
    265  esw_debug(bridge->br_offloads->esw->dev,  [in mlx5_esw_bridge_port_mdb_detach()]
    272  esw_debug(bridge->br_offloads->esw->dev,  [in mlx5_esw_bridge_port_mdb_detach()]
    313  struct mlx5_eswitch *esw = bridge->br_offloads->esw;  [in mlx5_esw_bridge_port_mcast_fts_init(), local]
    318  esw);  [in mlx5_esw_bridge_port_mcast_fts_init()]
    334  mlx5_esw_bridge_mcast_filter_fg_create(struct mlx5_eswitch *esw,  [in mlx5_esw_bridge_mcast_filter_fg_create(), argument]
    [all …]
|
| indir_table.h |
     14  struct mlx5_flow_table *mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
     17  void mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
     21  mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,
     43  mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,  [in mlx5_esw_indir_table_get(), argument]
     51  mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,  [in mlx5_esw_indir_table_put(), argument]
     57  mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,  [in mlx5_esw_indir_table_needed(), argument]
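indir_table.h lists each function twice (prototypes around lines 14-21, inline definitions around lines 43-57), which looks like the usual kernel header pattern of real prototypes when a feature is built in and static inline no-op stubs otherwise. Below is a generic sketch of that pattern; CONFIG_MY_FEATURE and the my_* names are hypothetical, not the driver's actual Kconfig option or API.

```c
/* Generic sketch of the "prototypes or static inline stubs" header
 * pattern suggested by indir_table.h.  All names here are hypothetical.
 */
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>

struct my_table;
struct my_eswitch;

#ifdef CONFIG_MY_FEATURE
struct my_table *my_indir_table_get(struct my_eswitch *esw, u16 vport);
void my_indir_table_put(struct my_eswitch *esw, u16 vport);
bool my_indir_table_needed(struct my_eswitch *esw, u16 vport);
#else
/* Compiled-out variant: callers still build and link, calls become no-ops. */
static inline struct my_table *
my_indir_table_get(struct my_eswitch *esw, u16 vport)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void my_indir_table_put(struct my_eswitch *esw, u16 vport)
{
}

static inline bool my_indir_table_needed(struct my_eswitch *esw, u16 vport)
{
	return false;
}
#endif
```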
|
| legacy.h |
     13  int esw_legacy_enable(struct mlx5_eswitch *esw);
     14  void esw_legacy_disable(struct mlx5_eswitch *esw);
     16  int esw_legacy_vport_acl_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
     17  void esw_legacy_vport_acl_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
|
| ipsec_fs.c |
      7  #include "esw/ipsec_fs.h"
    136  static int mlx5_esw_ipsec_modify_flow_dests(struct mlx5_eswitch *esw,  [in mlx5_esw_ipsec_modify_flow_dests(), argument]
    148  err = mlx5_eswitch_restore_ipsec_rule(esw, flow->rule[0], esw_attr,  [in mlx5_esw_ipsec_modify_flow_dests()]
    158  struct mlx5_eswitch *esw = mdev->priv.eswitch;  [in mlx5_esw_ipsec_restore_dest_uplink(), local]
    166  mlx5_esw_for_each_rep(esw, i, rep) {  [in mlx5_esw_ipsec_restore_dest_uplink()]
    177  err = mlx5_esw_ipsec_modify_flow_dests(esw, flow);  [in mlx5_esw_ipsec_restore_dest_uplink()]
|
| ipsec.c |
    175  struct mlx5_eswitch *esw = dev->priv.eswitch;  [in esw_ipsec_vf_crypto_aux_caps_set(), local]
    199  ret = mlx5_cmd_exec_in(esw->dev, set_hca_cap, hca_cap);  [in esw_ipsec_vf_crypto_aux_caps_set()]
    206  static int esw_ipsec_vf_offload_set_bytype(struct mlx5_eswitch *esw, struct mlx5_vport *vport,  [in esw_ipsec_vf_offload_set_bytype(), argument]
    209  struct mlx5_core_dev *dev = esw->dev;  [in esw_ipsec_vf_offload_set_bytype()]
    357  int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,  [in mlx5_esw_ipsec_vf_crypto_offload_set(), argument]
    360  return esw_ipsec_vf_offload_set_bytype(esw, vport, enable,  [in mlx5_esw_ipsec_vf_crypto_offload_set()]
    364  int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,  [in mlx5_esw_ipsec_vf_packet_offload_set(), argument]
    367  return esw_ipsec_vf_offload_set_bytype(esw, vport, enable,  [in mlx5_esw_ipsec_vf_packet_offload_set()]
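The ipsec.c hits show two thin public setters (crypto offload and packet offload) that both delegate to one esw_ipsec_vf_offload_set_bytype() helper, presumably selected by a type argument. A generic sketch of that dispatch shape follows; the my_* names and the enum values are hypothetical stand-ins, not the driver's actual definitions.

```c
/* Generic sketch of the "thin wrappers over one _bytype helper" shape
 * visible in esw/ipsec.c.  All names here are hypothetical.
 */
#include <linux/types.h>

enum my_vf_offload_type {
	MY_VF_OFFLOAD_CRYPTO,
	MY_VF_OFFLOAD_PACKET,
};

struct my_vport;

/* One helper does the real work, keyed by offload type. */
static int my_vf_offload_set_bytype(struct my_vport *vport, bool enable,
				    enum my_vf_offload_type type)
{
	/* ... program the device according to 'type' ... */
	return 0;
}

int my_vf_crypto_offload_set(struct my_vport *vport, bool enable)
{
	return my_vf_offload_set_bytype(vport, enable, MY_VF_OFFLOAD_CRYPTO);
}

int my_vf_packet_offload_set(struct my_vport *vport, bool enable)
{
	return my_vf_offload_set_bytype(vport, enable, MY_VF_OFFLOAD_PACKET);
}
```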
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ |
| ingress_ofld.c |
     10  acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
     13  esw_acl_ingress_prio_tag_enabled(struct mlx5_eswitch *esw,  [in esw_acl_ingress_prio_tag_enabled(), argument]
     16  return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&  [in esw_acl_ingress_prio_tag_enabled()]
     17  mlx5_eswitch_is_vf_vport(esw, vport->vport));  [in esw_acl_ingress_prio_tag_enabled()]
     20  static int esw_acl_ingress_prio_tag_create(struct mlx5_eswitch *esw,  [in esw_acl_ingress_prio_tag_create(), argument]
     55  esw_warn(esw->dev,  [in esw_acl_ingress_prio_tag_create()]
     65  static int esw_acl_ingress_mod_metadata_create(struct mlx5_eswitch *esw,  [in esw_acl_ingress_mod_metadata_create(), argument]
     73  key = mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport);  [in esw_acl_ingress_mod_metadata_create()]
     86  mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,  [in esw_acl_ingress_mod_metadata_create()]
     90  esw_warn(esw->dev,  [in esw_acl_ingress_mod_metadata_create()]
    [all …]
|
| egress_ofld.c |
     41  static int esw_acl_egress_ofld_fwd2vport_create(struct mlx5_eswitch *esw,  [in esw_acl_egress_ofld_fwd2vport_create(), argument]
     48  esw_debug(esw->dev, "vport(%d) configure egress acl rule fwd2vport(%d)\n",  [in esw_acl_egress_ofld_fwd2vport_create()]
     61  esw_warn(esw->dev,  [in esw_acl_egress_ofld_fwd2vport_create()]
     70  static int esw_acl_egress_ofld_rules_create(struct mlx5_eswitch *esw,  [in esw_acl_egress_ofld_rules_create(), argument]
     77  if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {  [in esw_acl_egress_ofld_rules_create()]
     82  esw_debug(esw->dev,  [in esw_acl_egress_ofld_rules_create()]
     90  err = esw_egress_acl_vlan_create(esw, vport, fwd_dest, 0, action);  [in esw_acl_egress_ofld_rules_create()]
     96  err = esw_acl_egress_ofld_fwd2vport_create(esw, vport, fwd_dest);  [in esw_acl_egress_ofld_rules_create()]
    116  static int esw_acl_egress_ofld_groups_create(struct mlx5_eswitch *esw,  [in esw_acl_egress_ofld_groups_create(), argument]
    125  if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {  [in esw_acl_egress_ofld_groups_create()]
    [all …]
|
| ofld.h |
     11  int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
     14  int mlx5_esw_acl_egress_vport_bond(struct mlx5_eswitch *esw, u16 active_vport_num,
     16  int mlx5_esw_acl_egress_vport_unbond(struct mlx5_eswitch *esw, u16 vport_num);
     18  static inline bool mlx5_esw_acl_egress_fwd2vport_supported(struct mlx5_eswitch *esw)  [in mlx5_esw_acl_egress_fwd2vport_supported(), argument]
     20  return esw && esw->mode == MLX5_ESWITCH_OFFLOADS &&  [in mlx5_esw_acl_egress_fwd2vport_supported()]
     21  mlx5_eswitch_vport_match_metadata_enabled(esw) &&  [in mlx5_esw_acl_egress_fwd2vport_supported()]
     22  MLX5_CAP_ESW_FLOWTABLE(esw->dev, egress_acl_forward_to_vport);  [in mlx5_esw_acl_egress_fwd2vport_supported()]
     26  int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
     27  void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
     28  int mlx5_esw_acl_ingress_vport_metadata_update(struct mlx5_eswitch *esw, u16 vport_num,
    [all …]
|
| helper.c |
      9  esw_acl_table_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int ns, int size)  [in esw_acl_table_create(), argument]
     12  struct mlx5_core_dev *dev = esw->dev;  [in esw_acl_table_create()]
     38  if (vport_num || mlx5_core_is_ecpf(esw->dev))  [in esw_acl_table_create()]
     49  int esw_egress_acl_vlan_create(struct mlx5_eswitch *esw,  [in esw_egress_acl_vlan_create(), argument]
     77  esw_warn(esw->dev,  [in esw_egress_acl_vlan_create()]
     95  int esw_acl_egress_vlan_grp_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport)  [in esw_acl_egress_vlan_grp_create(), argument]
    119  esw_warn(esw->dev,  [in esw_acl_egress_vlan_grp_create()]
|
| lgcy.h |
     10  int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
     11  void esw_acl_egress_lgcy_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
     14  int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
     15  void esw_acl_ingress_lgcy_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
|
| /linux/include/linux/mlx5/ |
| eswitch.h |
     42  int (*event)(struct mlx5_eswitch *esw,
     60  struct mlx5_eswitch *esw;  [member]
     63  void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
     66  void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type);
     67  void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
     70  struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
     72  void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type);
     82  bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw);
     83  bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw);
    105  u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
    [all …]
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ |
| sample.c |
     24  struct mlx5_eswitch *esw;  [member]
     63  struct mlx5_eswitch *esw = tc_psample->esw;  [in sampler_termtbl_create(), local]
     66  struct mlx5_core_dev *dev = esw->dev;  [in sampler_termtbl_create()]
     95  dest.vport.num = esw->manager_vport;  [in sampler_termtbl_create()]
    196  err = sampler_obj_create(tc_psample->esw->dev, sampler);  [in sampler_get()]
    220  sampler_obj_destroy(tc_psample->esw->dev, sampler->sampler_id);  [in sampler_put()]
    265  struct mlx5_eswitch *esw = tc_psample->esw;  [in sample_restore_get(), local]
    266  struct mlx5_core_dev *mdev = esw->dev;  [in sample_restore_get()]
    290  restore->rule = esw_add_restore_rule(esw, obj_id);  [in sample_restore_get()]
    321  mlx5_modify_header_dealloc(tc_psample->esw->dev, restore->modify_hdr);  [in sample_restore_put()]
    [all …]
|
| post_meter.c |
    120  struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;  [in mlx5e_post_meter_add_rule(), local]
    135  ret = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);  [in mlx5e_post_meter_add_rule()]
    200  mlx5e_post_meter_rate_rules_destroy(struct mlx5_eswitch *esw,  [in mlx5e_post_meter_rate_rules_destroy(), argument]
    205  mlx5_eswitch_del_offloaded_rule(esw, rate_table->red_rule, rate_table->red_attr);  [in mlx5e_post_meter_rate_rules_destroy()]
    206  mlx5_eswitch_del_offloaded_rule(esw, rate_table->green_rule, rate_table->green_attr);  [in mlx5e_post_meter_rate_rules_destroy()]
    435  mlx5e_post_meter_rate_destroy(struct mlx5_eswitch *esw, struct mlx5e_post_meter_priv *post_meter)  [in mlx5e_post_meter_rate_destroy(), argument]
    437  mlx5e_post_meter_rate_rules_destroy(esw, post_meter);  [in mlx5e_post_meter_rate_destroy()]
    451  mlx5e_post_meter_cleanup(struct mlx5_eswitch *esw, struct mlx5e_post_meter_priv *post_meter)  [in mlx5e_post_meter_cleanup(), argument]
    454  mlx5e_post_meter_rate_destroy(esw, post_meter);  [in mlx5e_post_meter_cleanup()]
|
| /linux/drivers/infiniband/hw/mlx5/ |
| ib_rep.c |
     24  ndev = mlx5_ib_get_rep_netdev(rep->esw, rep->vport);  [in mlx5_ib_set_vport_rep()]
    210  struct mlx5_core_dev *mdev = mlx5_eswitch_get_core_dev(rep->esw);  [in mlx5_ib_vport_rep_unload()]
    248  struct mlx5_eswitch *esw;  [in mlx5_ib_vport_rep_unload(), local]
    251  esw = peer_mdev->priv.eswitch;  [in mlx5_ib_vport_rep_unload()]
    252  mlx5_eswitch_unregister_vport_reps(esw, REP_IB);  [in mlx5_ib_vport_rep_unload()]
    269  struct mlx5_eswitch *esw;  [in mlx5_ib_register_peer_vport_reps(), local]
    273  esw = peer_mdev->priv.eswitch;  [in mlx5_ib_register_peer_vport_reps()]
    274  mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_IB);  [in mlx5_ib_register_peer_vport_reps()]
    278  struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,  [in mlx5_ib_get_rep_netdev(), argument]
    281  return mlx5_eswitch_get_proto_dev(esw, vport_num, REP_ETH);  [in mlx5_ib_get_rep_netdev()]
    [all …]
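ib_rep.c shows how a protocol driver attaches representor ops to an eswitch: it resolves esw from the mlx5_core_dev and calls mlx5_eswitch_register_vport_reps() with a rep type (REP_IB here, REP_ETH for the Ethernet reps). A minimal sketch of that registration/unregistration flow follows; the callback bodies and the exact mlx5_eswitch_rep_ops member list are assumptions based on the signatures visible in this listing, so treat include/linux/mlx5/eswitch.h as the authoritative definition.

```c
/* Sketch of registering vport representor ops against an eswitch,
 * following the calls visible in ib_rep.c.  The ops member names are
 * assumed from the signatures in this listing.
 */
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>

static int my_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	/* allocate per-rep state here so get_proto_dev() can return it later */
	return 0;
}

static void my_rep_unload(struct mlx5_eswitch_rep *rep)
{
	/* tear down the per-rep state */
}

static void *my_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
	return NULL;	/* would return the protocol device bound to this rep */
}

static const struct mlx5_eswitch_rep_ops my_rep_ops = {
	.load = my_rep_load,
	.unload = my_rep_unload,
	.get_proto_dev = my_rep_get_proto_dev,
};

static void my_register_reps(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	/* REP_IB is what the IB rep code above uses; REP_ETH is the Ethernet type */
	mlx5_eswitch_register_vport_reps(esw, &my_rep_ops, REP_IB);
}

static void my_unregister_reps(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	mlx5_eswitch_unregister_vport_reps(esw, REP_IB);
}
```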
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/en/rep/ |
| tc.c |
     95  struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;  [in mlx5e_rep_update_flows(), local]
    101  mutex_lock(&esw->offloads.encap_tbl_lock);  [in mlx5e_rep_update_flows()]
    127  mutex_unlock(&esw->offloads.encap_tbl_lock);  [in mlx5e_rep_update_flows()]
    205  struct mlx5_eswitch *esw;  [in mlx5e_rep_setup_ft_cb(), local]
    212  esw = priv->mdev->priv.eswitch;  [in mlx5e_rep_setup_ft_cb()]
    218  if (!mlx5_chains_prios_supported(esw_chains(esw)))  [in mlx5e_rep_setup_ft_cb()]
    225  * it to range [1, mlx5_esw_chains_get_prio_range(esw)]  [in mlx5e_rep_setup_ft_cb()]
    230  if (tmp.common.prio >= mlx5_chains_get_prio_range(esw_chains(esw)))  [in mlx5e_rep_setup_ft_cb()]
    235  tmp.common.chain_index = mlx5_chains_get_nf_ft_chain(esw_chains(esw));  [in mlx5e_rep_setup_ft_cb()]
    387  struct mlx5_eswitch *esw;  [in mlx5e_rep_indr_setup_ft_cb(), local]
    [all …]
|
| bond.c |
     10  #include "esw/acl/ofld.h"
     26  struct mlx5_eswitch *esw;  [member]
     74  mlx5_esw_match_metadata_free(mdata->esw, mdata->metadata_reg_c_0);  [in mlx5e_rep_bond_metadata_release()]
     80  int mlx5e_rep_bond_enslave(struct mlx5_eswitch *esw, struct net_device *netdev,  [in mlx5e_rep_bond_enslave(), argument]
     91  rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);  [in mlx5e_rep_bond_enslave()]
    100  mdata->esw = esw;  [in mlx5e_rep_bond_enslave()]
    102  mdata->metadata_reg_c_0 = mlx5_esw_match_metadata_alloc(esw);  [in mlx5e_rep_bond_enslave()]
    123  err = mlx5_esw_acl_ingress_vport_metadata_update(esw, rpriv->rep->vport,  [in mlx5e_rep_bond_enslave()]
    144  void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,  [in mlx5e_rep_bond_unslave(), argument]
    155  rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);  [in mlx5e_rep_bond_unslave()]
    [all …]
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/lag/ |
| mpesw.c |
      8  #include "esw/acl/ofld.h"
     14  struct mlx5_eswitch *esw;  [in mlx5_mpesw_metadata_cleanup(), local]
     20  esw = dev->priv.eswitch;  [in mlx5_mpesw_metadata_cleanup()]
     24  mlx5_esw_acl_ingress_vport_metadata_update(esw, MLX5_VPORT_UPLINK, 0);  [in mlx5_mpesw_metadata_cleanup()]
     27  mlx5_esw_match_metadata_free(esw, pf_metadata);  [in mlx5_mpesw_metadata_cleanup()]
     35  struct mlx5_eswitch *esw;  [in mlx5_mpesw_metadata_set(), local]
     41  esw = dev->priv.eswitch;  [in mlx5_mpesw_metadata_set()]
     42  pf_metadata = mlx5_esw_match_metadata_alloc(esw);  [in mlx5_mpesw_metadata_set()]
     49  err = mlx5_esw_acl_ingress_vport_metadata_update(esw, MLX5_VPORT_UPLINK,  [in mlx5_mpesw_metadata_set()]
|