1 /* 2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include <linux/etherdevice.h> 34 #include <linux/idr.h> 35 #include <linux/mlx5/driver.h> 36 #include <linux/mlx5/mlx5_ifc.h> 37 #include <linux/mlx5/vport.h> 38 #include <linux/mlx5/fs.h> 39 #include "mlx5_core.h" 40 #include "eswitch.h" 41 #include "esw/indir_table.h" 42 #include "esw/acl/ofld.h" 43 #include "rdma.h" 44 #include "en.h" 45 #include "fs_core.h" 46 #include "lib/mlx5.h" 47 #include "lib/devcom.h" 48 #include "lib/eq.h" 49 #include "lib/fs_chains.h" 50 #include "en_tc.h" 51 #include "en/mapping.h" 52 #include "devlink.h" 53 #include "lag/lag.h" 54 #include "en/tc/post_meter.h" 55 #include "fw_reset.h" 56 57 /* There are two match-all miss flows, one for unicast dst mac and 58 * one for multicast. 59 */ 60 #define MLX5_ESW_MISS_FLOWS (2) 61 #define UPLINK_REP_INDEX 0 62 63 #define MLX5_ESW_VPORT_TBL_SIZE 128 64 #define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4 65 66 #define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1) 67 68 #define MLX5_ESW_MAX_CTRL_EQS 4 69 #define MLX5_ESW_DEFAULT_SF_COMP_EQS 8 70 71 static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = { 72 .max_fte = MLX5_ESW_VPORT_TBL_SIZE, 73 .max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS, 74 .flags = 0, 75 }; 76 77 static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw, 78 u16 vport_num) 79 { 80 return xa_load(&esw->offloads.vport_reps, vport_num); 81 } 82 83 static void 84 mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw, 85 struct mlx5_flow_spec *spec, 86 struct mlx5_esw_flow_attr *attr) 87 { 88 if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) || !attr || !attr->in_rep) 89 return; 90 91 if (attr->int_port) { 92 spec->flow_context.flow_source = mlx5e_tc_int_port_get_flow_source(attr->int_port); 93 94 return; 95 } 96 97 spec->flow_context.flow_source = (attr->in_rep->vport == MLX5_VPORT_UPLINK) ? 98 MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK : 99 MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT; 100 } 101 102 /* Actually only the upper 16 bits of reg c0 need to be cleared, but the lower 16 bits 103 * are not needed as well in the following process. So clear them all for simplicity. 104 */ 105 void 106 mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec) 107 { 108 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { 109 void *misc2; 110 111 misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2); 112 MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0); 113 114 misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2); 115 MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0); 116 117 if (!memchr_inv(misc2, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc2))) 118 spec->match_criteria_enable &= ~MLX5_MATCH_MISC_PARAMETERS_2; 119 } 120 } 121 122 static void 123 mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw, 124 struct mlx5_flow_spec *spec, 125 struct mlx5_flow_attr *attr, 126 struct mlx5_eswitch *src_esw, 127 u16 vport) 128 { 129 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; 130 u32 metadata; 131 void *misc2; 132 void *misc; 133 134 /* Use metadata matching because vport is not represented by single 135 * VHCA in dual-port RoCE mode, and matching on source vport may fail. 136 */ 137 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { 138 if (mlx5_esw_indir_table_decap_vport(attr)) 139 vport = mlx5_esw_indir_table_decap_vport(attr); 140 141 if (!attr->chain && esw_attr && esw_attr->int_port) 142 metadata = 143 mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port); 144 else 145 metadata = 146 mlx5_eswitch_get_vport_metadata_for_match(src_esw, vport); 147 148 misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2); 149 MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, metadata); 150 151 misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2); 152 MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 153 mlx5_eswitch_get_vport_metadata_mask()); 154 155 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; 156 } else { 157 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); 158 MLX5_SET(fte_match_set_misc, misc, source_port, vport); 159 160 if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) 161 MLX5_SET(fte_match_set_misc, misc, 162 source_eswitch_owner_vhca_id, 163 MLX5_CAP_GEN(src_esw->dev, vhca_id)); 164 165 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); 166 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); 167 if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) 168 MLX5_SET_TO_ONES(fte_match_set_misc, misc, 169 source_eswitch_owner_vhca_id); 170 171 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; 172 } 173 } 174 175 static int 176 esw_setup_decap_indir(struct mlx5_eswitch *esw, 177 struct mlx5_flow_attr *attr) 178 { 179 struct mlx5_flow_table *ft; 180 181 if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE)) 182 return -EOPNOTSUPP; 183 184 ft = mlx5_esw_indir_table_get(esw, attr, 185 mlx5_esw_indir_table_decap_vport(attr), true); 186 return PTR_ERR_OR_ZERO(ft); 187 } 188 189 static void 190 esw_cleanup_decap_indir(struct mlx5_eswitch *esw, 191 struct mlx5_flow_attr *attr) 192 { 193 if (mlx5_esw_indir_table_decap_vport(attr)) 194 mlx5_esw_indir_table_put(esw, 195 mlx5_esw_indir_table_decap_vport(attr), 196 true); 197 } 198 199 static int 200 esw_setup_mtu_dest(struct mlx5_flow_destination *dest, 201 struct mlx5e_meter_attr *meter, 202 int i) 203 { 204 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_RANGE; 205 dest[i].range.field = MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN; 206 dest[i].range.min = 0; 207 dest[i].range.max = meter->params.mtu; 208 dest[i].range.hit_ft = mlx5e_post_meter_get_mtu_true_ft(meter->post_meter); 209 dest[i].range.miss_ft = mlx5e_post_meter_get_mtu_false_ft(meter->post_meter); 210 211 return 0; 212 } 213 214 static int 215 esw_setup_sampler_dest(struct mlx5_flow_destination *dest, 216 struct mlx5_flow_act *flow_act, 217 u32 sampler_id, 218 int i) 219 { 220 flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; 221 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER; 222 dest[i].sampler_id = sampler_id; 223 224 return 0; 225 } 226 227 static int 228 esw_setup_ft_dest(struct mlx5_flow_destination *dest, 229 struct mlx5_flow_act *flow_act, 230 struct mlx5_eswitch *esw, 231 struct mlx5_flow_attr *attr, 232 int i) 233 { 234 flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; 235 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 236 dest[i].ft = attr->dest_ft; 237 238 if (mlx5_esw_indir_table_decap_vport(attr)) 239 return esw_setup_decap_indir(esw, attr); 240 return 0; 241 } 242 243 static void 244 esw_setup_accept_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act, 245 struct mlx5_fs_chains *chains, int i) 246 { 247 if (mlx5_chains_ignore_flow_level_supported(chains)) 248 flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; 249 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 250 dest[i].ft = mlx5_chains_get_tc_end_ft(chains); 251 } 252 253 static void 254 esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act, 255 struct mlx5_eswitch *esw, int i) 256 { 257 if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level)) 258 flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; 259 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 260 dest[i].ft = mlx5_eswitch_get_slow_fdb(esw); 261 } 262 263 static int 264 esw_setup_chain_dest(struct mlx5_flow_destination *dest, 265 struct mlx5_flow_act *flow_act, 266 struct mlx5_fs_chains *chains, 267 u32 chain, u32 prio, u32 level, 268 int i) 269 { 270 struct mlx5_flow_table *ft; 271 272 flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; 273 ft = mlx5_chains_get_table(chains, chain, prio, level); 274 if (IS_ERR(ft)) 275 return PTR_ERR(ft); 276 277 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 278 dest[i].ft = ft; 279 return 0; 280 } 281 282 static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr, 283 int from, int to) 284 { 285 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; 286 struct mlx5_fs_chains *chains = esw_chains(esw); 287 int i; 288 289 for (i = from; i < to; i++) 290 if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE) 291 mlx5_chains_put_table(chains, 0, 1, 0); 292 else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].vport, 293 esw_attr->dests[i].mdev)) 294 mlx5_esw_indir_table_put(esw, esw_attr->dests[i].vport, false); 295 } 296 297 static bool 298 esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr) 299 { 300 int i; 301 302 for (i = esw_attr->split_count; i < esw_attr->out_count; i++) 303 if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE) 304 return true; 305 return false; 306 } 307 308 static int 309 esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest, 310 struct mlx5_flow_act *flow_act, 311 struct mlx5_eswitch *esw, 312 struct mlx5_fs_chains *chains, 313 struct mlx5_flow_attr *attr, 314 int *i) 315 { 316 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; 317 int err; 318 319 if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE)) 320 return -EOPNOTSUPP; 321 322 /* flow steering cannot handle more than one dest with the same ft 323 * in a single flow 324 */ 325 if (esw_attr->out_count - esw_attr->split_count > 1) 326 return -EOPNOTSUPP; 327 328 err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i); 329 if (err) 330 return err; 331 332 if (esw_attr->dests[esw_attr->split_count].pkt_reformat) { 333 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; 334 flow_act->pkt_reformat = esw_attr->dests[esw_attr->split_count].pkt_reformat; 335 } 336 (*i)++; 337 338 return 0; 339 } 340 341 static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw, 342 struct mlx5_flow_attr *attr) 343 { 344 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; 345 346 esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count); 347 } 348 349 static bool 350 esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr) 351 { 352 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; 353 bool result = false; 354 int i; 355 356 /* Indirect table is supported only for flows with in_port uplink 357 * and the destination is vport on the same eswitch as the uplink, 358 * return false in case at least one of destinations doesn't meet 359 * this criteria. 360 */ 361 for (i = esw_attr->split_count; i < esw_attr->out_count; i++) { 362 if (esw_attr->dests[i].vport_valid && 363 mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].vport, 364 esw_attr->dests[i].mdev)) { 365 result = true; 366 } else { 367 result = false; 368 break; 369 } 370 } 371 return result; 372 } 373 374 static int 375 esw_setup_indir_table(struct mlx5_flow_destination *dest, 376 struct mlx5_flow_act *flow_act, 377 struct mlx5_eswitch *esw, 378 struct mlx5_flow_attr *attr, 379 int *i) 380 { 381 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; 382 int j, err; 383 384 if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE)) 385 return -EOPNOTSUPP; 386 387 for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) { 388 flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; 389 dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 390 391 dest[*i].ft = mlx5_esw_indir_table_get(esw, attr, 392 esw_attr->dests[j].vport, false); 393 if (IS_ERR(dest[*i].ft)) { 394 err = PTR_ERR(dest[*i].ft); 395 goto err_indir_tbl_get; 396 } 397 } 398 399 if (mlx5_esw_indir_table_decap_vport(attr)) { 400 err = esw_setup_decap_indir(esw, attr); 401 if (err) 402 goto err_indir_tbl_get; 403 } 404 405 return 0; 406 407 err_indir_tbl_get: 408 esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j); 409 return err; 410 } 411 412 static void esw_cleanup_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr) 413 { 414 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; 415 416 esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count); 417 esw_cleanup_decap_indir(esw, attr); 418 } 419 420 static void 421 esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level) 422 { 423 mlx5_chains_put_table(chains, chain, prio, level); 424 } 425 426 static bool esw_same_vhca_id(struct mlx5_core_dev *mdev1, struct mlx5_core_dev *mdev2) 427 { 428 return MLX5_CAP_GEN(mdev1, vhca_id) == MLX5_CAP_GEN(mdev2, vhca_id); 429 } 430 431 static bool esw_setup_uplink_fwd_ipsec_needed(struct mlx5_eswitch *esw, 432 struct mlx5_esw_flow_attr *esw_attr, 433 int attr_idx) 434 { 435 if (esw->offloads.ft_ipsec_tx_pol && 436 esw_attr->dests[attr_idx].vport_valid && 437 esw_attr->dests[attr_idx].vport == MLX5_VPORT_UPLINK && 438 /* To be aligned with software, encryption is needed only for tunnel device */ 439 (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) && 440 esw_attr->dests[attr_idx].vport != esw_attr->in_rep->vport && 441 esw_same_vhca_id(esw_attr->dests[attr_idx].mdev, esw->dev)) 442 return true; 443 444 return false; 445 } 446 447 static bool esw_flow_dests_fwd_ipsec_check(struct mlx5_eswitch *esw, 448 struct mlx5_esw_flow_attr *esw_attr) 449 { 450 int i; 451 452 if (!esw->offloads.ft_ipsec_tx_pol) 453 return true; 454 455 for (i = 0; i < esw_attr->split_count; i++) 456 if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, i)) 457 return false; 458 459 for (i = esw_attr->split_count; i < esw_attr->out_count; i++) 460 if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, i) && 461 (esw_attr->out_count - esw_attr->split_count > 1)) 462 return false; 463 464 return true; 465 } 466 467 static void 468 esw_setup_dest_fwd_vport(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act, 469 struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr, 470 int attr_idx, int dest_idx, bool pkt_reformat) 471 { 472 dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; 473 dest[dest_idx].vport.num = esw_attr->dests[attr_idx].vport; 474 if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) { 475 dest[dest_idx].vport.vhca_id = 476 MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id); 477 dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; 478 if (dest[dest_idx].vport.num == MLX5_VPORT_UPLINK && 479 mlx5_lag_is_mpesw(esw->dev)) 480 dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK; 481 } 482 if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) { 483 if (pkt_reformat) { 484 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; 485 flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat; 486 } 487 dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID; 488 dest[dest_idx].vport.pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat; 489 } 490 } 491 492 static void 493 esw_setup_dest_fwd_ipsec(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act, 494 struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr, 495 int attr_idx, int dest_idx, bool pkt_reformat) 496 { 497 dest[dest_idx].ft = esw->offloads.ft_ipsec_tx_pol; 498 dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 499 if (pkt_reformat && 500 esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) { 501 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; 502 flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat; 503 } 504 } 505 506 static void 507 esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act, 508 struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr, 509 int attr_idx, int dest_idx, bool pkt_reformat) 510 { 511 if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, attr_idx)) 512 esw_setup_dest_fwd_ipsec(dest, flow_act, esw, esw_attr, 513 attr_idx, dest_idx, pkt_reformat); 514 else 515 esw_setup_dest_fwd_vport(dest, flow_act, esw, esw_attr, 516 attr_idx, dest_idx, pkt_reformat); 517 } 518 519 static int 520 esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act, 521 struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr, 522 int i) 523 { 524 int j; 525 526 for (j = esw_attr->split_count; j < esw_attr->out_count; j++, i++) 527 esw_setup_vport_dest(dest, flow_act, esw, esw_attr, j, i, true); 528 return i; 529 } 530 531 static bool 532 esw_src_port_rewrite_supported(struct mlx5_eswitch *esw) 533 { 534 return MLX5_CAP_GEN(esw->dev, reg_c_preserve) && 535 mlx5_eswitch_vport_match_metadata_enabled(esw) && 536 MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level); 537 } 538 539 static bool 540 esw_dests_to_int_external(struct mlx5_flow_destination *dests, int max_dest) 541 { 542 bool internal_dest = false, external_dest = false; 543 int i; 544 545 for (i = 0; i < max_dest; i++) { 546 if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT && 547 dests[i].type != MLX5_FLOW_DESTINATION_TYPE_UPLINK) 548 continue; 549 550 /* Uplink dest is external, but considered as internal 551 * if there is reformat because firmware uses LB+hairpin to support it. 552 */ 553 if (dests[i].vport.num == MLX5_VPORT_UPLINK && 554 !(dests[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)) 555 external_dest = true; 556 else 557 internal_dest = true; 558 559 if (internal_dest && external_dest) 560 return true; 561 } 562 563 return false; 564 } 565 566 static int 567 esw_setup_dests(struct mlx5_flow_destination *dest, 568 struct mlx5_flow_act *flow_act, 569 struct mlx5_eswitch *esw, 570 struct mlx5_flow_attr *attr, 571 struct mlx5_flow_spec *spec, 572 int *i) 573 { 574 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; 575 struct mlx5_fs_chains *chains = esw_chains(esw); 576 int err = 0; 577 578 if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) && 579 esw_src_port_rewrite_supported(esw)) 580 attr->flags |= MLX5_ATTR_FLAG_SRC_REWRITE; 581 582 if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) { 583 esw_setup_slow_path_dest(dest, flow_act, esw, *i); 584 (*i)++; 585 goto out; 586 } 587 588 if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) { 589 esw_setup_sampler_dest(dest, flow_act, attr->sample_attr.sampler_id, *i); 590 (*i)++; 591 } else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) { 592 esw_setup_accept_dest(dest, flow_act, chains, *i); 593 (*i)++; 594 } else if (attr->flags & MLX5_ATTR_FLAG_MTU) { 595 err = esw_setup_mtu_dest(dest, &attr->meter_attr, *i); 596 (*i)++; 597 } else if (esw_is_indir_table(esw, attr)) { 598 err = esw_setup_indir_table(dest, flow_act, esw, attr, i); 599 } else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) { 600 err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i); 601 } else { 602 *i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i); 603 604 if (attr->dest_ft) { 605 err = esw_setup_ft_dest(dest, flow_act, esw, attr, *i); 606 (*i)++; 607 } else if (attr->dest_chain) { 608 err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 609 1, 0, *i); 610 (*i)++; 611 } 612 } 613 614 if (attr->extra_split_ft) { 615 flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; 616 dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 617 dest[*i].ft = attr->extra_split_ft; 618 (*i)++; 619 } 620 621 out: 622 return err; 623 } 624 625 static void 626 esw_cleanup_dests(struct mlx5_eswitch *esw, 627 struct mlx5_flow_attr *attr) 628 { 629 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; 630 struct mlx5_fs_chains *chains = esw_chains(esw); 631 632 if (attr->dest_ft) { 633 esw_cleanup_decap_indir(esw, attr); 634 } else if (!mlx5e_tc_attr_flags_skip(attr->flags)) { 635 if (attr->dest_chain) 636 esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0); 637 else if (esw_is_indir_table(esw, attr)) 638 esw_cleanup_indir_table(esw, attr); 639 else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) 640 esw_cleanup_chain_src_port_rewrite(esw, attr); 641 } 642 } 643 644 static void 645 esw_setup_meter(struct mlx5_flow_attr *attr, struct mlx5_flow_act *flow_act) 646 { 647 struct mlx5e_flow_meter_handle *meter; 648 649 meter = attr->meter_attr.meter; 650 flow_act->exe_aso.type = attr->exe_aso_type; 651 flow_act->exe_aso.object_id = meter->obj_id; 652 flow_act->exe_aso.base_id = mlx5e_flow_meter_get_base_id(meter); 653 flow_act->exe_aso.flow_meter.meter_idx = meter->idx; 654 flow_act->exe_aso.flow_meter.init_color = MLX5_FLOW_METER_COLOR_GREEN; 655 /* use metadata reg 5 for packet color */ 656 flow_act->exe_aso.return_reg_id = 5; 657 } 658 659 struct mlx5_flow_handle * 660 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, 661 struct mlx5_flow_spec *spec, 662 struct mlx5_flow_attr *attr) 663 { 664 struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, }; 665 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; 666 struct mlx5_fs_chains *chains = esw_chains(esw); 667 bool split = !!(esw_attr->split_count); 668 struct mlx5_vport_tbl_attr fwd_attr; 669 struct mlx5_flow_destination *dest; 670 struct mlx5_flow_handle *rule; 671 struct mlx5_flow_table *fdb; 672 int i = 0; 673 674 if (esw->mode != MLX5_ESWITCH_OFFLOADS) 675 return ERR_PTR(-EOPNOTSUPP); 676 677 if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1)) 678 return ERR_PTR(-EOPNOTSUPP); 679 680 if (!esw_flow_dests_fwd_ipsec_check(esw, esw_attr)) 681 return ERR_PTR(-EOPNOTSUPP); 682 683 dest = kzalloc_objs(*dest, MLX5_MAX_FLOW_FWD_VPORTS + 1); 684 if (!dest) 685 return ERR_PTR(-ENOMEM); 686 687 flow_act.action = attr->action; 688 689 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) { 690 flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]); 691 flow_act.vlan[0].vid = esw_attr->vlan_vid[0]; 692 flow_act.vlan[0].prio = esw_attr->vlan_prio[0]; 693 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) { 694 flow_act.vlan[1].ethtype = ntohs(esw_attr->vlan_proto[1]); 695 flow_act.vlan[1].vid = esw_attr->vlan_vid[1]; 696 flow_act.vlan[1].prio = esw_attr->vlan_prio[1]; 697 } 698 } 699 700 mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr); 701 702 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { 703 int err; 704 705 err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i); 706 if (err) { 707 rule = ERR_PTR(err); 708 goto err_create_goto_table; 709 } 710 711 /* Header rewrite with combined wire+loopback in FDB is not allowed */ 712 if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) && 713 esw_dests_to_int_external(dest, i)) { 714 esw_warn(esw->dev, 715 "FDB: Header rewrite with forwarding to both internal and external dests is not allowed\n"); 716 rule = ERR_PTR(-EINVAL); 717 goto err_esw_get; 718 } 719 } 720 721 if (esw_attr->decap_pkt_reformat) 722 flow_act.pkt_reformat = esw_attr->decap_pkt_reformat; 723 724 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { 725 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; 726 dest[i].counter = attr->counter; 727 i++; 728 } 729 730 if (attr->outer_match_level != MLX5_MATCH_NONE) 731 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; 732 if (attr->inner_match_level != MLX5_MATCH_NONE) 733 spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; 734 735 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) 736 flow_act.modify_hdr = attr->modify_hdr; 737 738 if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) && 739 attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER) 740 esw_setup_meter(attr, &flow_act); 741 742 if (split) { 743 fwd_attr.chain = attr->chain; 744 fwd_attr.prio = attr->prio; 745 fwd_attr.vport = esw_attr->in_rep->vport; 746 fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns; 747 748 fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr); 749 } else { 750 if (attr->chain || attr->prio) 751 fdb = mlx5_chains_get_table(chains, attr->chain, 752 attr->prio, 0); 753 else 754 fdb = attr->ft; 755 756 if (!(attr->flags & MLX5_ATTR_FLAG_NO_IN_PORT)) 757 mlx5_eswitch_set_rule_source_port(esw, spec, attr, 758 esw_attr->in_mdev->priv.eswitch, 759 esw_attr->in_rep->vport); 760 } 761 if (IS_ERR(fdb)) { 762 rule = ERR_CAST(fdb); 763 goto err_esw_get; 764 } 765 766 if (!i) { 767 kfree(dest); 768 dest = NULL; 769 } 770 771 if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec)) 772 rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr, 773 &flow_act, dest, i); 774 else 775 rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i); 776 if (IS_ERR(rule)) 777 goto err_add_rule; 778 else 779 atomic64_inc(&esw->offloads.num_flows); 780 781 kfree(dest); 782 return rule; 783 784 err_add_rule: 785 if (split) 786 mlx5_esw_vporttbl_put(esw, &fwd_attr); 787 else if (attr->chain || attr->prio) 788 mlx5_chains_put_table(chains, attr->chain, attr->prio, 0); 789 err_esw_get: 790 esw_cleanup_dests(esw, attr); 791 err_create_goto_table: 792 kfree(dest); 793 return rule; 794 } 795 796 struct mlx5_flow_handle * 797 mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, 798 struct mlx5_flow_spec *spec, 799 struct mlx5_flow_attr *attr) 800 { 801 struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, }; 802 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; 803 struct mlx5_fs_chains *chains = esw_chains(esw); 804 struct mlx5_vport_tbl_attr fwd_attr; 805 struct mlx5_flow_destination *dest; 806 struct mlx5_flow_table *fast_fdb; 807 struct mlx5_flow_table *fwd_fdb; 808 struct mlx5_flow_handle *rule; 809 int i, err = 0; 810 811 dest = kzalloc_objs(*dest, MLX5_MAX_FLOW_FWD_VPORTS + 1); 812 if (!dest) 813 return ERR_PTR(-ENOMEM); 814 815 fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0); 816 if (IS_ERR(fast_fdb)) { 817 rule = ERR_CAST(fast_fdb); 818 goto err_get_fast; 819 } 820 821 fwd_attr.chain = attr->chain; 822 fwd_attr.prio = attr->prio; 823 fwd_attr.vport = esw_attr->in_rep->vport; 824 fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns; 825 fwd_fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr); 826 if (IS_ERR(fwd_fdb)) { 827 rule = ERR_CAST(fwd_fdb); 828 goto err_get_fwd; 829 } 830 831 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 832 for (i = 0; i < esw_attr->split_count; i++) { 833 if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE) 834 /* Source port rewrite (forward to ovs internal port or statck device) isn't 835 * supported in the rule of split action. 836 */ 837 err = -EOPNOTSUPP; 838 else 839 esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false); 840 841 if (err) { 842 rule = ERR_PTR(err); 843 goto err_chain_src_rewrite; 844 } 845 } 846 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 847 dest[i].ft = fwd_fdb; 848 i++; 849 850 mlx5_eswitch_set_rule_source_port(esw, spec, attr, 851 esw_attr->in_mdev->priv.eswitch, 852 esw_attr->in_rep->vport); 853 854 if (attr->outer_match_level != MLX5_MATCH_NONE) 855 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; 856 857 flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; 858 rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i); 859 860 if (IS_ERR(rule)) { 861 i = esw_attr->split_count; 862 goto err_chain_src_rewrite; 863 } 864 865 atomic64_inc(&esw->offloads.num_flows); 866 867 kfree(dest); 868 return rule; 869 err_chain_src_rewrite: 870 mlx5_esw_vporttbl_put(esw, &fwd_attr); 871 err_get_fwd: 872 mlx5_chains_put_table(chains, attr->chain, attr->prio, 0); 873 err_get_fast: 874 kfree(dest); 875 return rule; 876 } 877 878 static void 879 __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw, 880 struct mlx5_flow_handle *rule, 881 struct mlx5_flow_attr *attr, 882 bool fwd_rule) 883 { 884 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; 885 struct mlx5_fs_chains *chains = esw_chains(esw); 886 bool split = (esw_attr->split_count > 0); 887 struct mlx5_vport_tbl_attr fwd_attr; 888 int i; 889 890 mlx5_del_flow_rules(rule); 891 892 if (!mlx5e_tc_attr_flags_skip(attr->flags)) { 893 /* unref the term table */ 894 for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) { 895 if (esw_attr->dests[i].termtbl) 896 mlx5_eswitch_termtbl_put(esw, esw_attr->dests[i].termtbl); 897 } 898 } 899 900 atomic64_dec(&esw->offloads.num_flows); 901 902 if (fwd_rule || split) { 903 fwd_attr.chain = attr->chain; 904 fwd_attr.prio = attr->prio; 905 fwd_attr.vport = esw_attr->in_rep->vport; 906 fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns; 907 } 908 909 if (fwd_rule) { 910 mlx5_esw_vporttbl_put(esw, &fwd_attr); 911 mlx5_chains_put_table(chains, attr->chain, attr->prio, 0); 912 } else { 913 if (split) 914 mlx5_esw_vporttbl_put(esw, &fwd_attr); 915 else if (attr->chain || attr->prio) 916 mlx5_chains_put_table(chains, attr->chain, attr->prio, 0); 917 esw_cleanup_dests(esw, attr); 918 } 919 } 920 921 void 922 mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw, 923 struct mlx5_flow_handle *rule, 924 struct mlx5_flow_attr *attr) 925 { 926 __mlx5_eswitch_del_rule(esw, rule, attr, false); 927 } 928 929 void 930 mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw, 931 struct mlx5_flow_handle *rule, 932 struct mlx5_flow_attr *attr) 933 { 934 __mlx5_eswitch_del_rule(esw, rule, attr, true); 935 } 936 937 struct mlx5_flow_handle * 938 mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw, 939 struct mlx5_eswitch *from_esw, 940 struct mlx5_eswitch_rep *rep, 941 u32 sqn) 942 { 943 struct mlx5_flow_act flow_act = {0}; 944 struct mlx5_flow_destination dest = {}; 945 struct mlx5_flow_handle *flow_rule; 946 struct mlx5_flow_spec *spec; 947 void *misc; 948 u16 vport; 949 950 spec = kvzalloc_obj(*spec); 951 if (!spec) { 952 flow_rule = ERR_PTR(-ENOMEM); 953 goto out; 954 } 955 956 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); 957 MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn); 958 959 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); 960 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn); 961 962 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; 963 964 /* source vport is the esw manager */ 965 vport = from_esw->manager_vport; 966 967 if (mlx5_eswitch_vport_match_metadata_enabled(on_esw)) { 968 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2); 969 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 970 mlx5_eswitch_get_vport_metadata_for_match(from_esw, vport)); 971 972 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2); 973 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 974 mlx5_eswitch_get_vport_metadata_mask()); 975 976 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; 977 } else { 978 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); 979 MLX5_SET(fte_match_set_misc, misc, source_port, vport); 980 981 if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch)) 982 MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id, 983 MLX5_CAP_GEN(from_esw->dev, vhca_id)); 984 985 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); 986 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); 987 988 if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch)) 989 MLX5_SET_TO_ONES(fte_match_set_misc, misc, 990 source_eswitch_owner_vhca_id); 991 992 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; 993 } 994 995 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; 996 dest.vport.num = rep->vport; 997 dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id); 998 dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; 999 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 1000 1001 if (rep->vport == MLX5_VPORT_UPLINK && 1002 on_esw == from_esw && on_esw->offloads.ft_ipsec_tx_pol) { 1003 dest.ft = on_esw->offloads.ft_ipsec_tx_pol; 1004 flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL; 1005 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 1006 } else { 1007 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; 1008 dest.vport.num = rep->vport; 1009 dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id); 1010 dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; 1011 } 1012 1013 if (MLX5_CAP_ESW_FLOWTABLE(on_esw->dev, flow_source) && 1014 rep->vport == MLX5_VPORT_UPLINK) 1015 spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT; 1016 1017 flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(on_esw), 1018 spec, &flow_act, &dest, 1); 1019 if (IS_ERR(flow_rule)) 1020 esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %pe\n", 1021 flow_rule); 1022 out: 1023 kvfree(spec); 1024 return flow_rule; 1025 } 1026 EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule); 1027 1028 void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule) 1029 { 1030 mlx5_del_flow_rules(rule); 1031 } 1032 1033 void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule) 1034 { 1035 if (rule) 1036 mlx5_del_flow_rules(rule); 1037 } 1038 1039 struct mlx5_flow_handle * 1040 mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num) 1041 { 1042 struct mlx5_flow_destination dest = {}; 1043 struct mlx5_flow_act flow_act = {0}; 1044 struct mlx5_flow_handle *flow_rule; 1045 struct mlx5_flow_spec *spec; 1046 1047 spec = kvzalloc_obj(*spec); 1048 if (!spec) 1049 return ERR_PTR(-ENOMEM); 1050 1051 MLX5_SET(fte_match_param, spec->match_criteria, 1052 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask()); 1053 MLX5_SET(fte_match_param, spec->match_criteria, 1054 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK); 1055 MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_1, 1056 ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK); 1057 1058 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; 1059 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; 1060 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 1061 1062 MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0, 1063 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num)); 1064 dest.vport.num = vport_num; 1065 1066 flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw), 1067 spec, &flow_act, &dest, 1); 1068 if (IS_ERR(flow_rule)) 1069 esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %pe\n", 1070 vport_num, flow_rule); 1071 1072 kvfree(spec); 1073 return flow_rule; 1074 } 1075 1076 static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw) 1077 { 1078 return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) & 1079 MLX5_FDB_TO_VPORT_REG_C_1; 1080 } 1081 1082 static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable) 1083 { 1084 u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {}; 1085 u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {}; 1086 u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {}; 1087 u8 curr, wanted; 1088 int err; 1089 1090 if (!mlx5_eswitch_reg_c1_loopback_supported(esw) && 1091 !mlx5_eswitch_vport_match_metadata_enabled(esw)) 1092 return 0; 1093 1094 MLX5_SET(query_esw_vport_context_in, in, opcode, 1095 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT); 1096 err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out); 1097 if (err) 1098 return err; 1099 1100 curr = MLX5_GET(query_esw_vport_context_out, out, 1101 esw_vport_context.fdb_to_vport_reg_c_id); 1102 wanted = MLX5_FDB_TO_VPORT_REG_C_0; 1103 if (mlx5_eswitch_reg_c1_loopback_supported(esw)) 1104 wanted |= MLX5_FDB_TO_VPORT_REG_C_1; 1105 1106 if (enable) 1107 curr |= wanted; 1108 else 1109 curr &= ~wanted; 1110 1111 MLX5_SET(modify_esw_vport_context_in, min, 1112 esw_vport_context.fdb_to_vport_reg_c_id, curr); 1113 MLX5_SET(modify_esw_vport_context_in, min, 1114 field_select.fdb_to_vport_reg_c_id, 1); 1115 1116 err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min); 1117 if (!err) { 1118 if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1)) 1119 esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED; 1120 else 1121 esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED; 1122 } 1123 1124 return err; 1125 } 1126 1127 static void peer_miss_rules_setup(struct mlx5_eswitch *esw, 1128 struct mlx5_core_dev *peer_dev, 1129 struct mlx5_flow_spec *spec, 1130 struct mlx5_flow_destination *dest) 1131 { 1132 void *misc; 1133 1134 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { 1135 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 1136 misc_parameters_2); 1137 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 1138 mlx5_eswitch_get_vport_metadata_mask()); 1139 1140 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; 1141 } else { 1142 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, 1143 misc_parameters); 1144 1145 MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id, 1146 MLX5_CAP_GEN(peer_dev, vhca_id)); 1147 1148 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; 1149 1150 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 1151 misc_parameters); 1152 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); 1153 MLX5_SET_TO_ONES(fte_match_set_misc, misc, 1154 source_eswitch_owner_vhca_id); 1155 } 1156 1157 dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT; 1158 dest->vport.num = peer_dev->priv.eswitch->manager_vport; 1159 dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id); 1160 dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; 1161 } 1162 1163 static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw, 1164 struct mlx5_eswitch *peer_esw, 1165 struct mlx5_flow_spec *spec, 1166 u16 vport) 1167 { 1168 void *misc; 1169 1170 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { 1171 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, 1172 misc_parameters_2); 1173 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 1174 mlx5_eswitch_get_vport_metadata_for_match(peer_esw, 1175 vport)); 1176 } else { 1177 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, 1178 misc_parameters); 1179 MLX5_SET(fte_match_set_misc, misc, source_port, vport); 1180 } 1181 } 1182 1183 static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, 1184 struct mlx5_core_dev *peer_dev) 1185 { 1186 struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch; 1187 struct mlx5_flow_destination dest = {}; 1188 struct mlx5_flow_act flow_act = {0}; 1189 struct mlx5_flow_handle **flows; 1190 struct mlx5_flow_handle *flow; 1191 struct mlx5_vport *peer_vport; 1192 struct mlx5_flow_spec *spec; 1193 int err, pfindex; 1194 unsigned long i; 1195 void *misc; 1196 1197 if (!MLX5_VPORT_MANAGER(peer_dev) && 1198 !mlx5_core_is_ecpf_esw_manager(peer_dev)) 1199 return 0; 1200 1201 spec = kvzalloc_obj(*spec); 1202 if (!spec) 1203 return -ENOMEM; 1204 1205 peer_miss_rules_setup(esw, peer_dev, spec, &dest); 1206 1207 flows = kvzalloc_objs(*flows, peer_esw->total_vports); 1208 if (!flows) { 1209 err = -ENOMEM; 1210 goto alloc_flows_err; 1211 } 1212 1213 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 1214 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, 1215 misc_parameters); 1216 1217 if (mlx5_core_is_ecpf_esw_manager(peer_dev) && 1218 mlx5_esw_host_functions_enabled(peer_dev)) { 1219 peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF); 1220 esw_set_peer_miss_rule_source_port(esw, peer_esw, spec, 1221 MLX5_VPORT_PF); 1222 1223 flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw), 1224 spec, &flow_act, &dest, 1); 1225 if (IS_ERR(flow)) { 1226 err = PTR_ERR(flow); 1227 goto add_pf_flow_err; 1228 } 1229 flows[peer_vport->index] = flow; 1230 } 1231 1232 if (mlx5_ecpf_vport_exists(peer_dev)) { 1233 peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF); 1234 MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF); 1235 flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw), 1236 spec, &flow_act, &dest, 1); 1237 if (IS_ERR(flow)) { 1238 err = PTR_ERR(flow); 1239 goto add_ecpf_flow_err; 1240 } 1241 flows[peer_vport->index] = flow; 1242 } 1243 1244 mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport, 1245 mlx5_core_max_vfs(peer_dev)) { 1246 esw_set_peer_miss_rule_source_port(esw, peer_esw, spec, 1247 peer_vport->vport); 1248 flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw), 1249 spec, &flow_act, &dest, 1); 1250 if (IS_ERR(flow)) { 1251 err = PTR_ERR(flow); 1252 goto add_vf_flow_err; 1253 } 1254 flows[peer_vport->index] = flow; 1255 } 1256 1257 if (mlx5_core_ec_sriov_enabled(peer_dev)) { 1258 mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport, 1259 mlx5_core_max_ec_vfs(peer_dev)) { 1260 esw_set_peer_miss_rule_source_port(esw, peer_esw, 1261 spec, 1262 peer_vport->vport); 1263 flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, 1264 spec, &flow_act, &dest, 1); 1265 if (IS_ERR(flow)) { 1266 err = PTR_ERR(flow); 1267 goto add_ec_vf_flow_err; 1268 } 1269 flows[peer_vport->index] = flow; 1270 } 1271 } 1272 1273 pfindex = mlx5_get_dev_index(peer_dev); 1274 if (pfindex >= MLX5_MAX_PORTS) { 1275 esw_warn(esw->dev, "Peer dev index(%d) is over the max num defined(%d)\n", 1276 pfindex, MLX5_MAX_PORTS); 1277 err = -EINVAL; 1278 goto add_ec_vf_flow_err; 1279 } 1280 esw->fdb_table.offloads.peer_miss_rules[pfindex] = flows; 1281 1282 kvfree(spec); 1283 return 0; 1284 1285 add_ec_vf_flow_err: 1286 mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport, 1287 mlx5_core_max_ec_vfs(peer_dev)) { 1288 if (!flows[peer_vport->index]) 1289 continue; 1290 mlx5_del_flow_rules(flows[peer_vport->index]); 1291 } 1292 add_vf_flow_err: 1293 mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport, 1294 mlx5_core_max_vfs(peer_dev)) { 1295 if (!flows[peer_vport->index]) 1296 continue; 1297 mlx5_del_flow_rules(flows[peer_vport->index]); 1298 } 1299 if (mlx5_ecpf_vport_exists(peer_dev)) { 1300 peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF); 1301 mlx5_del_flow_rules(flows[peer_vport->index]); 1302 } 1303 add_ecpf_flow_err: 1304 1305 if (mlx5_core_is_ecpf_esw_manager(peer_dev) && 1306 mlx5_esw_host_functions_enabled(peer_dev)) { 1307 peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF); 1308 mlx5_del_flow_rules(flows[peer_vport->index]); 1309 } 1310 add_pf_flow_err: 1311 esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err); 1312 kvfree(flows); 1313 alloc_flows_err: 1314 kvfree(spec); 1315 return err; 1316 } 1317 1318 static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw, 1319 struct mlx5_core_dev *peer_dev) 1320 { 1321 struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch; 1322 u16 peer_index = mlx5_get_dev_index(peer_dev); 1323 struct mlx5_flow_handle **flows; 1324 struct mlx5_vport *peer_vport; 1325 unsigned long i; 1326 1327 flows = esw->fdb_table.offloads.peer_miss_rules[peer_index]; 1328 if (!flows) 1329 return; 1330 1331 if (mlx5_core_ec_sriov_enabled(peer_dev)) { 1332 mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport, 1333 mlx5_core_max_ec_vfs(peer_dev)) 1334 mlx5_del_flow_rules(flows[peer_vport->index]); 1335 } 1336 1337 mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport, 1338 mlx5_core_max_vfs(peer_dev)) 1339 mlx5_del_flow_rules(flows[peer_vport->index]); 1340 1341 if (mlx5_ecpf_vport_exists(peer_dev)) { 1342 peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF); 1343 mlx5_del_flow_rules(flows[peer_vport->index]); 1344 } 1345 1346 if (mlx5_core_is_ecpf_esw_manager(peer_dev) && 1347 mlx5_esw_host_functions_enabled(peer_dev)) { 1348 peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF); 1349 mlx5_del_flow_rules(flows[peer_vport->index]); 1350 } 1351 1352 kvfree(flows); 1353 esw->fdb_table.offloads.peer_miss_rules[peer_index] = NULL; 1354 } 1355 1356 static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) 1357 { 1358 struct mlx5_flow_act flow_act = {0}; 1359 struct mlx5_flow_destination dest = {}; 1360 struct mlx5_flow_handle *flow_rule = NULL; 1361 struct mlx5_flow_spec *spec; 1362 void *headers_c; 1363 void *headers_v; 1364 int err = 0; 1365 u8 *dmac_c; 1366 u8 *dmac_v; 1367 1368 spec = kvzalloc_obj(*spec); 1369 if (!spec) { 1370 err = -ENOMEM; 1371 goto out; 1372 } 1373 1374 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 1375 headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 1376 outer_headers); 1377 dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c, 1378 outer_headers.dmac_47_16); 1379 dmac_c[0] = 0x01; 1380 1381 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; 1382 dest.vport.num = esw->manager_vport; 1383 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 1384 1385 flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw), 1386 spec, &flow_act, &dest, 1); 1387 if (IS_ERR(flow_rule)) { 1388 err = PTR_ERR(flow_rule); 1389 esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err); 1390 goto out; 1391 } 1392 1393 esw->fdb_table.offloads.miss_rule_uni = flow_rule; 1394 1395 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 1396 outer_headers); 1397 dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, 1398 outer_headers.dmac_47_16); 1399 dmac_v[0] = 0x01; 1400 flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw), 1401 spec, &flow_act, &dest, 1); 1402 if (IS_ERR(flow_rule)) { 1403 err = PTR_ERR(flow_rule); 1404 esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err); 1405 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni); 1406 goto out; 1407 } 1408 1409 esw->fdb_table.offloads.miss_rule_multi = flow_rule; 1410 1411 out: 1412 kvfree(spec); 1413 return err; 1414 } 1415 1416 struct mlx5_flow_handle * 1417 esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag) 1418 { 1419 struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, }; 1420 struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore; 1421 struct mlx5_flow_context *flow_context; 1422 struct mlx5_flow_handle *flow_rule; 1423 struct mlx5_flow_destination dest; 1424 struct mlx5_flow_spec *spec; 1425 void *misc; 1426 1427 if (!mlx5_eswitch_reg_c1_loopback_supported(esw)) 1428 return ERR_PTR(-EOPNOTSUPP); 1429 1430 spec = kvzalloc_obj(*spec); 1431 if (!spec) 1432 return ERR_PTR(-ENOMEM); 1433 1434 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 1435 misc_parameters_2); 1436 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 1437 ESW_REG_C0_USER_DATA_METADATA_MASK); 1438 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, 1439 misc_parameters_2); 1440 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag); 1441 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; 1442 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | 1443 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; 1444 flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id; 1445 1446 flow_context = &spec->flow_context; 1447 flow_context->flags |= FLOW_CONTEXT_HAS_TAG; 1448 flow_context->flow_tag = tag; 1449 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 1450 dest.ft = esw->offloads.ft_offloads; 1451 1452 flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); 1453 kvfree(spec); 1454 1455 if (IS_ERR(flow_rule)) 1456 esw_warn(esw->dev, 1457 "Failed to create restore rule for tag: %d, err(%d)\n", 1458 tag, (int)PTR_ERR(flow_rule)); 1459 1460 return flow_rule; 1461 } 1462 1463 #define MAX_PF_SQ 256 1464 #define MAX_SQ_NVPORTS 32 1465 1466 void 1467 mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw, 1468 u32 *flow_group_in, 1469 int match_params) 1470 { 1471 void *match_criteria = MLX5_ADDR_OF(create_flow_group_in, 1472 flow_group_in, 1473 match_criteria); 1474 1475 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { 1476 MLX5_SET(create_flow_group_in, flow_group_in, 1477 match_criteria_enable, 1478 MLX5_MATCH_MISC_PARAMETERS_2 | match_params); 1479 1480 MLX5_SET(fte_match_param, match_criteria, 1481 misc_parameters_2.metadata_reg_c_0, 1482 mlx5_eswitch_get_vport_metadata_mask()); 1483 } else { 1484 MLX5_SET(create_flow_group_in, flow_group_in, 1485 match_criteria_enable, 1486 MLX5_MATCH_MISC_PARAMETERS | match_params); 1487 1488 MLX5_SET_TO_ONES(fte_match_param, match_criteria, 1489 misc_parameters.source_port); 1490 } 1491 } 1492 1493 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT) 1494 static void esw_vport_tbl_put(struct mlx5_eswitch *esw) 1495 { 1496 struct mlx5_vport_tbl_attr attr; 1497 struct mlx5_vport *vport; 1498 unsigned long i; 1499 1500 attr.chain = 0; 1501 attr.prio = 1; 1502 mlx5_esw_for_each_vport(esw, i, vport) { 1503 attr.vport = vport->vport; 1504 attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns; 1505 mlx5_esw_vporttbl_put(esw, &attr); 1506 } 1507 } 1508 1509 static int esw_vport_tbl_get(struct mlx5_eswitch *esw) 1510 { 1511 struct mlx5_vport_tbl_attr attr; 1512 struct mlx5_flow_table *fdb; 1513 struct mlx5_vport *vport; 1514 unsigned long i; 1515 1516 attr.chain = 0; 1517 attr.prio = 1; 1518 mlx5_esw_for_each_vport(esw, i, vport) { 1519 attr.vport = vport->vport; 1520 attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns; 1521 fdb = mlx5_esw_vporttbl_get(esw, &attr); 1522 if (IS_ERR(fdb)) 1523 goto out; 1524 } 1525 return 0; 1526 1527 out: 1528 esw_vport_tbl_put(esw); 1529 return PTR_ERR(fdb); 1530 } 1531 1532 #define fdb_modify_header_fwd_to_table_supported(esw) \ 1533 (MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table)) 1534 static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags) 1535 { 1536 struct mlx5_core_dev *dev = esw->dev; 1537 1538 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ignore_flow_level)) 1539 *flags |= MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED; 1540 1541 if (!MLX5_CAP_ESW_FLOWTABLE(dev, multi_fdb_encap) && 1542 esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) { 1543 *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED; 1544 esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n"); 1545 } else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) { 1546 *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED; 1547 esw_warn(dev, "Tc chains and priorities offload aren't supported\n"); 1548 } else if (!fdb_modify_header_fwd_to_table_supported(esw)) { 1549 /* Disabled when ttl workaround is needed, e.g 1550 * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig 1551 */ 1552 esw_warn(dev, 1553 "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n"); 1554 *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED; 1555 } else { 1556 *flags |= MLX5_CHAINS_AND_PRIOS_SUPPORTED; 1557 esw_info(dev, "Supported tc chains and prios offload\n"); 1558 } 1559 1560 if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) 1561 *flags |= MLX5_CHAINS_FT_TUNNEL_SUPPORTED; 1562 } 1563 1564 static int 1565 esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb) 1566 { 1567 struct mlx5_core_dev *dev = esw->dev; 1568 struct mlx5_flow_table *nf_ft, *ft; 1569 struct mlx5_chains_attr attr = {}; 1570 struct mlx5_fs_chains *chains; 1571 int err; 1572 1573 esw_init_chains_offload_flags(esw, &attr.flags); 1574 attr.ns = MLX5_FLOW_NAMESPACE_FDB; 1575 attr.max_grp_num = esw->params.large_group_num; 1576 attr.default_ft = miss_fdb; 1577 attr.mapping = esw->offloads.reg_c0_obj_pool; 1578 attr.fs_base_prio = FDB_BYPASS_PATH; 1579 1580 chains = mlx5_chains_create(dev, &attr); 1581 if (IS_ERR(chains)) { 1582 err = PTR_ERR(chains); 1583 esw_warn(dev, "Failed to create fdb chains err(%d)\n", err); 1584 return err; 1585 } 1586 mlx5_chains_print_info(chains); 1587 1588 esw->fdb_table.offloads.esw_chains_priv = chains; 1589 1590 /* Create tc_end_ft which is the always created ft chain */ 1591 nf_ft = mlx5_chains_get_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1592 1, 0); 1593 if (IS_ERR(nf_ft)) { 1594 err = PTR_ERR(nf_ft); 1595 goto nf_ft_err; 1596 } 1597 1598 /* Always open the root for fast path */ 1599 ft = mlx5_chains_get_table(chains, 0, 1, 0); 1600 if (IS_ERR(ft)) { 1601 err = PTR_ERR(ft); 1602 goto level_0_err; 1603 } 1604 1605 /* Open level 1 for split fdb rules now if prios isn't supported */ 1606 if (!mlx5_chains_prios_supported(chains)) { 1607 err = esw_vport_tbl_get(esw); 1608 if (err) 1609 goto level_1_err; 1610 } 1611 1612 mlx5_chains_set_end_ft(chains, nf_ft); 1613 1614 return 0; 1615 1616 level_1_err: 1617 mlx5_chains_put_table(chains, 0, 1, 0); 1618 level_0_err: 1619 mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0); 1620 nf_ft_err: 1621 mlx5_chains_destroy(chains); 1622 esw->fdb_table.offloads.esw_chains_priv = NULL; 1623 1624 return err; 1625 } 1626 1627 static void 1628 esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains) 1629 { 1630 if (!mlx5_chains_prios_supported(chains)) 1631 esw_vport_tbl_put(esw); 1632 mlx5_chains_put_table(chains, 0, 1, 0); 1633 mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0); 1634 mlx5_chains_destroy(chains); 1635 } 1636 1637 #else /* CONFIG_MLX5_CLS_ACT */ 1638 1639 static int 1640 esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb) 1641 { return 0; } 1642 1643 static void 1644 esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains) 1645 {} 1646 1647 #endif 1648 1649 static int 1650 esw_create_send_to_vport_group(struct mlx5_eswitch *esw, 1651 struct mlx5_flow_table *fdb, 1652 u32 *flow_group_in, 1653 int *ix) 1654 { 1655 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1656 struct mlx5_flow_group *g; 1657 void *match_criteria; 1658 int count, err = 0; 1659 1660 memset(flow_group_in, 0, inlen); 1661 1662 mlx5_esw_set_flow_group_source_port(esw, flow_group_in, MLX5_MATCH_MISC_PARAMETERS); 1663 1664 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); 1665 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn); 1666 1667 if (!mlx5_eswitch_vport_match_metadata_enabled(esw) && 1668 MLX5_CAP_ESW(esw->dev, merged_eswitch)) { 1669 MLX5_SET_TO_ONES(fte_match_param, match_criteria, 1670 misc_parameters.source_eswitch_owner_vhca_id); 1671 MLX5_SET(create_flow_group_in, flow_group_in, 1672 source_eswitch_owner_vhca_id_valid, 1); 1673 } 1674 1675 /* See comment at table_size calculation */ 1676 count = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ); 1677 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); 1678 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, *ix + count - 1); 1679 *ix += count; 1680 1681 g = mlx5_create_flow_group(fdb, flow_group_in); 1682 if (IS_ERR(g)) { 1683 err = PTR_ERR(g); 1684 esw_warn(esw->dev, "Failed to create send-to-vport flow group err(%d)\n", err); 1685 goto out; 1686 } 1687 esw->fdb_table.offloads.send_to_vport_grp = g; 1688 1689 out: 1690 return err; 1691 } 1692 1693 static int 1694 esw_create_meta_send_to_vport_group(struct mlx5_eswitch *esw, 1695 struct mlx5_flow_table *fdb, 1696 u32 *flow_group_in, 1697 int *ix) 1698 { 1699 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1700 struct mlx5_flow_group *g; 1701 void *match_criteria; 1702 int err = 0; 1703 1704 if (!esw_src_port_rewrite_supported(esw)) 1705 return 0; 1706 1707 memset(flow_group_in, 0, inlen); 1708 1709 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 1710 MLX5_MATCH_MISC_PARAMETERS_2); 1711 1712 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); 1713 1714 MLX5_SET(fte_match_param, match_criteria, 1715 misc_parameters_2.metadata_reg_c_0, 1716 mlx5_eswitch_get_vport_metadata_mask()); 1717 MLX5_SET(fte_match_param, match_criteria, 1718 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK); 1719 1720 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix); 1721 MLX5_SET(create_flow_group_in, flow_group_in, 1722 end_flow_index, *ix + esw->total_vports - 1); 1723 *ix += esw->total_vports; 1724 1725 g = mlx5_create_flow_group(fdb, flow_group_in); 1726 if (IS_ERR(g)) { 1727 err = PTR_ERR(g); 1728 esw_warn(esw->dev, 1729 "Failed to create send-to-vport meta flow group err(%d)\n", err); 1730 goto send_vport_meta_err; 1731 } 1732 esw->fdb_table.offloads.send_to_vport_meta_grp = g; 1733 1734 return 0; 1735 1736 send_vport_meta_err: 1737 return err; 1738 } 1739 1740 static int 1741 esw_create_peer_esw_miss_group(struct mlx5_eswitch *esw, 1742 struct mlx5_flow_table *fdb, 1743 u32 *flow_group_in, 1744 int *ix) 1745 { 1746 int max_peer_ports = (esw->total_vports - 1) * (MLX5_MAX_PORTS - 1); 1747 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1748 struct mlx5_flow_group *g; 1749 void *match_criteria; 1750 int err = 0; 1751 1752 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch)) 1753 return 0; 1754 1755 memset(flow_group_in, 0, inlen); 1756 1757 mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0); 1758 1759 if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) { 1760 match_criteria = MLX5_ADDR_OF(create_flow_group_in, 1761 flow_group_in, 1762 match_criteria); 1763 1764 MLX5_SET_TO_ONES(fte_match_param, match_criteria, 1765 misc_parameters.source_eswitch_owner_vhca_id); 1766 1767 MLX5_SET(create_flow_group_in, flow_group_in, 1768 source_eswitch_owner_vhca_id_valid, 1); 1769 } 1770 1771 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix); 1772 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1773 *ix + max_peer_ports); 1774 *ix += max_peer_ports + 1; 1775 1776 g = mlx5_create_flow_group(fdb, flow_group_in); 1777 if (IS_ERR(g)) { 1778 err = PTR_ERR(g); 1779 esw_warn(esw->dev, "Failed to create peer miss flow group err(%d)\n", err); 1780 goto out; 1781 } 1782 esw->fdb_table.offloads.peer_miss_grp = g; 1783 1784 out: 1785 return err; 1786 } 1787 1788 static int 1789 esw_create_miss_group(struct mlx5_eswitch *esw, 1790 struct mlx5_flow_table *fdb, 1791 u32 *flow_group_in, 1792 int *ix) 1793 { 1794 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1795 struct mlx5_flow_group *g; 1796 void *match_criteria; 1797 int err = 0; 1798 u8 *dmac; 1799 1800 memset(flow_group_in, 0, inlen); 1801 1802 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 1803 MLX5_MATCH_OUTER_HEADERS); 1804 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, 1805 match_criteria); 1806 dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, 1807 outer_headers.dmac_47_16); 1808 dmac[0] = 0x01; 1809 1810 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix); 1811 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1812 *ix + MLX5_ESW_MISS_FLOWS); 1813 1814 g = mlx5_create_flow_group(fdb, flow_group_in); 1815 if (IS_ERR(g)) { 1816 err = PTR_ERR(g); 1817 esw_warn(esw->dev, "Failed to create miss flow group err(%d)\n", err); 1818 goto miss_err; 1819 } 1820 esw->fdb_table.offloads.miss_grp = g; 1821 1822 err = esw_add_fdb_miss_rule(esw); 1823 if (err) 1824 goto miss_rule_err; 1825 1826 return 0; 1827 1828 miss_rule_err: 1829 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); 1830 miss_err: 1831 return err; 1832 } 1833 1834 static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw) 1835 { 1836 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1837 struct mlx5_flow_table_attr ft_attr = {}; 1838 struct mlx5_core_dev *dev = esw->dev; 1839 struct mlx5_flow_namespace *root_ns; 1840 struct mlx5_flow_table *fdb = NULL; 1841 int table_size, ix = 0, err = 0; 1842 u32 flags = 0, *flow_group_in; 1843 1844 esw_debug(esw->dev, "Create offloads FDB Tables\n"); 1845 1846 flow_group_in = kvzalloc(inlen, GFP_KERNEL); 1847 if (!flow_group_in) 1848 return -ENOMEM; 1849 1850 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); 1851 if (!root_ns) { 1852 esw_warn(dev, "Failed to get FDB flow namespace\n"); 1853 err = -EOPNOTSUPP; 1854 goto ns_err; 1855 } 1856 esw->fdb_table.offloads.ns = root_ns; 1857 err = mlx5_flow_namespace_set_mode(root_ns, 1858 esw->dev->priv.steering->mode); 1859 if (err) { 1860 esw_warn(dev, "Failed to set FDB namespace steering mode\n"); 1861 goto ns_err; 1862 } 1863 1864 /* To be strictly correct: 1865 * MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) 1866 * should be: 1867 * esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ + 1868 * peer_esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ 1869 * but as the peer device might not be in switchdev mode it's not 1870 * possible. We use the fact that by default FW sets max vfs and max sfs 1871 * to the same value on both devices. If it needs to be changed in the future note 1872 * the peer miss group should also be created based on the number of 1873 * total vports of the peer (currently is also uses esw->total_vports). 1874 */ 1875 table_size = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) + 1876 esw->total_vports * MLX5_MAX_PORTS + MLX5_ESW_MISS_FLOWS; 1877 1878 /* create the slow path fdb with encap set, so further table instances 1879 * can be created at run time while VFs are probed if the FW allows that. 1880 */ 1881 if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) 1882 flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | 1883 MLX5_FLOW_TABLE_TUNNEL_EN_DECAP); 1884 1885 ft_attr.flags = flags; 1886 ft_attr.max_fte = table_size; 1887 ft_attr.prio = FDB_SLOW_PATH; 1888 1889 fdb = mlx5_create_flow_table(root_ns, &ft_attr); 1890 if (IS_ERR(fdb)) { 1891 err = PTR_ERR(fdb); 1892 esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err); 1893 goto slow_fdb_err; 1894 } 1895 esw->fdb_table.offloads.slow_fdb = fdb; 1896 1897 /* Create empty TC-miss managed table. This allows plugging in following 1898 * priorities without directly exposing their level 0 table to 1899 * eswitch_offloads and passing it as miss_fdb to following call to 1900 * esw_chains_create(). 1901 */ 1902 memset(&ft_attr, 0, sizeof(ft_attr)); 1903 ft_attr.prio = FDB_TC_MISS; 1904 esw->fdb_table.offloads.tc_miss_table = mlx5_create_flow_table(root_ns, &ft_attr); 1905 if (IS_ERR(esw->fdb_table.offloads.tc_miss_table)) { 1906 err = PTR_ERR(esw->fdb_table.offloads.tc_miss_table); 1907 esw_warn(dev, "Failed to create TC miss FDB Table err %d\n", err); 1908 goto tc_miss_table_err; 1909 } 1910 1911 err = esw_chains_create(esw, esw->fdb_table.offloads.tc_miss_table); 1912 if (err) { 1913 esw_warn(dev, "Failed to open fdb chains err(%d)\n", err); 1914 goto fdb_chains_err; 1915 } 1916 1917 err = esw_create_send_to_vport_group(esw, fdb, flow_group_in, &ix); 1918 if (err) 1919 goto send_vport_err; 1920 1921 err = esw_create_meta_send_to_vport_group(esw, fdb, flow_group_in, &ix); 1922 if (err) 1923 goto send_vport_meta_err; 1924 1925 err = esw_create_peer_esw_miss_group(esw, fdb, flow_group_in, &ix); 1926 if (err) 1927 goto peer_miss_err; 1928 1929 err = esw_create_miss_group(esw, fdb, flow_group_in, &ix); 1930 if (err) 1931 goto miss_err; 1932 1933 kvfree(flow_group_in); 1934 return 0; 1935 1936 miss_err: 1937 if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) 1938 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp); 1939 peer_miss_err: 1940 if (esw->fdb_table.offloads.send_to_vport_meta_grp) 1941 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp); 1942 send_vport_meta_err: 1943 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); 1944 send_vport_err: 1945 esw_chains_destroy(esw, esw_chains(esw)); 1946 fdb_chains_err: 1947 mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table); 1948 tc_miss_table_err: 1949 mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw)); 1950 slow_fdb_err: 1951 /* Holds true only as long as DMFS is the default */ 1952 mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS); 1953 ns_err: 1954 kvfree(flow_group_in); 1955 return err; 1956 } 1957 1958 static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) 1959 { 1960 if (!mlx5_eswitch_get_slow_fdb(esw)) 1961 return; 1962 1963 esw_debug(esw->dev, "Destroy offloads FDB Tables\n"); 1964 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi); 1965 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni); 1966 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); 1967 if (esw->fdb_table.offloads.send_to_vport_meta_grp) 1968 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp); 1969 if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) 1970 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp); 1971 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); 1972 1973 esw_chains_destroy(esw, esw_chains(esw)); 1974 1975 mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table); 1976 mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw)); 1977 /* Holds true only as long as DMFS is the default */ 1978 mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns, 1979 MLX5_FLOW_STEERING_MODE_DMFS); 1980 } 1981 1982 static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw) 1983 { 1984 int nvports; 1985 1986 nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS; 1987 if (mlx5e_tc_int_port_supported(esw)) 1988 nvports += MLX5E_TC_MAX_INT_PORT_NUM; 1989 1990 return nvports; 1991 } 1992 1993 static int esw_create_offloads_table(struct mlx5_eswitch *esw) 1994 { 1995 struct mlx5_flow_table_attr ft_attr = {}; 1996 struct mlx5_core_dev *dev = esw->dev; 1997 struct mlx5_flow_table *ft_offloads; 1998 struct mlx5_flow_namespace *ns; 1999 int err = 0; 2000 2001 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS); 2002 if (!ns) { 2003 esw_warn(esw->dev, "Failed to get offloads flow namespace\n"); 2004 return -EOPNOTSUPP; 2005 } 2006 2007 ft_attr.max_fte = esw_get_nr_ft_offloads_steering_src_ports(esw) + 2008 MLX5_ESW_FT_OFFLOADS_DROP_RULE; 2009 ft_attr.prio = 1; 2010 2011 ft_offloads = mlx5_create_flow_table(ns, &ft_attr); 2012 if (IS_ERR(ft_offloads)) { 2013 err = PTR_ERR(ft_offloads); 2014 esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err); 2015 return err; 2016 } 2017 2018 esw->offloads.ft_offloads = ft_offloads; 2019 return 0; 2020 } 2021 2022 static void esw_destroy_offloads_table(struct mlx5_eswitch *esw) 2023 { 2024 struct mlx5_esw_offload *offloads = &esw->offloads; 2025 2026 mlx5_destroy_flow_table(offloads->ft_offloads); 2027 } 2028 2029 static int esw_create_vport_rx_group(struct mlx5_eswitch *esw) 2030 { 2031 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 2032 struct mlx5_flow_group *g; 2033 u32 *flow_group_in; 2034 int nvports; 2035 int err = 0; 2036 2037 nvports = esw_get_nr_ft_offloads_steering_src_ports(esw); 2038 flow_group_in = kvzalloc(inlen, GFP_KERNEL); 2039 if (!flow_group_in) 2040 return -ENOMEM; 2041 2042 mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0); 2043 2044 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); 2045 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1); 2046 2047 g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in); 2048 2049 if (IS_ERR(g)) { 2050 err = PTR_ERR(g); 2051 mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err); 2052 goto out; 2053 } 2054 2055 esw->offloads.vport_rx_group = g; 2056 out: 2057 kvfree(flow_group_in); 2058 return err; 2059 } 2060 2061 static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw) 2062 { 2063 mlx5_destroy_flow_group(esw->offloads.vport_rx_group); 2064 } 2065 2066 static int esw_create_vport_rx_drop_rule_index(struct mlx5_eswitch *esw) 2067 { 2068 /* ft_offloads table is enlarged by MLX5_ESW_FT_OFFLOADS_DROP_RULE (1) 2069 * for the drop rule, which is placed at the end of the table. 2070 * So return the total of vport and int_port as rule index. 2071 */ 2072 return esw_get_nr_ft_offloads_steering_src_ports(esw); 2073 } 2074 2075 static int esw_create_vport_rx_drop_group(struct mlx5_eswitch *esw) 2076 { 2077 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 2078 struct mlx5_flow_group *g; 2079 u32 *flow_group_in; 2080 int flow_index; 2081 int err = 0; 2082 2083 flow_index = esw_create_vport_rx_drop_rule_index(esw); 2084 2085 flow_group_in = kvzalloc(inlen, GFP_KERNEL); 2086 if (!flow_group_in) 2087 return -ENOMEM; 2088 2089 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index); 2090 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index); 2091 2092 g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in); 2093 2094 if (IS_ERR(g)) { 2095 err = PTR_ERR(g); 2096 mlx5_core_warn(esw->dev, "Failed to create vport rx drop group err %d\n", err); 2097 goto out; 2098 } 2099 2100 esw->offloads.vport_rx_drop_group = g; 2101 out: 2102 kvfree(flow_group_in); 2103 return err; 2104 } 2105 2106 static void esw_destroy_vport_rx_drop_group(struct mlx5_eswitch *esw) 2107 { 2108 if (esw->offloads.vport_rx_drop_group) 2109 mlx5_destroy_flow_group(esw->offloads.vport_rx_drop_group); 2110 } 2111 2112 void 2113 mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw, 2114 u16 vport, 2115 struct mlx5_flow_spec *spec) 2116 { 2117 void *misc; 2118 2119 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { 2120 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2); 2121 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 2122 mlx5_eswitch_get_vport_metadata_for_match(esw, vport)); 2123 2124 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2); 2125 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 2126 mlx5_eswitch_get_vport_metadata_mask()); 2127 2128 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; 2129 } else { 2130 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); 2131 MLX5_SET(fte_match_set_misc, misc, source_port, vport); 2132 2133 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); 2134 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); 2135 2136 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; 2137 } 2138 } 2139 2140 struct mlx5_flow_handle * 2141 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport, 2142 struct mlx5_flow_destination *dest) 2143 { 2144 struct mlx5_flow_act flow_act = {0}; 2145 struct mlx5_flow_handle *flow_rule; 2146 struct mlx5_flow_spec *spec; 2147 2148 spec = kvzalloc_obj(*spec); 2149 if (!spec) { 2150 flow_rule = ERR_PTR(-ENOMEM); 2151 goto out; 2152 } 2153 2154 mlx5_esw_set_spec_source_port(esw, vport, spec); 2155 2156 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 2157 flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec, 2158 &flow_act, dest, 1); 2159 if (IS_ERR(flow_rule)) { 2160 esw_warn(esw->dev, 2161 "fs offloads: Failed to add vport rx rule err %pe\n", 2162 flow_rule); 2163 goto out; 2164 } 2165 2166 out: 2167 kvfree(spec); 2168 return flow_rule; 2169 } 2170 2171 static int esw_create_vport_rx_drop_rule(struct mlx5_eswitch *esw) 2172 { 2173 struct mlx5_flow_act flow_act = {}; 2174 struct mlx5_flow_handle *flow_rule; 2175 2176 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; 2177 flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, NULL, 2178 &flow_act, NULL, 0); 2179 if (IS_ERR(flow_rule)) { 2180 esw_warn(esw->dev, 2181 "fs offloads: Failed to add vport rx drop rule err %pe\n", 2182 flow_rule); 2183 return PTR_ERR(flow_rule); 2184 } 2185 2186 esw->offloads.vport_rx_drop_rule = flow_rule; 2187 2188 return 0; 2189 } 2190 2191 static void esw_destroy_vport_rx_drop_rule(struct mlx5_eswitch *esw) 2192 { 2193 if (esw->offloads.vport_rx_drop_rule) 2194 mlx5_del_flow_rules(esw->offloads.vport_rx_drop_rule); 2195 } 2196 2197 static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode) 2198 { 2199 u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2; 2200 struct mlx5_core_dev *dev = esw->dev; 2201 struct mlx5_vport *vport; 2202 unsigned long i; 2203 2204 if (!MLX5_CAP_GEN(dev, vport_group_manager)) 2205 return -EOPNOTSUPP; 2206 2207 if (!mlx5_esw_is_fdb_created(esw)) 2208 return -EOPNOTSUPP; 2209 2210 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { 2211 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: 2212 mlx5_mode = MLX5_INLINE_MODE_NONE; 2213 goto out; 2214 case MLX5_CAP_INLINE_MODE_L2: 2215 mlx5_mode = MLX5_INLINE_MODE_L2; 2216 goto out; 2217 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: 2218 goto query_vports; 2219 } 2220 2221 query_vports: 2222 mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode); 2223 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) { 2224 mlx5_query_nic_vport_min_inline(dev, vport->vport, &mlx5_mode); 2225 if (prev_mlx5_mode != mlx5_mode) 2226 return -EINVAL; 2227 prev_mlx5_mode = mlx5_mode; 2228 } 2229 2230 out: 2231 *mode = mlx5_mode; 2232 return 0; 2233 } 2234 2235 static void esw_destroy_restore_table(struct mlx5_eswitch *esw) 2236 { 2237 struct mlx5_esw_offload *offloads = &esw->offloads; 2238 2239 if (!mlx5_eswitch_reg_c1_loopback_supported(esw)) 2240 return; 2241 2242 mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id); 2243 mlx5_destroy_flow_group(offloads->restore_group); 2244 mlx5_destroy_flow_table(offloads->ft_offloads_restore); 2245 } 2246 2247 static int esw_create_restore_table(struct mlx5_eswitch *esw) 2248 { 2249 u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; 2250 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 2251 struct mlx5_flow_table_attr ft_attr = {}; 2252 struct mlx5_core_dev *dev = esw->dev; 2253 struct mlx5_flow_namespace *ns; 2254 struct mlx5_modify_hdr *mod_hdr; 2255 void *match_criteria, *misc; 2256 struct mlx5_flow_table *ft; 2257 struct mlx5_flow_group *g; 2258 u32 *flow_group_in; 2259 int err = 0; 2260 2261 if (!mlx5_eswitch_reg_c1_loopback_supported(esw)) 2262 return 0; 2263 2264 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS); 2265 if (!ns) { 2266 esw_warn(esw->dev, "Failed to get offloads flow namespace\n"); 2267 return -EOPNOTSUPP; 2268 } 2269 2270 flow_group_in = kvzalloc(inlen, GFP_KERNEL); 2271 if (!flow_group_in) { 2272 err = -ENOMEM; 2273 goto out_free; 2274 } 2275 2276 ft_attr.max_fte = 1 << ESW_REG_C0_USER_DATA_METADATA_BITS; 2277 ft = mlx5_create_flow_table(ns, &ft_attr); 2278 if (IS_ERR(ft)) { 2279 err = PTR_ERR(ft); 2280 esw_warn(esw->dev, "Failed to create restore table, err %d\n", 2281 err); 2282 goto out_free; 2283 } 2284 2285 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, 2286 match_criteria); 2287 misc = MLX5_ADDR_OF(fte_match_param, match_criteria, 2288 misc_parameters_2); 2289 2290 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 2291 ESW_REG_C0_USER_DATA_METADATA_MASK); 2292 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); 2293 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2294 ft_attr.max_fte - 1); 2295 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 2296 MLX5_MATCH_MISC_PARAMETERS_2); 2297 g = mlx5_create_flow_group(ft, flow_group_in); 2298 if (IS_ERR(g)) { 2299 err = PTR_ERR(g); 2300 esw_warn(dev, "Failed to create restore flow group, err: %d\n", 2301 err); 2302 goto err_group; 2303 } 2304 2305 MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY); 2306 MLX5_SET(copy_action_in, modact, src_field, 2307 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1); 2308 MLX5_SET(copy_action_in, modact, dst_field, 2309 MLX5_ACTION_IN_FIELD_METADATA_REG_B); 2310 mod_hdr = mlx5_modify_header_alloc(esw->dev, 2311 MLX5_FLOW_NAMESPACE_KERNEL, 1, 2312 modact); 2313 if (IS_ERR(mod_hdr)) { 2314 err = PTR_ERR(mod_hdr); 2315 esw_warn(dev, "Failed to create restore mod header, err: %d\n", 2316 err); 2317 goto err_mod_hdr; 2318 } 2319 2320 esw->offloads.ft_offloads_restore = ft; 2321 esw->offloads.restore_group = g; 2322 esw->offloads.restore_copy_hdr_id = mod_hdr; 2323 2324 kvfree(flow_group_in); 2325 2326 return 0; 2327 2328 err_mod_hdr: 2329 mlx5_destroy_flow_group(g); 2330 err_group: 2331 mlx5_destroy_flow_table(ft); 2332 out_free: 2333 kvfree(flow_group_in); 2334 2335 return err; 2336 } 2337 2338 static void esw_mode_change(struct mlx5_eswitch *esw, u16 mode) 2339 { 2340 mlx5_devcom_comp_lock(esw->dev->priv.hca_devcom_comp); 2341 if (esw->dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV || 2342 mlx5_core_mp_enabled(esw->dev)) { 2343 esw->mode = mode; 2344 mlx5_rescan_drivers_locked(esw->dev); 2345 mlx5_devcom_comp_unlock(esw->dev->priv.hca_devcom_comp); 2346 return; 2347 } 2348 2349 esw->dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; 2350 mlx5_rescan_drivers_locked(esw->dev); 2351 esw->mode = mode; 2352 esw->dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; 2353 mlx5_rescan_drivers_locked(esw->dev); 2354 mlx5_devcom_comp_unlock(esw->dev->priv.hca_devcom_comp); 2355 } 2356 2357 static void mlx5_esw_fdb_drop_destroy(struct mlx5_eswitch *esw) 2358 { 2359 if (!esw->fdb_table.offloads.drop_root) 2360 return; 2361 2362 esw_debug(esw->dev, "Destroying FDB drop root table %#x fc %#x\n", 2363 esw->fdb_table.offloads.drop_root->id, 2364 esw->fdb_table.offloads.drop_root_fc->id); 2365 mlx5_del_flow_rules(esw->fdb_table.offloads.drop_root_rule); 2366 /* Don't free flow counter here, can be reused on a later activation */ 2367 mlx5_destroy_flow_table(esw->fdb_table.offloads.drop_root); 2368 esw->fdb_table.offloads.drop_root_rule = NULL; 2369 esw->fdb_table.offloads.drop_root = NULL; 2370 } 2371 2372 static int mlx5_esw_fdb_drop_create(struct mlx5_eswitch *esw) 2373 { 2374 struct mlx5_flow_destination drop_fc_dst = {}; 2375 struct mlx5_flow_table_attr ft_attr = {}; 2376 struct mlx5_flow_destination *dst = NULL; 2377 struct mlx5_core_dev *dev = esw->dev; 2378 struct mlx5_flow_namespace *root_ns; 2379 struct mlx5_flow_act flow_act = {}; 2380 struct mlx5_flow_handle *flow_rule; 2381 struct mlx5_flow_table *table; 2382 int err = 0, dst_num = 0; 2383 2384 if (esw->fdb_table.offloads.drop_root) 2385 return 0; 2386 2387 root_ns = esw->fdb_table.offloads.ns; 2388 2389 ft_attr.prio = FDB_DROP_ROOT; 2390 ft_attr.max_fte = 1; 2391 ft_attr.autogroup.max_num_groups = 1; 2392 table = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr); 2393 if (IS_ERR(table)) { 2394 esw_warn(dev, "Failed to create fdb drop root table, err %pe\n", 2395 table); 2396 return PTR_ERR(table); 2397 } 2398 2399 /* Drop FC reusable, create once on first deactivation of FDB */ 2400 if (!esw->fdb_table.offloads.drop_root_fc) { 2401 struct mlx5_fc *counter = mlx5_fc_create(dev, 0); 2402 2403 err = PTR_ERR_OR_ZERO(counter); 2404 if (err) 2405 esw_warn(esw->dev, "create fdb drop fc err %d\n", err); 2406 else 2407 esw->fdb_table.offloads.drop_root_fc = counter; 2408 } 2409 2410 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; 2411 2412 if (esw->fdb_table.offloads.drop_root_fc) { 2413 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; 2414 drop_fc_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; 2415 drop_fc_dst.counter = esw->fdb_table.offloads.drop_root_fc; 2416 dst = &drop_fc_dst; 2417 dst_num++; 2418 } 2419 2420 flow_rule = mlx5_add_flow_rules(table, NULL, &flow_act, dst, dst_num); 2421 err = PTR_ERR_OR_ZERO(flow_rule); 2422 if (err) { 2423 esw_warn(esw->dev, 2424 "fs offloads: Failed to add vport rx drop rule err %d\n", 2425 err); 2426 goto err_flow_rule; 2427 } 2428 2429 esw->fdb_table.offloads.drop_root = table; 2430 esw->fdb_table.offloads.drop_root_rule = flow_rule; 2431 esw_debug(esw->dev, "Created FDB drop root table %#x fc %#x\n", 2432 table->id, dst ? dst->counter->id : 0); 2433 return 0; 2434 2435 err_flow_rule: 2436 /* no need to free drop fc, esw_offloads_steering_cleanup will do it */ 2437 mlx5_destroy_flow_table(table); 2438 return err; 2439 } 2440 2441 static void mlx5_esw_fdb_active(struct mlx5_eswitch *esw) 2442 { 2443 struct mlx5_vport *vport; 2444 unsigned long i; 2445 2446 mlx5_esw_fdb_drop_destroy(esw); 2447 mlx5_mpfs_enable(esw->dev); 2448 2449 mlx5_esw_for_each_vf_vport(esw, i, vport, U16_MAX) { 2450 if (!vport->adjacent) 2451 continue; 2452 esw_debug(esw->dev, "Connecting vport %d to eswitch\n", 2453 vport->vport); 2454 mlx5_esw_adj_vport_modify(esw->dev, vport->vport, true); 2455 } 2456 2457 esw->offloads_inactive = false; 2458 esw_warn(esw->dev, "MPFS/FDB active\n"); 2459 } 2460 2461 static void mlx5_esw_fdb_inactive(struct mlx5_eswitch *esw) 2462 { 2463 struct mlx5_vport *vport; 2464 unsigned long i; 2465 2466 mlx5_mpfs_disable(esw->dev); 2467 mlx5_esw_fdb_drop_create(esw); 2468 2469 mlx5_esw_for_each_vf_vport(esw, i, vport, U16_MAX) { 2470 if (!vport->adjacent) 2471 continue; 2472 esw_debug(esw->dev, "Disconnecting vport %u from eswitch\n", 2473 vport->vport); 2474 2475 mlx5_esw_adj_vport_modify(esw->dev, vport->vport, false); 2476 } 2477 2478 esw->offloads_inactive = true; 2479 esw_warn(esw->dev, "MPFS/FDB inactive\n"); 2480 } 2481 2482 static int esw_offloads_start(struct mlx5_eswitch *esw, 2483 struct netlink_ext_ack *extack) 2484 { 2485 int err; 2486 2487 esw_mode_change(esw, MLX5_ESWITCH_OFFLOADS); 2488 err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs); 2489 if (err) { 2490 NL_SET_ERR_MSG_MOD(extack, 2491 "Failed setting eswitch to offloads"); 2492 esw_mode_change(esw, MLX5_ESWITCH_LEGACY); 2493 return err; 2494 } 2495 if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) { 2496 if (mlx5_eswitch_inline_mode_get(esw, 2497 &esw->offloads.inline_mode)) { 2498 esw->offloads.inline_mode = MLX5_INLINE_MODE_L2; 2499 NL_SET_ERR_MSG_MOD(extack, 2500 "Inline mode is different between vports"); 2501 } 2502 } 2503 return 0; 2504 } 2505 2506 void mlx5_esw_offloads_rep_remove(struct mlx5_eswitch *esw, 2507 const struct mlx5_vport *vport) 2508 { 2509 struct mlx5_eswitch_rep *rep = xa_load(&esw->offloads.vport_reps, 2510 vport->vport); 2511 2512 if (!rep) 2513 return; 2514 xa_erase(&esw->offloads.vport_reps, vport->vport); 2515 kfree(rep); 2516 } 2517 2518 int mlx5_esw_offloads_rep_add(struct mlx5_eswitch *esw, 2519 const struct mlx5_vport *vport) 2520 { 2521 struct mlx5_eswitch_rep *rep; 2522 int rep_type; 2523 int err; 2524 2525 rep = kzalloc_obj(*rep); 2526 if (!rep) 2527 return -ENOMEM; 2528 2529 rep->vport = vport->vport; 2530 rep->vport_index = vport->index; 2531 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) { 2532 if (!esw->offloads.rep_ops[rep_type]) { 2533 atomic_set(&rep->rep_data[rep_type].state, 2534 REP_UNREGISTERED); 2535 continue; 2536 } 2537 /* Dynamic/delegated vports add their representors after 2538 * mlx5_eswitch_register_vport_reps, so mark them as registered 2539 * for them to be loaded later with the others. 2540 */ 2541 rep->esw = esw; 2542 atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED); 2543 } 2544 err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL); 2545 if (err) 2546 goto insert_err; 2547 2548 return 0; 2549 2550 insert_err: 2551 kfree(rep); 2552 return err; 2553 } 2554 2555 static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw, 2556 struct mlx5_eswitch_rep *rep) 2557 { 2558 xa_erase(&esw->offloads.vport_reps, rep->vport); 2559 kfree(rep); 2560 } 2561 2562 static void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw) 2563 { 2564 struct mlx5_eswitch_rep *rep; 2565 unsigned long i; 2566 2567 mlx5_esw_for_each_rep(esw, i, rep) 2568 mlx5_esw_offloads_rep_cleanup(esw, rep); 2569 xa_destroy(&esw->offloads.vport_reps); 2570 } 2571 2572 static int esw_offloads_init_reps(struct mlx5_eswitch *esw) 2573 { 2574 struct mlx5_vport *vport; 2575 unsigned long i; 2576 int err; 2577 2578 xa_init(&esw->offloads.vport_reps); 2579 2580 mlx5_esw_for_each_vport(esw, i, vport) { 2581 err = mlx5_esw_offloads_rep_add(esw, vport); 2582 if (err) 2583 goto err; 2584 } 2585 return 0; 2586 2587 err: 2588 esw_offloads_cleanup_reps(esw); 2589 return err; 2590 } 2591 2592 static int esw_port_metadata_set(struct devlink *devlink, u32 id, 2593 struct devlink_param_gset_ctx *ctx, 2594 struct netlink_ext_ack *extack) 2595 { 2596 struct mlx5_core_dev *dev = devlink_priv(devlink); 2597 struct mlx5_eswitch *esw = dev->priv.eswitch; 2598 int err = 0; 2599 2600 down_write(&esw->mode_lock); 2601 if (mlx5_esw_is_fdb_created(esw)) { 2602 err = -EBUSY; 2603 goto done; 2604 } 2605 if (!mlx5_esw_vport_match_metadata_supported(esw)) { 2606 err = -EOPNOTSUPP; 2607 goto done; 2608 } 2609 if (ctx->val.vbool) 2610 esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA; 2611 else 2612 esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA; 2613 done: 2614 up_write(&esw->mode_lock); 2615 return err; 2616 } 2617 2618 static int esw_port_metadata_get(struct devlink *devlink, u32 id, 2619 struct devlink_param_gset_ctx *ctx, 2620 struct netlink_ext_ack *extack) 2621 { 2622 struct mlx5_core_dev *dev = devlink_priv(devlink); 2623 2624 ctx->val.vbool = mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch); 2625 return 0; 2626 } 2627 2628 static int esw_port_metadata_validate(struct devlink *devlink, u32 id, 2629 union devlink_param_value val, 2630 struct netlink_ext_ack *extack) 2631 { 2632 struct mlx5_core_dev *dev = devlink_priv(devlink); 2633 u8 esw_mode; 2634 2635 esw_mode = mlx5_eswitch_mode(dev); 2636 if (esw_mode == MLX5_ESWITCH_OFFLOADS) { 2637 NL_SET_ERR_MSG_MOD(extack, 2638 "E-Switch must either disabled or non switchdev mode"); 2639 return -EBUSY; 2640 } 2641 return 0; 2642 } 2643 2644 static const struct devlink_param esw_devlink_params[] = { 2645 DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA, 2646 "esw_port_metadata", DEVLINK_PARAM_TYPE_BOOL, 2647 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 2648 esw_port_metadata_get, 2649 esw_port_metadata_set, 2650 esw_port_metadata_validate), 2651 }; 2652 2653 int esw_offloads_init(struct mlx5_eswitch *esw) 2654 { 2655 int err; 2656 2657 err = esw_offloads_init_reps(esw); 2658 if (err) 2659 return err; 2660 2661 if (MLX5_ESWITCH_MANAGER(esw->dev) && 2662 mlx5_esw_vport_match_metadata_supported(esw)) 2663 esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA; 2664 2665 err = devl_params_register(priv_to_devlink(esw->dev), 2666 esw_devlink_params, 2667 ARRAY_SIZE(esw_devlink_params)); 2668 if (err) 2669 goto err_params; 2670 2671 return 0; 2672 2673 err_params: 2674 esw_offloads_cleanup_reps(esw); 2675 return err; 2676 } 2677 2678 void esw_offloads_cleanup(struct mlx5_eswitch *esw) 2679 { 2680 devl_params_unregister(priv_to_devlink(esw->dev), 2681 esw_devlink_params, 2682 ARRAY_SIZE(esw_devlink_params)); 2683 esw_offloads_cleanup_reps(esw); 2684 } 2685 2686 static int __esw_offloads_load_rep(struct mlx5_eswitch *esw, 2687 struct mlx5_eswitch_rep *rep, u8 rep_type) 2688 { 2689 if (atomic_cmpxchg(&rep->rep_data[rep_type].state, 2690 REP_REGISTERED, REP_LOADED) == REP_REGISTERED) 2691 return esw->offloads.rep_ops[rep_type]->load(esw->dev, rep); 2692 2693 return 0; 2694 } 2695 2696 static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw, 2697 struct mlx5_eswitch_rep *rep, u8 rep_type) 2698 { 2699 if (atomic_cmpxchg(&rep->rep_data[rep_type].state, 2700 REP_LOADED, REP_REGISTERED) == REP_LOADED) { 2701 if (rep_type == REP_ETH) 2702 __esw_offloads_unload_rep(esw, rep, REP_IB); 2703 esw->offloads.rep_ops[rep_type]->unload(rep); 2704 } 2705 } 2706 2707 static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type) 2708 { 2709 struct mlx5_eswitch_rep *rep; 2710 unsigned long i; 2711 2712 mlx5_esw_for_each_rep(esw, i, rep) 2713 __esw_offloads_unload_rep(esw, rep, rep_type); 2714 } 2715 2716 static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num) 2717 { 2718 struct mlx5_eswitch_rep *rep; 2719 int rep_type; 2720 int err; 2721 2722 rep = mlx5_eswitch_get_rep(esw, vport_num); 2723 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) { 2724 err = __esw_offloads_load_rep(esw, rep, rep_type); 2725 if (err) 2726 goto err_reps; 2727 } 2728 2729 return 0; 2730 2731 err_reps: 2732 atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED); 2733 for (--rep_type; rep_type >= 0; rep_type--) 2734 __esw_offloads_unload_rep(esw, rep, rep_type); 2735 return err; 2736 } 2737 2738 static void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num) 2739 { 2740 struct mlx5_eswitch_rep *rep; 2741 int rep_type; 2742 2743 rep = mlx5_eswitch_get_rep(esw, vport_num); 2744 for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--) 2745 __esw_offloads_unload_rep(esw, rep, rep_type); 2746 } 2747 2748 int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport) 2749 { 2750 if (esw->mode != MLX5_ESWITCH_OFFLOADS) 2751 return 0; 2752 2753 return mlx5_esw_offloads_pf_vf_devlink_port_init(esw, vport); 2754 } 2755 2756 void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport) 2757 { 2758 if (esw->mode != MLX5_ESWITCH_OFFLOADS) 2759 return; 2760 2761 mlx5_esw_offloads_pf_vf_devlink_port_cleanup(esw, vport); 2762 } 2763 2764 int mlx5_esw_offloads_init_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport, 2765 struct mlx5_devlink_port *dl_port, 2766 u32 controller, u32 sfnum) 2767 { 2768 return mlx5_esw_offloads_sf_devlink_port_init(esw, vport, dl_port, controller, sfnum); 2769 } 2770 2771 void mlx5_esw_offloads_cleanup_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport) 2772 { 2773 mlx5_esw_offloads_sf_devlink_port_cleanup(esw, vport); 2774 } 2775 2776 int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport) 2777 { 2778 int err; 2779 2780 if (esw->mode != MLX5_ESWITCH_OFFLOADS) 2781 return 0; 2782 2783 err = mlx5_esw_offloads_devlink_port_register(esw, vport); 2784 if (err) 2785 return err; 2786 2787 err = mlx5_esw_offloads_rep_load(esw, vport->vport); 2788 if (err) 2789 goto load_err; 2790 return err; 2791 2792 load_err: 2793 mlx5_esw_offloads_devlink_port_unregister(vport); 2794 return err; 2795 } 2796 2797 void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport) 2798 { 2799 if (esw->mode != MLX5_ESWITCH_OFFLOADS) 2800 return; 2801 2802 mlx5_esw_offloads_rep_unload(esw, vport->vport); 2803 2804 mlx5_esw_offloads_devlink_port_unregister(vport); 2805 } 2806 2807 static int esw_set_slave_root_fdb(struct mlx5_core_dev *master, 2808 struct mlx5_core_dev *slave) 2809 { 2810 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {}; 2811 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {}; 2812 struct mlx5_flow_root_namespace *root; 2813 struct mlx5_flow_namespace *ns; 2814 int err; 2815 2816 MLX5_SET(set_flow_table_root_in, in, opcode, 2817 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); 2818 MLX5_SET(set_flow_table_root_in, in, table_type, 2819 FS_FT_FDB); 2820 2821 if (master) { 2822 ns = mlx5_get_flow_namespace(master, 2823 MLX5_FLOW_NAMESPACE_FDB); 2824 root = find_root(&ns->node); 2825 mutex_lock(&root->chain_lock); 2826 MLX5_SET(set_flow_table_root_in, in, 2827 table_eswitch_owner_vhca_id_valid, 1); 2828 MLX5_SET(set_flow_table_root_in, in, 2829 table_eswitch_owner_vhca_id, 2830 MLX5_CAP_GEN(master, vhca_id)); 2831 MLX5_SET(set_flow_table_root_in, in, table_id, 2832 root->root_ft->id); 2833 } else { 2834 ns = mlx5_get_flow_namespace(slave, 2835 MLX5_FLOW_NAMESPACE_FDB); 2836 root = find_root(&ns->node); 2837 mutex_lock(&root->chain_lock); 2838 MLX5_SET(set_flow_table_root_in, in, table_id, 2839 root->root_ft->id); 2840 } 2841 2842 err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out)); 2843 mutex_unlock(&root->chain_lock); 2844 2845 return err; 2846 } 2847 2848 static int __esw_set_master_egress_rule(struct mlx5_core_dev *master, 2849 struct mlx5_core_dev *slave, 2850 struct mlx5_vport *vport, 2851 struct mlx5_flow_table *acl) 2852 { 2853 u16 slave_index = MLX5_CAP_GEN(slave, vhca_id); 2854 struct mlx5_flow_handle *flow_rule = NULL; 2855 struct mlx5_flow_destination dest = {}; 2856 struct mlx5_flow_act flow_act = {}; 2857 struct mlx5_flow_spec *spec; 2858 int err = 0; 2859 void *misc; 2860 2861 spec = kvzalloc_obj(*spec); 2862 if (!spec) 2863 return -ENOMEM; 2864 2865 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; 2866 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, 2867 misc_parameters); 2868 MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK); 2869 MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id, slave_index); 2870 2871 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); 2872 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); 2873 MLX5_SET_TO_ONES(fte_match_set_misc, misc, 2874 source_eswitch_owner_vhca_id); 2875 2876 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 2877 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; 2878 dest.vport.num = slave->priv.eswitch->manager_vport; 2879 dest.vport.vhca_id = MLX5_CAP_GEN(slave, vhca_id); 2880 dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; 2881 2882 flow_rule = mlx5_add_flow_rules(acl, spec, &flow_act, 2883 &dest, 1); 2884 if (IS_ERR(flow_rule)) { 2885 err = PTR_ERR(flow_rule); 2886 } else { 2887 err = xa_insert(&vport->egress.offloads.bounce_rules, 2888 slave_index, flow_rule, GFP_KERNEL); 2889 if (err) 2890 mlx5_del_flow_rules(flow_rule); 2891 } 2892 2893 kvfree(spec); 2894 return err; 2895 } 2896 2897 static int esw_master_egress_create_resources(struct mlx5_eswitch *esw, 2898 struct mlx5_flow_namespace *egress_ns, 2899 struct mlx5_vport *vport, size_t count) 2900 { 2901 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 2902 struct mlx5_flow_table_attr ft_attr = { 2903 .max_fte = count, .prio = 0, .level = 0, 2904 }; 2905 struct mlx5_flow_table *acl; 2906 struct mlx5_flow_group *g; 2907 void *match_criteria; 2908 u32 *flow_group_in; 2909 int err; 2910 2911 if (vport->egress.acl) 2912 return 0; 2913 2914 flow_group_in = kvzalloc(inlen, GFP_KERNEL); 2915 if (!flow_group_in) 2916 return -ENOMEM; 2917 2918 if (vport->vport || mlx5_core_is_ecpf(esw->dev)) 2919 ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT; 2920 2921 acl = mlx5_create_vport_flow_table(egress_ns, &ft_attr, vport->vport); 2922 if (IS_ERR(acl)) { 2923 err = PTR_ERR(acl); 2924 goto out; 2925 } 2926 2927 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, 2928 match_criteria); 2929 MLX5_SET_TO_ONES(fte_match_param, match_criteria, 2930 misc_parameters.source_port); 2931 MLX5_SET_TO_ONES(fte_match_param, match_criteria, 2932 misc_parameters.source_eswitch_owner_vhca_id); 2933 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 2934 MLX5_MATCH_MISC_PARAMETERS); 2935 2936 MLX5_SET(create_flow_group_in, flow_group_in, 2937 source_eswitch_owner_vhca_id_valid, 1); 2938 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); 2939 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, count); 2940 2941 g = mlx5_create_flow_group(acl, flow_group_in); 2942 if (IS_ERR(g)) { 2943 err = PTR_ERR(g); 2944 goto err_group; 2945 } 2946 2947 vport->egress.acl = acl; 2948 vport->egress.offloads.bounce_grp = g; 2949 vport->egress.type = VPORT_EGRESS_ACL_TYPE_SHARED_FDB; 2950 xa_init_flags(&vport->egress.offloads.bounce_rules, XA_FLAGS_ALLOC); 2951 2952 kvfree(flow_group_in); 2953 2954 return 0; 2955 2956 err_group: 2957 mlx5_destroy_flow_table(acl); 2958 out: 2959 kvfree(flow_group_in); 2960 return err; 2961 } 2962 2963 static void esw_master_egress_destroy_resources(struct mlx5_vport *vport) 2964 { 2965 if (!xa_empty(&vport->egress.offloads.bounce_rules)) 2966 return; 2967 mlx5_destroy_flow_group(vport->egress.offloads.bounce_grp); 2968 vport->egress.offloads.bounce_grp = NULL; 2969 mlx5_destroy_flow_table(vport->egress.acl); 2970 vport->egress.acl = NULL; 2971 } 2972 2973 static int esw_set_master_egress_rule(struct mlx5_core_dev *master, 2974 struct mlx5_core_dev *slave, size_t count) 2975 { 2976 struct mlx5_eswitch *esw = master->priv.eswitch; 2977 u16 slave_index = MLX5_CAP_GEN(slave, vhca_id); 2978 struct mlx5_flow_namespace *egress_ns; 2979 struct mlx5_vport *vport; 2980 int err; 2981 2982 vport = mlx5_eswitch_get_vport(esw, esw->manager_vport); 2983 if (IS_ERR(vport)) 2984 return PTR_ERR(vport); 2985 2986 egress_ns = mlx5_get_flow_vport_namespace(master, 2987 MLX5_FLOW_NAMESPACE_ESW_EGRESS, 2988 vport->index); 2989 if (!egress_ns) 2990 return -EINVAL; 2991 2992 if (vport->egress.acl && vport->egress.type != VPORT_EGRESS_ACL_TYPE_SHARED_FDB) 2993 return 0; 2994 2995 err = esw_master_egress_create_resources(esw, egress_ns, vport, count); 2996 if (err) 2997 return err; 2998 2999 if (xa_load(&vport->egress.offloads.bounce_rules, slave_index)) 3000 return -EINVAL; 3001 3002 err = __esw_set_master_egress_rule(master, slave, vport, vport->egress.acl); 3003 if (err) 3004 goto err_rule; 3005 3006 return 0; 3007 3008 err_rule: 3009 esw_master_egress_destroy_resources(vport); 3010 return err; 3011 } 3012 3013 static void esw_unset_master_egress_rule(struct mlx5_core_dev *dev, 3014 struct mlx5_core_dev *slave_dev) 3015 { 3016 struct mlx5_vport *vport; 3017 3018 vport = mlx5_eswitch_get_vport(dev->priv.eswitch, 3019 dev->priv.eswitch->manager_vport); 3020 3021 esw_acl_egress_ofld_bounce_rule_destroy(vport, MLX5_CAP_GEN(slave_dev, vhca_id)); 3022 3023 if (xa_empty(&vport->egress.offloads.bounce_rules)) { 3024 esw_acl_egress_ofld_cleanup(vport); 3025 xa_destroy(&vport->egress.offloads.bounce_rules); 3026 } 3027 } 3028 3029 int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw, 3030 struct mlx5_eswitch *slave_esw, int max_slaves) 3031 { 3032 int err; 3033 3034 err = esw_set_slave_root_fdb(master_esw->dev, 3035 slave_esw->dev); 3036 if (err) 3037 return err; 3038 3039 err = esw_set_master_egress_rule(master_esw->dev, 3040 slave_esw->dev, max_slaves); 3041 if (err) 3042 goto err_acl; 3043 3044 return err; 3045 3046 err_acl: 3047 esw_set_slave_root_fdb(NULL, slave_esw->dev); 3048 return err; 3049 } 3050 3051 void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw, 3052 struct mlx5_eswitch *slave_esw) 3053 { 3054 esw_set_slave_root_fdb(NULL, slave_esw->dev); 3055 esw_unset_master_egress_rule(master_esw->dev, slave_esw->dev); 3056 } 3057 3058 #define ESW_OFFLOADS_DEVCOM_PAIR (0) 3059 #define ESW_OFFLOADS_DEVCOM_UNPAIR (1) 3060 3061 static void mlx5_esw_offloads_rep_event_unpair(struct mlx5_eswitch *esw, 3062 struct mlx5_eswitch *peer_esw) 3063 { 3064 const struct mlx5_eswitch_rep_ops *ops; 3065 struct mlx5_eswitch_rep *rep; 3066 unsigned long i; 3067 u8 rep_type; 3068 3069 mlx5_esw_for_each_rep(esw, i, rep) { 3070 rep_type = NUM_REP_TYPES; 3071 while (rep_type--) { 3072 ops = esw->offloads.rep_ops[rep_type]; 3073 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED && 3074 ops->event) 3075 ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_UNPAIR, peer_esw); 3076 } 3077 } 3078 } 3079 3080 static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw, 3081 struct mlx5_eswitch *peer_esw) 3082 { 3083 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT) 3084 mlx5e_tc_clean_fdb_peer_flows(esw); 3085 #endif 3086 mlx5_esw_offloads_rep_event_unpair(esw, peer_esw); 3087 esw_del_fdb_peer_miss_rules(esw, peer_esw->dev); 3088 } 3089 3090 static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw, 3091 struct mlx5_eswitch *peer_esw) 3092 { 3093 const struct mlx5_eswitch_rep_ops *ops; 3094 struct mlx5_eswitch_rep *rep; 3095 unsigned long i; 3096 u8 rep_type; 3097 int err; 3098 3099 err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev); 3100 if (err) 3101 return err; 3102 3103 mlx5_esw_for_each_rep(esw, i, rep) { 3104 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) { 3105 ops = esw->offloads.rep_ops[rep_type]; 3106 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED && 3107 ops->event) { 3108 err = ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_PAIR, peer_esw); 3109 if (err) 3110 goto err_out; 3111 } 3112 } 3113 } 3114 3115 return 0; 3116 3117 err_out: 3118 mlx5_esw_offloads_unpair(esw, peer_esw); 3119 return err; 3120 } 3121 3122 static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw, 3123 struct mlx5_eswitch *peer_esw, 3124 bool pair) 3125 { 3126 u16 peer_vhca_id = MLX5_CAP_GEN(peer_esw->dev, vhca_id); 3127 u16 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id); 3128 struct mlx5_flow_root_namespace *peer_ns; 3129 struct mlx5_flow_root_namespace *ns; 3130 int err; 3131 3132 peer_ns = peer_esw->dev->priv.steering->fdb_root_ns; 3133 ns = esw->dev->priv.steering->fdb_root_ns; 3134 3135 if (pair) { 3136 err = mlx5_flow_namespace_set_peer(ns, peer_ns, peer_vhca_id); 3137 if (err) 3138 return err; 3139 3140 err = mlx5_flow_namespace_set_peer(peer_ns, ns, vhca_id); 3141 if (err) { 3142 mlx5_flow_namespace_set_peer(ns, NULL, peer_vhca_id); 3143 return err; 3144 } 3145 } else { 3146 mlx5_flow_namespace_set_peer(ns, NULL, peer_vhca_id); 3147 mlx5_flow_namespace_set_peer(peer_ns, NULL, vhca_id); 3148 } 3149 3150 return 0; 3151 } 3152 3153 static int mlx5_esw_offloads_devcom_event(int event, 3154 void *my_data, 3155 void *event_data) 3156 { 3157 struct mlx5_eswitch *esw = my_data; 3158 struct mlx5_eswitch *peer_esw = event_data; 3159 u16 esw_i, peer_esw_i; 3160 bool esw_paired; 3161 int err; 3162 3163 peer_esw_i = MLX5_CAP_GEN(peer_esw->dev, vhca_id); 3164 esw_i = MLX5_CAP_GEN(esw->dev, vhca_id); 3165 esw_paired = !!xa_load(&esw->paired, peer_esw_i); 3166 3167 switch (event) { 3168 case ESW_OFFLOADS_DEVCOM_PAIR: 3169 if (mlx5_eswitch_vport_match_metadata_enabled(esw) != 3170 mlx5_eswitch_vport_match_metadata_enabled(peer_esw)) 3171 break; 3172 3173 if (esw_paired) 3174 break; 3175 3176 err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true); 3177 if (err) 3178 goto err_out; 3179 3180 err = mlx5_esw_offloads_pair(esw, peer_esw); 3181 if (err) 3182 goto err_peer; 3183 3184 err = mlx5_esw_offloads_pair(peer_esw, esw); 3185 if (err) 3186 goto err_pair; 3187 3188 err = xa_insert(&esw->paired, peer_esw_i, peer_esw, GFP_KERNEL); 3189 if (err) 3190 goto err_xa; 3191 3192 err = xa_insert(&peer_esw->paired, esw_i, esw, GFP_KERNEL); 3193 if (err) 3194 goto err_peer_xa; 3195 3196 esw->num_peers++; 3197 peer_esw->num_peers++; 3198 mlx5_devcom_comp_set_ready(esw->devcom, true); 3199 break; 3200 3201 case ESW_OFFLOADS_DEVCOM_UNPAIR: 3202 if (!esw_paired) 3203 break; 3204 3205 peer_esw->num_peers--; 3206 esw->num_peers--; 3207 if (!esw->num_peers && !peer_esw->num_peers) 3208 mlx5_devcom_comp_set_ready(esw->devcom, false); 3209 xa_erase(&peer_esw->paired, esw_i); 3210 xa_erase(&esw->paired, peer_esw_i); 3211 mlx5_esw_offloads_unpair(peer_esw, esw); 3212 mlx5_esw_offloads_unpair(esw, peer_esw); 3213 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false); 3214 break; 3215 } 3216 3217 return 0; 3218 3219 err_peer_xa: 3220 xa_erase(&esw->paired, peer_esw_i); 3221 err_xa: 3222 mlx5_esw_offloads_unpair(peer_esw, esw); 3223 err_pair: 3224 mlx5_esw_offloads_unpair(esw, peer_esw); 3225 err_peer: 3226 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false); 3227 err_out: 3228 mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d", 3229 event, err); 3230 return err; 3231 } 3232 3233 void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, 3234 const struct mlx5_devcom_match_attr *attr) 3235 { 3236 int i; 3237 3238 for (i = 0; i < MLX5_MAX_PORTS; i++) 3239 INIT_LIST_HEAD(&esw->offloads.peer_flows[i]); 3240 mutex_init(&esw->offloads.peer_mutex); 3241 3242 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch)) 3243 return; 3244 3245 if ((MLX5_VPORT_MANAGER(esw->dev) || mlx5_core_is_ecpf_esw_manager(esw->dev)) && 3246 !mlx5_lag_is_supported(esw->dev)) 3247 return; 3248 3249 xa_init(&esw->paired); 3250 esw->num_peers = 0; 3251 esw->devcom = mlx5_devcom_register_component(esw->dev->priv.devc, 3252 MLX5_DEVCOM_ESW_OFFLOADS, 3253 attr, 3254 mlx5_esw_offloads_devcom_event, 3255 esw); 3256 if (!esw->devcom) 3257 return; 3258 3259 mlx5_devcom_send_event(esw->devcom, 3260 ESW_OFFLOADS_DEVCOM_PAIR, 3261 ESW_OFFLOADS_DEVCOM_UNPAIR, 3262 esw); 3263 } 3264 3265 void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) 3266 { 3267 if (!esw->devcom) 3268 return; 3269 3270 mlx5_devcom_send_event(esw->devcom, 3271 ESW_OFFLOADS_DEVCOM_UNPAIR, 3272 ESW_OFFLOADS_DEVCOM_UNPAIR, 3273 esw); 3274 3275 mlx5_devcom_unregister_component(esw->devcom); 3276 xa_destroy(&esw->paired); 3277 esw->devcom = NULL; 3278 } 3279 3280 bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw) 3281 { 3282 return mlx5_devcom_comp_is_ready(esw->devcom); 3283 } 3284 3285 bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw) 3286 { 3287 if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl)) 3288 return false; 3289 3290 if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) & 3291 MLX5_FDB_TO_VPORT_REG_C_0)) 3292 return false; 3293 3294 return true; 3295 } 3296 3297 #define MLX5_ESW_METADATA_RSVD_UPLINK 1 3298 3299 /* Share the same metadata for uplink's. This is fine because: 3300 * (a) In shared FDB mode (LAG) both uplink's are treated the 3301 * same and tagged with the same metadata. 3302 * (b) In non shared FDB mode, packets from physical port0 3303 * cannot hit eswitch of PF1 and vice versa. 3304 */ 3305 static u32 mlx5_esw_match_metadata_reserved(struct mlx5_eswitch *esw) 3306 { 3307 return MLX5_ESW_METADATA_RSVD_UPLINK; 3308 } 3309 3310 u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw) 3311 { 3312 u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1; 3313 /* Reserve 0xf for internal port offload */ 3314 u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 2; 3315 u32 pf_num; 3316 int id; 3317 3318 /* Only 4 bits of pf_num */ 3319 pf_num = mlx5_get_dev_index(esw->dev); 3320 if (pf_num > max_pf_num) 3321 return 0; 3322 3323 /* Metadata is 4 bits of PFNUM and 12 bits of unique id */ 3324 /* Use only non-zero vport_id (2-4095) for all PF's */ 3325 id = ida_alloc_range(&esw->offloads.vport_metadata_ida, 3326 MLX5_ESW_METADATA_RSVD_UPLINK + 1, 3327 vport_end_ida, GFP_KERNEL); 3328 if (id < 0) 3329 return 0; 3330 id = (pf_num << ESW_VPORT_BITS) | id; 3331 return id; 3332 } 3333 3334 void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata) 3335 { 3336 u32 vport_bit_mask = (1 << ESW_VPORT_BITS) - 1; 3337 3338 /* Metadata contains only 12 bits of actual ida id */ 3339 ida_free(&esw->offloads.vport_metadata_ida, metadata & vport_bit_mask); 3340 } 3341 3342 static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw, 3343 struct mlx5_vport *vport) 3344 { 3345 if (vport->vport == MLX5_VPORT_UPLINK) 3346 vport->default_metadata = mlx5_esw_match_metadata_reserved(esw); 3347 else 3348 vport->default_metadata = mlx5_esw_match_metadata_alloc(esw); 3349 3350 vport->metadata = vport->default_metadata; 3351 return vport->metadata ? 0 : -ENOSPC; 3352 } 3353 3354 static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw, 3355 struct mlx5_vport *vport) 3356 { 3357 if (!vport->default_metadata) 3358 return; 3359 3360 if (vport->vport == MLX5_VPORT_UPLINK) 3361 return; 3362 3363 WARN_ON(vport->metadata != vport->default_metadata); 3364 mlx5_esw_match_metadata_free(esw, vport->default_metadata); 3365 } 3366 3367 static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw) 3368 { 3369 struct mlx5_vport *vport; 3370 unsigned long i; 3371 3372 if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) 3373 return; 3374 3375 mlx5_esw_for_each_vport(esw, i, vport) 3376 esw_offloads_vport_metadata_cleanup(esw, vport); 3377 } 3378 3379 static int esw_offloads_metadata_init(struct mlx5_eswitch *esw) 3380 { 3381 struct mlx5_vport *vport; 3382 unsigned long i; 3383 int err; 3384 3385 if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) 3386 return 0; 3387 3388 mlx5_esw_for_each_vport(esw, i, vport) { 3389 err = esw_offloads_vport_metadata_setup(esw, vport); 3390 if (err) 3391 goto metadata_err; 3392 } 3393 3394 return 0; 3395 3396 metadata_err: 3397 esw_offloads_metadata_uninit(esw); 3398 return err; 3399 } 3400 3401 int 3402 esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw, 3403 struct mlx5_vport *vport) 3404 { 3405 int err; 3406 3407 err = esw_acl_ingress_ofld_setup(esw, vport); 3408 if (err) 3409 return err; 3410 3411 err = esw_acl_egress_ofld_setup(esw, vport); 3412 if (err) 3413 goto egress_err; 3414 3415 return 0; 3416 3417 egress_err: 3418 esw_acl_ingress_ofld_cleanup(esw, vport); 3419 return err; 3420 } 3421 3422 void 3423 esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw, 3424 struct mlx5_vport *vport) 3425 { 3426 esw_acl_egress_ofld_cleanup(vport); 3427 esw_acl_ingress_ofld_cleanup(esw, vport); 3428 } 3429 3430 static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw) 3431 { 3432 struct mlx5_vport *uplink, *manager; 3433 int ret; 3434 3435 uplink = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); 3436 if (IS_ERR(uplink)) 3437 return PTR_ERR(uplink); 3438 3439 ret = esw_vport_create_offloads_acl_tables(esw, uplink); 3440 if (ret) 3441 return ret; 3442 3443 manager = mlx5_eswitch_get_vport(esw, esw->manager_vport); 3444 if (IS_ERR(manager)) { 3445 ret = PTR_ERR(manager); 3446 goto err_manager; 3447 } 3448 3449 ret = esw_vport_create_offloads_acl_tables(esw, manager); 3450 if (ret) 3451 goto err_manager; 3452 3453 return 0; 3454 3455 err_manager: 3456 esw_vport_destroy_offloads_acl_tables(esw, uplink); 3457 return ret; 3458 } 3459 3460 static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw) 3461 { 3462 struct mlx5_vport *vport; 3463 3464 vport = mlx5_eswitch_get_vport(esw, esw->manager_vport); 3465 if (!IS_ERR(vport)) 3466 esw_vport_destroy_offloads_acl_tables(esw, vport); 3467 3468 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); 3469 if (!IS_ERR(vport)) 3470 esw_vport_destroy_offloads_acl_tables(esw, vport); 3471 } 3472 3473 int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw) 3474 { 3475 struct mlx5_eswitch_rep *rep; 3476 unsigned long i; 3477 int ret; 3478 3479 if (!esw || esw->mode != MLX5_ESWITCH_OFFLOADS) 3480 return 0; 3481 3482 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK); 3483 if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED) 3484 return 0; 3485 3486 ret = __esw_offloads_load_rep(esw, rep, REP_IB); 3487 if (ret) 3488 return ret; 3489 3490 mlx5_esw_for_each_rep(esw, i, rep) { 3491 if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED) 3492 __esw_offloads_load_rep(esw, rep, REP_IB); 3493 } 3494 3495 return 0; 3496 } 3497 3498 static int esw_offloads_steering_init(struct mlx5_eswitch *esw) 3499 { 3500 struct mlx5_esw_indir_table *indir; 3501 int err; 3502 3503 memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb)); 3504 mutex_init(&esw->fdb_table.offloads.vports.lock); 3505 hash_init(esw->fdb_table.offloads.vports.table); 3506 atomic64_set(&esw->user_count, 0); 3507 3508 indir = mlx5_esw_indir_table_init(); 3509 if (IS_ERR(indir)) { 3510 err = PTR_ERR(indir); 3511 goto create_indir_err; 3512 } 3513 esw->fdb_table.offloads.indir = indir; 3514 3515 err = esw_create_offloads_acl_tables(esw); 3516 if (err) 3517 goto create_acl_err; 3518 3519 err = esw_create_offloads_table(esw); 3520 if (err) 3521 goto create_offloads_err; 3522 3523 err = esw_create_restore_table(esw); 3524 if (err) 3525 goto create_restore_err; 3526 3527 err = esw_create_offloads_fdb_tables(esw); 3528 if (err) 3529 goto create_fdb_err; 3530 3531 err = esw_create_vport_rx_group(esw); 3532 if (err) 3533 goto create_fg_err; 3534 3535 err = esw_create_vport_rx_drop_group(esw); 3536 if (err) 3537 goto create_rx_drop_fg_err; 3538 3539 err = esw_create_vport_rx_drop_rule(esw); 3540 if (err) 3541 goto create_rx_drop_rule_err; 3542 3543 return 0; 3544 3545 create_rx_drop_rule_err: 3546 esw_destroy_vport_rx_drop_group(esw); 3547 create_rx_drop_fg_err: 3548 esw_destroy_vport_rx_group(esw); 3549 create_fg_err: 3550 esw_destroy_offloads_fdb_tables(esw); 3551 create_fdb_err: 3552 esw_destroy_restore_table(esw); 3553 create_restore_err: 3554 esw_destroy_offloads_table(esw); 3555 create_offloads_err: 3556 esw_destroy_offloads_acl_tables(esw); 3557 create_acl_err: 3558 mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir); 3559 create_indir_err: 3560 mutex_destroy(&esw->fdb_table.offloads.vports.lock); 3561 return err; 3562 } 3563 3564 static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw) 3565 { 3566 mlx5_esw_fdb_drop_destroy(esw); 3567 if (esw->fdb_table.offloads.drop_root_fc) 3568 mlx5_fc_destroy(esw->dev, esw->fdb_table.offloads.drop_root_fc); 3569 esw->fdb_table.offloads.drop_root_fc = NULL; 3570 esw_destroy_vport_rx_drop_rule(esw); 3571 esw_destroy_vport_rx_drop_group(esw); 3572 esw_destroy_vport_rx_group(esw); 3573 esw_destroy_offloads_fdb_tables(esw); 3574 esw_destroy_restore_table(esw); 3575 esw_destroy_offloads_table(esw); 3576 esw_destroy_offloads_acl_tables(esw); 3577 mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir); 3578 mutex_destroy(&esw->fdb_table.offloads.vports.lock); 3579 } 3580 3581 static void 3582 esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, int work_gen, 3583 const u32 *out) 3584 { 3585 struct devlink *devlink; 3586 bool host_pf_disabled; 3587 u16 new_num_vfs; 3588 3589 devlink = priv_to_devlink(esw->dev); 3590 devl_lock(devlink); 3591 3592 /* Stale work from one or more mode changes ago. Bail out. */ 3593 if (work_gen != atomic_read(&esw->esw_funcs.generation)) 3594 goto unlock; 3595 3596 new_num_vfs = MLX5_GET(query_esw_functions_out, out, 3597 host_params_context.host_num_of_vfs); 3598 host_pf_disabled = MLX5_GET(query_esw_functions_out, out, 3599 host_params_context.host_pf_disabled); 3600 3601 if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled) 3602 goto unlock; 3603 3604 /* Number of VFs can only change from "0 to x" or "x to 0". */ 3605 if (esw->esw_funcs.num_vfs > 0) { 3606 mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs); 3607 } else { 3608 int err; 3609 3610 err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs, 3611 MLX5_VPORT_UC_ADDR_CHANGE); 3612 if (err) { 3613 devl_unlock(devlink); 3614 return; 3615 } 3616 } 3617 esw->esw_funcs.num_vfs = new_num_vfs; 3618 unlock: 3619 devl_unlock(devlink); 3620 } 3621 3622 static void esw_functions_changed_event_handler(struct work_struct *work) 3623 { 3624 struct mlx5_host_work *host_work; 3625 struct mlx5_eswitch *esw; 3626 const u32 *out; 3627 3628 host_work = container_of(work, struct mlx5_host_work, work); 3629 esw = host_work->esw; 3630 3631 out = mlx5_esw_query_functions(esw->dev); 3632 if (IS_ERR(out)) 3633 goto out; 3634 3635 esw_vfs_changed_event_handler(esw, host_work->work_gen, out); 3636 kvfree(out); 3637 out: 3638 kfree(host_work); 3639 } 3640 3641 int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data) 3642 { 3643 struct mlx5_esw_functions *esw_funcs; 3644 struct mlx5_host_work *host_work; 3645 struct mlx5_eswitch *esw; 3646 3647 host_work = kzalloc_obj(*host_work, GFP_ATOMIC); 3648 if (!host_work) 3649 return NOTIFY_DONE; 3650 3651 esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb); 3652 esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs); 3653 3654 host_work->esw = esw; 3655 host_work->work_gen = atomic_read(&esw_funcs->generation); 3656 3657 INIT_WORK(&host_work->work, esw_functions_changed_event_handler); 3658 queue_work(esw->work_queue, &host_work->work); 3659 3660 return NOTIFY_OK; 3661 } 3662 3663 static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw) 3664 { 3665 const u32 *query_host_out; 3666 3667 if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) 3668 return 0; 3669 3670 query_host_out = mlx5_esw_query_functions(esw->dev); 3671 if (IS_ERR(query_host_out)) 3672 return PTR_ERR(query_host_out); 3673 3674 /* Mark non local controller with non zero controller number. */ 3675 esw->offloads.host_number = MLX5_GET(query_esw_functions_out, query_host_out, 3676 host_params_context.host_number); 3677 kvfree(query_host_out); 3678 return 0; 3679 } 3680 3681 bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller) 3682 { 3683 /* Local controller is always valid */ 3684 if (controller == 0) 3685 return true; 3686 3687 if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) 3688 return false; 3689 3690 /* External host number starts with zero in device */ 3691 return (controller == esw->offloads.host_number + 1); 3692 } 3693 3694 int esw_offloads_enable(struct mlx5_eswitch *esw) 3695 { 3696 u8 mapping_id[MLX5_SW_IMAGE_GUID_MAX_BYTES]; 3697 struct mapping_ctx *reg_c0_obj_pool; 3698 struct mlx5_vport *vport; 3699 unsigned long i; 3700 u8 id_len; 3701 int err; 3702 3703 mutex_init(&esw->offloads.termtbl_mutex); 3704 mlx5_esw_adjacent_vhcas_setup(esw); 3705 3706 err = mlx5_rdma_enable_roce(esw->dev); 3707 if (err) 3708 goto err_roce; 3709 3710 err = mlx5_esw_host_number_init(esw); 3711 if (err) 3712 goto err_metadata; 3713 3714 err = esw_offloads_metadata_init(esw); 3715 if (err) 3716 goto err_metadata; 3717 3718 err = esw_set_passing_vport_metadata(esw, true); 3719 if (err) 3720 goto err_vport_metadata; 3721 3722 mlx5_query_nic_sw_system_image_guid(esw->dev, mapping_id, &id_len); 3723 3724 reg_c0_obj_pool = mapping_create_for_id(mapping_id, id_len, 3725 MAPPING_TYPE_CHAIN, 3726 sizeof(struct mlx5_mapped_obj), 3727 ESW_REG_C0_USER_DATA_METADATA_MASK, 3728 true); 3729 3730 if (IS_ERR(reg_c0_obj_pool)) { 3731 err = PTR_ERR(reg_c0_obj_pool); 3732 goto err_pool; 3733 } 3734 esw->offloads.reg_c0_obj_pool = reg_c0_obj_pool; 3735 3736 err = esw_offloads_steering_init(esw); 3737 if (err) 3738 goto err_steering_init; 3739 3740 if (esw->offloads_inactive) 3741 mlx5_esw_fdb_inactive(esw); 3742 else 3743 mlx5_esw_fdb_active(esw); 3744 3745 /* Representor will control the vport link state */ 3746 mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) 3747 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN; 3748 if (mlx5_core_ec_sriov_enabled(esw->dev)) 3749 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) 3750 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN; 3751 3752 /* Uplink vport rep must load first. */ 3753 err = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK); 3754 if (err) 3755 goto err_uplink; 3756 3757 err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE); 3758 if (err) 3759 goto err_vports; 3760 3761 return 0; 3762 3763 err_vports: 3764 /* rollback to legacy, indicates don't unregister the uplink netdev */ 3765 esw->dev->priv.flags |= MLX5_PRIV_FLAGS_SWITCH_LEGACY; 3766 mlx5_esw_offloads_rep_unload(esw, MLX5_VPORT_UPLINK); 3767 err_uplink: 3768 esw_offloads_steering_cleanup(esw); 3769 err_steering_init: 3770 mapping_destroy(reg_c0_obj_pool); 3771 err_pool: 3772 esw_set_passing_vport_metadata(esw, false); 3773 err_vport_metadata: 3774 esw_offloads_metadata_uninit(esw); 3775 err_metadata: 3776 mlx5_rdma_disable_roce(esw->dev); 3777 err_roce: 3778 mlx5_esw_adjacent_vhcas_cleanup(esw); 3779 mutex_destroy(&esw->offloads.termtbl_mutex); 3780 return err; 3781 } 3782 3783 static int esw_offloads_stop(struct mlx5_eswitch *esw, 3784 struct netlink_ext_ack *extack) 3785 { 3786 int err; 3787 3788 esw_mode_change(esw, MLX5_ESWITCH_LEGACY); 3789 3790 /* If changing from switchdev to legacy mode without sriov enabled, 3791 * no need to create legacy fdb. 3792 */ 3793 if (!mlx5_core_is_pf(esw->dev) || !mlx5_sriov_is_enabled(esw->dev)) 3794 return 0; 3795 3796 err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS); 3797 if (err) 3798 NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy"); 3799 3800 return err; 3801 } 3802 3803 void esw_offloads_disable(struct mlx5_eswitch *esw) 3804 { 3805 mlx5_eswitch_disable_pf_vf_vports(esw); 3806 mlx5_esw_offloads_rep_unload(esw, MLX5_VPORT_UPLINK); 3807 esw_set_passing_vport_metadata(esw, false); 3808 esw_offloads_steering_cleanup(esw); 3809 mapping_destroy(esw->offloads.reg_c0_obj_pool); 3810 esw_offloads_metadata_uninit(esw); 3811 mlx5_rdma_disable_roce(esw->dev); 3812 mlx5_esw_adjacent_vhcas_cleanup(esw); 3813 /* must be done after vhcas cleanup to avoid adjacent vports connect */ 3814 if (esw->offloads_inactive) 3815 mlx5_esw_fdb_active(esw); /* legacy mode always active */ 3816 mutex_destroy(&esw->offloads.termtbl_mutex); 3817 } 3818 3819 static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode) 3820 { 3821 switch (mode) { 3822 case DEVLINK_ESWITCH_MODE_LEGACY: 3823 *mlx5_mode = MLX5_ESWITCH_LEGACY; 3824 break; 3825 case DEVLINK_ESWITCH_MODE_SWITCHDEV: 3826 case DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE: 3827 *mlx5_mode = MLX5_ESWITCH_OFFLOADS; 3828 break; 3829 default: 3830 return -EINVAL; 3831 } 3832 3833 return 0; 3834 } 3835 3836 static int esw_mode_to_devlink(struct mlx5_eswitch *esw, u16 *mode) 3837 { 3838 switch (esw->mode) { 3839 case MLX5_ESWITCH_LEGACY: 3840 *mode = DEVLINK_ESWITCH_MODE_LEGACY; 3841 break; 3842 case MLX5_ESWITCH_OFFLOADS: 3843 if (esw->offloads_inactive) 3844 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE; 3845 else 3846 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; 3847 break; 3848 default: 3849 return -EINVAL; 3850 } 3851 3852 return 0; 3853 } 3854 3855 static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode) 3856 { 3857 switch (mode) { 3858 case DEVLINK_ESWITCH_INLINE_MODE_NONE: 3859 *mlx5_mode = MLX5_INLINE_MODE_NONE; 3860 break; 3861 case DEVLINK_ESWITCH_INLINE_MODE_LINK: 3862 *mlx5_mode = MLX5_INLINE_MODE_L2; 3863 break; 3864 case DEVLINK_ESWITCH_INLINE_MODE_NETWORK: 3865 *mlx5_mode = MLX5_INLINE_MODE_IP; 3866 break; 3867 case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT: 3868 *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP; 3869 break; 3870 default: 3871 return -EINVAL; 3872 } 3873 3874 return 0; 3875 } 3876 3877 static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode) 3878 { 3879 switch (mlx5_mode) { 3880 case MLX5_INLINE_MODE_NONE: 3881 *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE; 3882 break; 3883 case MLX5_INLINE_MODE_L2: 3884 *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK; 3885 break; 3886 case MLX5_INLINE_MODE_IP: 3887 *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK; 3888 break; 3889 case MLX5_INLINE_MODE_TCP_UDP: 3890 *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT; 3891 break; 3892 default: 3893 return -EINVAL; 3894 } 3895 3896 return 0; 3897 } 3898 3899 int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev) 3900 { 3901 struct mlx5_eswitch *esw = dev->priv.eswitch; 3902 int err; 3903 3904 if (!mlx5_esw_allowed(esw)) 3905 return 0; 3906 3907 /* Take TC into account */ 3908 err = mlx5_esw_try_lock(esw); 3909 if (err < 0) 3910 return err; 3911 3912 esw->offloads.num_block_mode++; 3913 mlx5_esw_unlock(esw); 3914 return 0; 3915 } 3916 3917 void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev) 3918 { 3919 struct mlx5_eswitch *esw = dev->priv.eswitch; 3920 3921 if (!mlx5_esw_allowed(esw)) 3922 return; 3923 3924 down_write(&esw->mode_lock); 3925 esw->offloads.num_block_mode--; 3926 up_write(&esw->mode_lock); 3927 } 3928 3929 /* Returns false only when uplink netdev exists and its netns is different from 3930 * devlink's netns. True for all others so entering switchdev mode is allowed. 3931 */ 3932 static bool mlx5_devlink_netdev_netns_immutable_set(struct devlink *devlink, 3933 bool immutable) 3934 { 3935 struct mlx5_core_dev *mdev = devlink_priv(devlink); 3936 struct net_device *netdev; 3937 bool ret; 3938 3939 netdev = mlx5_uplink_netdev_get(mdev); 3940 if (!netdev) 3941 return true; 3942 3943 rtnl_lock(); 3944 netdev->netns_immutable = immutable; 3945 ret = net_eq(dev_net(netdev), devlink_net(devlink)); 3946 rtnl_unlock(); 3947 3948 mlx5_uplink_netdev_put(mdev, netdev); 3949 return ret; 3950 } 3951 3952 /* Returns true when only changing between active and inactive switchdev mode */ 3953 static bool mlx5_devlink_switchdev_active_mode_change(struct mlx5_eswitch *esw, 3954 u16 devlink_mode) 3955 { 3956 /* current mode is not switchdev */ 3957 if (esw->mode != MLX5_ESWITCH_OFFLOADS) 3958 return false; 3959 3960 /* new mode is not switchdev */ 3961 if (devlink_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV && 3962 devlink_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE) 3963 return false; 3964 3965 /* already inactive: no change in current state */ 3966 if (devlink_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE && 3967 esw->offloads_inactive) 3968 return false; 3969 3970 /* already active: no change in current state */ 3971 if (devlink_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && 3972 !esw->offloads_inactive) 3973 return false; 3974 3975 down_write(&esw->mode_lock); 3976 esw->offloads_inactive = !esw->offloads_inactive; 3977 esw->eswitch_operation_in_progress = true; 3978 up_write(&esw->mode_lock); 3979 3980 if (esw->offloads_inactive) 3981 mlx5_esw_fdb_inactive(esw); 3982 else 3983 mlx5_esw_fdb_active(esw); 3984 3985 down_write(&esw->mode_lock); 3986 esw->eswitch_operation_in_progress = false; 3987 up_write(&esw->mode_lock); 3988 return true; 3989 } 3990 3991 #define MLX5_ESW_HOLD_TIMEOUT_MS 7000 3992 #define MLX5_ESW_HOLD_RETRY_DELAY_MS 500 3993 3994 void mlx5_eswitch_safe_aux_devs_remove(struct mlx5_core_dev *dev) 3995 { 3996 unsigned long timeout; 3997 bool hold_esw = true; 3998 3999 /* Wait for any concurrent eswitch mode transition to complete. */ 4000 if (!mlx5_esw_hold(dev)) { 4001 timeout = jiffies + msecs_to_jiffies(MLX5_ESW_HOLD_TIMEOUT_MS); 4002 while (!mlx5_esw_hold(dev)) { 4003 if (!time_before(jiffies, timeout)) { 4004 hold_esw = false; 4005 break; 4006 } 4007 msleep(MLX5_ESW_HOLD_RETRY_DELAY_MS); 4008 } 4009 } 4010 if (hold_esw) { 4011 if (mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS) 4012 mlx5_core_reps_aux_devs_remove(dev); 4013 mlx5_esw_release(dev); 4014 } 4015 } 4016 4017 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, 4018 struct netlink_ext_ack *extack) 4019 { 4020 u16 cur_mlx5_mode, mlx5_mode = 0; 4021 struct mlx5_eswitch *esw; 4022 int err = 0; 4023 4024 esw = mlx5_devlink_eswitch_get(devlink); 4025 if (IS_ERR(esw)) 4026 return PTR_ERR(esw); 4027 4028 if (mlx5_fw_reset_in_progress(esw->dev)) { 4029 NL_SET_ERR_MSG_MOD(extack, "Can't change eswitch mode during firmware reset"); 4030 return -EBUSY; 4031 } 4032 4033 if (esw_mode_from_devlink(mode, &mlx5_mode)) 4034 return -EINVAL; 4035 4036 if (mlx5_mode == MLX5_ESWITCH_OFFLOADS && mlx5_get_sd(esw->dev)) { 4037 NL_SET_ERR_MSG_MOD(extack, 4038 "Can't change E-Switch mode to switchdev when multi-PF netdev (Socket Direct) is configured."); 4039 return -EPERM; 4040 } 4041 4042 /* Avoid try_lock, active/inactive mode change is not restricted */ 4043 if (mlx5_devlink_switchdev_active_mode_change(esw, mode)) 4044 return 0; 4045 4046 mlx5_lag_disable_change(esw->dev); 4047 err = mlx5_esw_try_lock(esw); 4048 if (err < 0) { 4049 NL_SET_ERR_MSG_MOD(extack, "Can't change mode, E-Switch is busy"); 4050 goto enable_lag; 4051 } 4052 cur_mlx5_mode = err; 4053 err = 0; 4054 4055 if (cur_mlx5_mode == mlx5_mode) 4056 goto unlock; 4057 4058 if (esw->offloads.num_block_mode) { 4059 NL_SET_ERR_MSG_MOD(extack, 4060 "Can't change eswitch mode when IPsec SA and/or policies are configured"); 4061 err = -EOPNOTSUPP; 4062 goto unlock; 4063 } 4064 4065 esw->eswitch_operation_in_progress = true; 4066 up_write(&esw->mode_lock); 4067 4068 if (mlx5_mode == MLX5_ESWITCH_OFFLOADS && 4069 !mlx5_devlink_netdev_netns_immutable_set(devlink, true)) { 4070 NL_SET_ERR_MSG_MOD(extack, 4071 "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's."); 4072 err = -EINVAL; 4073 goto skip; 4074 } 4075 4076 if (mlx5_mode == MLX5_ESWITCH_LEGACY) 4077 esw->dev->priv.flags |= MLX5_PRIV_FLAGS_SWITCH_LEGACY; 4078 if (mlx5_mode == MLX5_ESWITCH_OFFLOADS) 4079 esw->dev->priv.flags &= ~MLX5_PRIV_FLAGS_SWITCH_LEGACY; 4080 mlx5_eswitch_disable_locked(esw); 4081 if (mlx5_mode == MLX5_ESWITCH_OFFLOADS) { 4082 if (mlx5_devlink_trap_get_num_active(esw->dev)) { 4083 NL_SET_ERR_MSG_MOD(extack, 4084 "Can't change mode while devlink traps are active"); 4085 err = -EOPNOTSUPP; 4086 goto skip; 4087 } 4088 esw->offloads_inactive = 4089 (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE); 4090 err = esw_offloads_start(esw, extack); 4091 } else if (mlx5_mode == MLX5_ESWITCH_LEGACY) { 4092 err = esw_offloads_stop(esw, extack); 4093 } else { 4094 err = -EINVAL; 4095 } 4096 4097 skip: 4098 if (mlx5_mode == MLX5_ESWITCH_OFFLOADS && err) 4099 mlx5_devlink_netdev_netns_immutable_set(devlink, false); 4100 down_write(&esw->mode_lock); 4101 esw->eswitch_operation_in_progress = false; 4102 unlock: 4103 mlx5_esw_unlock(esw); 4104 enable_lag: 4105 mlx5_lag_enable_change(esw->dev); 4106 return err; 4107 } 4108 4109 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) 4110 { 4111 struct mlx5_eswitch *esw; 4112 4113 esw = mlx5_devlink_eswitch_get(devlink); 4114 if (IS_ERR(esw)) 4115 return PTR_ERR(esw); 4116 4117 return esw_mode_to_devlink(esw, mode); 4118 } 4119 4120 static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode, 4121 struct netlink_ext_ack *extack) 4122 { 4123 struct mlx5_core_dev *dev = esw->dev; 4124 struct mlx5_vport *vport; 4125 u16 err_vport_num = 0; 4126 unsigned long i; 4127 int err = 0; 4128 4129 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) { 4130 err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode); 4131 if (err) { 4132 err_vport_num = vport->vport; 4133 NL_SET_ERR_MSG_MOD(extack, 4134 "Failed to set min inline on vport"); 4135 goto revert_inline_mode; 4136 } 4137 } 4138 if (mlx5_core_ec_sriov_enabled(esw->dev)) { 4139 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) { 4140 err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode); 4141 if (err) { 4142 err_vport_num = vport->vport; 4143 NL_SET_ERR_MSG_MOD(extack, 4144 "Failed to set min inline on vport"); 4145 goto revert_ec_vf_inline_mode; 4146 } 4147 } 4148 } 4149 return 0; 4150 4151 revert_ec_vf_inline_mode: 4152 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) { 4153 if (vport->vport == err_vport_num) 4154 break; 4155 mlx5_modify_nic_vport_min_inline(dev, 4156 vport->vport, 4157 esw->offloads.inline_mode); 4158 } 4159 revert_inline_mode: 4160 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) { 4161 if (vport->vport == err_vport_num) 4162 break; 4163 mlx5_modify_nic_vport_min_inline(dev, 4164 vport->vport, 4165 esw->offloads.inline_mode); 4166 } 4167 return err; 4168 } 4169 4170 int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, 4171 struct netlink_ext_ack *extack) 4172 { 4173 struct mlx5_core_dev *dev = devlink_priv(devlink); 4174 struct mlx5_eswitch *esw; 4175 u8 mlx5_mode; 4176 int err; 4177 4178 esw = mlx5_devlink_eswitch_get(devlink); 4179 if (IS_ERR(esw)) 4180 return PTR_ERR(esw); 4181 4182 down_write(&esw->mode_lock); 4183 4184 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { 4185 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: 4186 if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) { 4187 err = 0; 4188 goto out; 4189 } 4190 4191 fallthrough; 4192 case MLX5_CAP_INLINE_MODE_L2: 4193 NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set"); 4194 err = -EOPNOTSUPP; 4195 goto out; 4196 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: 4197 break; 4198 } 4199 4200 if (atomic64_read(&esw->offloads.num_flows) > 0) { 4201 NL_SET_ERR_MSG_MOD(extack, 4202 "Can't set inline mode when flows are configured"); 4203 err = -EOPNOTSUPP; 4204 goto out; 4205 } 4206 4207 err = esw_inline_mode_from_devlink(mode, &mlx5_mode); 4208 if (err) 4209 goto out; 4210 4211 esw->eswitch_operation_in_progress = true; 4212 up_write(&esw->mode_lock); 4213 4214 err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack); 4215 if (!err) 4216 esw->offloads.inline_mode = mlx5_mode; 4217 4218 down_write(&esw->mode_lock); 4219 esw->eswitch_operation_in_progress = false; 4220 up_write(&esw->mode_lock); 4221 return 0; 4222 4223 out: 4224 up_write(&esw->mode_lock); 4225 return err; 4226 } 4227 4228 int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode) 4229 { 4230 struct mlx5_eswitch *esw; 4231 4232 esw = mlx5_devlink_eswitch_get(devlink); 4233 if (IS_ERR(esw)) 4234 return PTR_ERR(esw); 4235 4236 return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); 4237 } 4238 4239 bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev, bool from_fdb) 4240 { 4241 struct mlx5_eswitch *esw = dev->priv.eswitch; 4242 enum devlink_eswitch_encap_mode encap; 4243 bool allow_tunnel = false; 4244 4245 if (!mlx5_esw_allowed(esw)) 4246 return true; 4247 4248 down_write(&esw->mode_lock); 4249 encap = esw->offloads.encap; 4250 if (esw->mode == MLX5_ESWITCH_LEGACY || 4251 (encap == DEVLINK_ESWITCH_ENCAP_MODE_NONE && !from_fdb)) { 4252 allow_tunnel = true; 4253 esw->offloads.num_block_encap++; 4254 } 4255 up_write(&esw->mode_lock); 4256 4257 return allow_tunnel; 4258 } 4259 4260 void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev) 4261 { 4262 struct mlx5_eswitch *esw = dev->priv.eswitch; 4263 4264 if (!mlx5_esw_allowed(esw)) 4265 return; 4266 4267 down_write(&esw->mode_lock); 4268 esw->offloads.num_block_encap--; 4269 up_write(&esw->mode_lock); 4270 } 4271 4272 int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, 4273 enum devlink_eswitch_encap_mode encap, 4274 struct netlink_ext_ack *extack) 4275 { 4276 struct mlx5_core_dev *dev = devlink_priv(devlink); 4277 struct mlx5_eswitch *esw; 4278 int err = 0; 4279 4280 esw = mlx5_devlink_eswitch_get(devlink); 4281 if (IS_ERR(esw)) 4282 return PTR_ERR(esw); 4283 4284 down_write(&esw->mode_lock); 4285 4286 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE && 4287 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) || 4288 !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) { 4289 err = -EOPNOTSUPP; 4290 goto unlock; 4291 } 4292 4293 if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) { 4294 err = -EOPNOTSUPP; 4295 goto unlock; 4296 } 4297 4298 if (esw->mode == MLX5_ESWITCH_LEGACY) { 4299 esw->offloads.encap = encap; 4300 goto unlock; 4301 } 4302 4303 if (esw->offloads.encap == encap) 4304 goto unlock; 4305 4306 if (atomic64_read(&esw->offloads.num_flows) > 0) { 4307 NL_SET_ERR_MSG_MOD(extack, 4308 "Can't set encapsulation when flows are configured"); 4309 err = -EOPNOTSUPP; 4310 goto unlock; 4311 } 4312 4313 if (esw->offloads.num_block_encap) { 4314 NL_SET_ERR_MSG_MOD(extack, 4315 "Can't set encapsulation when IPsec SA and/or policies are configured"); 4316 err = -EOPNOTSUPP; 4317 goto unlock; 4318 } 4319 4320 esw->eswitch_operation_in_progress = true; 4321 up_write(&esw->mode_lock); 4322 4323 esw_destroy_offloads_fdb_tables(esw); 4324 4325 esw->offloads.encap = encap; 4326 4327 err = esw_create_offloads_fdb_tables(esw); 4328 4329 if (err) { 4330 NL_SET_ERR_MSG_MOD(extack, 4331 "Failed re-creating fast FDB table"); 4332 esw->offloads.encap = !encap; 4333 (void)esw_create_offloads_fdb_tables(esw); 4334 } 4335 4336 down_write(&esw->mode_lock); 4337 esw->eswitch_operation_in_progress = false; 4338 4339 unlock: 4340 up_write(&esw->mode_lock); 4341 return err; 4342 } 4343 4344 int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, 4345 enum devlink_eswitch_encap_mode *encap) 4346 { 4347 struct mlx5_eswitch *esw; 4348 4349 esw = mlx5_devlink_eswitch_get(devlink); 4350 if (IS_ERR(esw)) 4351 return PTR_ERR(esw); 4352 4353 *encap = esw->offloads.encap; 4354 return 0; 4355 } 4356 4357 static bool 4358 mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num) 4359 { 4360 /* Currently, only ECPF based device has representor for host PF. */ 4361 if (vport_num == MLX5_VPORT_PF && 4362 (!mlx5_core_is_ecpf_esw_manager(esw->dev) || 4363 !mlx5_esw_host_functions_enabled(esw->dev))) 4364 return false; 4365 4366 if (vport_num == MLX5_VPORT_ECPF && 4367 !mlx5_ecpf_vport_exists(esw->dev)) 4368 return false; 4369 4370 return true; 4371 } 4372 4373 void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw, 4374 const struct mlx5_eswitch_rep_ops *ops, 4375 u8 rep_type) 4376 { 4377 struct mlx5_eswitch_rep_data *rep_data; 4378 struct mlx5_eswitch_rep *rep; 4379 unsigned long i; 4380 4381 esw->offloads.rep_ops[rep_type] = ops; 4382 mlx5_esw_for_each_rep(esw, i, rep) { 4383 if (likely(mlx5_eswitch_vport_has_rep(esw, rep->vport))) { 4384 rep->esw = esw; 4385 rep_data = &rep->rep_data[rep_type]; 4386 atomic_set(&rep_data->state, REP_REGISTERED); 4387 } 4388 } 4389 } 4390 EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps); 4391 4392 void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type) 4393 { 4394 struct mlx5_eswitch_rep *rep; 4395 unsigned long i; 4396 4397 if (esw->mode == MLX5_ESWITCH_OFFLOADS) 4398 __unload_reps_all_vport(esw, rep_type); 4399 4400 mlx5_esw_for_each_rep(esw, i, rep) 4401 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED); 4402 } 4403 EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps); 4404 4405 void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type) 4406 { 4407 struct mlx5_eswitch_rep *rep; 4408 4409 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK); 4410 return rep->rep_data[rep_type].priv; 4411 } 4412 4413 void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw, 4414 u16 vport, 4415 u8 rep_type) 4416 { 4417 struct mlx5_eswitch_rep *rep; 4418 4419 rep = mlx5_eswitch_get_rep(esw, vport); 4420 4421 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED && 4422 esw->offloads.rep_ops[rep_type]->get_proto_dev) 4423 return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep); 4424 return NULL; 4425 } 4426 EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev); 4427 4428 void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type) 4429 { 4430 return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type); 4431 } 4432 EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev); 4433 4434 struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw, 4435 u16 vport) 4436 { 4437 return mlx5_eswitch_get_rep(esw, vport); 4438 } 4439 EXPORT_SYMBOL(mlx5_eswitch_vport_rep); 4440 4441 bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw) 4442 { 4443 return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED); 4444 } 4445 EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled); 4446 4447 bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw) 4448 { 4449 return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA); 4450 } 4451 EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled); 4452 4453 u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, 4454 u16 vport_num) 4455 { 4456 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); 4457 4458 if (WARN_ON_ONCE(IS_ERR(vport))) 4459 return 0; 4460 4461 return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS); 4462 } 4463 EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match); 4464 4465 int mlx5_esw_vport_vhca_id_map(struct mlx5_eswitch *esw, 4466 struct mlx5_vport *vport) 4467 { 4468 u16 *old_entry, *vhca_map_entry, vhca_id; 4469 4470 if (WARN_ONCE(MLX5_VPORT_INVAL_VHCA_ID(vport), 4471 "vport %d vhca_id is not set", vport->vport)) { 4472 int err; 4473 4474 err = mlx5_vport_get_vhca_id(vport->dev, vport->vport, 4475 &vhca_id); 4476 if (err) 4477 return err; 4478 vport->vhca_id = vhca_id; 4479 } 4480 4481 vhca_id = vport->vhca_id; 4482 vhca_map_entry = kmalloc_obj(*vhca_map_entry); 4483 if (!vhca_map_entry) 4484 return -ENOMEM; 4485 4486 *vhca_map_entry = vport->vport; 4487 old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL); 4488 if (xa_is_err(old_entry)) { 4489 kfree(vhca_map_entry); 4490 return xa_err(old_entry); 4491 } 4492 kfree(old_entry); 4493 return 0; 4494 } 4495 4496 void mlx5_esw_vport_vhca_id_unmap(struct mlx5_eswitch *esw, 4497 struct mlx5_vport *vport) 4498 { 4499 u16 *vhca_map_entry; 4500 4501 vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vport->vhca_id); 4502 kfree(vhca_map_entry); 4503 } 4504 4505 int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num) 4506 { 4507 u16 *res = xa_load(&esw->offloads.vhca_map, vhca_id); 4508 4509 if (!res) 4510 return -ENOENT; 4511 4512 *vport_num = *res; 4513 return 0; 4514 } 4515 4516 u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw, 4517 u16 vport_num) 4518 { 4519 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); 4520 4521 if (WARN_ON_ONCE(IS_ERR(vport))) 4522 return 0; 4523 4524 return vport->metadata; 4525 } 4526 EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set); 4527 4528 int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port, 4529 u8 *hw_addr, int *hw_addr_len, 4530 struct netlink_ext_ack *extack) 4531 { 4532 struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink); 4533 struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port); 4534 4535 mutex_lock(&esw->state_lock); 4536 4537 mlx5_query_nic_vport_mac_address(esw->dev, vport->vport, true, 4538 vport->info.mac); 4539 ether_addr_copy(hw_addr, vport->info.mac); 4540 *hw_addr_len = ETH_ALEN; 4541 mutex_unlock(&esw->state_lock); 4542 return 0; 4543 } 4544 4545 int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port, 4546 const u8 *hw_addr, int hw_addr_len, 4547 struct netlink_ext_ack *extack) 4548 { 4549 struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink); 4550 struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port); 4551 4552 return mlx5_eswitch_set_vport_mac(esw, vport->vport, hw_addr); 4553 } 4554 4555 int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled, 4556 struct netlink_ext_ack *extack) 4557 { 4558 struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink); 4559 struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port); 4560 4561 if (!MLX5_CAP_GEN(esw->dev, migration)) { 4562 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration"); 4563 return -EOPNOTSUPP; 4564 } 4565 4566 if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) { 4567 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management"); 4568 return -EOPNOTSUPP; 4569 } 4570 4571 mutex_lock(&esw->state_lock); 4572 *is_enabled = vport->info.mig_enabled; 4573 mutex_unlock(&esw->state_lock); 4574 return 0; 4575 } 4576 4577 int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable, 4578 struct netlink_ext_ack *extack) 4579 { 4580 struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink); 4581 struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port); 4582 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); 4583 void *query_ctx; 4584 void *hca_caps; 4585 int err; 4586 4587 if (!MLX5_CAP_GEN(esw->dev, migration)) { 4588 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration"); 4589 return -EOPNOTSUPP; 4590 } 4591 4592 if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) { 4593 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management"); 4594 return -EOPNOTSUPP; 4595 } 4596 4597 mutex_lock(&esw->state_lock); 4598 4599 if (vport->info.mig_enabled == enable) { 4600 err = 0; 4601 goto out; 4602 } 4603 4604 query_ctx = kzalloc(query_out_sz, GFP_KERNEL); 4605 if (!query_ctx) { 4606 err = -ENOMEM; 4607 goto out; 4608 } 4609 4610 err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx, 4611 MLX5_CAP_GENERAL_2); 4612 if (err) { 4613 NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps"); 4614 goto out_free; 4615 } 4616 4617 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); 4618 MLX5_SET(cmd_hca_cap_2, hca_caps, migratable, enable); 4619 4620 err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport->vport, 4621 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2); 4622 if (err) { 4623 NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA migratable cap"); 4624 goto out_free; 4625 } 4626 4627 vport->info.mig_enabled = enable; 4628 4629 out_free: 4630 kfree(query_ctx); 4631 out: 4632 mutex_unlock(&esw->state_lock); 4633 return err; 4634 } 4635 4636 int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled, 4637 struct netlink_ext_ack *extack) 4638 { 4639 struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink); 4640 struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port); 4641 4642 if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) { 4643 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management"); 4644 return -EOPNOTSUPP; 4645 } 4646 4647 mutex_lock(&esw->state_lock); 4648 *is_enabled = vport->info.roce_enabled; 4649 mutex_unlock(&esw->state_lock); 4650 return 0; 4651 } 4652 4653 int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable, 4654 struct netlink_ext_ack *extack) 4655 { 4656 struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink); 4657 struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port); 4658 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); 4659 u16 vport_num = vport->vport; 4660 void *query_ctx; 4661 void *hca_caps; 4662 int err; 4663 4664 if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) { 4665 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management"); 4666 return -EOPNOTSUPP; 4667 } 4668 4669 mutex_lock(&esw->state_lock); 4670 4671 if (vport->info.roce_enabled == enable) { 4672 err = 0; 4673 goto out; 4674 } 4675 4676 query_ctx = kzalloc(query_out_sz, GFP_KERNEL); 4677 if (!query_ctx) { 4678 err = -ENOMEM; 4679 goto out; 4680 } 4681 4682 err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx, 4683 MLX5_CAP_GENERAL); 4684 if (err) { 4685 NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps"); 4686 goto out_free; 4687 } 4688 4689 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); 4690 MLX5_SET(cmd_hca_cap, hca_caps, roce, enable); 4691 4692 err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num, 4693 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE); 4694 if (err) { 4695 NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA roce cap"); 4696 goto out_free; 4697 } 4698 4699 vport->info.roce_enabled = enable; 4700 4701 out_free: 4702 kfree(query_ctx); 4703 out: 4704 mutex_unlock(&esw->state_lock); 4705 return err; 4706 } 4707 4708 int mlx5_devlink_pf_port_fn_state_get(struct devlink_port *port, 4709 enum devlink_port_fn_state *state, 4710 enum devlink_port_fn_opstate *opstate, 4711 struct netlink_ext_ack *extack) 4712 { 4713 struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port); 4714 const u32 *query_out; 4715 bool pf_disabled; 4716 4717 if (vport->vport != MLX5_VPORT_PF) { 4718 NL_SET_ERR_MSG_MOD(extack, "State get is not supported for VF"); 4719 return -EOPNOTSUPP; 4720 } 4721 4722 *state = vport->pf_activated ? 4723 DEVLINK_PORT_FN_STATE_ACTIVE : DEVLINK_PORT_FN_STATE_INACTIVE; 4724 4725 query_out = mlx5_esw_query_functions(vport->dev); 4726 if (IS_ERR(query_out)) 4727 return PTR_ERR(query_out); 4728 4729 pf_disabled = MLX5_GET(query_esw_functions_out, query_out, 4730 host_params_context.host_pf_disabled); 4731 4732 *opstate = pf_disabled ? DEVLINK_PORT_FN_OPSTATE_DETACHED : 4733 DEVLINK_PORT_FN_OPSTATE_ATTACHED; 4734 4735 kvfree(query_out); 4736 return 0; 4737 } 4738 4739 int mlx5_devlink_pf_port_fn_state_set(struct devlink_port *port, 4740 enum devlink_port_fn_state state, 4741 struct netlink_ext_ack *extack) 4742 { 4743 struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port); 4744 struct mlx5_core_dev *dev; 4745 4746 if (vport->vport != MLX5_VPORT_PF) { 4747 NL_SET_ERR_MSG_MOD(extack, "State set is not supported for VF"); 4748 return -EOPNOTSUPP; 4749 } 4750 4751 dev = vport->dev; 4752 4753 switch (state) { 4754 case DEVLINK_PORT_FN_STATE_ACTIVE: 4755 return mlx5_esw_host_pf_enable_hca(dev); 4756 case DEVLINK_PORT_FN_STATE_INACTIVE: 4757 return mlx5_esw_host_pf_disable_hca(dev); 4758 default: 4759 return -EOPNOTSUPP; 4760 } 4761 } 4762 4763 int 4764 mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule, 4765 struct mlx5_esw_flow_attr *esw_attr, int attr_idx) 4766 { 4767 struct mlx5_flow_destination new_dest = {}; 4768 struct mlx5_flow_destination old_dest = {}; 4769 4770 if (!esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, attr_idx)) 4771 return 0; 4772 4773 esw_setup_dest_fwd_ipsec(&old_dest, NULL, esw, esw_attr, attr_idx, 0, false); 4774 esw_setup_dest_fwd_vport(&new_dest, NULL, esw, esw_attr, attr_idx, 0, false); 4775 4776 return mlx5_modify_rule_destination(rule, &new_dest, &old_dest); 4777 } 4778 4779 #ifdef CONFIG_XFRM_OFFLOAD 4780 int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled, 4781 struct netlink_ext_ack *extack) 4782 { 4783 struct mlx5_eswitch *esw; 4784 struct mlx5_vport *vport; 4785 int err = 0; 4786 4787 esw = mlx5_devlink_eswitch_get(port->devlink); 4788 if (IS_ERR(esw)) 4789 return PTR_ERR(esw); 4790 4791 if (!mlx5_esw_ipsec_vf_offload_supported(esw->dev)) { 4792 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPSec crypto"); 4793 return -EOPNOTSUPP; 4794 } 4795 4796 vport = mlx5_devlink_port_vport_get(port); 4797 4798 mutex_lock(&esw->state_lock); 4799 if (!vport->enabled) { 4800 err = -EOPNOTSUPP; 4801 goto unlock; 4802 } 4803 4804 *is_enabled = vport->info.ipsec_crypto_enabled; 4805 unlock: 4806 mutex_unlock(&esw->state_lock); 4807 return err; 4808 } 4809 4810 int mlx5_devlink_port_fn_ipsec_crypto_set(struct devlink_port *port, bool enable, 4811 struct netlink_ext_ack *extack) 4812 { 4813 struct mlx5_eswitch *esw; 4814 struct mlx5_vport *vport; 4815 u16 vport_num; 4816 int err; 4817 4818 esw = mlx5_devlink_eswitch_get(port->devlink); 4819 if (IS_ERR(esw)) 4820 return PTR_ERR(esw); 4821 4822 vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index); 4823 err = mlx5_esw_ipsec_vf_crypto_offload_supported(esw->dev, vport_num); 4824 if (err) { 4825 NL_SET_ERR_MSG_MOD(extack, 4826 "Device doesn't support IPsec crypto"); 4827 return err; 4828 } 4829 4830 vport = mlx5_devlink_port_vport_get(port); 4831 4832 mutex_lock(&esw->state_lock); 4833 if (!vport->enabled) { 4834 err = -EOPNOTSUPP; 4835 NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled"); 4836 goto unlock; 4837 } 4838 4839 if (vport->info.ipsec_crypto_enabled == enable) 4840 goto unlock; 4841 4842 if (!esw->enabled_ipsec_vf_count && esw->dev->num_ipsec_offloads) { 4843 err = -EBUSY; 4844 goto unlock; 4845 } 4846 4847 err = mlx5_esw_ipsec_vf_crypto_offload_set(esw, vport, enable); 4848 if (err) { 4849 NL_SET_ERR_MSG_MOD(extack, "Failed to set IPsec crypto"); 4850 goto unlock; 4851 } 4852 4853 vport->info.ipsec_crypto_enabled = enable; 4854 if (enable) 4855 esw->enabled_ipsec_vf_count++; 4856 else 4857 esw->enabled_ipsec_vf_count--; 4858 unlock: 4859 mutex_unlock(&esw->state_lock); 4860 return err; 4861 } 4862 4863 int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled, 4864 struct netlink_ext_ack *extack) 4865 { 4866 struct mlx5_eswitch *esw; 4867 struct mlx5_vport *vport; 4868 int err = 0; 4869 4870 esw = mlx5_devlink_eswitch_get(port->devlink); 4871 if (IS_ERR(esw)) 4872 return PTR_ERR(esw); 4873 4874 if (!mlx5_esw_ipsec_vf_offload_supported(esw->dev)) { 4875 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPsec packet"); 4876 return -EOPNOTSUPP; 4877 } 4878 4879 vport = mlx5_devlink_port_vport_get(port); 4880 4881 mutex_lock(&esw->state_lock); 4882 if (!vport->enabled) { 4883 err = -EOPNOTSUPP; 4884 goto unlock; 4885 } 4886 4887 *is_enabled = vport->info.ipsec_packet_enabled; 4888 unlock: 4889 mutex_unlock(&esw->state_lock); 4890 return err; 4891 } 4892 4893 int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port, 4894 bool enable, 4895 struct netlink_ext_ack *extack) 4896 { 4897 struct mlx5_eswitch *esw; 4898 struct mlx5_vport *vport; 4899 u16 vport_num; 4900 int err; 4901 4902 esw = mlx5_devlink_eswitch_get(port->devlink); 4903 if (IS_ERR(esw)) 4904 return PTR_ERR(esw); 4905 4906 vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index); 4907 err = mlx5_esw_ipsec_vf_packet_offload_supported(esw->dev, vport_num); 4908 if (err) { 4909 NL_SET_ERR_MSG_MOD(extack, 4910 "Device doesn't support IPsec packet mode"); 4911 return err; 4912 } 4913 4914 vport = mlx5_devlink_port_vport_get(port); 4915 mutex_lock(&esw->state_lock); 4916 if (!vport->enabled) { 4917 err = -EOPNOTSUPP; 4918 NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled"); 4919 goto unlock; 4920 } 4921 4922 if (vport->info.ipsec_packet_enabled == enable) 4923 goto unlock; 4924 4925 if (!esw->enabled_ipsec_vf_count && esw->dev->num_ipsec_offloads) { 4926 err = -EBUSY; 4927 goto unlock; 4928 } 4929 4930 err = mlx5_esw_ipsec_vf_packet_offload_set(esw, vport, enable); 4931 if (err) { 4932 NL_SET_ERR_MSG_MOD(extack, 4933 "Failed to set IPsec packet mode"); 4934 goto unlock; 4935 } 4936 4937 vport->info.ipsec_packet_enabled = enable; 4938 if (enable) 4939 esw->enabled_ipsec_vf_count++; 4940 else 4941 esw->enabled_ipsec_vf_count--; 4942 unlock: 4943 mutex_unlock(&esw->state_lock); 4944 return err; 4945 } 4946 #endif /* CONFIG_XFRM_OFFLOAD */ 4947 4948 int 4949 mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port, u32 *max_io_eqs, 4950 struct netlink_ext_ack *extack) 4951 { 4952 struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port); 4953 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); 4954 u16 vport_num = vport->vport; 4955 struct mlx5_eswitch *esw; 4956 void *query_ctx; 4957 void *hca_caps; 4958 u32 max_eqs; 4959 int err; 4960 4961 esw = mlx5_devlink_eswitch_nocheck_get(port->devlink); 4962 if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) { 4963 NL_SET_ERR_MSG_MOD(extack, 4964 "Device doesn't support VHCA management"); 4965 return -EOPNOTSUPP; 4966 } 4967 4968 if (!MLX5_CAP_GEN_2(esw->dev, max_num_eqs_24b)) { 4969 NL_SET_ERR_MSG_MOD(extack, 4970 "Device doesn't support getting the max number of EQs"); 4971 return -EOPNOTSUPP; 4972 } 4973 4974 query_ctx = kzalloc(query_out_sz, GFP_KERNEL); 4975 if (!query_ctx) 4976 return -ENOMEM; 4977 4978 mutex_lock(&esw->state_lock); 4979 err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx, 4980 MLX5_CAP_GENERAL_2); 4981 if (err) { 4982 NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps"); 4983 goto out; 4984 } 4985 4986 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); 4987 max_eqs = MLX5_GET(cmd_hca_cap_2, hca_caps, max_num_eqs_24b); 4988 if (max_eqs < MLX5_ESW_MAX_CTRL_EQS) 4989 *max_io_eqs = 0; 4990 else 4991 *max_io_eqs = max_eqs - MLX5_ESW_MAX_CTRL_EQS; 4992 out: 4993 mutex_unlock(&esw->state_lock); 4994 kfree(query_ctx); 4995 return err; 4996 } 4997 4998 int 4999 mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port, u32 max_io_eqs, 5000 struct netlink_ext_ack *extack) 5001 { 5002 struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port); 5003 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); 5004 u16 vport_num = vport->vport; 5005 struct mlx5_eswitch *esw; 5006 void *query_ctx; 5007 void *hca_caps; 5008 u16 max_eqs; 5009 int err; 5010 5011 esw = mlx5_devlink_eswitch_nocheck_get(port->devlink); 5012 if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) { 5013 NL_SET_ERR_MSG_MOD(extack, 5014 "Device doesn't support VHCA management"); 5015 return -EOPNOTSUPP; 5016 } 5017 5018 if (!MLX5_CAP_GEN_2(esw->dev, max_num_eqs_24b)) { 5019 NL_SET_ERR_MSG_MOD(extack, 5020 "Device doesn't support changing the max number of EQs"); 5021 return -EOPNOTSUPP; 5022 } 5023 5024 if (check_add_overflow(max_io_eqs, MLX5_ESW_MAX_CTRL_EQS, &max_eqs)) { 5025 NL_SET_ERR_MSG_MOD(extack, "Supplied value out of range"); 5026 return -EINVAL; 5027 } 5028 5029 query_ctx = kzalloc(query_out_sz, GFP_KERNEL); 5030 if (!query_ctx) 5031 return -ENOMEM; 5032 5033 mutex_lock(&esw->state_lock); 5034 err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx, 5035 MLX5_CAP_GENERAL_2); 5036 if (err) { 5037 NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps"); 5038 goto out; 5039 } 5040 5041 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); 5042 MLX5_SET(cmd_hca_cap_2, hca_caps, max_num_eqs_24b, max_eqs); 5043 5044 if (mlx5_esw_is_sf_vport(esw, vport_num)) 5045 MLX5_SET(cmd_hca_cap_2, hca_caps, sf_eq_usage, 1); 5046 5047 err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num, 5048 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2); 5049 if (err) 5050 NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA caps"); 5051 vport->max_eqs_set = true; 5052 out: 5053 mutex_unlock(&esw->state_lock); 5054 kfree(query_ctx); 5055 return err; 5056 } 5057 5058 int 5059 mlx5_devlink_port_fn_max_io_eqs_set_sf_default(struct devlink_port *port, 5060 struct netlink_ext_ack *extack) 5061 { 5062 return mlx5_devlink_port_fn_max_io_eqs_set(port, 5063 MLX5_ESW_DEFAULT_SF_COMP_EQS, 5064 extack); 5065 } 5066