1 /* 2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

#include "fs_core.h"
#include "fs_cmd.h"
#include "fs_ft_pool.h"
#include "mlx5_core.h"
#include "eswitch.h"

/*
 * Stub flow steering command callbacks: they issue no device command and
 * only keep the software state consistent, reporting success except where
 * noted below.
 */

/* No-op: nothing to program when the root flow table changes. */
static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns,
					struct mlx5_flow_table *ft,
					u32 underlay_qpn,
					bool disconnect)
{
	return 0;
}

/*
 * No device table is created; only the software-visible size is fixed up:
 * a zero max_fte request yields a one-entry table, any other request is
 * rounded up to the next power of two.
 */
static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   struct mlx5_flow_table_attr *ft_attr,
					   struct mlx5_flow_table *next_ft)
{
	int max_fte = ft_attr->max_fte;

	ft->max_fte = max_fte ? roundup_pow_of_two(max_fte) : 1;

	return 0;
}

/* No-op destroy: nothing was allocated on the device. */
static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft)
{
	return 0;
}

/* No-op table modify. */
static int mlx5_cmd_stub_modify_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   struct mlx5_flow_table *next_ft)
{
	return 0;
}

/* No-op flow group creation. */
static int mlx5_cmd_stub_create_flow_group(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   u32 *in,
					   struct mlx5_flow_group *fg)
{
	return 0;
}

/* No-op flow group destruction. */
static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft,
					    struct mlx5_flow_group *fg)
{
	return 0;
}

/* No-op FTE creation. */
static int mlx5_cmd_stub_create_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    struct fs_fte *fte)
{
	return 0;
}

/* In-place FTE update is not supported by the stub interface. */
static int mlx5_cmd_stub_update_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    int modify_mask,
				    struct fs_fte *fte)
{
	return -EOPNOTSUPP;
}

/* No-op FTE deletion. */
static int mlx5_cmd_stub_delete_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct fs_fte *fte)
{
	return 0;
}

/* Stub: no packet reformat context is allocated on the device. */
static int mlx5_cmd_stub_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
					       struct mlx5_pkt_reformat_params *params,
					       enum mlx5_flow_namespace_type namespace,
					       struct mlx5_pkt_reformat *pkt_reformat)
{
	return 0;
}

/* Stub: nothing to free. */
static void mlx5_cmd_stub_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
						  struct mlx5_pkt_reformat *pkt_reformat)
{
}

/* Stub: no modify-header context is allocated on the device. */
static int mlx5_cmd_stub_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
					     u8 namespace, u8 num_actions,
					     void *modify_actions,
					     struct mlx5_modify_hdr *modify_hdr)
{
	return 0;
}

/* Stub: nothing to free. */
static void mlx5_cmd_stub_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
						struct mlx5_modify_hdr *modify_hdr)
{
}

/* Stub: peering of root namespaces is a no-op. */
static int mlx5_cmd_stub_set_peer(struct mlx5_flow_root_namespace *ns,
				  struct mlx5_flow_root_namespace *peer_ns,
				  u16 peer_vhca_id)
{
	return 0;
}

/* Stub namespace create/destroy: always succeed. */
static int mlx5_cmd_stub_create_ns(struct mlx5_flow_root_namespace *ns)
{
	return 0;
}

static int mlx5_cmd_stub_destroy_ns(struct mlx5_flow_root_namespace *ns)
{
	return 0;
}

/* Stub: advertises no capabilities. */
static u32 mlx5_cmd_stub_get_capabilities(struct mlx5_flow_root_namespace *ns,
					  enum fs_flow_table_type ft_type)
{
	return 0;
}

/*
 * Point a slave device's FDB root at a table owned by the master.
 *
 * When @ft_id_valid is set, the slave's FDB root becomes table @ft_id in
 * the master's eswitch (table_eswitch_owner_vhca_id is taken from the
 * master's vhca_id capability).  Otherwise the slave's FDB root is
 * restored to its own namespace's current root table.
 *
 * The SET_FLOW_TABLE_ROOT command is executed on the @slave device.
 */
static int mlx5_cmd_set_slave_root_fdb(struct mlx5_core_dev *master,
				       struct mlx5_core_dev *slave,
				       bool ft_id_valid,
				       u32 ft_id)
{
	u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_namespace *ns;

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type,
		 FS_FT_FDB);
	if (ft_id_valid) {
		/* Root at the master-owned table. */
		MLX5_SET(set_flow_table_root_in, in,
			 table_eswitch_owner_vhca_id_valid, 1);
		MLX5_SET(set_flow_table_root_in, in,
			 table_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(master, vhca_id));
		MLX5_SET(set_flow_table_root_in, in, table_id,
			 ft_id);
	} else {
		/* Fall back to the slave's own FDB root table. */
		ns = mlx5_get_flow_namespace(slave,
					     MLX5_FLOW_NAMESPACE_FDB);
		root = find_root(&ns->node);
		MLX5_SET(set_flow_table_root_in, in, table_id,
			 root->root_ft->id);
	}

	return mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
}

/* Stub: match definers are not created on the device. */
static int
mlx5_cmd_stub_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
				    int definer_id)
{
	return 0;
}

static int
mlx5_cmd_stub_create_match_definer(struct mlx5_flow_root_namespace *ns,
				   u16 format_id, u32 *match_mask)
{
	return 0;
}

/*
 * Program the device's root flow table for @ft's table type, or disconnect
 * the current root when @disconnect is set.
 *
 * Early exits:
 *  - IB port with no underlay QPN: nothing to program unless the table is
 *    an RDMA RX/TX type.
 *  - Shared-FDB LAG on a non-master device: the master programs the root
 *    on behalf of the peers (see the peer loop further below).
 */
static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
				   struct mlx5_flow_table *ft, u32 underlay_qpn,
				   bool disconnect)
{
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	    underlay_qpn == 0 &&
	    (ft->type != FS_FT_RDMA_RX && ft->type != FS_FT_RDMA_TX))
		return 0;

	if (ft->type == FS_FT_FDB &&
	    mlx5_lag_is_shared_fdb(dev) &&
	    !mlx5_lag_is_master(dev))
		return 0;

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);

	/* op_mod 1 disconnects the root; otherwise point it at @ft. */
	if (disconnect)
		MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
	else
		MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);

	MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
	MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
	MLX5_SET(set_flow_table_root_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
	MLX5_SET(set_flow_table_root_in, in, eswitch_owner_vhca_id,
		 ft->esw_owner_vhca_id);
	MLX5_SET(set_flow_table_root_in, in, other_eswitch,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));

	err =
	      mlx5_cmd_exec_in(dev, set_flow_table_root, in);
	/*
	 * Shared-FDB LAG master: propagate the new root to every peer
	 * device as well, rolling back on failure.
	 */
	if (!err &&
	    ft->type == FS_FT_FDB &&
	    mlx5_lag_is_shared_fdb(dev) &&
	    mlx5_lag_is_master(dev)) {
		struct mlx5_core_dev *peer_dev;
		int i, j;

		mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) {
			err = mlx5_cmd_set_slave_root_fdb(dev, peer_dev, !disconnect,
							  (!disconnect) ? ft->id : 0);
			if (err && !disconnect) {
				/*
				 * Undo the peers already switched (j < i),
				 * then restore this device's own root table.
				 */
				mlx5_lag_for_each_peer_mdev(dev, peer_dev, j) {
					if (j < i)
						mlx5_cmd_set_slave_root_fdb(dev, peer_dev, 1,
									    ns->root_ft->id);
					else
						break;
				}

				MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
				MLX5_SET(set_flow_table_root_in, in, table_id,
					 ns->root_ft->id);
				mlx5_cmd_exec_in(dev, set_flow_table_root, in);
			}
			if (err)
				break;
		}

	}

	return err;
}

/*
 * Create a flow table on the device.
 *
 * The requested ft_attr->max_fte is translated to an available pool size
 * (-ENOSPC when none is available); on success ft->id and ft->max_fte are
 * updated, on command failure the reserved size is returned to the pool.
 */
static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_table_attr *ft_attr,
				      struct mlx5_flow_table *next_ft)
{
	int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
	int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
	int term = !!(ft->flags & MLX5_FLOW_TABLE_TERMINATION);
	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	unsigned int size;
	int err;

	size = mlx5_ft_pool_get_avail_sz(dev, ft->type, ft_attr->max_fte);
	if (!size)
		return -ENOSPC;

	MLX5_SET(create_flow_table_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_TABLE);

	MLX5_SET(create_flow_table_in, in, uid, ft_attr->uid);
	MLX5_SET(create_flow_table_in, in, table_type, ft->type);
	MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
	MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, size ?
		 ilog2(size) : 0);
	MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
	MLX5_SET(create_flow_table_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
	MLX5_SET(create_flow_table_in, in, eswitch_owner_vhca_id,
		 ft->esw_owner_vhca_id);
	MLX5_SET(create_flow_table_in, in, other_eswitch,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));

	MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
		 en_decap);
	MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
		 en_encap);
	MLX5_SET(create_flow_table_in, in, flow_table_context.termination_table,
		 term);

	switch (ft->op_mod) {
	case FS_FT_OP_MOD_NORMAL:
		/* Miss either forwards to @next_ft or uses the default action. */
		if (next_ft) {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_id, next_ft->id);
		} else {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 ft->def_miss_action);
		}
		break;

	case FS_FT_OP_MOD_LAG_DEMUX:
		MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
		if (next_ft)
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id,
				 next_ft->id);
		break;
	}

	err = mlx5_cmd_exec_inout(dev, create_flow_table, in, out);
	if (!err) {
		ft->id = MLX5_GET(create_flow_table_out, out,
				  table_id);
		ft->max_fte = size;
	} else {
		/* Command failed: give the reserved size back to the pool. */
		mlx5_ft_pool_put_sz(ns->dev, size);
	}

	return err;
}

/*
 * Destroy a flow table on the device and, on success, return its size to
 * the flow table pool.
 */
static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
				       struct mlx5_flow_table *ft)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	MLX5_SET(destroy_flow_table_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
	MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
	MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
	MLX5_SET(destroy_flow_table_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
	MLX5_SET(destroy_flow_table_in, in, eswitch_owner_vhca_id,
		 ft->esw_owner_vhca_id);
	MLX5_SET(destroy_flow_table_in, in, other_eswitch,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));

	err = mlx5_cmd_exec_in(dev, destroy_flow_table, in);
	if (!err)
		mlx5_ft_pool_put_sz(ns->dev, ft->max_fte);

	return err;
}

/*
 * Modify an existing flow table's miss handling.
 *
 * LAG demux tables update the LAG master "next table" id; all other
 * tables update the miss action/table (forward to @next_ft when given,
 * otherwise the table's default miss action).
 */
static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_table *next_ft)
{
	u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(modify_flow_table_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
	MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
	MLX5_SET(modify_flow_table_in, in, table_id, ft->id);

	if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, 0);
		}
	} else {
		MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
		MLX5_SET(modify_flow_table_in, in, other_vport,
			 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
		MLX5_SET(modify_flow_table_in, in, eswitch_owner_vhca_id,
			 ft->esw_owner_vhca_id);
		MLX5_SET(modify_flow_table_in, in, other_eswitch,
			 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_id,
				 next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 ft->def_miss_action);
		}
	}

	return mlx5_cmd_exec_in(dev, modify_flow_table, in);
}

/*
 * Create a flow group in @ft.  @in is a caller-provided CREATE_FLOW_GROUP
 * input mailbox; this function fills in the opcode and table addressing
 * fields, executes the command and stores the new group id in @fg on
 * success.
 */
static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      u32 *in,
				      struct mlx5_flow_group *fg)
{
	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	MLX5_SET(create_flow_group_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_GROUP);
	MLX5_SET(create_flow_group_in, in, table_type, ft->type);
	MLX5_SET(create_flow_group_in, in, table_id, ft->id);
	MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
	MLX5_SET(create_flow_group_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
	MLX5_SET(create_flow_group_in, in, eswitch_owner_vhca_id,
		 ft->esw_owner_vhca_id);
	MLX5_SET(create_flow_group_in, in, other_eswitch,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
	err = mlx5_cmd_exec_inout(dev, create_flow_group, in, out);
	if (!err)
		fg->id = MLX5_GET(create_flow_group_out, out,
				  group_id);
	return err;
}

/* Destroy flow group @fg of table @ft on the device. */
static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
				       struct mlx5_flow_table *ft,
				       struct mlx5_flow_group *fg)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(destroy_flow_group_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
	MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
	MLX5_SET(destroy_flow_group_in, in, group_id, fg->id);
	MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
	MLX5_SET(destroy_flow_group_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
	MLX5_SET(destroy_flow_group_in, in, eswitch_owner_vhca_id,
		 ft->esw_owner_vhca_id);
	MLX5_SET(destroy_flow_group_in, in, other_eswitch,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
	return mlx5_cmd_exec_in(dev, destroy_flow_group, in);
}

/*
 * Decide whether @fte must use the extended destination format.
 *
 * Extended format is needed when the FTE forwards to more than one
 * destination and at least one vport/uplink destination carries a
 * packet-reformat id.  Returns -EOPNOTSUPP when the firmware cannot
 * honour that layout (no extended-destination support, or too many encap
 * destinations); 0 otherwise, with the verdict stored in *extended_dest.
 */
static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
				  struct fs_fte *fte, bool *extended_dest)
{
	int fw_log_max_fdb_encap_uplink =
		MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
	int num_fwd_destinations = 0;
	struct mlx5_flow_rule *dst;
	int num_encap = 0;

	*extended_dest = false;
	if (!(fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return 0;

	list_for_each_entry(dst, &fte->node.children, node.list) {
		/* Counter and NONE entries are not forward destinations. */
		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ||
		    dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_NONE)
			continue;
		if ((dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
		     dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
		    dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
			num_encap++;
		num_fwd_destinations++;
	}
	if (num_fwd_destinations > 1 && num_encap > 0)
		*extended_dest = true;

	if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
		mlx5_core_warn(dev, "FW does not support extended destination");
		return -EOPNOTSUPP;
	}
	if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
		mlx5_core_warn(dev, "FW does not support more than %d encaps",
			       1 << fw_log_max_fdb_encap_uplink);
		return -EOPNOTSUPP;
	}

	return 0;
}

/*
 * Fill the execute_aso section of a flow context for a flow-meter action
 * from the FTE's exe_aso attributes.
 */
static void
mlx5_cmd_set_fte_flow_meter(struct fs_fte *fte, void *in_flow_context)
{
	void *exe_aso_ctrl;
	void *execute_aso;

	execute_aso = MLX5_ADDR_OF(flow_context, in_flow_context,
				   execute_aso[0]);
	MLX5_SET(execute_aso, execute_aso, valid, 1);
	MLX5_SET(execute_aso, execute_aso, aso_object_id,
		 fte->act_dests.action.exe_aso.object_id);

	exe_aso_ctrl = MLX5_ADDR_OF(execute_aso, execute_aso, exe_aso_ctrl);
	MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, return_reg_id,
		 fte->act_dests.action.exe_aso.return_reg_id);
	MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, aso_type,
		 fte->act_dests.action.exe_aso.type);
	MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, init_color,
		 fte->act_dests.action.exe_aso.flow_meter.init_color);
	MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, meter_id,
		 fte->act_dests.action.exe_aso.flow_meter.meter_idx);
}

/*
 * Build and execute a SET_FLOW_TABLE_ENTRY command for @fte.
 *
 * Serializes the FTE's flow context - match value, action bits, vlan
 * push headers, packet-reformat/modify-header ids, forward destination
 * list and counter list - into a dynamically sized input mailbox and
 * executes it.  @opmod/@modify_mask distinguish create from in-place
 * modify (see mlx5_cmd_create_fte()/mlx5_cmd_update_fte()).
 */
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
			    int opmod, int modify_mask,
			    struct mlx5_flow_table *ft,
			    unsigned group_id,
			    struct fs_fte *fte)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
	bool extended_dest = false;
	struct mlx5_flow_rule *dst;
	void *in_flow_context, *vlan;
	void *in_match_value;
	u32 reformat_id = 0;
	unsigned int inlen;
	int dst_cnt_size;
	u32 *in, action;
	void *in_dests;
	int err;

	if (mlx5_set_extended_dest(dev, fte, &extended_dest))
		return -EOPNOTSUPP;

	/* Per-destination entry size depends on the destination format. */
	if (!extended_dest)
		dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
	else
		dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);

	inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->act_dests.dests_size * dst_cnt_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id, ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	MLX5_SET(set_fte_in, in, ignore_flow_level,
		 !!(fte->act_dests.action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));

	MLX5_SET(set_fte_in, in, vport_number, ft->vport);
	MLX5_SET(set_fte_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
	MLX5_SET(set_fte_in, in, eswitch_owner_vhca_id, ft->esw_owner_vhca_id);
	MLX5_SET(set_fte_in, in, other_eswitch,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);

	MLX5_SET(flow_context, in_flow_context, flow_tag,
		 fte->act_dests.flow_context.flow_tag);
	MLX5_SET(flow_context, in_flow_context, flow_source,
		 fte->act_dests.flow_context.flow_source);
	MLX5_SET(flow_context, in_flow_context, uplink_hairpin_en,
		 !!(fte->act_dests.flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN));

	MLX5_SET(flow_context, in_flow_context, extended_destination,
		 extended_dest);

	/*
	 * With extended destinations the reformat id is carried per
	 * destination, so the flow-level packet-reformat action bit is
	 * cleared here.
	 */
	action = fte->act_dests.action.action;
	if (extended_dest)
		action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;

	MLX5_SET(flow_context, in_flow_context, action, action);

	if (!extended_dest && fte->act_dests.action.pkt_reformat) {
		struct mlx5_pkt_reformat *pkt_reformat =
			fte->act_dests.action.pkt_reformat;

		err = mlx5_fs_get_packet_reformat_id(pkt_reformat,
						     &reformat_id);
		if (err) {
			mlx5_core_err(dev,
				      "Unsupported pkt_reformat type (%d)\n",
				      pkt_reformat->reformat_type);
			goto err_out;
		}
	}

	MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
		 reformat_id);

	if (fte->act_dests.action.modify_hdr) {
		/* SW-owned modify headers cannot be used by this FW path. */
		if (fte->act_dests.action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
			mlx5_core_err(dev, "Can't use SW-owned modify_hdr in FW-owned table\n");
			err = -EOPNOTSUPP;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, modify_header_id,
			 fte->act_dests.action.modify_hdr->id);
	}

	MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_type,
		 fte->act_dests.action.crypto.type);
	MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_obj_id,
		 fte->act_dests.action.crypto.obj_id);

	/* Up to two vlan push headers. */
	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);

	MLX5_SET(vlan, vlan, ethtype, fte->act_dests.action.vlan[0].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->act_dests.action.vlan[0].vid);
	MLX5_SET(vlan, vlan, prio, fte->act_dests.action.vlan[0].prio);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);

	MLX5_SET(vlan, vlan, ethtype, fte->act_dests.action.vlan[1].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->act_dests.action.vlan[1].vid);
	MLX5_SET(vlan, vlan, prio, fte->act_dests.action.vlan[1].prio);

	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, &fte->val, sizeof(fte->val));

	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		/* Forward destinations; counters are serialized separately. */
		list_for_each_entry(dst, &fte->node.children, node.list) {
			enum mlx5_flow_destination_type type = dst->dest_attr.type;
			enum mlx5_ifc_flow_destination_type ifc_type;
			unsigned int id;

			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_NONE:
				continue;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
				id = dst->dest_attr.ft_num;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				id = dst->dest_attr.ft->id;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id_valid,
					 !!(dst->dest_attr.vport.flags &
					    MLX5_FLOW_DEST_VPORT_VHCA_ID));
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id,
					 dst->dest_attr.vport.vhca_id);
				if (type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) {
					/* destination_id is reserved */
					id = 0;
					ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
					break;
				}
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
				id = dst->dest_attr.vport.num;
				/* Per-destination reformat id (extended format). */
				if (extended_dest &&
				    dst->dest_attr.vport.pkt_reformat) {
					MLX5_SET(dest_format_struct, in_dests,
						 packet_reformat,
						 !!(dst->dest_attr.vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
					MLX5_SET(extended_dest_format, in_dests,
						 packet_reformat_id,
						 dst->dest_attr.vport.pkt_reformat->id);
				}
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
				id = dst->dest_attr.sampler_id;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE:
				MLX5_SET(dest_format_struct, in_dests,
					 destination_table_type, dst->dest_attr.ft->type);
				id = dst->dest_attr.ft->id;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TABLE_TYPE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_VHCA_RX:
				id = dst->dest_attr.vhca.id;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_VHCA_RX;
				break;
			default:
				id = dst->dest_attr.tir_num;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
			}

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 ifc_type);
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += dst_cnt_size;
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
					log_max_flow_counter,
					ft->type));
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			if (dst->dest_attr.type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 mlx5_fc_id(dst->dest_attr.counter));
			in_dests += dst_cnt_size;
			list_size++;
		}
		/* FW cap limits the counter list length. */
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
		/* Only the flow-meter ASO type is handled here. */
		if (fte->act_dests.action.exe_aso.type == MLX5_EXE_ASO_FLOW_METER) {
			mlx5_cmd_set_fte_flow_meter(fte, in_flow_context);
		} else {
			err = -EOPNOTSUPP;
			goto err_out;
		}
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}

/* Create a new FTE (op_mod 0, no modify mask). */
static int mlx5_cmd_create_fte(struct mlx5_flow_root_namespace *ns,
			       struct mlx5_flow_table *ft,
			       struct mlx5_flow_group *group,
			       struct fs_fte *fte)
{
	struct mlx5_core_dev *dev = ns->dev;
	unsigned int group_id = group->id;

	return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
}

/*
 * Update an existing FTE in place (op_mod 1).  Requires the firmware's
 * flow_modify_en capability; -EOPNOTSUPP otherwise.
 */
static int mlx5_cmd_update_fte(struct mlx5_flow_root_namespace *ns,
			       struct mlx5_flow_table *ft,
			       struct mlx5_flow_group *fg,
			       int modify_mask,
			       struct fs_fte *fte)
{
	int opmod;
	struct mlx5_core_dev *dev = ns->dev;
	int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
						flow_table_properties_nic_receive.
						flow_modify_en);
	if (!atomic_mod_cap)
		return -EOPNOTSUPP;
	opmod = 1;

	return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, fg->id, fte);
}

/* Delete the FTE at @fte->index from table @ft on the device. */
static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
			       struct mlx5_flow_table *ft,
			       struct fs_fte *fte)
{
	u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
	MLX5_SET(delete_fte_in, in, table_type, ft->type);
	MLX5_SET(delete_fte_in, in, table_id, ft->id);
	MLX5_SET(delete_fte_in, in, flow_index, fte->index);
	MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
	MLX5_SET(delete_fte_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
	MLX5_SET(delete_fte_in, in, eswitch_owner_vhca_id,
		 ft->esw_owner_vhca_id);
	MLX5_SET(delete_fte_in, in, other_eswitch,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));

	return mlx5_cmd_exec_in(dev, delete_fte, in);
}

/*
 * Allocate a bulk of flow counters; @alloc_bitmask selects the bulk
 * size.  On success *id holds the first counter id of the bulk.
 */
int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
			   enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
			   u32 *id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {};
	int err;

	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
	MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);

	err = mlx5_cmd_exec_inout(dev, alloc_flow_counter, in, out);
	if (!err)
		*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	return err;
}

/* Allocate a single flow counter (bulk bitmask 0). */
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
{
	return mlx5_cmd_fc_bulk_alloc(dev, 0, id);
}

/* Free flow counter @id. */
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {};

	MLX5_SET(dealloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
	MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
	return mlx5_cmd_exec_in(dev, dealloc_flow_counter, in);
}

/* Query one flow counter's packet and octet counts. */
int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
		      u64 *packets, u64 *bytes)
{
	u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter)] = {};
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};
	void *stats;
	int err = 0;

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
	*packets = MLX5_GET64(traffic_counter, stats, packets);
	*bytes = MLX5_GET64(traffic_counter, stats, octets);
	return 0;
}

/* Size in bytes of a bulk-query output buffer for @bulk_len counters. */
int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len)
{
	return MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len;
}

/*
 * Query @bulk_len consecutive counters starting at @base_id into @out,
 * which must be at least mlx5_cmd_fc_get_bulk_query_out_len(bulk_len)
 * bytes.
 */
int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
			   u32 *out)
{
	int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
	MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}

/*
 * Allocate a packet reformat (encap/decap) context on the device from
 * @params; on success the new context id is stored in @pkt_reformat.
 */
static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
					  struct mlx5_pkt_reformat_params *params,
					  enum mlx5_flow_namespace_type namespace,
					  struct mlx5_pkt_reformat *pkt_reformat)
{
	u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	void *packet_reformat_context_in;
int max_encap_size; 914 void *reformat; 915 int inlen; 916 int err; 917 u32 *in; 918 919 if (namespace == MLX5_FLOW_NAMESPACE_FDB || 920 namespace == MLX5_FLOW_NAMESPACE_FDB_BYPASS) 921 max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size); 922 else 923 max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size); 924 925 if (params->size > max_encap_size) { 926 mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n", 927 params->size, max_encap_size); 928 return -EINVAL; 929 } 930 931 in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) + 932 params->size, GFP_KERNEL); 933 if (!in) 934 return -ENOMEM; 935 936 packet_reformat_context_in = MLX5_ADDR_OF(alloc_packet_reformat_context_in, 937 in, packet_reformat_context); 938 reformat = MLX5_ADDR_OF(packet_reformat_context_in, 939 packet_reformat_context_in, 940 reformat_data); 941 inlen = reformat - (void *)in + params->size; 942 943 MLX5_SET(alloc_packet_reformat_context_in, in, opcode, 944 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT); 945 MLX5_SET(packet_reformat_context_in, packet_reformat_context_in, 946 reformat_data_size, params->size); 947 MLX5_SET(packet_reformat_context_in, packet_reformat_context_in, 948 reformat_type, params->type); 949 MLX5_SET(packet_reformat_context_in, packet_reformat_context_in, 950 reformat_param_0, params->param_0); 951 MLX5_SET(packet_reformat_context_in, packet_reformat_context_in, 952 reformat_param_1, params->param_1); 953 if (params->data && params->size) 954 memcpy(reformat, params->data, params->size); 955 956 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); 957 958 pkt_reformat->id = MLX5_GET(alloc_packet_reformat_context_out, 959 out, packet_reformat_id); 960 pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_FW; 961 962 kfree(in); 963 return err; 964 } 965 966 static void mlx5_cmd_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns, 967 struct mlx5_pkt_reformat *pkt_reformat) 968 { 969 u32 
in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {}; 970 struct mlx5_core_dev *dev = ns->dev; 971 972 MLX5_SET(dealloc_packet_reformat_context_in, in, opcode, 973 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT); 974 MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id, 975 pkt_reformat->id); 976 977 mlx5_cmd_exec_in(dev, dealloc_packet_reformat_context, in); 978 } 979 980 static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns, 981 u8 namespace, u8 num_actions, 982 void *modify_actions, 983 struct mlx5_modify_hdr *modify_hdr) 984 { 985 u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {}; 986 int max_actions, actions_size, inlen, err; 987 struct mlx5_core_dev *dev = ns->dev; 988 void *actions_in; 989 u8 table_type; 990 u32 *in; 991 992 switch (namespace) { 993 case MLX5_FLOW_NAMESPACE_FDB: 994 case MLX5_FLOW_NAMESPACE_FDB_BYPASS: 995 max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions); 996 table_type = FS_FT_FDB; 997 break; 998 case MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC: 999 case MLX5_FLOW_NAMESPACE_KERNEL: 1000 case MLX5_FLOW_NAMESPACE_BYPASS: 1001 max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions); 1002 table_type = FS_FT_NIC_RX; 1003 break; 1004 case MLX5_FLOW_NAMESPACE_EGRESS: 1005 case MLX5_FLOW_NAMESPACE_EGRESS_IPSEC: 1006 case MLX5_FLOW_NAMESPACE_EGRESS_MACSEC: 1007 max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions); 1008 table_type = FS_FT_NIC_TX; 1009 break; 1010 case MLX5_FLOW_NAMESPACE_ESW_INGRESS: 1011 max_actions = MLX5_CAP_ESW_INGRESS_ACL(dev, max_modify_header_actions); 1012 table_type = FS_FT_ESW_INGRESS_ACL; 1013 break; 1014 case MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC: 1015 case MLX5_FLOW_NAMESPACE_RDMA_TX: 1016 max_actions = MLX5_CAP_FLOWTABLE_RDMA_TX(dev, max_modify_header_actions); 1017 table_type = FS_FT_RDMA_TX; 1018 break; 1019 default: 1020 return -EOPNOTSUPP; 1021 } 1022 1023 if (num_actions > max_actions) { 1024 mlx5_core_warn(dev, "too 
many modify header actions %d, max supported %d\n", 1025 num_actions, max_actions); 1026 return -EOPNOTSUPP; 1027 } 1028 1029 actions_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions; 1030 inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size; 1031 1032 in = kzalloc(inlen, GFP_KERNEL); 1033 if (!in) 1034 return -ENOMEM; 1035 1036 MLX5_SET(alloc_modify_header_context_in, in, opcode, 1037 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT); 1038 MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type); 1039 MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions); 1040 1041 actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions); 1042 memcpy(actions_in, modify_actions, actions_size); 1043 1044 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); 1045 1046 modify_hdr->id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id); 1047 modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_FW; 1048 kfree(in); 1049 return err; 1050 } 1051 1052 static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns, 1053 struct mlx5_modify_hdr *modify_hdr) 1054 { 1055 u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {}; 1056 struct mlx5_core_dev *dev = ns->dev; 1057 1058 MLX5_SET(dealloc_modify_header_context_in, in, opcode, 1059 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT); 1060 MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id, 1061 modify_hdr->id); 1062 1063 mlx5_cmd_exec_in(dev, dealloc_modify_header_context, in); 1064 } 1065 1066 static int mlx5_cmd_destroy_match_definer(struct mlx5_flow_root_namespace *ns, 1067 int definer_id) 1068 { 1069 u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {}; 1070 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; 1071 1072 MLX5_SET(general_obj_in_cmd_hdr, in, opcode, 1073 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT); 1074 MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, 1075 MLX5_OBJ_TYPE_MATCH_DEFINER); 1076 MLX5_SET(general_obj_in_cmd_hdr, in, 
obj_id, definer_id); 1077 1078 return mlx5_cmd_exec(ns->dev, in, sizeof(in), out, sizeof(out)); 1079 } 1080 1081 static int mlx5_cmd_create_match_definer(struct mlx5_flow_root_namespace *ns, 1082 u16 format_id, u32 *match_mask) 1083 { 1084 u32 out[MLX5_ST_SZ_DW(create_match_definer_out)] = {}; 1085 u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {}; 1086 struct mlx5_core_dev *dev = ns->dev; 1087 void *ptr; 1088 int err; 1089 1090 MLX5_SET(create_match_definer_in, in, general_obj_in_cmd_hdr.opcode, 1091 MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 1092 MLX5_SET(create_match_definer_in, in, general_obj_in_cmd_hdr.obj_type, 1093 MLX5_OBJ_TYPE_MATCH_DEFINER); 1094 1095 ptr = MLX5_ADDR_OF(create_match_definer_in, in, obj_context); 1096 MLX5_SET(match_definer, ptr, format_id, format_id); 1097 1098 ptr = MLX5_ADDR_OF(match_definer, ptr, match_mask); 1099 memcpy(ptr, match_mask, MLX5_FLD_SZ_BYTES(match_definer, match_mask)); 1100 1101 err = mlx5_cmd_exec_inout(dev, create_match_definer, in, out); 1102 return err ? 
err : MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 1103 } 1104 1105 static u32 mlx5_cmd_get_capabilities(struct mlx5_flow_root_namespace *ns, 1106 enum fs_flow_table_type ft_type) 1107 { 1108 return MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH; 1109 } 1110 1111 static const struct mlx5_flow_cmds mlx5_flow_cmds = { 1112 .create_flow_table = mlx5_cmd_create_flow_table, 1113 .destroy_flow_table = mlx5_cmd_destroy_flow_table, 1114 .modify_flow_table = mlx5_cmd_modify_flow_table, 1115 .create_flow_group = mlx5_cmd_create_flow_group, 1116 .destroy_flow_group = mlx5_cmd_destroy_flow_group, 1117 .create_fte = mlx5_cmd_create_fte, 1118 .update_fte = mlx5_cmd_update_fte, 1119 .delete_fte = mlx5_cmd_delete_fte, 1120 .update_root_ft = mlx5_cmd_update_root_ft, 1121 .packet_reformat_alloc = mlx5_cmd_packet_reformat_alloc, 1122 .packet_reformat_dealloc = mlx5_cmd_packet_reformat_dealloc, 1123 .modify_header_alloc = mlx5_cmd_modify_header_alloc, 1124 .modify_header_dealloc = mlx5_cmd_modify_header_dealloc, 1125 .create_match_definer = mlx5_cmd_create_match_definer, 1126 .destroy_match_definer = mlx5_cmd_destroy_match_definer, 1127 .set_peer = mlx5_cmd_stub_set_peer, 1128 .create_ns = mlx5_cmd_stub_create_ns, 1129 .destroy_ns = mlx5_cmd_stub_destroy_ns, 1130 .get_capabilities = mlx5_cmd_get_capabilities, 1131 }; 1132 1133 static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = { 1134 .create_flow_table = mlx5_cmd_stub_create_flow_table, 1135 .destroy_flow_table = mlx5_cmd_stub_destroy_flow_table, 1136 .modify_flow_table = mlx5_cmd_stub_modify_flow_table, 1137 .create_flow_group = mlx5_cmd_stub_create_flow_group, 1138 .destroy_flow_group = mlx5_cmd_stub_destroy_flow_group, 1139 .create_fte = mlx5_cmd_stub_create_fte, 1140 .update_fte = mlx5_cmd_stub_update_fte, 1141 .delete_fte = mlx5_cmd_stub_delete_fte, 1142 .update_root_ft = mlx5_cmd_stub_update_root_ft, 1143 .packet_reformat_alloc = mlx5_cmd_stub_packet_reformat_alloc, 1144 .packet_reformat_dealloc = 
mlx5_cmd_stub_packet_reformat_dealloc, 1145 .modify_header_alloc = mlx5_cmd_stub_modify_header_alloc, 1146 .modify_header_dealloc = mlx5_cmd_stub_modify_header_dealloc, 1147 .create_match_definer = mlx5_cmd_stub_create_match_definer, 1148 .destroy_match_definer = mlx5_cmd_stub_destroy_match_definer, 1149 .set_peer = mlx5_cmd_stub_set_peer, 1150 .create_ns = mlx5_cmd_stub_create_ns, 1151 .destroy_ns = mlx5_cmd_stub_destroy_ns, 1152 .get_capabilities = mlx5_cmd_stub_get_capabilities, 1153 }; 1154 1155 const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void) 1156 { 1157 return &mlx5_flow_cmds; 1158 } 1159 1160 static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_stub_cmds(void) 1161 { 1162 return &mlx5_flow_cmd_stubs; 1163 } 1164 1165 const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type) 1166 { 1167 switch (type) { 1168 case FS_FT_NIC_RX: 1169 case FS_FT_ESW_EGRESS_ACL: 1170 case FS_FT_ESW_INGRESS_ACL: 1171 case FS_FT_FDB: 1172 case FS_FT_SNIFFER_RX: 1173 case FS_FT_SNIFFER_TX: 1174 case FS_FT_NIC_TX: 1175 case FS_FT_RDMA_RX: 1176 case FS_FT_RDMA_TX: 1177 case FS_FT_PORT_SEL: 1178 case FS_FT_RDMA_TRANSPORT_RX: 1179 case FS_FT_RDMA_TRANSPORT_TX: 1180 return mlx5_fs_cmd_get_fw_cmds(); 1181 default: 1182 return mlx5_fs_cmd_get_stub_cmds(); 1183 } 1184 } 1185 1186 int mlx5_fs_cmd_set_l2table_entry_silent(struct mlx5_core_dev *dev, u8 silent_mode) 1187 { 1188 u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {}; 1189 1190 if (silent_mode && !MLX5_CAP_GEN(dev, silent_mode_set)) 1191 return -EOPNOTSUPP; 1192 1193 MLX5_SET(set_l2_table_entry_in, in, opcode, MLX5_CMD_OP_SET_L2_TABLE_ENTRY); 1194 MLX5_SET(set_l2_table_entry_in, in, silent_mode_valid, 1); 1195 MLX5_SET(set_l2_table_entry_in, in, silent_mode, silent_mode); 1196 1197 return mlx5_cmd_exec_in(dev, set_l2_table_entry, in); 1198 } 1199 1200 int mlx5_fs_cmd_set_tx_flow_table_root(struct mlx5_core_dev *dev, u32 ft_id, bool disconnect) 1201 { 1202 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] 
= {}; 1203 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {}; 1204 1205 if (disconnect && 1206 !MLX5_CAP_FLOWTABLE_NIC_TX(dev, reset_root_to_default)) 1207 return -EOPNOTSUPP; 1208 1209 MLX5_SET(set_flow_table_root_in, in, opcode, 1210 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); 1211 MLX5_SET(set_flow_table_root_in, in, table_type, 1212 FS_FT_NIC_TX); 1213 if (disconnect) 1214 MLX5_SET(set_flow_table_root_in, in, op_mod, 1); 1215 else 1216 MLX5_SET(set_flow_table_root_in, in, table_id, ft_id); 1217 1218 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); 1219 } 1220