/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _MLX5_FS_
#define _MLX5_FS_

#include <linux/list.h>
#include <linux/bitops.h>

#include <dev/mlx5/mlx5_ifc.h>
#include <dev/mlx5/device.h>
#include <dev/mlx5/driver.h>

enum mlx5_flow_destination_type {
	MLX5_FLOW_DESTINATION_TYPE_NONE,
	MLX5_FLOW_DESTINATION_TYPE_VPORT,
	MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
	MLX5_FLOW_DESTINATION_TYPE_TIR,
	MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER,
	MLX5_FLOW_DESTINATION_TYPE_UPLINK,
	MLX5_FLOW_DESTINATION_TYPE_PORT,
	MLX5_FLOW_DESTINATION_TYPE_COUNTER,
	MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM,
	MLX5_FLOW_DESTINATION_TYPE_RANGE,
	MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE,
};

enum {
	MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO	= 1 << 16,
	MLX5_FLOW_CONTEXT_ACTION_ENCRYPT	= 1 << 17,
	MLX5_FLOW_CONTEXT_ACTION_DECRYPT	= 1 << 18,
	MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS	= 1 << 19,
};

enum {
	MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT = BIT(0),
	MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1),
	MLX5_FLOW_TABLE_TERMINATION = BIT(2),
	MLX5_FLOW_TABLE_UNMANAGED = BIT(3),
	MLX5_FLOW_TABLE_OTHER_VPORT = BIT(4),
};

/* Flow tag */
enum {
	MLX5_FS_DEFAULT_FLOW_TAG = 0xFFFFFF,
	MLX5_FS_ETH_FLOW_TAG = 0xFFFFFE,
	MLX5_FS_SNIFFER_FLOW_TAG = 0xFFFFFD,
};

enum mlx5_rule_fwd_action {
	MLX5_FLOW_RULE_FWD_ACTION_ALLOW = 0x1,
	MLX5_FLOW_RULE_FWD_ACTION_DROP = 0x2,
	MLX5_FLOW_RULE_FWD_ACTION_DEST = 0x4,
};

enum {
	MLX5_FS_FLOW_TAG_MASK = 0xFFFFFF,
};

#define	FS_MAX_TYPES		10
#define	FS_MAX_ENTRIES		32000U

#define	FS_REFORMAT_KEYWORD	"_reformat"

enum mlx5_flow_namespace_type {
	MLX5_FLOW_NAMESPACE_BYPASS,
	MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC,
	MLX5_FLOW_NAMESPACE_LAG,
	MLX5_FLOW_NAMESPACE_OFFLOADS,
	MLX5_FLOW_NAMESPACE_ETHTOOL,
	MLX5_FLOW_NAMESPACE_KERNEL,
	MLX5_FLOW_NAMESPACE_LEFTOVERS,
	MLX5_FLOW_NAMESPACE_ANCHOR,
	MLX5_FLOW_NAMESPACE_FDB_BYPASS,
	MLX5_FLOW_NAMESPACE_FDB,
	MLX5_FLOW_NAMESPACE_ESW_EGRESS,
	MLX5_FLOW_NAMESPACE_ESW_INGRESS,
	MLX5_FLOW_NAMESPACE_SNIFFER_RX,
	MLX5_FLOW_NAMESPACE_SNIFFER_TX,
	MLX5_FLOW_NAMESPACE_EGRESS,
	MLX5_FLOW_NAMESPACE_EGRESS_IPSEC,
	MLX5_FLOW_NAMESPACE_EGRESS_MACSEC,
	MLX5_FLOW_NAMESPACE_RDMA_RX,
	MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL,
	MLX5_FLOW_NAMESPACE_RDMA_TX,
	MLX5_FLOW_NAMESPACE_PORT_SEL,
	MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS,
	MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS,
	MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC,
	MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC,
};

enum {
	FDB_BYPASS_PATH,
	FDB_TC_OFFLOAD,
	FDB_FT_OFFLOAD,
	FDB_TC_MISS,
	FDB_BR_OFFLOAD,
	FDB_SLOW_PATH,
	FDB_PER_VPORT,
};

struct mlx5_flow_table;
struct mlx5_flow_group;
struct mlx5_flow_rule;
struct mlx5_flow_namespace;
struct mlx5_flow_handle;

enum {
	FLOW_CONTEXT_HAS_TAG = BIT(0),
};

struct mlx5_flow_context {
	u32 flags;
	u32 flow_tag;
	u32 flow_source;
};

struct mlx5_flow_spec {
	u8	match_criteria_enable;
	u32	match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
	u32	match_value[MLX5_ST_SZ_DW(fte_match_param)];
	struct mlx5_flow_context flow_context;
};
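
/*
 * Example (illustrative sketch only, not part of the driver API): one way a
 * caller might populate a struct mlx5_flow_spec to match IPv4 traffic on the
 * outer headers.  The fte_match_param layout and the MLX5_ADDR_OF()/
 * MLX5_SET()/MLX5_SET_TO_ONES() accessors come from mlx5_ifc.h and device.h;
 * MLX5_MATCH_OUTER_HEADERS is assumed to be the outer-headers criteria bit.
 */
static inline void
mlx5_fs_example_match_ipv4(struct mlx5_flow_spec *spec)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	/* Mask the ethertype field and match the IPv4 ethertype (0x0800). */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0x0800);
}
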
enum {
	MLX5_FLOW_DEST_VPORT_VHCA_ID = BIT(0),
	MLX5_FLOW_DEST_VPORT_REFORMAT_ID = BIT(1),
};

enum mlx5_flow_dest_range_field {
	MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN = 0,
};

struct mlx5_flow_destination {
	enum mlx5_flow_destination_type	type;
	union {
		u32			tir_num;
		u32			ft_num;
		struct mlx5_flow_table	*ft;
		u32			counter_id;
		struct {
			u16		num;
			u16		vhca_id;
			struct mlx5_pkt_reformat *pkt_reformat;
			u8		flags;
		} vport;
		struct {
			struct mlx5_flow_table *hit_ft;
			struct mlx5_flow_table *miss_ft;
			enum mlx5_flow_dest_range_field field;
			u32		min;
			u32		max;
		} range;
		u32			sampler_id;
	};
};

struct mlx5_exe_aso {
	u32 object_id;
	u8 type;
	u8 return_reg_id;
	union {
		u32 ctrl_data;
		struct {
			u8 meter_idx;
			u8 init_color;
		} flow_meter;
	};
};

enum {
	FLOW_ACT_NO_APPEND = BIT(0),
	FLOW_ACT_IGNORE_FLOW_LEVEL = BIT(1),
};

struct mlx5_fs_vlan {
	u16 ethtype;
	u16 vid;
	u8 prio;
};

#define	MLX5_FS_VLAN_DEPTH	2

enum mlx5_flow_act_crypto_type {
	MLX5_FLOW_ACT_CRYPTO_TYPE_IPSEC,
};

enum mlx5_flow_act_crypto_op {
	MLX5_FLOW_ACT_CRYPTO_OP_ENCRYPT,
	MLX5_FLOW_ACT_CRYPTO_OP_DECRYPT,
};

struct mlx5_flow_act_crypto_params {
	u32 obj_id;
	u8 type;	/* see enum mlx5_flow_act_crypto_type */
	u8 op;		/* see enum mlx5_flow_act_crypto_op */
};

struct mlx5_flow_act {
	u32 action;
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5_pkt_reformat *pkt_reformat;
	struct mlx5_flow_act_crypto_params crypto;
	u32 flags;
	struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
	struct ib_counters *counters;
	struct mlx5_flow_group *fg;
	struct mlx5_exe_aso exe_aso;
};

#define	FT_NAME_STR_SZ		20
#define	LEFTOVERS_RULE_NUM	2
static inline void build_leftovers_ft_param(char *name,
					    unsigned int *priority,
					    int *n_ent,
					    int *n_grp)
{
	snprintf(name, FT_NAME_STR_SZ, "leftovers");
	*priority = 0;	/* Priority of leftovers_prio-0 */
	*n_ent = LEFTOVERS_RULE_NUM + 1;	/* + 1 for star rules */
	*n_grp = LEFTOVERS_RULE_NUM;
}

static inline bool outer_header_zero(u32 *match_criteria)
{
	int size = MLX5_ST_SZ_BYTES(fte_match_param);
	char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
					     outer_headers);

	return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
						  outer_headers_c + 1,
						  size - 1);
}

struct mlx5_flow_namespace *
mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
				  enum mlx5_flow_namespace_type type,
				  int vport);

struct mlx5_flow_table_attr {
	int prio;
	int max_fte;
	u32 level;
	u32 flags;
	u16 uid;
	struct mlx5_flow_table *next_ft;

	struct {
		int max_num_groups;
		int num_reserved_entries;
	} autogroup;
};

struct mlx5_flow_namespace *
mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev, int n);

struct mlx5_flow_namespace *
mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
			enum mlx5_flow_namespace_type type);

/* The underlying implementation creates two extra entries for chaining flow
 * tables.  The caller should be aware that passing max_fte as a power of two
 * (2^N) therefore results in a flow table of double that size.
 */
struct mlx5_flow_table *
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
				    struct mlx5_flow_table_attr *ft_attr);
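
/*
 * Example (illustrative sketch only, not part of the driver API): creating an
 * auto-grouped table in the kernel RX namespace.  Because of the two extra
 * chaining entries noted above, requesting a power-of-two max_fte ends up
 * allocating a table of double that size.
 */
static inline struct mlx5_flow_table *
mlx5_fs_example_create_table(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_KERNEL);
	if (ns == NULL)
		return NULL;

	ft_attr.prio = 0;
	ft_attr.max_fte = 1000;		/* deliberately not a power of two */
	ft_attr.autogroup.max_num_groups = 4;

	return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
}
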
struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
			     struct mlx5_flow_table_attr *ft_attr, u16 vport);

struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
					struct mlx5_flow_namespace *ns,
					int prio, u32 level);

struct mlx5_flow_table *
mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
		       struct mlx5_flow_table_attr *ft_attr);
int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);

/* The in buffer should be set with the following values:
 * start_flow_index
 * end_flow_index
 * match_criteria_enable
 * match_criteria
 */
struct mlx5_flow_group *
mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);

struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		    const struct mlx5_flow_spec *spec,
		    struct mlx5_flow_act *flow_act,
		    struct mlx5_flow_destination *dest,
		    int num_dest);
void mlx5_del_flow_rules(struct mlx5_flow_handle **pp);

int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
				 struct mlx5_flow_destination *new_dest,
				 struct mlx5_flow_destination *old_dest);
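
/*
 * Example (illustrative sketch only, not part of the driver API): adding a
 * single rule that forwards packets matching @spec to a TIR.
 * MLX5_FLOW_CONTEXT_ACTION_FWD_DEST is assumed to be the forward-to-
 * destination action bit provided by mlx5_ifc.h.  The returned handle is
 * released with mlx5_del_flow_rules(&handle).
 */
static inline struct mlx5_flow_handle *
mlx5_fs_example_fwd_to_tir(struct mlx5_flow_table *ft,
			   const struct mlx5_flow_spec *spec, u32 tirn)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
	};
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_TIR,
		.tir_num = tirn,
	};

	return mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
}
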
/* The following API is for the sniffer */
typedef int (*rule_event_fn)(struct mlx5_flow_rule *rule,
			     bool ctx_changed,
			     void *client_data,
			     void *context);

struct mlx5_flow_handler;

struct flow_client_priv_data;

void mlx5e_sniffer_roce_mode_notify(struct mlx5_core_dev *mdev, int action);

int mlx5_set_rule_private_data(struct mlx5_flow_rule *rule,
			       struct mlx5_flow_handler *handler,
			       void *client_data);

struct mlx5_flow_handler *mlx5_register_rule_notifier(struct mlx5_core_dev *dev,
						       enum mlx5_flow_namespace_type ns_type,
						       rule_event_fn add_cb,
						       rule_event_fn del_cb,
						       void *context);

void mlx5_unregister_rule_notifier(struct mlx5_flow_handler *handler);

void mlx5_flow_iterate_existing_rules(struct mlx5_flow_namespace *ns,
				      rule_event_fn cb,
				      void *context);

void mlx5_get_match_criteria(u32 *match_criteria,
			     struct mlx5_flow_rule *rule);

void mlx5_get_match_value(u32 *match_value,
			  struct mlx5_flow_rule *rule);

u8 mlx5_get_match_criteria_enable(struct mlx5_flow_rule *rule);

struct mlx5_flow_rules_list *get_roce_flow_rules(u8 roce_mode);

void mlx5_del_flow_rules_list(struct mlx5_flow_rules_list *rules_list);

struct mlx5_flow_rules_list {
	struct list_head head;
};

struct mlx5_flow_rule_node {
	struct list_head list;
	u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
	u32 match_value[MLX5_ST_SZ_DW(fte_match_param)];
	u8 match_criteria_enable;
};

struct mlx5_core_fs_mask {
	u8 match_criteria_enable;
	u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
};

bool fs_match_exact_val(struct mlx5_core_fs_mask *mask,
			void *val1,
			void *val2);

bool fs_match_exact_mask(u8 match_criteria_enable1,
			 u8 match_criteria_enable2,
			 void *mask1,
			 void *mask2);
/********** End of sniffer API **********/

struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
						 enum mlx5_flow_namespace_type ns_type,
						 u8 num_actions,
						 void *modify_actions);
void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
				struct mlx5_modify_hdr *modify_hdr);

struct mlx5_pkt_reformat_params {
	int type;
	u8 param_0;
	u8 param_1;
	size_t size;
	void *data;
};

struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
						      struct mlx5_pkt_reformat_params *params,
						      enum mlx5_flow_namespace_type ns_type);
void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
				  struct mlx5_pkt_reformat *pkt_reformat);

/********** Flow counters API **********/
struct mlx5_fc;
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);

/* Same as mlx5_fc_create(), but does not queue the stats refresh thread. */
struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging);

void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
			  u64 *bytes, u64 *packets, u64 *lastuse);
int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
		  u64 *packets, u64 *bytes);
u32 mlx5_fc_id(struct mlx5_fc *counter);
/********** End of flow counters API **********/

u32 mlx5_flow_table_id(struct mlx5_flow_table *ft);
int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);

#endif /* _MLX5_FS_ */