/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _MLX5_FS_
#define _MLX5_FS_

#include <linux/list.h>
#include <linux/bitops.h>

#include <dev/mlx5/mlx5_ifc.h>
#include <dev/mlx5/device.h>
#include <dev/mlx5/driver.h>

enum mlx5_flow_destination_type {
	MLX5_FLOW_DESTINATION_TYPE_NONE,
	MLX5_FLOW_DESTINATION_TYPE_VPORT,
	MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
	MLX5_FLOW_DESTINATION_TYPE_TIR,
	MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER,
	MLX5_FLOW_DESTINATION_TYPE_UPLINK,
	MLX5_FLOW_DESTINATION_TYPE_PORT,
	MLX5_FLOW_DESTINATION_TYPE_COUNTER,
	MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM,
	MLX5_FLOW_DESTINATION_TYPE_RANGE,
	MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE,
};

enum {
	MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO	= 1 << 16,
	MLX5_FLOW_CONTEXT_ACTION_ENCRYPT	= 1 << 17,
	MLX5_FLOW_CONTEXT_ACTION_DECRYPT	= 1 << 18,
	MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS	= 1 << 19,
};

enum {
	MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT = BIT(0),
	MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1),
	MLX5_FLOW_TABLE_TERMINATION = BIT(2),
	MLX5_FLOW_TABLE_UNMANAGED = BIT(3),
	MLX5_FLOW_TABLE_OTHER_VPORT = BIT(4),
};

/* Flow tag */
enum {
	MLX5_FS_DEFAULT_FLOW_TAG = 0xFFFFFF,
	MLX5_FS_ETH_FLOW_TAG     = 0xFFFFFE,
	MLX5_FS_SNIFFER_FLOW_TAG = 0xFFFFFD,
};

enum {
	MLX5_FS_FLOW_TAG_MASK = 0xFFFFFF,
};

#define FS_MAX_TYPES		10
#define FS_MAX_ENTRIES		32000U

#define FS_REFORMAT_KEYWORD "_reformat"

enum mlx5_flow_namespace_type {
	MLX5_FLOW_NAMESPACE_BYPASS,
	MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC,
	MLX5_FLOW_NAMESPACE_LAG,
	MLX5_FLOW_NAMESPACE_OFFLOADS,
	MLX5_FLOW_NAMESPACE_ETHTOOL,
	MLX5_FLOW_NAMESPACE_KERNEL,
	MLX5_FLOW_NAMESPACE_LEFTOVERS,
	MLX5_FLOW_NAMESPACE_ANCHOR,
	MLX5_FLOW_NAMESPACE_FDB_BYPASS,
	MLX5_FLOW_NAMESPACE_FDB,
	MLX5_FLOW_NAMESPACE_ESW_EGRESS,
	MLX5_FLOW_NAMESPACE_ESW_INGRESS,
	MLX5_FLOW_NAMESPACE_SNIFFER_RX,
	MLX5_FLOW_NAMESPACE_SNIFFER_TX,
	MLX5_FLOW_NAMESPACE_EGRESS,
	MLX5_FLOW_NAMESPACE_EGRESS_IPSEC,
	MLX5_FLOW_NAMESPACE_EGRESS_MACSEC,
	MLX5_FLOW_NAMESPACE_RDMA_RX,
	MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL,
	MLX5_FLOW_NAMESPACE_RDMA_TX,
	MLX5_FLOW_NAMESPACE_PORT_SEL,
	MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS,
	MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS,
	MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC,
	MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC,
};

enum {
	FDB_BYPASS_PATH,
	FDB_TC_OFFLOAD,
	FDB_FT_OFFLOAD,
	FDB_TC_MISS,
	FDB_BR_OFFLOAD,
	FDB_SLOW_PATH,
	FDB_PER_VPORT,
};

struct mlx5_flow_table;
struct mlx5_flow_group;
struct mlx5_flow_rule;
struct mlx5_flow_namespace;
struct mlx5_flow_handle;

enum {
	FLOW_CONTEXT_HAS_TAG = BIT(0),
};

struct mlx5_flow_context {
	u32 flags;
	u32 flow_tag;
	u32 flow_source;
};

struct mlx5_flow_spec {
	u8   match_criteria_enable;
	u32  match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
	u32  match_value[MLX5_ST_SZ_DW(fte_match_param)];
	struct mlx5_flow_context flow_context;
};
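
/*
 * Example (illustrative sketch only, not part of the API): populating a
 * struct mlx5_flow_spec to match IPv4 packets on the outer headers.  The
 * MLX5_ADDR_OF()/MLX5_SET() accessors are the usual mlx5_ifc helpers;
 * MLX5_MATCH_OUTER_HEADERS is assumed to be provided by <dev/mlx5/device.h>
 * and kzalloc() by the linuxkpi compat layer.
 *
 *	struct mlx5_flow_spec *spec;
 *	void *headers_c, *headers_v;
 *
 *	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
 *	if (spec == NULL)
 *		return (-ENOMEM);
 *
 *	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
 *	    outer_headers);
 *	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
 *	    outer_headers);
 *	MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype, 0xffff);
 *	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0x0800);
 *	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 */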

enum {
	MLX5_FLOW_DEST_VPORT_VHCA_ID     = BIT(0),
	MLX5_FLOW_DEST_VPORT_REFORMAT_ID = BIT(1),
};

enum mlx5_flow_dest_range_field {
	MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN = 0,
};

struct mlx5_flow_destination {
	enum mlx5_flow_destination_type	type;
	union {
		u32			tir_num;
		u32			ft_num;
		struct mlx5_flow_table	*ft;
		u32			counter_id;
		struct {
			u16		num;
			u16		vhca_id;
			struct mlx5_pkt_reformat *pkt_reformat;
			u8		flags;
		} vport;
		struct {
			struct mlx5_flow_table	*hit_ft;
			struct mlx5_flow_table	*miss_ft;
			enum mlx5_flow_dest_range_field field;
			u32			min;
			u32			max;
		} range;
		u32			sampler_id;
	};
};

struct mlx5_exe_aso {
	u32 object_id;
	u8 type;
	u8 return_reg_id;
	union {
		u32 ctrl_data;
		struct {
			u8 meter_idx;
			u8 init_color;
		} flow_meter;
	};
};

enum {
	FLOW_ACT_NO_APPEND = BIT(0),
	FLOW_ACT_IGNORE_FLOW_LEVEL = BIT(1),
};

struct mlx5_fs_vlan {
	u16 ethtype;
	u16 vid;
	u8  prio;
};

#define MLX5_FS_VLAN_DEPTH	2

enum mlx5_flow_act_crypto_type {
	MLX5_FLOW_ACT_CRYPTO_TYPE_IPSEC,
};

enum mlx5_flow_act_crypto_op {
	MLX5_FLOW_ACT_CRYPTO_OP_ENCRYPT,
	MLX5_FLOW_ACT_CRYPTO_OP_DECRYPT,
};

struct mlx5_flow_act_crypto_params {
	u32 obj_id;
	u8 type;	/* see enum mlx5_flow_act_crypto_type */
	u8 op;		/* see enum mlx5_flow_act_crypto_op */
};

struct mlx5_flow_act {
	u32 action;
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5_pkt_reformat *pkt_reformat;
	struct mlx5_flow_act_crypto_params crypto;
	u32 flags;
	struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
	struct ib_counters *counters;
	struct mlx5_flow_group *fg;
	struct mlx5_exe_aso exe_aso;
};

#define FT_NAME_STR_SZ 20
#define LEFTOVERS_RULE_NUM 2
static inline void build_leftovers_ft_param(char *name,
					    unsigned int *priority,
					    int *n_ent,
					    int *n_grp)
{
	snprintf(name, FT_NAME_STR_SZ, "leftovers");
	*priority = 0; /* Priority of leftovers_prio-0 */
	*n_ent = LEFTOVERS_RULE_NUM + 1; /* 1: star rules */
	*n_grp = LEFTOVERS_RULE_NUM;
}

static inline bool outer_header_zero(u32 *match_criteria)
{
	int size = MLX5_ST_SZ_BYTES(fte_match_param);
	char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
					     outer_headers);

	return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
						  outer_headers_c + 1,
						  size - 1);
}

struct mlx5_flow_namespace *
mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
				  enum mlx5_flow_namespace_type type,
				  int vport);

struct mlx5_flow_table_attr {
	int prio;
	int max_fte;
	u32 level;
	u32 flags;
	u16 uid;
	struct mlx5_flow_table *next_ft;

	struct {
		int max_num_groups;
		int num_reserved_entries;
	} autogroup;
};

struct mlx5_flow_namespace *
mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev, int n);

struct mlx5_flow_namespace *
mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
			enum mlx5_flow_namespace_type type);

/* The underlying implementation creates two extra entries for
 * chaining flow tables.  Callers should be aware that passing
 * max_fte as a power of two (2^N) therefore results in a flow
 * table of double that size.
 */
struct mlx5_flow_table *
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
				    struct mlx5_flow_table_attr *ft_attr);

struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
			     struct mlx5_flow_table_attr *ft_attr, u16 vport);

struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
					       struct mlx5_flow_namespace *ns,
					       int prio, u32 level);

struct mlx5_flow_table *
mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
		       struct mlx5_flow_table_attr *ft_attr);
int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);

/* The "in" mailbox should be set with the following values:
 * start_flow_index
 * end_flow_index
 * match_criteria_enable
 * match_criteria
 */
struct mlx5_flow_group *
mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);

struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		    const struct mlx5_flow_spec *spec,
		    struct mlx5_flow_act *flow_act,
		    struct mlx5_flow_destination *dest,
		    int num_dest);
void mlx5_del_flow_rules(struct mlx5_flow_handle **pp);

int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
				 struct mlx5_flow_destination *new_dest,
				 struct mlx5_flow_destination *old_dest);
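
/*
 * Example (illustrative sketch only): creating an auto-grouped table in the
 * kernel namespace and steering matching traffic to a TIR.  Error handling
 * is omitted; "mdev", "tirn" and "spec" are placeholders, and
 * MLX5_FLOW_CONTEXT_ACTION_FWD_DEST is assumed to come from
 * <dev/mlx5/mlx5_ifc.h>.
 *
 *	struct mlx5_flow_table_attr ft_attr = {};
 *	struct mlx5_flow_destination dest = {};
 *	struct mlx5_flow_act flow_act = {};
 *	struct mlx5_flow_namespace *ns;
 *	struct mlx5_flow_table *ft;
 *	struct mlx5_flow_handle *rule;
 *
 *	ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_KERNEL);
 *	ft_attr.max_fte = 2;
 *	ft_attr.autogroup.max_num_groups = 1;
 *	ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
 *
 *	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 *	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
 *	dest.tir_num = tirn;
 *	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
 *
 *	...
 *	mlx5_del_flow_rules(&rule);
 *	mlx5_destroy_flow_table(ft);
 */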

/* The following API is for the sniffer */
typedef int (*rule_event_fn)(struct mlx5_flow_rule *rule,
			     bool ctx_changed,
			     void *client_data,
			     void *context);

struct mlx5_flow_handler;

struct flow_client_priv_data;

void mlx5e_sniffer_roce_mode_notify(
	struct mlx5_core_dev *mdev,
	int action);

int mlx5_set_rule_private_data(struct mlx5_flow_rule *rule,
			       struct mlx5_flow_handler *handler,
			       void *client_data);

struct mlx5_flow_handler *mlx5_register_rule_notifier(struct mlx5_core_dev *dev,
						       enum mlx5_flow_namespace_type ns_type,
						       rule_event_fn add_cb,
						       rule_event_fn del_cb,
						       void *context);

void mlx5_unregister_rule_notifier(struct mlx5_flow_handler *handler);

void mlx5_flow_iterate_existing_rules(struct mlx5_flow_namespace *ns,
				      rule_event_fn cb,
				      void *context);

void mlx5_get_match_criteria(u32 *match_criteria,
			     struct mlx5_flow_rule *rule);

void mlx5_get_match_value(u32 *match_value,
			  struct mlx5_flow_rule *rule);

u8 mlx5_get_match_criteria_enable(struct mlx5_flow_rule *rule);

struct mlx5_flow_rules_list *get_roce_flow_rules(u8 roce_mode);

void mlx5_del_flow_rules_list(struct mlx5_flow_rules_list *rules_list);

struct mlx5_flow_rules_list {
	struct list_head head;
};

struct mlx5_flow_rule_node {
	struct	list_head list;
	u32	match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
	u32	match_value[MLX5_ST_SZ_DW(fte_match_param)];
	u8	match_criteria_enable;
};

struct mlx5_core_fs_mask {
	u8	match_criteria_enable;
	u32	match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
};

bool fs_match_exact_val(
	struct mlx5_core_fs_mask *mask,
	void *val1,
	void *val2);

bool fs_match_exact_mask(
	u8 match_criteria_enable1,
	u8 match_criteria_enable2,
	void *mask1,
	void *mask2);
/********** End API for sniffer **********/

struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
						 enum mlx5_flow_namespace_type ns_type,
						 u8 num_actions,
						 void *modify_actions);
void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
				struct mlx5_modify_hdr *modify_hdr);

struct mlx5_pkt_reformat_params {
	int type;
	u8 param_0;
	u8 param_1;
	size_t size;
	void *data;
};

struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
						     struct mlx5_pkt_reformat_params *params,
						     enum mlx5_flow_namespace_type ns_type);
void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
				  struct mlx5_pkt_reformat *pkt_reformat);

/********** Flow counters API **********/
struct mlx5_fc;
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);

/* Like mlx5_fc_create(), but does not queue the stats refresh thread. */
struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging);

void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
			  u64 *bytes, u64 *packets, u64 *lastuse);
int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
		  u64 *packets, u64 *bytes);
u32 mlx5_fc_id(struct mlx5_fc *counter);
/******* End of Flow counters API ******/
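
/*
 * Example (illustrative sketch only): attaching a flow counter to a rule and
 * reading its cached statistics.  MLX5_FLOW_CONTEXT_ACTION_COUNT is assumed
 * to be provided by <dev/mlx5/mlx5_ifc.h>; "mdev", "ft" and "spec" are
 * placeholders set up as in the earlier sketches.
 *
 *	struct mlx5_flow_destination dest = {};
 *	struct mlx5_flow_act flow_act = {};
 *	struct mlx5_flow_handle *rule;
 *	struct mlx5_fc *counter;
 *	u64 bytes, packets, lastuse;
 *
 *	counter = mlx5_fc_create(mdev, true);
 *	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_COUNT;
 *	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
 *	dest.counter_id = mlx5_fc_id(counter);
 *	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
 *
 *	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
 *
 *	mlx5_del_flow_rules(&rule);
 *	mlx5_fc_destroy(mdev, counter);
 */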

u32 mlx5_flow_table_id(struct mlx5_flow_table *ft);
int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
#endif