1 /*- 2 * Copyright (c) 2015 Mellanox Technologies. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 * 25 * $FreeBSD$ 26 */ 27 28 #include "en.h" 29 30 #include <linux/list.h> 31 #include <dev/mlx5/fs.h> 32 #include <dev/mlx5/mpfs.h> 33 34 #define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) 35 36 enum { 37 MLX5E_FULLMATCH = 0, 38 MLX5E_ALLMULTI = 1, 39 MLX5E_PROMISC = 2, 40 }; 41 42 enum { 43 MLX5E_UC = 0, 44 MLX5E_MC_IPV4 = 1, 45 MLX5E_MC_IPV6 = 2, 46 MLX5E_MC_OTHER = 3, 47 }; 48 49 enum { 50 MLX5E_ACTION_NONE = 0, 51 MLX5E_ACTION_ADD = 1, 52 MLX5E_ACTION_DEL = 2, 53 }; 54 55 struct mlx5e_eth_addr_hash_node { 56 LIST_ENTRY(mlx5e_eth_addr_hash_node) hlist; 57 u8 action; 58 u32 mpfs_index; 59 struct mlx5e_eth_addr_info ai; 60 }; 61 62 static inline int 63 mlx5e_hash_eth_addr(const u8 * addr) 64 { 65 return (addr[5]); 66 } 67 68 static bool 69 mlx5e_add_eth_addr_to_hash(struct mlx5e_eth_addr_hash_head *hash, 70 struct mlx5e_eth_addr_hash_node *hn_new) 71 { 72 struct mlx5e_eth_addr_hash_node *hn; 73 u32 ix = mlx5e_hash_eth_addr(hn_new->ai.addr); 74 75 LIST_FOREACH(hn, &hash[ix], hlist) { 76 if (bcmp(hn->ai.addr, hn_new->ai.addr, ETHER_ADDR_LEN) == 0) { 77 if (hn->action == MLX5E_ACTION_DEL) 78 hn->action = MLX5E_ACTION_NONE; 79 free(hn_new, M_MLX5EN); 80 return (false); 81 } 82 } 83 LIST_INSERT_HEAD(&hash[ix], hn_new, hlist); 84 return (true); 85 } 86 87 static void 88 mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn) 89 { 90 LIST_REMOVE(hn, hlist); 91 free(hn, M_MLX5EN); 92 } 93 94 static void 95 mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv, 96 struct mlx5e_eth_addr_info *ai) 97 { 98 if (ai->tt_vec & (1 << MLX5E_TT_IPV6_IPSEC_ESP)) 99 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]); 100 101 if (ai->tt_vec & (1 << MLX5E_TT_IPV4_IPSEC_ESP)) 102 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]); 103 104 if (ai->tt_vec & (1 << MLX5E_TT_IPV6_IPSEC_AH)) 105 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]); 106 107 if (ai->tt_vec & (1 << MLX5E_TT_IPV4_IPSEC_AH)) 108 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]); 109 110 if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP)) 111 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]); 112 113 if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP)) 114 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]); 115 116 if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP)) 117 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]); 118 119 if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP)) 120 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]); 121 122 if (ai->tt_vec & (1 << MLX5E_TT_IPV6)) 123 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]); 124 125 if (ai->tt_vec & (1 << MLX5E_TT_IPV4)) 126 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]); 127 128 if (ai->tt_vec & (1 << MLX5E_TT_ANY)) 129 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]); 130 131 /* ensure the rules are not freed again */ 132 ai->tt_vec = 0; 133 } 134 135 static int 136 mlx5e_get_eth_addr_type(const u8 * addr) 137 { 138 if (ETHER_IS_MULTICAST(addr) == 0) 139 return (MLX5E_UC); 140 141 if ((addr[0] == 0x01) && 142 (addr[1] == 0x00) && 143 (addr[2] == 0x5e) && 144 !(addr[3] & 0x80)) 145 return (MLX5E_MC_IPV4); 146 147 if ((addr[0] == 0x33) && 148 (addr[1] == 0x33)) 149 return (MLX5E_MC_IPV6); 150 151 return (MLX5E_MC_OTHER); 152 } 153 154 static u32 155 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type) 156 { 157 int eth_addr_type; 158 u32 ret; 159 160 switch (type) { 161 case MLX5E_FULLMATCH: 162 eth_addr_type = mlx5e_get_eth_addr_type(ai->addr); 163 switch (eth_addr_type) { 164 case MLX5E_UC: 165 ret = 166 (1 << MLX5E_TT_IPV4_TCP) | 167 (1 << MLX5E_TT_IPV6_TCP) | 168 (1 << MLX5E_TT_IPV4_UDP) | 169 (1 << MLX5E_TT_IPV6_UDP) | 170 (1 << MLX5E_TT_IPV4) | 171 (1 << MLX5E_TT_IPV6) | 172 (1 << MLX5E_TT_ANY) | 173 0; 174 break; 175 176 case MLX5E_MC_IPV4: 177 ret = 178 (1 << MLX5E_TT_IPV4_UDP) | 179 (1 << MLX5E_TT_IPV4) | 180 0; 181 break; 182 183 case MLX5E_MC_IPV6: 184 ret = 185 (1 << MLX5E_TT_IPV6_UDP) | 186 (1 << MLX5E_TT_IPV6) | 187 0; 188 break; 189 190 default: 191 ret = 192 (1 << MLX5E_TT_ANY) | 193 0; 194 break; 195 } 196 break; 197 198 case MLX5E_ALLMULTI: 199 ret = 200 (1 << MLX5E_TT_IPV4_UDP) | 201 (1 << MLX5E_TT_IPV6_UDP) | 202 (1 << MLX5E_TT_IPV4) | 203 (1 << MLX5E_TT_IPV6) | 204 (1 << MLX5E_TT_ANY) | 205 0; 206 break; 207 208 default: /* MLX5E_PROMISC */ 209 ret = 210 (1 << MLX5E_TT_IPV4_TCP) | 211 (1 << MLX5E_TT_IPV6_TCP) | 212 (1 << MLX5E_TT_IPV4_UDP) | 213 (1 << MLX5E_TT_IPV6_UDP) | 214 (1 << MLX5E_TT_IPV4) | 215 (1 << MLX5E_TT_IPV6) | 216 (1 << MLX5E_TT_ANY) | 217 0; 218 break; 219 } 220 221 return (ret); 222 } 223 224 static int 225 mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv, 226 struct mlx5e_eth_addr_info *ai, int type, 227 u32 *mc, u32 *mv) 228 { 229 struct mlx5_flow_destination dest = {}; 230 u8 mc_enable = 0; 231 struct mlx5_flow_rule **rule_p; 232 struct mlx5_flow_table *ft = priv->fts.main.t; 233 u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc, 234 outer_headers.dmac_47_16); 235 u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv, 236 outer_headers.dmac_47_16); 237 u32 *tirn = priv->tirn; 238 u32 tt_vec; 239 int err = 0; 240 241 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; 242 243 switch (type) { 244 case MLX5E_FULLMATCH: 245 mc_enable = MLX5_MATCH_OUTER_HEADERS; 246 memset(mc_dmac, 0xff, ETH_ALEN); 247 ether_addr_copy(mv_dmac, ai->addr); 248 break; 249 250 case MLX5E_ALLMULTI: 251 mc_enable = MLX5_MATCH_OUTER_HEADERS; 252 mc_dmac[0] = 0x01; 253 mv_dmac[0] = 0x01; 254 break; 255 256 case MLX5E_PROMISC: 257 break; 258 default: 259 break; 260 } 261 262 tt_vec = mlx5e_get_tt_vec(ai, type); 263 264 if (tt_vec & BIT(MLX5E_TT_ANY)) { 265 rule_p = &ai->ft_rule[MLX5E_TT_ANY]; 266 dest.tir_num = tirn[MLX5E_TT_ANY]; 267 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv, 268 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 269 MLX5_FS_ETH_FLOW_TAG, &dest); 270 if (IS_ERR_OR_NULL(*rule_p)) 271 goto err_del_ai; 272 ai->tt_vec |= BIT(MLX5E_TT_ANY); 273 } 274 275 mc_enable = MLX5_MATCH_OUTER_HEADERS; 276 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); 277 278 if (tt_vec & BIT(MLX5E_TT_IPV4)) { 279 rule_p = &ai->ft_rule[MLX5E_TT_IPV4]; 280 dest.tir_num = tirn[MLX5E_TT_IPV4]; 281 MLX5_SET(fte_match_param, mv, outer_headers.ethertype, 282 ETHERTYPE_IP); 283 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv, 284 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 285 MLX5_FS_ETH_FLOW_TAG, &dest); 286 if (IS_ERR_OR_NULL(*rule_p)) 287 goto err_del_ai; 288 ai->tt_vec |= BIT(MLX5E_TT_IPV4); 289 } 290 291 if (tt_vec & BIT(MLX5E_TT_IPV6)) { 292 rule_p = &ai->ft_rule[MLX5E_TT_IPV6]; 293 dest.tir_num = tirn[MLX5E_TT_IPV6]; 294 MLX5_SET(fte_match_param, mv, outer_headers.ethertype, 295 ETHERTYPE_IPV6); 296 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv, 297 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 298 MLX5_FS_ETH_FLOW_TAG, &dest); 299 if (IS_ERR_OR_NULL(*rule_p)) 300 goto err_del_ai; 301 ai->tt_vec |= BIT(MLX5E_TT_IPV6); 302 } 303 304 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); 305 MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP); 306 307 if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) { 308 rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP]; 309 dest.tir_num = tirn[MLX5E_TT_IPV4_UDP]; 310 MLX5_SET(fte_match_param, mv, outer_headers.ethertype, 311 ETHERTYPE_IP); 312 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv, 313 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 314 MLX5_FS_ETH_FLOW_TAG, &dest); 315 if (IS_ERR_OR_NULL(*rule_p)) 316 goto err_del_ai; 317 ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP); 318 } 319 320 if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) { 321 rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP]; 322 dest.tir_num = tirn[MLX5E_TT_IPV6_UDP]; 323 MLX5_SET(fte_match_param, mv, outer_headers.ethertype, 324 ETHERTYPE_IPV6); 325 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv, 326 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 327 MLX5_FS_ETH_FLOW_TAG, &dest); 328 if (IS_ERR_OR_NULL(*rule_p)) 329 goto err_del_ai; 330 ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP); 331 } 332 333 MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP); 334 335 if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) { 336 rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP]; 337 dest.tir_num = tirn[MLX5E_TT_IPV4_TCP]; 338 MLX5_SET(fte_match_param, mv, outer_headers.ethertype, 339 ETHERTYPE_IP); 340 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv, 341 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 342 MLX5_FS_ETH_FLOW_TAG, &dest); 343 if (IS_ERR_OR_NULL(*rule_p)) 344 goto err_del_ai; 345 ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP); 346 } 347 348 if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) { 349 rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP]; 350 dest.tir_num = tirn[MLX5E_TT_IPV6_TCP]; 351 MLX5_SET(fte_match_param, mv, outer_headers.ethertype, 352 ETHERTYPE_IPV6); 353 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv, 354 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 355 MLX5_FS_ETH_FLOW_TAG, &dest); 356 if (IS_ERR_OR_NULL(*rule_p)) 357 goto err_del_ai; 358 359 ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP); 360 } 361 362 MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH); 363 364 if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) { 365 rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]; 366 dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH]; 367 MLX5_SET(fte_match_param, mv, outer_headers.ethertype, 368 ETHERTYPE_IP); 369 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv, 370 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 371 MLX5_FS_ETH_FLOW_TAG, &dest); 372 if (IS_ERR_OR_NULL(*rule_p)) 373 goto err_del_ai; 374 ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH); 375 } 376 377 if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) { 378 rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]; 379 dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH]; 380 MLX5_SET(fte_match_param, mv, outer_headers.ethertype, 381 ETHERTYPE_IPV6); 382 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv, 383 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 384 MLX5_FS_ETH_FLOW_TAG, &dest); 385 if (IS_ERR_OR_NULL(*rule_p)) 386 goto err_del_ai; 387 ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH); 388 } 389 390 MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP); 391 392 if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) { 393 rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]; 394 dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP]; 395 MLX5_SET(fte_match_param, mv, outer_headers.ethertype, 396 ETHERTYPE_IP); 397 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv, 398 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 399 MLX5_FS_ETH_FLOW_TAG, &dest); 400 if (IS_ERR_OR_NULL(*rule_p)) 401 goto err_del_ai; 402 ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP); 403 } 404 405 if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) { 406 rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]; 407 dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP]; 408 MLX5_SET(fte_match_param, mv, outer_headers.ethertype, 409 ETHERTYPE_IPV6); 410 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv, 411 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 412 MLX5_FS_ETH_FLOW_TAG, &dest); 413 if (IS_ERR_OR_NULL(*rule_p)) 414 goto err_del_ai; 415 ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP); 416 } 417 418 return 0; 419 420 err_del_ai: 421 err = PTR_ERR(*rule_p); 422 *rule_p = NULL; 423 mlx5e_del_eth_addr_from_flow_table(priv, ai); 424 425 return err; 426 } 427 428 static int 429 mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, 430 struct mlx5e_eth_addr_info *ai, int type) 431 { 432 u32 *match_criteria; 433 u32 *match_value; 434 int err = 0; 435 436 match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); 437 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); 438 if (!match_value || !match_criteria) { 439 mlx5_en_err(priv->ifp, "alloc failed\n"); 440 err = -ENOMEM; 441 goto add_eth_addr_rule_out; 442 } 443 err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, match_criteria, 444 match_value); 445 446 add_eth_addr_rule_out: 447 kvfree(match_criteria); 448 kvfree(match_value); 449 450 return (err); 451 } 452 453 static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) 454 { 455 struct ifnet *ifp = priv->ifp; 456 int max_list_size; 457 int list_size; 458 u16 *vlans; 459 int vlan; 460 int err; 461 int i; 462 463 list_size = 0; 464 for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) 465 list_size++; 466 467 max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list); 468 469 if (list_size > max_list_size) { 470 mlx5_en_err(ifp, 471 "ifnet vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n", 472 list_size, max_list_size); 473 list_size = max_list_size; 474 } 475 476 vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL); 477 if (!vlans) 478 return -ENOMEM; 479 480 i = 0; 481 for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) { 482 if (i >= list_size) 483 break; 484 vlans[i++] = vlan; 485 } 486 487 err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size); 488 if (err) 489 mlx5_en_err(ifp, "Failed to modify vport vlans list err(%d)\n", 490 err); 491 492 kfree(vlans); 493 return err; 494 } 495 496 enum mlx5e_vlan_rule_type { 497 MLX5E_VLAN_RULE_TYPE_UNTAGGED, 498 MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 499 MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 500 MLX5E_VLAN_RULE_TYPE_MATCH_VID, 501 }; 502 503 static int 504 mlx5e_add_vlan_rule_sub(struct mlx5e_priv *priv, 505 enum mlx5e_vlan_rule_type rule_type, u16 vid, 506 u32 *mc, u32 *mv) 507 { 508 struct mlx5_flow_table *ft = priv->fts.vlan.t; 509 struct mlx5_flow_destination dest = {}; 510 u8 mc_enable = 0; 511 struct mlx5_flow_rule **rule_p; 512 int err = 0; 513 514 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 515 dest.ft = priv->fts.main.t; 516 517 mc_enable = MLX5_MATCH_OUTER_HEADERS; 518 519 switch (rule_type) { 520 case MLX5E_VLAN_RULE_TYPE_UNTAGGED: 521 rule_p = &priv->vlan.untagged_ft_rule; 522 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag); 523 break; 524 case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID: 525 rule_p = &priv->vlan.any_cvlan_ft_rule; 526 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag); 527 MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1); 528 break; 529 case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID: 530 rule_p = &priv->vlan.any_svlan_ft_rule; 531 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag); 532 MLX5_SET(fte_match_param, mv, outer_headers.svlan_tag, 1); 533 break; 534 default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */ 535 rule_p = &priv->vlan.active_vlans_ft_rule[vid]; 536 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag); 537 MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1); 538 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid); 539 MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid); 540 mlx5e_vport_context_update_vlans(priv); 541 break; 542 } 543 544 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv, 545 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 546 MLX5_FS_ETH_FLOW_TAG, 547 &dest); 548 549 if (IS_ERR(*rule_p)) { 550 err = PTR_ERR(*rule_p); 551 *rule_p = NULL; 552 mlx5_en_err(priv->ifp, "add rule failed\n"); 553 } 554 555 return (err); 556 } 557 558 static int 559 mlx5e_add_vlan_rule(struct mlx5e_priv *priv, 560 enum mlx5e_vlan_rule_type rule_type, u16 vid) 561 { 562 u32 *match_criteria; 563 u32 *match_value; 564 int err = 0; 565 566 match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); 567 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); 568 if (!match_value || !match_criteria) { 569 mlx5_en_err(priv->ifp, "alloc failed\n"); 570 err = -ENOMEM; 571 goto add_vlan_rule_out; 572 } 573 574 err = mlx5e_add_vlan_rule_sub(priv, rule_type, vid, match_criteria, 575 match_value); 576 577 add_vlan_rule_out: 578 kvfree(match_criteria); 579 kvfree(match_value); 580 581 return (err); 582 } 583 584 static void 585 mlx5e_del_vlan_rule(struct mlx5e_priv *priv, 586 enum mlx5e_vlan_rule_type rule_type, u16 vid) 587 { 588 switch (rule_type) { 589 case MLX5E_VLAN_RULE_TYPE_UNTAGGED: 590 if (priv->vlan.untagged_ft_rule) { 591 mlx5_del_flow_rule(priv->vlan.untagged_ft_rule); 592 priv->vlan.untagged_ft_rule = NULL; 593 } 594 break; 595 case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID: 596 if (priv->vlan.any_cvlan_ft_rule) { 597 mlx5_del_flow_rule(priv->vlan.any_cvlan_ft_rule); 598 priv->vlan.any_cvlan_ft_rule = NULL; 599 } 600 break; 601 case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID: 602 if (priv->vlan.any_svlan_ft_rule) { 603 mlx5_del_flow_rule(priv->vlan.any_svlan_ft_rule); 604 priv->vlan.any_svlan_ft_rule = NULL; 605 } 606 break; 607 case MLX5E_VLAN_RULE_TYPE_MATCH_VID: 608 if (priv->vlan.active_vlans_ft_rule[vid]) { 609 mlx5_del_flow_rule(priv->vlan.active_vlans_ft_rule[vid]); 610 priv->vlan.active_vlans_ft_rule[vid] = NULL; 611 } 612 mlx5e_vport_context_update_vlans(priv); 613 break; 614 default: 615 break; 616 } 617 } 618 619 static void 620 mlx5e_del_any_vid_rules(struct mlx5e_priv *priv) 621 { 622 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); 623 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0); 624 } 625 626 static int 627 mlx5e_add_any_vid_rules(struct mlx5e_priv *priv) 628 { 629 int err; 630 631 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); 632 if (err) 633 return (err); 634 635 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0); 636 if (err) 637 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); 638 639 return (err); 640 } 641 642 void 643 mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) 644 { 645 if (priv->vlan.filter_disabled) { 646 priv->vlan.filter_disabled = false; 647 if (priv->ifp->if_flags & IFF_PROMISC) 648 return; 649 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) 650 mlx5e_del_any_vid_rules(priv); 651 } 652 } 653 654 void 655 mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) 656 { 657 if (!priv->vlan.filter_disabled) { 658 priv->vlan.filter_disabled = true; 659 if (priv->ifp->if_flags & IFF_PROMISC) 660 return; 661 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) 662 mlx5e_add_any_vid_rules(priv); 663 } 664 } 665 666 void 667 mlx5e_vlan_rx_add_vid(void *arg, struct ifnet *ifp, u16 vid) 668 { 669 struct mlx5e_priv *priv = arg; 670 671 if (ifp != priv->ifp) 672 return; 673 674 PRIV_LOCK(priv); 675 if (!test_and_set_bit(vid, priv->vlan.active_vlans) && 676 test_bit(MLX5E_STATE_OPENED, &priv->state)) 677 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); 678 PRIV_UNLOCK(priv); 679 } 680 681 void 682 mlx5e_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, u16 vid) 683 { 684 struct mlx5e_priv *priv = arg; 685 686 if (ifp != priv->ifp) 687 return; 688 689 PRIV_LOCK(priv); 690 clear_bit(vid, priv->vlan.active_vlans); 691 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) 692 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); 693 PRIV_UNLOCK(priv); 694 } 695 696 int 697 mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv) 698 { 699 int err; 700 int i; 701 702 set_bit(0, priv->vlan.active_vlans); 703 for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID) { 704 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, 705 i); 706 if (err) 707 goto error; 708 } 709 710 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); 711 if (err) 712 goto error; 713 714 if (priv->vlan.filter_disabled) { 715 err = mlx5e_add_any_vid_rules(priv); 716 if (err) 717 goto error; 718 } 719 return (0); 720 error: 721 mlx5e_del_all_vlan_rules(priv); 722 return (err); 723 } 724 725 void 726 mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv) 727 { 728 int i; 729 730 if (priv->vlan.filter_disabled) 731 mlx5e_del_any_vid_rules(priv); 732 733 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); 734 735 for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID) 736 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i); 737 clear_bit(0, priv->vlan.active_vlans); 738 } 739 740 #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \ 741 for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \ 742 LIST_FOREACH_SAFE(hn, &(hash)[i], hlist, tmp) 743 744 static void 745 mlx5e_execute_action(struct mlx5e_priv *priv, 746 struct mlx5e_eth_addr_hash_node *hn) 747 { 748 switch (hn->action) { 749 case MLX5E_ACTION_ADD: 750 mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH); 751 hn->action = MLX5E_ACTION_NONE; 752 break; 753 754 case MLX5E_ACTION_DEL: 755 mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai); 756 if (hn->mpfs_index != -1U) 757 mlx5_mpfs_del_mac(priv->mdev, hn->mpfs_index); 758 mlx5e_del_eth_addr_from_hash(hn); 759 break; 760 761 default: 762 break; 763 } 764 } 765 766 static struct mlx5e_eth_addr_hash_node * 767 mlx5e_move_hn(struct mlx5e_eth_addr_hash_head *fh, struct mlx5e_eth_addr_hash_head *uh) 768 { 769 struct mlx5e_eth_addr_hash_node *hn; 770 771 hn = LIST_FIRST(fh); 772 if (hn != NULL) { 773 LIST_REMOVE(hn, hlist); 774 LIST_INSERT_HEAD(uh, hn, hlist); 775 } 776 return (hn); 777 } 778 779 static struct mlx5e_eth_addr_hash_node * 780 mlx5e_remove_hn(struct mlx5e_eth_addr_hash_head *fh) 781 { 782 struct mlx5e_eth_addr_hash_node *hn; 783 784 hn = LIST_FIRST(fh); 785 if (hn != NULL) 786 LIST_REMOVE(hn, hlist); 787 return (hn); 788 } 789 790 struct mlx5e_copy_addr_ctx { 791 struct mlx5e_eth_addr_hash_head *free; 792 struct mlx5e_eth_addr_hash_head *fill; 793 bool success; 794 }; 795 796 static u_int 797 mlx5e_copy_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 798 { 799 struct mlx5e_copy_addr_ctx *ctx = arg; 800 struct mlx5e_eth_addr_hash_node *hn; 801 802 hn = mlx5e_move_hn(ctx->free, ctx->fill); 803 if (hn == NULL) { 804 ctx->success = false; 805 return (0); 806 } 807 ether_addr_copy(hn->ai.addr, LLADDR(sdl)); 808 809 return (1); 810 } 811 812 static void 813 mlx5e_sync_ifp_addr(struct mlx5e_priv *priv) 814 { 815 struct mlx5e_copy_addr_ctx ctx; 816 struct mlx5e_eth_addr_hash_head head_free; 817 struct mlx5e_eth_addr_hash_head head_uc; 818 struct mlx5e_eth_addr_hash_head head_mc; 819 struct mlx5e_eth_addr_hash_node *hn; 820 struct ifnet *ifp = priv->ifp; 821 size_t x; 822 size_t num; 823 824 PRIV_ASSERT_LOCKED(priv); 825 826 retry: 827 LIST_INIT(&head_free); 828 LIST_INIT(&head_uc); 829 LIST_INIT(&head_mc); 830 num = 1 + if_lladdr_count(ifp) + if_llmaddr_count(ifp); 831 832 /* allocate place holders */ 833 for (x = 0; x != num; x++) { 834 hn = malloc(sizeof(*hn), M_MLX5EN, M_WAITOK | M_ZERO); 835 hn->action = MLX5E_ACTION_ADD; 836 hn->mpfs_index = -1U; 837 LIST_INSERT_HEAD(&head_free, hn, hlist); 838 } 839 840 hn = mlx5e_move_hn(&head_free, &head_uc); 841 MPASS(hn != NULL); 842 843 ether_addr_copy(hn->ai.addr, 844 LLADDR((struct sockaddr_dl *)(ifp->if_addr->ifa_addr))); 845 846 ctx.free = &head_free; 847 ctx.fill = &head_uc; 848 ctx.success = true; 849 if_foreach_lladdr(ifp, mlx5e_copy_addr, &ctx); 850 if (ctx.success == false) 851 goto cleanup; 852 853 ctx.fill = &head_mc; 854 if_foreach_llmaddr(ifp, mlx5e_copy_addr, &ctx); 855 if (ctx.success == false) 856 goto cleanup; 857 858 /* insert L2 unicast addresses into hash list */ 859 860 while ((hn = mlx5e_remove_hn(&head_uc)) != NULL) { 861 if (mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc, hn) == 0) 862 continue; 863 if (hn->mpfs_index == -1U) 864 mlx5_mpfs_add_mac(priv->mdev, &hn->mpfs_index, hn->ai.addr); 865 } 866 867 /* insert L2 multicast addresses into hash list */ 868 869 while ((hn = mlx5e_remove_hn(&head_mc)) != NULL) { 870 if (mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_mc, hn) == 0) 871 continue; 872 } 873 874 cleanup: 875 while ((hn = mlx5e_remove_hn(&head_uc)) != NULL) 876 free(hn, M_MLX5EN); 877 while ((hn = mlx5e_remove_hn(&head_mc)) != NULL) 878 free(hn, M_MLX5EN); 879 while ((hn = mlx5e_remove_hn(&head_free)) != NULL) 880 free(hn, M_MLX5EN); 881 882 if (ctx.success == false) 883 goto retry; 884 } 885 886 static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type, 887 u8 addr_array[][ETH_ALEN], int size) 888 { 889 bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC); 890 struct ifnet *ifp = priv->ifp; 891 struct mlx5e_eth_addr_hash_node *hn; 892 struct mlx5e_eth_addr_hash_head *addr_list; 893 struct mlx5e_eth_addr_hash_node *tmp; 894 int i = 0; 895 int hi; 896 897 addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc; 898 899 if (is_uc) /* Make sure our own address is pushed first */ 900 ether_addr_copy(addr_array[i++], IF_LLADDR(ifp)); 901 else if (priv->eth_addr.broadcast_enabled) 902 ether_addr_copy(addr_array[i++], ifp->if_broadcastaddr); 903 904 mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) { 905 if (ether_addr_equal(IF_LLADDR(ifp), hn->ai.addr)) 906 continue; 907 if (i >= size) 908 break; 909 ether_addr_copy(addr_array[i++], hn->ai.addr); 910 } 911 } 912 913 static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv, 914 int list_type) 915 { 916 bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC); 917 struct mlx5e_eth_addr_hash_node *hn; 918 u8 (*addr_array)[ETH_ALEN] = NULL; 919 struct mlx5e_eth_addr_hash_head *addr_list; 920 struct mlx5e_eth_addr_hash_node *tmp; 921 int max_size; 922 int size; 923 int err; 924 int hi; 925 926 size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0); 927 max_size = is_uc ? 928 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) : 929 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list); 930 931 addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc; 932 mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) 933 size++; 934 935 if (size > max_size) { 936 mlx5_en_err(priv->ifp, 937 "ifp %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n", 938 is_uc ? "UC" : "MC", size, max_size); 939 size = max_size; 940 } 941 942 if (size) { 943 addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL); 944 if (!addr_array) { 945 err = -ENOMEM; 946 goto out; 947 } 948 mlx5e_fill_addr_array(priv, list_type, addr_array, size); 949 } 950 951 err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size); 952 out: 953 if (err) 954 mlx5_en_err(priv->ifp, 955 "Failed to modify vport %s list err(%d)\n", 956 is_uc ? "UC" : "MC", err); 957 kfree(addr_array); 958 } 959 960 static void mlx5e_vport_context_update(struct mlx5e_priv *priv) 961 { 962 struct mlx5e_eth_addr_db *ea = &priv->eth_addr; 963 964 mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_UC); 965 mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_MC); 966 mlx5_modify_nic_vport_promisc(priv->mdev, 0, 967 ea->allmulti_enabled, 968 ea->promisc_enabled); 969 } 970 971 static void 972 mlx5e_apply_ifp_addr(struct mlx5e_priv *priv) 973 { 974 struct mlx5e_eth_addr_hash_node *hn; 975 struct mlx5e_eth_addr_hash_node *tmp; 976 int i; 977 978 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i) 979 mlx5e_execute_action(priv, hn); 980 981 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i) 982 mlx5e_execute_action(priv, hn); 983 } 984 985 static void 986 mlx5e_handle_ifp_addr(struct mlx5e_priv *priv) 987 { 988 struct mlx5e_eth_addr_hash_node *hn; 989 struct mlx5e_eth_addr_hash_node *tmp; 990 int i; 991 992 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i) 993 hn->action = MLX5E_ACTION_DEL; 994 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i) 995 hn->action = MLX5E_ACTION_DEL; 996 997 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) 998 mlx5e_sync_ifp_addr(priv); 999 1000 mlx5e_apply_ifp_addr(priv); 1001 } 1002 1003 void 1004 mlx5e_set_rx_mode_core(struct mlx5e_priv *priv) 1005 { 1006 struct mlx5e_eth_addr_db *ea = &priv->eth_addr; 1007 struct ifnet *ndev = priv->ifp; 1008 1009 bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state); 1010 bool promisc_enabled = rx_mode_enable && (ndev->if_flags & IFF_PROMISC); 1011 bool allmulti_enabled = rx_mode_enable && (ndev->if_flags & IFF_ALLMULTI); 1012 bool broadcast_enabled = rx_mode_enable; 1013 1014 bool enable_promisc = !ea->promisc_enabled && promisc_enabled; 1015 bool disable_promisc = ea->promisc_enabled && !promisc_enabled; 1016 bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled; 1017 bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled; 1018 bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled; 1019 bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled; 1020 1021 /* update broadcast address */ 1022 ether_addr_copy(priv->eth_addr.broadcast.addr, 1023 priv->ifp->if_broadcastaddr); 1024 1025 if (enable_promisc) { 1026 mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC); 1027 if (!priv->vlan.filter_disabled) 1028 mlx5e_add_any_vid_rules(priv); 1029 } 1030 if (enable_allmulti) 1031 mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI); 1032 if (enable_broadcast) 1033 mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH); 1034 1035 mlx5e_handle_ifp_addr(priv); 1036 1037 if (disable_broadcast) 1038 mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast); 1039 if (disable_allmulti) 1040 mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti); 1041 if (disable_promisc) { 1042 if (!priv->vlan.filter_disabled) 1043 mlx5e_del_any_vid_rules(priv); 1044 mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc); 1045 } 1046 1047 ea->promisc_enabled = promisc_enabled; 1048 ea->allmulti_enabled = allmulti_enabled; 1049 ea->broadcast_enabled = broadcast_enabled; 1050 1051 mlx5e_vport_context_update(priv); 1052 } 1053 1054 void 1055 mlx5e_set_rx_mode_work(struct work_struct *work) 1056 { 1057 struct mlx5e_priv *priv = 1058 container_of(work, struct mlx5e_priv, set_rx_mode_work); 1059 1060 PRIV_LOCK(priv); 1061 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) 1062 mlx5e_set_rx_mode_core(priv); 1063 PRIV_UNLOCK(priv); 1064 } 1065 1066 static void 1067 mlx5e_destroy_groups(struct mlx5e_flow_table *ft) 1068 { 1069 int i; 1070 1071 for (i = ft->num_groups - 1; i >= 0; i--) { 1072 if (!IS_ERR_OR_NULL(ft->g[i])) 1073 mlx5_destroy_flow_group(ft->g[i]); 1074 ft->g[i] = NULL; 1075 } 1076 ft->num_groups = 0; 1077 } 1078 1079 static void 1080 mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft) 1081 { 1082 mlx5e_destroy_groups(ft); 1083 kfree(ft->g); 1084 mlx5_destroy_flow_table(ft->t); 1085 ft->t = NULL; 1086 } 1087 1088 #define MLX5E_NUM_MAIN_GROUPS 10 1089 #define MLX5E_MAIN_GROUP0_SIZE BIT(4) 1090 #define MLX5E_MAIN_GROUP1_SIZE BIT(3) 1091 #define MLX5E_MAIN_GROUP2_SIZE BIT(1) 1092 #define MLX5E_MAIN_GROUP3_SIZE BIT(0) 1093 #define MLX5E_MAIN_GROUP4_SIZE BIT(14) 1094 #define MLX5E_MAIN_GROUP5_SIZE BIT(13) 1095 #define MLX5E_MAIN_GROUP6_SIZE BIT(11) 1096 #define MLX5E_MAIN_GROUP7_SIZE BIT(2) 1097 #define MLX5E_MAIN_GROUP8_SIZE BIT(1) 1098 #define MLX5E_MAIN_GROUP9_SIZE BIT(0) 1099 #define MLX5E_MAIN_TABLE_SIZE (MLX5E_MAIN_GROUP0_SIZE +\ 1100 MLX5E_MAIN_GROUP1_SIZE +\ 1101 MLX5E_MAIN_GROUP2_SIZE +\ 1102 MLX5E_MAIN_GROUP3_SIZE +\ 1103 MLX5E_MAIN_GROUP4_SIZE +\ 1104 MLX5E_MAIN_GROUP5_SIZE +\ 1105 MLX5E_MAIN_GROUP6_SIZE +\ 1106 MLX5E_MAIN_GROUP7_SIZE +\ 1107 MLX5E_MAIN_GROUP8_SIZE +\ 1108 MLX5E_MAIN_GROUP9_SIZE +\ 1109 0) 1110 1111 static int 1112 mlx5e_create_main_groups_sub(struct mlx5e_flow_table *ft, u32 *in, 1113 int inlen) 1114 { 1115 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); 1116 u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in, 1117 match_criteria.outer_headers.dmac_47_16); 1118 int err; 1119 int ix = 0; 1120 1121 /* Tunnel rules need to be first in this list of groups */ 1122 1123 /* Start tunnel rules */ 1124 memset(in, 0, inlen); 1125 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1126 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); 1127 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); 1128 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport); 1129 MLX5_SET_CFG(in, start_flow_index, ix); 1130 ix += MLX5E_MAIN_GROUP0_SIZE; 1131 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1132 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1133 if (IS_ERR(ft->g[ft->num_groups])) 1134 goto err_destory_groups; 1135 ft->num_groups++; 1136 /* End Tunnel Rules */ 1137 1138 memset(in, 0, inlen); 1139 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1140 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); 1141 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); 1142 MLX5_SET_CFG(in, start_flow_index, ix); 1143 ix += MLX5E_MAIN_GROUP1_SIZE; 1144 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1145 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1146 if (IS_ERR(ft->g[ft->num_groups])) 1147 goto err_destory_groups; 1148 ft->num_groups++; 1149 1150 memset(in, 0, inlen); 1151 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1152 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); 1153 MLX5_SET_CFG(in, start_flow_index, ix); 1154 ix += MLX5E_MAIN_GROUP2_SIZE; 1155 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1156 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1157 if (IS_ERR(ft->g[ft->num_groups])) 1158 goto err_destory_groups; 1159 ft->num_groups++; 1160 1161 memset(in, 0, inlen); 1162 MLX5_SET_CFG(in, start_flow_index, ix); 1163 ix += MLX5E_MAIN_GROUP3_SIZE; 1164 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1165 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1166 if (IS_ERR(ft->g[ft->num_groups])) 1167 goto err_destory_groups; 1168 ft->num_groups++; 1169 1170 memset(in, 0, inlen); 1171 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1172 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); 1173 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); 1174 memset(dmac, 0xff, ETH_ALEN); 1175 MLX5_SET_CFG(in, start_flow_index, ix); 1176 ix += MLX5E_MAIN_GROUP4_SIZE; 1177 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1178 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1179 if (IS_ERR(ft->g[ft->num_groups])) 1180 goto err_destory_groups; 1181 ft->num_groups++; 1182 1183 memset(in, 0, inlen); 1184 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1185 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); 1186 memset(dmac, 0xff, ETH_ALEN); 1187 MLX5_SET_CFG(in, start_flow_index, ix); 1188 ix += MLX5E_MAIN_GROUP5_SIZE; 1189 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1190 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1191 if (IS_ERR(ft->g[ft->num_groups])) 1192 goto err_destory_groups; 1193 ft->num_groups++; 1194 1195 memset(in, 0, inlen); 1196 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1197 memset(dmac, 0xff, ETH_ALEN); 1198 MLX5_SET_CFG(in, start_flow_index, ix); 1199 ix += MLX5E_MAIN_GROUP6_SIZE; 1200 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1201 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1202 if (IS_ERR(ft->g[ft->num_groups])) 1203 goto err_destory_groups; 1204 ft->num_groups++; 1205 1206 memset(in, 0, inlen); 1207 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1208 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); 1209 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); 1210 dmac[0] = 0x01; 1211 MLX5_SET_CFG(in, start_flow_index, ix); 1212 ix += MLX5E_MAIN_GROUP7_SIZE; 1213 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1214 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1215 if (IS_ERR(ft->g[ft->num_groups])) 1216 goto err_destory_groups; 1217 ft->num_groups++; 1218 1219 memset(in, 0, inlen); 1220 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1221 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); 1222 dmac[0] = 0x01; 1223 MLX5_SET_CFG(in, start_flow_index, ix); 1224 ix += MLX5E_MAIN_GROUP8_SIZE; 1225 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1226 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1227 if (IS_ERR(ft->g[ft->num_groups])) 1228 goto err_destory_groups; 1229 ft->num_groups++; 1230 1231 memset(in, 0, inlen); 1232 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1233 dmac[0] = 0x01; 1234 MLX5_SET_CFG(in, start_flow_index, ix); 1235 ix += MLX5E_MAIN_GROUP9_SIZE; 1236 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1237 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1238 if (IS_ERR(ft->g[ft->num_groups])) 1239 goto err_destory_groups; 1240 ft->num_groups++; 1241 1242 return (0); 1243 1244 err_destory_groups: 1245 err = PTR_ERR(ft->g[ft->num_groups]); 1246 ft->g[ft->num_groups] = NULL; 1247 mlx5e_destroy_groups(ft); 1248 1249 return (err); 1250 } 1251 1252 static int 1253 mlx5e_create_main_groups(struct mlx5e_flow_table *ft) 1254 { 1255 u32 *in; 1256 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1257 int err; 1258 1259 in = mlx5_vzalloc(inlen); 1260 if (!in) 1261 return (-ENOMEM); 1262 1263 err = mlx5e_create_main_groups_sub(ft, in, inlen); 1264 1265 kvfree(in); 1266 return (err); 1267 } 1268 1269 static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv) 1270 { 1271 struct mlx5e_flow_table *ft = &priv->fts.main; 1272 int err; 1273 1274 ft->num_groups = 0; 1275 ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "main", 1276 MLX5E_MAIN_TABLE_SIZE); 1277 1278 if (IS_ERR(ft->t)) { 1279 err = PTR_ERR(ft->t); 1280 ft->t = NULL; 1281 return (err); 1282 } 1283 ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL); 1284 if (!ft->g) { 1285 err = -ENOMEM; 1286 goto err_destroy_main_flow_table; 1287 } 1288 1289 err = mlx5e_create_main_groups(ft); 1290 if (err) 1291 goto err_free_g; 1292 return (0); 1293 1294 err_free_g: 1295 kfree(ft->g); 1296 1297 err_destroy_main_flow_table: 1298 mlx5_destroy_flow_table(ft->t); 1299 ft->t = NULL; 1300 1301 return (err); 1302 } 1303 1304 static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv) 1305 { 1306 mlx5e_destroy_flow_table(&priv->fts.main); 1307 } 1308 1309 #define MLX5E_NUM_VLAN_GROUPS 3 1310 #define MLX5E_VLAN_GROUP0_SIZE BIT(12) 1311 #define MLX5E_VLAN_GROUP1_SIZE BIT(1) 1312 #define MLX5E_VLAN_GROUP2_SIZE BIT(0) 1313 #define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\ 1314 MLX5E_VLAN_GROUP1_SIZE +\ 1315 MLX5E_VLAN_GROUP2_SIZE +\ 1316 0) 1317 1318 static int 1319 mlx5e_create_vlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in, 1320 int inlen) 1321 { 1322 int err; 1323 int ix = 0; 1324 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); 1325 1326 memset(in, 0, inlen); 1327 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1328 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag); 1329 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid); 1330 MLX5_SET_CFG(in, start_flow_index, ix); 1331 ix += MLX5E_VLAN_GROUP0_SIZE; 1332 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1333 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1334 if (IS_ERR(ft->g[ft->num_groups])) 1335 goto err_destory_groups; 1336 ft->num_groups++; 1337 1338 memset(in, 0, inlen); 1339 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1340 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag); 1341 MLX5_SET_CFG(in, start_flow_index, ix); 1342 ix += MLX5E_VLAN_GROUP1_SIZE; 1343 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1344 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1345 if (IS_ERR(ft->g[ft->num_groups])) 1346 goto err_destory_groups; 1347 ft->num_groups++; 1348 1349 memset(in, 0, inlen); 1350 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1351 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag); 1352 MLX5_SET_CFG(in, start_flow_index, ix); 1353 ix += MLX5E_VLAN_GROUP2_SIZE; 1354 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1355 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1356 if (IS_ERR(ft->g[ft->num_groups])) 1357 goto err_destory_groups; 1358 ft->num_groups++; 1359 1360 return (0); 1361 1362 err_destory_groups: 1363 err = PTR_ERR(ft->g[ft->num_groups]); 1364 ft->g[ft->num_groups] = NULL; 1365 mlx5e_destroy_groups(ft); 1366 1367 return (err); 1368 } 1369 1370 static int 1371 mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft) 1372 { 1373 u32 *in; 1374 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1375 int err; 1376 1377 in = mlx5_vzalloc(inlen); 1378 if (!in) 1379 return (-ENOMEM); 1380 1381 err = mlx5e_create_vlan_groups_sub(ft, in, inlen); 1382 1383 kvfree(in); 1384 return (err); 1385 } 1386 1387 static int 1388 mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv) 1389 { 1390 struct mlx5e_flow_table *ft = &priv->fts.vlan; 1391 int err; 1392 1393 ft->num_groups = 0; 1394 ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "vlan", 1395 MLX5E_VLAN_TABLE_SIZE); 1396 1397 if (IS_ERR(ft->t)) { 1398 err = PTR_ERR(ft->t); 1399 ft->t = NULL; 1400 return (err); 1401 } 1402 ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL); 1403 if (!ft->g) { 1404 err = -ENOMEM; 1405 goto err_destroy_vlan_flow_table; 1406 } 1407 1408 err = mlx5e_create_vlan_groups(ft); 1409 if (err) 1410 goto err_free_g; 1411 1412 return (0); 1413 1414 err_free_g: 1415 kfree(ft->g); 1416 1417 err_destroy_vlan_flow_table: 1418 mlx5_destroy_flow_table(ft->t); 1419 ft->t = NULL; 1420 1421 return (err); 1422 } 1423 1424 static void 1425 mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv) 1426 { 1427 mlx5e_destroy_flow_table(&priv->fts.vlan); 1428 } 1429 1430 #define MLX5E_NUM_INNER_RSS_GROUPS 3 1431 #define MLX5E_INNER_RSS_GROUP0_SIZE BIT(3) 1432 #define MLX5E_INNER_RSS_GROUP1_SIZE BIT(1) 1433 #define MLX5E_INNER_RSS_GROUP2_SIZE BIT(0) 1434 #define MLX5E_INNER_RSS_TABLE_SIZE (MLX5E_INNER_RSS_GROUP0_SIZE +\ 1435 MLX5E_INNER_RSS_GROUP1_SIZE +\ 1436 MLX5E_INNER_RSS_GROUP2_SIZE +\ 1437 0) 1438 1439 static int 1440 mlx5e_create_inner_rss_groups_sub(struct mlx5e_flow_table *ft, u32 *in, 1441 int inlen) 1442 { 1443 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); 1444 int err; 1445 int ix = 0; 1446 1447 memset(in, 0, inlen); 1448 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS); 1449 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype); 1450 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol); 1451 MLX5_SET_CFG(in, start_flow_index, ix); 1452 ix += MLX5E_INNER_RSS_GROUP0_SIZE; 1453 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1454 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1455 if (IS_ERR(ft->g[ft->num_groups])) 1456 goto err_destory_groups; 1457 ft->num_groups++; 1458 1459 memset(in, 0, inlen); 1460 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS); 1461 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype); 1462 MLX5_SET_CFG(in, start_flow_index, ix); 1463 ix += MLX5E_INNER_RSS_GROUP1_SIZE; 1464 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1465 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1466 if (IS_ERR(ft->g[ft->num_groups])) 1467 goto err_destory_groups; 1468 ft->num_groups++; 1469 1470 memset(in, 0, inlen); 1471 MLX5_SET_CFG(in, start_flow_index, ix); 1472 ix += MLX5E_INNER_RSS_GROUP2_SIZE; 1473 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1474 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1475 if (IS_ERR(ft->g[ft->num_groups])) 1476 goto err_destory_groups; 1477 ft->num_groups++; 1478 1479 return (0); 1480 1481 err_destory_groups: 1482 err = PTR_ERR(ft->g[ft->num_groups]); 1483 ft->g[ft->num_groups] = NULL; 1484 mlx5e_destroy_groups(ft); 1485 1486 return (err); 1487 } 1488 1489 static int 1490 mlx5e_create_inner_rss_groups(struct mlx5e_flow_table *ft) 1491 { 1492 u32 *in; 1493 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1494 int err; 1495 1496 in = mlx5_vzalloc(inlen); 1497 if (!in) 1498 return (-ENOMEM); 1499 1500 err = mlx5e_create_inner_rss_groups_sub(ft, in, inlen); 1501 1502 kvfree(in); 1503 return (err); 1504 } 1505 1506 static int 1507 mlx5e_create_inner_rss_flow_table(struct mlx5e_priv *priv) 1508 { 1509 struct mlx5e_flow_table *ft = &priv->fts.inner_rss; 1510 int err; 1511 1512 ft->num_groups = 0; 1513 ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "inner_rss", 1514 MLX5E_INNER_RSS_TABLE_SIZE); 1515 1516 if (IS_ERR(ft->t)) { 1517 err = PTR_ERR(ft->t); 1518 ft->t = NULL; 1519 return (err); 1520 } 1521 ft->g = kcalloc(MLX5E_NUM_INNER_RSS_GROUPS, sizeof(*ft->g), 1522 GFP_KERNEL); 1523 if (!ft->g) { 1524 err = -ENOMEM; 1525 goto err_destroy_inner_rss_flow_table; 1526 } 1527 1528 err = mlx5e_create_inner_rss_groups(ft); 1529 if (err) 1530 goto err_free_g; 1531 1532 return (0); 1533 1534 err_free_g: 1535 kfree(ft->g); 1536 1537 err_destroy_inner_rss_flow_table: 1538 mlx5_destroy_flow_table(ft->t); 1539 ft->t = NULL; 1540 1541 return (err); 1542 } 1543 1544 static void mlx5e_destroy_inner_rss_flow_table(struct mlx5e_priv *priv) 1545 { 1546 mlx5e_destroy_flow_table(&priv->fts.inner_rss); 1547 } 1548 1549 int 1550 mlx5e_open_flow_table(struct mlx5e_priv *priv) 1551 { 1552 int err; 1553 1554 priv->fts.ns = mlx5_get_flow_namespace(priv->mdev, 1555 MLX5_FLOW_NAMESPACE_KERNEL); 1556 1557 err = mlx5e_create_vlan_flow_table(priv); 1558 if (err) 1559 return (err); 1560 1561 err = mlx5e_create_main_flow_table(priv); 1562 if (err) 1563 goto err_destroy_vlan_flow_table; 1564 1565 err = mlx5e_create_inner_rss_flow_table(priv); 1566 if (err) 1567 goto err_destroy_main_flow_table; 1568 1569 return (0); 1570 1571 err_destroy_main_flow_table: 1572 mlx5e_destroy_main_flow_table(priv); 1573 err_destroy_vlan_flow_table: 1574 mlx5e_destroy_vlan_flow_table(priv); 1575 1576 return (err); 1577 } 1578 1579 void 1580 mlx5e_close_flow_table(struct mlx5e_priv *priv) 1581 { 1582 1583 mlx5e_handle_ifp_addr(priv); 1584 mlx5e_destroy_inner_rss_flow_table(priv); 1585 mlx5e_destroy_main_flow_table(priv); 1586 mlx5e_destroy_vlan_flow_table(priv); 1587 } 1588