/*-
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "en.h"

#include <linux/list.h>
#include <dev/mlx5/fs.h>
#include <dev/mlx5/mpfs.h>

#define	MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

enum {
	MLX5E_FULLMATCH = 0,
	MLX5E_ALLMULTI = 1,
	MLX5E_PROMISC = 2,
};

enum {
	MLX5E_UC = 0,
	MLX5E_MC_IPV4 = 1,
	MLX5E_MC_IPV6 = 2,
	MLX5E_MC_OTHER = 3,
};

enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD = 1,
	MLX5E_ACTION_DEL = 2,
};

struct mlx5e_eth_addr_hash_node {
	LIST_ENTRY(mlx5e_eth_addr_hash_node) hlist;
	u8	action;
	u32	mpfs_index;
	struct mlx5e_eth_addr_info ai;
};

static inline int
mlx5e_hash_eth_addr(const u8 *addr)
{
	return (addr[5]);
}

static bool
mlx5e_add_eth_addr_to_hash(struct mlx5e_eth_addr_hash_head *hash,
    struct mlx5e_eth_addr_hash_node *hn_new)
{
	struct mlx5e_eth_addr_hash_node *hn;
	u32 ix = mlx5e_hash_eth_addr(hn_new->ai.addr);

	LIST_FOREACH(hn, &hash[ix], hlist) {
		if (bcmp(hn->ai.addr, hn_new->ai.addr, ETHER_ADDR_LEN) == 0) {
			if (hn->action == MLX5E_ACTION_DEL)
				hn->action = MLX5E_ACTION_NONE;
			free(hn_new, M_MLX5EN);
			return (false);
		}
	}
	LIST_INSERT_HEAD(&hash[ix], hn_new, hlist);
	return (true);
}

static void
mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
{
	LIST_REMOVE(hn, hlist);
	free(hn, M_MLX5EN);
}
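/*
 * Tear down all hardware steering rules previously installed for the
 * given L2 address. Each bit in "tt_vec" tracks which traffic-type
 * rule was actually created, so only those rules are deleted here.
 */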
static void
mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai)
{
	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_IPSEC_ESP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_IPSEC_ESP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_IPSEC_AH))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_IPSEC_AH))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]);

	if (ai->tt_vec & (1 << MLX5E_TT_ANY))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]);

	/* ensure the rules are not freed again */
	ai->tt_vec = 0;
}

static int
mlx5e_get_eth_addr_type(const u8 *addr)
{
	if (ETHER_IS_MULTICAST(addr) == 0)
		return (MLX5E_UC);

	if ((addr[0] == 0x01) &&
	    (addr[1] == 0x00) &&
	    (addr[2] == 0x5e) &&
	    !(addr[3] & 0x80))
		return (MLX5E_MC_IPV4);

	if ((addr[0] == 0x33) &&
	    (addr[1] == 0x33))
		return (MLX5E_MC_IPV6);

	return (MLX5E_MC_OTHER);
}

static u32
mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
{
	int eth_addr_type;
	u32 ret;

	switch (type) {
	case MLX5E_FULLMATCH:
		eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
		switch (eth_addr_type) {
		case MLX5E_UC:
			ret =
			    (1 << MLX5E_TT_IPV4_TCP) |
			    (1 << MLX5E_TT_IPV6_TCP) |
			    (1 << MLX5E_TT_IPV4_UDP) |
			    (1 << MLX5E_TT_IPV6_UDP) |
			    (1 << MLX5E_TT_IPV4) |
			    (1 << MLX5E_TT_IPV6) |
			    (1 << MLX5E_TT_ANY) |
			    0;
			break;

		case MLX5E_MC_IPV4:
			ret =
			    (1 << MLX5E_TT_IPV4_UDP) |
			    (1 << MLX5E_TT_IPV4) |
			    0;
			break;

		case MLX5E_MC_IPV6:
			ret =
			    (1 << MLX5E_TT_IPV6_UDP) |
			    (1 << MLX5E_TT_IPV6) |
			    0;
			break;

		default:
			ret =
			    (1 << MLX5E_TT_ANY) |
			    0;
			break;
		}
		break;

	case MLX5E_ALLMULTI:
		ret =
		    (1 << MLX5E_TT_IPV4_UDP) |
		    (1 << MLX5E_TT_IPV6_UDP) |
		    (1 << MLX5E_TT_IPV4) |
		    (1 << MLX5E_TT_IPV6) |
		    (1 << MLX5E_TT_ANY) |
		    0;
		break;

	default:			/* MLX5E_PROMISC */
		ret =
		    (1 << MLX5E_TT_IPV4_TCP) |
		    (1 << MLX5E_TT_IPV6_TCP) |
		    (1 << MLX5E_TT_IPV4_UDP) |
		    (1 << MLX5E_TT_IPV6_UDP) |
		    (1 << MLX5E_TT_IPV4) |
		    (1 << MLX5E_TT_IPV6) |
		    (1 << MLX5E_TT_ANY) |
		    0;
		break;
	}

	return (ret);
}
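/*
 * Install one flow rule per traffic type selected by mlx5e_get_tt_vec().
 * The match criteria ("mc") and match value ("mv") buffers are refined
 * incrementally: first the DMAC (for FULLMATCH/ALLMULTI), then the
 * ethertype, then the IP protocol, while each rule forwards matching
 * packets to the TIR of the corresponding traffic type.
 */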
static int
mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai, int type,
    u32 *mc, u32 *mv)
{
	struct mlx5_flow_destination dest = {};
	u8 mc_enable = 0;
	struct mlx5_flow_rule **rule_p;
	struct mlx5_flow_table *ft = priv->fts.main.t;
	u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
	    outer_headers.dmac_47_16);
	u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
	    outer_headers.dmac_47_16);
	u32 *tirn = priv->tirn;
	u32 tt_vec;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;

	switch (type) {
	case MLX5E_FULLMATCH:
		mc_enable = MLX5_MATCH_OUTER_HEADERS;
		memset(mc_dmac, 0xff, ETH_ALEN);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		mc_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		break;
	default:
		break;
	}

	tt_vec = mlx5e_get_tt_vec(ai, type);

	if (tt_vec & BIT(MLX5E_TT_ANY)) {
		rule_p = &ai->ft_rule[MLX5E_TT_ANY];
		dest.tir_num = tirn[MLX5E_TT_ANY];
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
		    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		    MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_ANY);
	}

	mc_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);

	if (tt_vec & BIT(MLX5E_TT_IPV4)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
		dest.tir_num = tirn[MLX5E_TT_IPV4];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
		    ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
		    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		    MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
		dest.tir_num = tirn[MLX5E_TT_IPV6];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
		    ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
		    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		    MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6);
	}

	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
		    ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
		    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		    MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
		    ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
		    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		    MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
		    ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
		    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		    MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
		    ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
		    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		    MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);

	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
		    ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
		    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		    MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
		    ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
		    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		    MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
		    ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
		    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		    MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
		    ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
		    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		    MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
	}

	return (0);

err_del_ai:
	err = PTR_ERR(*rule_p);
	*rule_p = NULL;
	mlx5e_del_eth_addr_from_flow_table(priv, ai);

	return (err);
}
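/*
 * Wrapper around mlx5e_add_eth_addr_rule_sub() which allocates and
 * frees the match criteria/value scratch buffers.
 */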
static int
mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai, int type)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		mlx5_en_err(priv->ifp, "alloc failed\n");
		err = -ENOMEM;
		goto add_eth_addr_rule_out;
	}
	err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, match_criteria,
	    match_value);

add_eth_addr_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return (err);
}

static int
mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
	struct ifnet *ifp = priv->ifp;
	int max_list_size;
	int list_size;
	u16 *vlans;
	int vlan;
	int err;
	int i;

	list_size = 0;
	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		mlx5_en_err(ifp,
		    "ifnet vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
		    list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return (-ENOMEM);

	i = 0;
	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
	if (err)
		mlx5_en_err(ifp, "Failed to modify vport vlans list err(%d)\n",
		    err);

	kfree(vlans);
	return (err);
}
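/*
 * VLAN steering: the VLAN flow table splits receive traffic into
 * untagged, any-C-tag, any-S-tag and specific-VID classes, and every
 * rule forwards into the main flow table for further classification.
 */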
enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};

static int
mlx5e_add_vlan_rule_sub(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid,
    u32 *mc, u32 *mv)
{
	struct mlx5_flow_table *ft = priv->fts.vlan.t;
	struct mlx5_flow_destination dest = {};
	u8 mc_enable = 0;
	struct mlx5_flow_rule **rule_p;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fts.main.t;

	mc_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		rule_p = &priv->vlan.untagged_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		rule_p = &priv->vlan.any_cvlan_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		rule_p = &priv->vlan.any_svlan_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.svlan_tag, 1);
		break;
	default:			/* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
		rule_p = &priv->vlan.active_vlans_ft_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
		MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
		mlx5e_vport_context_update_vlans(priv);
		break;
	}

	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
	    MLX5_FS_ETH_FLOW_TAG,
	    &dest);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		mlx5_en_err(priv->ifp, "add rule failed\n");
	}

	return (err);
}

static int
mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		mlx5_en_err(priv->ifp, "alloc failed\n");
		err = -ENOMEM;
		goto add_vlan_rule_out;
	}

	err = mlx5e_add_vlan_rule_sub(priv, rule_type, vid, match_criteria,
	    match_value);

add_vlan_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return (err);
}

static void
mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (priv->vlan.untagged_ft_rule) {
			mlx5_del_flow_rule(priv->vlan.untagged_ft_rule);
			priv->vlan.untagged_ft_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		if (priv->vlan.any_cvlan_ft_rule) {
			mlx5_del_flow_rule(priv->vlan.any_cvlan_ft_rule);
			priv->vlan.any_cvlan_ft_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		if (priv->vlan.any_svlan_ft_rule) {
			mlx5_del_flow_rule(priv->vlan.any_svlan_ft_rule);
			priv->vlan.any_svlan_ft_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
		if (priv->vlan.active_vlans_ft_rule[vid]) {
			mlx5_del_flow_rule(priv->vlan.active_vlans_ft_rule[vid]);
			priv->vlan.active_vlans_ft_rule[vid] = NULL;
		}
		mlx5e_vport_context_update_vlans(priv);
		break;
	default:
		break;
	}
}
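/*
 * The "any VID" rules accept all tagged traffic regardless of VLAN ID;
 * they are installed while the VLAN filter is disabled or while the
 * interface is promiscuous.
 */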
static void
mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

static int
mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	if (err)
		return (err);

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
	if (err)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);

	return (err);
}

void
mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
	if (priv->vlan.filter_disabled) {
		priv->vlan.filter_disabled = false;
		if (priv->ifp->if_flags & IFF_PROMISC)
			return;
		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
			mlx5e_del_any_vid_rules(priv);
	}
}

void
mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->vlan.filter_disabled) {
		priv->vlan.filter_disabled = true;
		if (priv->ifp->if_flags & IFF_PROMISC)
			return;
		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
			mlx5e_add_any_vid_rules(priv);
	}
}

void
mlx5e_vlan_rx_add_vid(void *arg, struct ifnet *ifp, u16 vid)
{
	struct mlx5e_priv *priv = arg;

	if (ifp != priv->ifp)
		return;

	PRIV_LOCK(priv);
	if (!test_and_set_bit(vid, priv->vlan.active_vlans) &&
	    test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
	PRIV_UNLOCK(priv);
}

void
mlx5e_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, u16 vid)
{
	struct mlx5e_priv *priv = arg;

	if (ifp != priv->ifp)
		return;

	PRIV_LOCK(priv);
	clear_bit(vid, priv->vlan.active_vlans);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
	PRIV_UNLOCK(priv);
}

int
mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
{
	int err;
	int i;

	set_bit(0, priv->vlan.active_vlans);
	for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID) {
		err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
		    i);
		if (err)
			goto error;
	}

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
	if (err)
		goto error;

	if (priv->vlan.filter_disabled) {
		err = mlx5e_add_any_vid_rules(priv);
		if (err)
			goto error;
	}
	return (0);
error:
	mlx5e_del_all_vlan_rules(priv);
	return (err);
}

void
mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	if (priv->vlan.filter_disabled)
		mlx5e_del_any_vid_rules(priv);

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
	clear_bit(0, priv->vlan.active_vlans);
}

#define	mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
		LIST_FOREACH_SAFE(hn, &(hash)[i], hlist, tmp)
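/*
 * ifnet address synchronization works in two passes: all hash list
 * entries are first marked MLX5E_ACTION_DEL, then addresses still
 * present on the ifnet are re-marked MLX5E_ACTION_NONE or inserted
 * as MLX5E_ACTION_ADD.  mlx5e_execute_action() finally installs or
 * removes the steering rules and, for unicast addresses, the MPFS
 * entries.
 */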
static void
mlx5e_execute_action(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_hash_node *hn)
{
	switch (hn->action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
		if (hn->mpfs_index != -1U)
			mlx5_mpfs_del_mac(priv->mdev, hn->mpfs_index);
		mlx5e_del_eth_addr_from_hash(hn);
		break;

	default:
		break;
	}
}

static struct mlx5e_eth_addr_hash_node *
mlx5e_move_hn(struct mlx5e_eth_addr_hash_head *fh,
    struct mlx5e_eth_addr_hash_head *uh)
{
	struct mlx5e_eth_addr_hash_node *hn;

	hn = LIST_FIRST(fh);
	if (hn != NULL) {
		LIST_REMOVE(hn, hlist);
		LIST_INSERT_HEAD(uh, hn, hlist);
	}
	return (hn);
}

static struct mlx5e_eth_addr_hash_node *
mlx5e_remove_hn(struct mlx5e_eth_addr_hash_head *fh)
{
	struct mlx5e_eth_addr_hash_node *hn;

	hn = LIST_FIRST(fh);
	if (hn != NULL)
		LIST_REMOVE(hn, hlist);
	return (hn);
}

struct mlx5e_copy_addr_ctx {
	struct mlx5e_eth_addr_hash_head *free;
	struct mlx5e_eth_addr_hash_head *fill;
	bool success;
};

static u_int
mlx5e_copy_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct mlx5e_copy_addr_ctx *ctx = arg;
	struct mlx5e_eth_addr_hash_node *hn;

	hn = mlx5e_move_hn(ctx->free, ctx->fill);
	if (hn == NULL) {
		ctx->success = false;
		return (0);
	}
	ether_addr_copy(hn->ai.addr, LLADDR(sdl));

	return (1);
}

static void
mlx5e_sync_ifp_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_copy_addr_ctx ctx;
	struct mlx5e_eth_addr_hash_head head_free;
	struct mlx5e_eth_addr_hash_head head_uc;
	struct mlx5e_eth_addr_hash_head head_mc;
	struct mlx5e_eth_addr_hash_node *hn;
	struct ifnet *ifp = priv->ifp;
	size_t x;
	size_t num;

	PRIV_ASSERT_LOCKED(priv);

retry:
	LIST_INIT(&head_free);
	LIST_INIT(&head_uc);
	LIST_INIT(&head_mc);
	num = 1 + if_lladdr_count(ifp) + if_llmaddr_count(ifp);

	/* allocate place holders */
	for (x = 0; x != num; x++) {
		hn = malloc(sizeof(*hn), M_MLX5EN, M_WAITOK | M_ZERO);
		hn->action = MLX5E_ACTION_ADD;
		hn->mpfs_index = -1U;
		LIST_INSERT_HEAD(&head_free, hn, hlist);
	}

	hn = mlx5e_move_hn(&head_free, &head_uc);
	MPASS(hn != NULL);

	ether_addr_copy(hn->ai.addr,
	    LLADDR((struct sockaddr_dl *)(ifp->if_addr->ifa_addr)));

	ctx.free = &head_free;
	ctx.fill = &head_uc;
	ctx.success = true;
	if_foreach_lladdr(ifp, mlx5e_copy_addr, &ctx);
	if (ctx.success == false)
		goto cleanup;

	ctx.fill = &head_mc;
	if_foreach_llmaddr(ifp, mlx5e_copy_addr, &ctx);
	if (ctx.success == false)
		goto cleanup;

	/* insert L2 unicast addresses into hash list */

	while ((hn = mlx5e_remove_hn(&head_uc)) != NULL) {
		if (mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc, hn) == 0)
			continue;
		if (hn->mpfs_index == -1U)
			mlx5_mpfs_add_mac(priv->mdev, &hn->mpfs_index,
			    hn->ai.addr, 0, 0);
	}

	/* insert L2 multicast addresses into hash list */

	while ((hn = mlx5e_remove_hn(&head_mc)) != NULL) {
		if (mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_mc, hn) == 0)
			continue;
	}

cleanup:
	while ((hn = mlx5e_remove_hn(&head_uc)) != NULL)
		free(hn, M_MLX5EN);
	while ((hn = mlx5e_remove_hn(&head_mc)) != NULL)
		free(hn, M_MLX5EN);
	while ((hn = mlx5e_remove_hn(&head_free)) != NULL)
		free(hn, M_MLX5EN);

	if (ctx.success == false)
		goto retry;
}
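/*
 * Mirror the software address hash lists into the firmware vport
 * context: mlx5e_fill_addr_array() flattens a hash list into a
 * contiguous MAC array (primary or broadcast address first), which
 * mlx5e_vport_context_update_addr_list() then pushes to the device.
 */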
static void
mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
    u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
	struct ifnet *ifp = priv->ifp;
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_head *addr_list;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;

	if (is_uc)		/* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], IF_LLADDR(ifp));
	else if (priv->eth_addr.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ifp->if_broadcastaddr);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(IF_LLADDR(ifp), hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}

static void
mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
    int list_type)
{
	bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
	struct mlx5e_eth_addr_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct mlx5e_eth_addr_hash_head *addr_list;
	struct mlx5e_eth_addr_hash_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
	    1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
	    1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		mlx5_en_err(priv->ifp,
		    "ifp %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
		    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type,
	    addr_array, size);
out:
	if (err)
		mlx5_en_err(priv->ifp,
		    "Failed to modify vport %s list err(%d)\n",
		    is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}

static void
mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
	    ea->allmulti_enabled,
	    ea->promisc_enabled);
}

static void
mlx5e_apply_ifp_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
		mlx5e_execute_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
		mlx5e_execute_action(priv, hn);
}

static void
mlx5e_handle_ifp_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
		hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
		hn->action = MLX5E_ACTION_DEL;

	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_sync_ifp_addr(priv);

	mlx5e_apply_ifp_addr(priv);
}

void
mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
	struct ifnet *ndev = priv->ifp;

	bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state);
	bool promisc_enabled = rx_mode_enable && (ndev->if_flags & IFF_PROMISC);
	bool allmulti_enabled = rx_mode_enable && (ndev->if_flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
	bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
	bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
	bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
	bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
	bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;

	/* update broadcast address */
	ether_addr_copy(priv->eth_addr.broadcast.addr,
	    priv->ifp->if_broadcastaddr);

	if (enable_promisc) {
		mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
		if (!priv->vlan.filter_disabled)
			mlx5e_add_any_vid_rules(priv);
	}
	if (enable_allmulti)
		mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_ifp_addr(priv);

	if (disable_broadcast)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->vlan.filter_disabled)
			mlx5e_del_any_vid_rules(priv);
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
	}

	ea->promisc_enabled = promisc_enabled;
	ea->allmulti_enabled = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}

void
mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv =
	    container_of(work, struct mlx5e_priv, set_rx_mode_work);

	PRIV_LOCK(priv);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_set_rx_mode_core(priv);
	PRIV_UNLOCK(priv);
}
static void
mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
	int i;

	for (i = ft->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ft->g[i]))
			mlx5_destroy_flow_group(ft->g[i]);
		ft->g[i] = NULL;
	}
	ft->num_groups = 0;
}

static void
mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}

#define	MLX5E_NUM_MAIN_GROUPS	10
#define	MLX5E_MAIN_GROUP0_SIZE	BIT(4)
#define	MLX5E_MAIN_GROUP1_SIZE	BIT(3)
#define	MLX5E_MAIN_GROUP2_SIZE	BIT(1)
#define	MLX5E_MAIN_GROUP3_SIZE	BIT(0)
#define	MLX5E_MAIN_GROUP4_SIZE	BIT(14)
#define	MLX5E_MAIN_GROUP5_SIZE	BIT(13)
#define	MLX5E_MAIN_GROUP6_SIZE	BIT(11)
#define	MLX5E_MAIN_GROUP7_SIZE	BIT(2)
#define	MLX5E_MAIN_GROUP8_SIZE	BIT(1)
#define	MLX5E_MAIN_GROUP9_SIZE	BIT(0)
#define	MLX5E_MAIN_TABLE_SIZE	(MLX5E_MAIN_GROUP0_SIZE +\
				 MLX5E_MAIN_GROUP1_SIZE +\
				 MLX5E_MAIN_GROUP2_SIZE +\
				 MLX5E_MAIN_GROUP3_SIZE +\
				 MLX5E_MAIN_GROUP4_SIZE +\
				 MLX5E_MAIN_GROUP5_SIZE +\
				 MLX5E_MAIN_GROUP6_SIZE +\
				 MLX5E_MAIN_GROUP7_SIZE +\
				 MLX5E_MAIN_GROUP8_SIZE +\
				 MLX5E_MAIN_GROUP9_SIZE +\
				 0)
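/*
 * Flow groups: each group in a flow table shares one set of match
 * criteria and a contiguous range of flow indices, and the group
 * sizes above must add up to the table size.  The ten main-table
 * groups are ordered from most to least specific match: group 0
 * (ethertype + IP protocol + UDP destination port) holds the tunnel
 * rules and must come first, groups 1-3 drop the UDP port, the IP
 * protocol and the ethertype in turn, groups 4-6 repeat that
 * progression with a fully-masked DMAC, and groups 7-9 repeat it
 * again matching only the multicast bit of the DMAC.
 */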
static int
mlx5e_create_main_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
    int inlen)
{
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
	    match_criteria.outer_headers.dmac_47_16);
	int err;
	int ix = 0;

	/* Tunnel rules need to be first in this list of groups */

	/* Start tunnel rules */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;
	/* End Tunnel Rules */

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP4_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP5_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP6_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP7_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP8_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP9_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return (0);

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return (err);
}

static int
mlx5e_create_main_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return (-ENOMEM);

	err = mlx5e_create_main_groups_sub(ft, in, inlen);

	kvfree(in);
	return (err);
}
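/*
 * The main flow table is the second stage of the receive chain: the
 * VLAN table forwards into it, and its rules spread traffic across
 * the per-traffic-type TIRs (see mlx5e_add_eth_addr_rule_sub()).
 */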
static int
mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fts.main;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "main",
	    MLX5E_MAIN_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return (err);
	}
	ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_main_flow_table;
	}

	err = mlx5e_create_main_groups(ft);
	if (err)
		goto err_free_g;
	return (0);

err_free_g:
	kfree(ft->g);

err_destroy_main_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return (err);
}

static void
mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.main);
}

#define	MLX5E_NUM_VLAN_GROUPS	3
#define	MLX5E_VLAN_GROUP0_SIZE	BIT(12)
#define	MLX5E_VLAN_GROUP1_SIZE	BIT(1)
#define	MLX5E_VLAN_GROUP2_SIZE	BIT(0)
#define	MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE +\
				 MLX5E_VLAN_GROUP2_SIZE +\
				 0)

static int
mlx5e_create_vlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
    int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return (0);

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return (err);
}

static int
mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return (-ENOMEM);

	err = mlx5e_create_vlan_groups_sub(ft, in, inlen);

	kvfree(in);
	return (err);
}
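/*
 * Create the VLAN flow table; its rules (see mlx5e_add_vlan_rule_sub())
 * are the entry point of the receive classification chain and forward
 * into the main table.
 */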
static int
mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fts.vlan;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "vlan",
	    MLX5E_VLAN_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return (err);
	}
	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_flow_table;
	}

	err = mlx5e_create_vlan_groups(ft);
	if (err)
		goto err_free_g;

	return (0);

err_free_g:
	kfree(ft->g);

err_destroy_vlan_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return (err);
}

static void
mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.vlan);
}

#define	MLX5E_NUM_INNER_RSS_GROUPS	3
#define	MLX5E_INNER_RSS_GROUP0_SIZE	BIT(3)
#define	MLX5E_INNER_RSS_GROUP1_SIZE	BIT(1)
#define	MLX5E_INNER_RSS_GROUP2_SIZE	BIT(0)
#define	MLX5E_INNER_RSS_TABLE_SIZE	(MLX5E_INNER_RSS_GROUP0_SIZE +\
					 MLX5E_INNER_RSS_GROUP1_SIZE +\
					 MLX5E_INNER_RSS_GROUP2_SIZE +\
					 0)

static int
mlx5e_create_inner_rss_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
    int inlen)
{
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	int err;
	int ix = 0;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_RSS_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_RSS_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_RSS_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return (0);

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return (err);
}

static int
mlx5e_create_inner_rss_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return (-ENOMEM);

	err = mlx5e_create_inner_rss_groups_sub(ft, in, inlen);

	kvfree(in);
	return (err);
}
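/*
 * The inner RSS table matches on inner (post-decapsulation) headers;
 * it is presumably reached via the tunnel group of the main table for
 * encapsulated traffic such as VXLAN.
 */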
0; 1514 ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "inner_rss", 1515 MLX5E_INNER_RSS_TABLE_SIZE); 1516 1517 if (IS_ERR(ft->t)) { 1518 err = PTR_ERR(ft->t); 1519 ft->t = NULL; 1520 return (err); 1521 } 1522 ft->g = kcalloc(MLX5E_NUM_INNER_RSS_GROUPS, sizeof(*ft->g), 1523 GFP_KERNEL); 1524 if (!ft->g) { 1525 err = -ENOMEM; 1526 goto err_destroy_inner_rss_flow_table; 1527 } 1528 1529 err = mlx5e_create_inner_rss_groups(ft); 1530 if (err) 1531 goto err_free_g; 1532 1533 return (0); 1534 1535 err_free_g: 1536 kfree(ft->g); 1537 1538 err_destroy_inner_rss_flow_table: 1539 mlx5_destroy_flow_table(ft->t); 1540 ft->t = NULL; 1541 1542 return (err); 1543 } 1544 1545 static void mlx5e_destroy_inner_rss_flow_table(struct mlx5e_priv *priv) 1546 { 1547 mlx5e_destroy_flow_table(&priv->fts.inner_rss); 1548 } 1549 1550 int 1551 mlx5e_open_flow_table(struct mlx5e_priv *priv) 1552 { 1553 int err; 1554 1555 priv->fts.ns = mlx5_get_flow_namespace(priv->mdev, 1556 MLX5_FLOW_NAMESPACE_KERNEL); 1557 1558 err = mlx5e_create_vlan_flow_table(priv); 1559 if (err) 1560 return (err); 1561 1562 err = mlx5e_create_main_flow_table(priv); 1563 if (err) 1564 goto err_destroy_vlan_flow_table; 1565 1566 err = mlx5e_create_inner_rss_flow_table(priv); 1567 if (err) 1568 goto err_destroy_main_flow_table; 1569 1570 return (0); 1571 1572 err_destroy_main_flow_table: 1573 mlx5e_destroy_main_flow_table(priv); 1574 err_destroy_vlan_flow_table: 1575 mlx5e_destroy_vlan_flow_table(priv); 1576 1577 return (err); 1578 } 1579 1580 void 1581 mlx5e_close_flow_table(struct mlx5e_priv *priv) 1582 { 1583 1584 mlx5e_handle_ifp_addr(priv); 1585 mlx5e_destroy_inner_rss_flow_table(priv); 1586 mlx5e_destroy_main_flow_table(priv); 1587 mlx5e_destroy_vlan_flow_table(priv); 1588 } 1589