1 /*- 2 * Copyright (c) 2015 Mellanox Technologies. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 * 25 * $FreeBSD$ 26 */ 27 28 #include "en.h" 29 30 #include <linux/list.h> 31 #include <dev/mlx5/fs.h> 32 33 #define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) 34 35 enum { 36 MLX5E_FULLMATCH = 0, 37 MLX5E_ALLMULTI = 1, 38 MLX5E_PROMISC = 2, 39 }; 40 41 enum { 42 MLX5E_UC = 0, 43 MLX5E_MC_IPV4 = 1, 44 MLX5E_MC_IPV6 = 2, 45 MLX5E_MC_OTHER = 3, 46 }; 47 48 enum { 49 MLX5E_ACTION_NONE = 0, 50 MLX5E_ACTION_ADD = 1, 51 MLX5E_ACTION_DEL = 2, 52 }; 53 54 struct mlx5e_eth_addr_hash_node { 55 LIST_ENTRY(mlx5e_eth_addr_hash_node) hlist; 56 u8 action; 57 struct mlx5e_eth_addr_info ai; 58 }; 59 60 static inline int 61 mlx5e_hash_eth_addr(const u8 * addr) 62 { 63 return (addr[5]); 64 } 65 66 static void 67 mlx5e_add_eth_addr_to_hash(struct mlx5e_eth_addr_hash_head *hash, 68 const u8 * addr) 69 { 70 struct mlx5e_eth_addr_hash_node *hn; 71 int ix = mlx5e_hash_eth_addr(addr); 72 73 LIST_FOREACH(hn, &hash[ix], hlist) { 74 if (bcmp(hn->ai.addr, addr, ETHER_ADDR_LEN) == 0) { 75 if (hn->action == MLX5E_ACTION_DEL) 76 hn->action = MLX5E_ACTION_NONE; 77 return; 78 } 79 } 80 81 hn = malloc(sizeof(*hn), M_MLX5EN, M_NOWAIT | M_ZERO); 82 if (hn == NULL) 83 return; 84 85 ether_addr_copy(hn->ai.addr, addr); 86 hn->action = MLX5E_ACTION_ADD; 87 88 LIST_INSERT_HEAD(&hash[ix], hn, hlist); 89 } 90 91 static void 92 mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn) 93 { 94 LIST_REMOVE(hn, hlist); 95 free(hn, M_MLX5EN); 96 } 97 98 static void 99 mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv, 100 struct mlx5e_eth_addr_info *ai) 101 { 102 if (ai->tt_vec & (1 << MLX5E_TT_IPV6_IPSEC_ESP)) 103 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]); 104 105 if (ai->tt_vec & (1 << MLX5E_TT_IPV4_IPSEC_ESP)) 106 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]); 107 108 if (ai->tt_vec & (1 << MLX5E_TT_IPV6_IPSEC_AH)) 109 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]); 110 111 if (ai->tt_vec & (1 << MLX5E_TT_IPV4_IPSEC_AH)) 112 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]); 113 114 if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP)) 115 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]); 116 117 if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP)) 118 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]); 119 120 if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP)) 121 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]); 122 123 if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP)) 124 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]); 125 126 if (ai->tt_vec & (1 << MLX5E_TT_IPV6)) 127 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]); 128 129 if (ai->tt_vec & (1 << MLX5E_TT_IPV4)) 130 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]); 131 132 if (ai->tt_vec & (1 << MLX5E_TT_ANY)) 133 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]); 134 } 135 136 static int 137 mlx5e_get_eth_addr_type(const u8 * addr) 138 { 139 if (ETHER_IS_MULTICAST(addr) == 0) 140 return (MLX5E_UC); 141 142 if ((addr[0] == 0x01) && 143 (addr[1] == 0x00) && 144 (addr[2] == 0x5e) && 145 !(addr[3] & 0x80)) 146 return (MLX5E_MC_IPV4); 147 148 if ((addr[0] == 0x33) && 149 (addr[1] == 0x33)) 150 return (MLX5E_MC_IPV6); 151 152 return (MLX5E_MC_OTHER); 153 } 154 155 static u32 156 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type) 157 { 158 int eth_addr_type; 159 u32 ret; 160 161 switch (type) { 162 case MLX5E_FULLMATCH: 163 eth_addr_type = mlx5e_get_eth_addr_type(ai->addr); 164 switch (eth_addr_type) { 165 case MLX5E_UC: 166 ret = 167 (1 << MLX5E_TT_IPV4_TCP) | 168 (1 << MLX5E_TT_IPV6_TCP) | 169 (1 << MLX5E_TT_IPV4_UDP) | 170 (1 << MLX5E_TT_IPV6_UDP) | 171 (1 << MLX5E_TT_IPV4) | 172 (1 << MLX5E_TT_IPV6) | 173 (1 << MLX5E_TT_ANY) | 174 0; 175 break; 176 177 case MLX5E_MC_IPV4: 178 ret = 179 (1 << MLX5E_TT_IPV4_UDP) | 180 (1 << MLX5E_TT_IPV4) | 181 0; 182 break; 183 184 case MLX5E_MC_IPV6: 185 ret = 186 (1 << MLX5E_TT_IPV6_UDP) | 187 (1 << MLX5E_TT_IPV6) | 188 0; 189 break; 190 191 default: 192 ret = 193 (1 << MLX5E_TT_ANY) | 194 0; 195 break; 196 } 197 break; 198 199 case MLX5E_ALLMULTI: 200 ret = 201 (1 << MLX5E_TT_IPV4_UDP) | 202 (1 << MLX5E_TT_IPV6_UDP) | 203 (1 << MLX5E_TT_IPV4) | 204 (1 << MLX5E_TT_IPV6) | 205 (1 << MLX5E_TT_ANY) | 206 0; 207 break; 208 209 default: /* MLX5E_PROMISC */ 210 ret = 211 (1 << MLX5E_TT_IPV4_TCP) | 212 (1 << MLX5E_TT_IPV6_TCP) | 213 (1 << MLX5E_TT_IPV4_UDP) | 214 (1 << MLX5E_TT_IPV6_UDP) | 215 (1 << MLX5E_TT_IPV4) | 216 (1 << MLX5E_TT_IPV6) | 217 (1 << MLX5E_TT_ANY) | 218 0; 219 break; 220 } 221 222 return (ret); 223 } 224 225 static int 226 mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv, 227 struct mlx5e_eth_addr_info *ai, int type, 228 u32 *mc, u32 *mv) 229 { 230 struct mlx5_flow_destination dest; 231 u8 mc_enable = 0; 232 struct mlx5_flow_rule **rule_p; 233 struct mlx5_flow_table *ft = priv->fts.main.t; 234 u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc, 235 outer_headers.dmac_47_16); 236 u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv, 237 outer_headers.dmac_47_16); 238 u32 *tirn = priv->tirn; 239 u32 tt_vec; 240 int err = 0; 241 242 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; 243 244 switch (type) { 245 case MLX5E_FULLMATCH: 246 mc_enable = MLX5_MATCH_OUTER_HEADERS; 247 memset(mc_dmac, 0xff, ETH_ALEN); 248 ether_addr_copy(mv_dmac, ai->addr); 249 break; 250 251 case MLX5E_ALLMULTI: 252 mc_enable = MLX5_MATCH_OUTER_HEADERS; 253 mc_dmac[0] = 0x01; 254 mv_dmac[0] = 0x01; 255 break; 256 257 case MLX5E_PROMISC: 258 break; 259 default: 260 break; 261 } 262 263 tt_vec = mlx5e_get_tt_vec(ai, type); 264 265 if (tt_vec & BIT(MLX5E_TT_ANY)) { 266 rule_p = &ai->ft_rule[MLX5E_TT_ANY]; 267 dest.tir_num = tirn[MLX5E_TT_ANY]; 268 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv, 269 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 270 MLX5_FS_ETH_FLOW_TAG, &dest); 271 if (IS_ERR_OR_NULL(*rule_p)) 272 goto err_del_ai; 273 ai->tt_vec |= BIT(MLX5E_TT_ANY); 274 } 275 276 mc_enable = MLX5_MATCH_OUTER_HEADERS; 277 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); 278 279 if (tt_vec & BIT(MLX5E_TT_IPV4)) { 280 rule_p = &ai->ft_rule[MLX5E_TT_IPV4]; 281 dest.tir_num = tirn[MLX5E_TT_IPV4]; 282 MLX5_SET(fte_match_param, mv, outer_headers.ethertype, 283 ETHERTYPE_IP); 284 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv, 285 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 286 MLX5_FS_ETH_FLOW_TAG, &dest); 287 if (IS_ERR_OR_NULL(*rule_p)) 288 goto err_del_ai; 289 ai->tt_vec |= BIT(MLX5E_TT_IPV4); 290 } 291 292 if (tt_vec & BIT(MLX5E_TT_IPV6)) { 293 rule_p = &ai->ft_rule[MLX5E_TT_IPV6]; 294 dest.tir_num = tirn[MLX5E_TT_IPV6]; 295 MLX5_SET(fte_match_param, mv, outer_headers.ethertype, 296 ETHERTYPE_IPV6); 297 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv, 298 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 299 MLX5_FS_ETH_FLOW_TAG, &dest); 300 if (IS_ERR_OR_NULL(*rule_p)) 301 goto err_del_ai; 302 ai->tt_vec |= BIT(MLX5E_TT_IPV6); 303 } 304 305 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); 306 MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP); 307 308 if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) { 309 rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP]; 310 dest.tir_num = tirn[MLX5E_TT_IPV4_UDP]; 311 MLX5_SET(fte_match_param, mv, outer_headers.ethertype, 312 ETHERTYPE_IP); 313 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv, 314 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 315 MLX5_FS_ETH_FLOW_TAG, &dest); 316 if (IS_ERR_OR_NULL(*rule_p)) 317 goto err_del_ai; 318 ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP); 319 } 320 321 if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) { 322 rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP]; 323 dest.tir_num = tirn[MLX5E_TT_IPV6_UDP]; 324 MLX5_SET(fte_match_param, mv, outer_headers.ethertype, 325 ETHERTYPE_IPV6); 326 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv, 327 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 328 MLX5_FS_ETH_FLOW_TAG, &dest); 329 if (IS_ERR_OR_NULL(*rule_p)) 330 goto err_del_ai; 331 ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP); 332 } 333 334 MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP); 335 336 if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) { 337 rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP]; 338 dest.tir_num = tirn[MLX5E_TT_IPV4_TCP]; 339 MLX5_SET(fte_match_param, mv, outer_headers.ethertype, 340 ETHERTYPE_IP); 341 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv, 342 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 343 MLX5_FS_ETH_FLOW_TAG, &dest); 344 if (IS_ERR_OR_NULL(*rule_p)) 345 goto err_del_ai; 346 ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP); 347 } 348 349 if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) { 350 rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP]; 351 dest.tir_num = tirn[MLX5E_TT_IPV6_TCP]; 352 MLX5_SET(fte_match_param, mv, outer_headers.ethertype, 353 ETHERTYPE_IPV6); 354 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv, 355 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 356 MLX5_FS_ETH_FLOW_TAG, &dest); 357 if (IS_ERR_OR_NULL(*rule_p)) 358 goto err_del_ai; 359 360 ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP); 361 } 362 363 MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH); 364 365 if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) { 366 rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]; 367 dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH]; 368 MLX5_SET(fte_match_param, mv, outer_headers.ethertype, 369 ETHERTYPE_IP); 370 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv, 371 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 372 MLX5_FS_ETH_FLOW_TAG, &dest); 373 if (IS_ERR_OR_NULL(*rule_p)) 374 goto err_del_ai; 375 ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH); 376 } 377 378 if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) { 379 rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]; 380 dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH]; 381 MLX5_SET(fte_match_param, mv, outer_headers.ethertype, 382 ETHERTYPE_IPV6); 383 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv, 384 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 385 MLX5_FS_ETH_FLOW_TAG, &dest); 386 if (IS_ERR_OR_NULL(*rule_p)) 387 goto err_del_ai; 388 ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH); 389 } 390 391 MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP); 392 393 if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) { 394 rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]; 395 dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP]; 396 MLX5_SET(fte_match_param, mv, outer_headers.ethertype, 397 ETHERTYPE_IP); 398 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv, 399 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 400 MLX5_FS_ETH_FLOW_TAG, &dest); 401 if (IS_ERR_OR_NULL(*rule_p)) 402 goto err_del_ai; 403 ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP); 404 } 405 406 if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) { 407 rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]; 408 dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP]; 409 MLX5_SET(fte_match_param, mv, outer_headers.ethertype, 410 ETHERTYPE_IPV6); 411 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv, 412 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 413 MLX5_FS_ETH_FLOW_TAG, &dest); 414 if (IS_ERR_OR_NULL(*rule_p)) 415 goto err_del_ai; 416 ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP); 417 } 418 419 return 0; 420 421 err_del_ai: 422 err = PTR_ERR(*rule_p); 423 *rule_p = NULL; 424 mlx5e_del_eth_addr_from_flow_table(priv, ai); 425 426 return err; 427 } 428 429 static int 430 mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, 431 struct mlx5e_eth_addr_info *ai, int type) 432 { 433 u32 *match_criteria; 434 u32 *match_value; 435 int err = 0; 436 437 match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); 438 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); 439 if (!match_value || !match_criteria) { 440 if_printf(priv->ifp, "%s: alloc failed\n", __func__); 441 err = -ENOMEM; 442 goto add_eth_addr_rule_out; 443 } 444 err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, match_criteria, 445 match_value); 446 447 add_eth_addr_rule_out: 448 kvfree(match_criteria); 449 kvfree(match_value); 450 451 return (err); 452 } 453 454 static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) 455 { 456 struct ifnet *ifp = priv->ifp; 457 int max_list_size; 458 int list_size; 459 u16 *vlans; 460 int vlan; 461 int err; 462 int i; 463 464 list_size = 0; 465 for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) 466 list_size++; 467 468 max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list); 469 470 if (list_size > max_list_size) { 471 if_printf(ifp, 472 "ifnet vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n", 473 list_size, max_list_size); 474 list_size = max_list_size; 475 } 476 477 vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL); 478 if (!vlans) 479 return -ENOMEM; 480 481 i = 0; 482 for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) { 483 if (i >= list_size) 484 break; 485 vlans[i++] = vlan; 486 } 487 488 err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size); 489 if (err) 490 if_printf(ifp, "Failed to modify vport vlans list err(%d)\n", 491 err); 492 493 kfree(vlans); 494 return err; 495 } 496 497 enum mlx5e_vlan_rule_type { 498 MLX5E_VLAN_RULE_TYPE_UNTAGGED, 499 MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 500 MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 501 MLX5E_VLAN_RULE_TYPE_MATCH_VID, 502 }; 503 504 static int 505 mlx5e_add_vlan_rule_sub(struct mlx5e_priv *priv, 506 enum mlx5e_vlan_rule_type rule_type, u16 vid, 507 u32 *mc, u32 *mv) 508 { 509 struct mlx5_flow_table *ft = priv->fts.vlan.t; 510 struct mlx5_flow_destination dest; 511 u8 mc_enable = 0; 512 struct mlx5_flow_rule **rule_p; 513 int err = 0; 514 515 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 516 dest.ft = priv->fts.main.t; 517 518 mc_enable = MLX5_MATCH_OUTER_HEADERS; 519 520 switch (rule_type) { 521 case MLX5E_VLAN_RULE_TYPE_UNTAGGED: 522 rule_p = &priv->vlan.untagged_ft_rule; 523 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag); 524 break; 525 case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID: 526 rule_p = &priv->vlan.any_cvlan_ft_rule; 527 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag); 528 MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1); 529 break; 530 case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID: 531 rule_p = &priv->vlan.any_svlan_ft_rule; 532 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag); 533 MLX5_SET(fte_match_param, mv, outer_headers.svlan_tag, 1); 534 break; 535 default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */ 536 rule_p = &priv->vlan.active_vlans_ft_rule[vid]; 537 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag); 538 MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1); 539 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid); 540 MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid); 541 mlx5e_vport_context_update_vlans(priv); 542 break; 543 } 544 545 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv, 546 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 547 MLX5_FS_ETH_FLOW_TAG, 548 &dest); 549 550 if (IS_ERR(*rule_p)) { 551 err = PTR_ERR(*rule_p); 552 *rule_p = NULL; 553 if_printf(priv->ifp, "%s: add rule failed\n", __func__); 554 } 555 556 return (err); 557 } 558 559 static int 560 mlx5e_add_vlan_rule(struct mlx5e_priv *priv, 561 enum mlx5e_vlan_rule_type rule_type, u16 vid) 562 { 563 u32 *match_criteria; 564 u32 *match_value; 565 int err = 0; 566 567 match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); 568 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); 569 if (!match_value || !match_criteria) { 570 if_printf(priv->ifp, "%s: alloc failed\n", __func__); 571 err = -ENOMEM; 572 goto add_vlan_rule_out; 573 } 574 575 err = mlx5e_add_vlan_rule_sub(priv, rule_type, vid, match_criteria, 576 match_value); 577 578 add_vlan_rule_out: 579 kvfree(match_criteria); 580 kvfree(match_value); 581 582 return (err); 583 } 584 585 static void 586 mlx5e_del_vlan_rule(struct mlx5e_priv *priv, 587 enum mlx5e_vlan_rule_type rule_type, u16 vid) 588 { 589 switch (rule_type) { 590 case MLX5E_VLAN_RULE_TYPE_UNTAGGED: 591 if (priv->vlan.untagged_ft_rule) { 592 mlx5_del_flow_rule(priv->vlan.untagged_ft_rule); 593 priv->vlan.untagged_ft_rule = NULL; 594 } 595 break; 596 case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID: 597 if (priv->vlan.any_cvlan_ft_rule) { 598 mlx5_del_flow_rule(priv->vlan.any_cvlan_ft_rule); 599 priv->vlan.any_cvlan_ft_rule = NULL; 600 } 601 break; 602 case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID: 603 if (priv->vlan.any_svlan_ft_rule) { 604 mlx5_del_flow_rule(priv->vlan.any_svlan_ft_rule); 605 priv->vlan.any_svlan_ft_rule = NULL; 606 } 607 break; 608 case MLX5E_VLAN_RULE_TYPE_MATCH_VID: 609 if (priv->vlan.active_vlans_ft_rule[vid]) { 610 mlx5_del_flow_rule(priv->vlan.active_vlans_ft_rule[vid]); 611 priv->vlan.active_vlans_ft_rule[vid] = NULL; 612 } 613 mlx5e_vport_context_update_vlans(priv); 614 break; 615 default: 616 break; 617 } 618 } 619 620 static void 621 mlx5e_del_any_vid_rules(struct mlx5e_priv *priv) 622 { 623 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); 624 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0); 625 } 626 627 static int 628 mlx5e_add_any_vid_rules(struct mlx5e_priv *priv) 629 { 630 int err; 631 632 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); 633 if (err) 634 return (err); 635 636 return (mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0)); 637 } 638 639 void 640 mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) 641 { 642 if (priv->vlan.filter_disabled) { 643 priv->vlan.filter_disabled = false; 644 if (priv->ifp->if_flags & IFF_PROMISC) 645 return; 646 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) 647 mlx5e_del_any_vid_rules(priv); 648 } 649 } 650 651 void 652 mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) 653 { 654 if (!priv->vlan.filter_disabled) { 655 priv->vlan.filter_disabled = true; 656 if (priv->ifp->if_flags & IFF_PROMISC) 657 return; 658 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) 659 mlx5e_add_any_vid_rules(priv); 660 } 661 } 662 663 void 664 mlx5e_vlan_rx_add_vid(void *arg, struct ifnet *ifp, u16 vid) 665 { 666 struct mlx5e_priv *priv = arg; 667 668 if (ifp != priv->ifp) 669 return; 670 671 PRIV_LOCK(priv); 672 if (!test_and_set_bit(vid, priv->vlan.active_vlans) && 673 test_bit(MLX5E_STATE_OPENED, &priv->state)) 674 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); 675 PRIV_UNLOCK(priv); 676 } 677 678 void 679 mlx5e_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, u16 vid) 680 { 681 struct mlx5e_priv *priv = arg; 682 683 if (ifp != priv->ifp) 684 return; 685 686 PRIV_LOCK(priv); 687 clear_bit(vid, priv->vlan.active_vlans); 688 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) 689 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); 690 PRIV_UNLOCK(priv); 691 } 692 693 int 694 mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv) 695 { 696 int err; 697 int i; 698 699 for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID) { 700 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, 701 i); 702 if (err) 703 return (err); 704 } 705 706 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); 707 if (err) 708 return (err); 709 710 if (priv->vlan.filter_disabled) { 711 err = mlx5e_add_any_vid_rules(priv); 712 if (err) 713 return (err); 714 } 715 return (0); 716 } 717 718 void 719 mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv) 720 { 721 int i; 722 723 if (priv->vlan.filter_disabled) 724 mlx5e_del_any_vid_rules(priv); 725 726 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); 727 728 for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID) 729 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i); 730 } 731 732 #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \ 733 for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \ 734 LIST_FOREACH_SAFE(hn, &(hash)[i], hlist, tmp) 735 736 static void 737 mlx5e_execute_action(struct mlx5e_priv *priv, 738 struct mlx5e_eth_addr_hash_node *hn) 739 { 740 switch (hn->action) { 741 case MLX5E_ACTION_ADD: 742 mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH); 743 hn->action = MLX5E_ACTION_NONE; 744 break; 745 746 case MLX5E_ACTION_DEL: 747 mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai); 748 mlx5e_del_eth_addr_from_hash(hn); 749 break; 750 751 default: 752 break; 753 } 754 } 755 756 static void 757 mlx5e_sync_ifp_addr(struct mlx5e_priv *priv) 758 { 759 struct ifnet *ifp = priv->ifp; 760 struct ifaddr *ifa; 761 struct ifmultiaddr *ifma; 762 763 /* XXX adding this entry might not be needed */ 764 mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc, 765 LLADDR((struct sockaddr_dl *)(ifp->if_addr->ifa_addr))); 766 767 if_addr_rlock(ifp); 768 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { 769 if (ifa->ifa_addr->sa_family != AF_LINK) 770 continue; 771 mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc, 772 LLADDR((struct sockaddr_dl *)ifa->ifa_addr)); 773 } 774 if_addr_runlock(ifp); 775 776 if_maddr_rlock(ifp); 777 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 778 if (ifma->ifma_addr->sa_family != AF_LINK) 779 continue; 780 mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_mc, 781 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 782 } 783 if_maddr_runlock(ifp); 784 } 785 786 static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type, 787 u8 addr_array[][ETH_ALEN], int size) 788 { 789 bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC); 790 struct ifnet *ifp = priv->ifp; 791 struct mlx5e_eth_addr_hash_node *hn; 792 struct mlx5e_eth_addr_hash_head *addr_list; 793 struct mlx5e_eth_addr_hash_node *tmp; 794 int i = 0; 795 int hi; 796 797 addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc; 798 799 if (is_uc) /* Make sure our own address is pushed first */ 800 ether_addr_copy(addr_array[i++], IF_LLADDR(ifp)); 801 else if (priv->eth_addr.broadcast_enabled) 802 ether_addr_copy(addr_array[i++], ifp->if_broadcastaddr); 803 804 mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) { 805 if (ether_addr_equal(IF_LLADDR(ifp), hn->ai.addr)) 806 continue; 807 if (i >= size) 808 break; 809 ether_addr_copy(addr_array[i++], hn->ai.addr); 810 } 811 } 812 813 static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv, 814 int list_type) 815 { 816 bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC); 817 struct mlx5e_eth_addr_hash_node *hn; 818 u8 (*addr_array)[ETH_ALEN] = NULL; 819 struct mlx5e_eth_addr_hash_head *addr_list; 820 struct mlx5e_eth_addr_hash_node *tmp; 821 int max_size; 822 int size; 823 int err; 824 int hi; 825 826 size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0); 827 max_size = is_uc ? 828 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) : 829 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list); 830 831 addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc; 832 mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) 833 size++; 834 835 if (size > max_size) { 836 if_printf(priv->ifp, 837 "ifp %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n", 838 is_uc ? "UC" : "MC", size, max_size); 839 size = max_size; 840 } 841 842 if (size) { 843 addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL); 844 if (!addr_array) { 845 err = -ENOMEM; 846 goto out; 847 } 848 mlx5e_fill_addr_array(priv, list_type, addr_array, size); 849 } 850 851 err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size); 852 out: 853 if (err) 854 if_printf(priv->ifp, 855 "Failed to modify vport %s list err(%d)\n", 856 is_uc ? "UC" : "MC", err); 857 kfree(addr_array); 858 } 859 860 static void mlx5e_vport_context_update(struct mlx5e_priv *priv) 861 { 862 struct mlx5e_eth_addr_db *ea = &priv->eth_addr; 863 864 mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_UC); 865 mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_MC); 866 mlx5_modify_nic_vport_promisc(priv->mdev, 0, 867 ea->allmulti_enabled, 868 ea->promisc_enabled); 869 } 870 871 static void 872 mlx5e_apply_ifp_addr(struct mlx5e_priv *priv) 873 { 874 struct mlx5e_eth_addr_hash_node *hn; 875 struct mlx5e_eth_addr_hash_node *tmp; 876 int i; 877 878 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i) 879 mlx5e_execute_action(priv, hn); 880 881 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i) 882 mlx5e_execute_action(priv, hn); 883 } 884 885 static void 886 mlx5e_handle_ifp_addr(struct mlx5e_priv *priv) 887 { 888 struct mlx5e_eth_addr_hash_node *hn; 889 struct mlx5e_eth_addr_hash_node *tmp; 890 int i; 891 892 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i) 893 hn->action = MLX5E_ACTION_DEL; 894 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i) 895 hn->action = MLX5E_ACTION_DEL; 896 897 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) 898 mlx5e_sync_ifp_addr(priv); 899 900 mlx5e_apply_ifp_addr(priv); 901 } 902 903 void 904 mlx5e_set_rx_mode_core(struct mlx5e_priv *priv) 905 { 906 struct mlx5e_eth_addr_db *ea = &priv->eth_addr; 907 struct ifnet *ndev = priv->ifp; 908 909 bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state); 910 bool promisc_enabled = rx_mode_enable && (ndev->if_flags & IFF_PROMISC); 911 bool allmulti_enabled = rx_mode_enable && (ndev->if_flags & IFF_ALLMULTI); 912 bool broadcast_enabled = rx_mode_enable; 913 914 bool enable_promisc = !ea->promisc_enabled && promisc_enabled; 915 bool disable_promisc = ea->promisc_enabled && !promisc_enabled; 916 bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled; 917 bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled; 918 bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled; 919 bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled; 920 921 /* update broadcast address */ 922 ether_addr_copy(priv->eth_addr.broadcast.addr, 923 priv->ifp->if_broadcastaddr); 924 925 if (enable_promisc) { 926 mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC); 927 if (!priv->vlan.filter_disabled) 928 mlx5e_add_any_vid_rules(priv); 929 } 930 if (enable_allmulti) 931 mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI); 932 if (enable_broadcast) 933 mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH); 934 935 mlx5e_handle_ifp_addr(priv); 936 937 if (disable_broadcast) 938 mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast); 939 if (disable_allmulti) 940 mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti); 941 if (disable_promisc) { 942 if (!priv->vlan.filter_disabled) 943 mlx5e_del_any_vid_rules(priv); 944 mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc); 945 } 946 947 ea->promisc_enabled = promisc_enabled; 948 ea->allmulti_enabled = allmulti_enabled; 949 ea->broadcast_enabled = broadcast_enabled; 950 951 mlx5e_vport_context_update(priv); 952 } 953 954 void 955 mlx5e_set_rx_mode_work(struct work_struct *work) 956 { 957 struct mlx5e_priv *priv = 958 container_of(work, struct mlx5e_priv, set_rx_mode_work); 959 960 PRIV_LOCK(priv); 961 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) 962 mlx5e_set_rx_mode_core(priv); 963 PRIV_UNLOCK(priv); 964 } 965 966 static void 967 mlx5e_destroy_groups(struct mlx5e_flow_table *ft) 968 { 969 int i; 970 971 for (i = ft->num_groups - 1; i >= 0; i--) { 972 if (!IS_ERR_OR_NULL(ft->g[i])) 973 mlx5_destroy_flow_group(ft->g[i]); 974 ft->g[i] = NULL; 975 } 976 ft->num_groups = 0; 977 } 978 979 static void 980 mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft) 981 { 982 mlx5e_destroy_groups(ft); 983 kfree(ft->g); 984 mlx5_destroy_flow_table(ft->t); 985 ft->t = NULL; 986 } 987 988 #define MLX5E_NUM_MAIN_GROUPS 10 989 #define MLX5E_MAIN_GROUP0_SIZE BIT(4) 990 #define MLX5E_MAIN_GROUP1_SIZE BIT(3) 991 #define MLX5E_MAIN_GROUP2_SIZE BIT(1) 992 #define MLX5E_MAIN_GROUP3_SIZE BIT(0) 993 #define MLX5E_MAIN_GROUP4_SIZE BIT(14) 994 #define MLX5E_MAIN_GROUP5_SIZE BIT(13) 995 #define MLX5E_MAIN_GROUP6_SIZE BIT(11) 996 #define MLX5E_MAIN_GROUP7_SIZE BIT(2) 997 #define MLX5E_MAIN_GROUP8_SIZE BIT(1) 998 #define MLX5E_MAIN_GROUP9_SIZE BIT(0) 999 #define MLX5E_MAIN_TABLE_SIZE (MLX5E_MAIN_GROUP0_SIZE +\ 1000 MLX5E_MAIN_GROUP1_SIZE +\ 1001 MLX5E_MAIN_GROUP2_SIZE +\ 1002 MLX5E_MAIN_GROUP3_SIZE +\ 1003 MLX5E_MAIN_GROUP4_SIZE +\ 1004 MLX5E_MAIN_GROUP5_SIZE +\ 1005 MLX5E_MAIN_GROUP6_SIZE +\ 1006 MLX5E_MAIN_GROUP7_SIZE +\ 1007 MLX5E_MAIN_GROUP8_SIZE +\ 1008 MLX5E_MAIN_GROUP9_SIZE +\ 1009 0) 1010 1011 static int 1012 mlx5e_create_main_groups_sub(struct mlx5e_flow_table *ft, u32 *in, 1013 int inlen) 1014 { 1015 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); 1016 u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in, 1017 match_criteria.outer_headers.dmac_47_16); 1018 int err; 1019 int ix = 0; 1020 1021 /* Tunnel rules need to be first in this list of groups */ 1022 1023 /* Start tunnel rules */ 1024 memset(in, 0, inlen); 1025 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1026 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); 1027 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); 1028 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport); 1029 MLX5_SET_CFG(in, start_flow_index, ix); 1030 ix += MLX5E_MAIN_GROUP0_SIZE; 1031 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1032 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1033 if (IS_ERR(ft->g[ft->num_groups])) 1034 goto err_destory_groups; 1035 ft->num_groups++; 1036 /* End Tunnel Rules */ 1037 1038 memset(in, 0, inlen); 1039 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1040 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); 1041 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); 1042 MLX5_SET_CFG(in, start_flow_index, ix); 1043 ix += MLX5E_MAIN_GROUP1_SIZE; 1044 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1045 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1046 if (IS_ERR(ft->g[ft->num_groups])) 1047 goto err_destory_groups; 1048 ft->num_groups++; 1049 1050 memset(in, 0, inlen); 1051 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1052 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); 1053 MLX5_SET_CFG(in, start_flow_index, ix); 1054 ix += MLX5E_MAIN_GROUP2_SIZE; 1055 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1056 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1057 if (IS_ERR(ft->g[ft->num_groups])) 1058 goto err_destory_groups; 1059 ft->num_groups++; 1060 1061 memset(in, 0, inlen); 1062 MLX5_SET_CFG(in, start_flow_index, ix); 1063 ix += MLX5E_MAIN_GROUP3_SIZE; 1064 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1065 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1066 if (IS_ERR(ft->g[ft->num_groups])) 1067 goto err_destory_groups; 1068 ft->num_groups++; 1069 1070 memset(in, 0, inlen); 1071 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1072 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); 1073 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); 1074 memset(dmac, 0xff, ETH_ALEN); 1075 MLX5_SET_CFG(in, start_flow_index, ix); 1076 ix += MLX5E_MAIN_GROUP4_SIZE; 1077 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1078 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1079 if (IS_ERR(ft->g[ft->num_groups])) 1080 goto err_destory_groups; 1081 ft->num_groups++; 1082 1083 memset(in, 0, inlen); 1084 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1085 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); 1086 memset(dmac, 0xff, ETH_ALEN); 1087 MLX5_SET_CFG(in, start_flow_index, ix); 1088 ix += MLX5E_MAIN_GROUP5_SIZE; 1089 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1090 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1091 if (IS_ERR(ft->g[ft->num_groups])) 1092 goto err_destory_groups; 1093 ft->num_groups++; 1094 1095 memset(in, 0, inlen); 1096 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1097 memset(dmac, 0xff, ETH_ALEN); 1098 MLX5_SET_CFG(in, start_flow_index, ix); 1099 ix += MLX5E_MAIN_GROUP6_SIZE; 1100 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1101 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1102 if (IS_ERR(ft->g[ft->num_groups])) 1103 goto err_destory_groups; 1104 ft->num_groups++; 1105 1106 memset(in, 0, inlen); 1107 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1108 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); 1109 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); 1110 dmac[0] = 0x01; 1111 MLX5_SET_CFG(in, start_flow_index, ix); 1112 ix += MLX5E_MAIN_GROUP7_SIZE; 1113 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1114 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1115 if (IS_ERR(ft->g[ft->num_groups])) 1116 goto err_destory_groups; 1117 ft->num_groups++; 1118 1119 memset(in, 0, inlen); 1120 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1121 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); 1122 dmac[0] = 0x01; 1123 MLX5_SET_CFG(in, start_flow_index, ix); 1124 ix += MLX5E_MAIN_GROUP8_SIZE; 1125 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1126 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1127 if (IS_ERR(ft->g[ft->num_groups])) 1128 goto err_destory_groups; 1129 ft->num_groups++; 1130 1131 memset(in, 0, inlen); 1132 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1133 dmac[0] = 0x01; 1134 MLX5_SET_CFG(in, start_flow_index, ix); 1135 ix += MLX5E_MAIN_GROUP9_SIZE; 1136 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1137 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1138 if (IS_ERR(ft->g[ft->num_groups])) 1139 goto err_destory_groups; 1140 ft->num_groups++; 1141 1142 return (0); 1143 1144 err_destory_groups: 1145 err = PTR_ERR(ft->g[ft->num_groups]); 1146 ft->g[ft->num_groups] = NULL; 1147 mlx5e_destroy_groups(ft); 1148 1149 return (err); 1150 } 1151 1152 static int 1153 mlx5e_create_main_groups(struct mlx5e_flow_table *ft) 1154 { 1155 u32 *in; 1156 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1157 int err; 1158 1159 in = mlx5_vzalloc(inlen); 1160 if (!in) 1161 return (-ENOMEM); 1162 1163 err = mlx5e_create_main_groups_sub(ft, in, inlen); 1164 1165 kvfree(in); 1166 return (err); 1167 } 1168 1169 static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv) 1170 { 1171 struct mlx5e_flow_table *ft = &priv->fts.main; 1172 int err; 1173 1174 ft->num_groups = 0; 1175 ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "main", 1176 MLX5E_MAIN_TABLE_SIZE); 1177 1178 if (IS_ERR(ft->t)) { 1179 err = PTR_ERR(ft->t); 1180 ft->t = NULL; 1181 return (err); 1182 } 1183 ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL); 1184 if (!ft->g) { 1185 err = -ENOMEM; 1186 goto err_destroy_main_flow_table; 1187 } 1188 1189 err = mlx5e_create_main_groups(ft); 1190 if (err) 1191 goto err_free_g; 1192 return (0); 1193 1194 err_free_g: 1195 kfree(ft->g); 1196 1197 err_destroy_main_flow_table: 1198 mlx5_destroy_flow_table(ft->t); 1199 ft->t = NULL; 1200 1201 return (err); 1202 } 1203 1204 static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv) 1205 { 1206 mlx5e_destroy_flow_table(&priv->fts.main); 1207 } 1208 1209 #define MLX5E_NUM_VLAN_GROUPS 3 1210 #define MLX5E_VLAN_GROUP0_SIZE BIT(12) 1211 #define MLX5E_VLAN_GROUP1_SIZE BIT(1) 1212 #define MLX5E_VLAN_GROUP2_SIZE BIT(0) 1213 #define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\ 1214 MLX5E_VLAN_GROUP1_SIZE +\ 1215 MLX5E_VLAN_GROUP2_SIZE +\ 1216 0) 1217 1218 static int 1219 mlx5e_create_vlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in, 1220 int inlen) 1221 { 1222 int err; 1223 int ix = 0; 1224 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); 1225 1226 memset(in, 0, inlen); 1227 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1228 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag); 1229 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid); 1230 MLX5_SET_CFG(in, start_flow_index, ix); 1231 ix += MLX5E_VLAN_GROUP0_SIZE; 1232 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1233 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1234 if (IS_ERR(ft->g[ft->num_groups])) 1235 goto err_destory_groups; 1236 ft->num_groups++; 1237 1238 memset(in, 0, inlen); 1239 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1240 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag); 1241 MLX5_SET_CFG(in, start_flow_index, ix); 1242 ix += MLX5E_VLAN_GROUP1_SIZE; 1243 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1244 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1245 if (IS_ERR(ft->g[ft->num_groups])) 1246 goto err_destory_groups; 1247 ft->num_groups++; 1248 1249 memset(in, 0, inlen); 1250 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); 1251 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag); 1252 MLX5_SET_CFG(in, start_flow_index, ix); 1253 ix += MLX5E_VLAN_GROUP2_SIZE; 1254 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1255 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1256 if (IS_ERR(ft->g[ft->num_groups])) 1257 goto err_destory_groups; 1258 ft->num_groups++; 1259 1260 return (0); 1261 1262 err_destory_groups: 1263 err = PTR_ERR(ft->g[ft->num_groups]); 1264 ft->g[ft->num_groups] = NULL; 1265 mlx5e_destroy_groups(ft); 1266 1267 return (err); 1268 } 1269 1270 static int 1271 mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft) 1272 { 1273 u32 *in; 1274 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1275 int err; 1276 1277 in = mlx5_vzalloc(inlen); 1278 if (!in) 1279 return (-ENOMEM); 1280 1281 err = mlx5e_create_vlan_groups_sub(ft, in, inlen); 1282 1283 kvfree(in); 1284 return (err); 1285 } 1286 1287 static int 1288 mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv) 1289 { 1290 struct mlx5e_flow_table *ft = &priv->fts.vlan; 1291 int err; 1292 1293 ft->num_groups = 0; 1294 ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "vlan", 1295 MLX5E_VLAN_TABLE_SIZE); 1296 1297 if (IS_ERR(ft->t)) { 1298 err = PTR_ERR(ft->t); 1299 ft->t = NULL; 1300 return (err); 1301 } 1302 ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL); 1303 if (!ft->g) { 1304 err = -ENOMEM; 1305 goto err_destroy_vlan_flow_table; 1306 } 1307 1308 err = mlx5e_create_vlan_groups(ft); 1309 if (err) 1310 goto err_free_g; 1311 1312 return (0); 1313 1314 err_free_g: 1315 kfree(ft->g); 1316 1317 err_destroy_vlan_flow_table: 1318 mlx5_destroy_flow_table(ft->t); 1319 ft->t = NULL; 1320 1321 return (err); 1322 } 1323 1324 static void 1325 mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv) 1326 { 1327 mlx5e_destroy_flow_table(&priv->fts.vlan); 1328 } 1329 1330 #define MLX5E_NUM_INNER_RSS_GROUPS 3 1331 #define MLX5E_INNER_RSS_GROUP0_SIZE BIT(3) 1332 #define MLX5E_INNER_RSS_GROUP1_SIZE BIT(1) 1333 #define MLX5E_INNER_RSS_GROUP2_SIZE BIT(0) 1334 #define MLX5E_INNER_RSS_TABLE_SIZE (MLX5E_INNER_RSS_GROUP0_SIZE +\ 1335 MLX5E_INNER_RSS_GROUP1_SIZE +\ 1336 MLX5E_INNER_RSS_GROUP2_SIZE +\ 1337 0) 1338 1339 static int 1340 mlx5e_create_inner_rss_groups_sub(struct mlx5e_flow_table *ft, u32 *in, 1341 int inlen) 1342 { 1343 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); 1344 int err; 1345 int ix = 0; 1346 1347 memset(in, 0, inlen); 1348 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS); 1349 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype); 1350 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol); 1351 MLX5_SET_CFG(in, start_flow_index, ix); 1352 ix += MLX5E_INNER_RSS_GROUP0_SIZE; 1353 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1354 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1355 if (IS_ERR(ft->g[ft->num_groups])) 1356 goto err_destory_groups; 1357 ft->num_groups++; 1358 1359 memset(in, 0, inlen); 1360 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS); 1361 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype); 1362 MLX5_SET_CFG(in, start_flow_index, ix); 1363 ix += MLX5E_INNER_RSS_GROUP1_SIZE; 1364 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1365 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1366 if (IS_ERR(ft->g[ft->num_groups])) 1367 goto err_destory_groups; 1368 ft->num_groups++; 1369 1370 memset(in, 0, inlen); 1371 MLX5_SET_CFG(in, start_flow_index, ix); 1372 ix += MLX5E_INNER_RSS_GROUP2_SIZE; 1373 MLX5_SET_CFG(in, end_flow_index, ix - 1); 1374 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 1375 if (IS_ERR(ft->g[ft->num_groups])) 1376 goto err_destory_groups; 1377 ft->num_groups++; 1378 1379 return (0); 1380 1381 err_destory_groups: 1382 err = PTR_ERR(ft->g[ft->num_groups]); 1383 ft->g[ft->num_groups] = NULL; 1384 mlx5e_destroy_groups(ft); 1385 1386 return (err); 1387 } 1388 1389 static int 1390 mlx5e_create_inner_rss_groups(struct mlx5e_flow_table *ft) 1391 { 1392 u32 *in; 1393 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1394 int err; 1395 1396 in = mlx5_vzalloc(inlen); 1397 if (!in) 1398 return (-ENOMEM); 1399 1400 err = mlx5e_create_inner_rss_groups_sub(ft, in, inlen); 1401 1402 kvfree(in); 1403 return (err); 1404 } 1405 1406 static int 1407 mlx5e_create_inner_rss_flow_table(struct mlx5e_priv *priv) 1408 { 1409 struct mlx5e_flow_table *ft = &priv->fts.inner_rss; 1410 int err; 1411 1412 ft->num_groups = 0; 1413 ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "inner_rss", 1414 MLX5E_INNER_RSS_TABLE_SIZE); 1415 1416 if (IS_ERR(ft->t)) { 1417 err = PTR_ERR(ft->t); 1418 ft->t = NULL; 1419 return (err); 1420 } 1421 ft->g = kcalloc(MLX5E_NUM_INNER_RSS_GROUPS, sizeof(*ft->g), 1422 GFP_KERNEL); 1423 if (!ft->g) { 1424 err = -ENOMEM; 1425 goto err_destroy_inner_rss_flow_table; 1426 } 1427 1428 err = mlx5e_create_inner_rss_groups(ft); 1429 if (err) 1430 goto err_free_g; 1431 1432 return (0); 1433 1434 err_free_g: 1435 kfree(ft->g); 1436 1437 err_destroy_inner_rss_flow_table: 1438 mlx5_destroy_flow_table(ft->t); 1439 ft->t = NULL; 1440 1441 return (err); 1442 } 1443 1444 static void mlx5e_destroy_inner_rss_flow_table(struct mlx5e_priv *priv) 1445 { 1446 mlx5e_destroy_flow_table(&priv->fts.inner_rss); 1447 } 1448 1449 int 1450 mlx5e_open_flow_table(struct mlx5e_priv *priv) 1451 { 1452 int err; 1453 1454 priv->fts.ns = mlx5_get_flow_namespace(priv->mdev, 1455 MLX5_FLOW_NAMESPACE_KERNEL); 1456 1457 err = mlx5e_create_vlan_flow_table(priv); 1458 if (err) 1459 return (err); 1460 1461 err = mlx5e_create_main_flow_table(priv); 1462 if (err) 1463 goto err_destroy_vlan_flow_table; 1464 1465 err = mlx5e_create_inner_rss_flow_table(priv); 1466 if (err) 1467 goto err_destroy_main_flow_table; 1468 1469 return (0); 1470 1471 err_destroy_main_flow_table: 1472 mlx5e_destroy_main_flow_table(priv); 1473 err_destroy_vlan_flow_table: 1474 mlx5e_destroy_vlan_flow_table(priv); 1475 1476 return (err); 1477 } 1478 1479 void 1480 mlx5e_close_flow_table(struct mlx5e_priv *priv) 1481 { 1482 mlx5e_destroy_inner_rss_flow_table(priv); 1483 mlx5e_destroy_main_flow_table(priv); 1484 mlx5e_destroy_vlan_flow_table(priv); 1485 } 1486