// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_flow_block *block,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct flow_action *flow_action,
					 struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int mirror_act_count = 0;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return 0;
	if (!flow_action_mixed_hw_stats_check(flow_action, extack))
		return -EOPNOTSUPP;

	act = flow_action_first_entry_get(flow_action);
	if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) {
		/* Nothing to do */
	} else if (act->hw_stats & FLOW_ACTION_HW_STATS_IMMEDIATE) {
		/* Count action is inserted first */
		err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
		if (err)
			return err;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
				return err;
			}
			break;
		case FLOW_ACTION_DROP: {
			bool ingress;

			if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Drop action is not supported when block is bound to ingress and egress");
				return -EOPNOTSUPP;
			}
			ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
			err = mlxsw_sp_acl_rulei_act_drop(rulei, ingress,
							  act->cookie, extack);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
				return err;
			}

			/* Forbid block with this rulei to be bound
			 * to ingress/egress in future. Ingress rule is
			 * a blocker for egress and vice versa.
			 */
			if (ingress)
				rulei->egress_bind_blocker = 1;
			else
				rulei->ingress_bind_blocker = 1;
			}
			break;
		case FLOW_ACTION_TRAP:
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
				return err;
			}
			break;
		case FLOW_ACTION_GOTO: {
			u32 chain_index = act->chain_index;
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
				return err;
			}
			}
			break;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

			if (mlxsw_sp_flow_block_is_egress_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress");
				return -EOPNOTSUPP;
			}

			/* Forbid block with this rulei to be bound
			 * to egress in future.
			 */
			rulei->egress_bind_blocker = 1;

			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index, extack);
			if (err)
				return err;

			out_dev = act->dev;
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev, extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_MIRRED: {
			struct net_device *out_dev = act->dev;

			if (mirror_act_count++) {
				NL_SET_ERR_MSG_MOD(extack, "Multiple mirror actions per rule are not supported");
				return -EOPNOTSUPP;
			}

			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
							    block, out_dev,
							    extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_VLAN_MANGLE: {
			u16 proto = be16_to_cpu(act->vlan.proto);
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;

			err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							  act->id, vid,
							  proto, prio, extack);
			if (err)
				return err;
			break;
			}
		case FLOW_ACTION_PRIORITY:
			err = mlxsw_sp_acl_rulei_act_priority(mlxsw_sp, rulei,
							      act->priority,
							      extack);
			if (err)
				return err;
			break;
		case FLOW_ACTION_MANGLE: {
			enum flow_action_mangle_base htype = act->mangle.htype;
			__be32 be_mask = (__force __be32) act->mangle.mask;
			__be32 be_val = (__force __be32) act->mangle.val;
			u32 offset = act->mangle.offset;
			u32 mask = be32_to_cpu(be_mask);
			u32 val = be32_to_cpu(be_val);

			err = mlxsw_sp_acl_rulei_act_mangle(mlxsw_sp, rulei,
							    htype, offset,
							    mask, val, extack);
			if (err)
				return err;
			break;
			}
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}
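
/* A representative tc invocation that exercises the action parser above
 * (illustrative only; swp1/swp2 are assumed to be mlxsw ports):
 *
 *   tc filter add dev swp1 ingress protocol ip pref 10 flower skip_sw \
 *      dst_ip 192.0.2.1 action mirred egress redirect dev swp2
 *
 * Note that FLOW_ACTION_REDIRECT is accepted only on ingress-bound
 * blocks, and that a count action is implicitly appended first whenever
 * immediate HW stats are requested.
 */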

static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
				      struct flow_cls_offload *f,
				      struct mlxsw_sp_flow_block *block)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported ingress ifindex mask");
		return -EINVAL;
	}

	ingress_dev = __dev_get_by_index(block->net,
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't find specified ingress port to match on");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(ingress_dev)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on non-mlxsw ingress port");
		return -EINVAL;
	}

	mlxsw_sp_port = netdev_priv(ingress_dev);
	if (mlxsw_sp_port->mlxsw_sp != block->mlxsw_sp) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on a port from different device");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei,
				       MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
				       mlxsw_sp_port->local_port,
				       0xFFFFFFFF);
	return 0;
}
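
/* The meta key handled above corresponds to flower's "indev" match,
 * which is mainly useful on shared blocks; an illustrative example:
 *
 *   tc filter add block 22 protocol ip pref 20 flower indev swp1 \
 *      src_ip 198.51.100.0/24 action drop
 *
 * The ingress ifindex must be matched exactly (mask 0xFFFFFFFF) and is
 * translated into the device-local MLXSW_AFK_ELEMENT_SRC_SYS_PORT key.
 */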

static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv4_addrs match;

	flow_rule_match_ipv4_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       (char *) &match.key->src,
				       (char *) &match.mask->src, 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       (char *) &match.key->dst,
				       (char *) &match.mask->dst, 4);
}

static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv6_addrs match;

	flow_rule_match_ipv6_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
				       &match.key->src.s6_addr[0x0],
				       &match.mask->src.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
				       &match.key->src.s6_addr[0x4],
				       &match.mask->src.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
				       &match.key->src.s6_addr[0x8],
				       &match.mask->src.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       &match.key->src.s6_addr[0xC],
				       &match.mask->src.s6_addr[0xC], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
				       &match.key->dst.s6_addr[0x0],
				       &match.mask->dst.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
				       &match.key->dst.s6_addr[0x4],
				       &match.mask->dst.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
				       &match.key->dst.s6_addr[0x8],
				       &match.mask->dst.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       &match.key->dst.s6_addr[0xC],
				       &match.mask->dst.s6_addr[0xC], 4);
}

static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f,
				       u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ports match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	flow_rule_match_ports(rule, &match);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(match.key->dst),
				       ntohs(match.mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(match.key->src),
				       ntohs(match.mask->src));
	return 0;
}

static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct flow_cls_offload *f,
				     u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_tcp match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	flow_rule_match_tcp(rule, &match);

	if (match.mask->flags & htons(0x0E00)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP flags match not supported on reserved bits");
		dev_err(mlxsw_sp->bus_info->dev, "TCP flags match not supported on reserved bits\n");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(match.key->flags),
				       ntohs(match.mask->flags));
	return 0;
}
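
/* An illustrative L4 match covered by the two helpers above:
 *
 *   tc filter add dev swp1 ingress protocol ip flower ip_proto tcp \
 *      dst_port 80 tcp_flags 0x2/0x17 action drop
 *
 * htons(0x0E00) covers the three reserved bits of the 12-bit TCP flags
 * field, so any mask touching them is rejected with -EINVAL.
 */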

static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct flow_cls_offload *f,
				    u16 n_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ip match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	flow_rule_match_ip(rule, &match);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       match.key->ttl, match.mask->ttl);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       match.key->tos & 0x3,
				       match.mask->tos & 0x3);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       match.key->tos >> 2,
				       match.mask->tos >> 2);

	return 0;
}

static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_flow_block *block,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	err = mlxsw_sp_flower_parse_meta(rulei, f, block);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = match.key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       match.key->ip_proto,
					       match.mask->ip_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_32_47,
					       match.key->dst,
					       match.mask->dst, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_0_31,
					       match.key->dst + 2,
					       match.mask->dst + 2, 4);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_32_47,
					       match.key->src,
					       match.mask->src, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_0_31,
					       match.key->src + 2,
					       match.mask->src + 2, 4);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (mlxsw_sp_flow_block_is_egress_bound(block)) {
			NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
			return -EOPNOTSUPP;
		}

		/* Forbid block with this rulei to be bound
		 * to egress in future.
		 */
		rulei->egress_bind_blocker = 1;

		if (match.mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       match.key->vlan_id,
						       match.mask->vlan_id);
		if (match.mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       match.key->vlan_priority,
						       match.mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f,
				       n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei,
					     &f->rule->action,
					     f->common.extack);
}
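
/* mlxsw_sp_flower_parse() rejects any dissector key outside the list in
 * its used_keys check, so a maximal supported match looks roughly like
 * (illustrative):
 *
 *   tc filter add dev swp1 ingress protocol ip pref 30 flower skip_sw \
 *      dst_mac 00:11:22:33:44:55 src_ip 198.51.100.1 ip_proto udp \
 *      dst_port 4789 ip_tos 0x0/0x3 action pass
 *
 * The single tos byte is split by mlxsw_sp_flower_parse_ip(): tos >> 2
 * feeds the DSCP key element and tos & 3 the ECN key element.
 */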

static int mlxsw_sp_flower_mall_prio_check(struct mlxsw_sp_flow_block *block,
					   struct flow_cls_offload *f)
{
	bool ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
	unsigned int mall_min_prio;
	unsigned int mall_max_prio;
	int err;

	err = mlxsw_sp_mall_prio_get(block, f->common.chain_index,
				     &mall_min_prio, &mall_max_prio);
	if (err) {
		if (err == -ENOENT)
			/* No matchall filters installed on this chain. */
			return 0;
		NL_SET_ERR_MSG(f->common.extack, "Failed to get matchall priorities");
		return err;
	}
	if (ingress && f->common.prio <= mall_min_prio) {
		NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing matchall rules");
		return -EOPNOTSUPP;
	}
	if (!ingress && f->common.prio >= mall_max_prio) {
		NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing matchall rules");
		return -EOPNOTSUPP;
	}
	return 0;
}
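
/* Example of the ordering the check above enforces: with a matchall
 * mirror already installed at pref 100 on an ingress-bound block, a
 * flower rule may only be added at pref 101 or higher; on egress the
 * relation is inverted and the flower rule must sit in front of all
 * existing matchall rules.
 */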

int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_flow_block *block,
			    struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	err = mlxsw_sp_flower_mall_prio_check(block, f);
	if (err)
		return err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, NULL,
					f->common.extack);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
	if (err)
		goto err_flower_parse;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_flow_block *block,
			     struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_flow_block *block,
			  struct flow_cls_offload *f)
{
	enum flow_action_hw_stats used_hw_stats = FLOW_ACTION_HW_STATS_DISABLED;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		return -EINVAL;

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &lastuse, &used_hw_stats);
	if (err)
		goto err_rule_get_stats;

	flow_stats_update(&f->stats, bytes, packets, 0, lastuse, used_hw_stats);

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_flow_block *block,
				 struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info rulei;
	int err;

	memset(&rulei, 0, sizeof(rulei));
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
	if (err)
		return err;
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER,
					   &rulei.values.elusage);

	/* keep the reference to the ruleset */
	return PTR_ERR_OR_ZERO(ruleset);
}
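
/* mlxsw_sp_flower_tmplt_create() backs flower chain templates, e.g.
 * (illustrative syntax):
 *
 *   tc chain add dev swp1 ingress chain 1 protocol ip flower \
 *      dst_ip 0.0.0.0/32
 *
 * Parsing the template lets the driver pick a ruleset sized for the
 * declared key usage; the reference taken here is dropped in
 * mlxsw_sp_flower_tmplt_destroy() below.
 */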

void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_flow_block *block,
				   struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;
	/* put the reference to the ruleset kept in create */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

int mlxsw_sp_flower_prio_get(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_flow_block *block,
			     u32 chain_index, unsigned int *p_min_prio,
			     unsigned int *p_max_prio)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
					      chain_index,
					      MLXSW_SP_ACL_PROFILE_FLOWER);
	if (IS_ERR(ruleset))
		/* In case there are no flower rules, the caller
		 * receives -ENOENT to indicate there is no need
		 * to check the priorities.
		 */
		return PTR_ERR(ruleset);
	mlxsw_sp_acl_ruleset_prio_get(ruleset, p_min_prio, p_max_prio);
	return 0;
}
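
/* mlxsw_sp_flower_prio_get() is the flower-side counterpart of the
 * mlxsw_sp_mall_prio_get() call used in mlxsw_sp_flower_mall_prio_check()
 * above; the matchall code is expected to perform the mirrored check
 * before inserting its own rules, so the two rule types stay strictly
 * ordered within a chain.
 */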