// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */

#include "prestera.h"
#include "prestera_acl.h"
#include "prestera_flow.h"
#include "prestera_flower.h"

struct prestera_flower_template {
	struct prestera_acl_ruleset *ruleset;
};

void prestera_flower_template_cleanup(struct prestera_flow_block *block)
{
	if (block->tmplt) {
		/* put the reference to the ruleset kept in create */
		prestera_acl_ruleset_put(block->tmplt->ruleset);
		kfree(block->tmplt);
		block->tmplt = NULL;
	}
}

static int prestera_flower_parse_actions(struct prestera_flow_block *block,
					 struct prestera_acl_rule *rule,
					 struct flow_action *flow_action,
					 struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int i;

	/* the whole rule->re_arg struct must be zero-initialized by the caller */
	if (!flow_action_has_entries(flow_action))
		return 0;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			if (rule->re_arg.accept.valid)
				return -EEXIST;

			rule->re_arg.accept.valid = 1;
			break;
		case FLOW_ACTION_DROP:
			if (rule->re_arg.drop.valid)
				return -EEXIST;

			rule->re_arg.drop.valid = 1;
			break;
		case FLOW_ACTION_TRAP:
			if (rule->re_arg.trap.valid)
				return -EEXIST;

			rule->re_arg.trap.valid = 1;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			pr_err("Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
				      struct flow_cls_offload *f,
				      struct prestera_flow_block *block)
{
	struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
	struct prestera_acl_match *r_match = &rule->re_key.match;
	struct prestera_port *port;
	struct net_device *ingress_dev;
	struct flow_match_meta match;
	__be16 key, mask;

	flow_rule_match_meta(f_rule, &match);
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(f->common.extack,
				   "Unsupported ingress ifindex mask");
		return -EINVAL;
	}

	ingress_dev = __dev_get_by_index(block->net,
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(f->common.extack,
				   "Can't find specified ingress port to match on");
		return -EINVAL;
	}

	if (!prestera_netdev_check(ingress_dev)) {
		NL_SET_ERR_MSG_MOD(f->common.extack,
				   "Can't match on switchdev ingress port");
		return -EINVAL;
	}
	port = netdev_priv(ingress_dev);

	mask = htons(0x1FFF);
	key = htons(port->hw_id);
	rule_match_set(r_match->key, SYS_PORT, key);
	rule_match_set(r_match->mask, SYS_PORT, mask);

	mask = htons(0x1FF);
	key = htons(port->dev_id);
	rule_match_set(r_match->key, SYS_DEV, key);
	rule_match_set(r_match->mask, SYS_DEV, mask);

	return 0;
}
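/* Illustrative only: META matching corresponds to the flower "indev"
 * option. With hypothetical port names, a rule such as
 *
 *   tc filter add dev sw1p1 ingress flower skip_sw indev sw1p2 \
 *           action drop
 *
 * resolves the ifindex of sw1p2 to a prestera port above, which is then
 * matched in hardware via the 13-bit SYS_PORT and 9-bit SYS_DEV fields.
 */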
static int prestera_flower_parse(struct prestera_flow_block *block,
				 struct prestera_acl_rule *rule,
				 struct flow_cls_offload *f)
{
	struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = f_rule->match.dissector;
	struct prestera_acl_match *r_match = &rule->re_key.match;
	__be16 n_proto_mask = 0;
	__be16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ICMP) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	prestera_acl_rule_priority_set(rule, f->common.prio);

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_META)) {
		err = prestera_flower_parse_meta(rule, f, block);
		if (err)
			return err;
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(f_rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(f_rule, &match);
		n_proto_key = match.key->n_proto;
		n_proto_mask = match.mask->n_proto;

		if (ntohs(match.key->n_proto) == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}

		rule_match_set(r_match->key, ETH_TYPE, n_proto_key);
		rule_match_set(r_match->mask, ETH_TYPE, n_proto_mask);

		rule_match_set(r_match->key, IP_PROTO, match.key->ip_proto);
		rule_match_set(r_match->mask, IP_PROTO, match.mask->ip_proto);
		ip_proto = match.key->ip_proto;
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(f_rule, &match);

		/* DA key, mask */
		rule_match_set_n(r_match->key,
				 ETH_DMAC_0, &match.key->dst[0], 4);
		rule_match_set_n(r_match->key,
				 ETH_DMAC_1, &match.key->dst[4], 2);

		rule_match_set_n(r_match->mask,
				 ETH_DMAC_0, &match.mask->dst[0], 4);
		rule_match_set_n(r_match->mask,
				 ETH_DMAC_1, &match.mask->dst[4], 2);

		/* SA key, mask */
		rule_match_set_n(r_match->key,
				 ETH_SMAC_0, &match.key->src[0], 4);
		rule_match_set_n(r_match->key,
				 ETH_SMAC_1, &match.key->src[4], 2);

		rule_match_set_n(r_match->mask,
				 ETH_SMAC_0, &match.mask->src[0], 4);
		rule_match_set_n(r_match->mask,
				 ETH_SMAC_1, &match.mask->src[4], 2);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(f_rule, &match);

		rule_match_set(r_match->key, IP_SRC, match.key->src);
		rule_match_set(r_match->mask, IP_SRC, match.mask->src);

		rule_match_set(r_match->key, IP_DST, match.key->dst);
		rule_match_set(r_match->mask, IP_DST, match.mask->dst);
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
			NL_SET_ERR_MSG_MOD(f->common.extack,
					   "Only UDP and TCP keys are supported");
			return -EINVAL;
		}

		flow_rule_match_ports(f_rule, &match);

		rule_match_set(r_match->key, L4_PORT_SRC, match.key->src);
		rule_match_set(r_match->mask, L4_PORT_SRC, match.mask->src);

		rule_match_set(r_match->key, L4_PORT_DST, match.key->dst);
		rule_match_set(r_match->mask, L4_PORT_DST, match.mask->dst);
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(f_rule, &match);

		if (match.mask->vlan_id != 0) {
			__be16 key = cpu_to_be16(match.key->vlan_id);
			__be16 mask = cpu_to_be16(match.mask->vlan_id);

			rule_match_set(r_match->key, VLAN_ID, key);
			rule_match_set(r_match->mask, VLAN_ID, mask);
		}

		rule_match_set(r_match->key, VLAN_TPID, match.key->vlan_tpid);
		rule_match_set(r_match->mask, VLAN_TPID, match.mask->vlan_tpid);
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ICMP)) {
		struct flow_match_icmp match;

		flow_rule_match_icmp(f_rule, &match);

		rule_match_set(r_match->key, ICMP_TYPE, match.key->type);
		rule_match_set(r_match->mask, ICMP_TYPE, match.mask->type);

		rule_match_set(r_match->key, ICMP_CODE, match.key->code);
		rule_match_set(r_match->mask, ICMP_CODE, match.mask->code);
	}

	return prestera_flower_parse_actions(block, rule, &f->rule->action,
					     f->common.extack);
}
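/* Illustrative only: an example flower filter that exercises the
 * dissector keys parsed above (device name is hypothetical, exact
 * syntax depends on the iproute2 version):
 *
 *   tc filter add dev sw1p1 ingress protocol ip flower skip_sw \
 *           src_ip 192.0.2.1 ip_proto tcp dst_port 80 action drop
 *
 * This populates ETH_TYPE/IP_PROTO (BASIC), IP_SRC (IPV4_ADDRS) and
 * L4_PORT_DST (PORTS); the drop action is translated by
 * prestera_flower_parse_actions().
 */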
int prestera_flower_replace(struct prestera_flow_block *block,
			    struct flow_cls_offload *f)
{
	struct prestera_acl_ruleset *ruleset;
	struct prestera_acl *acl = block->sw->acl;
	struct prestera_acl_rule *rule;
	int err;

	ruleset = prestera_acl_ruleset_get(acl, block);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	/* increments the ruleset reference */
	rule = prestera_acl_rule_create(ruleset, f->cookie);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	err = prestera_flower_parse(block, rule, f);
	if (err)
		goto err_rule_add;

	if (!prestera_acl_ruleset_is_offload(ruleset)) {
		err = prestera_acl_ruleset_offload(ruleset);
		if (err)
			goto err_ruleset_offload;
	}

	err = prestera_acl_rule_add(block->sw, rule);
	if (err)
		goto err_rule_add;

	prestera_acl_ruleset_put(ruleset);
	return 0;

err_ruleset_offload:
err_rule_add:
	prestera_acl_rule_destroy(rule);
err_rule_create:
	prestera_acl_ruleset_put(ruleset);
	return err;
}

void prestera_flower_destroy(struct prestera_flow_block *block,
			     struct flow_cls_offload *f)
{
	struct prestera_acl_ruleset *ruleset;
	struct prestera_acl_rule *rule;

	ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block);
	if (IS_ERR(ruleset))
		return;

	rule = prestera_acl_rule_lookup(ruleset, f->cookie);
	if (rule) {
		prestera_acl_rule_del(block->sw, rule);
		prestera_acl_rule_destroy(rule);
	}
	prestera_acl_ruleset_put(ruleset);
}
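/* Illustrative only: chain templates are delivered through the
 * FLOW_CLS_TMPLT_CREATE/DESTROY classifier commands, e.g.
 * (hypothetical device name):
 *
 *   tc chain add dev sw1p1 ingress protocol ip flower dst_ip 0.0.0.0/32
 *
 * The parsed mask is preserved in the ruleset below so that rules added
 * later can share a single hardware keymask.
 */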
int prestera_flower_tmplt_create(struct prestera_flow_block *block,
				 struct flow_cls_offload *f)
{
	struct prestera_flower_template *template;
	struct prestera_acl_ruleset *ruleset;
	struct prestera_acl_rule rule;
	int err;

	memset(&rule, 0, sizeof(rule));
	err = prestera_flower_parse(block, &rule, f);
	if (err)
		return err;

	template = kmalloc(sizeof(*template), GFP_KERNEL);
	if (!template) {
		err = -ENOMEM;
		goto err_malloc;
	}

	prestera_acl_rule_keymask_pcl_id_set(&rule, 0);
	ruleset = prestera_acl_ruleset_get(block->sw->acl, block);
	if (IS_ERR_OR_NULL(ruleset)) {
		err = -EINVAL;
		goto err_ruleset_get;
	}

	/* preserve keymask/template to this ruleset */
	prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask);

	/* skip error: a template operation cannot be rejected, so keep the
	 * reference to the ruleset for rules to be added to it later. If
	 * the offload fails here, the ruleset will be offloaded again when
	 * a rule is added. It is also unlikely that the ruleset is already
	 * offloaded at this stage.
	 */
	prestera_acl_ruleset_offload(ruleset);

	/* keep the reference to the ruleset */
	template->ruleset = ruleset;
	block->tmplt = template;
	return 0;

err_ruleset_get:
	kfree(template);
err_malloc:
	NL_SET_ERR_MSG_MOD(f->common.extack, "Create chain template failed");
	return err;
}

void prestera_flower_tmplt_destroy(struct prestera_flow_block *block,
				   struct flow_cls_offload *f)
{
	prestera_flower_template_cleanup(block);
}

int prestera_flower_stats(struct prestera_flow_block *block,
			  struct flow_cls_offload *f)
{
	struct prestera_acl_ruleset *ruleset;
	struct prestera_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

	ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = prestera_acl_rule_lookup(ruleset, f->cookie);
	if (!rule) {
		err = -EINVAL;
		goto err_rule_get_stats;
	}

	err = prestera_acl_rule_get_stats(block->sw->acl, rule, &packets,
					  &bytes, &lastuse);
	if (err)
		goto err_rule_get_stats;

	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
			  FLOW_ACTION_HW_STATS_DELAYED);

err_rule_get_stats:
	prestera_acl_ruleset_put(ruleset);
	return err;
}
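/* Illustrative only: the counters read above surface through the normal
 * tc statistics path (hypothetical device name):
 *
 *   tc -s filter show dev sw1p1 ingress
 *
 * FLOW_ACTION_HW_STATS_DELAYED tells the core that packet/byte counters
 * are polled from hardware and may lag behind the datapath.
 */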