1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * net/sched/cls_flower.c Flower classifier 4 * 5 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us> 6 */ 7 8 #include <linux/kernel.h> 9 #include <linux/init.h> 10 #include <linux/module.h> 11 #include <linux/rhashtable.h> 12 #include <linux/workqueue.h> 13 #include <linux/refcount.h> 14 #include <linux/bitfield.h> 15 16 #include <linux/if_ether.h> 17 #include <linux/in6.h> 18 #include <linux/ip.h> 19 #include <linux/mpls.h> 20 #include <linux/ppp_defs.h> 21 22 #include <net/sch_generic.h> 23 #include <net/pkt_cls.h> 24 #include <net/pkt_sched.h> 25 #include <net/ip.h> 26 #include <net/flow_dissector.h> 27 #include <net/geneve.h> 28 #include <net/vxlan.h> 29 #include <net/erspan.h> 30 #include <net/gtp.h> 31 #include <net/pfcp.h> 32 #include <net/tc_wrapper.h> 33 34 #include <net/dst.h> 35 #include <net/dst_metadata.h> 36 37 #include <uapi/linux/netfilter/nf_conntrack_common.h> 38 39 #define TCA_FLOWER_KEY_CT_FLAGS_MAX \ 40 ((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1) 41 #define TCA_FLOWER_KEY_CT_FLAGS_MASK \ 42 (TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) 43 44 #define TCA_FLOWER_KEY_FLAGS_POLICY_MASK \ 45 (TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT | \ 46 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST) 47 48 #define TCA_FLOWER_KEY_ENC_FLAGS_POLICY_MASK \ 49 (TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM | \ 50 TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT | \ 51 TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM | \ 52 TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT) 53 54 struct fl_flow_key { 55 struct flow_dissector_key_meta meta; 56 struct flow_dissector_key_control control; 57 struct flow_dissector_key_control enc_control; 58 struct flow_dissector_key_basic basic; 59 struct flow_dissector_key_eth_addrs eth; 60 struct flow_dissector_key_vlan vlan; 61 struct flow_dissector_key_vlan cvlan; 62 struct flow_dissector_key_ipv4_addrs ipv4; 63 struct flow_dissector_key_ipv6_addrs ipv6; 64 struct flow_dissector_key_ports tp; 65 struct flow_dissector_key_icmp icmp; 66 struct flow_dissector_key_arp arp; 67 struct flow_dissector_key_keyid enc_key_id; 68 struct flow_dissector_key_ipv4_addrs enc_ipv4; 69 struct flow_dissector_key_ipv6_addrs enc_ipv6; 70 struct flow_dissector_key_ports enc_tp; 71 struct flow_dissector_key_mpls mpls; 72 struct flow_dissector_key_tcp tcp; 73 struct flow_dissector_key_ip ip; 74 struct flow_dissector_key_ip enc_ip; 75 struct flow_dissector_key_enc_opts enc_opts; 76 struct flow_dissector_key_ports_range tp_range; 77 struct flow_dissector_key_ct ct; 78 struct flow_dissector_key_hash hash; 79 struct flow_dissector_key_num_of_vlans num_of_vlans; 80 struct flow_dissector_key_pppoe pppoe; 81 struct flow_dissector_key_l2tpv3 l2tpv3; 82 struct flow_dissector_key_ipsec ipsec; 83 struct flow_dissector_key_cfm cfm; 84 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. 
*/ 85 86 struct fl_flow_mask_range { 87 unsigned short int start; 88 unsigned short int end; 89 }; 90 91 struct fl_flow_mask { 92 struct fl_flow_key key; 93 struct fl_flow_mask_range range; 94 u32 flags; 95 struct rhash_head ht_node; 96 struct rhashtable ht; 97 struct rhashtable_params filter_ht_params; 98 struct flow_dissector dissector; 99 struct list_head filters; 100 struct rcu_work rwork; 101 struct list_head list; 102 refcount_t refcnt; 103 }; 104 105 struct fl_flow_tmplt { 106 struct fl_flow_key dummy_key; 107 struct fl_flow_key mask; 108 struct flow_dissector dissector; 109 struct tcf_chain *chain; 110 }; 111 112 struct cls_fl_head { 113 struct rhashtable ht; 114 spinlock_t masks_lock; /* Protect masks list */ 115 struct list_head masks; 116 struct list_head hw_filters; 117 struct rcu_work rwork; 118 struct idr handle_idr; 119 }; 120 121 struct cls_fl_filter { 122 struct fl_flow_mask *mask; 123 struct rhash_head ht_node; 124 struct fl_flow_key mkey; 125 struct tcf_exts exts; 126 struct tcf_result res; 127 struct fl_flow_key key; 128 struct list_head list; 129 struct list_head hw_list; 130 u32 handle; 131 u32 flags; 132 u32 in_hw_count; 133 u8 needs_tc_skb_ext:1; 134 struct rcu_work rwork; 135 struct net_device *hw_dev; 136 /* Flower classifier is unlocked, which means that its reference counter 137 * can be changed concurrently without any kind of external 138 * synchronization. Use atomic reference counter to be concurrency-safe. 139 */ 140 refcount_t refcnt; 141 bool deleted; 142 }; 143 144 static const struct rhashtable_params mask_ht_params = { 145 .key_offset = offsetof(struct fl_flow_mask, key), 146 .key_len = sizeof(struct fl_flow_key), 147 .head_offset = offsetof(struct fl_flow_mask, ht_node), 148 .automatic_shrinking = true, 149 }; 150 151 static unsigned short int fl_mask_range(const struct fl_flow_mask *mask) 152 { 153 return mask->range.end - mask->range.start; 154 } 155 156 static void fl_mask_update_range(struct fl_flow_mask *mask) 157 { 158 const u8 *bytes = (const u8 *) &mask->key; 159 size_t size = sizeof(mask->key); 160 size_t i, first = 0, last; 161 162 for (i = 0; i < size; i++) { 163 if (bytes[i]) { 164 first = i; 165 break; 166 } 167 } 168 last = first; 169 for (i = size - 1; i != first; i--) { 170 if (bytes[i]) { 171 last = i; 172 break; 173 } 174 } 175 mask->range.start = rounddown(first, sizeof(long)); 176 mask->range.end = roundup(last + 1, sizeof(long)); 177 } 178 179 static void *fl_key_get_start(struct fl_flow_key *key, 180 const struct fl_flow_mask *mask) 181 { 182 return (u8 *) key + mask->range.start; 183 } 184 185 static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key, 186 struct fl_flow_mask *mask) 187 { 188 const long *lkey = fl_key_get_start(key, mask); 189 const long *lmask = fl_key_get_start(&mask->key, mask); 190 long *lmkey = fl_key_get_start(mkey, mask); 191 int i; 192 193 for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) 194 *lmkey++ = *lkey++ & *lmask++; 195 } 196 197 static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt, 198 struct fl_flow_mask *mask) 199 { 200 const long *lmask = fl_key_get_start(&mask->key, mask); 201 const long *ltmplt; 202 int i; 203 204 if (!tmplt) 205 return true; 206 ltmplt = fl_key_get_start(&tmplt->mask, mask); 207 for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) { 208 if (~*ltmplt++ & *lmask++) 209 return false; 210 } 211 return true; 212 } 213 214 static void fl_clear_masked_range(struct fl_flow_key *key, 215 struct fl_flow_mask *mask) 216 { 217 
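	/* Zero only the span of the key that this mask can match on, i.e. the
	 * [range.start, range.end) window computed by fl_mask_update_range().
	 */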
memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask)); 218 } 219 220 static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter, 221 struct fl_flow_key *key, 222 struct fl_flow_key *mkey) 223 { 224 u16 min_mask, max_mask, min_val, max_val; 225 226 min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst); 227 max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst); 228 min_val = ntohs(filter->key.tp_range.tp_min.dst); 229 max_val = ntohs(filter->key.tp_range.tp_max.dst); 230 231 if (min_mask && max_mask) { 232 if (ntohs(key->tp_range.tp.dst) < min_val || 233 ntohs(key->tp_range.tp.dst) > max_val) 234 return false; 235 236 /* skb does not have min and max values */ 237 mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst; 238 mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst; 239 } 240 return true; 241 } 242 243 static bool fl_range_port_src_cmp(struct cls_fl_filter *filter, 244 struct fl_flow_key *key, 245 struct fl_flow_key *mkey) 246 { 247 u16 min_mask, max_mask, min_val, max_val; 248 249 min_mask = ntohs(filter->mask->key.tp_range.tp_min.src); 250 max_mask = ntohs(filter->mask->key.tp_range.tp_max.src); 251 min_val = ntohs(filter->key.tp_range.tp_min.src); 252 max_val = ntohs(filter->key.tp_range.tp_max.src); 253 254 if (min_mask && max_mask) { 255 if (ntohs(key->tp_range.tp.src) < min_val || 256 ntohs(key->tp_range.tp.src) > max_val) 257 return false; 258 259 /* skb does not have min and max values */ 260 mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src; 261 mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src; 262 } 263 return true; 264 } 265 266 static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask, 267 struct fl_flow_key *mkey) 268 { 269 return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask), 270 mask->filter_ht_params); 271 } 272 273 static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask, 274 struct fl_flow_key *mkey, 275 struct fl_flow_key *key) 276 { 277 struct cls_fl_filter *filter, *f; 278 279 list_for_each_entry_rcu(filter, &mask->filters, list) { 280 if (!fl_range_port_dst_cmp(filter, key, mkey)) 281 continue; 282 283 if (!fl_range_port_src_cmp(filter, key, mkey)) 284 continue; 285 286 f = __fl_lookup(mask, mkey); 287 if (f) 288 return f; 289 } 290 return NULL; 291 } 292 293 static noinline_for_stack 294 struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key) 295 { 296 struct fl_flow_key mkey; 297 298 fl_set_masked_key(&mkey, key, mask); 299 if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE)) 300 return fl_lookup_range(mask, &mkey, key); 301 302 return __fl_lookup(mask, &mkey); 303 } 304 305 static u16 fl_ct_info_to_flower_map[] = { 306 [IP_CT_ESTABLISHED] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 307 TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED, 308 [IP_CT_RELATED] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 309 TCA_FLOWER_KEY_CT_FLAGS_RELATED, 310 [IP_CT_ESTABLISHED_REPLY] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 311 TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED | 312 TCA_FLOWER_KEY_CT_FLAGS_REPLY, 313 [IP_CT_RELATED_REPLY] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 314 TCA_FLOWER_KEY_CT_FLAGS_RELATED | 315 TCA_FLOWER_KEY_CT_FLAGS_REPLY, 316 [IP_CT_NEW] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 317 TCA_FLOWER_KEY_CT_FLAGS_NEW, 318 }; 319 320 TC_INDIRECT_SCOPE int fl_classify(struct sk_buff *skb, 321 const struct tcf_proto *tp, 322 struct tcf_result *res) 323 { 324 struct cls_fl_head *head = rcu_dereference_bh(tp->root); 325 bool post_ct = qdisc_skb_cb(skb)->post_ct; 326 u16 zone = tc_skb_cb(skb)->zone; 
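	/* skb_key is per-mask scratch space: each iteration below dissects only
	 * the fields covered by that mask into it, masks it, and looks the
	 * result up in that mask's own hash table of filters.
	 */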
327 struct fl_flow_key skb_key; 328 struct fl_flow_mask *mask; 329 struct cls_fl_filter *f; 330
331 list_for_each_entry_rcu(mask, &head->masks, list) { 332 flow_dissector_init_keys(&skb_key.control, &skb_key.basic); 333 fl_clear_masked_range(&skb_key, mask); 334
335 skb_flow_dissect_meta(skb, &mask->dissector, &skb_key); 336 /* skb_flow_dissect() does not set n_proto in case of an unknown 337 * protocol, so do it here. 338 */ 339 skb_key.basic.n_proto = skb_protocol(skb, false); 340 skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key); 341 skb_flow_dissect_ct(skb, &mask->dissector, &skb_key, 342 fl_ct_info_to_flower_map, 343 ARRAY_SIZE(fl_ct_info_to_flower_map), 344 post_ct, zone); 345 skb_flow_dissect_hash(skb, &mask->dissector, &skb_key); 346 skb_flow_dissect(skb, &mask->dissector, &skb_key, 347 FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP); 348
349 f = fl_mask_lookup(mask, &skb_key); 350 if (f && !tc_skip_sw(f->flags)) { 351 *res = f->res; 352 return tcf_exts_exec(skb, &f->exts, res); 353 } 354 } 355 return -1; 356 } 357
358 static int fl_init(struct tcf_proto *tp) 359 { 360 struct cls_fl_head *head; 361
362 head = kzalloc(sizeof(*head), GFP_KERNEL); 363 if (!head) 364 return -ENOBUFS; 365
366 spin_lock_init(&head->masks_lock); 367 INIT_LIST_HEAD_RCU(&head->masks); 368 INIT_LIST_HEAD(&head->hw_filters); 369 rcu_assign_pointer(tp->root, head); 370 idr_init(&head->handle_idr); 371
372 return rhashtable_init(&head->ht, &mask_ht_params); 373 } 374
375 static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done) 376 { 377 /* temporary masks don't have their filters list and ht initialized */ 378 if (mask_init_done) { 379 WARN_ON(!list_empty(&mask->filters)); 380 rhashtable_destroy(&mask->ht); 381 } 382 kfree(mask); 383 } 384
385 static void fl_mask_free_work(struct work_struct *work) 386 { 387 struct fl_flow_mask *mask = container_of(to_rcu_work(work), 388 struct fl_flow_mask, rwork); 389
390 fl_mask_free(mask, true); 391 } 392
393 static void fl_uninit_mask_free_work(struct work_struct *work) 394 { 395 struct fl_flow_mask *mask = container_of(to_rcu_work(work), 396 struct fl_flow_mask, rwork); 397
398 fl_mask_free(mask, false); 399 } 400
401 static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask) 402 { 403 if (!refcount_dec_and_test(&mask->refcnt)) 404 return false; 405
406 rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params); 407
408 spin_lock(&head->masks_lock); 409 list_del_rcu(&mask->list); 410 spin_unlock(&head->masks_lock); 411
412 tcf_queue_work(&mask->rwork, fl_mask_free_work); 413
414 return true; 415 } 416
417 static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp) 418 { 419 /* Flower classifier only changes root pointer during init and destroy. 420 * Users must obtain reference to tcf_proto instance before calling its 421 * API, so tp->root pointer is protected from concurrent call to 422 * fl_destroy() by reference counting.
423 */ 424 return rcu_dereference_raw(tp->root); 425 } 426 427 static void __fl_destroy_filter(struct cls_fl_filter *f) 428 { 429 if (f->needs_tc_skb_ext) 430 tc_skb_ext_tc_disable(); 431 tcf_exts_destroy(&f->exts); 432 tcf_exts_put_net(&f->exts); 433 kfree(f); 434 } 435 436 static void fl_destroy_filter_work(struct work_struct *work) 437 { 438 struct cls_fl_filter *f = container_of(to_rcu_work(work), 439 struct cls_fl_filter, rwork); 440 441 __fl_destroy_filter(f); 442 } 443 444 static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f, 445 bool rtnl_held, struct netlink_ext_ack *extack) 446 { 447 struct tcf_block *block = tp->chain->block; 448 struct flow_cls_offload cls_flower = {}; 449 450 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack); 451 cls_flower.command = FLOW_CLS_DESTROY; 452 cls_flower.cookie = (unsigned long) f; 453 454 tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false, 455 &f->flags, &f->in_hw_count, rtnl_held); 456 457 } 458 459 static int fl_hw_replace_filter(struct tcf_proto *tp, 460 struct cls_fl_filter *f, bool rtnl_held, 461 struct netlink_ext_ack *extack) 462 { 463 struct tcf_block *block = tp->chain->block; 464 struct flow_cls_offload cls_flower = {}; 465 bool skip_sw = tc_skip_sw(f->flags); 466 int err = 0; 467 468 cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts)); 469 if (!cls_flower.rule) 470 return -ENOMEM; 471 472 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack); 473 cls_flower.command = FLOW_CLS_REPLACE; 474 cls_flower.cookie = (unsigned long) f; 475 cls_flower.rule->match.dissector = &f->mask->dissector; 476 cls_flower.rule->match.mask = &f->mask->key; 477 cls_flower.rule->match.key = &f->mkey; 478 cls_flower.classid = f->res.classid; 479 480 err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts, 481 cls_flower.common.extack); 482 if (err) { 483 kfree(cls_flower.rule); 484 485 return skip_sw ? 
err : 0; 486 } 487 488 err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, 489 skip_sw, &f->flags, &f->in_hw_count, rtnl_held); 490 tc_cleanup_offload_action(&cls_flower.rule->action); 491 kfree(cls_flower.rule); 492 493 if (err) { 494 fl_hw_destroy_filter(tp, f, rtnl_held, NULL); 495 return err; 496 } 497 498 if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW)) 499 return -EINVAL; 500 501 return 0; 502 } 503 504 static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f, 505 bool rtnl_held) 506 { 507 struct tcf_block *block = tp->chain->block; 508 struct flow_cls_offload cls_flower = {}; 509 510 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL); 511 cls_flower.command = FLOW_CLS_STATS; 512 cls_flower.cookie = (unsigned long) f; 513 cls_flower.classid = f->res.classid; 514 515 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, 516 rtnl_held); 517 518 tcf_exts_hw_stats_update(&f->exts, &cls_flower.stats, cls_flower.use_act_stats); 519 } 520 521 static void __fl_put(struct cls_fl_filter *f) 522 { 523 if (!refcount_dec_and_test(&f->refcnt)) 524 return; 525 526 if (tcf_exts_get_net(&f->exts)) 527 tcf_queue_work(&f->rwork, fl_destroy_filter_work); 528 else 529 __fl_destroy_filter(f); 530 } 531 532 static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle) 533 { 534 struct cls_fl_filter *f; 535 536 rcu_read_lock(); 537 f = idr_find(&head->handle_idr, handle); 538 if (f && !refcount_inc_not_zero(&f->refcnt)) 539 f = NULL; 540 rcu_read_unlock(); 541 542 return f; 543 } 544 545 static struct tcf_exts *fl_get_exts(const struct tcf_proto *tp, u32 handle) 546 { 547 struct cls_fl_head *head = rcu_dereference_bh(tp->root); 548 struct cls_fl_filter *f; 549 550 f = idr_find(&head->handle_idr, handle); 551 return f ? 
&f->exts : NULL; 552 } 553 554 static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f, 555 bool *last, bool rtnl_held, 556 struct netlink_ext_ack *extack) 557 { 558 struct cls_fl_head *head = fl_head_dereference(tp); 559 560 *last = false; 561 562 spin_lock(&tp->lock); 563 if (f->deleted) { 564 spin_unlock(&tp->lock); 565 return -ENOENT; 566 } 567 568 f->deleted = true; 569 rhashtable_remove_fast(&f->mask->ht, &f->ht_node, 570 f->mask->filter_ht_params); 571 idr_remove(&head->handle_idr, f->handle); 572 list_del_rcu(&f->list); 573 spin_unlock(&tp->lock); 574 575 *last = fl_mask_put(head, f->mask); 576 if (!tc_skip_hw(f->flags)) 577 fl_hw_destroy_filter(tp, f, rtnl_held, extack); 578 tcf_unbind_filter(tp, &f->res); 579 __fl_put(f); 580 581 return 0; 582 } 583 584 static void fl_destroy_sleepable(struct work_struct *work) 585 { 586 struct cls_fl_head *head = container_of(to_rcu_work(work), 587 struct cls_fl_head, 588 rwork); 589 590 rhashtable_destroy(&head->ht); 591 kfree(head); 592 module_put(THIS_MODULE); 593 } 594 595 static void fl_destroy(struct tcf_proto *tp, bool rtnl_held, 596 struct netlink_ext_ack *extack) 597 { 598 struct cls_fl_head *head = fl_head_dereference(tp); 599 struct fl_flow_mask *mask, *next_mask; 600 struct cls_fl_filter *f, *next; 601 bool last; 602 603 list_for_each_entry_safe(mask, next_mask, &head->masks, list) { 604 list_for_each_entry_safe(f, next, &mask->filters, list) { 605 __fl_delete(tp, f, &last, rtnl_held, extack); 606 if (last) 607 break; 608 } 609 } 610 idr_destroy(&head->handle_idr); 611 612 __module_get(THIS_MODULE); 613 tcf_queue_work(&head->rwork, fl_destroy_sleepable); 614 } 615 616 static void fl_put(struct tcf_proto *tp, void *arg) 617 { 618 struct cls_fl_filter *f = arg; 619 620 __fl_put(f); 621 } 622 623 static void *fl_get(struct tcf_proto *tp, u32 handle) 624 { 625 struct cls_fl_head *head = fl_head_dereference(tp); 626 627 return __fl_get(head, handle); 628 } 629 630 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { 631 [TCA_FLOWER_UNSPEC] = { .strict_start_type = 632 TCA_FLOWER_L2_MISS }, 633 [TCA_FLOWER_CLASSID] = { .type = NLA_U32 }, 634 [TCA_FLOWER_INDEV] = { .type = NLA_STRING, 635 .len = IFNAMSIZ }, 636 [TCA_FLOWER_KEY_ETH_DST] = { .len = ETH_ALEN }, 637 [TCA_FLOWER_KEY_ETH_DST_MASK] = { .len = ETH_ALEN }, 638 [TCA_FLOWER_KEY_ETH_SRC] = { .len = ETH_ALEN }, 639 [TCA_FLOWER_KEY_ETH_SRC_MASK] = { .len = ETH_ALEN }, 640 [TCA_FLOWER_KEY_ETH_TYPE] = { .type = NLA_U16 }, 641 [TCA_FLOWER_KEY_IP_PROTO] = { .type = NLA_U8 }, 642 [TCA_FLOWER_KEY_IPV4_SRC] = { .type = NLA_U32 }, 643 [TCA_FLOWER_KEY_IPV4_SRC_MASK] = { .type = NLA_U32 }, 644 [TCA_FLOWER_KEY_IPV4_DST] = { .type = NLA_U32 }, 645 [TCA_FLOWER_KEY_IPV4_DST_MASK] = { .type = NLA_U32 }, 646 [TCA_FLOWER_KEY_IPV6_SRC] = { .len = sizeof(struct in6_addr) }, 647 [TCA_FLOWER_KEY_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) }, 648 [TCA_FLOWER_KEY_IPV6_DST] = { .len = sizeof(struct in6_addr) }, 649 [TCA_FLOWER_KEY_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) }, 650 [TCA_FLOWER_KEY_TCP_SRC] = { .type = NLA_U16 }, 651 [TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 }, 652 [TCA_FLOWER_KEY_UDP_SRC] = { .type = NLA_U16 }, 653 [TCA_FLOWER_KEY_UDP_DST] = { .type = NLA_U16 }, 654 [TCA_FLOWER_KEY_VLAN_ID] = { .type = NLA_U16 }, 655 [TCA_FLOWER_KEY_VLAN_PRIO] = { .type = NLA_U8 }, 656 [TCA_FLOWER_KEY_VLAN_ETH_TYPE] = { .type = NLA_U16 }, 657 [TCA_FLOWER_KEY_ENC_KEY_ID] = { .type = NLA_U32 }, 658 [TCA_FLOWER_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 }, 659 
[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 }, 660 [TCA_FLOWER_KEY_ENC_IPV4_DST] = { .type = NLA_U32 }, 661 [TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 }, 662 [TCA_FLOWER_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) }, 663 [TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) }, 664 [TCA_FLOWER_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) }, 665 [TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) }, 666 [TCA_FLOWER_KEY_TCP_SRC_MASK] = { .type = NLA_U16 }, 667 [TCA_FLOWER_KEY_TCP_DST_MASK] = { .type = NLA_U16 }, 668 [TCA_FLOWER_KEY_UDP_SRC_MASK] = { .type = NLA_U16 }, 669 [TCA_FLOWER_KEY_UDP_DST_MASK] = { .type = NLA_U16 }, 670 [TCA_FLOWER_KEY_SCTP_SRC_MASK] = { .type = NLA_U16 }, 671 [TCA_FLOWER_KEY_SCTP_DST_MASK] = { .type = NLA_U16 }, 672 [TCA_FLOWER_KEY_SCTP_SRC] = { .type = NLA_U16 }, 673 [TCA_FLOWER_KEY_SCTP_DST] = { .type = NLA_U16 }, 674 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT] = { .type = NLA_U16 }, 675 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 }, 676 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NLA_U16 }, 677 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 }, 678 [TCA_FLOWER_KEY_FLAGS] = NLA_POLICY_MASK(NLA_BE32, 679 TCA_FLOWER_KEY_FLAGS_POLICY_MASK), 680 [TCA_FLOWER_KEY_FLAGS_MASK] = NLA_POLICY_MASK(NLA_BE32, 681 TCA_FLOWER_KEY_FLAGS_POLICY_MASK), 682 [TCA_FLOWER_KEY_ICMPV4_TYPE] = { .type = NLA_U8 }, 683 [TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 }, 684 [TCA_FLOWER_KEY_ICMPV4_CODE] = { .type = NLA_U8 }, 685 [TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 }, 686 [TCA_FLOWER_KEY_ICMPV6_TYPE] = { .type = NLA_U8 }, 687 [TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 }, 688 [TCA_FLOWER_KEY_ICMPV6_CODE] = { .type = NLA_U8 }, 689 [TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 }, 690 [TCA_FLOWER_KEY_ARP_SIP] = { .type = NLA_U32 }, 691 [TCA_FLOWER_KEY_ARP_SIP_MASK] = { .type = NLA_U32 }, 692 [TCA_FLOWER_KEY_ARP_TIP] = { .type = NLA_U32 }, 693 [TCA_FLOWER_KEY_ARP_TIP_MASK] = { .type = NLA_U32 }, 694 [TCA_FLOWER_KEY_ARP_OP] = { .type = NLA_U8 }, 695 [TCA_FLOWER_KEY_ARP_OP_MASK] = { .type = NLA_U8 }, 696 [TCA_FLOWER_KEY_ARP_SHA] = { .len = ETH_ALEN }, 697 [TCA_FLOWER_KEY_ARP_SHA_MASK] = { .len = ETH_ALEN }, 698 [TCA_FLOWER_KEY_ARP_THA] = { .len = ETH_ALEN }, 699 [TCA_FLOWER_KEY_ARP_THA_MASK] = { .len = ETH_ALEN }, 700 [TCA_FLOWER_KEY_MPLS_TTL] = { .type = NLA_U8 }, 701 [TCA_FLOWER_KEY_MPLS_BOS] = { .type = NLA_U8 }, 702 [TCA_FLOWER_KEY_MPLS_TC] = { .type = NLA_U8 }, 703 [TCA_FLOWER_KEY_MPLS_LABEL] = { .type = NLA_U32 }, 704 [TCA_FLOWER_KEY_MPLS_OPTS] = { .type = NLA_NESTED }, 705 [TCA_FLOWER_KEY_TCP_FLAGS] = { .type = NLA_U16 }, 706 [TCA_FLOWER_KEY_TCP_FLAGS_MASK] = { .type = NLA_U16 }, 707 [TCA_FLOWER_KEY_IP_TOS] = { .type = NLA_U8 }, 708 [TCA_FLOWER_KEY_IP_TOS_MASK] = { .type = NLA_U8 }, 709 [TCA_FLOWER_KEY_IP_TTL] = { .type = NLA_U8 }, 710 [TCA_FLOWER_KEY_IP_TTL_MASK] = { .type = NLA_U8 }, 711 [TCA_FLOWER_KEY_CVLAN_ID] = { .type = NLA_U16 }, 712 [TCA_FLOWER_KEY_CVLAN_PRIO] = { .type = NLA_U8 }, 713 [TCA_FLOWER_KEY_CVLAN_ETH_TYPE] = { .type = NLA_U16 }, 714 [TCA_FLOWER_KEY_ENC_IP_TOS] = { .type = NLA_U8 }, 715 [TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 }, 716 [TCA_FLOWER_KEY_ENC_IP_TTL] = { .type = NLA_U8 }, 717 [TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 }, 718 [TCA_FLOWER_KEY_ENC_OPTS] = { .type = NLA_NESTED }, 719 [TCA_FLOWER_KEY_ENC_OPTS_MASK] = { .type = NLA_NESTED }, 720 [TCA_FLOWER_KEY_CT_STATE] = 721 NLA_POLICY_MASK(NLA_U16, 
TCA_FLOWER_KEY_CT_FLAGS_MASK), 722 [TCA_FLOWER_KEY_CT_STATE_MASK] = 723 NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK), 724 [TCA_FLOWER_KEY_CT_ZONE] = { .type = NLA_U16 }, 725 [TCA_FLOWER_KEY_CT_ZONE_MASK] = { .type = NLA_U16 }, 726 [TCA_FLOWER_KEY_CT_MARK] = { .type = NLA_U32 }, 727 [TCA_FLOWER_KEY_CT_MARK_MASK] = { .type = NLA_U32 }, 728 [TCA_FLOWER_KEY_CT_LABELS] = { .type = NLA_BINARY, 729 .len = 128 / BITS_PER_BYTE }, 730 [TCA_FLOWER_KEY_CT_LABELS_MASK] = { .type = NLA_BINARY, 731 .len = 128 / BITS_PER_BYTE }, 732 [TCA_FLOWER_FLAGS] = { .type = NLA_U32 }, 733 [TCA_FLOWER_KEY_HASH] = { .type = NLA_U32 }, 734 [TCA_FLOWER_KEY_HASH_MASK] = { .type = NLA_U32 }, 735 [TCA_FLOWER_KEY_NUM_OF_VLANS] = { .type = NLA_U8 }, 736 [TCA_FLOWER_KEY_PPPOE_SID] = { .type = NLA_U16 }, 737 [TCA_FLOWER_KEY_PPP_PROTO] = { .type = NLA_U16 }, 738 [TCA_FLOWER_KEY_L2TPV3_SID] = { .type = NLA_U32 }, 739 [TCA_FLOWER_KEY_SPI] = { .type = NLA_U32 }, 740 [TCA_FLOWER_KEY_SPI_MASK] = { .type = NLA_U32 }, 741 [TCA_FLOWER_L2_MISS] = NLA_POLICY_MAX(NLA_U8, 1), 742 [TCA_FLOWER_KEY_CFM] = { .type = NLA_NESTED }, 743 [TCA_FLOWER_KEY_ENC_FLAGS] = NLA_POLICY_MASK(NLA_BE32, 744 TCA_FLOWER_KEY_ENC_FLAGS_POLICY_MASK), 745 [TCA_FLOWER_KEY_ENC_FLAGS_MASK] = NLA_POLICY_MASK(NLA_BE32, 746 TCA_FLOWER_KEY_ENC_FLAGS_POLICY_MASK), 747 }; 748 749 static const struct nla_policy 750 enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = { 751 [TCA_FLOWER_KEY_ENC_OPTS_UNSPEC] = { 752 .strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN }, 753 [TCA_FLOWER_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED }, 754 [TCA_FLOWER_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED }, 755 [TCA_FLOWER_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED }, 756 [TCA_FLOWER_KEY_ENC_OPTS_GTP] = { .type = NLA_NESTED }, 757 [TCA_FLOWER_KEY_ENC_OPTS_PFCP] = { .type = NLA_NESTED }, 758 }; 759 760 static const struct nla_policy 761 geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = { 762 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 }, 763 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 }, 764 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY, 765 .len = 127 }, 766 }; 767 768 static const struct nla_policy 769 vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = { 770 [TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP] = { .type = NLA_U32 }, 771 }; 772 773 static const struct nla_policy 774 erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = { 775 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER] = { .type = NLA_U8 }, 776 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX] = { .type = NLA_U32 }, 777 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] = { .type = NLA_U8 }, 778 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID] = { .type = NLA_U8 }, 779 }; 780 781 static const struct nla_policy 782 gtp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1] = { 783 [TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] = { .type = NLA_U8 }, 784 [TCA_FLOWER_KEY_ENC_OPT_GTP_QFI] = { .type = NLA_U8 }, 785 }; 786 787 static const struct nla_policy 788 pfcp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX + 1] = { 789 [TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE] = { .type = NLA_U8 }, 790 [TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID] = { .type = NLA_U64 }, 791 }; 792 793 static const struct nla_policy 794 mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = { 795 [TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH] = { .type = NLA_U8 }, 796 [TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL] = { .type = NLA_U8 }, 797 [TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS] = { .type = NLA_U8 }, 798 [TCA_FLOWER_KEY_MPLS_OPT_LSE_TC] = { .type = NLA_U8 }, 799 [TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL] = { .type = NLA_U32 
}, 800 }; 801 802 static const struct nla_policy 803 cfm_opt_policy[TCA_FLOWER_KEY_CFM_OPT_MAX + 1] = { 804 [TCA_FLOWER_KEY_CFM_MD_LEVEL] = NLA_POLICY_MAX(NLA_U8, 805 FLOW_DIS_CFM_MDL_MAX), 806 [TCA_FLOWER_KEY_CFM_OPCODE] = { .type = NLA_U8 }, 807 }; 808 809 static void fl_set_key_val(struct nlattr **tb, 810 void *val, int val_type, 811 void *mask, int mask_type, int len) 812 { 813 if (!tb[val_type]) 814 return; 815 nla_memcpy(val, tb[val_type], len); 816 if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type]) 817 memset(mask, 0xff, len); 818 else 819 nla_memcpy(mask, tb[mask_type], len); 820 } 821 822 static int fl_set_key_spi(struct nlattr **tb, struct fl_flow_key *key, 823 struct fl_flow_key *mask, 824 struct netlink_ext_ack *extack) 825 { 826 if (key->basic.ip_proto != IPPROTO_ESP && 827 key->basic.ip_proto != IPPROTO_AH) { 828 NL_SET_ERR_MSG(extack, 829 "Protocol must be either ESP or AH"); 830 return -EINVAL; 831 } 832 833 fl_set_key_val(tb, &key->ipsec.spi, 834 TCA_FLOWER_KEY_SPI, 835 &mask->ipsec.spi, TCA_FLOWER_KEY_SPI_MASK, 836 sizeof(key->ipsec.spi)); 837 return 0; 838 } 839 840 static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key, 841 struct fl_flow_key *mask, 842 struct netlink_ext_ack *extack) 843 { 844 fl_set_key_val(tb, &key->tp_range.tp_min.dst, 845 TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst, 846 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst)); 847 fl_set_key_val(tb, &key->tp_range.tp_max.dst, 848 TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst, 849 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst)); 850 fl_set_key_val(tb, &key->tp_range.tp_min.src, 851 TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src, 852 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src)); 853 fl_set_key_val(tb, &key->tp_range.tp_max.src, 854 TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src, 855 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src)); 856 857 if (mask->tp_range.tp_min.dst != mask->tp_range.tp_max.dst) { 858 NL_SET_ERR_MSG(extack, 859 "Both min and max destination ports must be specified"); 860 return -EINVAL; 861 } 862 if (mask->tp_range.tp_min.src != mask->tp_range.tp_max.src) { 863 NL_SET_ERR_MSG(extack, 864 "Both min and max source ports must be specified"); 865 return -EINVAL; 866 } 867 if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst && 868 ntohs(key->tp_range.tp_max.dst) <= 869 ntohs(key->tp_range.tp_min.dst)) { 870 NL_SET_ERR_MSG_ATTR(extack, 871 tb[TCA_FLOWER_KEY_PORT_DST_MIN], 872 "Invalid destination port range (min must be strictly smaller than max)"); 873 return -EINVAL; 874 } 875 if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src && 876 ntohs(key->tp_range.tp_max.src) <= 877 ntohs(key->tp_range.tp_min.src)) { 878 NL_SET_ERR_MSG_ATTR(extack, 879 tb[TCA_FLOWER_KEY_PORT_SRC_MIN], 880 "Invalid source port range (min must be strictly smaller than max)"); 881 return -EINVAL; 882 } 883 884 return 0; 885 } 886 887 static int fl_set_key_mpls_lse(const struct nlattr *nla_lse, 888 struct flow_dissector_key_mpls *key_val, 889 struct flow_dissector_key_mpls *key_mask, 890 struct netlink_ext_ack *extack) 891 { 892 struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1]; 893 struct flow_dissector_mpls_lse *lse_mask; 894 struct flow_dissector_mpls_lse *lse_val; 895 u8 lse_index; 896 u8 depth; 897 int err; 898 899 err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse, 900 mpls_stack_entry_policy, extack); 901 if (err < 0) 902 return err; 903 904 if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) { 905 
NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\""); 906 return -EINVAL; 907 } 908 909 depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]); 910 911 /* LSE depth starts at 1, for consistency with terminology used by 912 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets. 913 */ 914 if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) { 915 NL_SET_ERR_MSG_ATTR(extack, 916 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH], 917 "Invalid MPLS depth"); 918 return -EINVAL; 919 } 920 lse_index = depth - 1; 921 922 dissector_set_mpls_lse(key_val, lse_index); 923 dissector_set_mpls_lse(key_mask, lse_index); 924 925 lse_val = &key_val->ls[lse_index]; 926 lse_mask = &key_mask->ls[lse_index]; 927 928 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) { 929 lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]); 930 lse_mask->mpls_ttl = MPLS_TTL_MASK; 931 } 932 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) { 933 u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]); 934 935 if (bos & ~MPLS_BOS_MASK) { 936 NL_SET_ERR_MSG_ATTR(extack, 937 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS], 938 "Bottom Of Stack (BOS) must be 0 or 1"); 939 return -EINVAL; 940 } 941 lse_val->mpls_bos = bos; 942 lse_mask->mpls_bos = MPLS_BOS_MASK; 943 } 944 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) { 945 u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]); 946 947 if (tc & ~MPLS_TC_MASK) { 948 NL_SET_ERR_MSG_ATTR(extack, 949 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC], 950 "Traffic Class (TC) must be between 0 and 7"); 951 return -EINVAL; 952 } 953 lse_val->mpls_tc = tc; 954 lse_mask->mpls_tc = MPLS_TC_MASK; 955 } 956 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) { 957 u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]); 958 959 if (label & ~MPLS_LABEL_MASK) { 960 NL_SET_ERR_MSG_ATTR(extack, 961 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL], 962 "Label must be between 0 and 1048575"); 963 return -EINVAL; 964 } 965 lse_val->mpls_label = label; 966 lse_mask->mpls_label = MPLS_LABEL_MASK; 967 } 968 969 return 0; 970 } 971 972 static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts, 973 struct flow_dissector_key_mpls *key_val, 974 struct flow_dissector_key_mpls *key_mask, 975 struct netlink_ext_ack *extack) 976 { 977 struct nlattr *nla_lse; 978 int rem; 979 int err; 980 981 if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) { 982 NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts, 983 "NLA_F_NESTED is missing"); 984 return -EINVAL; 985 } 986 987 nla_for_each_nested(nla_lse, nla_mpls_opts, rem) { 988 if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) { 989 NL_SET_ERR_MSG_ATTR(extack, nla_lse, 990 "Invalid MPLS option type"); 991 return -EINVAL; 992 } 993 994 err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack); 995 if (err < 0) 996 return err; 997 } 998 if (rem) { 999 NL_SET_ERR_MSG(extack, 1000 "Bytes leftover after parsing MPLS options"); 1001 return -EINVAL; 1002 } 1003 1004 return 0; 1005 } 1006 1007 static int fl_set_key_mpls(struct nlattr **tb, 1008 struct flow_dissector_key_mpls *key_val, 1009 struct flow_dissector_key_mpls *key_mask, 1010 struct netlink_ext_ack *extack) 1011 { 1012 struct flow_dissector_mpls_lse *lse_mask; 1013 struct flow_dissector_mpls_lse *lse_val; 1014 1015 if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) { 1016 if (tb[TCA_FLOWER_KEY_MPLS_TTL] || 1017 tb[TCA_FLOWER_KEY_MPLS_BOS] || 1018 tb[TCA_FLOWER_KEY_MPLS_TC] || 1019 tb[TCA_FLOWER_KEY_MPLS_LABEL]) { 1020 NL_SET_ERR_MSG_ATTR(extack, 1021 tb[TCA_FLOWER_KEY_MPLS_OPTS], 1022 "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS 
options attribute"); 1023 return -EBADMSG; 1024 } 1025 1026 return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS], 1027 key_val, key_mask, extack); 1028 } 1029 1030 lse_val = &key_val->ls[0]; 1031 lse_mask = &key_mask->ls[0]; 1032 1033 if (tb[TCA_FLOWER_KEY_MPLS_TTL]) { 1034 lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]); 1035 lse_mask->mpls_ttl = MPLS_TTL_MASK; 1036 dissector_set_mpls_lse(key_val, 0); 1037 dissector_set_mpls_lse(key_mask, 0); 1038 } 1039 if (tb[TCA_FLOWER_KEY_MPLS_BOS]) { 1040 u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]); 1041 1042 if (bos & ~MPLS_BOS_MASK) { 1043 NL_SET_ERR_MSG_ATTR(extack, 1044 tb[TCA_FLOWER_KEY_MPLS_BOS], 1045 "Bottom Of Stack (BOS) must be 0 or 1"); 1046 return -EINVAL; 1047 } 1048 lse_val->mpls_bos = bos; 1049 lse_mask->mpls_bos = MPLS_BOS_MASK; 1050 dissector_set_mpls_lse(key_val, 0); 1051 dissector_set_mpls_lse(key_mask, 0); 1052 } 1053 if (tb[TCA_FLOWER_KEY_MPLS_TC]) { 1054 u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]); 1055 1056 if (tc & ~MPLS_TC_MASK) { 1057 NL_SET_ERR_MSG_ATTR(extack, 1058 tb[TCA_FLOWER_KEY_MPLS_TC], 1059 "Traffic Class (TC) must be between 0 and 7"); 1060 return -EINVAL; 1061 } 1062 lse_val->mpls_tc = tc; 1063 lse_mask->mpls_tc = MPLS_TC_MASK; 1064 dissector_set_mpls_lse(key_val, 0); 1065 dissector_set_mpls_lse(key_mask, 0); 1066 } 1067 if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) { 1068 u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]); 1069 1070 if (label & ~MPLS_LABEL_MASK) { 1071 NL_SET_ERR_MSG_ATTR(extack, 1072 tb[TCA_FLOWER_KEY_MPLS_LABEL], 1073 "Label must be between 0 and 1048575"); 1074 return -EINVAL; 1075 } 1076 lse_val->mpls_label = label; 1077 lse_mask->mpls_label = MPLS_LABEL_MASK; 1078 dissector_set_mpls_lse(key_val, 0); 1079 dissector_set_mpls_lse(key_mask, 0); 1080 } 1081 return 0; 1082 } 1083 1084 static void fl_set_key_vlan(struct nlattr **tb, 1085 __be16 ethertype, 1086 int vlan_id_key, int vlan_prio_key, 1087 int vlan_next_eth_type_key, 1088 struct flow_dissector_key_vlan *key_val, 1089 struct flow_dissector_key_vlan *key_mask) 1090 { 1091 #define VLAN_PRIORITY_MASK 0x7 1092 1093 if (tb[vlan_id_key]) { 1094 key_val->vlan_id = 1095 nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK; 1096 key_mask->vlan_id = VLAN_VID_MASK; 1097 } 1098 if (tb[vlan_prio_key]) { 1099 key_val->vlan_priority = 1100 nla_get_u8(tb[vlan_prio_key]) & 1101 VLAN_PRIORITY_MASK; 1102 key_mask->vlan_priority = VLAN_PRIORITY_MASK; 1103 } 1104 if (ethertype) { 1105 key_val->vlan_tpid = ethertype; 1106 key_mask->vlan_tpid = cpu_to_be16(~0); 1107 } 1108 if (tb[vlan_next_eth_type_key]) { 1109 key_val->vlan_eth_type = 1110 nla_get_be16(tb[vlan_next_eth_type_key]); 1111 key_mask->vlan_eth_type = cpu_to_be16(~0); 1112 } 1113 } 1114 1115 static void fl_set_key_pppoe(struct nlattr **tb, 1116 struct flow_dissector_key_pppoe *key_val, 1117 struct flow_dissector_key_pppoe *key_mask, 1118 struct fl_flow_key *key, 1119 struct fl_flow_key *mask) 1120 { 1121 /* key_val::type must be set to ETH_P_PPP_SES 1122 * because ETH_P_PPP_SES was stored in basic.n_proto 1123 * which might get overwritten by ppp_proto 1124 * or might be set to 0, the role of key_val::type 1125 * is similar to vlan_key::tpid 1126 */ 1127 key_val->type = htons(ETH_P_PPP_SES); 1128 key_mask->type = cpu_to_be16(~0); 1129 1130 if (tb[TCA_FLOWER_KEY_PPPOE_SID]) { 1131 key_val->session_id = 1132 nla_get_be16(tb[TCA_FLOWER_KEY_PPPOE_SID]); 1133 key_mask->session_id = cpu_to_be16(~0); 1134 } 1135 if (tb[TCA_FLOWER_KEY_PPP_PROTO]) { 1136 key_val->ppp_proto = 1137 
nla_get_be16(tb[TCA_FLOWER_KEY_PPP_PROTO]); 1138 key_mask->ppp_proto = cpu_to_be16(~0); 1139 1140 if (key_val->ppp_proto == htons(PPP_IP)) { 1141 key->basic.n_proto = htons(ETH_P_IP); 1142 mask->basic.n_proto = cpu_to_be16(~0); 1143 } else if (key_val->ppp_proto == htons(PPP_IPV6)) { 1144 key->basic.n_proto = htons(ETH_P_IPV6); 1145 mask->basic.n_proto = cpu_to_be16(~0); 1146 } else if (key_val->ppp_proto == htons(PPP_MPLS_UC)) { 1147 key->basic.n_proto = htons(ETH_P_MPLS_UC); 1148 mask->basic.n_proto = cpu_to_be16(~0); 1149 } else if (key_val->ppp_proto == htons(PPP_MPLS_MC)) { 1150 key->basic.n_proto = htons(ETH_P_MPLS_MC); 1151 mask->basic.n_proto = cpu_to_be16(~0); 1152 } 1153 } else { 1154 key->basic.n_proto = 0; 1155 mask->basic.n_proto = cpu_to_be16(0); 1156 } 1157 } 1158 1159 static void fl_set_key_flag(u32 flower_key, u32 flower_mask, 1160 u32 *dissector_key, u32 *dissector_mask, 1161 u32 flower_flag_bit, u32 dissector_flag_bit) 1162 { 1163 if (flower_mask & flower_flag_bit) { 1164 *dissector_mask |= dissector_flag_bit; 1165 if (flower_key & flower_flag_bit) 1166 *dissector_key |= dissector_flag_bit; 1167 } 1168 } 1169 1170 static int fl_set_key_flags(struct nlattr *tca_opts, struct nlattr **tb, 1171 bool encap, u32 *flags_key, u32 *flags_mask, 1172 struct netlink_ext_ack *extack) 1173 { 1174 int fl_key, fl_mask; 1175 u32 key, mask; 1176 1177 if (encap) { 1178 fl_key = TCA_FLOWER_KEY_ENC_FLAGS; 1179 fl_mask = TCA_FLOWER_KEY_ENC_FLAGS_MASK; 1180 } else { 1181 fl_key = TCA_FLOWER_KEY_FLAGS; 1182 fl_mask = TCA_FLOWER_KEY_FLAGS_MASK; 1183 } 1184 1185 /* mask is mandatory for flags */ 1186 if (NL_REQ_ATTR_CHECK(extack, tca_opts, tb, fl_mask)) { 1187 NL_SET_ERR_MSG(extack, "Missing flags mask"); 1188 return -EINVAL; 1189 } 1190 1191 key = be32_to_cpu(nla_get_be32(tb[fl_key])); 1192 mask = be32_to_cpu(nla_get_be32(tb[fl_mask])); 1193 1194 *flags_key = 0; 1195 *flags_mask = 0; 1196 1197 fl_set_key_flag(key, mask, flags_key, flags_mask, 1198 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT); 1199 fl_set_key_flag(key, mask, flags_key, flags_mask, 1200 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST, 1201 FLOW_DIS_FIRST_FRAG); 1202 1203 fl_set_key_flag(key, mask, flags_key, flags_mask, 1204 TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM, 1205 FLOW_DIS_F_TUNNEL_CSUM); 1206 1207 fl_set_key_flag(key, mask, flags_key, flags_mask, 1208 TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT, 1209 FLOW_DIS_F_TUNNEL_DONT_FRAGMENT); 1210 1211 fl_set_key_flag(key, mask, flags_key, flags_mask, 1212 TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM, FLOW_DIS_F_TUNNEL_OAM); 1213 1214 fl_set_key_flag(key, mask, flags_key, flags_mask, 1215 TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT, 1216 FLOW_DIS_F_TUNNEL_CRIT_OPT); 1217 1218 return 0; 1219 } 1220 1221 static void fl_set_key_ip(struct nlattr **tb, bool encap, 1222 struct flow_dissector_key_ip *key, 1223 struct flow_dissector_key_ip *mask) 1224 { 1225 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS; 1226 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL; 1227 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK; 1228 int ttl_mask = encap ? 
TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK; 1229
1230 fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)); 1231 fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)); 1232 } 1233
1234 static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key, 1235 int depth, int option_len, 1236 struct netlink_ext_ack *extack) 1237 { 1238 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1]; 1239 struct nlattr *class = NULL, *type = NULL, *data = NULL; 1240 struct geneve_opt *opt; 1241 int err, data_len = 0; 1242
1243 if (option_len > sizeof(struct geneve_opt)) 1244 data_len = option_len - sizeof(struct geneve_opt); 1245
1246 if (key->enc_opts.len > FLOW_DIS_TUN_OPTS_MAX - 4) 1247 return -ERANGE; 1248
1249 opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len]; 1250 memset(opt, 0xff, option_len); 1251 opt->length = data_len / 4; 1252 opt->r1 = 0; 1253 opt->r2 = 0; 1254 opt->r3 = 0; 1255
1256 /* If no mask has been provided we assume an exact match. */ 1257 if (!depth) 1258 return sizeof(struct geneve_opt) + data_len; 1259
1260 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) { 1261 NL_SET_ERR_MSG(extack, "Non-geneve option type for mask"); 1262 return -EINVAL; 1263 } 1264
1265 err = nla_parse_nested_deprecated(tb, 1266 TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX, 1267 nla, geneve_opt_policy, extack); 1268 if (err < 0) 1269 return err; 1270
1271 /* We are not allowed to omit any of CLASS, TYPE or DATA 1272 * fields from the key. 1273 */ 1274 if (!option_len && 1275 (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] || 1276 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] || 1277 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) { 1278 NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data"); 1279 return -EINVAL; 1280 } 1281
1282 /* Omitting any of CLASS, TYPE or DATA fields is allowed 1283 * for the mask.
1284 */ 1285 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) { 1286 int new_len = key->enc_opts.len; 1287 1288 data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]; 1289 data_len = nla_len(data); 1290 if (data_len < 4) { 1291 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long"); 1292 return -ERANGE; 1293 } 1294 if (data_len % 4) { 1295 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long"); 1296 return -ERANGE; 1297 } 1298 1299 new_len += sizeof(struct geneve_opt) + data_len; 1300 BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX); 1301 if (new_len > FLOW_DIS_TUN_OPTS_MAX) { 1302 NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size"); 1303 return -ERANGE; 1304 } 1305 opt->length = data_len / 4; 1306 memcpy(opt->opt_data, nla_data(data), data_len); 1307 } 1308 1309 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) { 1310 class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]; 1311 opt->opt_class = nla_get_be16(class); 1312 } 1313 1314 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) { 1315 type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]; 1316 opt->type = nla_get_u8(type); 1317 } 1318 1319 return sizeof(struct geneve_opt) + data_len; 1320 } 1321 1322 static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key, 1323 int depth, int option_len, 1324 struct netlink_ext_ack *extack) 1325 { 1326 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1]; 1327 struct vxlan_metadata *md; 1328 int err; 1329 1330 md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len]; 1331 memset(md, 0xff, sizeof(*md)); 1332 1333 if (!depth) 1334 return sizeof(*md); 1335 1336 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) { 1337 NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask"); 1338 return -EINVAL; 1339 } 1340 1341 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla, 1342 vxlan_opt_policy, extack); 1343 if (err < 0) 1344 return err; 1345 1346 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) { 1347 NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp"); 1348 return -EINVAL; 1349 } 1350 1351 if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) { 1352 md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]); 1353 md->gbp &= VXLAN_GBP_MASK; 1354 } 1355 1356 return sizeof(*md); 1357 } 1358 1359 static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key, 1360 int depth, int option_len, 1361 struct netlink_ext_ack *extack) 1362 { 1363 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1]; 1364 struct erspan_metadata *md; 1365 int err; 1366 1367 md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len]; 1368 md->version = 1; 1369 1370 if (!depth) 1371 return sizeof(*md); 1372 1373 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) { 1374 NL_SET_ERR_MSG(extack, "Non-erspan option type for mask"); 1375 return -EINVAL; 1376 } 1377 1378 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla, 1379 erspan_opt_policy, extack); 1380 if (err < 0) 1381 return err; 1382 1383 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) { 1384 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver"); 1385 return -EINVAL; 1386 } 1387 1388 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) 1389 md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]); 1390 1391 if (md->version == 1) { 1392 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) { 1393 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index"); 1394 return -EINVAL; 1395 } 1396 memset(&md->u.index, 0xff, 
sizeof(md->u.index)); 1397 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) { 1398 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]; 1399 md->u.index = nla_get_be32(nla); 1400 } 1401 } else if (md->version == 2) { 1402 if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] || 1403 !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) { 1404 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid"); 1405 return -EINVAL; 1406 } 1407 md->u.md2.dir = 1; 1408 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) { 1409 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]; 1410 md->u.md2.dir = nla_get_u8(nla); 1411 } 1412 set_hwid(&md->u.md2, 0xff); 1413 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) { 1414 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]; 1415 set_hwid(&md->u.md2, nla_get_u8(nla)); 1416 } 1417 } else { 1418 NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect"); 1419 return -EINVAL; 1420 } 1421 1422 return sizeof(*md); 1423 } 1424 1425 static int fl_set_gtp_opt(const struct nlattr *nla, struct fl_flow_key *key, 1426 int depth, int option_len, 1427 struct netlink_ext_ack *extack) 1428 { 1429 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1]; 1430 struct gtp_pdu_session_info *sinfo; 1431 u8 len = key->enc_opts.len; 1432 int err; 1433 1434 sinfo = (struct gtp_pdu_session_info *)&key->enc_opts.data[len]; 1435 memset(sinfo, 0xff, option_len); 1436 1437 if (!depth) 1438 return sizeof(*sinfo); 1439 1440 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GTP) { 1441 NL_SET_ERR_MSG_MOD(extack, "Non-gtp option type for mask"); 1442 return -EINVAL; 1443 } 1444 1445 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_GTP_MAX, nla, 1446 gtp_opt_policy, extack); 1447 if (err < 0) 1448 return err; 1449 1450 if (!option_len && 1451 (!tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] || 1452 !tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])) { 1453 NL_SET_ERR_MSG_MOD(extack, 1454 "Missing tunnel key gtp option pdu type or qfi"); 1455 return -EINVAL; 1456 } 1457 1458 if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]) 1459 sinfo->pdu_type = 1460 nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]); 1461 1462 if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]) 1463 sinfo->qfi = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]); 1464 1465 return sizeof(*sinfo); 1466 } 1467 1468 static int fl_set_pfcp_opt(const struct nlattr *nla, struct fl_flow_key *key, 1469 int depth, int option_len, 1470 struct netlink_ext_ack *extack) 1471 { 1472 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX + 1]; 1473 struct pfcp_metadata *md; 1474 int err; 1475 1476 md = (struct pfcp_metadata *)&key->enc_opts.data[key->enc_opts.len]; 1477 memset(md, 0xff, sizeof(*md)); 1478 1479 if (!depth) 1480 return sizeof(*md); 1481 1482 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_PFCP) { 1483 NL_SET_ERR_MSG_MOD(extack, "Non-pfcp option type for mask"); 1484 return -EINVAL; 1485 } 1486 1487 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX, nla, 1488 pfcp_opt_policy, extack); 1489 if (err < 0) 1490 return err; 1491 1492 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE]) { 1493 NL_SET_ERR_MSG_MOD(extack, "Missing tunnel key pfcp option type"); 1494 return -EINVAL; 1495 } 1496 1497 if (tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE]) 1498 md->type = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE]); 1499 1500 if (tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID]) 1501 md->seid = nla_get_be64(tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID]); 1502 1503 return sizeof(*md); 1504 } 1505 1506 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key, 1507 struct fl_flow_key *mask, 1508 struct netlink_ext_ack 
*extack) 1509 { 1510 const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL; 1511 int err, option_len, key_depth, msk_depth = 0; 1512 1513 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS], 1514 TCA_FLOWER_KEY_ENC_OPTS_MAX, 1515 enc_opts_policy, extack); 1516 if (err) 1517 return err; 1518 1519 nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]); 1520 1521 if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) { 1522 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK], 1523 TCA_FLOWER_KEY_ENC_OPTS_MAX, 1524 enc_opts_policy, extack); 1525 if (err) 1526 return err; 1527 1528 nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]); 1529 msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]); 1530 if (!nla_ok(nla_opt_msk, msk_depth)) { 1531 NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks"); 1532 return -EINVAL; 1533 } 1534 } 1535 1536 nla_for_each_attr(nla_opt_key, nla_enc_key, 1537 nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) { 1538 switch (nla_type(nla_opt_key)) { 1539 case TCA_FLOWER_KEY_ENC_OPTS_GENEVE: 1540 if (key->enc_opts.dst_opt_type && 1541 key->enc_opts.dst_opt_type != 1542 IP_TUNNEL_GENEVE_OPT_BIT) { 1543 NL_SET_ERR_MSG(extack, "Duplicate type for geneve options"); 1544 return -EINVAL; 1545 } 1546 option_len = 0; 1547 key->enc_opts.dst_opt_type = IP_TUNNEL_GENEVE_OPT_BIT; 1548 option_len = fl_set_geneve_opt(nla_opt_key, key, 1549 key_depth, option_len, 1550 extack); 1551 if (option_len < 0) 1552 return option_len; 1553 1554 key->enc_opts.len += option_len; 1555 /* At the same time we need to parse through the mask 1556 * in order to verify exact and mask attribute lengths. 1557 */ 1558 mask->enc_opts.dst_opt_type = IP_TUNNEL_GENEVE_OPT_BIT; 1559 option_len = fl_set_geneve_opt(nla_opt_msk, mask, 1560 msk_depth, option_len, 1561 extack); 1562 if (option_len < 0) 1563 return option_len; 1564 1565 mask->enc_opts.len += option_len; 1566 if (key->enc_opts.len != mask->enc_opts.len) { 1567 NL_SET_ERR_MSG(extack, "Key and mask miss aligned"); 1568 return -EINVAL; 1569 } 1570 break; 1571 case TCA_FLOWER_KEY_ENC_OPTS_VXLAN: 1572 if (key->enc_opts.dst_opt_type) { 1573 NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options"); 1574 return -EINVAL; 1575 } 1576 option_len = 0; 1577 key->enc_opts.dst_opt_type = IP_TUNNEL_VXLAN_OPT_BIT; 1578 option_len = fl_set_vxlan_opt(nla_opt_key, key, 1579 key_depth, option_len, 1580 extack); 1581 if (option_len < 0) 1582 return option_len; 1583 1584 key->enc_opts.len += option_len; 1585 /* At the same time we need to parse through the mask 1586 * in order to verify exact and mask attribute lengths. 
1587 */ 1588 mask->enc_opts.dst_opt_type = IP_TUNNEL_VXLAN_OPT_BIT; 1589 option_len = fl_set_vxlan_opt(nla_opt_msk, mask, 1590 msk_depth, option_len, 1591 extack); 1592 if (option_len < 0) 1593 return option_len; 1594 1595 mask->enc_opts.len += option_len; 1596 if (key->enc_opts.len != mask->enc_opts.len) { 1597 NL_SET_ERR_MSG(extack, "Key and mask miss aligned"); 1598 return -EINVAL; 1599 } 1600 break; 1601 case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN: 1602 if (key->enc_opts.dst_opt_type) { 1603 NL_SET_ERR_MSG(extack, "Duplicate type for erspan options"); 1604 return -EINVAL; 1605 } 1606 option_len = 0; 1607 key->enc_opts.dst_opt_type = IP_TUNNEL_ERSPAN_OPT_BIT; 1608 option_len = fl_set_erspan_opt(nla_opt_key, key, 1609 key_depth, option_len, 1610 extack); 1611 if (option_len < 0) 1612 return option_len; 1613 1614 key->enc_opts.len += option_len; 1615 /* At the same time we need to parse through the mask 1616 * in order to verify exact and mask attribute lengths. 1617 */ 1618 mask->enc_opts.dst_opt_type = IP_TUNNEL_ERSPAN_OPT_BIT; 1619 option_len = fl_set_erspan_opt(nla_opt_msk, mask, 1620 msk_depth, option_len, 1621 extack); 1622 if (option_len < 0) 1623 return option_len; 1624 1625 mask->enc_opts.len += option_len; 1626 if (key->enc_opts.len != mask->enc_opts.len) { 1627 NL_SET_ERR_MSG(extack, "Key and mask miss aligned"); 1628 return -EINVAL; 1629 } 1630 break; 1631 case TCA_FLOWER_KEY_ENC_OPTS_GTP: 1632 if (key->enc_opts.dst_opt_type) { 1633 NL_SET_ERR_MSG_MOD(extack, 1634 "Duplicate type for gtp options"); 1635 return -EINVAL; 1636 } 1637 option_len = 0; 1638 key->enc_opts.dst_opt_type = IP_TUNNEL_GTP_OPT_BIT; 1639 option_len = fl_set_gtp_opt(nla_opt_key, key, 1640 key_depth, option_len, 1641 extack); 1642 if (option_len < 0) 1643 return option_len; 1644 1645 key->enc_opts.len += option_len; 1646 /* At the same time we need to parse through the mask 1647 * in order to verify exact and mask attribute lengths. 1648 */ 1649 mask->enc_opts.dst_opt_type = IP_TUNNEL_GTP_OPT_BIT; 1650 option_len = fl_set_gtp_opt(nla_opt_msk, mask, 1651 msk_depth, option_len, 1652 extack); 1653 if (option_len < 0) 1654 return option_len; 1655 1656 mask->enc_opts.len += option_len; 1657 if (key->enc_opts.len != mask->enc_opts.len) { 1658 NL_SET_ERR_MSG_MOD(extack, 1659 "Key and mask miss aligned"); 1660 return -EINVAL; 1661 } 1662 break; 1663 case TCA_FLOWER_KEY_ENC_OPTS_PFCP: 1664 if (key->enc_opts.dst_opt_type) { 1665 NL_SET_ERR_MSG_MOD(extack, "Duplicate type for pfcp options"); 1666 return -EINVAL; 1667 } 1668 option_len = 0; 1669 key->enc_opts.dst_opt_type = IP_TUNNEL_PFCP_OPT_BIT; 1670 option_len = fl_set_pfcp_opt(nla_opt_key, key, 1671 key_depth, option_len, 1672 extack); 1673 if (option_len < 0) 1674 return option_len; 1675 1676 key->enc_opts.len += option_len; 1677 /* At the same time we need to parse through the mask 1678 * in order to verify exact and mask attribute lengths. 
1679 */ 1680 mask->enc_opts.dst_opt_type = IP_TUNNEL_PFCP_OPT_BIT; 1681 option_len = fl_set_pfcp_opt(nla_opt_msk, mask, 1682 msk_depth, option_len, 1683 extack); 1684 if (option_len < 0) 1685 return option_len; 1686 1687 mask->enc_opts.len += option_len; 1688 if (key->enc_opts.len != mask->enc_opts.len) { 1689 NL_SET_ERR_MSG_MOD(extack, "Key and mask miss aligned"); 1690 return -EINVAL; 1691 } 1692 break; 1693 default: 1694 NL_SET_ERR_MSG(extack, "Unknown tunnel option type"); 1695 return -EINVAL; 1696 } 1697 1698 if (!msk_depth) 1699 continue; 1700 1701 if (!nla_ok(nla_opt_msk, msk_depth)) { 1702 NL_SET_ERR_MSG(extack, "A mask attribute is invalid"); 1703 return -EINVAL; 1704 } 1705 nla_opt_msk = nla_next(nla_opt_msk, &msk_depth); 1706 } 1707 1708 return 0; 1709 } 1710 1711 static int fl_validate_ct_state(u16 state, struct nlattr *tb, 1712 struct netlink_ext_ack *extack) 1713 { 1714 if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) { 1715 NL_SET_ERR_MSG_ATTR(extack, tb, 1716 "no trk, so no other flag can be set"); 1717 return -EINVAL; 1718 } 1719 1720 if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW && 1721 state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) { 1722 NL_SET_ERR_MSG_ATTR(extack, tb, 1723 "new and est are mutually exclusive"); 1724 return -EINVAL; 1725 } 1726 1727 if (state & TCA_FLOWER_KEY_CT_FLAGS_INVALID && 1728 state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 1729 TCA_FLOWER_KEY_CT_FLAGS_INVALID)) { 1730 NL_SET_ERR_MSG_ATTR(extack, tb, 1731 "when inv is set, only trk may be set"); 1732 return -EINVAL; 1733 } 1734 1735 if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW && 1736 state & TCA_FLOWER_KEY_CT_FLAGS_REPLY) { 1737 NL_SET_ERR_MSG_ATTR(extack, tb, 1738 "new and rpl are mutually exclusive"); 1739 return -EINVAL; 1740 } 1741 1742 return 0; 1743 } 1744 1745 static int fl_set_key_ct(struct nlattr **tb, 1746 struct flow_dissector_key_ct *key, 1747 struct flow_dissector_key_ct *mask, 1748 struct netlink_ext_ack *extack) 1749 { 1750 if (tb[TCA_FLOWER_KEY_CT_STATE]) { 1751 int err; 1752 1753 if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) { 1754 NL_SET_ERR_MSG(extack, "Conntrack isn't enabled"); 1755 return -EOPNOTSUPP; 1756 } 1757 fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE, 1758 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK, 1759 sizeof(key->ct_state)); 1760 1761 err = fl_validate_ct_state(key->ct_state & mask->ct_state, 1762 tb[TCA_FLOWER_KEY_CT_STATE_MASK], 1763 extack); 1764 if (err) 1765 return err; 1766 1767 } 1768 if (tb[TCA_FLOWER_KEY_CT_ZONE]) { 1769 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) { 1770 NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled"); 1771 return -EOPNOTSUPP; 1772 } 1773 fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE, 1774 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK, 1775 sizeof(key->ct_zone)); 1776 } 1777 if (tb[TCA_FLOWER_KEY_CT_MARK]) { 1778 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) { 1779 NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled"); 1780 return -EOPNOTSUPP; 1781 } 1782 fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK, 1783 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK, 1784 sizeof(key->ct_mark)); 1785 } 1786 if (tb[TCA_FLOWER_KEY_CT_LABELS]) { 1787 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) { 1788 NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled"); 1789 return -EOPNOTSUPP; 1790 } 1791 fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS, 1792 mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK, 1793 sizeof(key->ct_labels)); 1794 } 1795 1796 return 0; 1797 } 1798 1799 static bool is_vlan_key(struct nlattr *tb, __be16 
*ethertype, 1800 struct fl_flow_key *key, struct fl_flow_key *mask, 1801 int vthresh) 1802 { 1803 const bool good_num_of_vlans = key->num_of_vlans.num_of_vlans > vthresh; 1804 1805 if (!tb) { 1806 *ethertype = 0; 1807 return good_num_of_vlans; 1808 } 1809 1810 *ethertype = nla_get_be16(tb); 1811 if (good_num_of_vlans || eth_type_vlan(*ethertype)) 1812 return true; 1813 1814 key->basic.n_proto = *ethertype; 1815 mask->basic.n_proto = cpu_to_be16(~0); 1816 return false; 1817 } 1818 1819 static void fl_set_key_cfm_md_level(struct nlattr **tb, 1820 struct fl_flow_key *key, 1821 struct fl_flow_key *mask, 1822 struct netlink_ext_ack *extack) 1823 { 1824 u8 level; 1825 1826 if (!tb[TCA_FLOWER_KEY_CFM_MD_LEVEL]) 1827 return; 1828 1829 level = nla_get_u8(tb[TCA_FLOWER_KEY_CFM_MD_LEVEL]); 1830 key->cfm.mdl_ver = FIELD_PREP(FLOW_DIS_CFM_MDL_MASK, level); 1831 mask->cfm.mdl_ver = FLOW_DIS_CFM_MDL_MASK; 1832 } 1833 1834 static void fl_set_key_cfm_opcode(struct nlattr **tb, 1835 struct fl_flow_key *key, 1836 struct fl_flow_key *mask, 1837 struct netlink_ext_ack *extack) 1838 { 1839 fl_set_key_val(tb, &key->cfm.opcode, TCA_FLOWER_KEY_CFM_OPCODE, 1840 &mask->cfm.opcode, TCA_FLOWER_UNSPEC, 1841 sizeof(key->cfm.opcode)); 1842 } 1843 1844 static int fl_set_key_cfm(struct nlattr **tb, 1845 struct fl_flow_key *key, 1846 struct fl_flow_key *mask, 1847 struct netlink_ext_ack *extack) 1848 { 1849 struct nlattr *nla_cfm_opt[TCA_FLOWER_KEY_CFM_OPT_MAX + 1]; 1850 int err; 1851 1852 if (!tb[TCA_FLOWER_KEY_CFM]) 1853 return 0; 1854 1855 err = nla_parse_nested(nla_cfm_opt, TCA_FLOWER_KEY_CFM_OPT_MAX, 1856 tb[TCA_FLOWER_KEY_CFM], cfm_opt_policy, extack); 1857 if (err < 0) 1858 return err; 1859 1860 fl_set_key_cfm_opcode(nla_cfm_opt, key, mask, extack); 1861 fl_set_key_cfm_md_level(nla_cfm_opt, key, mask, extack); 1862 1863 return 0; 1864 } 1865 1866 static int fl_set_key(struct net *net, struct nlattr *tca_opts, 1867 struct nlattr **tb, struct fl_flow_key *key, 1868 struct fl_flow_key *mask, struct netlink_ext_ack *extack) 1869 { 1870 __be16 ethertype; 1871 int ret = 0; 1872 1873 if (tb[TCA_FLOWER_INDEV]) { 1874 int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack); 1875 if (err < 0) 1876 return err; 1877 key->meta.ingress_ifindex = err; 1878 mask->meta.ingress_ifindex = 0xffffffff; 1879 } 1880 1881 fl_set_key_val(tb, &key->meta.l2_miss, TCA_FLOWER_L2_MISS, 1882 &mask->meta.l2_miss, TCA_FLOWER_UNSPEC, 1883 sizeof(key->meta.l2_miss)); 1884 1885 fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST, 1886 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK, 1887 sizeof(key->eth.dst)); 1888 fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC, 1889 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK, 1890 sizeof(key->eth.src)); 1891 fl_set_key_val(tb, &key->num_of_vlans, 1892 TCA_FLOWER_KEY_NUM_OF_VLANS, 1893 &mask->num_of_vlans, 1894 TCA_FLOWER_UNSPEC, 1895 sizeof(key->num_of_vlans)); 1896 1897 if (is_vlan_key(tb[TCA_FLOWER_KEY_ETH_TYPE], &ethertype, key, mask, 0)) { 1898 fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID, 1899 TCA_FLOWER_KEY_VLAN_PRIO, 1900 TCA_FLOWER_KEY_VLAN_ETH_TYPE, 1901 &key->vlan, &mask->vlan); 1902 1903 if (is_vlan_key(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE], 1904 &ethertype, key, mask, 1)) { 1905 fl_set_key_vlan(tb, ethertype, 1906 TCA_FLOWER_KEY_CVLAN_ID, 1907 TCA_FLOWER_KEY_CVLAN_PRIO, 1908 TCA_FLOWER_KEY_CVLAN_ETH_TYPE, 1909 &key->cvlan, &mask->cvlan); 1910 fl_set_key_val(tb, &key->basic.n_proto, 1911 TCA_FLOWER_KEY_CVLAN_ETH_TYPE, 1912 &mask->basic.n_proto, 1913 TCA_FLOWER_UNSPEC, 1914
sizeof(key->basic.n_proto)); 1915 } 1916 } 1917 1918 if (key->basic.n_proto == htons(ETH_P_PPP_SES)) 1919 fl_set_key_pppoe(tb, &key->pppoe, &mask->pppoe, key, mask); 1920 1921 if (key->basic.n_proto == htons(ETH_P_IP) || 1922 key->basic.n_proto == htons(ETH_P_IPV6)) { 1923 fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO, 1924 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC, 1925 sizeof(key->basic.ip_proto)); 1926 fl_set_key_ip(tb, false, &key->ip, &mask->ip); 1927 } 1928 1929 if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) { 1930 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; 1931 mask->control.addr_type = ~0; 1932 fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC, 1933 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK, 1934 sizeof(key->ipv4.src)); 1935 fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST, 1936 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK, 1937 sizeof(key->ipv4.dst)); 1938 } else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) { 1939 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 1940 mask->control.addr_type = ~0; 1941 fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC, 1942 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK, 1943 sizeof(key->ipv6.src)); 1944 fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST, 1945 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK, 1946 sizeof(key->ipv6.dst)); 1947 } 1948 1949 if (key->basic.ip_proto == IPPROTO_TCP) { 1950 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC, 1951 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK, 1952 sizeof(key->tp.src)); 1953 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST, 1954 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK, 1955 sizeof(key->tp.dst)); 1956 fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS, 1957 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK, 1958 sizeof(key->tcp.flags)); 1959 } else if (key->basic.ip_proto == IPPROTO_UDP) { 1960 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC, 1961 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK, 1962 sizeof(key->tp.src)); 1963 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST, 1964 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK, 1965 sizeof(key->tp.dst)); 1966 } else if (key->basic.ip_proto == IPPROTO_SCTP) { 1967 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC, 1968 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK, 1969 sizeof(key->tp.src)); 1970 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST, 1971 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK, 1972 sizeof(key->tp.dst)); 1973 } else if (key->basic.n_proto == htons(ETH_P_IP) && 1974 key->basic.ip_proto == IPPROTO_ICMP) { 1975 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE, 1976 &mask->icmp.type, 1977 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK, 1978 sizeof(key->icmp.type)); 1979 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE, 1980 &mask->icmp.code, 1981 TCA_FLOWER_KEY_ICMPV4_CODE_MASK, 1982 sizeof(key->icmp.code)); 1983 } else if (key->basic.n_proto == htons(ETH_P_IPV6) && 1984 key->basic.ip_proto == IPPROTO_ICMPV6) { 1985 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE, 1986 &mask->icmp.type, 1987 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK, 1988 sizeof(key->icmp.type)); 1989 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE, 1990 &mask->icmp.code, 1991 TCA_FLOWER_KEY_ICMPV6_CODE_MASK, 1992 sizeof(key->icmp.code)); 1993 } else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) || 1994 key->basic.n_proto == htons(ETH_P_MPLS_MC)) { 1995 ret = fl_set_key_mpls(tb, 
&key->mpls, &mask->mpls, extack); 1996 if (ret) 1997 return ret; 1998 } else if (key->basic.n_proto == htons(ETH_P_ARP) || 1999 key->basic.n_proto == htons(ETH_P_RARP)) { 2000 fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP, 2001 &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK, 2002 sizeof(key->arp.sip)); 2003 fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP, 2004 &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK, 2005 sizeof(key->arp.tip)); 2006 fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP, 2007 &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK, 2008 sizeof(key->arp.op)); 2009 fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA, 2010 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK, 2011 sizeof(key->arp.sha)); 2012 fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA, 2013 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK, 2014 sizeof(key->arp.tha)); 2015 } else if (key->basic.ip_proto == IPPROTO_L2TP) { 2016 fl_set_key_val(tb, &key->l2tpv3.session_id, 2017 TCA_FLOWER_KEY_L2TPV3_SID, 2018 &mask->l2tpv3.session_id, TCA_FLOWER_UNSPEC, 2019 sizeof(key->l2tpv3.session_id)); 2020 } else if (key->basic.n_proto == htons(ETH_P_CFM)) { 2021 ret = fl_set_key_cfm(tb, key, mask, extack); 2022 if (ret) 2023 return ret; 2024 } 2025 2026 if (key->basic.ip_proto == IPPROTO_TCP || 2027 key->basic.ip_proto == IPPROTO_UDP || 2028 key->basic.ip_proto == IPPROTO_SCTP) { 2029 ret = fl_set_key_port_range(tb, key, mask, extack); 2030 if (ret) 2031 return ret; 2032 } 2033 2034 if (tb[TCA_FLOWER_KEY_SPI]) { 2035 ret = fl_set_key_spi(tb, key, mask, extack); 2036 if (ret) 2037 return ret; 2038 } 2039 2040 if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] || 2041 tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) { 2042 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; 2043 mask->enc_control.addr_type = ~0; 2044 fl_set_key_val(tb, &key->enc_ipv4.src, 2045 TCA_FLOWER_KEY_ENC_IPV4_SRC, 2046 &mask->enc_ipv4.src, 2047 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK, 2048 sizeof(key->enc_ipv4.src)); 2049 fl_set_key_val(tb, &key->enc_ipv4.dst, 2050 TCA_FLOWER_KEY_ENC_IPV4_DST, 2051 &mask->enc_ipv4.dst, 2052 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK, 2053 sizeof(key->enc_ipv4.dst)); 2054 } 2055 2056 if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] || 2057 tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) { 2058 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 2059 mask->enc_control.addr_type = ~0; 2060 fl_set_key_val(tb, &key->enc_ipv6.src, 2061 TCA_FLOWER_KEY_ENC_IPV6_SRC, 2062 &mask->enc_ipv6.src, 2063 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK, 2064 sizeof(key->enc_ipv6.src)); 2065 fl_set_key_val(tb, &key->enc_ipv6.dst, 2066 TCA_FLOWER_KEY_ENC_IPV6_DST, 2067 &mask->enc_ipv6.dst, 2068 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK, 2069 sizeof(key->enc_ipv6.dst)); 2070 } 2071 2072 fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID, 2073 &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC, 2074 sizeof(key->enc_key_id.keyid)); 2075 2076 fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT, 2077 &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK, 2078 sizeof(key->enc_tp.src)); 2079 2080 fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT, 2081 &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK, 2082 sizeof(key->enc_tp.dst)); 2083 2084 fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip); 2085 2086 fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH, 2087 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK, 2088 sizeof(key->hash.hash)); 2089 2090 if (tb[TCA_FLOWER_KEY_ENC_OPTS]) { 2091 ret = fl_set_enc_opt(tb, key, mask, extack); 2092 if (ret) 2093 return ret; 
2094 } 2095 2096 ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack); 2097 if (ret) 2098 return ret; 2099 2100 if (tb[TCA_FLOWER_KEY_FLAGS]) { 2101 ret = fl_set_key_flags(tca_opts, tb, false, 2102 &key->control.flags, 2103 &mask->control.flags, extack); 2104 if (ret) 2105 return ret; 2106 } 2107 2108 if (tb[TCA_FLOWER_KEY_ENC_FLAGS]) 2109 ret = fl_set_key_flags(tca_opts, tb, true, 2110 &key->enc_control.flags, 2111 &mask->enc_control.flags, extack); 2112 2113 return ret; 2114 } 2115 2116 static void fl_mask_copy(struct fl_flow_mask *dst, 2117 struct fl_flow_mask *src) 2118 { 2119 const void *psrc = fl_key_get_start(&src->key, src); 2120 void *pdst = fl_key_get_start(&dst->key, src); 2121 2122 memcpy(pdst, psrc, fl_mask_range(src)); 2123 dst->range = src->range; 2124 } 2125 2126 static const struct rhashtable_params fl_ht_params = { 2127 .key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */ 2128 .head_offset = offsetof(struct cls_fl_filter, ht_node), 2129 .automatic_shrinking = true, 2130 }; 2131 2132 static int fl_init_mask_hashtable(struct fl_flow_mask *mask) 2133 { 2134 mask->filter_ht_params = fl_ht_params; 2135 mask->filter_ht_params.key_len = fl_mask_range(mask); 2136 mask->filter_ht_params.key_offset += mask->range.start; 2137 2138 return rhashtable_init(&mask->ht, &mask->filter_ht_params); 2139 } 2140 2141 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member) 2142 #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member) 2143 2144 #define FL_KEY_IS_MASKED(mask, member) \ 2145 memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \ 2146 0, FL_KEY_MEMBER_SIZE(member)) \ 2147 2148 #define FL_KEY_SET(keys, cnt, id, member) \ 2149 do { \ 2150 keys[cnt].key_id = id; \ 2151 keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \ 2152 cnt++; \ 2153 } while(0); 2154 2155 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \ 2156 do { \ 2157 if (FL_KEY_IS_MASKED(mask, member)) \ 2158 FL_KEY_SET(keys, cnt, id, member); \ 2159 } while(0); 2160 2161 static void fl_init_dissector(struct flow_dissector *dissector, 2162 struct fl_flow_key *mask) 2163 { 2164 struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX]; 2165 size_t cnt = 0; 2166 2167 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2168 FLOW_DISSECTOR_KEY_META, meta); 2169 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control); 2170 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic); 2171 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2172 FLOW_DISSECTOR_KEY_ETH_ADDRS, eth); 2173 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2174 FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4); 2175 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2176 FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6); 2177 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2178 FLOW_DISSECTOR_KEY_PORTS, tp); 2179 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2180 FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range); 2181 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2182 FLOW_DISSECTOR_KEY_IP, ip); 2183 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2184 FLOW_DISSECTOR_KEY_TCP, tcp); 2185 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2186 FLOW_DISSECTOR_KEY_ICMP, icmp); 2187 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2188 FLOW_DISSECTOR_KEY_ARP, arp); 2189 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2190 FLOW_DISSECTOR_KEY_MPLS, mpls); 2191 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2192 FLOW_DISSECTOR_KEY_VLAN, vlan); 2193 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2194 FLOW_DISSECTOR_KEY_CVLAN, cvlan); 2195 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2196 FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id); 2197 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2198 
FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4); 2199 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2200 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6); 2201 if (FL_KEY_IS_MASKED(mask, enc_ipv4) || 2202 FL_KEY_IS_MASKED(mask, enc_ipv6) || 2203 FL_KEY_IS_MASKED(mask, enc_control)) 2204 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL, 2205 enc_control); 2206 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2207 FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp); 2208 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2209 FLOW_DISSECTOR_KEY_ENC_IP, enc_ip); 2210 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2211 FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts); 2212 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2213 FLOW_DISSECTOR_KEY_CT, ct); 2214 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2215 FLOW_DISSECTOR_KEY_HASH, hash); 2216 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2217 FLOW_DISSECTOR_KEY_NUM_OF_VLANS, num_of_vlans); 2218 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2219 FLOW_DISSECTOR_KEY_PPPOE, pppoe); 2220 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2221 FLOW_DISSECTOR_KEY_L2TPV3, l2tpv3); 2222 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2223 FLOW_DISSECTOR_KEY_IPSEC, ipsec); 2224 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2225 FLOW_DISSECTOR_KEY_CFM, cfm); 2226 2227 skb_flow_dissector_init(dissector, keys, cnt); 2228 } 2229 2230 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head, 2231 struct fl_flow_mask *mask) 2232 { 2233 struct fl_flow_mask *newmask; 2234 int err; 2235 2236 newmask = kzalloc_obj(*newmask); 2237 if (!newmask) 2238 return ERR_PTR(-ENOMEM); 2239 2240 fl_mask_copy(newmask, mask); 2241 2242 if ((newmask->key.tp_range.tp_min.dst && 2243 newmask->key.tp_range.tp_max.dst) || 2244 (newmask->key.tp_range.tp_min.src && 2245 newmask->key.tp_range.tp_max.src)) 2246 newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE; 2247 2248 err = fl_init_mask_hashtable(newmask); 2249 if (err) 2250 goto errout_free; 2251 2252 fl_init_dissector(&newmask->dissector, &newmask->key); 2253 2254 INIT_LIST_HEAD_RCU(&newmask->filters); 2255 2256 refcount_set(&newmask->refcnt, 1); 2257 err = rhashtable_replace_fast(&head->ht, &mask->ht_node, 2258 &newmask->ht_node, mask_ht_params); 2259 if (err) 2260 goto errout_destroy; 2261 2262 spin_lock(&head->masks_lock); 2263 list_add_tail_rcu(&newmask->list, &head->masks); 2264 spin_unlock(&head->masks_lock); 2265 2266 return newmask; 2267 2268 errout_destroy: 2269 rhashtable_destroy(&newmask->ht); 2270 errout_free: 2271 kfree(newmask); 2272 2273 return ERR_PTR(err); 2274 } 2275 2276 static int fl_check_assign_mask(struct cls_fl_head *head, 2277 struct cls_fl_filter *fnew, 2278 struct cls_fl_filter *fold, 2279 struct fl_flow_mask *mask) 2280 { 2281 struct fl_flow_mask *newmask; 2282 int ret = 0; 2283 2284 rcu_read_lock(); 2285 2286 /* Insert mask as temporary node to prevent concurrent creation of mask 2287 * with same key. Any concurrent lookups with same key will return 2288 * -EAGAIN because mask's refcnt is zero. 
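* The temporary node is then either replaced by the fully initialised mask in fl_create_new_mask() or removed again on the error path.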
2289 */ 2290 fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht, 2291 &mask->ht_node, 2292 mask_ht_params); 2293 if (!fnew->mask) { 2294 rcu_read_unlock(); 2295 2296 if (fold) { 2297 ret = -EINVAL; 2298 goto errout_cleanup; 2299 } 2300 2301 newmask = fl_create_new_mask(head, mask); 2302 if (IS_ERR(newmask)) { 2303 ret = PTR_ERR(newmask); 2304 goto errout_cleanup; 2305 } 2306 2307 fnew->mask = newmask; 2308 return 0; 2309 } else if (IS_ERR(fnew->mask)) { 2310 ret = PTR_ERR(fnew->mask); 2311 } else if (fold && fold->mask != fnew->mask) { 2312 ret = -EINVAL; 2313 } else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) { 2314 /* Mask was deleted concurrently, try again */ 2315 ret = -EAGAIN; 2316 } 2317 rcu_read_unlock(); 2318 return ret; 2319 2320 errout_cleanup: 2321 rhashtable_remove_fast(&head->ht, &mask->ht_node, 2322 mask_ht_params); 2323 return ret; 2324 } 2325 2326 static bool fl_needs_tc_skb_ext(const struct fl_flow_key *mask) 2327 { 2328 return mask->meta.l2_miss; 2329 } 2330 2331 static int fl_ht_insert_unique(struct cls_fl_filter *fnew, 2332 struct cls_fl_filter *fold, 2333 bool *in_ht) 2334 { 2335 struct fl_flow_mask *mask = fnew->mask; 2336 int err; 2337 2338 err = rhashtable_lookup_insert_fast(&mask->ht, 2339 &fnew->ht_node, 2340 mask->filter_ht_params); 2341 if (err) { 2342 *in_ht = false; 2343 /* It is okay if filter with same key exists when 2344 * overwriting. 2345 */ 2346 return fold && err == -EEXIST ? 0 : err; 2347 } 2348 2349 *in_ht = true; 2350 return 0; 2351 } 2352 2353 static int fl_change(struct net *net, struct sk_buff *in_skb, 2354 struct tcf_proto *tp, unsigned long base, 2355 u32 handle, struct nlattr **tca, 2356 void **arg, u32 flags, 2357 struct netlink_ext_ack *extack) 2358 { 2359 struct cls_fl_head *head = fl_head_dereference(tp); 2360 bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL); 2361 struct nlattr *tca_opts = tca[TCA_OPTIONS]; 2362 struct cls_fl_filter *fold = *arg; 2363 bool bound_to_filter = false; 2364 struct cls_fl_filter *fnew; 2365 struct fl_flow_mask *mask; 2366 struct nlattr **tb; 2367 bool in_ht; 2368 int err; 2369 2370 if (!tca_opts) { 2371 err = -EINVAL; 2372 goto errout_fold; 2373 } 2374 2375 mask = kzalloc_obj(struct fl_flow_mask); 2376 if (!mask) { 2377 err = -ENOBUFS; 2378 goto errout_fold; 2379 } 2380 2381 tb = kzalloc_objs(struct nlattr *, TCA_FLOWER_MAX + 1); 2382 if (!tb) { 2383 err = -ENOBUFS; 2384 goto errout_mask_alloc; 2385 } 2386 2387 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX, 2388 tca_opts, fl_policy, NULL); 2389 if (err < 0) 2390 goto errout_tb; 2391 2392 if (fold && handle && fold->handle != handle) { 2393 err = -EINVAL; 2394 goto errout_tb; 2395 } 2396 2397 fnew = kzalloc_obj(*fnew); 2398 if (!fnew) { 2399 err = -ENOBUFS; 2400 goto errout_tb; 2401 } 2402 INIT_LIST_HEAD(&fnew->hw_list); 2403 refcount_set(&fnew->refcnt, 1); 2404 2405 if (tb[TCA_FLOWER_FLAGS]) { 2406 fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]); 2407 2408 if (!tc_flags_valid(fnew->flags)) { 2409 kfree(fnew); 2410 err = -EINVAL; 2411 goto errout_tb; 2412 } 2413 } 2414 2415 if (!fold) { 2416 spin_lock(&tp->lock); 2417 if (!handle) { 2418 handle = 1; 2419 err = idr_alloc_u32(&head->handle_idr, NULL, &handle, 2420 INT_MAX, GFP_ATOMIC); 2421 } else { 2422 err = idr_alloc_u32(&head->handle_idr, NULL, &handle, 2423 handle, GFP_ATOMIC); 2424 2425 /* Filter with specified handle was concurrently 2426 * inserted after initial check in cls_api. This is not 2427 * necessarily an error if NLM_F_EXCL is not set in 2428 * message flags. 
Returning EAGAIN will cause cls_api to 2429 * try to update concurrently inserted rule. 2430 */ 2431 if (err == -ENOSPC) 2432 err = -EAGAIN; 2433 } 2434 spin_unlock(&tp->lock); 2435 2436 if (err) { 2437 kfree(fnew); 2438 goto errout_tb; 2439 } 2440 } 2441 fnew->handle = handle; 2442 2443 err = tcf_exts_init_ex(&fnew->exts, net, TCA_FLOWER_ACT, 0, tp, handle, 2444 !tc_skip_hw(fnew->flags)); 2445 if (err < 0) 2446 goto errout_idr; 2447 2448 err = tcf_exts_validate_ex(net, tp, tb, tca[TCA_RATE], 2449 &fnew->exts, flags, fnew->flags, 2450 extack); 2451 if (err < 0) 2452 goto errout_idr; 2453 2454 if (tb[TCA_FLOWER_CLASSID]) { 2455 fnew->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]); 2456 if (flags & TCA_ACT_FLAGS_NO_RTNL) 2457 rtnl_lock(); 2458 tcf_bind_filter(tp, &fnew->res, base); 2459 if (flags & TCA_ACT_FLAGS_NO_RTNL) 2460 rtnl_unlock(); 2461 bound_to_filter = true; 2462 } 2463 2464 err = fl_set_key(net, tca_opts, tb, &fnew->key, &mask->key, extack); 2465 if (err) 2466 goto unbind_filter; 2467 2468 fl_mask_update_range(mask); 2469 fl_set_masked_key(&fnew->mkey, &fnew->key, mask); 2470 2471 if (!fl_mask_fits_tmplt(tp->chain->tmplt_priv, mask)) { 2472 NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template"); 2473 err = -EINVAL; 2474 goto unbind_filter; 2475 } 2476 2477 /* Enable tc skb extension if filter matches on data extracted from 2478 * this extension. 2479 */ 2480 if (fl_needs_tc_skb_ext(&mask->key)) { 2481 fnew->needs_tc_skb_ext = 1; 2482 tc_skb_ext_tc_enable(); 2483 } 2484 2485 err = fl_check_assign_mask(head, fnew, fold, mask); 2486 if (err) 2487 goto unbind_filter; 2488 2489 err = fl_ht_insert_unique(fnew, fold, &in_ht); 2490 if (err) 2491 goto errout_mask; 2492 2493 if (!tc_skip_hw(fnew->flags)) { 2494 err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack); 2495 if (err) 2496 goto errout_ht; 2497 } 2498 2499 if (!tc_in_hw(fnew->flags)) 2500 fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW; 2501 2502 tcf_proto_update_usesw(tp, fnew->flags); 2503 2504 spin_lock(&tp->lock); 2505 2506 /* tp was deleted concurrently. -EAGAIN will cause caller to lookup 2507 * proto again or create new one, if necessary. 2508 */ 2509 if (tp->deleting) { 2510 err = -EAGAIN; 2511 goto errout_hw; 2512 } 2513 2514 if (fold) { 2515 /* Fold filter was deleted concurrently. Retry lookup. */ 2516 if (fold->deleted) { 2517 err = -EAGAIN; 2518 goto errout_hw; 2519 } 2520 2521 fnew->handle = handle; 2522 2523 if (!in_ht) { 2524 struct rhashtable_params params = 2525 fnew->mask->filter_ht_params; 2526 2527 err = rhashtable_insert_fast(&fnew->mask->ht, 2528 &fnew->ht_node, 2529 params); 2530 if (err) 2531 goto errout_hw; 2532 in_ht = true; 2533 } 2534 2535 refcount_inc(&fnew->refcnt); 2536 rhashtable_remove_fast(&fold->mask->ht, 2537 &fold->ht_node, 2538 fold->mask->filter_ht_params); 2539 idr_replace(&head->handle_idr, fnew, fnew->handle); 2540 list_replace_rcu(&fold->list, &fnew->list); 2541 fold->deleted = true; 2542 2543 spin_unlock(&tp->lock); 2544 2545 fl_mask_put(head, fold->mask); 2546 if (!tc_skip_hw(fold->flags)) 2547 fl_hw_destroy_filter(tp, fold, rtnl_held, NULL); 2548 tcf_unbind_filter(tp, &fold->res); 2549 /* Caller holds reference to fold, so refcnt is always > 0 2550 * after this. 
2551 */ 2552 refcount_dec(&fold->refcnt); 2553 __fl_put(fold); 2554 } else { 2555 idr_replace(&head->handle_idr, fnew, fnew->handle); 2556 2557 refcount_inc(&fnew->refcnt); 2558 list_add_tail_rcu(&fnew->list, &fnew->mask->filters); 2559 spin_unlock(&tp->lock); 2560 } 2561 2562 *arg = fnew; 2563 2564 kfree(tb); 2565 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work); 2566 return 0; 2567 2568 errout_ht: 2569 spin_lock(&tp->lock); 2570 errout_hw: 2571 fnew->deleted = true; 2572 spin_unlock(&tp->lock); 2573 if (!tc_skip_hw(fnew->flags)) 2574 fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL); 2575 if (in_ht) 2576 rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node, 2577 fnew->mask->filter_ht_params); 2578 errout_mask: 2579 fl_mask_put(head, fnew->mask); 2580 2581 unbind_filter: 2582 if (bound_to_filter) { 2583 if (flags & TCA_ACT_FLAGS_NO_RTNL) 2584 rtnl_lock(); 2585 tcf_unbind_filter(tp, &fnew->res); 2586 if (flags & TCA_ACT_FLAGS_NO_RTNL) 2587 rtnl_unlock(); 2588 } 2589 2590 errout_idr: 2591 if (!fold) { 2592 spin_lock(&tp->lock); 2593 idr_remove(&head->handle_idr, fnew->handle); 2594 spin_unlock(&tp->lock); 2595 } 2596 __fl_put(fnew); 2597 errout_tb: 2598 kfree(tb); 2599 errout_mask_alloc: 2600 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work); 2601 errout_fold: 2602 if (fold) 2603 __fl_put(fold); 2604 return err; 2605 } 2606 2607 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last, 2608 bool rtnl_held, struct netlink_ext_ack *extack) 2609 { 2610 struct cls_fl_head *head = fl_head_dereference(tp); 2611 struct cls_fl_filter *f = arg; 2612 bool last_on_mask; 2613 int err = 0; 2614 2615 err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack); 2616 *last = list_empty(&head->masks); 2617 __fl_put(f); 2618 2619 return err; 2620 } 2621 2622 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg, 2623 bool rtnl_held) 2624 { 2625 struct cls_fl_head *head = fl_head_dereference(tp); 2626 unsigned long id = arg->cookie, tmp; 2627 struct cls_fl_filter *f; 2628 2629 arg->count = arg->skip; 2630 2631 rcu_read_lock(); 2632 idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) { 2633 /* don't return filters that are being deleted */ 2634 if (!f || !refcount_inc_not_zero(&f->refcnt)) 2635 continue; 2636 rcu_read_unlock(); 2637 2638 if (arg->fn(tp, f, arg) < 0) { 2639 __fl_put(f); 2640 arg->stop = 1; 2641 rcu_read_lock(); 2642 break; 2643 } 2644 __fl_put(f); 2645 arg->count++; 2646 rcu_read_lock(); 2647 } 2648 rcu_read_unlock(); 2649 arg->cookie = id; 2650 } 2651 2652 static struct cls_fl_filter * 2653 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add) 2654 { 2655 struct cls_fl_head *head = fl_head_dereference(tp); 2656 2657 spin_lock(&tp->lock); 2658 if (list_empty(&head->hw_filters)) { 2659 spin_unlock(&tp->lock); 2660 return NULL; 2661 } 2662 2663 if (!f) 2664 f = list_entry(&head->hw_filters, struct cls_fl_filter, 2665 hw_list); 2666 list_for_each_entry_continue(f, &head->hw_filters, hw_list) { 2667 if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) { 2668 spin_unlock(&tp->lock); 2669 return f; 2670 } 2671 } 2672 2673 spin_unlock(&tp->lock); 2674 return NULL; 2675 } 2676 2677 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, 2678 void *cb_priv, struct netlink_ext_ack *extack) 2679 { 2680 struct tcf_block *block = tp->chain->block; 2681 struct flow_cls_offload cls_flower = {}; 2682 struct cls_fl_filter *f = NULL; 2683 int err; 2684 2685 /* hw_filters list can only be changed by hw offload functions 
after 2686 * obtaining rtnl lock. Make sure it is not changed while reoffload is 2687 * iterating it. 2688 */ 2689 ASSERT_RTNL(); 2690 2691 while ((f = fl_get_next_hw_filter(tp, f, add))) { 2692 cls_flower.rule = 2693 flow_rule_alloc(tcf_exts_num_actions(&f->exts)); 2694 if (!cls_flower.rule) { 2695 __fl_put(f); 2696 return -ENOMEM; 2697 } 2698 2699 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, 2700 extack); 2701 cls_flower.command = add ? 2702 FLOW_CLS_REPLACE : FLOW_CLS_DESTROY; 2703 cls_flower.cookie = (unsigned long)f; 2704 cls_flower.rule->match.dissector = &f->mask->dissector; 2705 cls_flower.rule->match.mask = &f->mask->key; 2706 cls_flower.rule->match.key = &f->mkey; 2707 2708 err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts, 2709 cls_flower.common.extack); 2710 if (err) { 2711 kfree(cls_flower.rule); 2712 if (tc_skip_sw(f->flags)) { 2713 __fl_put(f); 2714 return err; 2715 } 2716 goto next_flow; 2717 } 2718 2719 cls_flower.classid = f->res.classid; 2720 2721 err = tc_setup_cb_reoffload(block, tp, add, cb, 2722 TC_SETUP_CLSFLOWER, &cls_flower, 2723 cb_priv, &f->flags, 2724 &f->in_hw_count); 2725 tc_cleanup_offload_action(&cls_flower.rule->action); 2726 kfree(cls_flower.rule); 2727 2728 if (err) { 2729 __fl_put(f); 2730 return err; 2731 } 2732 next_flow: 2733 __fl_put(f); 2734 } 2735 2736 return 0; 2737 } 2738 2739 static void fl_hw_add(struct tcf_proto *tp, void *type_data) 2740 { 2741 struct flow_cls_offload *cls_flower = type_data; 2742 struct cls_fl_filter *f = 2743 (struct cls_fl_filter *) cls_flower->cookie; 2744 struct cls_fl_head *head = fl_head_dereference(tp); 2745 2746 spin_lock(&tp->lock); 2747 list_add(&f->hw_list, &head->hw_filters); 2748 spin_unlock(&tp->lock); 2749 } 2750 2751 static void fl_hw_del(struct tcf_proto *tp, void *type_data) 2752 { 2753 struct flow_cls_offload *cls_flower = type_data; 2754 struct cls_fl_filter *f = 2755 (struct cls_fl_filter *) cls_flower->cookie; 2756 2757 spin_lock(&tp->lock); 2758 if (!list_empty(&f->hw_list)) 2759 list_del_init(&f->hw_list); 2760 spin_unlock(&tp->lock); 2761 } 2762 2763 static int fl_hw_create_tmplt(struct tcf_chain *chain, 2764 struct fl_flow_tmplt *tmplt) 2765 { 2766 struct flow_cls_offload cls_flower = {}; 2767 struct tcf_block *block = chain->block; 2768 2769 cls_flower.rule = flow_rule_alloc(0); 2770 if (!cls_flower.rule) 2771 return -ENOMEM; 2772 2773 cls_flower.common.chain_index = chain->index; 2774 cls_flower.command = FLOW_CLS_TMPLT_CREATE; 2775 cls_flower.cookie = (unsigned long) tmplt; 2776 cls_flower.rule->match.dissector = &tmplt->dissector; 2777 cls_flower.rule->match.mask = &tmplt->mask; 2778 cls_flower.rule->match.key = &tmplt->dummy_key; 2779 2780 /* We don't care if driver (any of them) fails to handle this 2781 * call. It serves just as a hint for it. 
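* The return value of tc_setup_cb_call() is ignored below for the same reason.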
2782 */ 2783 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true); 2784 kfree(cls_flower.rule); 2785 2786 return 0; 2787 } 2788 2789 static void fl_hw_destroy_tmplt(struct tcf_chain *chain, 2790 struct fl_flow_tmplt *tmplt) 2791 { 2792 struct flow_cls_offload cls_flower = {}; 2793 struct tcf_block *block = chain->block; 2794 2795 cls_flower.common.chain_index = chain->index; 2796 cls_flower.command = FLOW_CLS_TMPLT_DESTROY; 2797 cls_flower.cookie = (unsigned long) tmplt; 2798 2799 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true); 2800 } 2801 2802 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain, 2803 struct nlattr **tca, 2804 struct netlink_ext_ack *extack) 2805 { 2806 struct nlattr *tca_opts = tca[TCA_OPTIONS]; 2807 struct fl_flow_tmplt *tmplt; 2808 struct nlattr **tb; 2809 int err; 2810 2811 if (!tca_opts) 2812 return ERR_PTR(-EINVAL); 2813 2814 tb = kzalloc_objs(struct nlattr *, TCA_FLOWER_MAX + 1); 2815 if (!tb) 2816 return ERR_PTR(-ENOBUFS); 2817 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX, 2818 tca_opts, fl_policy, NULL); 2819 if (err) 2820 goto errout_tb; 2821 2822 tmplt = kzalloc_obj(*tmplt); 2823 if (!tmplt) { 2824 err = -ENOMEM; 2825 goto errout_tb; 2826 } 2827 tmplt->chain = chain; 2828 err = fl_set_key(net, tca_opts, tb, &tmplt->dummy_key, 2829 &tmplt->mask, extack); 2830 if (err) 2831 goto errout_tmplt; 2832 2833 fl_init_dissector(&tmplt->dissector, &tmplt->mask); 2834 2835 err = fl_hw_create_tmplt(chain, tmplt); 2836 if (err) 2837 goto errout_tmplt; 2838 2839 kfree(tb); 2840 return tmplt; 2841 2842 errout_tmplt: 2843 kfree(tmplt); 2844 errout_tb: 2845 kfree(tb); 2846 return ERR_PTR(err); 2847 } 2848 2849 static void fl_tmplt_destroy(void *tmplt_priv) 2850 { 2851 struct fl_flow_tmplt *tmplt = tmplt_priv; 2852 2853 fl_hw_destroy_tmplt(tmplt->chain, tmplt); 2854 kfree(tmplt); 2855 } 2856 2857 static void fl_tmplt_reoffload(struct tcf_chain *chain, bool add, 2858 flow_setup_cb_t *cb, void *cb_priv) 2859 { 2860 struct fl_flow_tmplt *tmplt = chain->tmplt_priv; 2861 struct flow_cls_offload cls_flower = {}; 2862 2863 cls_flower.rule = flow_rule_alloc(0); 2864 if (!cls_flower.rule) 2865 return; 2866 2867 cls_flower.common.chain_index = chain->index; 2868 cls_flower.command = add ? 
FLOW_CLS_TMPLT_CREATE : 2869 FLOW_CLS_TMPLT_DESTROY; 2870 cls_flower.cookie = (unsigned long) tmplt; 2871 cls_flower.rule->match.dissector = &tmplt->dissector; 2872 cls_flower.rule->match.mask = &tmplt->mask; 2873 cls_flower.rule->match.key = &tmplt->dummy_key; 2874 2875 cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv); 2876 kfree(cls_flower.rule); 2877 } 2878 2879 static int fl_dump_key_val(struct sk_buff *skb, 2880 void *val, int val_type, 2881 void *mask, int mask_type, int len) 2882 { 2883 int err; 2884 2885 if (!memchr_inv(mask, 0, len)) 2886 return 0; 2887 err = nla_put(skb, val_type, len, val); 2888 if (err) 2889 return err; 2890 if (mask_type != TCA_FLOWER_UNSPEC) { 2891 err = nla_put(skb, mask_type, len, mask); 2892 if (err) 2893 return err; 2894 } 2895 return 0; 2896 } 2897 2898 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key, 2899 struct fl_flow_key *mask) 2900 { 2901 if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst, 2902 TCA_FLOWER_KEY_PORT_DST_MIN, 2903 &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC, 2904 sizeof(key->tp_range.tp_min.dst)) || 2905 fl_dump_key_val(skb, &key->tp_range.tp_max.dst, 2906 TCA_FLOWER_KEY_PORT_DST_MAX, 2907 &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC, 2908 sizeof(key->tp_range.tp_max.dst)) || 2909 fl_dump_key_val(skb, &key->tp_range.tp_min.src, 2910 TCA_FLOWER_KEY_PORT_SRC_MIN, 2911 &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC, 2912 sizeof(key->tp_range.tp_min.src)) || 2913 fl_dump_key_val(skb, &key->tp_range.tp_max.src, 2914 TCA_FLOWER_KEY_PORT_SRC_MAX, 2915 &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC, 2916 sizeof(key->tp_range.tp_max.src))) 2917 return -1; 2918 2919 return 0; 2920 } 2921 2922 static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb, 2923 struct flow_dissector_key_mpls *mpls_key, 2924 struct flow_dissector_key_mpls *mpls_mask, 2925 u8 lse_index) 2926 { 2927 struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index]; 2928 struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index]; 2929 int err; 2930 2931 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH, 2932 lse_index + 1); 2933 if (err) 2934 return err; 2935 2936 if (lse_mask->mpls_ttl) { 2937 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL, 2938 lse_key->mpls_ttl); 2939 if (err) 2940 return err; 2941 } 2942 if (lse_mask->mpls_bos) { 2943 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS, 2944 lse_key->mpls_bos); 2945 if (err) 2946 return err; 2947 } 2948 if (lse_mask->mpls_tc) { 2949 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC, 2950 lse_key->mpls_tc); 2951 if (err) 2952 return err; 2953 } 2954 if (lse_mask->mpls_label) { 2955 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL, 2956 lse_key->mpls_label); 2957 if (err) 2958 return err; 2959 } 2960 2961 return 0; 2962 } 2963 2964 static int fl_dump_key_mpls_opts(struct sk_buff *skb, 2965 struct flow_dissector_key_mpls *mpls_key, 2966 struct flow_dissector_key_mpls *mpls_mask) 2967 { 2968 struct nlattr *opts; 2969 struct nlattr *lse; 2970 u8 lse_index; 2971 int err; 2972 2973 opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS); 2974 if (!opts) 2975 return -EMSGSIZE; 2976 2977 for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) { 2978 if (!(mpls_mask->used_lses & 1 << lse_index)) 2979 continue; 2980 2981 lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE); 2982 if (!lse) { 2983 err = -EMSGSIZE; 2984 goto err_opts; 2985 } 2986 2987 err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask, 2988 lse_index); 2989 if (err) 2990 goto err_opts_lse; 
2991 nla_nest_end(skb, lse); 2992 } 2993 nla_nest_end(skb, opts); 2994 2995 return 0; 2996 2997 err_opts_lse: 2998 nla_nest_cancel(skb, lse); 2999 err_opts: 3000 nla_nest_cancel(skb, opts); 3001 3002 return err; 3003 } 3004 3005 static int fl_dump_key_mpls(struct sk_buff *skb, 3006 struct flow_dissector_key_mpls *mpls_key, 3007 struct flow_dissector_key_mpls *mpls_mask) 3008 { 3009 struct flow_dissector_mpls_lse *lse_mask; 3010 struct flow_dissector_mpls_lse *lse_key; 3011 int err; 3012 3013 if (!mpls_mask->used_lses) 3014 return 0; 3015 3016 lse_mask = &mpls_mask->ls[0]; 3017 lse_key = &mpls_key->ls[0]; 3018 3019 /* For backward compatibility, don't use the MPLS nested attributes if 3020 * the rule can be expressed using the old attributes. 3021 */ 3022 if (mpls_mask->used_lses & ~1 || 3023 (!lse_mask->mpls_ttl && !lse_mask->mpls_bos && 3024 !lse_mask->mpls_tc && !lse_mask->mpls_label)) 3025 return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask); 3026 3027 if (lse_mask->mpls_ttl) { 3028 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL, 3029 lse_key->mpls_ttl); 3030 if (err) 3031 return err; 3032 } 3033 if (lse_mask->mpls_tc) { 3034 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC, 3035 lse_key->mpls_tc); 3036 if (err) 3037 return err; 3038 } 3039 if (lse_mask->mpls_label) { 3040 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL, 3041 lse_key->mpls_label); 3042 if (err) 3043 return err; 3044 } 3045 if (lse_mask->mpls_bos) { 3046 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS, 3047 lse_key->mpls_bos); 3048 if (err) 3049 return err; 3050 } 3051 return 0; 3052 } 3053 3054 static int fl_dump_key_ip(struct sk_buff *skb, bool encap, 3055 struct flow_dissector_key_ip *key, 3056 struct flow_dissector_key_ip *mask) 3057 { 3058 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS; 3059 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL; 3060 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK; 3061 int ttl_mask = encap ? 
TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK; 3062 3063 if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) || 3064 fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl))) 3065 return -1; 3066 3067 return 0; 3068 } 3069 3070 static int fl_dump_key_vlan(struct sk_buff *skb, 3071 int vlan_id_key, int vlan_prio_key, 3072 struct flow_dissector_key_vlan *vlan_key, 3073 struct flow_dissector_key_vlan *vlan_mask) 3074 { 3075 int err; 3076 3077 if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask))) 3078 return 0; 3079 if (vlan_mask->vlan_id) { 3080 err = nla_put_u16(skb, vlan_id_key, 3081 vlan_key->vlan_id); 3082 if (err) 3083 return err; 3084 } 3085 if (vlan_mask->vlan_priority) { 3086 err = nla_put_u8(skb, vlan_prio_key, 3087 vlan_key->vlan_priority); 3088 if (err) 3089 return err; 3090 } 3091 return 0; 3092 } 3093 3094 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask, 3095 u32 *flower_key, u32 *flower_mask, 3096 u32 flower_flag_bit, u32 dissector_flag_bit) 3097 { 3098 if (dissector_mask & dissector_flag_bit) { 3099 *flower_mask |= flower_flag_bit; 3100 if (dissector_key & dissector_flag_bit) 3101 *flower_key |= flower_flag_bit; 3102 } 3103 } 3104 3105 static int fl_dump_key_flags(struct sk_buff *skb, bool encap, 3106 u32 flags_key, u32 flags_mask) 3107 { 3108 int fl_key, fl_mask; 3109 __be32 _key, _mask; 3110 u32 key, mask; 3111 int err; 3112 3113 if (encap) { 3114 fl_key = TCA_FLOWER_KEY_ENC_FLAGS; 3115 fl_mask = TCA_FLOWER_KEY_ENC_FLAGS_MASK; 3116 } else { 3117 fl_key = TCA_FLOWER_KEY_FLAGS; 3118 fl_mask = TCA_FLOWER_KEY_FLAGS_MASK; 3119 } 3120 3121 if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask))) 3122 return 0; 3123 3124 key = 0; 3125 mask = 0; 3126 3127 fl_get_key_flag(flags_key, flags_mask, &key, &mask, 3128 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT); 3129 fl_get_key_flag(flags_key, flags_mask, &key, &mask, 3130 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST, 3131 FLOW_DIS_FIRST_FRAG); 3132 3133 fl_get_key_flag(flags_key, flags_mask, &key, &mask, 3134 TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM, 3135 FLOW_DIS_F_TUNNEL_CSUM); 3136 3137 fl_get_key_flag(flags_key, flags_mask, &key, &mask, 3138 TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT, 3139 FLOW_DIS_F_TUNNEL_DONT_FRAGMENT); 3140 3141 fl_get_key_flag(flags_key, flags_mask, &key, &mask, 3142 TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM, FLOW_DIS_F_TUNNEL_OAM); 3143 3144 fl_get_key_flag(flags_key, flags_mask, &key, &mask, 3145 TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT, 3146 FLOW_DIS_F_TUNNEL_CRIT_OPT); 3147 3148 _key = cpu_to_be32(key); 3149 _mask = cpu_to_be32(mask); 3150 3151 err = nla_put(skb, fl_key, 4, &_key); 3152 if (err) 3153 return err; 3154 3155 return nla_put(skb, fl_mask, 4, &_mask); 3156 } 3157 3158 static int fl_dump_key_geneve_opt(struct sk_buff *skb, 3159 struct flow_dissector_key_enc_opts *enc_opts) 3160 { 3161 struct geneve_opt *opt; 3162 struct nlattr *nest; 3163 int opt_off = 0; 3164 3165 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE); 3166 if (!nest) 3167 goto nla_put_failure; 3168 3169 while (enc_opts->len > opt_off) { 3170 opt = (struct geneve_opt *)&enc_opts->data[opt_off]; 3171 3172 if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS, 3173 opt->opt_class)) 3174 goto nla_put_failure; 3175 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE, 3176 opt->type)) 3177 goto nla_put_failure; 3178 if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA, 3179 opt->length * 4, opt->opt_data)) 3180 goto nla_put_failure; 3181 3182 opt_off += 
sizeof(struct geneve_opt) + opt->length * 4; 3183 } 3184 nla_nest_end(skb, nest); 3185 return 0; 3186 3187 nla_put_failure: 3188 nla_nest_cancel(skb, nest); 3189 return -EMSGSIZE; 3190 } 3191 3192 static int fl_dump_key_vxlan_opt(struct sk_buff *skb, 3193 struct flow_dissector_key_enc_opts *enc_opts) 3194 { 3195 struct vxlan_metadata *md; 3196 struct nlattr *nest; 3197 3198 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN); 3199 if (!nest) 3200 goto nla_put_failure; 3201 3202 md = (struct vxlan_metadata *)&enc_opts->data[0]; 3203 if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp)) 3204 goto nla_put_failure; 3205 3206 nla_nest_end(skb, nest); 3207 return 0; 3208 3209 nla_put_failure: 3210 nla_nest_cancel(skb, nest); 3211 return -EMSGSIZE; 3212 } 3213 3214 static int fl_dump_key_erspan_opt(struct sk_buff *skb, 3215 struct flow_dissector_key_enc_opts *enc_opts) 3216 { 3217 struct erspan_metadata *md; 3218 struct nlattr *nest; 3219 3220 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN); 3221 if (!nest) 3222 goto nla_put_failure; 3223 3224 md = (struct erspan_metadata *)&enc_opts->data[0]; 3225 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version)) 3226 goto nla_put_failure; 3227 3228 if (md->version == 1 && 3229 nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index)) 3230 goto nla_put_failure; 3231 3232 if (md->version == 2 && 3233 (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR, 3234 md->u.md2.dir) || 3235 nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID, 3236 get_hwid(&md->u.md2)))) 3237 goto nla_put_failure; 3238 3239 nla_nest_end(skb, nest); 3240 return 0; 3241 3242 nla_put_failure: 3243 nla_nest_cancel(skb, nest); 3244 return -EMSGSIZE; 3245 } 3246 3247 static int fl_dump_key_gtp_opt(struct sk_buff *skb, 3248 struct flow_dissector_key_enc_opts *enc_opts) 3249 3250 { 3251 struct gtp_pdu_session_info *session_info; 3252 struct nlattr *nest; 3253 3254 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GTP); 3255 if (!nest) 3256 goto nla_put_failure; 3257 3258 session_info = (struct gtp_pdu_session_info *)&enc_opts->data[0]; 3259 3260 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE, 3261 session_info->pdu_type)) 3262 goto nla_put_failure; 3263 3264 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_QFI, session_info->qfi)) 3265 goto nla_put_failure; 3266 3267 nla_nest_end(skb, nest); 3268 return 0; 3269 3270 nla_put_failure: 3271 nla_nest_cancel(skb, nest); 3272 return -EMSGSIZE; 3273 } 3274 3275 static int fl_dump_key_pfcp_opt(struct sk_buff *skb, 3276 struct flow_dissector_key_enc_opts *enc_opts) 3277 { 3278 struct pfcp_metadata *md; 3279 struct nlattr *nest; 3280 3281 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_PFCP); 3282 if (!nest) 3283 goto nla_put_failure; 3284 3285 md = (struct pfcp_metadata *)&enc_opts->data[0]; 3286 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE, md->type)) 3287 goto nla_put_failure; 3288 3289 if (nla_put_be64(skb, TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID, 3290 md->seid, 0)) 3291 goto nla_put_failure; 3292 3293 nla_nest_end(skb, nest); 3294 return 0; 3295 3296 nla_put_failure: 3297 nla_nest_cancel(skb, nest); 3298 return -EMSGSIZE; 3299 } 3300 3301 static int fl_dump_key_ct(struct sk_buff *skb, 3302 struct flow_dissector_key_ct *key, 3303 struct flow_dissector_key_ct *mask) 3304 { 3305 if (IS_ENABLED(CONFIG_NF_CONNTRACK) && 3306 fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE, 3307 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK, 3308 sizeof(key->ct_state))) 
3309 goto nla_put_failure; 3310 3311 if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) && 3312 fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE, 3313 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK, 3314 sizeof(key->ct_zone))) 3315 goto nla_put_failure; 3316 3317 if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) && 3318 fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK, 3319 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK, 3320 sizeof(key->ct_mark))) 3321 goto nla_put_failure; 3322 3323 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) && 3324 fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS, 3325 &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK, 3326 sizeof(key->ct_labels))) 3327 goto nla_put_failure; 3328 3329 return 0; 3330 3331 nla_put_failure: 3332 return -EMSGSIZE; 3333 } 3334 3335 static int fl_dump_key_cfm(struct sk_buff *skb, 3336 struct flow_dissector_key_cfm *key, 3337 struct flow_dissector_key_cfm *mask) 3338 { 3339 struct nlattr *opts; 3340 int err; 3341 u8 mdl; 3342 3343 if (!memchr_inv(mask, 0, sizeof(*mask))) 3344 return 0; 3345 3346 opts = nla_nest_start(skb, TCA_FLOWER_KEY_CFM); 3347 if (!opts) 3348 return -EMSGSIZE; 3349 3350 if (FIELD_GET(FLOW_DIS_CFM_MDL_MASK, mask->mdl_ver)) { 3351 mdl = FIELD_GET(FLOW_DIS_CFM_MDL_MASK, key->mdl_ver); 3352 err = nla_put_u8(skb, TCA_FLOWER_KEY_CFM_MD_LEVEL, mdl); 3353 if (err) 3354 goto err_cfm_opts; 3355 } 3356 3357 if (mask->opcode) { 3358 err = nla_put_u8(skb, TCA_FLOWER_KEY_CFM_OPCODE, key->opcode); 3359 if (err) 3360 goto err_cfm_opts; 3361 } 3362 3363 nla_nest_end(skb, opts); 3364 3365 return 0; 3366 3367 err_cfm_opts: 3368 nla_nest_cancel(skb, opts); 3369 return err; 3370 } 3371 3372 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type, 3373 struct flow_dissector_key_enc_opts *enc_opts) 3374 { 3375 struct nlattr *nest; 3376 int err; 3377 3378 if (!enc_opts->len) 3379 return 0; 3380 3381 nest = nla_nest_start_noflag(skb, enc_opt_type); 3382 if (!nest) 3383 goto nla_put_failure; 3384 3385 switch (enc_opts->dst_opt_type) { 3386 case IP_TUNNEL_GENEVE_OPT_BIT: 3387 err = fl_dump_key_geneve_opt(skb, enc_opts); 3388 if (err) 3389 goto nla_put_failure; 3390 break; 3391 case IP_TUNNEL_VXLAN_OPT_BIT: 3392 err = fl_dump_key_vxlan_opt(skb, enc_opts); 3393 if (err) 3394 goto nla_put_failure; 3395 break; 3396 case IP_TUNNEL_ERSPAN_OPT_BIT: 3397 err = fl_dump_key_erspan_opt(skb, enc_opts); 3398 if (err) 3399 goto nla_put_failure; 3400 break; 3401 case IP_TUNNEL_GTP_OPT_BIT: 3402 err = fl_dump_key_gtp_opt(skb, enc_opts); 3403 if (err) 3404 goto nla_put_failure; 3405 break; 3406 case IP_TUNNEL_PFCP_OPT_BIT: 3407 err = fl_dump_key_pfcp_opt(skb, enc_opts); 3408 if (err) 3409 goto nla_put_failure; 3410 break; 3411 default: 3412 goto nla_put_failure; 3413 } 3414 nla_nest_end(skb, nest); 3415 return 0; 3416 3417 nla_put_failure: 3418 nla_nest_cancel(skb, nest); 3419 return -EMSGSIZE; 3420 } 3421 3422 static int fl_dump_key_enc_opt(struct sk_buff *skb, 3423 struct flow_dissector_key_enc_opts *key_opts, 3424 struct flow_dissector_key_enc_opts *msk_opts) 3425 { 3426 int err; 3427 3428 err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts); 3429 if (err) 3430 return err; 3431 3432 return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts); 3433 } 3434 3435 static int fl_dump_key(struct sk_buff *skb, struct net *net, 3436 struct fl_flow_key *key, struct fl_flow_key *mask) 3437 { 3438 if (mask->meta.ingress_ifindex) { 3439 struct net_device *dev; 3440 3441 dev = __dev_get_by_index(net, key->meta.ingress_ifindex); 
3442 if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name)) 3443 goto nla_put_failure; 3444 } 3445 3446 if (fl_dump_key_val(skb, &key->meta.l2_miss, 3447 TCA_FLOWER_L2_MISS, &mask->meta.l2_miss, 3448 TCA_FLOWER_UNSPEC, sizeof(key->meta.l2_miss))) 3449 goto nla_put_failure; 3450 3451 if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST, 3452 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK, 3453 sizeof(key->eth.dst)) || 3454 fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC, 3455 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK, 3456 sizeof(key->eth.src)) || 3457 fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE, 3458 &mask->basic.n_proto, TCA_FLOWER_UNSPEC, 3459 sizeof(key->basic.n_proto))) 3460 goto nla_put_failure; 3461 3462 if (mask->num_of_vlans.num_of_vlans) { 3463 if (nla_put_u8(skb, TCA_FLOWER_KEY_NUM_OF_VLANS, key->num_of_vlans.num_of_vlans)) 3464 goto nla_put_failure; 3465 } 3466 3467 if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls)) 3468 goto nla_put_failure; 3469 3470 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID, 3471 TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan)) 3472 goto nla_put_failure; 3473 3474 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID, 3475 TCA_FLOWER_KEY_CVLAN_PRIO, 3476 &key->cvlan, &mask->cvlan) || 3477 (mask->cvlan.vlan_tpid && 3478 nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE, 3479 key->cvlan.vlan_tpid))) 3480 goto nla_put_failure; 3481 3482 if (mask->basic.n_proto) { 3483 if (mask->cvlan.vlan_eth_type) { 3484 if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE, 3485 key->basic.n_proto)) 3486 goto nla_put_failure; 3487 } else if (mask->vlan.vlan_eth_type) { 3488 if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE, 3489 key->vlan.vlan_eth_type)) 3490 goto nla_put_failure; 3491 } 3492 } 3493 3494 if ((key->basic.n_proto == htons(ETH_P_IP) || 3495 key->basic.n_proto == htons(ETH_P_IPV6)) && 3496 (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO, 3497 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC, 3498 sizeof(key->basic.ip_proto)) || 3499 fl_dump_key_ip(skb, false, &key->ip, &mask->ip))) 3500 goto nla_put_failure; 3501 3502 if (mask->pppoe.session_id) { 3503 if (nla_put_be16(skb, TCA_FLOWER_KEY_PPPOE_SID, 3504 key->pppoe.session_id)) 3505 goto nla_put_failure; 3506 } 3507 if (mask->basic.n_proto && mask->pppoe.ppp_proto) { 3508 if (nla_put_be16(skb, TCA_FLOWER_KEY_PPP_PROTO, 3509 key->pppoe.ppp_proto)) 3510 goto nla_put_failure; 3511 } 3512 3513 if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS && 3514 (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC, 3515 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK, 3516 sizeof(key->ipv4.src)) || 3517 fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST, 3518 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK, 3519 sizeof(key->ipv4.dst)))) 3520 goto nla_put_failure; 3521 else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS && 3522 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC, 3523 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK, 3524 sizeof(key->ipv6.src)) || 3525 fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST, 3526 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK, 3527 sizeof(key->ipv6.dst)))) 3528 goto nla_put_failure; 3529 3530 if (key->basic.ip_proto == IPPROTO_TCP && 3531 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC, 3532 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK, 3533 sizeof(key->tp.src)) || 3534 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST, 3535 &mask->tp.dst, 
TCA_FLOWER_KEY_TCP_DST_MASK, 3536 sizeof(key->tp.dst)) || 3537 fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS, 3538 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK, 3539 sizeof(key->tcp.flags)))) 3540 goto nla_put_failure; 3541 else if (key->basic.ip_proto == IPPROTO_UDP && 3542 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC, 3543 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK, 3544 sizeof(key->tp.src)) || 3545 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST, 3546 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK, 3547 sizeof(key->tp.dst)))) 3548 goto nla_put_failure; 3549 else if (key->basic.ip_proto == IPPROTO_SCTP && 3550 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC, 3551 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK, 3552 sizeof(key->tp.src)) || 3553 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST, 3554 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK, 3555 sizeof(key->tp.dst)))) 3556 goto nla_put_failure; 3557 else if (key->basic.n_proto == htons(ETH_P_IP) && 3558 key->basic.ip_proto == IPPROTO_ICMP && 3559 (fl_dump_key_val(skb, &key->icmp.type, 3560 TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type, 3561 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK, 3562 sizeof(key->icmp.type)) || 3563 fl_dump_key_val(skb, &key->icmp.code, 3564 TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code, 3565 TCA_FLOWER_KEY_ICMPV4_CODE_MASK, 3566 sizeof(key->icmp.code)))) 3567 goto nla_put_failure; 3568 else if (key->basic.n_proto == htons(ETH_P_IPV6) && 3569 key->basic.ip_proto == IPPROTO_ICMPV6 && 3570 (fl_dump_key_val(skb, &key->icmp.type, 3571 TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type, 3572 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK, 3573 sizeof(key->icmp.type)) || 3574 fl_dump_key_val(skb, &key->icmp.code, 3575 TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code, 3576 TCA_FLOWER_KEY_ICMPV6_CODE_MASK, 3577 sizeof(key->icmp.code)))) 3578 goto nla_put_failure; 3579 else if ((key->basic.n_proto == htons(ETH_P_ARP) || 3580 key->basic.n_proto == htons(ETH_P_RARP)) && 3581 (fl_dump_key_val(skb, &key->arp.sip, 3582 TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip, 3583 TCA_FLOWER_KEY_ARP_SIP_MASK, 3584 sizeof(key->arp.sip)) || 3585 fl_dump_key_val(skb, &key->arp.tip, 3586 TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip, 3587 TCA_FLOWER_KEY_ARP_TIP_MASK, 3588 sizeof(key->arp.tip)) || 3589 fl_dump_key_val(skb, &key->arp.op, 3590 TCA_FLOWER_KEY_ARP_OP, &mask->arp.op, 3591 TCA_FLOWER_KEY_ARP_OP_MASK, 3592 sizeof(key->arp.op)) || 3593 fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA, 3594 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK, 3595 sizeof(key->arp.sha)) || 3596 fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA, 3597 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK, 3598 sizeof(key->arp.tha)))) 3599 goto nla_put_failure; 3600 else if (key->basic.ip_proto == IPPROTO_L2TP && 3601 fl_dump_key_val(skb, &key->l2tpv3.session_id, 3602 TCA_FLOWER_KEY_L2TPV3_SID, 3603 &mask->l2tpv3.session_id, 3604 TCA_FLOWER_UNSPEC, 3605 sizeof(key->l2tpv3.session_id))) 3606 goto nla_put_failure; 3607 3608 if (key->ipsec.spi && 3609 fl_dump_key_val(skb, &key->ipsec.spi, TCA_FLOWER_KEY_SPI, 3610 &mask->ipsec.spi, TCA_FLOWER_KEY_SPI_MASK, 3611 sizeof(key->ipsec.spi))) 3612 goto nla_put_failure; 3613 3614 if ((key->basic.ip_proto == IPPROTO_TCP || 3615 key->basic.ip_proto == IPPROTO_UDP || 3616 key->basic.ip_proto == IPPROTO_SCTP) && 3617 fl_dump_key_port_range(skb, key, mask)) 3618 goto nla_put_failure; 3619 3620 if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS && 3621 (fl_dump_key_val(skb, &key->enc_ipv4.src, 3622 
TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src, 3623 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK, 3624 sizeof(key->enc_ipv4.src)) || 3625 fl_dump_key_val(skb, &key->enc_ipv4.dst, 3626 TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst, 3627 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK, 3628 sizeof(key->enc_ipv4.dst)))) 3629 goto nla_put_failure; 3630 else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS && 3631 (fl_dump_key_val(skb, &key->enc_ipv6.src, 3632 TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src, 3633 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK, 3634 sizeof(key->enc_ipv6.src)) || 3635 fl_dump_key_val(skb, &key->enc_ipv6.dst, 3636 TCA_FLOWER_KEY_ENC_IPV6_DST, 3637 &mask->enc_ipv6.dst, 3638 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK, 3639 sizeof(key->enc_ipv6.dst)))) 3640 goto nla_put_failure; 3641 3642 if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID, 3643 &mask->enc_key_id, TCA_FLOWER_UNSPEC, 3644 sizeof(key->enc_key_id)) || 3645 fl_dump_key_val(skb, &key->enc_tp.src, 3646 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT, 3647 &mask->enc_tp.src, 3648 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK, 3649 sizeof(key->enc_tp.src)) || 3650 fl_dump_key_val(skb, &key->enc_tp.dst, 3651 TCA_FLOWER_KEY_ENC_UDP_DST_PORT, 3652 &mask->enc_tp.dst, 3653 TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK, 3654 sizeof(key->enc_tp.dst)) || 3655 fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) || 3656 fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts)) 3657 goto nla_put_failure; 3658 3659 if (fl_dump_key_ct(skb, &key->ct, &mask->ct)) 3660 goto nla_put_failure; 3661 3662 if (fl_dump_key_flags(skb, false, key->control.flags, 3663 mask->control.flags)) 3664 goto nla_put_failure; 3665 3666 if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH, 3667 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK, 3668 sizeof(key->hash.hash))) 3669 goto nla_put_failure; 3670 3671 if (fl_dump_key_cfm(skb, &key->cfm, &mask->cfm)) 3672 goto nla_put_failure; 3673 3674 if (fl_dump_key_flags(skb, true, key->enc_control.flags, 3675 mask->enc_control.flags)) 3676 goto nla_put_failure; 3677 3678 return 0; 3679 3680 nla_put_failure: 3681 return -EMSGSIZE; 3682 } 3683 3684 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh, 3685 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held) 3686 { 3687 struct cls_fl_filter *f = fh; 3688 struct nlattr *nest; 3689 struct fl_flow_key *key, *mask; 3690 bool skip_hw; 3691 3692 if (!f) 3693 return skb->len; 3694 3695 t->tcm_handle = f->handle; 3696 3697 nest = nla_nest_start_noflag(skb, TCA_OPTIONS); 3698 if (!nest) 3699 goto nla_put_failure; 3700 3701 spin_lock(&tp->lock); 3702 3703 if (f->res.classid && 3704 nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid)) 3705 goto nla_put_failure_locked; 3706 3707 key = &f->key; 3708 mask = &f->mask->key; 3709 skip_hw = tc_skip_hw(f->flags); 3710 3711 if (fl_dump_key(skb, net, key, mask)) 3712 goto nla_put_failure_locked; 3713 3714 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags)) 3715 goto nla_put_failure_locked; 3716 3717 spin_unlock(&tp->lock); 3718 3719 if (!skip_hw) 3720 fl_hw_update_stats(tp, f, rtnl_held); 3721 3722 if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count)) 3723 goto nla_put_failure; 3724 3725 if (tcf_exts_dump(skb, &f->exts)) 3726 goto nla_put_failure; 3727 3728 nla_nest_end(skb, nest); 3729 3730 if (tcf_exts_dump_stats(skb, &f->exts) < 0) 3731 goto nla_put_failure; 3732 3733 return skb->len; 3734 3735 nla_put_failure_locked: 3736 spin_unlock(&tp->lock); 3737 nla_put_failure: 3738 nla_nest_cancel(skb, nest); 3739 return -1; 3740 } 
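/* Terse counterpart of fl_dump(): only the handle, the flags and a terse action dump are emitted; the flow key and action statistics are skipped. */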
3741 3742 static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh, 3743 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held) 3744 { 3745 struct cls_fl_filter *f = fh; 3746 struct nlattr *nest; 3747 bool skip_hw; 3748 3749 if (!f) 3750 return skb->len; 3751 3752 t->tcm_handle = f->handle; 3753 3754 nest = nla_nest_start_noflag(skb, TCA_OPTIONS); 3755 if (!nest) 3756 goto nla_put_failure; 3757 3758 spin_lock(&tp->lock); 3759 3760 skip_hw = tc_skip_hw(f->flags); 3761 3762 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags)) 3763 goto nla_put_failure_locked; 3764 3765 spin_unlock(&tp->lock); 3766 3767 if (!skip_hw) 3768 fl_hw_update_stats(tp, f, rtnl_held); 3769 3770 if (tcf_exts_terse_dump(skb, &f->exts)) 3771 goto nla_put_failure; 3772 3773 nla_nest_end(skb, nest); 3774 3775 return skb->len; 3776 3777 nla_put_failure_locked: 3778 spin_unlock(&tp->lock); 3779 nla_put_failure: 3780 nla_nest_cancel(skb, nest); 3781 return -1; 3782 } 3783 3784 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv) 3785 { 3786 struct fl_flow_tmplt *tmplt = tmplt_priv; 3787 struct fl_flow_key *key, *mask; 3788 struct nlattr *nest; 3789 3790 nest = nla_nest_start_noflag(skb, TCA_OPTIONS); 3791 if (!nest) 3792 goto nla_put_failure; 3793 3794 key = &tmplt->dummy_key; 3795 mask = &tmplt->mask; 3796 3797 if (fl_dump_key(skb, net, key, mask)) 3798 goto nla_put_failure; 3799 3800 nla_nest_end(skb, nest); 3801 3802 return skb->len; 3803 3804 nla_put_failure: 3805 nla_nest_cancel(skb, nest); 3806 return -EMSGSIZE; 3807 } 3808 3809 static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q, 3810 unsigned long base) 3811 { 3812 struct cls_fl_filter *f = fh; 3813 3814 tc_cls_bind_class(classid, cl, q, &f->res, base); 3815 } 3816 3817 static bool fl_delete_empty(struct tcf_proto *tp) 3818 { 3819 struct cls_fl_head *head = fl_head_dereference(tp); 3820 3821 spin_lock(&tp->lock); 3822 tp->deleting = idr_is_empty(&head->handle_idr); 3823 spin_unlock(&tp->lock); 3824 3825 return tp->deleting; 3826 } 3827 3828 static struct tcf_proto_ops cls_fl_ops __read_mostly = { 3829 .kind = "flower", 3830 .classify = fl_classify, 3831 .init = fl_init, 3832 .destroy = fl_destroy, 3833 .get = fl_get, 3834 .put = fl_put, 3835 .change = fl_change, 3836 .delete = fl_delete, 3837 .delete_empty = fl_delete_empty, 3838 .walk = fl_walk, 3839 .reoffload = fl_reoffload, 3840 .hw_add = fl_hw_add, 3841 .hw_del = fl_hw_del, 3842 .dump = fl_dump, 3843 .terse_dump = fl_terse_dump, 3844 .bind_class = fl_bind_class, 3845 .tmplt_create = fl_tmplt_create, 3846 .tmplt_destroy = fl_tmplt_destroy, 3847 .tmplt_reoffload = fl_tmplt_reoffload, 3848 .tmplt_dump = fl_tmplt_dump, 3849 .get_exts = fl_get_exts, 3850 .owner = THIS_MODULE, 3851 .flags = TCF_PROTO_OPS_DOIT_UNLOCKED, 3852 }; 3853 MODULE_ALIAS_NET_CLS("flower"); 3854 3855 static int __init cls_fl_init(void) 3856 { 3857 return register_tcf_proto_ops(&cls_fl_ops); 3858 } 3859 3860 static void __exit cls_fl_exit(void) 3861 { 3862 unregister_tcf_proto_ops(&cls_fl_ops); 3863 } 3864 3865 module_init(cls_fl_init); 3866 module_exit(cls_fl_exit); 3867 3868 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>"); 3869 MODULE_DESCRIPTION("Flower classifier"); 3870 MODULE_LICENSE("GPL v2"); 3871