1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * net/sched/cls_flower.c Flower classifier 4 * 5 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us> 6 */ 7 8 #include <linux/kernel.h> 9 #include <linux/init.h> 10 #include <linux/module.h> 11 #include <linux/rhashtable.h> 12 #include <linux/workqueue.h> 13 #include <linux/refcount.h> 14 #include <linux/bitfield.h> 15 16 #include <linux/if_ether.h> 17 #include <linux/in6.h> 18 #include <linux/ip.h> 19 #include <linux/mpls.h> 20 #include <linux/ppp_defs.h> 21 22 #include <net/sch_generic.h> 23 #include <net/pkt_cls.h> 24 #include <net/pkt_sched.h> 25 #include <net/ip.h> 26 #include <net/flow_dissector.h> 27 #include <net/geneve.h> 28 #include <net/vxlan.h> 29 #include <net/erspan.h> 30 #include <net/gtp.h> 31 #include <net/pfcp.h> 32 #include <net/tc_wrapper.h> 33 34 #include <net/dst.h> 35 #include <net/dst_metadata.h> 36 37 #include <uapi/linux/netfilter/nf_conntrack_common.h> 38 39 #define TCA_FLOWER_KEY_CT_FLAGS_MAX \ 40 ((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1) 41 #define TCA_FLOWER_KEY_CT_FLAGS_MASK \ 42 (TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) 43 44 #define TUNNEL_FLAGS_PRESENT (\ 45 _BITUL(IP_TUNNEL_CSUM_BIT) | \ 46 _BITUL(IP_TUNNEL_DONT_FRAGMENT_BIT) | \ 47 _BITUL(IP_TUNNEL_OAM_BIT) | \ 48 _BITUL(IP_TUNNEL_CRIT_OPT_BIT)) 49 50 struct fl_flow_key { 51 struct flow_dissector_key_meta meta; 52 struct flow_dissector_key_control control; 53 struct flow_dissector_key_control enc_control; 54 struct flow_dissector_key_basic basic; 55 struct flow_dissector_key_eth_addrs eth; 56 struct flow_dissector_key_vlan vlan; 57 struct flow_dissector_key_vlan cvlan; 58 union { 59 struct flow_dissector_key_ipv4_addrs ipv4; 60 struct flow_dissector_key_ipv6_addrs ipv6; 61 }; 62 struct flow_dissector_key_ports tp; 63 struct flow_dissector_key_icmp icmp; 64 struct flow_dissector_key_arp arp; 65 struct flow_dissector_key_keyid enc_key_id; 66 union { 67 struct flow_dissector_key_ipv4_addrs enc_ipv4; 68 struct flow_dissector_key_ipv6_addrs enc_ipv6; 69 }; 70 struct flow_dissector_key_ports enc_tp; 71 struct flow_dissector_key_mpls mpls; 72 struct flow_dissector_key_tcp tcp; 73 struct flow_dissector_key_ip ip; 74 struct flow_dissector_key_ip enc_ip; 75 struct flow_dissector_key_enc_opts enc_opts; 76 struct flow_dissector_key_ports_range tp_range; 77 struct flow_dissector_key_ct ct; 78 struct flow_dissector_key_hash hash; 79 struct flow_dissector_key_num_of_vlans num_of_vlans; 80 struct flow_dissector_key_pppoe pppoe; 81 struct flow_dissector_key_l2tpv3 l2tpv3; 82 struct flow_dissector_key_ipsec ipsec; 83 struct flow_dissector_key_cfm cfm; 84 struct flow_dissector_key_enc_flags enc_flags; 85 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. 
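 * The struct is long-aligned and mask ranges are rounded to sizeof(long),
 * so masked key copies and comparisons can walk the key in whole longs.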
*/ 86 87 struct fl_flow_mask_range { 88 unsigned short int start; 89 unsigned short int end; 90 }; 91 92 struct fl_flow_mask { 93 struct fl_flow_key key; 94 struct fl_flow_mask_range range; 95 u32 flags; 96 struct rhash_head ht_node; 97 struct rhashtable ht; 98 struct rhashtable_params filter_ht_params; 99 struct flow_dissector dissector; 100 struct list_head filters; 101 struct rcu_work rwork; 102 struct list_head list; 103 refcount_t refcnt; 104 }; 105 106 struct fl_flow_tmplt { 107 struct fl_flow_key dummy_key; 108 struct fl_flow_key mask; 109 struct flow_dissector dissector; 110 struct tcf_chain *chain; 111 }; 112 113 struct cls_fl_head { 114 struct rhashtable ht; 115 spinlock_t masks_lock; /* Protect masks list */ 116 struct list_head masks; 117 struct list_head hw_filters; 118 struct rcu_work rwork; 119 struct idr handle_idr; 120 }; 121 122 struct cls_fl_filter { 123 struct fl_flow_mask *mask; 124 struct rhash_head ht_node; 125 struct fl_flow_key mkey; 126 struct tcf_exts exts; 127 struct tcf_result res; 128 struct fl_flow_key key; 129 struct list_head list; 130 struct list_head hw_list; 131 u32 handle; 132 u32 flags; 133 u32 in_hw_count; 134 u8 needs_tc_skb_ext:1; 135 struct rcu_work rwork; 136 struct net_device *hw_dev; 137 /* Flower classifier is unlocked, which means that its reference counter 138 * can be changed concurrently without any kind of external 139 * synchronization. Use atomic reference counter to be concurrency-safe. 140 */ 141 refcount_t refcnt; 142 bool deleted; 143 }; 144 145 static const struct rhashtable_params mask_ht_params = { 146 .key_offset = offsetof(struct fl_flow_mask, key), 147 .key_len = sizeof(struct fl_flow_key), 148 .head_offset = offsetof(struct fl_flow_mask, ht_node), 149 .automatic_shrinking = true, 150 }; 151 152 static unsigned short int fl_mask_range(const struct fl_flow_mask *mask) 153 { 154 return mask->range.end - mask->range.start; 155 } 156 157 static void fl_mask_update_range(struct fl_flow_mask *mask) 158 { 159 const u8 *bytes = (const u8 *) &mask->key; 160 size_t size = sizeof(mask->key); 161 size_t i, first = 0, last; 162 163 for (i = 0; i < size; i++) { 164 if (bytes[i]) { 165 first = i; 166 break; 167 } 168 } 169 last = first; 170 for (i = size - 1; i != first; i--) { 171 if (bytes[i]) { 172 last = i; 173 break; 174 } 175 } 176 mask->range.start = rounddown(first, sizeof(long)); 177 mask->range.end = roundup(last + 1, sizeof(long)); 178 } 179 180 static void *fl_key_get_start(struct fl_flow_key *key, 181 const struct fl_flow_mask *mask) 182 { 183 return (u8 *) key + mask->range.start; 184 } 185 186 static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key, 187 struct fl_flow_mask *mask) 188 { 189 const long *lkey = fl_key_get_start(key, mask); 190 const long *lmask = fl_key_get_start(&mask->key, mask); 191 long *lmkey = fl_key_get_start(mkey, mask); 192 int i; 193 194 for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) 195 *lmkey++ = *lkey++ & *lmask++; 196 } 197 198 static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt, 199 struct fl_flow_mask *mask) 200 { 201 const long *lmask = fl_key_get_start(&mask->key, mask); 202 const long *ltmplt; 203 int i; 204 205 if (!tmplt) 206 return true; 207 ltmplt = fl_key_get_start(&tmplt->mask, mask); 208 for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) { 209 if (~*ltmplt++ & *lmask++) 210 return false; 211 } 212 return true; 213 } 214 215 static void fl_clear_masked_range(struct fl_flow_key *key, 216 struct fl_flow_mask *mask) 217 { 218 
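	/* Zero only the bytes of @key that fall inside this mask's rounded
	 * [start, end) range; bytes outside the range are not used for
	 * matching.
	 */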
memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask)); 219 } 220 221 static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter, 222 struct fl_flow_key *key, 223 struct fl_flow_key *mkey) 224 { 225 u16 min_mask, max_mask, min_val, max_val; 226 227 min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst); 228 max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst); 229 min_val = ntohs(filter->key.tp_range.tp_min.dst); 230 max_val = ntohs(filter->key.tp_range.tp_max.dst); 231 232 if (min_mask && max_mask) { 233 if (ntohs(key->tp_range.tp.dst) < min_val || 234 ntohs(key->tp_range.tp.dst) > max_val) 235 return false; 236 237 /* skb does not have min and max values */ 238 mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst; 239 mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst; 240 } 241 return true; 242 } 243 244 static bool fl_range_port_src_cmp(struct cls_fl_filter *filter, 245 struct fl_flow_key *key, 246 struct fl_flow_key *mkey) 247 { 248 u16 min_mask, max_mask, min_val, max_val; 249 250 min_mask = ntohs(filter->mask->key.tp_range.tp_min.src); 251 max_mask = ntohs(filter->mask->key.tp_range.tp_max.src); 252 min_val = ntohs(filter->key.tp_range.tp_min.src); 253 max_val = ntohs(filter->key.tp_range.tp_max.src); 254 255 if (min_mask && max_mask) { 256 if (ntohs(key->tp_range.tp.src) < min_val || 257 ntohs(key->tp_range.tp.src) > max_val) 258 return false; 259 260 /* skb does not have min and max values */ 261 mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src; 262 mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src; 263 } 264 return true; 265 } 266 267 static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask, 268 struct fl_flow_key *mkey) 269 { 270 return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask), 271 mask->filter_ht_params); 272 } 273 274 static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask, 275 struct fl_flow_key *mkey, 276 struct fl_flow_key *key) 277 { 278 struct cls_fl_filter *filter, *f; 279 280 list_for_each_entry_rcu(filter, &mask->filters, list) { 281 if (!fl_range_port_dst_cmp(filter, key, mkey)) 282 continue; 283 284 if (!fl_range_port_src_cmp(filter, key, mkey)) 285 continue; 286 287 f = __fl_lookup(mask, mkey); 288 if (f) 289 return f; 290 } 291 return NULL; 292 } 293 294 static noinline_for_stack 295 struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key) 296 { 297 struct fl_flow_key mkey; 298 299 fl_set_masked_key(&mkey, key, mask); 300 if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE)) 301 return fl_lookup_range(mask, &mkey, key); 302 303 return __fl_lookup(mask, &mkey); 304 } 305 306 static u16 fl_ct_info_to_flower_map[] = { 307 [IP_CT_ESTABLISHED] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 308 TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED, 309 [IP_CT_RELATED] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 310 TCA_FLOWER_KEY_CT_FLAGS_RELATED, 311 [IP_CT_ESTABLISHED_REPLY] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 312 TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED | 313 TCA_FLOWER_KEY_CT_FLAGS_REPLY, 314 [IP_CT_RELATED_REPLY] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 315 TCA_FLOWER_KEY_CT_FLAGS_RELATED | 316 TCA_FLOWER_KEY_CT_FLAGS_REPLY, 317 [IP_CT_NEW] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 318 TCA_FLOWER_KEY_CT_FLAGS_NEW, 319 }; 320 321 TC_INDIRECT_SCOPE int fl_classify(struct sk_buff *skb, 322 const struct tcf_proto *tp, 323 struct tcf_result *res) 324 { 325 struct cls_fl_head *head = rcu_dereference_bh(tp->root); 326 bool post_ct = tc_skb_cb(skb)->post_ct; 327 u16 zone = tc_skb_cb(skb)->zone; 328 
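	/* skb_key is rebuilt on every iteration below: the byte range covered
	 * by the current mask is cleared, the relevant fields are dissected
	 * from the skb, and the masked result is looked up in that mask's
	 * hash table.
	 */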
struct fl_flow_key skb_key; 329 struct fl_flow_mask *mask; 330 struct cls_fl_filter *f; 331 332 list_for_each_entry_rcu(mask, &head->masks, list) { 333 flow_dissector_init_keys(&skb_key.control, &skb_key.basic); 334 fl_clear_masked_range(&skb_key, mask); 335 336 skb_flow_dissect_meta(skb, &mask->dissector, &skb_key); 337 /* skb_flow_dissect() does not set n_proto in case an unknown 338 * protocol, so do it rather here. 339 */ 340 skb_key.basic.n_proto = skb_protocol(skb, false); 341 skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key); 342 skb_flow_dissect_ct(skb, &mask->dissector, &skb_key, 343 fl_ct_info_to_flower_map, 344 ARRAY_SIZE(fl_ct_info_to_flower_map), 345 post_ct, zone); 346 skb_flow_dissect_hash(skb, &mask->dissector, &skb_key); 347 skb_flow_dissect(skb, &mask->dissector, &skb_key, 348 FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP); 349 350 f = fl_mask_lookup(mask, &skb_key); 351 if (f && !tc_skip_sw(f->flags)) { 352 *res = f->res; 353 return tcf_exts_exec(skb, &f->exts, res); 354 } 355 } 356 return -1; 357 } 358 359 static int fl_init(struct tcf_proto *tp) 360 { 361 struct cls_fl_head *head; 362 363 head = kzalloc(sizeof(*head), GFP_KERNEL); 364 if (!head) 365 return -ENOBUFS; 366 367 spin_lock_init(&head->masks_lock); 368 INIT_LIST_HEAD_RCU(&head->masks); 369 INIT_LIST_HEAD(&head->hw_filters); 370 rcu_assign_pointer(tp->root, head); 371 idr_init(&head->handle_idr); 372 373 return rhashtable_init(&head->ht, &mask_ht_params); 374 } 375 376 static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done) 377 { 378 /* temporary masks don't have their filters list and ht initialized */ 379 if (mask_init_done) { 380 WARN_ON(!list_empty(&mask->filters)); 381 rhashtable_destroy(&mask->ht); 382 } 383 kfree(mask); 384 } 385 386 static void fl_mask_free_work(struct work_struct *work) 387 { 388 struct fl_flow_mask *mask = container_of(to_rcu_work(work), 389 struct fl_flow_mask, rwork); 390 391 fl_mask_free(mask, true); 392 } 393 394 static void fl_uninit_mask_free_work(struct work_struct *work) 395 { 396 struct fl_flow_mask *mask = container_of(to_rcu_work(work), 397 struct fl_flow_mask, rwork); 398 399 fl_mask_free(mask, false); 400 } 401 402 static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask) 403 { 404 if (!refcount_dec_and_test(&mask->refcnt)) 405 return false; 406 407 rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params); 408 409 spin_lock(&head->masks_lock); 410 list_del_rcu(&mask->list); 411 spin_unlock(&head->masks_lock); 412 413 tcf_queue_work(&mask->rwork, fl_mask_free_work); 414 415 return true; 416 } 417 418 static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp) 419 { 420 /* Flower classifier only changes root pointer during init and destroy. 421 * Users must obtain reference to tcf_proto instance before calling its 422 * API, so tp->root pointer is protected from concurrent call to 423 * fl_destroy() by reference counting. 
424 */ 425 return rcu_dereference_raw(tp->root); 426 } 427 428 static void __fl_destroy_filter(struct cls_fl_filter *f) 429 { 430 if (f->needs_tc_skb_ext) 431 tc_skb_ext_tc_disable(); 432 tcf_exts_destroy(&f->exts); 433 tcf_exts_put_net(&f->exts); 434 kfree(f); 435 } 436 437 static void fl_destroy_filter_work(struct work_struct *work) 438 { 439 struct cls_fl_filter *f = container_of(to_rcu_work(work), 440 struct cls_fl_filter, rwork); 441 442 __fl_destroy_filter(f); 443 } 444 445 static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f, 446 bool rtnl_held, struct netlink_ext_ack *extack) 447 { 448 struct tcf_block *block = tp->chain->block; 449 struct flow_cls_offload cls_flower = {}; 450 451 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack); 452 cls_flower.command = FLOW_CLS_DESTROY; 453 cls_flower.cookie = (unsigned long) f; 454 455 tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false, 456 &f->flags, &f->in_hw_count, rtnl_held); 457 458 } 459 460 static int fl_hw_replace_filter(struct tcf_proto *tp, 461 struct cls_fl_filter *f, bool rtnl_held, 462 struct netlink_ext_ack *extack) 463 { 464 struct tcf_block *block = tp->chain->block; 465 struct flow_cls_offload cls_flower = {}; 466 bool skip_sw = tc_skip_sw(f->flags); 467 int err = 0; 468 469 cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts)); 470 if (!cls_flower.rule) 471 return -ENOMEM; 472 473 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack); 474 cls_flower.command = FLOW_CLS_REPLACE; 475 cls_flower.cookie = (unsigned long) f; 476 cls_flower.rule->match.dissector = &f->mask->dissector; 477 cls_flower.rule->match.mask = &f->mask->key; 478 cls_flower.rule->match.key = &f->mkey; 479 cls_flower.classid = f->res.classid; 480 481 err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts, 482 cls_flower.common.extack); 483 if (err) { 484 kfree(cls_flower.rule); 485 486 return skip_sw ? 
err : 0; 487 } 488 489 err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, 490 skip_sw, &f->flags, &f->in_hw_count, rtnl_held); 491 tc_cleanup_offload_action(&cls_flower.rule->action); 492 kfree(cls_flower.rule); 493 494 if (err) { 495 fl_hw_destroy_filter(tp, f, rtnl_held, NULL); 496 return err; 497 } 498 499 if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW)) 500 return -EINVAL; 501 502 return 0; 503 } 504 505 static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f, 506 bool rtnl_held) 507 { 508 struct tcf_block *block = tp->chain->block; 509 struct flow_cls_offload cls_flower = {}; 510 511 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL); 512 cls_flower.command = FLOW_CLS_STATS; 513 cls_flower.cookie = (unsigned long) f; 514 cls_flower.classid = f->res.classid; 515 516 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, 517 rtnl_held); 518 519 tcf_exts_hw_stats_update(&f->exts, &cls_flower.stats, cls_flower.use_act_stats); 520 } 521 522 static void __fl_put(struct cls_fl_filter *f) 523 { 524 if (!refcount_dec_and_test(&f->refcnt)) 525 return; 526 527 if (tcf_exts_get_net(&f->exts)) 528 tcf_queue_work(&f->rwork, fl_destroy_filter_work); 529 else 530 __fl_destroy_filter(f); 531 } 532 533 static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle) 534 { 535 struct cls_fl_filter *f; 536 537 rcu_read_lock(); 538 f = idr_find(&head->handle_idr, handle); 539 if (f && !refcount_inc_not_zero(&f->refcnt)) 540 f = NULL; 541 rcu_read_unlock(); 542 543 return f; 544 } 545 546 static struct tcf_exts *fl_get_exts(const struct tcf_proto *tp, u32 handle) 547 { 548 struct cls_fl_head *head = rcu_dereference_bh(tp->root); 549 struct cls_fl_filter *f; 550 551 f = idr_find(&head->handle_idr, handle); 552 return f ? 
&f->exts : NULL; 553 } 554 555 static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f, 556 bool *last, bool rtnl_held, 557 struct netlink_ext_ack *extack) 558 { 559 struct cls_fl_head *head = fl_head_dereference(tp); 560 561 *last = false; 562 563 spin_lock(&tp->lock); 564 if (f->deleted) { 565 spin_unlock(&tp->lock); 566 return -ENOENT; 567 } 568 569 f->deleted = true; 570 rhashtable_remove_fast(&f->mask->ht, &f->ht_node, 571 f->mask->filter_ht_params); 572 idr_remove(&head->handle_idr, f->handle); 573 list_del_rcu(&f->list); 574 spin_unlock(&tp->lock); 575 576 *last = fl_mask_put(head, f->mask); 577 if (!tc_skip_hw(f->flags)) 578 fl_hw_destroy_filter(tp, f, rtnl_held, extack); 579 tcf_unbind_filter(tp, &f->res); 580 __fl_put(f); 581 582 return 0; 583 } 584 585 static void fl_destroy_sleepable(struct work_struct *work) 586 { 587 struct cls_fl_head *head = container_of(to_rcu_work(work), 588 struct cls_fl_head, 589 rwork); 590 591 rhashtable_destroy(&head->ht); 592 kfree(head); 593 module_put(THIS_MODULE); 594 } 595 596 static void fl_destroy(struct tcf_proto *tp, bool rtnl_held, 597 struct netlink_ext_ack *extack) 598 { 599 struct cls_fl_head *head = fl_head_dereference(tp); 600 struct fl_flow_mask *mask, *next_mask; 601 struct cls_fl_filter *f, *next; 602 bool last; 603 604 list_for_each_entry_safe(mask, next_mask, &head->masks, list) { 605 list_for_each_entry_safe(f, next, &mask->filters, list) { 606 __fl_delete(tp, f, &last, rtnl_held, extack); 607 if (last) 608 break; 609 } 610 } 611 idr_destroy(&head->handle_idr); 612 613 __module_get(THIS_MODULE); 614 tcf_queue_work(&head->rwork, fl_destroy_sleepable); 615 } 616 617 static void fl_put(struct tcf_proto *tp, void *arg) 618 { 619 struct cls_fl_filter *f = arg; 620 621 __fl_put(f); 622 } 623 624 static void *fl_get(struct tcf_proto *tp, u32 handle) 625 { 626 struct cls_fl_head *head = fl_head_dereference(tp); 627 628 return __fl_get(head, handle); 629 } 630 631 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { 632 [TCA_FLOWER_UNSPEC] = { .strict_start_type = 633 TCA_FLOWER_L2_MISS }, 634 [TCA_FLOWER_CLASSID] = { .type = NLA_U32 }, 635 [TCA_FLOWER_INDEV] = { .type = NLA_STRING, 636 .len = IFNAMSIZ }, 637 [TCA_FLOWER_KEY_ETH_DST] = { .len = ETH_ALEN }, 638 [TCA_FLOWER_KEY_ETH_DST_MASK] = { .len = ETH_ALEN }, 639 [TCA_FLOWER_KEY_ETH_SRC] = { .len = ETH_ALEN }, 640 [TCA_FLOWER_KEY_ETH_SRC_MASK] = { .len = ETH_ALEN }, 641 [TCA_FLOWER_KEY_ETH_TYPE] = { .type = NLA_U16 }, 642 [TCA_FLOWER_KEY_IP_PROTO] = { .type = NLA_U8 }, 643 [TCA_FLOWER_KEY_IPV4_SRC] = { .type = NLA_U32 }, 644 [TCA_FLOWER_KEY_IPV4_SRC_MASK] = { .type = NLA_U32 }, 645 [TCA_FLOWER_KEY_IPV4_DST] = { .type = NLA_U32 }, 646 [TCA_FLOWER_KEY_IPV4_DST_MASK] = { .type = NLA_U32 }, 647 [TCA_FLOWER_KEY_IPV6_SRC] = { .len = sizeof(struct in6_addr) }, 648 [TCA_FLOWER_KEY_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) }, 649 [TCA_FLOWER_KEY_IPV6_DST] = { .len = sizeof(struct in6_addr) }, 650 [TCA_FLOWER_KEY_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) }, 651 [TCA_FLOWER_KEY_TCP_SRC] = { .type = NLA_U16 }, 652 [TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 }, 653 [TCA_FLOWER_KEY_UDP_SRC] = { .type = NLA_U16 }, 654 [TCA_FLOWER_KEY_UDP_DST] = { .type = NLA_U16 }, 655 [TCA_FLOWER_KEY_VLAN_ID] = { .type = NLA_U16 }, 656 [TCA_FLOWER_KEY_VLAN_PRIO] = { .type = NLA_U8 }, 657 [TCA_FLOWER_KEY_VLAN_ETH_TYPE] = { .type = NLA_U16 }, 658 [TCA_FLOWER_KEY_ENC_KEY_ID] = { .type = NLA_U32 }, 659 [TCA_FLOWER_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 }, 660 
[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 }, 661 [TCA_FLOWER_KEY_ENC_IPV4_DST] = { .type = NLA_U32 }, 662 [TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 }, 663 [TCA_FLOWER_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) }, 664 [TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) }, 665 [TCA_FLOWER_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) }, 666 [TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) }, 667 [TCA_FLOWER_KEY_TCP_SRC_MASK] = { .type = NLA_U16 }, 668 [TCA_FLOWER_KEY_TCP_DST_MASK] = { .type = NLA_U16 }, 669 [TCA_FLOWER_KEY_UDP_SRC_MASK] = { .type = NLA_U16 }, 670 [TCA_FLOWER_KEY_UDP_DST_MASK] = { .type = NLA_U16 }, 671 [TCA_FLOWER_KEY_SCTP_SRC_MASK] = { .type = NLA_U16 }, 672 [TCA_FLOWER_KEY_SCTP_DST_MASK] = { .type = NLA_U16 }, 673 [TCA_FLOWER_KEY_SCTP_SRC] = { .type = NLA_U16 }, 674 [TCA_FLOWER_KEY_SCTP_DST] = { .type = NLA_U16 }, 675 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT] = { .type = NLA_U16 }, 676 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 }, 677 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NLA_U16 }, 678 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 }, 679 [TCA_FLOWER_KEY_FLAGS] = { .type = NLA_U32 }, 680 [TCA_FLOWER_KEY_FLAGS_MASK] = { .type = NLA_U32 }, 681 [TCA_FLOWER_KEY_ICMPV4_TYPE] = { .type = NLA_U8 }, 682 [TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 }, 683 [TCA_FLOWER_KEY_ICMPV4_CODE] = { .type = NLA_U8 }, 684 [TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 }, 685 [TCA_FLOWER_KEY_ICMPV6_TYPE] = { .type = NLA_U8 }, 686 [TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 }, 687 [TCA_FLOWER_KEY_ICMPV6_CODE] = { .type = NLA_U8 }, 688 [TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 }, 689 [TCA_FLOWER_KEY_ARP_SIP] = { .type = NLA_U32 }, 690 [TCA_FLOWER_KEY_ARP_SIP_MASK] = { .type = NLA_U32 }, 691 [TCA_FLOWER_KEY_ARP_TIP] = { .type = NLA_U32 }, 692 [TCA_FLOWER_KEY_ARP_TIP_MASK] = { .type = NLA_U32 }, 693 [TCA_FLOWER_KEY_ARP_OP] = { .type = NLA_U8 }, 694 [TCA_FLOWER_KEY_ARP_OP_MASK] = { .type = NLA_U8 }, 695 [TCA_FLOWER_KEY_ARP_SHA] = { .len = ETH_ALEN }, 696 [TCA_FLOWER_KEY_ARP_SHA_MASK] = { .len = ETH_ALEN }, 697 [TCA_FLOWER_KEY_ARP_THA] = { .len = ETH_ALEN }, 698 [TCA_FLOWER_KEY_ARP_THA_MASK] = { .len = ETH_ALEN }, 699 [TCA_FLOWER_KEY_MPLS_TTL] = { .type = NLA_U8 }, 700 [TCA_FLOWER_KEY_MPLS_BOS] = { .type = NLA_U8 }, 701 [TCA_FLOWER_KEY_MPLS_TC] = { .type = NLA_U8 }, 702 [TCA_FLOWER_KEY_MPLS_LABEL] = { .type = NLA_U32 }, 703 [TCA_FLOWER_KEY_MPLS_OPTS] = { .type = NLA_NESTED }, 704 [TCA_FLOWER_KEY_TCP_FLAGS] = { .type = NLA_U16 }, 705 [TCA_FLOWER_KEY_TCP_FLAGS_MASK] = { .type = NLA_U16 }, 706 [TCA_FLOWER_KEY_IP_TOS] = { .type = NLA_U8 }, 707 [TCA_FLOWER_KEY_IP_TOS_MASK] = { .type = NLA_U8 }, 708 [TCA_FLOWER_KEY_IP_TTL] = { .type = NLA_U8 }, 709 [TCA_FLOWER_KEY_IP_TTL_MASK] = { .type = NLA_U8 }, 710 [TCA_FLOWER_KEY_CVLAN_ID] = { .type = NLA_U16 }, 711 [TCA_FLOWER_KEY_CVLAN_PRIO] = { .type = NLA_U8 }, 712 [TCA_FLOWER_KEY_CVLAN_ETH_TYPE] = { .type = NLA_U16 }, 713 [TCA_FLOWER_KEY_ENC_IP_TOS] = { .type = NLA_U8 }, 714 [TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 }, 715 [TCA_FLOWER_KEY_ENC_IP_TTL] = { .type = NLA_U8 }, 716 [TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 }, 717 [TCA_FLOWER_KEY_ENC_OPTS] = { .type = NLA_NESTED }, 718 [TCA_FLOWER_KEY_ENC_OPTS_MASK] = { .type = NLA_NESTED }, 719 [TCA_FLOWER_KEY_CT_STATE] = 720 NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK), 721 [TCA_FLOWER_KEY_CT_STATE_MASK] = 722 NLA_POLICY_MASK(NLA_U16, 
TCA_FLOWER_KEY_CT_FLAGS_MASK), 723 [TCA_FLOWER_KEY_CT_ZONE] = { .type = NLA_U16 }, 724 [TCA_FLOWER_KEY_CT_ZONE_MASK] = { .type = NLA_U16 }, 725 [TCA_FLOWER_KEY_CT_MARK] = { .type = NLA_U32 }, 726 [TCA_FLOWER_KEY_CT_MARK_MASK] = { .type = NLA_U32 }, 727 [TCA_FLOWER_KEY_CT_LABELS] = { .type = NLA_BINARY, 728 .len = 128 / BITS_PER_BYTE }, 729 [TCA_FLOWER_KEY_CT_LABELS_MASK] = { .type = NLA_BINARY, 730 .len = 128 / BITS_PER_BYTE }, 731 [TCA_FLOWER_FLAGS] = { .type = NLA_U32 }, 732 [TCA_FLOWER_KEY_HASH] = { .type = NLA_U32 }, 733 [TCA_FLOWER_KEY_HASH_MASK] = { .type = NLA_U32 }, 734 [TCA_FLOWER_KEY_NUM_OF_VLANS] = { .type = NLA_U8 }, 735 [TCA_FLOWER_KEY_PPPOE_SID] = { .type = NLA_U16 }, 736 [TCA_FLOWER_KEY_PPP_PROTO] = { .type = NLA_U16 }, 737 [TCA_FLOWER_KEY_L2TPV3_SID] = { .type = NLA_U32 }, 738 [TCA_FLOWER_KEY_SPI] = { .type = NLA_U32 }, 739 [TCA_FLOWER_KEY_SPI_MASK] = { .type = NLA_U32 }, 740 [TCA_FLOWER_L2_MISS] = NLA_POLICY_MAX(NLA_U8, 1), 741 [TCA_FLOWER_KEY_CFM] = { .type = NLA_NESTED }, 742 [TCA_FLOWER_KEY_ENC_FLAGS] = NLA_POLICY_MASK(NLA_U32, 743 TUNNEL_FLAGS_PRESENT), 744 [TCA_FLOWER_KEY_ENC_FLAGS_MASK] = NLA_POLICY_MASK(NLA_U32, 745 TUNNEL_FLAGS_PRESENT), 746 }; 747 748 static const struct nla_policy 749 enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = { 750 [TCA_FLOWER_KEY_ENC_OPTS_UNSPEC] = { 751 .strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN }, 752 [TCA_FLOWER_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED }, 753 [TCA_FLOWER_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED }, 754 [TCA_FLOWER_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED }, 755 [TCA_FLOWER_KEY_ENC_OPTS_GTP] = { .type = NLA_NESTED }, 756 [TCA_FLOWER_KEY_ENC_OPTS_PFCP] = { .type = NLA_NESTED }, 757 }; 758 759 static const struct nla_policy 760 geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = { 761 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 }, 762 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 }, 763 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY, 764 .len = 128 }, 765 }; 766 767 static const struct nla_policy 768 vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = { 769 [TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP] = { .type = NLA_U32 }, 770 }; 771 772 static const struct nla_policy 773 erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = { 774 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER] = { .type = NLA_U8 }, 775 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX] = { .type = NLA_U32 }, 776 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] = { .type = NLA_U8 }, 777 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID] = { .type = NLA_U8 }, 778 }; 779 780 static const struct nla_policy 781 gtp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1] = { 782 [TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] = { .type = NLA_U8 }, 783 [TCA_FLOWER_KEY_ENC_OPT_GTP_QFI] = { .type = NLA_U8 }, 784 }; 785 786 static const struct nla_policy 787 pfcp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX + 1] = { 788 [TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE] = { .type = NLA_U8 }, 789 [TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID] = { .type = NLA_U64 }, 790 }; 791 792 static const struct nla_policy 793 mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = { 794 [TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH] = { .type = NLA_U8 }, 795 [TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL] = { .type = NLA_U8 }, 796 [TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS] = { .type = NLA_U8 }, 797 [TCA_FLOWER_KEY_MPLS_OPT_LSE_TC] = { .type = NLA_U8 }, 798 [TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL] = { .type = NLA_U32 }, 799 }; 800 801 static const struct nla_policy 802 cfm_opt_policy[TCA_FLOWER_KEY_CFM_OPT_MAX + 1] = { 803 
[TCA_FLOWER_KEY_CFM_MD_LEVEL] = NLA_POLICY_MAX(NLA_U8, 804 FLOW_DIS_CFM_MDL_MAX), 805 [TCA_FLOWER_KEY_CFM_OPCODE] = { .type = NLA_U8 }, 806 }; 807 808 static void fl_set_key_val(struct nlattr **tb, 809 void *val, int val_type, 810 void *mask, int mask_type, int len) 811 { 812 if (!tb[val_type]) 813 return; 814 nla_memcpy(val, tb[val_type], len); 815 if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type]) 816 memset(mask, 0xff, len); 817 else 818 nla_memcpy(mask, tb[mask_type], len); 819 } 820 821 static int fl_set_key_spi(struct nlattr **tb, struct fl_flow_key *key, 822 struct fl_flow_key *mask, 823 struct netlink_ext_ack *extack) 824 { 825 if (key->basic.ip_proto != IPPROTO_ESP && 826 key->basic.ip_proto != IPPROTO_AH) { 827 NL_SET_ERR_MSG(extack, 828 "Protocol must be either ESP or AH"); 829 return -EINVAL; 830 } 831 832 fl_set_key_val(tb, &key->ipsec.spi, 833 TCA_FLOWER_KEY_SPI, 834 &mask->ipsec.spi, TCA_FLOWER_KEY_SPI_MASK, 835 sizeof(key->ipsec.spi)); 836 return 0; 837 } 838 839 static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key, 840 struct fl_flow_key *mask, 841 struct netlink_ext_ack *extack) 842 { 843 fl_set_key_val(tb, &key->tp_range.tp_min.dst, 844 TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst, 845 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst)); 846 fl_set_key_val(tb, &key->tp_range.tp_max.dst, 847 TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst, 848 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst)); 849 fl_set_key_val(tb, &key->tp_range.tp_min.src, 850 TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src, 851 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src)); 852 fl_set_key_val(tb, &key->tp_range.tp_max.src, 853 TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src, 854 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src)); 855 856 if (mask->tp_range.tp_min.dst != mask->tp_range.tp_max.dst) { 857 NL_SET_ERR_MSG(extack, 858 "Both min and max destination ports must be specified"); 859 return -EINVAL; 860 } 861 if (mask->tp_range.tp_min.src != mask->tp_range.tp_max.src) { 862 NL_SET_ERR_MSG(extack, 863 "Both min and max source ports must be specified"); 864 return -EINVAL; 865 } 866 if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst && 867 ntohs(key->tp_range.tp_max.dst) <= 868 ntohs(key->tp_range.tp_min.dst)) { 869 NL_SET_ERR_MSG_ATTR(extack, 870 tb[TCA_FLOWER_KEY_PORT_DST_MIN], 871 "Invalid destination port range (min must be strictly smaller than max)"); 872 return -EINVAL; 873 } 874 if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src && 875 ntohs(key->tp_range.tp_max.src) <= 876 ntohs(key->tp_range.tp_min.src)) { 877 NL_SET_ERR_MSG_ATTR(extack, 878 tb[TCA_FLOWER_KEY_PORT_SRC_MIN], 879 "Invalid source port range (min must be strictly smaller than max)"); 880 return -EINVAL; 881 } 882 883 return 0; 884 } 885 886 static int fl_set_key_mpls_lse(const struct nlattr *nla_lse, 887 struct flow_dissector_key_mpls *key_val, 888 struct flow_dissector_key_mpls *key_mask, 889 struct netlink_ext_ack *extack) 890 { 891 struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1]; 892 struct flow_dissector_mpls_lse *lse_mask; 893 struct flow_dissector_mpls_lse *lse_val; 894 u8 lse_index; 895 u8 depth; 896 int err; 897 898 err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse, 899 mpls_stack_entry_policy, extack); 900 if (err < 0) 901 return err; 902 903 if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) { 904 NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\""); 905 return -EINVAL; 906 } 907 908 depth = 
nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]); 909 910 /* LSE depth starts at 1, for consistency with terminology used by 911 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets. 912 */ 913 if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) { 914 NL_SET_ERR_MSG_ATTR(extack, 915 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH], 916 "Invalid MPLS depth"); 917 return -EINVAL; 918 } 919 lse_index = depth - 1; 920 921 dissector_set_mpls_lse(key_val, lse_index); 922 dissector_set_mpls_lse(key_mask, lse_index); 923 924 lse_val = &key_val->ls[lse_index]; 925 lse_mask = &key_mask->ls[lse_index]; 926 927 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) { 928 lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]); 929 lse_mask->mpls_ttl = MPLS_TTL_MASK; 930 } 931 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) { 932 u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]); 933 934 if (bos & ~MPLS_BOS_MASK) { 935 NL_SET_ERR_MSG_ATTR(extack, 936 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS], 937 "Bottom Of Stack (BOS) must be 0 or 1"); 938 return -EINVAL; 939 } 940 lse_val->mpls_bos = bos; 941 lse_mask->mpls_bos = MPLS_BOS_MASK; 942 } 943 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) { 944 u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]); 945 946 if (tc & ~MPLS_TC_MASK) { 947 NL_SET_ERR_MSG_ATTR(extack, 948 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC], 949 "Traffic Class (TC) must be between 0 and 7"); 950 return -EINVAL; 951 } 952 lse_val->mpls_tc = tc; 953 lse_mask->mpls_tc = MPLS_TC_MASK; 954 } 955 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) { 956 u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]); 957 958 if (label & ~MPLS_LABEL_MASK) { 959 NL_SET_ERR_MSG_ATTR(extack, 960 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL], 961 "Label must be between 0 and 1048575"); 962 return -EINVAL; 963 } 964 lse_val->mpls_label = label; 965 lse_mask->mpls_label = MPLS_LABEL_MASK; 966 } 967 968 return 0; 969 } 970 971 static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts, 972 struct flow_dissector_key_mpls *key_val, 973 struct flow_dissector_key_mpls *key_mask, 974 struct netlink_ext_ack *extack) 975 { 976 struct nlattr *nla_lse; 977 int rem; 978 int err; 979 980 if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) { 981 NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts, 982 "NLA_F_NESTED is missing"); 983 return -EINVAL; 984 } 985 986 nla_for_each_nested(nla_lse, nla_mpls_opts, rem) { 987 if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) { 988 NL_SET_ERR_MSG_ATTR(extack, nla_lse, 989 "Invalid MPLS option type"); 990 return -EINVAL; 991 } 992 993 err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack); 994 if (err < 0) 995 return err; 996 } 997 if (rem) { 998 NL_SET_ERR_MSG(extack, 999 "Bytes leftover after parsing MPLS options"); 1000 return -EINVAL; 1001 } 1002 1003 return 0; 1004 } 1005 1006 static int fl_set_key_mpls(struct nlattr **tb, 1007 struct flow_dissector_key_mpls *key_val, 1008 struct flow_dissector_key_mpls *key_mask, 1009 struct netlink_ext_ack *extack) 1010 { 1011 struct flow_dissector_mpls_lse *lse_mask; 1012 struct flow_dissector_mpls_lse *lse_val; 1013 1014 if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) { 1015 if (tb[TCA_FLOWER_KEY_MPLS_TTL] || 1016 tb[TCA_FLOWER_KEY_MPLS_BOS] || 1017 tb[TCA_FLOWER_KEY_MPLS_TC] || 1018 tb[TCA_FLOWER_KEY_MPLS_LABEL]) { 1019 NL_SET_ERR_MSG_ATTR(extack, 1020 tb[TCA_FLOWER_KEY_MPLS_OPTS], 1021 "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute"); 1022 return -EBADMSG; 1023 } 1024 1025 return 
fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS], 1026 key_val, key_mask, extack); 1027 } 1028 1029 lse_val = &key_val->ls[0]; 1030 lse_mask = &key_mask->ls[0]; 1031 1032 if (tb[TCA_FLOWER_KEY_MPLS_TTL]) { 1033 lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]); 1034 lse_mask->mpls_ttl = MPLS_TTL_MASK; 1035 dissector_set_mpls_lse(key_val, 0); 1036 dissector_set_mpls_lse(key_mask, 0); 1037 } 1038 if (tb[TCA_FLOWER_KEY_MPLS_BOS]) { 1039 u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]); 1040 1041 if (bos & ~MPLS_BOS_MASK) { 1042 NL_SET_ERR_MSG_ATTR(extack, 1043 tb[TCA_FLOWER_KEY_MPLS_BOS], 1044 "Bottom Of Stack (BOS) must be 0 or 1"); 1045 return -EINVAL; 1046 } 1047 lse_val->mpls_bos = bos; 1048 lse_mask->mpls_bos = MPLS_BOS_MASK; 1049 dissector_set_mpls_lse(key_val, 0); 1050 dissector_set_mpls_lse(key_mask, 0); 1051 } 1052 if (tb[TCA_FLOWER_KEY_MPLS_TC]) { 1053 u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]); 1054 1055 if (tc & ~MPLS_TC_MASK) { 1056 NL_SET_ERR_MSG_ATTR(extack, 1057 tb[TCA_FLOWER_KEY_MPLS_TC], 1058 "Traffic Class (TC) must be between 0 and 7"); 1059 return -EINVAL; 1060 } 1061 lse_val->mpls_tc = tc; 1062 lse_mask->mpls_tc = MPLS_TC_MASK; 1063 dissector_set_mpls_lse(key_val, 0); 1064 dissector_set_mpls_lse(key_mask, 0); 1065 } 1066 if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) { 1067 u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]); 1068 1069 if (label & ~MPLS_LABEL_MASK) { 1070 NL_SET_ERR_MSG_ATTR(extack, 1071 tb[TCA_FLOWER_KEY_MPLS_LABEL], 1072 "Label must be between 0 and 1048575"); 1073 return -EINVAL; 1074 } 1075 lse_val->mpls_label = label; 1076 lse_mask->mpls_label = MPLS_LABEL_MASK; 1077 dissector_set_mpls_lse(key_val, 0); 1078 dissector_set_mpls_lse(key_mask, 0); 1079 } 1080 return 0; 1081 } 1082 1083 static void fl_set_key_vlan(struct nlattr **tb, 1084 __be16 ethertype, 1085 int vlan_id_key, int vlan_prio_key, 1086 int vlan_next_eth_type_key, 1087 struct flow_dissector_key_vlan *key_val, 1088 struct flow_dissector_key_vlan *key_mask) 1089 { 1090 #define VLAN_PRIORITY_MASK 0x7 1091 1092 if (tb[vlan_id_key]) { 1093 key_val->vlan_id = 1094 nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK; 1095 key_mask->vlan_id = VLAN_VID_MASK; 1096 } 1097 if (tb[vlan_prio_key]) { 1098 key_val->vlan_priority = 1099 nla_get_u8(tb[vlan_prio_key]) & 1100 VLAN_PRIORITY_MASK; 1101 key_mask->vlan_priority = VLAN_PRIORITY_MASK; 1102 } 1103 if (ethertype) { 1104 key_val->vlan_tpid = ethertype; 1105 key_mask->vlan_tpid = cpu_to_be16(~0); 1106 } 1107 if (tb[vlan_next_eth_type_key]) { 1108 key_val->vlan_eth_type = 1109 nla_get_be16(tb[vlan_next_eth_type_key]); 1110 key_mask->vlan_eth_type = cpu_to_be16(~0); 1111 } 1112 } 1113 1114 static void fl_set_key_pppoe(struct nlattr **tb, 1115 struct flow_dissector_key_pppoe *key_val, 1116 struct flow_dissector_key_pppoe *key_mask, 1117 struct fl_flow_key *key, 1118 struct fl_flow_key *mask) 1119 { 1120 /* key_val::type must be set to ETH_P_PPP_SES 1121 * because ETH_P_PPP_SES was stored in basic.n_proto 1122 * which might get overwritten by ppp_proto 1123 * or might be set to 0, the role of key_val::type 1124 * is similar to vlan_key::tpid 1125 */ 1126 key_val->type = htons(ETH_P_PPP_SES); 1127 key_mask->type = cpu_to_be16(~0); 1128 1129 if (tb[TCA_FLOWER_KEY_PPPOE_SID]) { 1130 key_val->session_id = 1131 nla_get_be16(tb[TCA_FLOWER_KEY_PPPOE_SID]); 1132 key_mask->session_id = cpu_to_be16(~0); 1133 } 1134 if (tb[TCA_FLOWER_KEY_PPP_PROTO]) { 1135 key_val->ppp_proto = 1136 nla_get_be16(tb[TCA_FLOWER_KEY_PPP_PROTO]); 1137 key_mask->ppp_proto = cpu_to_be16(~0); 
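
		/* The PPP protocol also identifies the encapsulated network
		 * protocol, so mirror it into basic.n_proto for the later
		 * IPv4/IPv6/MPLS key parsing.
		 */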
		if (key_val->ppp_proto == htons(PPP_IP)) {
			key->basic.n_proto = htons(ETH_P_IP);
			mask->basic.n_proto = cpu_to_be16(~0);
		} else if (key_val->ppp_proto == htons(PPP_IPV6)) {
			key->basic.n_proto = htons(ETH_P_IPV6);
			mask->basic.n_proto = cpu_to_be16(~0);
		} else if (key_val->ppp_proto == htons(PPP_MPLS_UC)) {
			key->basic.n_proto = htons(ETH_P_MPLS_UC);
			mask->basic.n_proto = cpu_to_be16(~0);
		} else if (key_val->ppp_proto == htons(PPP_MPLS_MC)) {
			key->basic.n_proto = htons(ETH_P_MPLS_MC);
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	} else {
		key->basic.n_proto = 0;
		mask->basic.n_proto = cpu_to_be16(0);
	}
}

static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}

static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
			    u32 *flags_mask, struct netlink_ext_ack *extack)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
		NL_SET_ERR_MSG(extack, "Missing flags mask");
		return -EINVAL;
	}

	key = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	return 0;
}

static void fl_set_key_ip(struct nlattr **tb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
}

static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
	struct nlattr *class = NULL, *type = NULL, *data = NULL;
	struct geneve_opt *opt;
	int err, data_len = 0;

	if (option_len > sizeof(struct geneve_opt))
		data_len = option_len - sizeof(struct geneve_opt);

	if (key->enc_opts.len > FLOW_DIS_TUN_OPTS_MAX - 4)
		return -ERANGE;

	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
	memset(opt, 0xff, option_len);
	opt->length = data_len / 4;
	opt->r1 = 0;
	opt->r2 = 0;
	opt->r3 = 0;

	/* If no mask has been provided we assume an exact match.
*/ 1231 if (!depth) 1232 return sizeof(struct geneve_opt) + data_len; 1233 1234 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) { 1235 NL_SET_ERR_MSG(extack, "Non-geneve option type for mask"); 1236 return -EINVAL; 1237 } 1238 1239 err = nla_parse_nested_deprecated(tb, 1240 TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX, 1241 nla, geneve_opt_policy, extack); 1242 if (err < 0) 1243 return err; 1244 1245 /* We are not allowed to omit any of CLASS, TYPE or DATA 1246 * fields from the key. 1247 */ 1248 if (!option_len && 1249 (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] || 1250 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] || 1251 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) { 1252 NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data"); 1253 return -EINVAL; 1254 } 1255 1256 /* Omitting any of CLASS, TYPE or DATA fields is allowed 1257 * for the mask. 1258 */ 1259 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) { 1260 int new_len = key->enc_opts.len; 1261 1262 data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]; 1263 data_len = nla_len(data); 1264 if (data_len < 4) { 1265 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long"); 1266 return -ERANGE; 1267 } 1268 if (data_len % 4) { 1269 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long"); 1270 return -ERANGE; 1271 } 1272 1273 new_len += sizeof(struct geneve_opt) + data_len; 1274 BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX); 1275 if (new_len > FLOW_DIS_TUN_OPTS_MAX) { 1276 NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size"); 1277 return -ERANGE; 1278 } 1279 opt->length = data_len / 4; 1280 memcpy(opt->opt_data, nla_data(data), data_len); 1281 } 1282 1283 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) { 1284 class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]; 1285 opt->opt_class = nla_get_be16(class); 1286 } 1287 1288 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) { 1289 type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]; 1290 opt->type = nla_get_u8(type); 1291 } 1292 1293 return sizeof(struct geneve_opt) + data_len; 1294 } 1295 1296 static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key, 1297 int depth, int option_len, 1298 struct netlink_ext_ack *extack) 1299 { 1300 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1]; 1301 struct vxlan_metadata *md; 1302 int err; 1303 1304 md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len]; 1305 memset(md, 0xff, sizeof(*md)); 1306 1307 if (!depth) 1308 return sizeof(*md); 1309 1310 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) { 1311 NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask"); 1312 return -EINVAL; 1313 } 1314 1315 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla, 1316 vxlan_opt_policy, extack); 1317 if (err < 0) 1318 return err; 1319 1320 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) { 1321 NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp"); 1322 return -EINVAL; 1323 } 1324 1325 if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) { 1326 md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]); 1327 md->gbp &= VXLAN_GBP_MASK; 1328 } 1329 1330 return sizeof(*md); 1331 } 1332 1333 static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key, 1334 int depth, int option_len, 1335 struct netlink_ext_ack *extack) 1336 { 1337 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1]; 1338 struct erspan_metadata *md; 1339 int err; 1340 1341 md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len]; 1342 memset(md, 0xff, sizeof(*md)); 
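	/* Default to ERSPAN version 1; overridden below if the VER attribute
	 * is present.
	 */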
1343 md->version = 1; 1344 1345 if (!depth) 1346 return sizeof(*md); 1347 1348 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) { 1349 NL_SET_ERR_MSG(extack, "Non-erspan option type for mask"); 1350 return -EINVAL; 1351 } 1352 1353 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla, 1354 erspan_opt_policy, extack); 1355 if (err < 0) 1356 return err; 1357 1358 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) { 1359 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver"); 1360 return -EINVAL; 1361 } 1362 1363 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) 1364 md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]); 1365 1366 if (md->version == 1) { 1367 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) { 1368 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index"); 1369 return -EINVAL; 1370 } 1371 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) { 1372 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]; 1373 memset(&md->u, 0x00, sizeof(md->u)); 1374 md->u.index = nla_get_be32(nla); 1375 } 1376 } else if (md->version == 2) { 1377 if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] || 1378 !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) { 1379 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid"); 1380 return -EINVAL; 1381 } 1382 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) { 1383 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]; 1384 md->u.md2.dir = nla_get_u8(nla); 1385 } 1386 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) { 1387 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]; 1388 set_hwid(&md->u.md2, nla_get_u8(nla)); 1389 } 1390 } else { 1391 NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect"); 1392 return -EINVAL; 1393 } 1394 1395 return sizeof(*md); 1396 } 1397 1398 static int fl_set_gtp_opt(const struct nlattr *nla, struct fl_flow_key *key, 1399 int depth, int option_len, 1400 struct netlink_ext_ack *extack) 1401 { 1402 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1]; 1403 struct gtp_pdu_session_info *sinfo; 1404 u8 len = key->enc_opts.len; 1405 int err; 1406 1407 sinfo = (struct gtp_pdu_session_info *)&key->enc_opts.data[len]; 1408 memset(sinfo, 0xff, option_len); 1409 1410 if (!depth) 1411 return sizeof(*sinfo); 1412 1413 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GTP) { 1414 NL_SET_ERR_MSG_MOD(extack, "Non-gtp option type for mask"); 1415 return -EINVAL; 1416 } 1417 1418 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_GTP_MAX, nla, 1419 gtp_opt_policy, extack); 1420 if (err < 0) 1421 return err; 1422 1423 if (!option_len && 1424 (!tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] || 1425 !tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])) { 1426 NL_SET_ERR_MSG_MOD(extack, 1427 "Missing tunnel key gtp option pdu type or qfi"); 1428 return -EINVAL; 1429 } 1430 1431 if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]) 1432 sinfo->pdu_type = 1433 nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]); 1434 1435 if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]) 1436 sinfo->qfi = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]); 1437 1438 return sizeof(*sinfo); 1439 } 1440 1441 static int fl_set_pfcp_opt(const struct nlattr *nla, struct fl_flow_key *key, 1442 int depth, int option_len, 1443 struct netlink_ext_ack *extack) 1444 { 1445 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX + 1]; 1446 struct pfcp_metadata *md; 1447 int err; 1448 1449 md = (struct pfcp_metadata *)&key->enc_opts.data[key->enc_opts.len]; 1450 memset(md, 0xff, sizeof(*md)); 1451 1452 if (!depth) 1453 return sizeof(*md); 1454 1455 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_PFCP) 
{ 1456 NL_SET_ERR_MSG_MOD(extack, "Non-pfcp option type for mask"); 1457 return -EINVAL; 1458 } 1459 1460 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX, nla, 1461 pfcp_opt_policy, extack); 1462 if (err < 0) 1463 return err; 1464 1465 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE]) { 1466 NL_SET_ERR_MSG_MOD(extack, "Missing tunnel key pfcp option type"); 1467 return -EINVAL; 1468 } 1469 1470 if (tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE]) 1471 md->type = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE]); 1472 1473 if (tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID]) 1474 md->seid = nla_get_be64(tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID]); 1475 1476 return sizeof(*md); 1477 } 1478 1479 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key, 1480 struct fl_flow_key *mask, 1481 struct netlink_ext_ack *extack) 1482 { 1483 const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL; 1484 int err, option_len, key_depth, msk_depth = 0; 1485 1486 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS], 1487 TCA_FLOWER_KEY_ENC_OPTS_MAX, 1488 enc_opts_policy, extack); 1489 if (err) 1490 return err; 1491 1492 nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]); 1493 1494 if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) { 1495 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK], 1496 TCA_FLOWER_KEY_ENC_OPTS_MAX, 1497 enc_opts_policy, extack); 1498 if (err) 1499 return err; 1500 1501 nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]); 1502 msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]); 1503 if (!nla_ok(nla_opt_msk, msk_depth)) { 1504 NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks"); 1505 return -EINVAL; 1506 } 1507 } 1508 1509 nla_for_each_attr(nla_opt_key, nla_enc_key, 1510 nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) { 1511 switch (nla_type(nla_opt_key)) { 1512 case TCA_FLOWER_KEY_ENC_OPTS_GENEVE: 1513 if (key->enc_opts.dst_opt_type && 1514 key->enc_opts.dst_opt_type != 1515 IP_TUNNEL_GENEVE_OPT_BIT) { 1516 NL_SET_ERR_MSG(extack, "Duplicate type for geneve options"); 1517 return -EINVAL; 1518 } 1519 option_len = 0; 1520 key->enc_opts.dst_opt_type = IP_TUNNEL_GENEVE_OPT_BIT; 1521 option_len = fl_set_geneve_opt(nla_opt_key, key, 1522 key_depth, option_len, 1523 extack); 1524 if (option_len < 0) 1525 return option_len; 1526 1527 key->enc_opts.len += option_len; 1528 /* At the same time we need to parse through the mask 1529 * in order to verify exact and mask attribute lengths. 1530 */ 1531 mask->enc_opts.dst_opt_type = IP_TUNNEL_GENEVE_OPT_BIT; 1532 option_len = fl_set_geneve_opt(nla_opt_msk, mask, 1533 msk_depth, option_len, 1534 extack); 1535 if (option_len < 0) 1536 return option_len; 1537 1538 mask->enc_opts.len += option_len; 1539 if (key->enc_opts.len != mask->enc_opts.len) { 1540 NL_SET_ERR_MSG(extack, "Key and mask miss aligned"); 1541 return -EINVAL; 1542 } 1543 break; 1544 case TCA_FLOWER_KEY_ENC_OPTS_VXLAN: 1545 if (key->enc_opts.dst_opt_type) { 1546 NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options"); 1547 return -EINVAL; 1548 } 1549 option_len = 0; 1550 key->enc_opts.dst_opt_type = IP_TUNNEL_VXLAN_OPT_BIT; 1551 option_len = fl_set_vxlan_opt(nla_opt_key, key, 1552 key_depth, option_len, 1553 extack); 1554 if (option_len < 0) 1555 return option_len; 1556 1557 key->enc_opts.len += option_len; 1558 /* At the same time we need to parse through the mask 1559 * in order to verify exact and mask attribute lengths. 
1560 */ 1561 mask->enc_opts.dst_opt_type = IP_TUNNEL_VXLAN_OPT_BIT; 1562 option_len = fl_set_vxlan_opt(nla_opt_msk, mask, 1563 msk_depth, option_len, 1564 extack); 1565 if (option_len < 0) 1566 return option_len; 1567 1568 mask->enc_opts.len += option_len; 1569 if (key->enc_opts.len != mask->enc_opts.len) { 1570 NL_SET_ERR_MSG(extack, "Key and mask miss aligned"); 1571 return -EINVAL; 1572 } 1573 break; 1574 case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN: 1575 if (key->enc_opts.dst_opt_type) { 1576 NL_SET_ERR_MSG(extack, "Duplicate type for erspan options"); 1577 return -EINVAL; 1578 } 1579 option_len = 0; 1580 key->enc_opts.dst_opt_type = IP_TUNNEL_ERSPAN_OPT_BIT; 1581 option_len = fl_set_erspan_opt(nla_opt_key, key, 1582 key_depth, option_len, 1583 extack); 1584 if (option_len < 0) 1585 return option_len; 1586 1587 key->enc_opts.len += option_len; 1588 /* At the same time we need to parse through the mask 1589 * in order to verify exact and mask attribute lengths. 1590 */ 1591 mask->enc_opts.dst_opt_type = IP_TUNNEL_ERSPAN_OPT_BIT; 1592 option_len = fl_set_erspan_opt(nla_opt_msk, mask, 1593 msk_depth, option_len, 1594 extack); 1595 if (option_len < 0) 1596 return option_len; 1597 1598 mask->enc_opts.len += option_len; 1599 if (key->enc_opts.len != mask->enc_opts.len) { 1600 NL_SET_ERR_MSG(extack, "Key and mask miss aligned"); 1601 return -EINVAL; 1602 } 1603 break; 1604 case TCA_FLOWER_KEY_ENC_OPTS_GTP: 1605 if (key->enc_opts.dst_opt_type) { 1606 NL_SET_ERR_MSG_MOD(extack, 1607 "Duplicate type for gtp options"); 1608 return -EINVAL; 1609 } 1610 option_len = 0; 1611 key->enc_opts.dst_opt_type = IP_TUNNEL_GTP_OPT_BIT; 1612 option_len = fl_set_gtp_opt(nla_opt_key, key, 1613 key_depth, option_len, 1614 extack); 1615 if (option_len < 0) 1616 return option_len; 1617 1618 key->enc_opts.len += option_len; 1619 /* At the same time we need to parse through the mask 1620 * in order to verify exact and mask attribute lengths. 1621 */ 1622 mask->enc_opts.dst_opt_type = IP_TUNNEL_GTP_OPT_BIT; 1623 option_len = fl_set_gtp_opt(nla_opt_msk, mask, 1624 msk_depth, option_len, 1625 extack); 1626 if (option_len < 0) 1627 return option_len; 1628 1629 mask->enc_opts.len += option_len; 1630 if (key->enc_opts.len != mask->enc_opts.len) { 1631 NL_SET_ERR_MSG_MOD(extack, 1632 "Key and mask miss aligned"); 1633 return -EINVAL; 1634 } 1635 break; 1636 case TCA_FLOWER_KEY_ENC_OPTS_PFCP: 1637 if (key->enc_opts.dst_opt_type) { 1638 NL_SET_ERR_MSG_MOD(extack, "Duplicate type for pfcp options"); 1639 return -EINVAL; 1640 } 1641 option_len = 0; 1642 key->enc_opts.dst_opt_type = IP_TUNNEL_PFCP_OPT_BIT; 1643 option_len = fl_set_pfcp_opt(nla_opt_key, key, 1644 key_depth, option_len, 1645 extack); 1646 if (option_len < 0) 1647 return option_len; 1648 1649 key->enc_opts.len += option_len; 1650 /* At the same time we need to parse through the mask 1651 * in order to verify exact and mask attribute lengths. 
1652 */ 1653 mask->enc_opts.dst_opt_type = IP_TUNNEL_PFCP_OPT_BIT; 1654 option_len = fl_set_pfcp_opt(nla_opt_msk, mask, 1655 msk_depth, option_len, 1656 extack); 1657 if (option_len < 0) 1658 return option_len; 1659 1660 mask->enc_opts.len += option_len; 1661 if (key->enc_opts.len != mask->enc_opts.len) { 1662 NL_SET_ERR_MSG_MOD(extack, "Key and mask miss aligned"); 1663 return -EINVAL; 1664 } 1665 break; 1666 default: 1667 NL_SET_ERR_MSG(extack, "Unknown tunnel option type"); 1668 return -EINVAL; 1669 } 1670 1671 if (!msk_depth) 1672 continue; 1673 1674 if (!nla_ok(nla_opt_msk, msk_depth)) { 1675 NL_SET_ERR_MSG(extack, "A mask attribute is invalid"); 1676 return -EINVAL; 1677 } 1678 nla_opt_msk = nla_next(nla_opt_msk, &msk_depth); 1679 } 1680 1681 return 0; 1682 } 1683 1684 static int fl_validate_ct_state(u16 state, struct nlattr *tb, 1685 struct netlink_ext_ack *extack) 1686 { 1687 if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) { 1688 NL_SET_ERR_MSG_ATTR(extack, tb, 1689 "no trk, so no other flag can be set"); 1690 return -EINVAL; 1691 } 1692 1693 if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW && 1694 state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) { 1695 NL_SET_ERR_MSG_ATTR(extack, tb, 1696 "new and est are mutually exclusive"); 1697 return -EINVAL; 1698 } 1699 1700 if (state & TCA_FLOWER_KEY_CT_FLAGS_INVALID && 1701 state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 1702 TCA_FLOWER_KEY_CT_FLAGS_INVALID)) { 1703 NL_SET_ERR_MSG_ATTR(extack, tb, 1704 "when inv is set, only trk may be set"); 1705 return -EINVAL; 1706 } 1707 1708 if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW && 1709 state & TCA_FLOWER_KEY_CT_FLAGS_REPLY) { 1710 NL_SET_ERR_MSG_ATTR(extack, tb, 1711 "new and rpl are mutually exclusive"); 1712 return -EINVAL; 1713 } 1714 1715 return 0; 1716 } 1717 1718 static int fl_set_key_ct(struct nlattr **tb, 1719 struct flow_dissector_key_ct *key, 1720 struct flow_dissector_key_ct *mask, 1721 struct netlink_ext_ack *extack) 1722 { 1723 if (tb[TCA_FLOWER_KEY_CT_STATE]) { 1724 int err; 1725 1726 if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) { 1727 NL_SET_ERR_MSG(extack, "Conntrack isn't enabled"); 1728 return -EOPNOTSUPP; 1729 } 1730 fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE, 1731 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK, 1732 sizeof(key->ct_state)); 1733 1734 err = fl_validate_ct_state(key->ct_state & mask->ct_state, 1735 tb[TCA_FLOWER_KEY_CT_STATE_MASK], 1736 extack); 1737 if (err) 1738 return err; 1739 1740 } 1741 if (tb[TCA_FLOWER_KEY_CT_ZONE]) { 1742 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) { 1743 NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled"); 1744 return -EOPNOTSUPP; 1745 } 1746 fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE, 1747 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK, 1748 sizeof(key->ct_zone)); 1749 } 1750 if (tb[TCA_FLOWER_KEY_CT_MARK]) { 1751 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) { 1752 NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled"); 1753 return -EOPNOTSUPP; 1754 } 1755 fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK, 1756 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK, 1757 sizeof(key->ct_mark)); 1758 } 1759 if (tb[TCA_FLOWER_KEY_CT_LABELS]) { 1760 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) { 1761 NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled"); 1762 return -EOPNOTSUPP; 1763 } 1764 fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS, 1765 mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK, 1766 sizeof(key->ct_labels)); 1767 } 1768 1769 return 0; 1770 } 1771 1772 static bool is_vlan_key(struct nlattr *tb, __be16 
*ethertype, 1773 struct fl_flow_key *key, struct fl_flow_key *mask, 1774 int vthresh) 1775 { 1776 const bool good_num_of_vlans = key->num_of_vlans.num_of_vlans > vthresh; 1777 1778 if (!tb) { 1779 *ethertype = 0; 1780 return good_num_of_vlans; 1781 } 1782 1783 *ethertype = nla_get_be16(tb); 1784 if (good_num_of_vlans || eth_type_vlan(*ethertype)) 1785 return true; 1786 1787 key->basic.n_proto = *ethertype; 1788 mask->basic.n_proto = cpu_to_be16(~0); 1789 return false; 1790 } 1791 1792 static void fl_set_key_cfm_md_level(struct nlattr **tb, 1793 struct fl_flow_key *key, 1794 struct fl_flow_key *mask, 1795 struct netlink_ext_ack *extack) 1796 { 1797 u8 level; 1798 1799 if (!tb[TCA_FLOWER_KEY_CFM_MD_LEVEL]) 1800 return; 1801 1802 level = nla_get_u8(tb[TCA_FLOWER_KEY_CFM_MD_LEVEL]); 1803 key->cfm.mdl_ver = FIELD_PREP(FLOW_DIS_CFM_MDL_MASK, level); 1804 mask->cfm.mdl_ver = FLOW_DIS_CFM_MDL_MASK; 1805 } 1806 1807 static void fl_set_key_cfm_opcode(struct nlattr **tb, 1808 struct fl_flow_key *key, 1809 struct fl_flow_key *mask, 1810 struct netlink_ext_ack *extack) 1811 { 1812 fl_set_key_val(tb, &key->cfm.opcode, TCA_FLOWER_KEY_CFM_OPCODE, 1813 &mask->cfm.opcode, TCA_FLOWER_UNSPEC, 1814 sizeof(key->cfm.opcode)); 1815 } 1816 1817 static int fl_set_key_cfm(struct nlattr **tb, 1818 struct fl_flow_key *key, 1819 struct fl_flow_key *mask, 1820 struct netlink_ext_ack *extack) 1821 { 1822 struct nlattr *nla_cfm_opt[TCA_FLOWER_KEY_CFM_OPT_MAX + 1]; 1823 int err; 1824 1825 if (!tb[TCA_FLOWER_KEY_CFM]) 1826 return 0; 1827 1828 err = nla_parse_nested(nla_cfm_opt, TCA_FLOWER_KEY_CFM_OPT_MAX, 1829 tb[TCA_FLOWER_KEY_CFM], cfm_opt_policy, extack); 1830 if (err < 0) 1831 return err; 1832 1833 fl_set_key_cfm_opcode(nla_cfm_opt, key, mask, extack); 1834 fl_set_key_cfm_md_level(nla_cfm_opt, key, mask, extack); 1835 1836 return 0; 1837 } 1838 1839 static int fl_set_key_enc_flags(struct nlattr **tb, u32 *flags_key, 1840 u32 *flags_mask, struct netlink_ext_ack *extack) 1841 { 1842 /* mask is mandatory for flags */ 1843 if (NL_REQ_ATTR_CHECK(extack, NULL, tb, TCA_FLOWER_KEY_ENC_FLAGS_MASK)) { 1844 NL_SET_ERR_MSG(extack, "missing enc_flags mask"); 1845 return -EINVAL; 1846 } 1847 1848 *flags_key = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_FLAGS]); 1849 *flags_mask = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_FLAGS_MASK]); 1850 1851 return 0; 1852 } 1853 1854 static int fl_set_key(struct net *net, struct nlattr **tb, 1855 struct fl_flow_key *key, struct fl_flow_key *mask, 1856 struct netlink_ext_ack *extack) 1857 { 1858 __be16 ethertype; 1859 int ret = 0; 1860 1861 if (tb[TCA_FLOWER_INDEV]) { 1862 int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack); 1863 if (err < 0) 1864 return err; 1865 key->meta.ingress_ifindex = err; 1866 mask->meta.ingress_ifindex = 0xffffffff; 1867 } 1868 1869 fl_set_key_val(tb, &key->meta.l2_miss, TCA_FLOWER_L2_MISS, 1870 &mask->meta.l2_miss, TCA_FLOWER_UNSPEC, 1871 sizeof(key->meta.l2_miss)); 1872 1873 fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST, 1874 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK, 1875 sizeof(key->eth.dst)); 1876 fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC, 1877 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK, 1878 sizeof(key->eth.src)); 1879 fl_set_key_val(tb, &key->num_of_vlans, 1880 TCA_FLOWER_KEY_NUM_OF_VLANS, 1881 &mask->num_of_vlans, 1882 TCA_FLOWER_UNSPEC, 1883 sizeof(key->num_of_vlans)); 1884 1885 if (is_vlan_key(tb[TCA_FLOWER_KEY_ETH_TYPE], &ethertype, key, mask, 0)) { 1886 fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID, 1887 TCA_FLOWER_KEY_VLAN_PRIO, 1888
TCA_FLOWER_KEY_VLAN_ETH_TYPE, 1889 &key->vlan, &mask->vlan); 1890 1891 if (is_vlan_key(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE], 1892 &ethertype, key, mask, 1)) { 1893 fl_set_key_vlan(tb, ethertype, 1894 TCA_FLOWER_KEY_CVLAN_ID, 1895 TCA_FLOWER_KEY_CVLAN_PRIO, 1896 TCA_FLOWER_KEY_CVLAN_ETH_TYPE, 1897 &key->cvlan, &mask->cvlan); 1898 fl_set_key_val(tb, &key->basic.n_proto, 1899 TCA_FLOWER_KEY_CVLAN_ETH_TYPE, 1900 &mask->basic.n_proto, 1901 TCA_FLOWER_UNSPEC, 1902 sizeof(key->basic.n_proto)); 1903 } 1904 } 1905 1906 if (key->basic.n_proto == htons(ETH_P_PPP_SES)) 1907 fl_set_key_pppoe(tb, &key->pppoe, &mask->pppoe, key, mask); 1908 1909 if (key->basic.n_proto == htons(ETH_P_IP) || 1910 key->basic.n_proto == htons(ETH_P_IPV6)) { 1911 fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO, 1912 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC, 1913 sizeof(key->basic.ip_proto)); 1914 fl_set_key_ip(tb, false, &key->ip, &mask->ip); 1915 } 1916 1917 if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) { 1918 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; 1919 mask->control.addr_type = ~0; 1920 fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC, 1921 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK, 1922 sizeof(key->ipv4.src)); 1923 fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST, 1924 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK, 1925 sizeof(key->ipv4.dst)); 1926 } else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) { 1927 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 1928 mask->control.addr_type = ~0; 1929 fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC, 1930 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK, 1931 sizeof(key->ipv6.src)); 1932 fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST, 1933 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK, 1934 sizeof(key->ipv6.dst)); 1935 } 1936 1937 if (key->basic.ip_proto == IPPROTO_TCP) { 1938 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC, 1939 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK, 1940 sizeof(key->tp.src)); 1941 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST, 1942 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK, 1943 sizeof(key->tp.dst)); 1944 fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS, 1945 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK, 1946 sizeof(key->tcp.flags)); 1947 } else if (key->basic.ip_proto == IPPROTO_UDP) { 1948 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC, 1949 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK, 1950 sizeof(key->tp.src)); 1951 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST, 1952 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK, 1953 sizeof(key->tp.dst)); 1954 } else if (key->basic.ip_proto == IPPROTO_SCTP) { 1955 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC, 1956 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK, 1957 sizeof(key->tp.src)); 1958 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST, 1959 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK, 1960 sizeof(key->tp.dst)); 1961 } else if (key->basic.n_proto == htons(ETH_P_IP) && 1962 key->basic.ip_proto == IPPROTO_ICMP) { 1963 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE, 1964 &mask->icmp.type, 1965 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK, 1966 sizeof(key->icmp.type)); 1967 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE, 1968 &mask->icmp.code, 1969 TCA_FLOWER_KEY_ICMPV4_CODE_MASK, 1970 sizeof(key->icmp.code)); 1971 } else if (key->basic.n_proto == htons(ETH_P_IPV6) && 1972 key->basic.ip_proto == IPPROTO_ICMPV6) { 1973
fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE, 1974 &mask->icmp.type, 1975 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK, 1976 sizeof(key->icmp.type)); 1977 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE, 1978 &mask->icmp.code, 1979 TCA_FLOWER_KEY_ICMPV6_CODE_MASK, 1980 sizeof(key->icmp.code)); 1981 } else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) || 1982 key->basic.n_proto == htons(ETH_P_MPLS_MC)) { 1983 ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack); 1984 if (ret) 1985 return ret; 1986 } else if (key->basic.n_proto == htons(ETH_P_ARP) || 1987 key->basic.n_proto == htons(ETH_P_RARP)) { 1988 fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP, 1989 &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK, 1990 sizeof(key->arp.sip)); 1991 fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP, 1992 &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK, 1993 sizeof(key->arp.tip)); 1994 fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP, 1995 &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK, 1996 sizeof(key->arp.op)); 1997 fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA, 1998 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK, 1999 sizeof(key->arp.sha)); 2000 fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA, 2001 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK, 2002 sizeof(key->arp.tha)); 2003 } else if (key->basic.ip_proto == IPPROTO_L2TP) { 2004 fl_set_key_val(tb, &key->l2tpv3.session_id, 2005 TCA_FLOWER_KEY_L2TPV3_SID, 2006 &mask->l2tpv3.session_id, TCA_FLOWER_UNSPEC, 2007 sizeof(key->l2tpv3.session_id)); 2008 } else if (key->basic.n_proto == htons(ETH_P_CFM)) { 2009 ret = fl_set_key_cfm(tb, key, mask, extack); 2010 if (ret) 2011 return ret; 2012 } 2013 2014 if (key->basic.ip_proto == IPPROTO_TCP || 2015 key->basic.ip_proto == IPPROTO_UDP || 2016 key->basic.ip_proto == IPPROTO_SCTP) { 2017 ret = fl_set_key_port_range(tb, key, mask, extack); 2018 if (ret) 2019 return ret; 2020 } 2021 2022 if (tb[TCA_FLOWER_KEY_SPI]) { 2023 ret = fl_set_key_spi(tb, key, mask, extack); 2024 if (ret) 2025 return ret; 2026 } 2027 2028 if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] || 2029 tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) { 2030 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; 2031 mask->enc_control.addr_type = ~0; 2032 fl_set_key_val(tb, &key->enc_ipv4.src, 2033 TCA_FLOWER_KEY_ENC_IPV4_SRC, 2034 &mask->enc_ipv4.src, 2035 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK, 2036 sizeof(key->enc_ipv4.src)); 2037 fl_set_key_val(tb, &key->enc_ipv4.dst, 2038 TCA_FLOWER_KEY_ENC_IPV4_DST, 2039 &mask->enc_ipv4.dst, 2040 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK, 2041 sizeof(key->enc_ipv4.dst)); 2042 } 2043 2044 if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] || 2045 tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) { 2046 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 2047 mask->enc_control.addr_type = ~0; 2048 fl_set_key_val(tb, &key->enc_ipv6.src, 2049 TCA_FLOWER_KEY_ENC_IPV6_SRC, 2050 &mask->enc_ipv6.src, 2051 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK, 2052 sizeof(key->enc_ipv6.src)); 2053 fl_set_key_val(tb, &key->enc_ipv6.dst, 2054 TCA_FLOWER_KEY_ENC_IPV6_DST, 2055 &mask->enc_ipv6.dst, 2056 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK, 2057 sizeof(key->enc_ipv6.dst)); 2058 } 2059 2060 fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID, 2061 &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC, 2062 sizeof(key->enc_key_id.keyid)); 2063 2064 fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT, 2065 &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK, 2066 sizeof(key->enc_tp.src)); 2067 2068 fl_set_key_val(tb, &key->enc_tp.dst, 
TCA_FLOWER_KEY_ENC_UDP_DST_PORT, 2069 &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK, 2070 sizeof(key->enc_tp.dst)); 2071 2072 fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip); 2073 2074 fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH, 2075 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK, 2076 sizeof(key->hash.hash)); 2077 2078 if (tb[TCA_FLOWER_KEY_ENC_OPTS]) { 2079 ret = fl_set_enc_opt(tb, key, mask, extack); 2080 if (ret) 2081 return ret; 2082 } 2083 2084 ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack); 2085 if (ret) 2086 return ret; 2087 2088 if (tb[TCA_FLOWER_KEY_FLAGS]) { 2089 ret = fl_set_key_flags(tb, &key->control.flags, 2090 &mask->control.flags, extack); 2091 if (ret) 2092 return ret; 2093 } 2094 2095 if (tb[TCA_FLOWER_KEY_ENC_FLAGS]) 2096 ret = fl_set_key_enc_flags(tb, &key->enc_flags.flags, 2097 &mask->enc_flags.flags, extack); 2098 2099 return ret; 2100 } 2101 2102 static void fl_mask_copy(struct fl_flow_mask *dst, 2103 struct fl_flow_mask *src) 2104 { 2105 const void *psrc = fl_key_get_start(&src->key, src); 2106 void *pdst = fl_key_get_start(&dst->key, src); 2107 2108 memcpy(pdst, psrc, fl_mask_range(src)); 2109 dst->range = src->range; 2110 } 2111 2112 static const struct rhashtable_params fl_ht_params = { 2113 .key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */ 2114 .head_offset = offsetof(struct cls_fl_filter, ht_node), 2115 .automatic_shrinking = true, 2116 }; 2117 2118 static int fl_init_mask_hashtable(struct fl_flow_mask *mask) 2119 { 2120 mask->filter_ht_params = fl_ht_params; 2121 mask->filter_ht_params.key_len = fl_mask_range(mask); 2122 mask->filter_ht_params.key_offset += mask->range.start; 2123 2124 return rhashtable_init(&mask->ht, &mask->filter_ht_params); 2125 } 2126 2127 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member) 2128 #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member) 2129 2130 #define FL_KEY_IS_MASKED(mask, member) \ 2131 memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \ 2132 0, FL_KEY_MEMBER_SIZE(member)) \ 2133 2134 #define FL_KEY_SET(keys, cnt, id, member) \ 2135 do { \ 2136 keys[cnt].key_id = id; \ 2137 keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \ 2138 cnt++; \ 2139 } while(0); 2140 2141 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \ 2142 do { \ 2143 if (FL_KEY_IS_MASKED(mask, member)) \ 2144 FL_KEY_SET(keys, cnt, id, member); \ 2145 } while(0); 2146 2147 static void fl_init_dissector(struct flow_dissector *dissector, 2148 struct fl_flow_key *mask) 2149 { 2150 struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX]; 2151 size_t cnt = 0; 2152 2153 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2154 FLOW_DISSECTOR_KEY_META, meta); 2155 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control); 2156 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic); 2157 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2158 FLOW_DISSECTOR_KEY_ETH_ADDRS, eth); 2159 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2160 FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4); 2161 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2162 FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6); 2163 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2164 FLOW_DISSECTOR_KEY_PORTS, tp); 2165 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2166 FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range); 2167 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2168 FLOW_DISSECTOR_KEY_IP, ip); 2169 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2170 FLOW_DISSECTOR_KEY_TCP, tcp); 2171 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2172 FLOW_DISSECTOR_KEY_ICMP, icmp); 2173 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 
2174 FLOW_DISSECTOR_KEY_ARP, arp); 2175 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2176 FLOW_DISSECTOR_KEY_MPLS, mpls); 2177 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2178 FLOW_DISSECTOR_KEY_VLAN, vlan); 2179 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2180 FLOW_DISSECTOR_KEY_CVLAN, cvlan); 2181 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2182 FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id); 2183 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2184 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4); 2185 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2186 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6); 2187 if (FL_KEY_IS_MASKED(mask, enc_ipv4) || 2188 FL_KEY_IS_MASKED(mask, enc_ipv6)) 2189 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL, 2190 enc_control); 2191 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2192 FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp); 2193 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2194 FLOW_DISSECTOR_KEY_ENC_IP, enc_ip); 2195 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2196 FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts); 2197 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2198 FLOW_DISSECTOR_KEY_CT, ct); 2199 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2200 FLOW_DISSECTOR_KEY_HASH, hash); 2201 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2202 FLOW_DISSECTOR_KEY_NUM_OF_VLANS, num_of_vlans); 2203 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2204 FLOW_DISSECTOR_KEY_PPPOE, pppoe); 2205 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2206 FLOW_DISSECTOR_KEY_L2TPV3, l2tpv3); 2207 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2208 FLOW_DISSECTOR_KEY_IPSEC, ipsec); 2209 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2210 FLOW_DISSECTOR_KEY_CFM, cfm); 2211 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2212 FLOW_DISSECTOR_KEY_ENC_FLAGS, enc_flags); 2213 2214 skb_flow_dissector_init(dissector, keys, cnt); 2215 } 2216 2217 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head, 2218 struct fl_flow_mask *mask) 2219 { 2220 struct fl_flow_mask *newmask; 2221 int err; 2222 2223 newmask = kzalloc(sizeof(*newmask), GFP_KERNEL); 2224 if (!newmask) 2225 return ERR_PTR(-ENOMEM); 2226 2227 fl_mask_copy(newmask, mask); 2228 2229 if ((newmask->key.tp_range.tp_min.dst && 2230 newmask->key.tp_range.tp_max.dst) || 2231 (newmask->key.tp_range.tp_min.src && 2232 newmask->key.tp_range.tp_max.src)) 2233 newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE; 2234 2235 err = fl_init_mask_hashtable(newmask); 2236 if (err) 2237 goto errout_free; 2238 2239 fl_init_dissector(&newmask->dissector, &newmask->key); 2240 2241 INIT_LIST_HEAD_RCU(&newmask->filters); 2242 2243 refcount_set(&newmask->refcnt, 1); 2244 err = rhashtable_replace_fast(&head->ht, &mask->ht_node, 2245 &newmask->ht_node, mask_ht_params); 2246 if (err) 2247 goto errout_destroy; 2248 2249 spin_lock(&head->masks_lock); 2250 list_add_tail_rcu(&newmask->list, &head->masks); 2251 spin_unlock(&head->masks_lock); 2252 2253 return newmask; 2254 2255 errout_destroy: 2256 rhashtable_destroy(&newmask->ht); 2257 errout_free: 2258 kfree(newmask); 2259 2260 return ERR_PTR(err); 2261 } 2262 2263 static int fl_check_assign_mask(struct cls_fl_head *head, 2264 struct cls_fl_filter *fnew, 2265 struct cls_fl_filter *fold, 2266 struct fl_flow_mask *mask) 2267 { 2268 struct fl_flow_mask *newmask; 2269 int ret = 0; 2270 2271 rcu_read_lock(); 2272 2273 /* Insert mask as temporary node to prevent concurrent creation of mask 2274 * with same key. Any concurrent lookups with same key will return 2275 * -EAGAIN because mask's refcnt is zero. 
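* If no identical mask exists yet, fl_create_new_mask() below replaces this temporary node with the fully initialised mask via rhashtable_replace_fast(), so other users only ever take a reference on a usable mask.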
2276 */ 2277 fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht, 2278 &mask->ht_node, 2279 mask_ht_params); 2280 if (!fnew->mask) { 2281 rcu_read_unlock(); 2282 2283 if (fold) { 2284 ret = -EINVAL; 2285 goto errout_cleanup; 2286 } 2287 2288 newmask = fl_create_new_mask(head, mask); 2289 if (IS_ERR(newmask)) { 2290 ret = PTR_ERR(newmask); 2291 goto errout_cleanup; 2292 } 2293 2294 fnew->mask = newmask; 2295 return 0; 2296 } else if (IS_ERR(fnew->mask)) { 2297 ret = PTR_ERR(fnew->mask); 2298 } else if (fold && fold->mask != fnew->mask) { 2299 ret = -EINVAL; 2300 } else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) { 2301 /* Mask was deleted concurrently, try again */ 2302 ret = -EAGAIN; 2303 } 2304 rcu_read_unlock(); 2305 return ret; 2306 2307 errout_cleanup: 2308 rhashtable_remove_fast(&head->ht, &mask->ht_node, 2309 mask_ht_params); 2310 return ret; 2311 } 2312 2313 static bool fl_needs_tc_skb_ext(const struct fl_flow_key *mask) 2314 { 2315 return mask->meta.l2_miss; 2316 } 2317 2318 static int fl_ht_insert_unique(struct cls_fl_filter *fnew, 2319 struct cls_fl_filter *fold, 2320 bool *in_ht) 2321 { 2322 struct fl_flow_mask *mask = fnew->mask; 2323 int err; 2324 2325 err = rhashtable_lookup_insert_fast(&mask->ht, 2326 &fnew->ht_node, 2327 mask->filter_ht_params); 2328 if (err) { 2329 *in_ht = false; 2330 /* It is okay if filter with same key exists when 2331 * overwriting. 2332 */ 2333 return fold && err == -EEXIST ? 0 : err; 2334 } 2335 2336 *in_ht = true; 2337 return 0; 2338 } 2339 2340 static int fl_change(struct net *net, struct sk_buff *in_skb, 2341 struct tcf_proto *tp, unsigned long base, 2342 u32 handle, struct nlattr **tca, 2343 void **arg, u32 flags, 2344 struct netlink_ext_ack *extack) 2345 { 2346 struct cls_fl_head *head = fl_head_dereference(tp); 2347 bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL); 2348 struct cls_fl_filter *fold = *arg; 2349 bool bound_to_filter = false; 2350 struct cls_fl_filter *fnew; 2351 struct fl_flow_mask *mask; 2352 struct nlattr **tb; 2353 bool in_ht; 2354 int err; 2355 2356 if (!tca[TCA_OPTIONS]) { 2357 err = -EINVAL; 2358 goto errout_fold; 2359 } 2360 2361 mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL); 2362 if (!mask) { 2363 err = -ENOBUFS; 2364 goto errout_fold; 2365 } 2366 2367 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL); 2368 if (!tb) { 2369 err = -ENOBUFS; 2370 goto errout_mask_alloc; 2371 } 2372 2373 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX, 2374 tca[TCA_OPTIONS], fl_policy, NULL); 2375 if (err < 0) 2376 goto errout_tb; 2377 2378 if (fold && handle && fold->handle != handle) { 2379 err = -EINVAL; 2380 goto errout_tb; 2381 } 2382 2383 fnew = kzalloc(sizeof(*fnew), GFP_KERNEL); 2384 if (!fnew) { 2385 err = -ENOBUFS; 2386 goto errout_tb; 2387 } 2388 INIT_LIST_HEAD(&fnew->hw_list); 2389 refcount_set(&fnew->refcnt, 1); 2390 2391 if (tb[TCA_FLOWER_FLAGS]) { 2392 fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]); 2393 2394 if (!tc_flags_valid(fnew->flags)) { 2395 kfree(fnew); 2396 err = -EINVAL; 2397 goto errout_tb; 2398 } 2399 } 2400 2401 if (!fold) { 2402 spin_lock(&tp->lock); 2403 if (!handle) { 2404 handle = 1; 2405 err = idr_alloc_u32(&head->handle_idr, NULL, &handle, 2406 INT_MAX, GFP_ATOMIC); 2407 } else { 2408 err = idr_alloc_u32(&head->handle_idr, NULL, &handle, 2409 handle, GFP_ATOMIC); 2410 2411 /* Filter with specified handle was concurrently 2412 * inserted after initial check in cls_api. This is not 2413 * necessarily an error if NLM_F_EXCL is not set in 2414 * message flags. 
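* (idr_alloc_u32() reports the collision as -ENOSPC because the requested handle is already in use.)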
Returning EAGAIN will cause cls_api to 2415 * try to update concurrently inserted rule. 2416 */ 2417 if (err == -ENOSPC) 2418 err = -EAGAIN; 2419 } 2420 spin_unlock(&tp->lock); 2421 2422 if (err) { 2423 kfree(fnew); 2424 goto errout_tb; 2425 } 2426 } 2427 fnew->handle = handle; 2428 2429 err = tcf_exts_init_ex(&fnew->exts, net, TCA_FLOWER_ACT, 0, tp, handle, 2430 !tc_skip_hw(fnew->flags)); 2431 if (err < 0) 2432 goto errout_idr; 2433 2434 err = tcf_exts_validate_ex(net, tp, tb, tca[TCA_RATE], 2435 &fnew->exts, flags, fnew->flags, 2436 extack); 2437 if (err < 0) 2438 goto errout_idr; 2439 2440 if (tb[TCA_FLOWER_CLASSID]) { 2441 fnew->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]); 2442 if (flags & TCA_ACT_FLAGS_NO_RTNL) 2443 rtnl_lock(); 2444 tcf_bind_filter(tp, &fnew->res, base); 2445 if (flags & TCA_ACT_FLAGS_NO_RTNL) 2446 rtnl_unlock(); 2447 bound_to_filter = true; 2448 } 2449 2450 err = fl_set_key(net, tb, &fnew->key, &mask->key, extack); 2451 if (err) 2452 goto unbind_filter; 2453 2454 fl_mask_update_range(mask); 2455 fl_set_masked_key(&fnew->mkey, &fnew->key, mask); 2456 2457 if (!fl_mask_fits_tmplt(tp->chain->tmplt_priv, mask)) { 2458 NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template"); 2459 err = -EINVAL; 2460 goto unbind_filter; 2461 } 2462 2463 /* Enable tc skb extension if filter matches on data extracted from 2464 * this extension. 2465 */ 2466 if (fl_needs_tc_skb_ext(&mask->key)) { 2467 fnew->needs_tc_skb_ext = 1; 2468 tc_skb_ext_tc_enable(); 2469 } 2470 2471 err = fl_check_assign_mask(head, fnew, fold, mask); 2472 if (err) 2473 goto unbind_filter; 2474 2475 err = fl_ht_insert_unique(fnew, fold, &in_ht); 2476 if (err) 2477 goto errout_mask; 2478 2479 if (!tc_skip_hw(fnew->flags)) { 2480 err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack); 2481 if (err) 2482 goto errout_ht; 2483 } 2484 2485 if (!tc_in_hw(fnew->flags)) 2486 fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW; 2487 2488 spin_lock(&tp->lock); 2489 2490 /* tp was deleted concurrently. -EAGAIN will cause caller to lookup 2491 * proto again or create new one, if necessary. 2492 */ 2493 if (tp->deleting) { 2494 err = -EAGAIN; 2495 goto errout_hw; 2496 } 2497 2498 if (fold) { 2499 /* Fold filter was deleted concurrently. Retry lookup. */ 2500 if (fold->deleted) { 2501 err = -EAGAIN; 2502 goto errout_hw; 2503 } 2504 2505 fnew->handle = handle; 2506 2507 if (!in_ht) { 2508 struct rhashtable_params params = 2509 fnew->mask->filter_ht_params; 2510 2511 err = rhashtable_insert_fast(&fnew->mask->ht, 2512 &fnew->ht_node, 2513 params); 2514 if (err) 2515 goto errout_hw; 2516 in_ht = true; 2517 } 2518 2519 refcount_inc(&fnew->refcnt); 2520 rhashtable_remove_fast(&fold->mask->ht, 2521 &fold->ht_node, 2522 fold->mask->filter_ht_params); 2523 idr_replace(&head->handle_idr, fnew, fnew->handle); 2524 list_replace_rcu(&fold->list, &fnew->list); 2525 fold->deleted = true; 2526 2527 spin_unlock(&tp->lock); 2528 2529 fl_mask_put(head, fold->mask); 2530 if (!tc_skip_hw(fold->flags)) 2531 fl_hw_destroy_filter(tp, fold, rtnl_held, NULL); 2532 tcf_unbind_filter(tp, &fold->res); 2533 /* Caller holds reference to fold, so refcnt is always > 0 2534 * after this. 
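* The refcount_dec()/__fl_put() pair below therefore cannot drop the last reference or free fold; the caller releases its own reference once it is done with the old filter.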
2535 */ 2536 refcount_dec(&fold->refcnt); 2537 __fl_put(fold); 2538 } else { 2539 idr_replace(&head->handle_idr, fnew, fnew->handle); 2540 2541 refcount_inc(&fnew->refcnt); 2542 list_add_tail_rcu(&fnew->list, &fnew->mask->filters); 2543 spin_unlock(&tp->lock); 2544 } 2545 2546 *arg = fnew; 2547 2548 kfree(tb); 2549 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work); 2550 return 0; 2551 2552 errout_ht: 2553 spin_lock(&tp->lock); 2554 errout_hw: 2555 fnew->deleted = true; 2556 spin_unlock(&tp->lock); 2557 if (!tc_skip_hw(fnew->flags)) 2558 fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL); 2559 if (in_ht) 2560 rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node, 2561 fnew->mask->filter_ht_params); 2562 errout_mask: 2563 fl_mask_put(head, fnew->mask); 2564 2565 unbind_filter: 2566 if (bound_to_filter) { 2567 if (flags & TCA_ACT_FLAGS_NO_RTNL) 2568 rtnl_lock(); 2569 tcf_unbind_filter(tp, &fnew->res); 2570 if (flags & TCA_ACT_FLAGS_NO_RTNL) 2571 rtnl_unlock(); 2572 } 2573 2574 errout_idr: 2575 if (!fold) { 2576 spin_lock(&tp->lock); 2577 idr_remove(&head->handle_idr, fnew->handle); 2578 spin_unlock(&tp->lock); 2579 } 2580 __fl_put(fnew); 2581 errout_tb: 2582 kfree(tb); 2583 errout_mask_alloc: 2584 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work); 2585 errout_fold: 2586 if (fold) 2587 __fl_put(fold); 2588 return err; 2589 } 2590 2591 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last, 2592 bool rtnl_held, struct netlink_ext_ack *extack) 2593 { 2594 struct cls_fl_head *head = fl_head_dereference(tp); 2595 struct cls_fl_filter *f = arg; 2596 bool last_on_mask; 2597 int err = 0; 2598 2599 err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack); 2600 *last = list_empty(&head->masks); 2601 __fl_put(f); 2602 2603 return err; 2604 } 2605 2606 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg, 2607 bool rtnl_held) 2608 { 2609 struct cls_fl_head *head = fl_head_dereference(tp); 2610 unsigned long id = arg->cookie, tmp; 2611 struct cls_fl_filter *f; 2612 2613 arg->count = arg->skip; 2614 2615 rcu_read_lock(); 2616 idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) { 2617 /* don't return filters that are being deleted */ 2618 if (!f || !refcount_inc_not_zero(&f->refcnt)) 2619 continue; 2620 rcu_read_unlock(); 2621 2622 if (arg->fn(tp, f, arg) < 0) { 2623 __fl_put(f); 2624 arg->stop = 1; 2625 rcu_read_lock(); 2626 break; 2627 } 2628 __fl_put(f); 2629 arg->count++; 2630 rcu_read_lock(); 2631 } 2632 rcu_read_unlock(); 2633 arg->cookie = id; 2634 } 2635 2636 static struct cls_fl_filter * 2637 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add) 2638 { 2639 struct cls_fl_head *head = fl_head_dereference(tp); 2640 2641 spin_lock(&tp->lock); 2642 if (list_empty(&head->hw_filters)) { 2643 spin_unlock(&tp->lock); 2644 return NULL; 2645 } 2646 2647 if (!f) 2648 f = list_entry(&head->hw_filters, struct cls_fl_filter, 2649 hw_list); 2650 list_for_each_entry_continue(f, &head->hw_filters, hw_list) { 2651 if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) { 2652 spin_unlock(&tp->lock); 2653 return f; 2654 } 2655 } 2656 2657 spin_unlock(&tp->lock); 2658 return NULL; 2659 } 2660 2661 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, 2662 void *cb_priv, struct netlink_ext_ack *extack) 2663 { 2664 struct tcf_block *block = tp->chain->block; 2665 struct flow_cls_offload cls_flower = {}; 2666 struct cls_fl_filter *f = NULL; 2667 int err; 2668 2669 /* hw_filters list can only be changed by hw offload functions 
after 2670 * obtaining rtnl lock. Make sure it is not changed while reoffload is 2671 * iterating it. 2672 */ 2673 ASSERT_RTNL(); 2674 2675 while ((f = fl_get_next_hw_filter(tp, f, add))) { 2676 cls_flower.rule = 2677 flow_rule_alloc(tcf_exts_num_actions(&f->exts)); 2678 if (!cls_flower.rule) { 2679 __fl_put(f); 2680 return -ENOMEM; 2681 } 2682 2683 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, 2684 extack); 2685 cls_flower.command = add ? 2686 FLOW_CLS_REPLACE : FLOW_CLS_DESTROY; 2687 cls_flower.cookie = (unsigned long)f; 2688 cls_flower.rule->match.dissector = &f->mask->dissector; 2689 cls_flower.rule->match.mask = &f->mask->key; 2690 cls_flower.rule->match.key = &f->mkey; 2691 2692 err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts, 2693 cls_flower.common.extack); 2694 if (err) { 2695 kfree(cls_flower.rule); 2696 if (tc_skip_sw(f->flags)) { 2697 __fl_put(f); 2698 return err; 2699 } 2700 goto next_flow; 2701 } 2702 2703 cls_flower.classid = f->res.classid; 2704 2705 err = tc_setup_cb_reoffload(block, tp, add, cb, 2706 TC_SETUP_CLSFLOWER, &cls_flower, 2707 cb_priv, &f->flags, 2708 &f->in_hw_count); 2709 tc_cleanup_offload_action(&cls_flower.rule->action); 2710 kfree(cls_flower.rule); 2711 2712 if (err) { 2713 __fl_put(f); 2714 return err; 2715 } 2716 next_flow: 2717 __fl_put(f); 2718 } 2719 2720 return 0; 2721 } 2722 2723 static void fl_hw_add(struct tcf_proto *tp, void *type_data) 2724 { 2725 struct flow_cls_offload *cls_flower = type_data; 2726 struct cls_fl_filter *f = 2727 (struct cls_fl_filter *) cls_flower->cookie; 2728 struct cls_fl_head *head = fl_head_dereference(tp); 2729 2730 spin_lock(&tp->lock); 2731 list_add(&f->hw_list, &head->hw_filters); 2732 spin_unlock(&tp->lock); 2733 } 2734 2735 static void fl_hw_del(struct tcf_proto *tp, void *type_data) 2736 { 2737 struct flow_cls_offload *cls_flower = type_data; 2738 struct cls_fl_filter *f = 2739 (struct cls_fl_filter *) cls_flower->cookie; 2740 2741 spin_lock(&tp->lock); 2742 if (!list_empty(&f->hw_list)) 2743 list_del_init(&f->hw_list); 2744 spin_unlock(&tp->lock); 2745 } 2746 2747 static int fl_hw_create_tmplt(struct tcf_chain *chain, 2748 struct fl_flow_tmplt *tmplt) 2749 { 2750 struct flow_cls_offload cls_flower = {}; 2751 struct tcf_block *block = chain->block; 2752 2753 cls_flower.rule = flow_rule_alloc(0); 2754 if (!cls_flower.rule) 2755 return -ENOMEM; 2756 2757 cls_flower.common.chain_index = chain->index; 2758 cls_flower.command = FLOW_CLS_TMPLT_CREATE; 2759 cls_flower.cookie = (unsigned long) tmplt; 2760 cls_flower.rule->match.dissector = &tmplt->dissector; 2761 cls_flower.rule->match.mask = &tmplt->mask; 2762 cls_flower.rule->match.key = &tmplt->dummy_key; 2763 2764 /* We don't care if driver (any of them) fails to handle this 2765 * call. It serves just as a hint for it. 
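* That is why the return value of tc_setup_cb_call() below is ignored and template creation succeeds regardless.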
2766 */ 2767 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true); 2768 kfree(cls_flower.rule); 2769 2770 return 0; 2771 } 2772 2773 static void fl_hw_destroy_tmplt(struct tcf_chain *chain, 2774 struct fl_flow_tmplt *tmplt) 2775 { 2776 struct flow_cls_offload cls_flower = {}; 2777 struct tcf_block *block = chain->block; 2778 2779 cls_flower.common.chain_index = chain->index; 2780 cls_flower.command = FLOW_CLS_TMPLT_DESTROY; 2781 cls_flower.cookie = (unsigned long) tmplt; 2782 2783 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true); 2784 } 2785 2786 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain, 2787 struct nlattr **tca, 2788 struct netlink_ext_ack *extack) 2789 { 2790 struct fl_flow_tmplt *tmplt; 2791 struct nlattr **tb; 2792 int err; 2793 2794 if (!tca[TCA_OPTIONS]) 2795 return ERR_PTR(-EINVAL); 2796 2797 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL); 2798 if (!tb) 2799 return ERR_PTR(-ENOBUFS); 2800 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX, 2801 tca[TCA_OPTIONS], fl_policy, NULL); 2802 if (err) 2803 goto errout_tb; 2804 2805 tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL); 2806 if (!tmplt) { 2807 err = -ENOMEM; 2808 goto errout_tb; 2809 } 2810 tmplt->chain = chain; 2811 err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack); 2812 if (err) 2813 goto errout_tmplt; 2814 2815 fl_init_dissector(&tmplt->dissector, &tmplt->mask); 2816 2817 err = fl_hw_create_tmplt(chain, tmplt); 2818 if (err) 2819 goto errout_tmplt; 2820 2821 kfree(tb); 2822 return tmplt; 2823 2824 errout_tmplt: 2825 kfree(tmplt); 2826 errout_tb: 2827 kfree(tb); 2828 return ERR_PTR(err); 2829 } 2830 2831 static void fl_tmplt_destroy(void *tmplt_priv) 2832 { 2833 struct fl_flow_tmplt *tmplt = tmplt_priv; 2834 2835 fl_hw_destroy_tmplt(tmplt->chain, tmplt); 2836 kfree(tmplt); 2837 } 2838 2839 static void fl_tmplt_reoffload(struct tcf_chain *chain, bool add, 2840 flow_setup_cb_t *cb, void *cb_priv) 2841 { 2842 struct fl_flow_tmplt *tmplt = chain->tmplt_priv; 2843 struct flow_cls_offload cls_flower = {}; 2844 2845 cls_flower.rule = flow_rule_alloc(0); 2846 if (!cls_flower.rule) 2847 return; 2848 2849 cls_flower.common.chain_index = chain->index; 2850 cls_flower.command = add ? 
FLOW_CLS_TMPLT_CREATE : 2851 FLOW_CLS_TMPLT_DESTROY; 2852 cls_flower.cookie = (unsigned long) tmplt; 2853 cls_flower.rule->match.dissector = &tmplt->dissector; 2854 cls_flower.rule->match.mask = &tmplt->mask; 2855 cls_flower.rule->match.key = &tmplt->dummy_key; 2856 2857 cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv); 2858 kfree(cls_flower.rule); 2859 } 2860 2861 static int fl_dump_key_val(struct sk_buff *skb, 2862 void *val, int val_type, 2863 void *mask, int mask_type, int len) 2864 { 2865 int err; 2866 2867 if (!memchr_inv(mask, 0, len)) 2868 return 0; 2869 err = nla_put(skb, val_type, len, val); 2870 if (err) 2871 return err; 2872 if (mask_type != TCA_FLOWER_UNSPEC) { 2873 err = nla_put(skb, mask_type, len, mask); 2874 if (err) 2875 return err; 2876 } 2877 return 0; 2878 } 2879 2880 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key, 2881 struct fl_flow_key *mask) 2882 { 2883 if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst, 2884 TCA_FLOWER_KEY_PORT_DST_MIN, 2885 &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC, 2886 sizeof(key->tp_range.tp_min.dst)) || 2887 fl_dump_key_val(skb, &key->tp_range.tp_max.dst, 2888 TCA_FLOWER_KEY_PORT_DST_MAX, 2889 &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC, 2890 sizeof(key->tp_range.tp_max.dst)) || 2891 fl_dump_key_val(skb, &key->tp_range.tp_min.src, 2892 TCA_FLOWER_KEY_PORT_SRC_MIN, 2893 &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC, 2894 sizeof(key->tp_range.tp_min.src)) || 2895 fl_dump_key_val(skb, &key->tp_range.tp_max.src, 2896 TCA_FLOWER_KEY_PORT_SRC_MAX, 2897 &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC, 2898 sizeof(key->tp_range.tp_max.src))) 2899 return -1; 2900 2901 return 0; 2902 } 2903 2904 static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb, 2905 struct flow_dissector_key_mpls *mpls_key, 2906 struct flow_dissector_key_mpls *mpls_mask, 2907 u8 lse_index) 2908 { 2909 struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index]; 2910 struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index]; 2911 int err; 2912 2913 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH, 2914 lse_index + 1); 2915 if (err) 2916 return err; 2917 2918 if (lse_mask->mpls_ttl) { 2919 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL, 2920 lse_key->mpls_ttl); 2921 if (err) 2922 return err; 2923 } 2924 if (lse_mask->mpls_bos) { 2925 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS, 2926 lse_key->mpls_bos); 2927 if (err) 2928 return err; 2929 } 2930 if (lse_mask->mpls_tc) { 2931 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC, 2932 lse_key->mpls_tc); 2933 if (err) 2934 return err; 2935 } 2936 if (lse_mask->mpls_label) { 2937 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL, 2938 lse_key->mpls_label); 2939 if (err) 2940 return err; 2941 } 2942 2943 return 0; 2944 } 2945 2946 static int fl_dump_key_mpls_opts(struct sk_buff *skb, 2947 struct flow_dissector_key_mpls *mpls_key, 2948 struct flow_dissector_key_mpls *mpls_mask) 2949 { 2950 struct nlattr *opts; 2951 struct nlattr *lse; 2952 u8 lse_index; 2953 int err; 2954 2955 opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS); 2956 if (!opts) 2957 return -EMSGSIZE; 2958 2959 for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) { 2960 if (!(mpls_mask->used_lses & 1 << lse_index)) 2961 continue; 2962 2963 lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE); 2964 if (!lse) { 2965 err = -EMSGSIZE; 2966 goto err_opts; 2967 } 2968 2969 err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask, 2970 lse_index); 2971 if (err) 2972 goto err_opts_lse; 
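/* this LSE dumped cleanly; close its nest and move on to the next used depth */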
2973 nla_nest_end(skb, lse); 2974 } 2975 nla_nest_end(skb, opts); 2976 2977 return 0; 2978 2979 err_opts_lse: 2980 nla_nest_cancel(skb, lse); 2981 err_opts: 2982 nla_nest_cancel(skb, opts); 2983 2984 return err; 2985 } 2986 2987 static int fl_dump_key_mpls(struct sk_buff *skb, 2988 struct flow_dissector_key_mpls *mpls_key, 2989 struct flow_dissector_key_mpls *mpls_mask) 2990 { 2991 struct flow_dissector_mpls_lse *lse_mask; 2992 struct flow_dissector_mpls_lse *lse_key; 2993 int err; 2994 2995 if (!mpls_mask->used_lses) 2996 return 0; 2997 2998 lse_mask = &mpls_mask->ls[0]; 2999 lse_key = &mpls_key->ls[0]; 3000 3001 /* For backward compatibility, don't use the MPLS nested attributes if 3002 * the rule can be expressed using the old attributes. 3003 */ 3004 if (mpls_mask->used_lses & ~1 || 3005 (!lse_mask->mpls_ttl && !lse_mask->mpls_bos && 3006 !lse_mask->mpls_tc && !lse_mask->mpls_label)) 3007 return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask); 3008 3009 if (lse_mask->mpls_ttl) { 3010 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL, 3011 lse_key->mpls_ttl); 3012 if (err) 3013 return err; 3014 } 3015 if (lse_mask->mpls_tc) { 3016 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC, 3017 lse_key->mpls_tc); 3018 if (err) 3019 return err; 3020 } 3021 if (lse_mask->mpls_label) { 3022 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL, 3023 lse_key->mpls_label); 3024 if (err) 3025 return err; 3026 } 3027 if (lse_mask->mpls_bos) { 3028 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS, 3029 lse_key->mpls_bos); 3030 if (err) 3031 return err; 3032 } 3033 return 0; 3034 } 3035 3036 static int fl_dump_key_ip(struct sk_buff *skb, bool encap, 3037 struct flow_dissector_key_ip *key, 3038 struct flow_dissector_key_ip *mask) 3039 { 3040 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS; 3041 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL; 3042 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK; 3043 int ttl_mask = encap ? 
TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK; 3044 3045 if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) || 3046 fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl))) 3047 return -1; 3048 3049 return 0; 3050 } 3051 3052 static int fl_dump_key_vlan(struct sk_buff *skb, 3053 int vlan_id_key, int vlan_prio_key, 3054 struct flow_dissector_key_vlan *vlan_key, 3055 struct flow_dissector_key_vlan *vlan_mask) 3056 { 3057 int err; 3058 3059 if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask))) 3060 return 0; 3061 if (vlan_mask->vlan_id) { 3062 err = nla_put_u16(skb, vlan_id_key, 3063 vlan_key->vlan_id); 3064 if (err) 3065 return err; 3066 } 3067 if (vlan_mask->vlan_priority) { 3068 err = nla_put_u8(skb, vlan_prio_key, 3069 vlan_key->vlan_priority); 3070 if (err) 3071 return err; 3072 } 3073 return 0; 3074 } 3075 3076 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask, 3077 u32 *flower_key, u32 *flower_mask, 3078 u32 flower_flag_bit, u32 dissector_flag_bit) 3079 { 3080 if (dissector_mask & dissector_flag_bit) { 3081 *flower_mask |= flower_flag_bit; 3082 if (dissector_key & dissector_flag_bit) 3083 *flower_key |= flower_flag_bit; 3084 } 3085 } 3086 3087 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask) 3088 { 3089 u32 key, mask; 3090 __be32 _key, _mask; 3091 int err; 3092 3093 if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask))) 3094 return 0; 3095 3096 key = 0; 3097 mask = 0; 3098 3099 fl_get_key_flag(flags_key, flags_mask, &key, &mask, 3100 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT); 3101 fl_get_key_flag(flags_key, flags_mask, &key, &mask, 3102 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST, 3103 FLOW_DIS_FIRST_FRAG); 3104 3105 _key = cpu_to_be32(key); 3106 _mask = cpu_to_be32(mask); 3107 3108 err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key); 3109 if (err) 3110 return err; 3111 3112 return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask); 3113 } 3114 3115 static int fl_dump_key_geneve_opt(struct sk_buff *skb, 3116 struct flow_dissector_key_enc_opts *enc_opts) 3117 { 3118 struct geneve_opt *opt; 3119 struct nlattr *nest; 3120 int opt_off = 0; 3121 3122 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE); 3123 if (!nest) 3124 goto nla_put_failure; 3125 3126 while (enc_opts->len > opt_off) { 3127 opt = (struct geneve_opt *)&enc_opts->data[opt_off]; 3128 3129 if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS, 3130 opt->opt_class)) 3131 goto nla_put_failure; 3132 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE, 3133 opt->type)) 3134 goto nla_put_failure; 3135 if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA, 3136 opt->length * 4, opt->opt_data)) 3137 goto nla_put_failure; 3138 3139 opt_off += sizeof(struct geneve_opt) + opt->length * 4; 3140 } 3141 nla_nest_end(skb, nest); 3142 return 0; 3143 3144 nla_put_failure: 3145 nla_nest_cancel(skb, nest); 3146 return -EMSGSIZE; 3147 } 3148 3149 static int fl_dump_key_vxlan_opt(struct sk_buff *skb, 3150 struct flow_dissector_key_enc_opts *enc_opts) 3151 { 3152 struct vxlan_metadata *md; 3153 struct nlattr *nest; 3154 3155 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN); 3156 if (!nest) 3157 goto nla_put_failure; 3158 3159 md = (struct vxlan_metadata *)&enc_opts->data[0]; 3160 if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp)) 3161 goto nla_put_failure; 3162 3163 nla_nest_end(skb, nest); 3164 return 0; 3165 3166 nla_put_failure: 3167 nla_nest_cancel(skb, nest); 3168 return 
-EMSGSIZE; 3169 } 3170 3171 static int fl_dump_key_erspan_opt(struct sk_buff *skb, 3172 struct flow_dissector_key_enc_opts *enc_opts) 3173 { 3174 struct erspan_metadata *md; 3175 struct nlattr *nest; 3176 3177 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN); 3178 if (!nest) 3179 goto nla_put_failure; 3180 3181 md = (struct erspan_metadata *)&enc_opts->data[0]; 3182 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version)) 3183 goto nla_put_failure; 3184 3185 if (md->version == 1 && 3186 nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index)) 3187 goto nla_put_failure; 3188 3189 if (md->version == 2 && 3190 (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR, 3191 md->u.md2.dir) || 3192 nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID, 3193 get_hwid(&md->u.md2)))) 3194 goto nla_put_failure; 3195 3196 nla_nest_end(skb, nest); 3197 return 0; 3198 3199 nla_put_failure: 3200 nla_nest_cancel(skb, nest); 3201 return -EMSGSIZE; 3202 } 3203 3204 static int fl_dump_key_gtp_opt(struct sk_buff *skb, 3205 struct flow_dissector_key_enc_opts *enc_opts) 3206 3207 { 3208 struct gtp_pdu_session_info *session_info; 3209 struct nlattr *nest; 3210 3211 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GTP); 3212 if (!nest) 3213 goto nla_put_failure; 3214 3215 session_info = (struct gtp_pdu_session_info *)&enc_opts->data[0]; 3216 3217 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE, 3218 session_info->pdu_type)) 3219 goto nla_put_failure; 3220 3221 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_QFI, session_info->qfi)) 3222 goto nla_put_failure; 3223 3224 nla_nest_end(skb, nest); 3225 return 0; 3226 3227 nla_put_failure: 3228 nla_nest_cancel(skb, nest); 3229 return -EMSGSIZE; 3230 } 3231 3232 static int fl_dump_key_pfcp_opt(struct sk_buff *skb, 3233 struct flow_dissector_key_enc_opts *enc_opts) 3234 { 3235 struct pfcp_metadata *md; 3236 struct nlattr *nest; 3237 3238 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_PFCP); 3239 if (!nest) 3240 goto nla_put_failure; 3241 3242 md = (struct pfcp_metadata *)&enc_opts->data[0]; 3243 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE, md->type)) 3244 goto nla_put_failure; 3245 3246 if (nla_put_be64(skb, TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID, 3247 md->seid, 0)) 3248 goto nla_put_failure; 3249 3250 nla_nest_end(skb, nest); 3251 return 0; 3252 3253 nla_put_failure: 3254 nla_nest_cancel(skb, nest); 3255 return -EMSGSIZE; 3256 } 3257 3258 static int fl_dump_key_ct(struct sk_buff *skb, 3259 struct flow_dissector_key_ct *key, 3260 struct flow_dissector_key_ct *mask) 3261 { 3262 if (IS_ENABLED(CONFIG_NF_CONNTRACK) && 3263 fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE, 3264 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK, 3265 sizeof(key->ct_state))) 3266 goto nla_put_failure; 3267 3268 if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) && 3269 fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE, 3270 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK, 3271 sizeof(key->ct_zone))) 3272 goto nla_put_failure; 3273 3274 if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) && 3275 fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK, 3276 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK, 3277 sizeof(key->ct_mark))) 3278 goto nla_put_failure; 3279 3280 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) && 3281 fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS, 3282 &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK, 3283 sizeof(key->ct_labels))) 3284 goto nla_put_failure; 3285 3286 return 0; 3287 3288 nla_put_failure: 3289 
return -EMSGSIZE; 3290 } 3291 3292 static int fl_dump_key_cfm(struct sk_buff *skb, 3293 struct flow_dissector_key_cfm *key, 3294 struct flow_dissector_key_cfm *mask) 3295 { 3296 struct nlattr *opts; 3297 int err; 3298 u8 mdl; 3299 3300 if (!memchr_inv(mask, 0, sizeof(*mask))) 3301 return 0; 3302 3303 opts = nla_nest_start(skb, TCA_FLOWER_KEY_CFM); 3304 if (!opts) 3305 return -EMSGSIZE; 3306 3307 if (FIELD_GET(FLOW_DIS_CFM_MDL_MASK, mask->mdl_ver)) { 3308 mdl = FIELD_GET(FLOW_DIS_CFM_MDL_MASK, key->mdl_ver); 3309 err = nla_put_u8(skb, TCA_FLOWER_KEY_CFM_MD_LEVEL, mdl); 3310 if (err) 3311 goto err_cfm_opts; 3312 } 3313 3314 if (mask->opcode) { 3315 err = nla_put_u8(skb, TCA_FLOWER_KEY_CFM_OPCODE, key->opcode); 3316 if (err) 3317 goto err_cfm_opts; 3318 } 3319 3320 nla_nest_end(skb, opts); 3321 3322 return 0; 3323 3324 err_cfm_opts: 3325 nla_nest_cancel(skb, opts); 3326 return err; 3327 } 3328 3329 static int fl_dump_key_enc_flags(struct sk_buff *skb, 3330 struct flow_dissector_key_enc_flags *key, 3331 struct flow_dissector_key_enc_flags *mask) 3332 { 3333 if (!memchr_inv(mask, 0, sizeof(*mask))) 3334 return 0; 3335 3336 if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_FLAGS, key->flags)) 3337 return -EMSGSIZE; 3338 3339 if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_FLAGS_MASK, mask->flags)) 3340 return -EMSGSIZE; 3341 3342 return 0; 3343 } 3344 3345 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type, 3346 struct flow_dissector_key_enc_opts *enc_opts) 3347 { 3348 struct nlattr *nest; 3349 int err; 3350 3351 if (!enc_opts->len) 3352 return 0; 3353 3354 nest = nla_nest_start_noflag(skb, enc_opt_type); 3355 if (!nest) 3356 goto nla_put_failure; 3357 3358 switch (enc_opts->dst_opt_type) { 3359 case IP_TUNNEL_GENEVE_OPT_BIT: 3360 err = fl_dump_key_geneve_opt(skb, enc_opts); 3361 if (err) 3362 goto nla_put_failure; 3363 break; 3364 case IP_TUNNEL_VXLAN_OPT_BIT: 3365 err = fl_dump_key_vxlan_opt(skb, enc_opts); 3366 if (err) 3367 goto nla_put_failure; 3368 break; 3369 case IP_TUNNEL_ERSPAN_OPT_BIT: 3370 err = fl_dump_key_erspan_opt(skb, enc_opts); 3371 if (err) 3372 goto nla_put_failure; 3373 break; 3374 case IP_TUNNEL_GTP_OPT_BIT: 3375 err = fl_dump_key_gtp_opt(skb, enc_opts); 3376 if (err) 3377 goto nla_put_failure; 3378 break; 3379 case IP_TUNNEL_PFCP_OPT_BIT: 3380 err = fl_dump_key_pfcp_opt(skb, enc_opts); 3381 if (err) 3382 goto nla_put_failure; 3383 break; 3384 default: 3385 goto nla_put_failure; 3386 } 3387 nla_nest_end(skb, nest); 3388 return 0; 3389 3390 nla_put_failure: 3391 nla_nest_cancel(skb, nest); 3392 return -EMSGSIZE; 3393 } 3394 3395 static int fl_dump_key_enc_opt(struct sk_buff *skb, 3396 struct flow_dissector_key_enc_opts *key_opts, 3397 struct flow_dissector_key_enc_opts *msk_opts) 3398 { 3399 int err; 3400 3401 err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts); 3402 if (err) 3403 return err; 3404 3405 return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts); 3406 } 3407 3408 static int fl_dump_key(struct sk_buff *skb, struct net *net, 3409 struct fl_flow_key *key, struct fl_flow_key *mask) 3410 { 3411 if (mask->meta.ingress_ifindex) { 3412 struct net_device *dev; 3413 3414 dev = __dev_get_by_index(net, key->meta.ingress_ifindex); 3415 if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name)) 3416 goto nla_put_failure; 3417 } 3418 3419 if (fl_dump_key_val(skb, &key->meta.l2_miss, 3420 TCA_FLOWER_L2_MISS, &mask->meta.l2_miss, 3421 TCA_FLOWER_UNSPEC, sizeof(key->meta.l2_miss))) 3422 goto nla_put_failure; 3423 3424 if (fl_dump_key_val(skb, 
key->eth.dst, TCA_FLOWER_KEY_ETH_DST, 3425 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK, 3426 sizeof(key->eth.dst)) || 3427 fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC, 3428 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK, 3429 sizeof(key->eth.src)) || 3430 fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE, 3431 &mask->basic.n_proto, TCA_FLOWER_UNSPEC, 3432 sizeof(key->basic.n_proto))) 3433 goto nla_put_failure; 3434 3435 if (mask->num_of_vlans.num_of_vlans) { 3436 if (nla_put_u8(skb, TCA_FLOWER_KEY_NUM_OF_VLANS, key->num_of_vlans.num_of_vlans)) 3437 goto nla_put_failure; 3438 } 3439 3440 if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls)) 3441 goto nla_put_failure; 3442 3443 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID, 3444 TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan)) 3445 goto nla_put_failure; 3446 3447 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID, 3448 TCA_FLOWER_KEY_CVLAN_PRIO, 3449 &key->cvlan, &mask->cvlan) || 3450 (mask->cvlan.vlan_tpid && 3451 nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE, 3452 key->cvlan.vlan_tpid))) 3453 goto nla_put_failure; 3454 3455 if (mask->basic.n_proto) { 3456 if (mask->cvlan.vlan_eth_type) { 3457 if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE, 3458 key->basic.n_proto)) 3459 goto nla_put_failure; 3460 } else if (mask->vlan.vlan_eth_type) { 3461 if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE, 3462 key->vlan.vlan_eth_type)) 3463 goto nla_put_failure; 3464 } 3465 } 3466 3467 if ((key->basic.n_proto == htons(ETH_P_IP) || 3468 key->basic.n_proto == htons(ETH_P_IPV6)) && 3469 (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO, 3470 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC, 3471 sizeof(key->basic.ip_proto)) || 3472 fl_dump_key_ip(skb, false, &key->ip, &mask->ip))) 3473 goto nla_put_failure; 3474 3475 if (mask->pppoe.session_id) { 3476 if (nla_put_be16(skb, TCA_FLOWER_KEY_PPPOE_SID, 3477 key->pppoe.session_id)) 3478 goto nla_put_failure; 3479 } 3480 if (mask->basic.n_proto && mask->pppoe.ppp_proto) { 3481 if (nla_put_be16(skb, TCA_FLOWER_KEY_PPP_PROTO, 3482 key->pppoe.ppp_proto)) 3483 goto nla_put_failure; 3484 } 3485 3486 if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS && 3487 (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC, 3488 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK, 3489 sizeof(key->ipv4.src)) || 3490 fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST, 3491 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK, 3492 sizeof(key->ipv4.dst)))) 3493 goto nla_put_failure; 3494 else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS && 3495 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC, 3496 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK, 3497 sizeof(key->ipv6.src)) || 3498 fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST, 3499 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK, 3500 sizeof(key->ipv6.dst)))) 3501 goto nla_put_failure; 3502 3503 if (key->basic.ip_proto == IPPROTO_TCP && 3504 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC, 3505 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK, 3506 sizeof(key->tp.src)) || 3507 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST, 3508 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK, 3509 sizeof(key->tp.dst)) || 3510 fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS, 3511 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK, 3512 sizeof(key->tcp.flags)))) 3513 goto nla_put_failure; 3514 else if (key->basic.ip_proto == IPPROTO_UDP && 3515 (fl_dump_key_val(skb, &key->tp.src, 
TCA_FLOWER_KEY_UDP_SRC, 3516 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK, 3517 sizeof(key->tp.src)) || 3518 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST, 3519 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK, 3520 sizeof(key->tp.dst)))) 3521 goto nla_put_failure; 3522 else if (key->basic.ip_proto == IPPROTO_SCTP && 3523 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC, 3524 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK, 3525 sizeof(key->tp.src)) || 3526 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST, 3527 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK, 3528 sizeof(key->tp.dst)))) 3529 goto nla_put_failure; 3530 else if (key->basic.n_proto == htons(ETH_P_IP) && 3531 key->basic.ip_proto == IPPROTO_ICMP && 3532 (fl_dump_key_val(skb, &key->icmp.type, 3533 TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type, 3534 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK, 3535 sizeof(key->icmp.type)) || 3536 fl_dump_key_val(skb, &key->icmp.code, 3537 TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code, 3538 TCA_FLOWER_KEY_ICMPV4_CODE_MASK, 3539 sizeof(key->icmp.code)))) 3540 goto nla_put_failure; 3541 else if (key->basic.n_proto == htons(ETH_P_IPV6) && 3542 key->basic.ip_proto == IPPROTO_ICMPV6 && 3543 (fl_dump_key_val(skb, &key->icmp.type, 3544 TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type, 3545 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK, 3546 sizeof(key->icmp.type)) || 3547 fl_dump_key_val(skb, &key->icmp.code, 3548 TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code, 3549 TCA_FLOWER_KEY_ICMPV6_CODE_MASK, 3550 sizeof(key->icmp.code)))) 3551 goto nla_put_failure; 3552 else if ((key->basic.n_proto == htons(ETH_P_ARP) || 3553 key->basic.n_proto == htons(ETH_P_RARP)) && 3554 (fl_dump_key_val(skb, &key->arp.sip, 3555 TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip, 3556 TCA_FLOWER_KEY_ARP_SIP_MASK, 3557 sizeof(key->arp.sip)) || 3558 fl_dump_key_val(skb, &key->arp.tip, 3559 TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip, 3560 TCA_FLOWER_KEY_ARP_TIP_MASK, 3561 sizeof(key->arp.tip)) || 3562 fl_dump_key_val(skb, &key->arp.op, 3563 TCA_FLOWER_KEY_ARP_OP, &mask->arp.op, 3564 TCA_FLOWER_KEY_ARP_OP_MASK, 3565 sizeof(key->arp.op)) || 3566 fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA, 3567 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK, 3568 sizeof(key->arp.sha)) || 3569 fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA, 3570 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK, 3571 sizeof(key->arp.tha)))) 3572 goto nla_put_failure; 3573 else if (key->basic.ip_proto == IPPROTO_L2TP && 3574 fl_dump_key_val(skb, &key->l2tpv3.session_id, 3575 TCA_FLOWER_KEY_L2TPV3_SID, 3576 &mask->l2tpv3.session_id, 3577 TCA_FLOWER_UNSPEC, 3578 sizeof(key->l2tpv3.session_id))) 3579 goto nla_put_failure; 3580 3581 if (key->ipsec.spi && 3582 fl_dump_key_val(skb, &key->ipsec.spi, TCA_FLOWER_KEY_SPI, 3583 &mask->ipsec.spi, TCA_FLOWER_KEY_SPI_MASK, 3584 sizeof(key->ipsec.spi))) 3585 goto nla_put_failure; 3586 3587 if ((key->basic.ip_proto == IPPROTO_TCP || 3588 key->basic.ip_proto == IPPROTO_UDP || 3589 key->basic.ip_proto == IPPROTO_SCTP) && 3590 fl_dump_key_port_range(skb, key, mask)) 3591 goto nla_put_failure; 3592 3593 if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS && 3594 (fl_dump_key_val(skb, &key->enc_ipv4.src, 3595 TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src, 3596 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK, 3597 sizeof(key->enc_ipv4.src)) || 3598 fl_dump_key_val(skb, &key->enc_ipv4.dst, 3599 TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst, 3600 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK, 3601 sizeof(key->enc_ipv4.dst)))) 3602 goto nla_put_failure; 3603 else if 
(key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS && 3604 (fl_dump_key_val(skb, &key->enc_ipv6.src, 3605 TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src, 3606 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK, 3607 sizeof(key->enc_ipv6.src)) || 3608 fl_dump_key_val(skb, &key->enc_ipv6.dst, 3609 TCA_FLOWER_KEY_ENC_IPV6_DST, 3610 &mask->enc_ipv6.dst, 3611 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK, 3612 sizeof(key->enc_ipv6.dst)))) 3613 goto nla_put_failure; 3614 3615 if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID, 3616 &mask->enc_key_id, TCA_FLOWER_UNSPEC, 3617 sizeof(key->enc_key_id)) || 3618 fl_dump_key_val(skb, &key->enc_tp.src, 3619 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT, 3620 &mask->enc_tp.src, 3621 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK, 3622 sizeof(key->enc_tp.src)) || 3623 fl_dump_key_val(skb, &key->enc_tp.dst, 3624 TCA_FLOWER_KEY_ENC_UDP_DST_PORT, 3625 &mask->enc_tp.dst, 3626 TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK, 3627 sizeof(key->enc_tp.dst)) || 3628 fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) || 3629 fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts)) 3630 goto nla_put_failure; 3631 3632 if (fl_dump_key_ct(skb, &key->ct, &mask->ct)) 3633 goto nla_put_failure; 3634 3635 if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags)) 3636 goto nla_put_failure; 3637 3638 if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH, 3639 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK, 3640 sizeof(key->hash.hash))) 3641 goto nla_put_failure; 3642 3643 if (fl_dump_key_cfm(skb, &key->cfm, &mask->cfm)) 3644 goto nla_put_failure; 3645 3646 if (fl_dump_key_enc_flags(skb, &key->enc_flags, &mask->enc_flags)) 3647 goto nla_put_failure; 3648 3649 return 0; 3650 3651 nla_put_failure: 3652 return -EMSGSIZE; 3653 } 3654 3655 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh, 3656 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held) 3657 { 3658 struct cls_fl_filter *f = fh; 3659 struct nlattr *nest; 3660 struct fl_flow_key *key, *mask; 3661 bool skip_hw; 3662 3663 if (!f) 3664 return skb->len; 3665 3666 t->tcm_handle = f->handle; 3667 3668 nest = nla_nest_start_noflag(skb, TCA_OPTIONS); 3669 if (!nest) 3670 goto nla_put_failure; 3671 3672 spin_lock(&tp->lock); 3673 3674 if (f->res.classid && 3675 nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid)) 3676 goto nla_put_failure_locked; 3677 3678 key = &f->key; 3679 mask = &f->mask->key; 3680 skip_hw = tc_skip_hw(f->flags); 3681 3682 if (fl_dump_key(skb, net, key, mask)) 3683 goto nla_put_failure_locked; 3684 3685 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags)) 3686 goto nla_put_failure_locked; 3687 3688 spin_unlock(&tp->lock); 3689 3690 if (!skip_hw) 3691 fl_hw_update_stats(tp, f, rtnl_held); 3692 3693 if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count)) 3694 goto nla_put_failure; 3695 3696 if (tcf_exts_dump(skb, &f->exts)) 3697 goto nla_put_failure; 3698 3699 nla_nest_end(skb, nest); 3700 3701 if (tcf_exts_dump_stats(skb, &f->exts) < 0) 3702 goto nla_put_failure; 3703 3704 return skb->len; 3705 3706 nla_put_failure_locked: 3707 spin_unlock(&tp->lock); 3708 nla_put_failure: 3709 nla_nest_cancel(skb, nest); 3710 return -1; 3711 } 3712 3713 static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh, 3714 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held) 3715 { 3716 struct cls_fl_filter *f = fh; 3717 struct nlattr *nest; 3718 bool skip_hw; 3719 3720 if (!f) 3721 return skb->len; 3722 3723 t->tcm_handle = f->handle; 3724 3725 nest = nla_nest_start_noflag(skb, TCA_OPTIONS); 3726 
if (!nest) 3727 goto nla_put_failure; 3728 3729 spin_lock(&tp->lock); 3730 3731 skip_hw = tc_skip_hw(f->flags); 3732 3733 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags)) 3734 goto nla_put_failure_locked; 3735 3736 spin_unlock(&tp->lock); 3737 3738 if (!skip_hw) 3739 fl_hw_update_stats(tp, f, rtnl_held); 3740 3741 if (tcf_exts_terse_dump(skb, &f->exts)) 3742 goto nla_put_failure; 3743 3744 nla_nest_end(skb, nest); 3745 3746 return skb->len; 3747 3748 nla_put_failure_locked: 3749 spin_unlock(&tp->lock); 3750 nla_put_failure: 3751 nla_nest_cancel(skb, nest); 3752 return -1; 3753 } 3754 3755 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv) 3756 { 3757 struct fl_flow_tmplt *tmplt = tmplt_priv; 3758 struct fl_flow_key *key, *mask; 3759 struct nlattr *nest; 3760 3761 nest = nla_nest_start_noflag(skb, TCA_OPTIONS); 3762 if (!nest) 3763 goto nla_put_failure; 3764 3765 key = &tmplt->dummy_key; 3766 mask = &tmplt->mask; 3767 3768 if (fl_dump_key(skb, net, key, mask)) 3769 goto nla_put_failure; 3770 3771 nla_nest_end(skb, nest); 3772 3773 return skb->len; 3774 3775 nla_put_failure: 3776 nla_nest_cancel(skb, nest); 3777 return -EMSGSIZE; 3778 } 3779 3780 static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q, 3781 unsigned long base) 3782 { 3783 struct cls_fl_filter *f = fh; 3784 3785 tc_cls_bind_class(classid, cl, q, &f->res, base); 3786 } 3787 3788 static bool fl_delete_empty(struct tcf_proto *tp) 3789 { 3790 struct cls_fl_head *head = fl_head_dereference(tp); 3791 3792 spin_lock(&tp->lock); 3793 tp->deleting = idr_is_empty(&head->handle_idr); 3794 spin_unlock(&tp->lock); 3795 3796 return tp->deleting; 3797 } 3798 3799 static struct tcf_proto_ops cls_fl_ops __read_mostly = { 3800 .kind = "flower", 3801 .classify = fl_classify, 3802 .init = fl_init, 3803 .destroy = fl_destroy, 3804 .get = fl_get, 3805 .put = fl_put, 3806 .change = fl_change, 3807 .delete = fl_delete, 3808 .delete_empty = fl_delete_empty, 3809 .walk = fl_walk, 3810 .reoffload = fl_reoffload, 3811 .hw_add = fl_hw_add, 3812 .hw_del = fl_hw_del, 3813 .dump = fl_dump, 3814 .terse_dump = fl_terse_dump, 3815 .bind_class = fl_bind_class, 3816 .tmplt_create = fl_tmplt_create, 3817 .tmplt_destroy = fl_tmplt_destroy, 3818 .tmplt_reoffload = fl_tmplt_reoffload, 3819 .tmplt_dump = fl_tmplt_dump, 3820 .get_exts = fl_get_exts, 3821 .owner = THIS_MODULE, 3822 .flags = TCF_PROTO_OPS_DOIT_UNLOCKED, 3823 }; 3824 MODULE_ALIAS_NET_CLS("flower"); 3825 3826 static int __init cls_fl_init(void) 3827 { 3828 return register_tcf_proto_ops(&cls_fl_ops); 3829 } 3830 3831 static void __exit cls_fl_exit(void) 3832 { 3833 unregister_tcf_proto_ops(&cls_fl_ops); 3834 } 3835 3836 module_init(cls_fl_init); 3837 module_exit(cls_fl_exit); 3838 3839 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>"); 3840 MODULE_DESCRIPTION("Flower classifier"); 3841 MODULE_LICENSE("GPL v2"); 3842