// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>
#include <linux/bitfield.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/mpls.h>
#include <linux/ppp_defs.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include <net/erspan.h>
#include <net/gtp.h>
#include <net/pfcp.h>
#include <net/tc_wrapper.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

#include <uapi/linux/netfilter/nf_conntrack_common.h>

#define TCA_FLOWER_KEY_CT_FLAGS_MAX \
		((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1)
#define TCA_FLOWER_KEY_CT_FLAGS_MASK \
		(TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)

struct fl_flow_key {
	struct flow_dissector_key_meta meta;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	struct flow_dissector_key_vlan cvlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_mpls mpls;
	struct flow_dissector_key_tcp tcp;
	struct flow_dissector_key_ip ip;
	struct flow_dissector_key_ip enc_ip;
	struct flow_dissector_key_enc_opts enc_opts;
	struct flow_dissector_key_ports_range tp_range;
	struct flow_dissector_key_ct ct;
	struct flow_dissector_key_hash hash;
	struct flow_dissector_key_num_of_vlans num_of_vlans;
	struct flow_dissector_key_pppoe pppoe;
	struct flow_dissector_key_l2tpv3 l2tpv3;
	struct flow_dissector_key_ipsec ipsec;
	struct flow_dissector_key_cfm cfm;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
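/* The key layout above is deliberately long-aligned: the masked-key helpers
 * below (fl_set_masked_key() and friends) walk the key as an array of longs,
 * so every load and compare is word-sized. fl_mask_update_range() rounds the
 * used-byte range of a mask down/up to sizeof(long) boundaries for the same
 * reason.
 */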
struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	u32 flags;
	struct rhash_head ht_node;
	struct rhashtable ht;
	struct rhashtable_params filter_ht_params;
	struct flow_dissector dissector;
	struct list_head filters;
	struct rcu_work rwork;
	struct list_head list;
	refcount_t refcnt;
};

struct fl_flow_tmplt {
	struct fl_flow_key dummy_key;
	struct fl_flow_key mask;
	struct flow_dissector dissector;
	struct tcf_chain *chain;
};

struct cls_fl_head {
	struct rhashtable ht;
	spinlock_t masks_lock; /* Protect masks list */
	struct list_head masks;
	struct list_head hw_filters;
	struct rcu_work rwork;
	struct idr handle_idr;
};

struct cls_fl_filter {
	struct fl_flow_mask *mask;
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	struct list_head hw_list;
	u32 handle;
	u32 flags;
	u32 in_hw_count;
	u8 needs_tc_skb_ext:1;
	struct rcu_work rwork;
	struct net_device *hw_dev;
	/* Flower classifier is unlocked, which means that its reference
	 * counter can be changed concurrently without any kind of external
	 * synchronization. Use atomic reference counter to be
	 * concurrency-safe.
	 */
	refcount_t refcnt;
	bool deleted;
};

static const struct rhashtable_params mask_ht_params = {
	.key_offset = offsetof(struct fl_flow_mask, key),
	.key_len = sizeof(struct fl_flow_key),
	.head_offset = offsetof(struct fl_flow_mask, ht_node),
	.automatic_shrinking = true,
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last;

	for (i = 0; i < size; i++) {
		if (bytes[i]) {
			first = i;
			break;
		}
	}
	last = first;
	for (i = size - 1; i != first; i--) {
		if (bytes[i]) {
			last = i;
			break;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
			       struct fl_flow_mask *mask)
{
	const long *lmask = fl_key_get_start(&mask->key, mask);
	const long *ltmplt;
	int i;

	if (!tmplt)
		return true;
	ltmplt = fl_key_get_start(&tmplt->mask, mask);
	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
		if (~*ltmplt++ & *lmask++)
			return false;
	}
	return true;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}
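/* Port-range keys cannot be matched by hashing alone: the skb carries a
 * single port value while the filter stores a min/max pair. The cmp helpers
 * below check the packet's port against each filter's bounds and, on
 * success, copy that filter's own min/max into the masked key so the
 * subsequent rhashtable lookup can still match exactly.
 */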
static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	u16 min_mask, max_mask, min_val, max_val;

	min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
	max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
	min_val = ntohs(filter->key.tp_range.tp_min.dst);
	max_val = ntohs(filter->key.tp_range.tp_max.dst);

	if (min_mask && max_mask) {
		if (ntohs(key->tp_range.tp.dst) < min_val ||
		    ntohs(key->tp_range.tp.dst) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
	}
	return true;
}

static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	u16 min_mask, max_mask, min_val, max_val;

	min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
	max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
	min_val = ntohs(filter->key.tp_range.tp_min.src);
	max_val = ntohs(filter->key.tp_range.tp_max.src);

	if (min_mask && max_mask) {
		if (ntohs(key->tp_range.tp.src) < min_val ||
		    ntohs(key->tp_range.tp.src) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
	}
	return true;
}

static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
					 struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
				      mask->filter_ht_params);
}

static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
					     struct fl_flow_key *mkey,
					     struct fl_flow_key *key)
{
	struct cls_fl_filter *filter, *f;

	list_for_each_entry_rcu(filter, &mask->filters, list) {
		if (!fl_range_port_dst_cmp(filter, key, mkey))
			continue;

		if (!fl_range_port_src_cmp(filter, key, mkey))
			continue;

		f = __fl_lookup(mask, mkey);
		if (f)
			return f;
	}
	return NULL;
}

static noinline_for_stack
struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
{
	struct fl_flow_key mkey;

	fl_set_masked_key(&mkey, key, mask);
	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
		return fl_lookup_range(mask, &mkey, key);

	return __fl_lookup(mask, &mkey);
}

static u16 fl_ct_info_to_flower_map[] = {
	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED |
					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_NEW,
};
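/* Classification walks the per-head list of masks. Each mask owns its own
 * rhashtable of filters, so one lookup means: dissect only the fields this
 * mask cares about, AND the result with the mask, then do a single hash
 * lookup. Lookup cost therefore scales with the number of distinct masks,
 * not with the number of filters.
 */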
TC_INDIRECT_SCOPE int fl_classify(struct sk_buff *skb,
				  const struct tcf_proto *tp,
				  struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	bool post_ct = tc_skb_cb(skb)->post_ct;
	u16 zone = tc_skb_cb(skb)->zone;
	struct fl_flow_key skb_key;
	struct fl_flow_mask *mask;
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(mask, &head->masks, list) {
		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
		fl_clear_masked_range(&skb_key, mask);

		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
		/* skb_flow_dissect() does not set n_proto in case of an
		 * unknown protocol, so do it here.
		 */
		skb_key.basic.n_proto = skb_protocol(skb, false);
		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
				    fl_ct_info_to_flower_map,
				    ARRAY_SIZE(fl_ct_info_to_flower_map),
				    post_ct, zone);
		skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
		skb_flow_dissect(skb, &mask->dissector, &skb_key,
				 FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);

		f = fl_mask_lookup(mask, &skb_key);
		if (f && !tc_skip_sw(f->flags)) {
			*res = f->res;
			return tcf_exts_exec(skb, &f->exts, res);
		}
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	spin_lock_init(&head->masks_lock);
	INIT_LIST_HEAD_RCU(&head->masks);
	INIT_LIST_HEAD(&head->hw_filters);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return rhashtable_init(&head->ht, &mask_ht_params);
}

static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
{
	/* temporary masks don't have their filters list and ht initialized */
	if (mask_init_done) {
		WARN_ON(!list_empty(&mask->filters));
		rhashtable_destroy(&mask->ht);
	}
	kfree(mask);
}

static void fl_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, true);
}

static void fl_uninit_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, false);
}

static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
{
	if (!refcount_dec_and_test(&mask->refcnt))
		return false;

	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);

	spin_lock(&head->masks_lock);
	list_del_rcu(&mask->list);
	spin_unlock(&head->masks_lock);

	tcf_queue_work(&mask->rwork, fl_mask_free_work);

	return true;
}
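/* Masks are shared between filters and reference counted. Dropping the last
 * reference unlinks the mask under masks_lock and defers the actual free to
 * a workqueue via tcf_queue_work(), so concurrent RCU readers walking
 * head->masks never see freed memory.
 */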
static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
{
	/* Flower classifier only changes root pointer during init and destroy.
	 * Users must obtain reference to tcf_proto instance before calling its
	 * API, so tp->root pointer is protected from concurrent call to
	 * fl_destroy() by reference counting.
	 */
	return rcu_dereference_raw(tp->root);
}

static void __fl_destroy_filter(struct cls_fl_filter *f)
{
	if (f->needs_tc_skb_ext)
		tc_skb_ext_tc_disable();
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void fl_destroy_filter_work(struct work_struct *work)
{
	struct cls_fl_filter *f = container_of(to_rcu_work(work),
					       struct cls_fl_filter, rwork);

	__fl_destroy_filter(f);
}

static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
				 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_DESTROY;
	cls_flower.cookie = (unsigned long) f;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
			    &f->flags, &f->in_hw_count, rtnl_held);
}

static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct cls_fl_filter *f, bool rtnl_held,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};
	bool skip_sw = tc_skip_sw(f->flags);
	int err = 0;

	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
	if (!cls_flower.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_REPLACE;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.rule->match.dissector = &f->mask->dissector;
	cls_flower.rule->match.mask = &f->mask->key;
	cls_flower.rule->match.key = &f->mkey;
	cls_flower.classid = f->res.classid;

	err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
				      cls_flower.common.extack);
	if (err) {
		kfree(cls_flower.rule);

		return skip_sw ? err : 0;
	}

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
	tc_cleanup_offload_action(&cls_flower.rule->action);
	kfree(cls_flower.rule);

	if (err) {
		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
		return err;
	}

	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}
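/* Offload behaviour follows the filter flags: with skip_sw the rule is only
 * useful if at least one device accepted it (TCA_CLS_FLAGS_IN_HW), hence the
 * -EINVAL above; with skip_hw the offload callbacks are bypassed entirely
 * (see the !tc_skip_hw() checks) and the rule lives purely in software.
 */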
static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
			       bool rtnl_held)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
	cls_flower.command = FLOW_CLS_STATS;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.classid = f->res.classid;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
			 rtnl_held);

	tcf_exts_hw_stats_update(&f->exts, &cls_flower.stats, cls_flower.use_act_stats);
}

static void __fl_put(struct cls_fl_filter *f)
{
	if (!refcount_dec_and_test(&f->refcnt))
		return;

	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
	else
		__fl_destroy_filter(f);
}

static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
{
	struct cls_fl_filter *f;

	rcu_read_lock();
	f = idr_find(&head->handle_idr, handle);
	if (f && !refcount_inc_not_zero(&f->refcnt))
		f = NULL;
	rcu_read_unlock();

	return f;
}
static struct tcf_exts *fl_get_exts(const struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;

	f = idr_find(&head->handle_idr, handle);
	return f ? &f->exts : NULL;
}

static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
		       bool *last, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	*last = false;

	spin_lock(&tp->lock);
	if (f->deleted) {
		spin_unlock(&tp->lock);
		return -ENOENT;
	}

	f->deleted = true;
	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
			       f->mask->filter_ht_params);
	idr_remove(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	spin_unlock(&tp->lock);

	*last = fl_mask_put(head, f->mask);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
	tcf_unbind_filter(tp, &f->res);
	__fl_put(f);

	return 0;
}

static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(to_rcu_work(work),
						struct cls_fl_head,
						rwork);

	rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct fl_flow_mask *mask, *next_mask;
	struct cls_fl_filter *f, *next;
	bool last;

	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
		list_for_each_entry_safe(f, next, &mask->filters, list) {
			__fl_delete(tp, f, &last, rtnl_held, extack);
			if (last)
				break;
		}
	}
	idr_destroy(&head->handle_idr);

	__module_get(THIS_MODULE);
	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}

static void fl_put(struct tcf_proto *tp, void *arg)
{
	struct cls_fl_filter *f = arg;

	__fl_put(f);
}

static void *fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	return __fl_get(head, handle);
}
static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .strict_start_type =
						TCA_FLOWER_L2_MISS },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_MPLS_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_CT_STATE]	=
		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
	[TCA_FLOWER_KEY_CT_STATE_MASK]	=
		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_HASH]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_HASH_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_NUM_OF_VLANS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_PPPOE_SID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_PPP_PROTO]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_L2TPV3_SID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_SPI]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_SPI_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_L2_MISS]		= NLA_POLICY_MAX(NLA_U8, 1),
	[TCA_FLOWER_KEY_CFM]		= { .type = NLA_NESTED },
};
static const struct nla_policy
enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC] = {
		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]	 = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_GTP]	 = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_PFCP]	 = { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]	= { .type = NLA_BINARY,
						    .len = 128 },
};

static const struct nla_policy
vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]	= { .type = NLA_U32 },
};

static const struct nla_policy
erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]	= { .type = NLA_U8 },
};

static const struct nla_policy
gtp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]	= { .type = NLA_U8 },
};

static const struct nla_policy
pfcp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID]	= { .type = NLA_U64 },
};

static const struct nla_policy
mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]	= { .type = NLA_U32 },
};

static const struct nla_policy
cfm_opt_policy[TCA_FLOWER_KEY_CFM_OPT_MAX + 1] = {
	[TCA_FLOWER_KEY_CFM_MD_LEVEL]	= NLA_POLICY_MAX(NLA_U8,
						FLOW_DIS_CFM_MDL_MAX),
	[TCA_FLOWER_KEY_CFM_OPCODE]	= { .type = NLA_U8 },
};
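/* Generic key/mask setter. If the mask attribute is absent, or the caller
 * passes TCA_FLOWER_UNSPEC for it, the mask defaults to all-ones, i.e.
 * supplying only a value always means "match this field exactly".
 */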
static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}

static int fl_set_key_spi(struct nlattr **tb, struct fl_flow_key *key,
			  struct fl_flow_key *mask,
			  struct netlink_ext_ack *extack)
{
	if (key->basic.ip_proto != IPPROTO_ESP &&
	    key->basic.ip_proto != IPPROTO_AH) {
		NL_SET_ERR_MSG(extack,
			       "Protocol must be either ESP or AH");
		return -EINVAL;
	}

	fl_set_key_val(tb, &key->ipsec.spi,
		       TCA_FLOWER_KEY_SPI,
		       &mask->ipsec.spi, TCA_FLOWER_KEY_SPI_MASK,
		       sizeof(key->ipsec.spi));
	return 0;
}

static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
				 struct fl_flow_key *mask,
				 struct netlink_ext_ack *extack)
{
	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
	fl_set_key_val(tb, &key->tp_range.tp_min.src,
		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
	fl_set_key_val(tb, &key->tp_range.tp_max.src,
		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));

	if (mask->tp_range.tp_min.dst != mask->tp_range.tp_max.dst) {
		NL_SET_ERR_MSG(extack,
			       "Both min and max destination ports must be specified");
		return -EINVAL;
	}
	if (mask->tp_range.tp_min.src != mask->tp_range.tp_max.src) {
		NL_SET_ERR_MSG(extack,
			       "Both min and max source ports must be specified");
		return -EINVAL;
	}
	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
	    ntohs(key->tp_range.tp_max.dst) <=
	    ntohs(key->tp_range.tp_min.dst)) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
				    "Invalid destination port range (min must be strictly smaller than max)");
		return -EINVAL;
	}
	if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
	    ntohs(key->tp_range.tp_max.src) <=
	    ntohs(key->tp_range.tp_min.src)) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
				    "Invalid source port range (min must be strictly smaller than max)");
		return -EINVAL;
	}

	return 0;
}
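/* For illustration only: from user space a port range arrives as a min/max
 * pair, e.g. with iproute2 syntax along the lines of
 *
 *	tc filter add dev eth0 ingress protocol ip flower \
 *		ip_proto tcp dst_port 1000-2000 action drop
 *
 * which is encoded as TCA_FLOWER_KEY_PORT_DST_MIN/MAX.
 */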
static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
			       struct flow_dissector_key_mpls *key_val,
			       struct flow_dissector_key_mpls *key_mask,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
	struct flow_dissector_mpls_lse *lse_mask;
	struct flow_dissector_mpls_lse *lse_val;
	u8 lse_index;
	u8 depth;
	int err;

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
			       mpls_stack_entry_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
		NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
		return -EINVAL;
	}

	depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);

	/* LSE depth starts at 1, for consistency with terminology used by
	 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
	 */
	if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
				    "Invalid MPLS depth");
		return -EINVAL;
	}
	lse_index = depth - 1;

	dissector_set_mpls_lse(key_val, lse_index);
	dissector_set_mpls_lse(key_mask, lse_index);

	lse_val = &key_val->ls[lse_index];
	lse_mask = &key_mask->ls[lse_index];

	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
		lse_mask->mpls_ttl = MPLS_TTL_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);

		if (bos & ~MPLS_BOS_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
					    "Bottom Of Stack (BOS) must be 0 or 1");
			return -EINVAL;
		}
		lse_val->mpls_bos = bos;
		lse_mask->mpls_bos = MPLS_BOS_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);

		if (tc & ~MPLS_TC_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
					    "Traffic Class (TC) must be between 0 and 7");
			return -EINVAL;
		}
		lse_val->mpls_tc = tc;
		lse_mask->mpls_tc = MPLS_TC_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);

		if (label & ~MPLS_LABEL_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
					    "Label must be between 0 and 1048575");
			return -EINVAL;
		}
		lse_val->mpls_label = label;
		lse_mask->mpls_label = MPLS_LABEL_MASK;
	}

	return 0;
}

static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
				struct flow_dissector_key_mpls *key_val,
				struct flow_dissector_key_mpls *key_mask,
				struct netlink_ext_ack *extack)
{
	struct nlattr *nla_lse;
	int rem;
	int err;

	if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
		NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
				    "NLA_F_NESTED is missing");
		return -EINVAL;
	}

	nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
		if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
			NL_SET_ERR_MSG_ATTR(extack, nla_lse,
					    "Invalid MPLS option type");
			return -EINVAL;
		}

		err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
		if (err < 0)
			return err;
	}
	if (rem) {
		NL_SET_ERR_MSG(extack,
			       "Bytes leftover after parsing MPLS options");
		return -EINVAL;
	}

	return 0;
}
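/* The plain TCA_FLOWER_KEY_MPLS_* attributes handled below predate the
 * nested TCA_FLOWER_KEY_MPLS_OPTS interface and can only describe the
 * topmost label stack entry (ls[0]); the two forms are mutually exclusive,
 * which fl_set_key_mpls() enforces.
 */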
static int fl_set_key_mpls(struct nlattr **tb,
			   struct flow_dissector_key_mpls *key_val,
			   struct flow_dissector_key_mpls *key_mask,
			   struct netlink_ext_ack *extack)
{
	struct flow_dissector_mpls_lse *lse_mask;
	struct flow_dissector_mpls_lse *lse_val;

	if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
		if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
		    tb[TCA_FLOWER_KEY_MPLS_BOS] ||
		    tb[TCA_FLOWER_KEY_MPLS_TC] ||
		    tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPTS],
					    "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
			return -EBADMSG;
		}

		return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
					    key_val, key_mask, extack);
	}

	lse_val = &key_val->ls[0];
	lse_mask = &key_mask->ls[0];

	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
		lse_mask->mpls_ttl = MPLS_TTL_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

		if (bos & ~MPLS_BOS_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_BOS],
					    "Bottom Of Stack (BOS) must be 0 or 1");
			return -EINVAL;
		}
		lse_val->mpls_bos = bos;
		lse_mask->mpls_bos = MPLS_BOS_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

		if (tc & ~MPLS_TC_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_TC],
					    "Traffic Class (TC) must be between 0 and 7");
			return -EINVAL;
		}
		lse_val->mpls_tc = tc;
		lse_mask->mpls_tc = MPLS_TC_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

		if (label & ~MPLS_LABEL_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_LABEL],
					    "Label must be between 0 and 1048575");
			return -EINVAL;
		}
		lse_val->mpls_label = label;
		lse_mask->mpls_label = MPLS_LABEL_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	return 0;
}

static void fl_set_key_vlan(struct nlattr **tb,
			    __be16 ethertype,
			    int vlan_id_key, int vlan_prio_key,
			    int vlan_next_eth_type_key,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[vlan_id_key]) {
		key_val->vlan_id =
			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[vlan_prio_key]) {
		key_val->vlan_priority =
			nla_get_u8(tb[vlan_prio_key]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
	if (ethertype) {
		key_val->vlan_tpid = ethertype;
		key_mask->vlan_tpid = cpu_to_be16(~0);
	}
	if (tb[vlan_next_eth_type_key]) {
		key_val->vlan_eth_type =
			nla_get_be16(tb[vlan_next_eth_type_key]);
		key_mask->vlan_eth_type = cpu_to_be16(~0);
	}
}
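/* For illustration only: a QinQ match built from the attributes above could
 * be expressed in iproute2 syntax roughly as
 *
 *	tc filter add dev eth0 ingress protocol 802.1ad flower \
 *		vlan_id 100 vlan_ethtype 802.1q \
 *		cvlan_id 200 cvlan_ethtype ipv4 action pass
 *
 * The outer TPID comes from the filter's protocol, the inner one from
 * vlan_ethtype; fl_set_key_vlan() records each in vlan_tpid.
 */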
static void fl_set_key_pppoe(struct nlattr **tb,
			     struct flow_dissector_key_pppoe *key_val,
			     struct flow_dissector_key_pppoe *key_mask,
			     struct fl_flow_key *key,
			     struct fl_flow_key *mask)
{
	/* key_val::type must be set to ETH_P_PPP_SES because ETH_P_PPP_SES
	 * was stored in basic.n_proto, which might get overwritten by
	 * ppp_proto or might be set to 0; the role of key_val::type is
	 * similar to vlan_key::tpid.
	 */
	key_val->type = htons(ETH_P_PPP_SES);
	key_mask->type = cpu_to_be16(~0);

	if (tb[TCA_FLOWER_KEY_PPPOE_SID]) {
		key_val->session_id =
			nla_get_be16(tb[TCA_FLOWER_KEY_PPPOE_SID]);
		key_mask->session_id = cpu_to_be16(~0);
	}
	if (tb[TCA_FLOWER_KEY_PPP_PROTO]) {
		key_val->ppp_proto =
			nla_get_be16(tb[TCA_FLOWER_KEY_PPP_PROTO]);
		key_mask->ppp_proto = cpu_to_be16(~0);

		if (key_val->ppp_proto == htons(PPP_IP)) {
			key->basic.n_proto = htons(ETH_P_IP);
			mask->basic.n_proto = cpu_to_be16(~0);
		} else if (key_val->ppp_proto == htons(PPP_IPV6)) {
			key->basic.n_proto = htons(ETH_P_IPV6);
			mask->basic.n_proto = cpu_to_be16(~0);
		} else if (key_val->ppp_proto == htons(PPP_MPLS_UC)) {
			key->basic.n_proto = htons(ETH_P_MPLS_UC);
			mask->basic.n_proto = cpu_to_be16(~0);
		} else if (key_val->ppp_proto == htons(PPP_MPLS_MC)) {
			key->basic.n_proto = htons(ETH_P_MPLS_MC);
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	} else {
		key->basic.n_proto = 0;
		mask->basic.n_proto = cpu_to_be16(0);
	}
}

static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}

static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
			    u32 *flags_mask, struct netlink_ext_ack *extack)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
		NL_SET_ERR_MSG(extack, "Missing flags mask");
		return -EINVAL;
	}

	key = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	return 0;
}

static void fl_set_key_ip(struct nlattr **tb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
}
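/* Tunnel option parsers. Each fl_set_*_opt() helper appends one option to
 * key->enc_opts.data and returns the number of bytes consumed, or a negative
 * errno. On the key pass option_len is 0, which arms the mandatory-attribute
 * checks; on the mask pass option_len carries the key's length and the
 * memset(..., 0xff, ...) provides an all-ones (exact match) default for any
 * omitted field.
 */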
static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
	struct nlattr *class = NULL, *type = NULL, *data = NULL;
	struct geneve_opt *opt;
	int err, data_len = 0;

	if (option_len > sizeof(struct geneve_opt))
		data_len = option_len - sizeof(struct geneve_opt);

	if (key->enc_opts.len > FLOW_DIS_TUN_OPTS_MAX - 4)
		return -ERANGE;

	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
	memset(opt, 0xff, option_len);
	opt->length = data_len / 4;
	opt->r1 = 0;
	opt->r2 = 0;
	opt->r3 = 0;

	/* If no mask has been provided we assume an exact match. */
	if (!depth)
		return sizeof(struct geneve_opt) + data_len;

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb,
					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
					  nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	/* We are not allowed to omit any of CLASS, TYPE or DATA
	 * fields from the key.
	 */
	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	/* Omitting any of CLASS, TYPE or DATA fields is allowed
	 * for the mask.
	 */
	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
		int new_len = key->enc_opts.len;

		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
		data_len = nla_len(data);
		if (data_len < 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
			return -ERANGE;
		}
		if (data_len % 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
			return -ERANGE;
		}

		new_len += sizeof(struct geneve_opt) + data_len;
		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
			NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
			return -ERANGE;
		}
		opt->length = data_len / 4;
		memcpy(opt->opt_data, nla_data(data), data_len);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
		opt->opt_class = nla_get_be16(class);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
		opt->type = nla_get_u8(type);
	}

	return sizeof(struct geneve_opt) + data_len;
}

static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			    int depth, int option_len,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
	struct vxlan_metadata *md;
	int err;

	md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
		NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
			       vxlan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
		md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
		md->gbp &= VXLAN_GBP_MASK;
	}

	return sizeof(*md);
}
static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
	struct erspan_metadata *md;
	int err;

	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));
	md->version = 1;

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
		NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
			       erspan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
		md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);

	if (md->version == 1) {
		if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
			return -EINVAL;
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
			memset(&md->u, 0x00, sizeof(md->u));
			md->u.index = nla_get_be32(nla);
		}
	} else if (md->version == 2) {
		if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
				    !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
			return -EINVAL;
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
			md->u.md2.dir = nla_get_u8(nla);
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
			set_hwid(&md->u.md2, nla_get_u8(nla));
		}
	} else {
		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
		return -EINVAL;
	}

	return sizeof(*md);
}

static int fl_set_gtp_opt(const struct nlattr *nla, struct fl_flow_key *key,
			  int depth, int option_len,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1];
	struct gtp_pdu_session_info *sinfo;
	u8 len = key->enc_opts.len;
	int err;

	sinfo = (struct gtp_pdu_session_info *)&key->enc_opts.data[len];
	memset(sinfo, 0xff, option_len);

	if (!depth)
		return sizeof(*sinfo);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GTP) {
		NL_SET_ERR_MSG_MOD(extack, "Non-gtp option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_GTP_MAX, nla,
			       gtp_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Missing tunnel key gtp option pdu type or qfi");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE])
		sinfo->pdu_type =
			nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]);

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])
		sinfo->qfi = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]);

	return sizeof(*sinfo);
}
static int fl_set_pfcp_opt(const struct nlattr *nla, struct fl_flow_key *key,
			   int depth, int option_len,
			   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX + 1];
	struct pfcp_metadata *md;
	int err;

	md = (struct pfcp_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_PFCP) {
		NL_SET_ERR_MSG_MOD(extack, "Non-pfcp option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX, nla,
			       pfcp_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing tunnel key pfcp option type");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE])
		md->type = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE]);

	if (tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID])
		md->seid = nla_get_be64(tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID]);

	return sizeof(*md);
}

static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
			  struct fl_flow_key *mask,
			  struct netlink_ext_ack *extack)
{
	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
	int err, option_len, key_depth, msk_depth = 0;

	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
					     enc_opts_policy, extack);
	if (err)
		return err;

	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);

	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
						     enc_opts_policy, extack);
		if (err)
			return err;

		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
		if (!nla_ok(nla_opt_msk, msk_depth)) {
			NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
			return -EINVAL;
		}
	}

	nla_for_each_attr(nla_opt_key, nla_enc_key,
			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
		switch (nla_type(nla_opt_key)) {
		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
			if (key->enc_opts.dst_opt_type &&
			    key->enc_opts.dst_opt_type !=
			    IP_TUNNEL_GENEVE_OPT_BIT) {
				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = IP_TUNNEL_GENEVE_OPT_BIT;
			option_len = fl_set_geneve_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = IP_TUNNEL_GENEVE_OPT_BIT;
			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
				return -EINVAL;
			}
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = IP_TUNNEL_VXLAN_OPT_BIT;
			option_len = fl_set_vxlan_opt(nla_opt_key, key,
						      key_depth, option_len,
						      extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = IP_TUNNEL_VXLAN_OPT_BIT;
			option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
						      msk_depth, option_len,
						      extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
				return -EINVAL;
			}
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = IP_TUNNEL_ERSPAN_OPT_BIT;
			option_len = fl_set_erspan_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = IP_TUNNEL_ERSPAN_OPT_BIT;
			option_len = fl_set_erspan_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
				return -EINVAL;
			}
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_GTP:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Duplicate type for gtp options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = IP_TUNNEL_GTP_OPT_BIT;
			option_len = fl_set_gtp_opt(nla_opt_key, key,
						    key_depth, option_len,
						    extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = IP_TUNNEL_GTP_OPT_BIT;
			option_len = fl_set_gtp_opt(nla_opt_msk, mask,
						    msk_depth, option_len,
						    extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Key and mask miss aligned");
				return -EINVAL;
			}
			break;
1641 */ 1642 mask->enc_opts.dst_opt_type = IP_TUNNEL_PFCP_OPT_BIT; 1643 option_len = fl_set_pfcp_opt(nla_opt_msk, mask, 1644 msk_depth, option_len, 1645 extack); 1646 if (option_len < 0) 1647 return option_len; 1648 1649 mask->enc_opts.len += option_len; 1650 if (key->enc_opts.len != mask->enc_opts.len) { 1651 NL_SET_ERR_MSG_MOD(extack, "Key and mask miss aligned"); 1652 return -EINVAL; 1653 } 1654 break; 1655 default: 1656 NL_SET_ERR_MSG(extack, "Unknown tunnel option type"); 1657 return -EINVAL; 1658 } 1659 1660 if (!msk_depth) 1661 continue; 1662 1663 if (!nla_ok(nla_opt_msk, msk_depth)) { 1664 NL_SET_ERR_MSG(extack, "A mask attribute is invalid"); 1665 return -EINVAL; 1666 } 1667 nla_opt_msk = nla_next(nla_opt_msk, &msk_depth); 1668 } 1669 1670 return 0; 1671 } 1672 1673 static int fl_validate_ct_state(u16 state, struct nlattr *tb, 1674 struct netlink_ext_ack *extack) 1675 { 1676 if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) { 1677 NL_SET_ERR_MSG_ATTR(extack, tb, 1678 "no trk, so no other flag can be set"); 1679 return -EINVAL; 1680 } 1681 1682 if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW && 1683 state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) { 1684 NL_SET_ERR_MSG_ATTR(extack, tb, 1685 "new and est are mutually exclusive"); 1686 return -EINVAL; 1687 } 1688 1689 if (state & TCA_FLOWER_KEY_CT_FLAGS_INVALID && 1690 state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 1691 TCA_FLOWER_KEY_CT_FLAGS_INVALID)) { 1692 NL_SET_ERR_MSG_ATTR(extack, tb, 1693 "when inv is set, only trk may be set"); 1694 return -EINVAL; 1695 } 1696 1697 if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW && 1698 state & TCA_FLOWER_KEY_CT_FLAGS_REPLY) { 1699 NL_SET_ERR_MSG_ATTR(extack, tb, 1700 "new and rpl are mutually exclusive"); 1701 return -EINVAL; 1702 } 1703 1704 return 0; 1705 } 1706 1707 static int fl_set_key_ct(struct nlattr **tb, 1708 struct flow_dissector_key_ct *key, 1709 struct flow_dissector_key_ct *mask, 1710 struct netlink_ext_ack *extack) 1711 { 1712 if (tb[TCA_FLOWER_KEY_CT_STATE]) { 1713 int err; 1714 1715 if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) { 1716 NL_SET_ERR_MSG(extack, "Conntrack isn't enabled"); 1717 return -EOPNOTSUPP; 1718 } 1719 fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE, 1720 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK, 1721 sizeof(key->ct_state)); 1722 1723 err = fl_validate_ct_state(key->ct_state & mask->ct_state, 1724 tb[TCA_FLOWER_KEY_CT_STATE_MASK], 1725 extack); 1726 if (err) 1727 return err; 1728 1729 } 1730 if (tb[TCA_FLOWER_KEY_CT_ZONE]) { 1731 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) { 1732 NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled"); 1733 return -EOPNOTSUPP; 1734 } 1735 fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE, 1736 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK, 1737 sizeof(key->ct_zone)); 1738 } 1739 if (tb[TCA_FLOWER_KEY_CT_MARK]) { 1740 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) { 1741 NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled"); 1742 return -EOPNOTSUPP; 1743 } 1744 fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK, 1745 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK, 1746 sizeof(key->ct_mark)); 1747 } 1748 if (tb[TCA_FLOWER_KEY_CT_LABELS]) { 1749 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) { 1750 NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled"); 1751 return -EOPNOTSUPP; 1752 } 1753 fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS, 1754 mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK, 1755 sizeof(key->ct_labels)); 1756 } 1757 1758 return 0; 1759 } 1760 1761 static bool is_vlan_key(struct nlattr *tb, __be16 
static bool is_vlan_key(struct nlattr *tb, __be16 *ethertype,
			struct fl_flow_key *key, struct fl_flow_key *mask,
			int vthresh)
{
	const bool good_num_of_vlans = key->num_of_vlans.num_of_vlans > vthresh;

	if (!tb) {
		*ethertype = 0;
		return good_num_of_vlans;
	}

	*ethertype = nla_get_be16(tb);
	if (good_num_of_vlans || eth_type_vlan(*ethertype))
		return true;

	key->basic.n_proto = *ethertype;
	mask->basic.n_proto = cpu_to_be16(~0);
	return false;
}

static void fl_set_key_cfm_md_level(struct nlattr **tb,
				    struct fl_flow_key *key,
				    struct fl_flow_key *mask,
				    struct netlink_ext_ack *extack)
{
	u8 level;

	if (!tb[TCA_FLOWER_KEY_CFM_MD_LEVEL])
		return;

	level = nla_get_u8(tb[TCA_FLOWER_KEY_CFM_MD_LEVEL]);
	key->cfm.mdl_ver = FIELD_PREP(FLOW_DIS_CFM_MDL_MASK, level);
	mask->cfm.mdl_ver = FLOW_DIS_CFM_MDL_MASK;
}

static void fl_set_key_cfm_opcode(struct nlattr **tb,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mask,
				  struct netlink_ext_ack *extack)
{
	fl_set_key_val(tb, &key->cfm.opcode, TCA_FLOWER_KEY_CFM_OPCODE,
		       &mask->cfm.opcode, TCA_FLOWER_UNSPEC,
		       sizeof(key->cfm.opcode));
}

static int fl_set_key_cfm(struct nlattr **tb,
			  struct fl_flow_key *key,
			  struct fl_flow_key *mask,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *nla_cfm_opt[TCA_FLOWER_KEY_CFM_OPT_MAX + 1];
	int err;

	if (!tb[TCA_FLOWER_KEY_CFM])
		return 0;

	err = nla_parse_nested(nla_cfm_opt, TCA_FLOWER_KEY_CFM_OPT_MAX,
			       tb[TCA_FLOWER_KEY_CFM], cfm_opt_policy, extack);
	if (err < 0)
		return err;

	fl_set_key_cfm_opcode(nla_cfm_opt, key, mask, extack);
	fl_set_key_cfm_md_level(nla_cfm_opt, key, mask, extack);

	return 0;
}

static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask,
		      struct netlink_ext_ack *extack)
{
	__be16 ethertype;
	int ret = 0;

	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);

		if (err < 0)
			return err;
		key->meta.ingress_ifindex = err;
		mask->meta.ingress_ifindex = 0xffffffff;
	}

	fl_set_key_val(tb, &key->meta.l2_miss, TCA_FLOWER_L2_MISS,
		       &mask->meta.l2_miss, TCA_FLOWER_UNSPEC,
		       sizeof(key->meta.l2_miss));

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));
	fl_set_key_val(tb, &key->num_of_vlans,
		       TCA_FLOWER_KEY_NUM_OF_VLANS,
		       &mask->num_of_vlans,
		       TCA_FLOWER_UNSPEC,
		       sizeof(key->num_of_vlans));

	if (is_vlan_key(tb[TCA_FLOWER_KEY_ETH_TYPE], &ethertype, key, mask, 0)) {
		fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
				TCA_FLOWER_KEY_VLAN_PRIO,
				TCA_FLOWER_KEY_VLAN_ETH_TYPE,
				&key->vlan, &mask->vlan);

		if (is_vlan_key(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE],
				&ethertype, key, mask, 1)) {
			fl_set_key_vlan(tb, ethertype,
					TCA_FLOWER_KEY_CVLAN_ID,
					TCA_FLOWER_KEY_CVLAN_PRIO,
					TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
					&key->cvlan, &mask->cvlan);
			fl_set_key_val(tb, &key->basic.n_proto,
				       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
				       &mask->basic.n_proto,
				       TCA_FLOWER_UNSPEC,
				       sizeof(key->basic.n_proto));
		}
	}
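	/* The two is_vlan_key() calls above walk through up to two VLAN
	 * tags (802.1Q or QinQ); key->basic.n_proto now holds the first
	 * ethertype that is not a VLAN TPID. A single-tagged match would
	 * look like (sketch; exact option spelling depends on the
	 * iproute2 version):
	 *
	 *   tc filter add dev eth0 ingress protocol 802.1Q pref 1 flower \
	 *           vlan_id 10 vlan_ethtype ip action drop
	 */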
1880 if (key->basic.n_proto == htons(ETH_P_PPP_SES)) 1881 fl_set_key_pppoe(tb, &key->pppoe, &mask->pppoe, key, mask); 1882 1883 if (key->basic.n_proto == htons(ETH_P_IP) || 1884 key->basic.n_proto == htons(ETH_P_IPV6)) { 1885 fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO, 1886 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC, 1887 sizeof(key->basic.ip_proto)); 1888 fl_set_key_ip(tb, false, &key->ip, &mask->ip); 1889 } 1890 1891 if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) { 1892 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; 1893 mask->control.addr_type = ~0; 1894 fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC, 1895 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK, 1896 sizeof(key->ipv4.src)); 1897 fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST, 1898 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK, 1899 sizeof(key->ipv4.dst)); 1900 } else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) { 1901 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 1902 mask->control.addr_type = ~0; 1903 fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC, 1904 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK, 1905 sizeof(key->ipv6.src)); 1906 fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST, 1907 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK, 1908 sizeof(key->ipv6.dst)); 1909 } 1910 1911 if (key->basic.ip_proto == IPPROTO_TCP) { 1912 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC, 1913 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK, 1914 sizeof(key->tp.src)); 1915 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST, 1916 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK, 1917 sizeof(key->tp.dst)); 1918 fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS, 1919 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK, 1920 sizeof(key->tcp.flags)); 1921 } else if (key->basic.ip_proto == IPPROTO_UDP) { 1922 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC, 1923 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK, 1924 sizeof(key->tp.src)); 1925 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST, 1926 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK, 1927 sizeof(key->tp.dst)); 1928 } else if (key->basic.ip_proto == IPPROTO_SCTP) { 1929 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC, 1930 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK, 1931 sizeof(key->tp.src)); 1932 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST, 1933 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK, 1934 sizeof(key->tp.dst)); 1935 } else if (key->basic.n_proto == htons(ETH_P_IP) && 1936 key->basic.ip_proto == IPPROTO_ICMP) { 1937 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE, 1938 &mask->icmp.type, 1939 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK, 1940 sizeof(key->icmp.type)); 1941 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE, 1942 &mask->icmp.code, 1943 TCA_FLOWER_KEY_ICMPV4_CODE_MASK, 1944 sizeof(key->icmp.code)); 1945 } else if (key->basic.n_proto == htons(ETH_P_IPV6) && 1946 key->basic.ip_proto == IPPROTO_ICMPV6) { 1947 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE, 1948 &mask->icmp.type, 1949 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK, 1950 sizeof(key->icmp.type)); 1951 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE, 1952 &mask->icmp.code, 1953 TCA_FLOWER_KEY_ICMPV6_CODE_MASK, 1954 sizeof(key->icmp.code)); 1955 } else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) || 1956 key->basic.n_proto == htons(ETH_P_MPLS_MC)) { 1957 ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack); 1958 if (ret) 
1959 return ret; 1960 } else if (key->basic.n_proto == htons(ETH_P_ARP) || 1961 key->basic.n_proto == htons(ETH_P_RARP)) { 1962 fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP, 1963 &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK, 1964 sizeof(key->arp.sip)); 1965 fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP, 1966 &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK, 1967 sizeof(key->arp.tip)); 1968 fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP, 1969 &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK, 1970 sizeof(key->arp.op)); 1971 fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA, 1972 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK, 1973 sizeof(key->arp.sha)); 1974 fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA, 1975 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK, 1976 sizeof(key->arp.tha)); 1977 } else if (key->basic.ip_proto == IPPROTO_L2TP) { 1978 fl_set_key_val(tb, &key->l2tpv3.session_id, 1979 TCA_FLOWER_KEY_L2TPV3_SID, 1980 &mask->l2tpv3.session_id, TCA_FLOWER_UNSPEC, 1981 sizeof(key->l2tpv3.session_id)); 1982 } else if (key->basic.n_proto == htons(ETH_P_CFM)) { 1983 ret = fl_set_key_cfm(tb, key, mask, extack); 1984 if (ret) 1985 return ret; 1986 } 1987 1988 if (key->basic.ip_proto == IPPROTO_TCP || 1989 key->basic.ip_proto == IPPROTO_UDP || 1990 key->basic.ip_proto == IPPROTO_SCTP) { 1991 ret = fl_set_key_port_range(tb, key, mask, extack); 1992 if (ret) 1993 return ret; 1994 } 1995 1996 if (tb[TCA_FLOWER_KEY_SPI]) { 1997 ret = fl_set_key_spi(tb, key, mask, extack); 1998 if (ret) 1999 return ret; 2000 } 2001 2002 if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] || 2003 tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) { 2004 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; 2005 mask->enc_control.addr_type = ~0; 2006 fl_set_key_val(tb, &key->enc_ipv4.src, 2007 TCA_FLOWER_KEY_ENC_IPV4_SRC, 2008 &mask->enc_ipv4.src, 2009 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK, 2010 sizeof(key->enc_ipv4.src)); 2011 fl_set_key_val(tb, &key->enc_ipv4.dst, 2012 TCA_FLOWER_KEY_ENC_IPV4_DST, 2013 &mask->enc_ipv4.dst, 2014 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK, 2015 sizeof(key->enc_ipv4.dst)); 2016 } 2017 2018 if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] || 2019 tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) { 2020 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 2021 mask->enc_control.addr_type = ~0; 2022 fl_set_key_val(tb, &key->enc_ipv6.src, 2023 TCA_FLOWER_KEY_ENC_IPV6_SRC, 2024 &mask->enc_ipv6.src, 2025 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK, 2026 sizeof(key->enc_ipv6.src)); 2027 fl_set_key_val(tb, &key->enc_ipv6.dst, 2028 TCA_FLOWER_KEY_ENC_IPV6_DST, 2029 &mask->enc_ipv6.dst, 2030 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK, 2031 sizeof(key->enc_ipv6.dst)); 2032 } 2033 2034 fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID, 2035 &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC, 2036 sizeof(key->enc_key_id.keyid)); 2037 2038 fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT, 2039 &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK, 2040 sizeof(key->enc_tp.src)); 2041 2042 fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT, 2043 &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK, 2044 sizeof(key->enc_tp.dst)); 2045 2046 fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip); 2047 2048 fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH, 2049 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK, 2050 sizeof(key->hash.hash)); 2051 2052 if (tb[TCA_FLOWER_KEY_ENC_OPTS]) { 2053 ret = fl_set_enc_opt(tb, key, mask, extack); 2054 if (ret) 2055 return ret; 2056 } 2057 2058 ret = fl_set_key_ct(tb, &key->ct, 
&mask->ct, extack); 2059 if (ret) 2060 return ret; 2061 2062 if (tb[TCA_FLOWER_KEY_FLAGS]) 2063 ret = fl_set_key_flags(tb, &key->control.flags, 2064 &mask->control.flags, extack); 2065 2066 return ret; 2067 } 2068 2069 static void fl_mask_copy(struct fl_flow_mask *dst, 2070 struct fl_flow_mask *src) 2071 { 2072 const void *psrc = fl_key_get_start(&src->key, src); 2073 void *pdst = fl_key_get_start(&dst->key, src); 2074 2075 memcpy(pdst, psrc, fl_mask_range(src)); 2076 dst->range = src->range; 2077 } 2078 2079 static const struct rhashtable_params fl_ht_params = { 2080 .key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */ 2081 .head_offset = offsetof(struct cls_fl_filter, ht_node), 2082 .automatic_shrinking = true, 2083 }; 2084 2085 static int fl_init_mask_hashtable(struct fl_flow_mask *mask) 2086 { 2087 mask->filter_ht_params = fl_ht_params; 2088 mask->filter_ht_params.key_len = fl_mask_range(mask); 2089 mask->filter_ht_params.key_offset += mask->range.start; 2090 2091 return rhashtable_init(&mask->ht, &mask->filter_ht_params); 2092 } 2093 2094 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member) 2095 #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member) 2096 2097 #define FL_KEY_IS_MASKED(mask, member) \ 2098 memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \ 2099 0, FL_KEY_MEMBER_SIZE(member)) \ 2100 2101 #define FL_KEY_SET(keys, cnt, id, member) \ 2102 do { \ 2103 keys[cnt].key_id = id; \ 2104 keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \ 2105 cnt++; \ 2106 } while(0); 2107 2108 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \ 2109 do { \ 2110 if (FL_KEY_IS_MASKED(mask, member)) \ 2111 FL_KEY_SET(keys, cnt, id, member); \ 2112 } while(0); 2113 2114 static void fl_init_dissector(struct flow_dissector *dissector, 2115 struct fl_flow_key *mask) 2116 { 2117 struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX]; 2118 size_t cnt = 0; 2119 2120 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2121 FLOW_DISSECTOR_KEY_META, meta); 2122 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control); 2123 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic); 2124 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2125 FLOW_DISSECTOR_KEY_ETH_ADDRS, eth); 2126 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2127 FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4); 2128 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2129 FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6); 2130 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2131 FLOW_DISSECTOR_KEY_PORTS, tp); 2132 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2133 FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range); 2134 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2135 FLOW_DISSECTOR_KEY_IP, ip); 2136 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2137 FLOW_DISSECTOR_KEY_TCP, tcp); 2138 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2139 FLOW_DISSECTOR_KEY_ICMP, icmp); 2140 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2141 FLOW_DISSECTOR_KEY_ARP, arp); 2142 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2143 FLOW_DISSECTOR_KEY_MPLS, mpls); 2144 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2145 FLOW_DISSECTOR_KEY_VLAN, vlan); 2146 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2147 FLOW_DISSECTOR_KEY_CVLAN, cvlan); 2148 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2149 FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id); 2150 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2151 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4); 2152 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2153 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6); 2154 if (FL_KEY_IS_MASKED(mask, enc_ipv4) || 2155 FL_KEY_IS_MASKED(mask, enc_ipv6)) 2156 FL_KEY_SET(keys, cnt, 
FLOW_DISSECTOR_KEY_ENC_CONTROL, 2157 enc_control); 2158 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2159 FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp); 2160 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2161 FLOW_DISSECTOR_KEY_ENC_IP, enc_ip); 2162 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2163 FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts); 2164 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2165 FLOW_DISSECTOR_KEY_CT, ct); 2166 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2167 FLOW_DISSECTOR_KEY_HASH, hash); 2168 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2169 FLOW_DISSECTOR_KEY_NUM_OF_VLANS, num_of_vlans); 2170 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2171 FLOW_DISSECTOR_KEY_PPPOE, pppoe); 2172 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2173 FLOW_DISSECTOR_KEY_L2TPV3, l2tpv3); 2174 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2175 FLOW_DISSECTOR_KEY_IPSEC, ipsec); 2176 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2177 FLOW_DISSECTOR_KEY_CFM, cfm); 2178 2179 skb_flow_dissector_init(dissector, keys, cnt); 2180 } 2181 2182 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head, 2183 struct fl_flow_mask *mask) 2184 { 2185 struct fl_flow_mask *newmask; 2186 int err; 2187 2188 newmask = kzalloc(sizeof(*newmask), GFP_KERNEL); 2189 if (!newmask) 2190 return ERR_PTR(-ENOMEM); 2191 2192 fl_mask_copy(newmask, mask); 2193 2194 if ((newmask->key.tp_range.tp_min.dst && 2195 newmask->key.tp_range.tp_max.dst) || 2196 (newmask->key.tp_range.tp_min.src && 2197 newmask->key.tp_range.tp_max.src)) 2198 newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE; 2199 2200 err = fl_init_mask_hashtable(newmask); 2201 if (err) 2202 goto errout_free; 2203 2204 fl_init_dissector(&newmask->dissector, &newmask->key); 2205 2206 INIT_LIST_HEAD_RCU(&newmask->filters); 2207 2208 refcount_set(&newmask->refcnt, 1); 2209 err = rhashtable_replace_fast(&head->ht, &mask->ht_node, 2210 &newmask->ht_node, mask_ht_params); 2211 if (err) 2212 goto errout_destroy; 2213 2214 spin_lock(&head->masks_lock); 2215 list_add_tail_rcu(&newmask->list, &head->masks); 2216 spin_unlock(&head->masks_lock); 2217 2218 return newmask; 2219 2220 errout_destroy: 2221 rhashtable_destroy(&newmask->ht); 2222 errout_free: 2223 kfree(newmask); 2224 2225 return ERR_PTR(err); 2226 } 2227 2228 static int fl_check_assign_mask(struct cls_fl_head *head, 2229 struct cls_fl_filter *fnew, 2230 struct cls_fl_filter *fold, 2231 struct fl_flow_mask *mask) 2232 { 2233 struct fl_flow_mask *newmask; 2234 int ret = 0; 2235 2236 rcu_read_lock(); 2237 2238 /* Insert mask as temporary node to prevent concurrent creation of mask 2239 * with same key. Any concurrent lookups with same key will return 2240 * -EAGAIN because mask's refcnt is zero. 
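	 * cls_api reacts to -EAGAIN by replaying the whole request, so a
	 * lookup that races with mask creation costs one retry and never
	 * observes a half-initialized mask.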
2241 */ 2242 fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht, 2243 &mask->ht_node, 2244 mask_ht_params); 2245 if (!fnew->mask) { 2246 rcu_read_unlock(); 2247 2248 if (fold) { 2249 ret = -EINVAL; 2250 goto errout_cleanup; 2251 } 2252 2253 newmask = fl_create_new_mask(head, mask); 2254 if (IS_ERR(newmask)) { 2255 ret = PTR_ERR(newmask); 2256 goto errout_cleanup; 2257 } 2258 2259 fnew->mask = newmask; 2260 return 0; 2261 } else if (IS_ERR(fnew->mask)) { 2262 ret = PTR_ERR(fnew->mask); 2263 } else if (fold && fold->mask != fnew->mask) { 2264 ret = -EINVAL; 2265 } else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) { 2266 /* Mask was deleted concurrently, try again */ 2267 ret = -EAGAIN; 2268 } 2269 rcu_read_unlock(); 2270 return ret; 2271 2272 errout_cleanup: 2273 rhashtable_remove_fast(&head->ht, &mask->ht_node, 2274 mask_ht_params); 2275 return ret; 2276 } 2277 2278 static bool fl_needs_tc_skb_ext(const struct fl_flow_key *mask) 2279 { 2280 return mask->meta.l2_miss; 2281 } 2282 2283 static int fl_ht_insert_unique(struct cls_fl_filter *fnew, 2284 struct cls_fl_filter *fold, 2285 bool *in_ht) 2286 { 2287 struct fl_flow_mask *mask = fnew->mask; 2288 int err; 2289 2290 err = rhashtable_lookup_insert_fast(&mask->ht, 2291 &fnew->ht_node, 2292 mask->filter_ht_params); 2293 if (err) { 2294 *in_ht = false; 2295 /* It is okay if filter with same key exists when 2296 * overwriting. 2297 */ 2298 return fold && err == -EEXIST ? 0 : err; 2299 } 2300 2301 *in_ht = true; 2302 return 0; 2303 } 2304 2305 static int fl_change(struct net *net, struct sk_buff *in_skb, 2306 struct tcf_proto *tp, unsigned long base, 2307 u32 handle, struct nlattr **tca, 2308 void **arg, u32 flags, 2309 struct netlink_ext_ack *extack) 2310 { 2311 struct cls_fl_head *head = fl_head_dereference(tp); 2312 bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL); 2313 struct cls_fl_filter *fold = *arg; 2314 bool bound_to_filter = false; 2315 struct cls_fl_filter *fnew; 2316 struct fl_flow_mask *mask; 2317 struct nlattr **tb; 2318 bool in_ht; 2319 int err; 2320 2321 if (!tca[TCA_OPTIONS]) { 2322 err = -EINVAL; 2323 goto errout_fold; 2324 } 2325 2326 mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL); 2327 if (!mask) { 2328 err = -ENOBUFS; 2329 goto errout_fold; 2330 } 2331 2332 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL); 2333 if (!tb) { 2334 err = -ENOBUFS; 2335 goto errout_mask_alloc; 2336 } 2337 2338 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX, 2339 tca[TCA_OPTIONS], fl_policy, NULL); 2340 if (err < 0) 2341 goto errout_tb; 2342 2343 if (fold && handle && fold->handle != handle) { 2344 err = -EINVAL; 2345 goto errout_tb; 2346 } 2347 2348 fnew = kzalloc(sizeof(*fnew), GFP_KERNEL); 2349 if (!fnew) { 2350 err = -ENOBUFS; 2351 goto errout_tb; 2352 } 2353 INIT_LIST_HEAD(&fnew->hw_list); 2354 refcount_set(&fnew->refcnt, 1); 2355 2356 if (tb[TCA_FLOWER_FLAGS]) { 2357 fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]); 2358 2359 if (!tc_flags_valid(fnew->flags)) { 2360 kfree(fnew); 2361 err = -EINVAL; 2362 goto errout_tb; 2363 } 2364 } 2365 2366 if (!fold) { 2367 spin_lock(&tp->lock); 2368 if (!handle) { 2369 handle = 1; 2370 err = idr_alloc_u32(&head->handle_idr, NULL, &handle, 2371 INT_MAX, GFP_ATOMIC); 2372 } else { 2373 err = idr_alloc_u32(&head->handle_idr, NULL, &handle, 2374 handle, GFP_ATOMIC); 2375 2376 /* Filter with specified handle was concurrently 2377 * inserted after initial check in cls_api. This is not 2378 * necessarily an error if NLM_F_EXCL is not set in 2379 * message flags. 
Returning EAGAIN will cause cls_api to 2380 * try to update concurrently inserted rule. 2381 */ 2382 if (err == -ENOSPC) 2383 err = -EAGAIN; 2384 } 2385 spin_unlock(&tp->lock); 2386 2387 if (err) { 2388 kfree(fnew); 2389 goto errout_tb; 2390 } 2391 } 2392 fnew->handle = handle; 2393 2394 err = tcf_exts_init_ex(&fnew->exts, net, TCA_FLOWER_ACT, 0, tp, handle, 2395 !tc_skip_hw(fnew->flags)); 2396 if (err < 0) 2397 goto errout_idr; 2398 2399 err = tcf_exts_validate_ex(net, tp, tb, tca[TCA_RATE], 2400 &fnew->exts, flags, fnew->flags, 2401 extack); 2402 if (err < 0) 2403 goto errout_idr; 2404 2405 if (tb[TCA_FLOWER_CLASSID]) { 2406 fnew->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]); 2407 if (flags & TCA_ACT_FLAGS_NO_RTNL) 2408 rtnl_lock(); 2409 tcf_bind_filter(tp, &fnew->res, base); 2410 if (flags & TCA_ACT_FLAGS_NO_RTNL) 2411 rtnl_unlock(); 2412 bound_to_filter = true; 2413 } 2414 2415 err = fl_set_key(net, tb, &fnew->key, &mask->key, extack); 2416 if (err) 2417 goto unbind_filter; 2418 2419 fl_mask_update_range(mask); 2420 fl_set_masked_key(&fnew->mkey, &fnew->key, mask); 2421 2422 if (!fl_mask_fits_tmplt(tp->chain->tmplt_priv, mask)) { 2423 NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template"); 2424 err = -EINVAL; 2425 goto unbind_filter; 2426 } 2427 2428 /* Enable tc skb extension if filter matches on data extracted from 2429 * this extension. 2430 */ 2431 if (fl_needs_tc_skb_ext(&mask->key)) { 2432 fnew->needs_tc_skb_ext = 1; 2433 tc_skb_ext_tc_enable(); 2434 } 2435 2436 err = fl_check_assign_mask(head, fnew, fold, mask); 2437 if (err) 2438 goto unbind_filter; 2439 2440 err = fl_ht_insert_unique(fnew, fold, &in_ht); 2441 if (err) 2442 goto errout_mask; 2443 2444 if (!tc_skip_hw(fnew->flags)) { 2445 err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack); 2446 if (err) 2447 goto errout_ht; 2448 } 2449 2450 if (!tc_in_hw(fnew->flags)) 2451 fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW; 2452 2453 spin_lock(&tp->lock); 2454 2455 /* tp was deleted concurrently. -EAGAIN will cause caller to lookup 2456 * proto again or create new one, if necessary. 2457 */ 2458 if (tp->deleting) { 2459 err = -EAGAIN; 2460 goto errout_hw; 2461 } 2462 2463 if (fold) { 2464 /* Fold filter was deleted concurrently. Retry lookup. */ 2465 if (fold->deleted) { 2466 err = -EAGAIN; 2467 goto errout_hw; 2468 } 2469 2470 fnew->handle = handle; 2471 2472 if (!in_ht) { 2473 struct rhashtable_params params = 2474 fnew->mask->filter_ht_params; 2475 2476 err = rhashtable_insert_fast(&fnew->mask->ht, 2477 &fnew->ht_node, 2478 params); 2479 if (err) 2480 goto errout_hw; 2481 in_ht = true; 2482 } 2483 2484 refcount_inc(&fnew->refcnt); 2485 rhashtable_remove_fast(&fold->mask->ht, 2486 &fold->ht_node, 2487 fold->mask->filter_ht_params); 2488 idr_replace(&head->handle_idr, fnew, fnew->handle); 2489 list_replace_rcu(&fold->list, &fnew->list); 2490 fold->deleted = true; 2491 2492 spin_unlock(&tp->lock); 2493 2494 fl_mask_put(head, fold->mask); 2495 if (!tc_skip_hw(fold->flags)) 2496 fl_hw_destroy_filter(tp, fold, rtnl_held, NULL); 2497 tcf_unbind_filter(tp, &fold->res); 2498 /* Caller holds reference to fold, so refcnt is always > 0 2499 * after this. 
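	 * The refcount_dec() below releases the reference that the
	 * filter list held on fold (it has just been unlinked); the
	 * caller's own reference is dropped by the __fl_put() that
	 * follows, which frees the filter once the count reaches zero.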
2500 */ 2501 refcount_dec(&fold->refcnt); 2502 __fl_put(fold); 2503 } else { 2504 idr_replace(&head->handle_idr, fnew, fnew->handle); 2505 2506 refcount_inc(&fnew->refcnt); 2507 list_add_tail_rcu(&fnew->list, &fnew->mask->filters); 2508 spin_unlock(&tp->lock); 2509 } 2510 2511 *arg = fnew; 2512 2513 kfree(tb); 2514 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work); 2515 return 0; 2516 2517 errout_ht: 2518 spin_lock(&tp->lock); 2519 errout_hw: 2520 fnew->deleted = true; 2521 spin_unlock(&tp->lock); 2522 if (!tc_skip_hw(fnew->flags)) 2523 fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL); 2524 if (in_ht) 2525 rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node, 2526 fnew->mask->filter_ht_params); 2527 errout_mask: 2528 fl_mask_put(head, fnew->mask); 2529 2530 unbind_filter: 2531 if (bound_to_filter) { 2532 if (flags & TCA_ACT_FLAGS_NO_RTNL) 2533 rtnl_lock(); 2534 tcf_unbind_filter(tp, &fnew->res); 2535 if (flags & TCA_ACT_FLAGS_NO_RTNL) 2536 rtnl_unlock(); 2537 } 2538 2539 errout_idr: 2540 if (!fold) { 2541 spin_lock(&tp->lock); 2542 idr_remove(&head->handle_idr, fnew->handle); 2543 spin_unlock(&tp->lock); 2544 } 2545 __fl_put(fnew); 2546 errout_tb: 2547 kfree(tb); 2548 errout_mask_alloc: 2549 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work); 2550 errout_fold: 2551 if (fold) 2552 __fl_put(fold); 2553 return err; 2554 } 2555 2556 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last, 2557 bool rtnl_held, struct netlink_ext_ack *extack) 2558 { 2559 struct cls_fl_head *head = fl_head_dereference(tp); 2560 struct cls_fl_filter *f = arg; 2561 bool last_on_mask; 2562 int err = 0; 2563 2564 err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack); 2565 *last = list_empty(&head->masks); 2566 __fl_put(f); 2567 2568 return err; 2569 } 2570 2571 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg, 2572 bool rtnl_held) 2573 { 2574 struct cls_fl_head *head = fl_head_dereference(tp); 2575 unsigned long id = arg->cookie, tmp; 2576 struct cls_fl_filter *f; 2577 2578 arg->count = arg->skip; 2579 2580 rcu_read_lock(); 2581 idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) { 2582 /* don't return filters that are being deleted */ 2583 if (!f || !refcount_inc_not_zero(&f->refcnt)) 2584 continue; 2585 rcu_read_unlock(); 2586 2587 if (arg->fn(tp, f, arg) < 0) { 2588 __fl_put(f); 2589 arg->stop = 1; 2590 rcu_read_lock(); 2591 break; 2592 } 2593 __fl_put(f); 2594 arg->count++; 2595 rcu_read_lock(); 2596 } 2597 rcu_read_unlock(); 2598 arg->cookie = id; 2599 } 2600 2601 static struct cls_fl_filter * 2602 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add) 2603 { 2604 struct cls_fl_head *head = fl_head_dereference(tp); 2605 2606 spin_lock(&tp->lock); 2607 if (list_empty(&head->hw_filters)) { 2608 spin_unlock(&tp->lock); 2609 return NULL; 2610 } 2611 2612 if (!f) 2613 f = list_entry(&head->hw_filters, struct cls_fl_filter, 2614 hw_list); 2615 list_for_each_entry_continue(f, &head->hw_filters, hw_list) { 2616 if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) { 2617 spin_unlock(&tp->lock); 2618 return f; 2619 } 2620 } 2621 2622 spin_unlock(&tp->lock); 2623 return NULL; 2624 } 2625 2626 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, 2627 void *cb_priv, struct netlink_ext_ack *extack) 2628 { 2629 struct tcf_block *block = tp->chain->block; 2630 struct flow_cls_offload cls_flower = {}; 2631 struct cls_fl_filter *f = NULL; 2632 int err; 2633 2634 /* hw_filters list can only be changed by hw offload functions 
after 2635 * obtaining rtnl lock. Make sure it is not changed while reoffload is 2636 * iterating it. 2637 */ 2638 ASSERT_RTNL(); 2639 2640 while ((f = fl_get_next_hw_filter(tp, f, add))) { 2641 cls_flower.rule = 2642 flow_rule_alloc(tcf_exts_num_actions(&f->exts)); 2643 if (!cls_flower.rule) { 2644 __fl_put(f); 2645 return -ENOMEM; 2646 } 2647 2648 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, 2649 extack); 2650 cls_flower.command = add ? 2651 FLOW_CLS_REPLACE : FLOW_CLS_DESTROY; 2652 cls_flower.cookie = (unsigned long)f; 2653 cls_flower.rule->match.dissector = &f->mask->dissector; 2654 cls_flower.rule->match.mask = &f->mask->key; 2655 cls_flower.rule->match.key = &f->mkey; 2656 2657 err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts, 2658 cls_flower.common.extack); 2659 if (err) { 2660 kfree(cls_flower.rule); 2661 if (tc_skip_sw(f->flags)) { 2662 __fl_put(f); 2663 return err; 2664 } 2665 goto next_flow; 2666 } 2667 2668 cls_flower.classid = f->res.classid; 2669 2670 err = tc_setup_cb_reoffload(block, tp, add, cb, 2671 TC_SETUP_CLSFLOWER, &cls_flower, 2672 cb_priv, &f->flags, 2673 &f->in_hw_count); 2674 tc_cleanup_offload_action(&cls_flower.rule->action); 2675 kfree(cls_flower.rule); 2676 2677 if (err) { 2678 __fl_put(f); 2679 return err; 2680 } 2681 next_flow: 2682 __fl_put(f); 2683 } 2684 2685 return 0; 2686 } 2687 2688 static void fl_hw_add(struct tcf_proto *tp, void *type_data) 2689 { 2690 struct flow_cls_offload *cls_flower = type_data; 2691 struct cls_fl_filter *f = 2692 (struct cls_fl_filter *) cls_flower->cookie; 2693 struct cls_fl_head *head = fl_head_dereference(tp); 2694 2695 spin_lock(&tp->lock); 2696 list_add(&f->hw_list, &head->hw_filters); 2697 spin_unlock(&tp->lock); 2698 } 2699 2700 static void fl_hw_del(struct tcf_proto *tp, void *type_data) 2701 { 2702 struct flow_cls_offload *cls_flower = type_data; 2703 struct cls_fl_filter *f = 2704 (struct cls_fl_filter *) cls_flower->cookie; 2705 2706 spin_lock(&tp->lock); 2707 if (!list_empty(&f->hw_list)) 2708 list_del_init(&f->hw_list); 2709 spin_unlock(&tp->lock); 2710 } 2711 2712 static int fl_hw_create_tmplt(struct tcf_chain *chain, 2713 struct fl_flow_tmplt *tmplt) 2714 { 2715 struct flow_cls_offload cls_flower = {}; 2716 struct tcf_block *block = chain->block; 2717 2718 cls_flower.rule = flow_rule_alloc(0); 2719 if (!cls_flower.rule) 2720 return -ENOMEM; 2721 2722 cls_flower.common.chain_index = chain->index; 2723 cls_flower.command = FLOW_CLS_TMPLT_CREATE; 2724 cls_flower.cookie = (unsigned long) tmplt; 2725 cls_flower.rule->match.dissector = &tmplt->dissector; 2726 cls_flower.rule->match.mask = &tmplt->mask; 2727 cls_flower.rule->match.key = &tmplt->dummy_key; 2728 2729 /* We don't care if driver (any of them) fails to handle this 2730 * call. It serves just as a hint for it. 
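	 * The return value of tc_setup_cb_call() is deliberately not
	 * checked below: a driver that cannot pre-allocate state for the
	 * template will simply see the individual filters later.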
2731 */ 2732 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true); 2733 kfree(cls_flower.rule); 2734 2735 return 0; 2736 } 2737 2738 static void fl_hw_destroy_tmplt(struct tcf_chain *chain, 2739 struct fl_flow_tmplt *tmplt) 2740 { 2741 struct flow_cls_offload cls_flower = {}; 2742 struct tcf_block *block = chain->block; 2743 2744 cls_flower.common.chain_index = chain->index; 2745 cls_flower.command = FLOW_CLS_TMPLT_DESTROY; 2746 cls_flower.cookie = (unsigned long) tmplt; 2747 2748 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true); 2749 } 2750 2751 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain, 2752 struct nlattr **tca, 2753 struct netlink_ext_ack *extack) 2754 { 2755 struct fl_flow_tmplt *tmplt; 2756 struct nlattr **tb; 2757 int err; 2758 2759 if (!tca[TCA_OPTIONS]) 2760 return ERR_PTR(-EINVAL); 2761 2762 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL); 2763 if (!tb) 2764 return ERR_PTR(-ENOBUFS); 2765 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX, 2766 tca[TCA_OPTIONS], fl_policy, NULL); 2767 if (err) 2768 goto errout_tb; 2769 2770 tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL); 2771 if (!tmplt) { 2772 err = -ENOMEM; 2773 goto errout_tb; 2774 } 2775 tmplt->chain = chain; 2776 err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack); 2777 if (err) 2778 goto errout_tmplt; 2779 2780 fl_init_dissector(&tmplt->dissector, &tmplt->mask); 2781 2782 err = fl_hw_create_tmplt(chain, tmplt); 2783 if (err) 2784 goto errout_tmplt; 2785 2786 kfree(tb); 2787 return tmplt; 2788 2789 errout_tmplt: 2790 kfree(tmplt); 2791 errout_tb: 2792 kfree(tb); 2793 return ERR_PTR(err); 2794 } 2795 2796 static void fl_tmplt_destroy(void *tmplt_priv) 2797 { 2798 struct fl_flow_tmplt *tmplt = tmplt_priv; 2799 2800 fl_hw_destroy_tmplt(tmplt->chain, tmplt); 2801 kfree(tmplt); 2802 } 2803 2804 static void fl_tmplt_reoffload(struct tcf_chain *chain, bool add, 2805 flow_setup_cb_t *cb, void *cb_priv) 2806 { 2807 struct fl_flow_tmplt *tmplt = chain->tmplt_priv; 2808 struct flow_cls_offload cls_flower = {}; 2809 2810 cls_flower.rule = flow_rule_alloc(0); 2811 if (!cls_flower.rule) 2812 return; 2813 2814 cls_flower.common.chain_index = chain->index; 2815 cls_flower.command = add ? 
FLOW_CLS_TMPLT_CREATE : 2816 FLOW_CLS_TMPLT_DESTROY; 2817 cls_flower.cookie = (unsigned long) tmplt; 2818 cls_flower.rule->match.dissector = &tmplt->dissector; 2819 cls_flower.rule->match.mask = &tmplt->mask; 2820 cls_flower.rule->match.key = &tmplt->dummy_key; 2821 2822 cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv); 2823 kfree(cls_flower.rule); 2824 } 2825 2826 static int fl_dump_key_val(struct sk_buff *skb, 2827 void *val, int val_type, 2828 void *mask, int mask_type, int len) 2829 { 2830 int err; 2831 2832 if (!memchr_inv(mask, 0, len)) 2833 return 0; 2834 err = nla_put(skb, val_type, len, val); 2835 if (err) 2836 return err; 2837 if (mask_type != TCA_FLOWER_UNSPEC) { 2838 err = nla_put(skb, mask_type, len, mask); 2839 if (err) 2840 return err; 2841 } 2842 return 0; 2843 } 2844 2845 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key, 2846 struct fl_flow_key *mask) 2847 { 2848 if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst, 2849 TCA_FLOWER_KEY_PORT_DST_MIN, 2850 &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC, 2851 sizeof(key->tp_range.tp_min.dst)) || 2852 fl_dump_key_val(skb, &key->tp_range.tp_max.dst, 2853 TCA_FLOWER_KEY_PORT_DST_MAX, 2854 &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC, 2855 sizeof(key->tp_range.tp_max.dst)) || 2856 fl_dump_key_val(skb, &key->tp_range.tp_min.src, 2857 TCA_FLOWER_KEY_PORT_SRC_MIN, 2858 &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC, 2859 sizeof(key->tp_range.tp_min.src)) || 2860 fl_dump_key_val(skb, &key->tp_range.tp_max.src, 2861 TCA_FLOWER_KEY_PORT_SRC_MAX, 2862 &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC, 2863 sizeof(key->tp_range.tp_max.src))) 2864 return -1; 2865 2866 return 0; 2867 } 2868 2869 static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb, 2870 struct flow_dissector_key_mpls *mpls_key, 2871 struct flow_dissector_key_mpls *mpls_mask, 2872 u8 lse_index) 2873 { 2874 struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index]; 2875 struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index]; 2876 int err; 2877 2878 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH, 2879 lse_index + 1); 2880 if (err) 2881 return err; 2882 2883 if (lse_mask->mpls_ttl) { 2884 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL, 2885 lse_key->mpls_ttl); 2886 if (err) 2887 return err; 2888 } 2889 if (lse_mask->mpls_bos) { 2890 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS, 2891 lse_key->mpls_bos); 2892 if (err) 2893 return err; 2894 } 2895 if (lse_mask->mpls_tc) { 2896 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC, 2897 lse_key->mpls_tc); 2898 if (err) 2899 return err; 2900 } 2901 if (lse_mask->mpls_label) { 2902 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL, 2903 lse_key->mpls_label); 2904 if (err) 2905 return err; 2906 } 2907 2908 return 0; 2909 } 2910 2911 static int fl_dump_key_mpls_opts(struct sk_buff *skb, 2912 struct flow_dissector_key_mpls *mpls_key, 2913 struct flow_dissector_key_mpls *mpls_mask) 2914 { 2915 struct nlattr *opts; 2916 struct nlattr *lse; 2917 u8 lse_index; 2918 int err; 2919 2920 opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS); 2921 if (!opts) 2922 return -EMSGSIZE; 2923 2924 for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) { 2925 if (!(mpls_mask->used_lses & 1 << lse_index)) 2926 continue; 2927 2928 lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE); 2929 if (!lse) { 2930 err = -EMSGSIZE; 2931 goto err_opts; 2932 } 2933 2934 err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask, 2935 lse_index); 2936 if (err) 2937 goto err_opts_lse; 
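		/* One nested LSE attribute is emitted per bit set in
		 * mpls_mask->used_lses; close this entry before moving on
		 * to the next depth.
		 */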
2938 nla_nest_end(skb, lse); 2939 } 2940 nla_nest_end(skb, opts); 2941 2942 return 0; 2943 2944 err_opts_lse: 2945 nla_nest_cancel(skb, lse); 2946 err_opts: 2947 nla_nest_cancel(skb, opts); 2948 2949 return err; 2950 } 2951 2952 static int fl_dump_key_mpls(struct sk_buff *skb, 2953 struct flow_dissector_key_mpls *mpls_key, 2954 struct flow_dissector_key_mpls *mpls_mask) 2955 { 2956 struct flow_dissector_mpls_lse *lse_mask; 2957 struct flow_dissector_mpls_lse *lse_key; 2958 int err; 2959 2960 if (!mpls_mask->used_lses) 2961 return 0; 2962 2963 lse_mask = &mpls_mask->ls[0]; 2964 lse_key = &mpls_key->ls[0]; 2965 2966 /* For backward compatibility, don't use the MPLS nested attributes if 2967 * the rule can be expressed using the old attributes. 2968 */ 2969 if (mpls_mask->used_lses & ~1 || 2970 (!lse_mask->mpls_ttl && !lse_mask->mpls_bos && 2971 !lse_mask->mpls_tc && !lse_mask->mpls_label)) 2972 return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask); 2973 2974 if (lse_mask->mpls_ttl) { 2975 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL, 2976 lse_key->mpls_ttl); 2977 if (err) 2978 return err; 2979 } 2980 if (lse_mask->mpls_tc) { 2981 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC, 2982 lse_key->mpls_tc); 2983 if (err) 2984 return err; 2985 } 2986 if (lse_mask->mpls_label) { 2987 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL, 2988 lse_key->mpls_label); 2989 if (err) 2990 return err; 2991 } 2992 if (lse_mask->mpls_bos) { 2993 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS, 2994 lse_key->mpls_bos); 2995 if (err) 2996 return err; 2997 } 2998 return 0; 2999 } 3000 3001 static int fl_dump_key_ip(struct sk_buff *skb, bool encap, 3002 struct flow_dissector_key_ip *key, 3003 struct flow_dissector_key_ip *mask) 3004 { 3005 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS; 3006 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL; 3007 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK; 3008 int ttl_mask = encap ? 
TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK; 3009 3010 if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) || 3011 fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl))) 3012 return -1; 3013 3014 return 0; 3015 } 3016 3017 static int fl_dump_key_vlan(struct sk_buff *skb, 3018 int vlan_id_key, int vlan_prio_key, 3019 struct flow_dissector_key_vlan *vlan_key, 3020 struct flow_dissector_key_vlan *vlan_mask) 3021 { 3022 int err; 3023 3024 if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask))) 3025 return 0; 3026 if (vlan_mask->vlan_id) { 3027 err = nla_put_u16(skb, vlan_id_key, 3028 vlan_key->vlan_id); 3029 if (err) 3030 return err; 3031 } 3032 if (vlan_mask->vlan_priority) { 3033 err = nla_put_u8(skb, vlan_prio_key, 3034 vlan_key->vlan_priority); 3035 if (err) 3036 return err; 3037 } 3038 return 0; 3039 } 3040 3041 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask, 3042 u32 *flower_key, u32 *flower_mask, 3043 u32 flower_flag_bit, u32 dissector_flag_bit) 3044 { 3045 if (dissector_mask & dissector_flag_bit) { 3046 *flower_mask |= flower_flag_bit; 3047 if (dissector_key & dissector_flag_bit) 3048 *flower_key |= flower_flag_bit; 3049 } 3050 } 3051 3052 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask) 3053 { 3054 u32 key, mask; 3055 __be32 _key, _mask; 3056 int err; 3057 3058 if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask))) 3059 return 0; 3060 3061 key = 0; 3062 mask = 0; 3063 3064 fl_get_key_flag(flags_key, flags_mask, &key, &mask, 3065 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT); 3066 fl_get_key_flag(flags_key, flags_mask, &key, &mask, 3067 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST, 3068 FLOW_DIS_FIRST_FRAG); 3069 3070 _key = cpu_to_be32(key); 3071 _mask = cpu_to_be32(mask); 3072 3073 err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key); 3074 if (err) 3075 return err; 3076 3077 return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask); 3078 } 3079 3080 static int fl_dump_key_geneve_opt(struct sk_buff *skb, 3081 struct flow_dissector_key_enc_opts *enc_opts) 3082 { 3083 struct geneve_opt *opt; 3084 struct nlattr *nest; 3085 int opt_off = 0; 3086 3087 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE); 3088 if (!nest) 3089 goto nla_put_failure; 3090 3091 while (enc_opts->len > opt_off) { 3092 opt = (struct geneve_opt *)&enc_opts->data[opt_off]; 3093 3094 if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS, 3095 opt->opt_class)) 3096 goto nla_put_failure; 3097 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE, 3098 opt->type)) 3099 goto nla_put_failure; 3100 if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA, 3101 opt->length * 4, opt->opt_data)) 3102 goto nla_put_failure; 3103 3104 opt_off += sizeof(struct geneve_opt) + opt->length * 4; 3105 } 3106 nla_nest_end(skb, nest); 3107 return 0; 3108 3109 nla_put_failure: 3110 nla_nest_cancel(skb, nest); 3111 return -EMSGSIZE; 3112 } 3113 3114 static int fl_dump_key_vxlan_opt(struct sk_buff *skb, 3115 struct flow_dissector_key_enc_opts *enc_opts) 3116 { 3117 struct vxlan_metadata *md; 3118 struct nlattr *nest; 3119 3120 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN); 3121 if (!nest) 3122 goto nla_put_failure; 3123 3124 md = (struct vxlan_metadata *)&enc_opts->data[0]; 3125 if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp)) 3126 goto nla_put_failure; 3127 3128 nla_nest_end(skb, nest); 3129 return 0; 3130 3131 nla_put_failure: 3132 nla_nest_cancel(skb, nest); 3133 return 
-EMSGSIZE; 3134 } 3135 3136 static int fl_dump_key_erspan_opt(struct sk_buff *skb, 3137 struct flow_dissector_key_enc_opts *enc_opts) 3138 { 3139 struct erspan_metadata *md; 3140 struct nlattr *nest; 3141 3142 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN); 3143 if (!nest) 3144 goto nla_put_failure; 3145 3146 md = (struct erspan_metadata *)&enc_opts->data[0]; 3147 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version)) 3148 goto nla_put_failure; 3149 3150 if (md->version == 1 && 3151 nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index)) 3152 goto nla_put_failure; 3153 3154 if (md->version == 2 && 3155 (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR, 3156 md->u.md2.dir) || 3157 nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID, 3158 get_hwid(&md->u.md2)))) 3159 goto nla_put_failure; 3160 3161 nla_nest_end(skb, nest); 3162 return 0; 3163 3164 nla_put_failure: 3165 nla_nest_cancel(skb, nest); 3166 return -EMSGSIZE; 3167 } 3168 3169 static int fl_dump_key_gtp_opt(struct sk_buff *skb, 3170 struct flow_dissector_key_enc_opts *enc_opts) 3171 3172 { 3173 struct gtp_pdu_session_info *session_info; 3174 struct nlattr *nest; 3175 3176 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GTP); 3177 if (!nest) 3178 goto nla_put_failure; 3179 3180 session_info = (struct gtp_pdu_session_info *)&enc_opts->data[0]; 3181 3182 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE, 3183 session_info->pdu_type)) 3184 goto nla_put_failure; 3185 3186 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_QFI, session_info->qfi)) 3187 goto nla_put_failure; 3188 3189 nla_nest_end(skb, nest); 3190 return 0; 3191 3192 nla_put_failure: 3193 nla_nest_cancel(skb, nest); 3194 return -EMSGSIZE; 3195 } 3196 3197 static int fl_dump_key_pfcp_opt(struct sk_buff *skb, 3198 struct flow_dissector_key_enc_opts *enc_opts) 3199 { 3200 struct pfcp_metadata *md; 3201 struct nlattr *nest; 3202 3203 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_PFCP); 3204 if (!nest) 3205 goto nla_put_failure; 3206 3207 md = (struct pfcp_metadata *)&enc_opts->data[0]; 3208 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE, md->type)) 3209 goto nla_put_failure; 3210 3211 if (nla_put_be64(skb, TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID, 3212 md->seid, 0)) 3213 goto nla_put_failure; 3214 3215 nla_nest_end(skb, nest); 3216 return 0; 3217 3218 nla_put_failure: 3219 nla_nest_cancel(skb, nest); 3220 return -EMSGSIZE; 3221 } 3222 3223 static int fl_dump_key_ct(struct sk_buff *skb, 3224 struct flow_dissector_key_ct *key, 3225 struct flow_dissector_key_ct *mask) 3226 { 3227 if (IS_ENABLED(CONFIG_NF_CONNTRACK) && 3228 fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE, 3229 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK, 3230 sizeof(key->ct_state))) 3231 goto nla_put_failure; 3232 3233 if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) && 3234 fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE, 3235 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK, 3236 sizeof(key->ct_zone))) 3237 goto nla_put_failure; 3238 3239 if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) && 3240 fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK, 3241 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK, 3242 sizeof(key->ct_mark))) 3243 goto nla_put_failure; 3244 3245 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) && 3246 fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS, 3247 &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK, 3248 sizeof(key->ct_labels))) 3249 goto nla_put_failure; 3250 3251 return 0; 3252 3253 nla_put_failure: 3254 
return -EMSGSIZE; 3255 } 3256 3257 static int fl_dump_key_cfm(struct sk_buff *skb, 3258 struct flow_dissector_key_cfm *key, 3259 struct flow_dissector_key_cfm *mask) 3260 { 3261 struct nlattr *opts; 3262 int err; 3263 u8 mdl; 3264 3265 if (!memchr_inv(mask, 0, sizeof(*mask))) 3266 return 0; 3267 3268 opts = nla_nest_start(skb, TCA_FLOWER_KEY_CFM); 3269 if (!opts) 3270 return -EMSGSIZE; 3271 3272 if (FIELD_GET(FLOW_DIS_CFM_MDL_MASK, mask->mdl_ver)) { 3273 mdl = FIELD_GET(FLOW_DIS_CFM_MDL_MASK, key->mdl_ver); 3274 err = nla_put_u8(skb, TCA_FLOWER_KEY_CFM_MD_LEVEL, mdl); 3275 if (err) 3276 goto err_cfm_opts; 3277 } 3278 3279 if (mask->opcode) { 3280 err = nla_put_u8(skb, TCA_FLOWER_KEY_CFM_OPCODE, key->opcode); 3281 if (err) 3282 goto err_cfm_opts; 3283 } 3284 3285 nla_nest_end(skb, opts); 3286 3287 return 0; 3288 3289 err_cfm_opts: 3290 nla_nest_cancel(skb, opts); 3291 return err; 3292 } 3293 3294 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type, 3295 struct flow_dissector_key_enc_opts *enc_opts) 3296 { 3297 struct nlattr *nest; 3298 int err; 3299 3300 if (!enc_opts->len) 3301 return 0; 3302 3303 nest = nla_nest_start_noflag(skb, enc_opt_type); 3304 if (!nest) 3305 goto nla_put_failure; 3306 3307 switch (enc_opts->dst_opt_type) { 3308 case IP_TUNNEL_GENEVE_OPT_BIT: 3309 err = fl_dump_key_geneve_opt(skb, enc_opts); 3310 if (err) 3311 goto nla_put_failure; 3312 break; 3313 case IP_TUNNEL_VXLAN_OPT_BIT: 3314 err = fl_dump_key_vxlan_opt(skb, enc_opts); 3315 if (err) 3316 goto nla_put_failure; 3317 break; 3318 case IP_TUNNEL_ERSPAN_OPT_BIT: 3319 err = fl_dump_key_erspan_opt(skb, enc_opts); 3320 if (err) 3321 goto nla_put_failure; 3322 break; 3323 case IP_TUNNEL_GTP_OPT_BIT: 3324 err = fl_dump_key_gtp_opt(skb, enc_opts); 3325 if (err) 3326 goto nla_put_failure; 3327 break; 3328 case IP_TUNNEL_PFCP_OPT_BIT: 3329 err = fl_dump_key_pfcp_opt(skb, enc_opts); 3330 if (err) 3331 goto nla_put_failure; 3332 break; 3333 default: 3334 goto nla_put_failure; 3335 } 3336 nla_nest_end(skb, nest); 3337 return 0; 3338 3339 nla_put_failure: 3340 nla_nest_cancel(skb, nest); 3341 return -EMSGSIZE; 3342 } 3343 3344 static int fl_dump_key_enc_opt(struct sk_buff *skb, 3345 struct flow_dissector_key_enc_opts *key_opts, 3346 struct flow_dissector_key_enc_opts *msk_opts) 3347 { 3348 int err; 3349 3350 err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts); 3351 if (err) 3352 return err; 3353 3354 return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts); 3355 } 3356 3357 static int fl_dump_key(struct sk_buff *skb, struct net *net, 3358 struct fl_flow_key *key, struct fl_flow_key *mask) 3359 { 3360 if (mask->meta.ingress_ifindex) { 3361 struct net_device *dev; 3362 3363 dev = __dev_get_by_index(net, key->meta.ingress_ifindex); 3364 if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name)) 3365 goto nla_put_failure; 3366 } 3367 3368 if (fl_dump_key_val(skb, &key->meta.l2_miss, 3369 TCA_FLOWER_L2_MISS, &mask->meta.l2_miss, 3370 TCA_FLOWER_UNSPEC, sizeof(key->meta.l2_miss))) 3371 goto nla_put_failure; 3372 3373 if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST, 3374 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK, 3375 sizeof(key->eth.dst)) || 3376 fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC, 3377 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK, 3378 sizeof(key->eth.src)) || 3379 fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE, 3380 &mask->basic.n_proto, TCA_FLOWER_UNSPEC, 3381 sizeof(key->basic.n_proto))) 3382 goto nla_put_failure; 3383 3384 if 
(mask->num_of_vlans.num_of_vlans) { 3385 if (nla_put_u8(skb, TCA_FLOWER_KEY_NUM_OF_VLANS, key->num_of_vlans.num_of_vlans)) 3386 goto nla_put_failure; 3387 } 3388 3389 if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls)) 3390 goto nla_put_failure; 3391 3392 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID, 3393 TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan)) 3394 goto nla_put_failure; 3395 3396 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID, 3397 TCA_FLOWER_KEY_CVLAN_PRIO, 3398 &key->cvlan, &mask->cvlan) || 3399 (mask->cvlan.vlan_tpid && 3400 nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE, 3401 key->cvlan.vlan_tpid))) 3402 goto nla_put_failure; 3403 3404 if (mask->basic.n_proto) { 3405 if (mask->cvlan.vlan_eth_type) { 3406 if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE, 3407 key->basic.n_proto)) 3408 goto nla_put_failure; 3409 } else if (mask->vlan.vlan_eth_type) { 3410 if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE, 3411 key->vlan.vlan_eth_type)) 3412 goto nla_put_failure; 3413 } 3414 } 3415 3416 if ((key->basic.n_proto == htons(ETH_P_IP) || 3417 key->basic.n_proto == htons(ETH_P_IPV6)) && 3418 (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO, 3419 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC, 3420 sizeof(key->basic.ip_proto)) || 3421 fl_dump_key_ip(skb, false, &key->ip, &mask->ip))) 3422 goto nla_put_failure; 3423 3424 if (mask->pppoe.session_id) { 3425 if (nla_put_be16(skb, TCA_FLOWER_KEY_PPPOE_SID, 3426 key->pppoe.session_id)) 3427 goto nla_put_failure; 3428 } 3429 if (mask->basic.n_proto && mask->pppoe.ppp_proto) { 3430 if (nla_put_be16(skb, TCA_FLOWER_KEY_PPP_PROTO, 3431 key->pppoe.ppp_proto)) 3432 goto nla_put_failure; 3433 } 3434 3435 if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS && 3436 (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC, 3437 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK, 3438 sizeof(key->ipv4.src)) || 3439 fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST, 3440 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK, 3441 sizeof(key->ipv4.dst)))) 3442 goto nla_put_failure; 3443 else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS && 3444 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC, 3445 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK, 3446 sizeof(key->ipv6.src)) || 3447 fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST, 3448 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK, 3449 sizeof(key->ipv6.dst)))) 3450 goto nla_put_failure; 3451 3452 if (key->basic.ip_proto == IPPROTO_TCP && 3453 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC, 3454 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK, 3455 sizeof(key->tp.src)) || 3456 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST, 3457 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK, 3458 sizeof(key->tp.dst)) || 3459 fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS, 3460 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK, 3461 sizeof(key->tcp.flags)))) 3462 goto nla_put_failure; 3463 else if (key->basic.ip_proto == IPPROTO_UDP && 3464 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC, 3465 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK, 3466 sizeof(key->tp.src)) || 3467 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST, 3468 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK, 3469 sizeof(key->tp.dst)))) 3470 goto nla_put_failure; 3471 else if (key->basic.ip_proto == IPPROTO_SCTP && 3472 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC, 3473 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK, 3474 
sizeof(key->tp.src)) || 3475 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST, 3476 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK, 3477 sizeof(key->tp.dst)))) 3478 goto nla_put_failure; 3479 else if (key->basic.n_proto == htons(ETH_P_IP) && 3480 key->basic.ip_proto == IPPROTO_ICMP && 3481 (fl_dump_key_val(skb, &key->icmp.type, 3482 TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type, 3483 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK, 3484 sizeof(key->icmp.type)) || 3485 fl_dump_key_val(skb, &key->icmp.code, 3486 TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code, 3487 TCA_FLOWER_KEY_ICMPV4_CODE_MASK, 3488 sizeof(key->icmp.code)))) 3489 goto nla_put_failure; 3490 else if (key->basic.n_proto == htons(ETH_P_IPV6) && 3491 key->basic.ip_proto == IPPROTO_ICMPV6 && 3492 (fl_dump_key_val(skb, &key->icmp.type, 3493 TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type, 3494 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK, 3495 sizeof(key->icmp.type)) || 3496 fl_dump_key_val(skb, &key->icmp.code, 3497 TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code, 3498 TCA_FLOWER_KEY_ICMPV6_CODE_MASK, 3499 sizeof(key->icmp.code)))) 3500 goto nla_put_failure; 3501 else if ((key->basic.n_proto == htons(ETH_P_ARP) || 3502 key->basic.n_proto == htons(ETH_P_RARP)) && 3503 (fl_dump_key_val(skb, &key->arp.sip, 3504 TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip, 3505 TCA_FLOWER_KEY_ARP_SIP_MASK, 3506 sizeof(key->arp.sip)) || 3507 fl_dump_key_val(skb, &key->arp.tip, 3508 TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip, 3509 TCA_FLOWER_KEY_ARP_TIP_MASK, 3510 sizeof(key->arp.tip)) || 3511 fl_dump_key_val(skb, &key->arp.op, 3512 TCA_FLOWER_KEY_ARP_OP, &mask->arp.op, 3513 TCA_FLOWER_KEY_ARP_OP_MASK, 3514 sizeof(key->arp.op)) || 3515 fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA, 3516 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK, 3517 sizeof(key->arp.sha)) || 3518 fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA, 3519 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK, 3520 sizeof(key->arp.tha)))) 3521 goto nla_put_failure; 3522 else if (key->basic.ip_proto == IPPROTO_L2TP && 3523 fl_dump_key_val(skb, &key->l2tpv3.session_id, 3524 TCA_FLOWER_KEY_L2TPV3_SID, 3525 &mask->l2tpv3.session_id, 3526 TCA_FLOWER_UNSPEC, 3527 sizeof(key->l2tpv3.session_id))) 3528 goto nla_put_failure; 3529 3530 if (key->ipsec.spi && 3531 fl_dump_key_val(skb, &key->ipsec.spi, TCA_FLOWER_KEY_SPI, 3532 &mask->ipsec.spi, TCA_FLOWER_KEY_SPI_MASK, 3533 sizeof(key->ipsec.spi))) 3534 goto nla_put_failure; 3535 3536 if ((key->basic.ip_proto == IPPROTO_TCP || 3537 key->basic.ip_proto == IPPROTO_UDP || 3538 key->basic.ip_proto == IPPROTO_SCTP) && 3539 fl_dump_key_port_range(skb, key, mask)) 3540 goto nla_put_failure; 3541 3542 if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS && 3543 (fl_dump_key_val(skb, &key->enc_ipv4.src, 3544 TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src, 3545 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK, 3546 sizeof(key->enc_ipv4.src)) || 3547 fl_dump_key_val(skb, &key->enc_ipv4.dst, 3548 TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst, 3549 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK, 3550 sizeof(key->enc_ipv4.dst)))) 3551 goto nla_put_failure; 3552 else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS && 3553 (fl_dump_key_val(skb, &key->enc_ipv6.src, 3554 TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src, 3555 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK, 3556 sizeof(key->enc_ipv6.src)) || 3557 fl_dump_key_val(skb, &key->enc_ipv6.dst, 3558 TCA_FLOWER_KEY_ENC_IPV6_DST, 3559 &mask->enc_ipv6.dst, 3560 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK, 3561 sizeof(key->enc_ipv6.dst)))) 3562 goto nla_put_failure; 
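	/* The tunnel metadata below (key id, UDP ports, TOS/TTL and
	 * vendor options) is dumped in the same order fl_set_key()
	 * parsed it. A matching rule might look like (sketch; option
	 * names as in recent iproute2):
	 *
	 *   tc filter add dev vxlan0 ingress flower enc_key_id 42 \
	 *           enc_dst_ip 10.0.0.1 enc_dst_port 4789 action drop
	 */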
3563 3564 if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID, 3565 &mask->enc_key_id, TCA_FLOWER_UNSPEC, 3566 sizeof(key->enc_key_id)) || 3567 fl_dump_key_val(skb, &key->enc_tp.src, 3568 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT, 3569 &mask->enc_tp.src, 3570 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK, 3571 sizeof(key->enc_tp.src)) || 3572 fl_dump_key_val(skb, &key->enc_tp.dst, 3573 TCA_FLOWER_KEY_ENC_UDP_DST_PORT, 3574 &mask->enc_tp.dst, 3575 TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK, 3576 sizeof(key->enc_tp.dst)) || 3577 fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) || 3578 fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts)) 3579 goto nla_put_failure; 3580 3581 if (fl_dump_key_ct(skb, &key->ct, &mask->ct)) 3582 goto nla_put_failure; 3583 3584 if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags)) 3585 goto nla_put_failure; 3586 3587 if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH, 3588 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK, 3589 sizeof(key->hash.hash))) 3590 goto nla_put_failure; 3591 3592 if (fl_dump_key_cfm(skb, &key->cfm, &mask->cfm)) 3593 goto nla_put_failure; 3594 3595 return 0; 3596 3597 nla_put_failure: 3598 return -EMSGSIZE; 3599 } 3600 3601 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh, 3602 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held) 3603 { 3604 struct cls_fl_filter *f = fh; 3605 struct nlattr *nest; 3606 struct fl_flow_key *key, *mask; 3607 bool skip_hw; 3608 3609 if (!f) 3610 return skb->len; 3611 3612 t->tcm_handle = f->handle; 3613 3614 nest = nla_nest_start_noflag(skb, TCA_OPTIONS); 3615 if (!nest) 3616 goto nla_put_failure; 3617 3618 spin_lock(&tp->lock); 3619 3620 if (f->res.classid && 3621 nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid)) 3622 goto nla_put_failure_locked; 3623 3624 key = &f->key; 3625 mask = &f->mask->key; 3626 skip_hw = tc_skip_hw(f->flags); 3627 3628 if (fl_dump_key(skb, net, key, mask)) 3629 goto nla_put_failure_locked; 3630 3631 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags)) 3632 goto nla_put_failure_locked; 3633 3634 spin_unlock(&tp->lock); 3635 3636 if (!skip_hw) 3637 fl_hw_update_stats(tp, f, rtnl_held); 3638 3639 if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count)) 3640 goto nla_put_failure; 3641 3642 if (tcf_exts_dump(skb, &f->exts)) 3643 goto nla_put_failure; 3644 3645 nla_nest_end(skb, nest); 3646 3647 if (tcf_exts_dump_stats(skb, &f->exts) < 0) 3648 goto nla_put_failure; 3649 3650 return skb->len; 3651 3652 nla_put_failure_locked: 3653 spin_unlock(&tp->lock); 3654 nla_put_failure: 3655 nla_nest_cancel(skb, nest); 3656 return -1; 3657 } 3658 3659 static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh, 3660 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held) 3661 { 3662 struct cls_fl_filter *f = fh; 3663 struct nlattr *nest; 3664 bool skip_hw; 3665 3666 if (!f) 3667 return skb->len; 3668 3669 t->tcm_handle = f->handle; 3670 3671 nest = nla_nest_start_noflag(skb, TCA_OPTIONS); 3672 if (!nest) 3673 goto nla_put_failure; 3674 3675 spin_lock(&tp->lock); 3676 3677 skip_hw = tc_skip_hw(f->flags); 3678 3679 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags)) 3680 goto nla_put_failure_locked; 3681 3682 spin_unlock(&tp->lock); 3683 3684 if (!skip_hw) 3685 fl_hw_update_stats(tp, f, rtnl_held); 3686 3687 if (tcf_exts_terse_dump(skb, &f->exts)) 3688 goto nla_put_failure; 3689 3690 nla_nest_end(skb, nest); 3691 3692 return skb->len; 3693 3694 nla_put_failure_locked: 3695 spin_unlock(&tp->lock); 3696 nla_put_failure: 3697 
nla_nest_cancel(skb, nest); 3698 return -1; 3699 } 3700 3701 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv) 3702 { 3703 struct fl_flow_tmplt *tmplt = tmplt_priv; 3704 struct fl_flow_key *key, *mask; 3705 struct nlattr *nest; 3706 3707 nest = nla_nest_start_noflag(skb, TCA_OPTIONS); 3708 if (!nest) 3709 goto nla_put_failure; 3710 3711 key = &tmplt->dummy_key; 3712 mask = &tmplt->mask; 3713 3714 if (fl_dump_key(skb, net, key, mask)) 3715 goto nla_put_failure; 3716 3717 nla_nest_end(skb, nest); 3718 3719 return skb->len; 3720 3721 nla_put_failure: 3722 nla_nest_cancel(skb, nest); 3723 return -EMSGSIZE; 3724 } 3725 3726 static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q, 3727 unsigned long base) 3728 { 3729 struct cls_fl_filter *f = fh; 3730 3731 tc_cls_bind_class(classid, cl, q, &f->res, base); 3732 } 3733 3734 static bool fl_delete_empty(struct tcf_proto *tp) 3735 { 3736 struct cls_fl_head *head = fl_head_dereference(tp); 3737 3738 spin_lock(&tp->lock); 3739 tp->deleting = idr_is_empty(&head->handle_idr); 3740 spin_unlock(&tp->lock); 3741 3742 return tp->deleting; 3743 } 3744 3745 static struct tcf_proto_ops cls_fl_ops __read_mostly = { 3746 .kind = "flower", 3747 .classify = fl_classify, 3748 .init = fl_init, 3749 .destroy = fl_destroy, 3750 .get = fl_get, 3751 .put = fl_put, 3752 .change = fl_change, 3753 .delete = fl_delete, 3754 .delete_empty = fl_delete_empty, 3755 .walk = fl_walk, 3756 .reoffload = fl_reoffload, 3757 .hw_add = fl_hw_add, 3758 .hw_del = fl_hw_del, 3759 .dump = fl_dump, 3760 .terse_dump = fl_terse_dump, 3761 .bind_class = fl_bind_class, 3762 .tmplt_create = fl_tmplt_create, 3763 .tmplt_destroy = fl_tmplt_destroy, 3764 .tmplt_reoffload = fl_tmplt_reoffload, 3765 .tmplt_dump = fl_tmplt_dump, 3766 .get_exts = fl_get_exts, 3767 .owner = THIS_MODULE, 3768 .flags = TCF_PROTO_OPS_DOIT_UNLOCKED, 3769 }; 3770 MODULE_ALIAS_NET_CLS("flower"); 3771 3772 static int __init cls_fl_init(void) 3773 { 3774 return register_tcf_proto_ops(&cls_fl_ops); 3775 } 3776 3777 static void __exit cls_fl_exit(void) 3778 { 3779 unregister_tcf_proto_ops(&cls_fl_ops); 3780 } 3781 3782 module_init(cls_fl_init); 3783 module_exit(cls_fl_exit); 3784 3785 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>"); 3786 MODULE_DESCRIPTION("Flower classifier"); 3787 MODULE_LICENSE("GPL v2"); 3788
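/* Minimal end-to-end usage sketch (illustrative only; the interface name
 * and exact option spelling are assumptions about the local setup):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress protocol ip pref 1 flower \
 *           ip_proto tcp dst_ip 192.0.2.0/24 dst_port 80 \
 *           skip_sw action drop
 *
 * skip_sw requests hardware-only offload and the insert fails if no
 * driver accepts the rule; omit it to let fl_classify() handle the
 * match in software.
 */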