1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * net/sched/cls_flower.c Flower classifier 4 * 5 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us> 6 */ 7 8 #include <linux/kernel.h> 9 #include <linux/init.h> 10 #include <linux/module.h> 11 #include <linux/rhashtable.h> 12 #include <linux/workqueue.h> 13 #include <linux/refcount.h> 14 15 #include <linux/if_ether.h> 16 #include <linux/in6.h> 17 #include <linux/ip.h> 18 #include <linux/mpls.h> 19 20 #include <net/sch_generic.h> 21 #include <net/pkt_cls.h> 22 #include <net/ip.h> 23 #include <net/flow_dissector.h> 24 #include <net/geneve.h> 25 #include <net/vxlan.h> 26 #include <net/erspan.h> 27 28 #include <net/dst.h> 29 #include <net/dst_metadata.h> 30 31 #include <uapi/linux/netfilter/nf_conntrack_common.h> 32 33 struct fl_flow_key { 34 struct flow_dissector_key_meta meta; 35 struct flow_dissector_key_control control; 36 struct flow_dissector_key_control enc_control; 37 struct flow_dissector_key_basic basic; 38 struct flow_dissector_key_eth_addrs eth; 39 struct flow_dissector_key_vlan vlan; 40 struct flow_dissector_key_vlan cvlan; 41 union { 42 struct flow_dissector_key_ipv4_addrs ipv4; 43 struct flow_dissector_key_ipv6_addrs ipv6; 44 }; 45 struct flow_dissector_key_ports tp; 46 struct flow_dissector_key_icmp icmp; 47 struct flow_dissector_key_arp arp; 48 struct flow_dissector_key_keyid enc_key_id; 49 union { 50 struct flow_dissector_key_ipv4_addrs enc_ipv4; 51 struct flow_dissector_key_ipv6_addrs enc_ipv6; 52 }; 53 struct flow_dissector_key_ports enc_tp; 54 struct flow_dissector_key_mpls mpls; 55 struct flow_dissector_key_tcp tcp; 56 struct flow_dissector_key_ip ip; 57 struct flow_dissector_key_ip enc_ip; 58 struct flow_dissector_key_enc_opts enc_opts; 59 union { 60 struct flow_dissector_key_ports tp; 61 struct { 62 struct flow_dissector_key_ports tp_min; 63 struct flow_dissector_key_ports tp_max; 64 }; 65 } tp_range; 66 struct flow_dissector_key_ct ct; 67 struct flow_dissector_key_hash hash; 68 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */ 69 70 struct fl_flow_mask_range { 71 unsigned short int start; 72 unsigned short int end; 73 }; 74 75 struct fl_flow_mask { 76 struct fl_flow_key key; 77 struct fl_flow_mask_range range; 78 u32 flags; 79 struct rhash_head ht_node; 80 struct rhashtable ht; 81 struct rhashtable_params filter_ht_params; 82 struct flow_dissector dissector; 83 struct list_head filters; 84 struct rcu_work rwork; 85 struct list_head list; 86 refcount_t refcnt; 87 }; 88 89 struct fl_flow_tmplt { 90 struct fl_flow_key dummy_key; 91 struct fl_flow_key mask; 92 struct flow_dissector dissector; 93 struct tcf_chain *chain; 94 }; 95 96 struct cls_fl_head { 97 struct rhashtable ht; 98 spinlock_t masks_lock; /* Protect masks list */ 99 struct list_head masks; 100 struct list_head hw_filters; 101 struct rcu_work rwork; 102 struct idr handle_idr; 103 }; 104 105 struct cls_fl_filter { 106 struct fl_flow_mask *mask; 107 struct rhash_head ht_node; 108 struct fl_flow_key mkey; 109 struct tcf_exts exts; 110 struct tcf_result res; 111 struct fl_flow_key key; 112 struct list_head list; 113 struct list_head hw_list; 114 u32 handle; 115 u32 flags; 116 u32 in_hw_count; 117 struct rcu_work rwork; 118 struct net_device *hw_dev; 119 /* Flower classifier is unlocked, which means that its reference counter 120 * can be changed concurrently without any kind of external 121 * synchronization. Use atomic reference counter to be concurrency-safe. 122 */ 123 refcount_t refcnt; 124 bool deleted; 125 }; 126 127 static const struct rhashtable_params mask_ht_params = { 128 .key_offset = offsetof(struct fl_flow_mask, key), 129 .key_len = sizeof(struct fl_flow_key), 130 .head_offset = offsetof(struct fl_flow_mask, ht_node), 131 .automatic_shrinking = true, 132 }; 133 134 static unsigned short int fl_mask_range(const struct fl_flow_mask *mask) 135 { 136 return mask->range.end - mask->range.start; 137 } 138 139 static void fl_mask_update_range(struct fl_flow_mask *mask) 140 { 141 const u8 *bytes = (const u8 *) &mask->key; 142 size_t size = sizeof(mask->key); 143 size_t i, first = 0, last; 144 145 for (i = 0; i < size; i++) { 146 if (bytes[i]) { 147 first = i; 148 break; 149 } 150 } 151 last = first; 152 for (i = size - 1; i != first; i--) { 153 if (bytes[i]) { 154 last = i; 155 break; 156 } 157 } 158 mask->range.start = rounddown(first, sizeof(long)); 159 mask->range.end = roundup(last + 1, sizeof(long)); 160 } 161 162 static void *fl_key_get_start(struct fl_flow_key *key, 163 const struct fl_flow_mask *mask) 164 { 165 return (u8 *) key + mask->range.start; 166 } 167 168 static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key, 169 struct fl_flow_mask *mask) 170 { 171 const long *lkey = fl_key_get_start(key, mask); 172 const long *lmask = fl_key_get_start(&mask->key, mask); 173 long *lmkey = fl_key_get_start(mkey, mask); 174 int i; 175 176 for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) 177 *lmkey++ = *lkey++ & *lmask++; 178 } 179 180 static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt, 181 struct fl_flow_mask *mask) 182 { 183 const long *lmask = fl_key_get_start(&mask->key, mask); 184 const long *ltmplt; 185 int i; 186 187 if (!tmplt) 188 return true; 189 ltmplt = fl_key_get_start(&tmplt->mask, mask); 190 for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) { 191 if (~*ltmplt++ & *lmask++) 192 return false; 193 } 194 return true; 195 } 196 197 static void fl_clear_masked_range(struct fl_flow_key *key, 198 struct fl_flow_mask *mask) 199 { 200 memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask)); 201 } 202 203 static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter, 204 struct fl_flow_key *key, 205 struct fl_flow_key *mkey) 206 { 207 __be16 min_mask, max_mask, min_val, max_val; 208 209 min_mask = htons(filter->mask->key.tp_range.tp_min.dst); 210 max_mask = htons(filter->mask->key.tp_range.tp_max.dst); 211 min_val = htons(filter->key.tp_range.tp_min.dst); 212 max_val = htons(filter->key.tp_range.tp_max.dst); 213 214 if (min_mask && max_mask) { 215 if (htons(key->tp_range.tp.dst) < min_val || 216 htons(key->tp_range.tp.dst) > max_val) 217 return false; 218 219 /* skb does not have min and max values */ 220 mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst; 221 mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst; 222 } 223 return true; 224 } 225 226 static bool fl_range_port_src_cmp(struct cls_fl_filter *filter, 227 struct fl_flow_key *key, 228 struct fl_flow_key *mkey) 229 { 230 __be16 min_mask, max_mask, min_val, max_val; 231 232 min_mask = htons(filter->mask->key.tp_range.tp_min.src); 233 max_mask = htons(filter->mask->key.tp_range.tp_max.src); 234 min_val = htons(filter->key.tp_range.tp_min.src); 235 max_val = htons(filter->key.tp_range.tp_max.src); 236 237 if (min_mask && max_mask) { 238 if (htons(key->tp_range.tp.src) < min_val || 239 htons(key->tp_range.tp.src) > max_val) 240 return false; 241 242 /* skb does not have min and max values */ 243 mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src; 244 mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src; 245 } 246 return true; 247 } 248 249 static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask, 250 struct fl_flow_key *mkey) 251 { 252 return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask), 253 mask->filter_ht_params); 254 } 255 256 static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask, 257 struct fl_flow_key *mkey, 258 struct fl_flow_key *key) 259 { 260 struct cls_fl_filter *filter, *f; 261 262 list_for_each_entry_rcu(filter, &mask->filters, list) { 263 if (!fl_range_port_dst_cmp(filter, key, mkey)) 264 continue; 265 266 if (!fl_range_port_src_cmp(filter, key, mkey)) 267 continue; 268 269 f = __fl_lookup(mask, mkey); 270 if (f) 271 return f; 272 } 273 return NULL; 274 } 275 276 static noinline_for_stack 277 struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key) 278 { 279 struct fl_flow_key mkey; 280 281 fl_set_masked_key(&mkey, key, mask); 282 if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE)) 283 return fl_lookup_range(mask, &mkey, key); 284 285 return __fl_lookup(mask, &mkey); 286 } 287 288 static u16 fl_ct_info_to_flower_map[] = { 289 [IP_CT_ESTABLISHED] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 290 TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED, 291 [IP_CT_RELATED] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 292 TCA_FLOWER_KEY_CT_FLAGS_RELATED, 293 [IP_CT_ESTABLISHED_REPLY] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 294 TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED, 295 [IP_CT_RELATED_REPLY] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 296 TCA_FLOWER_KEY_CT_FLAGS_RELATED, 297 [IP_CT_NEW] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 298 TCA_FLOWER_KEY_CT_FLAGS_NEW, 299 }; 300 301 static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp, 302 struct tcf_result *res) 303 { 304 struct cls_fl_head *head = rcu_dereference_bh(tp->root); 305 struct fl_flow_key skb_key; 306 struct fl_flow_mask *mask; 307 struct cls_fl_filter *f; 308 309 list_for_each_entry_rcu(mask, &head->masks, list) { 310 flow_dissector_init_keys(&skb_key.control, &skb_key.basic); 311 fl_clear_masked_range(&skb_key, mask); 312 313 skb_flow_dissect_meta(skb, &mask->dissector, &skb_key); 314 /* skb_flow_dissect() does not set n_proto in case an unknown 315 * protocol, so do it rather here. 316 */ 317 skb_key.basic.n_proto = skb_protocol(skb, false); 318 skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key); 319 skb_flow_dissect_ct(skb, &mask->dissector, &skb_key, 320 fl_ct_info_to_flower_map, 321 ARRAY_SIZE(fl_ct_info_to_flower_map)); 322 skb_flow_dissect_hash(skb, &mask->dissector, &skb_key); 323 skb_flow_dissect(skb, &mask->dissector, &skb_key, 0); 324 325 f = fl_mask_lookup(mask, &skb_key); 326 if (f && !tc_skip_sw(f->flags)) { 327 *res = f->res; 328 return tcf_exts_exec(skb, &f->exts, res); 329 } 330 } 331 return -1; 332 } 333 334 static int fl_init(struct tcf_proto *tp) 335 { 336 struct cls_fl_head *head; 337 338 head = kzalloc(sizeof(*head), GFP_KERNEL); 339 if (!head) 340 return -ENOBUFS; 341 342 spin_lock_init(&head->masks_lock); 343 INIT_LIST_HEAD_RCU(&head->masks); 344 INIT_LIST_HEAD(&head->hw_filters); 345 rcu_assign_pointer(tp->root, head); 346 idr_init(&head->handle_idr); 347 348 return rhashtable_init(&head->ht, &mask_ht_params); 349 } 350 351 static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done) 352 { 353 /* temporary masks don't have their filters list and ht initialized */ 354 if (mask_init_done) { 355 WARN_ON(!list_empty(&mask->filters)); 356 rhashtable_destroy(&mask->ht); 357 } 358 kfree(mask); 359 } 360 361 static void fl_mask_free_work(struct work_struct *work) 362 { 363 struct fl_flow_mask *mask = container_of(to_rcu_work(work), 364 struct fl_flow_mask, rwork); 365 366 fl_mask_free(mask, true); 367 } 368 369 static void fl_uninit_mask_free_work(struct work_struct *work) 370 { 371 struct fl_flow_mask *mask = container_of(to_rcu_work(work), 372 struct fl_flow_mask, rwork); 373 374 fl_mask_free(mask, false); 375 } 376 377 static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask) 378 { 379 if (!refcount_dec_and_test(&mask->refcnt)) 380 return false; 381 382 rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params); 383 384 spin_lock(&head->masks_lock); 385 list_del_rcu(&mask->list); 386 spin_unlock(&head->masks_lock); 387 388 tcf_queue_work(&mask->rwork, fl_mask_free_work); 389 390 return true; 391 } 392 393 static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp) 394 { 395 /* Flower classifier only changes root pointer during init and destroy. 396 * Users must obtain reference to tcf_proto instance before calling its 397 * API, so tp->root pointer is protected from concurrent call to 398 * fl_destroy() by reference counting. 399 */ 400 return rcu_dereference_raw(tp->root); 401 } 402 403 static void __fl_destroy_filter(struct cls_fl_filter *f) 404 { 405 tcf_exts_destroy(&f->exts); 406 tcf_exts_put_net(&f->exts); 407 kfree(f); 408 } 409 410 static void fl_destroy_filter_work(struct work_struct *work) 411 { 412 struct cls_fl_filter *f = container_of(to_rcu_work(work), 413 struct cls_fl_filter, rwork); 414 415 __fl_destroy_filter(f); 416 } 417 418 static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f, 419 bool rtnl_held, struct netlink_ext_ack *extack) 420 { 421 struct tcf_block *block = tp->chain->block; 422 struct flow_cls_offload cls_flower = {}; 423 424 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack); 425 cls_flower.command = FLOW_CLS_DESTROY; 426 cls_flower.cookie = (unsigned long) f; 427 428 tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false, 429 &f->flags, &f->in_hw_count, rtnl_held); 430 431 } 432 433 static int fl_hw_replace_filter(struct tcf_proto *tp, 434 struct cls_fl_filter *f, bool rtnl_held, 435 struct netlink_ext_ack *extack) 436 { 437 struct tcf_block *block = tp->chain->block; 438 struct flow_cls_offload cls_flower = {}; 439 bool skip_sw = tc_skip_sw(f->flags); 440 int err = 0; 441 442 cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts)); 443 if (!cls_flower.rule) 444 return -ENOMEM; 445 446 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack); 447 cls_flower.command = FLOW_CLS_REPLACE; 448 cls_flower.cookie = (unsigned long) f; 449 cls_flower.rule->match.dissector = &f->mask->dissector; 450 cls_flower.rule->match.mask = &f->mask->key; 451 cls_flower.rule->match.key = &f->mkey; 452 cls_flower.classid = f->res.classid; 453 454 err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts); 455 if (err) { 456 kfree(cls_flower.rule); 457 if (skip_sw) { 458 NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action"); 459 return err; 460 } 461 return 0; 462 } 463 464 err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, 465 skip_sw, &f->flags, &f->in_hw_count, rtnl_held); 466 tc_cleanup_flow_action(&cls_flower.rule->action); 467 kfree(cls_flower.rule); 468 469 if (err) { 470 fl_hw_destroy_filter(tp, f, rtnl_held, NULL); 471 return err; 472 } 473 474 if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW)) 475 return -EINVAL; 476 477 return 0; 478 } 479 480 static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f, 481 bool rtnl_held) 482 { 483 struct tcf_block *block = tp->chain->block; 484 struct flow_cls_offload cls_flower = {}; 485 486 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL); 487 cls_flower.command = FLOW_CLS_STATS; 488 cls_flower.cookie = (unsigned long) f; 489 cls_flower.classid = f->res.classid; 490 491 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, 492 rtnl_held); 493 494 tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes, 495 cls_flower.stats.pkts, 496 cls_flower.stats.drops, 497 cls_flower.stats.lastused, 498 cls_flower.stats.used_hw_stats, 499 cls_flower.stats.used_hw_stats_valid); 500 } 501 502 static void __fl_put(struct cls_fl_filter *f) 503 { 504 if (!refcount_dec_and_test(&f->refcnt)) 505 return; 506 507 if (tcf_exts_get_net(&f->exts)) 508 tcf_queue_work(&f->rwork, fl_destroy_filter_work); 509 else 510 __fl_destroy_filter(f); 511 } 512 513 static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle) 514 { 515 struct cls_fl_filter *f; 516 517 rcu_read_lock(); 518 f = idr_find(&head->handle_idr, handle); 519 if (f && !refcount_inc_not_zero(&f->refcnt)) 520 f = NULL; 521 rcu_read_unlock(); 522 523 return f; 524 } 525 526 static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f, 527 bool *last, bool rtnl_held, 528 struct netlink_ext_ack *extack) 529 { 530 struct cls_fl_head *head = fl_head_dereference(tp); 531 532 *last = false; 533 534 spin_lock(&tp->lock); 535 if (f->deleted) { 536 spin_unlock(&tp->lock); 537 return -ENOENT; 538 } 539 540 f->deleted = true; 541 rhashtable_remove_fast(&f->mask->ht, &f->ht_node, 542 f->mask->filter_ht_params); 543 idr_remove(&head->handle_idr, f->handle); 544 list_del_rcu(&f->list); 545 spin_unlock(&tp->lock); 546 547 *last = fl_mask_put(head, f->mask); 548 if (!tc_skip_hw(f->flags)) 549 fl_hw_destroy_filter(tp, f, rtnl_held, extack); 550 tcf_unbind_filter(tp, &f->res); 551 __fl_put(f); 552 553 return 0; 554 } 555 556 static void fl_destroy_sleepable(struct work_struct *work) 557 { 558 struct cls_fl_head *head = container_of(to_rcu_work(work), 559 struct cls_fl_head, 560 rwork); 561 562 rhashtable_destroy(&head->ht); 563 kfree(head); 564 module_put(THIS_MODULE); 565 } 566 567 static void fl_destroy(struct tcf_proto *tp, bool rtnl_held, 568 struct netlink_ext_ack *extack) 569 { 570 struct cls_fl_head *head = fl_head_dereference(tp); 571 struct fl_flow_mask *mask, *next_mask; 572 struct cls_fl_filter *f, *next; 573 bool last; 574 575 list_for_each_entry_safe(mask, next_mask, &head->masks, list) { 576 list_for_each_entry_safe(f, next, &mask->filters, list) { 577 __fl_delete(tp, f, &last, rtnl_held, extack); 578 if (last) 579 break; 580 } 581 } 582 idr_destroy(&head->handle_idr); 583 584 __module_get(THIS_MODULE); 585 tcf_queue_work(&head->rwork, fl_destroy_sleepable); 586 } 587 588 static void fl_put(struct tcf_proto *tp, void *arg) 589 { 590 struct cls_fl_filter *f = arg; 591 592 __fl_put(f); 593 } 594 595 static void *fl_get(struct tcf_proto *tp, u32 handle) 596 { 597 struct cls_fl_head *head = fl_head_dereference(tp); 598 599 return __fl_get(head, handle); 600 } 601 602 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { 603 [TCA_FLOWER_UNSPEC] = { .type = NLA_UNSPEC }, 604 [TCA_FLOWER_CLASSID] = { .type = NLA_U32 }, 605 [TCA_FLOWER_INDEV] = { .type = NLA_STRING, 606 .len = IFNAMSIZ }, 607 [TCA_FLOWER_KEY_ETH_DST] = { .len = ETH_ALEN }, 608 [TCA_FLOWER_KEY_ETH_DST_MASK] = { .len = ETH_ALEN }, 609 [TCA_FLOWER_KEY_ETH_SRC] = { .len = ETH_ALEN }, 610 [TCA_FLOWER_KEY_ETH_SRC_MASK] = { .len = ETH_ALEN }, 611 [TCA_FLOWER_KEY_ETH_TYPE] = { .type = NLA_U16 }, 612 [TCA_FLOWER_KEY_IP_PROTO] = { .type = NLA_U8 }, 613 [TCA_FLOWER_KEY_IPV4_SRC] = { .type = NLA_U32 }, 614 [TCA_FLOWER_KEY_IPV4_SRC_MASK] = { .type = NLA_U32 }, 615 [TCA_FLOWER_KEY_IPV4_DST] = { .type = NLA_U32 }, 616 [TCA_FLOWER_KEY_IPV4_DST_MASK] = { .type = NLA_U32 }, 617 [TCA_FLOWER_KEY_IPV6_SRC] = { .len = sizeof(struct in6_addr) }, 618 [TCA_FLOWER_KEY_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) }, 619 [TCA_FLOWER_KEY_IPV6_DST] = { .len = sizeof(struct in6_addr) }, 620 [TCA_FLOWER_KEY_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) }, 621 [TCA_FLOWER_KEY_TCP_SRC] = { .type = NLA_U16 }, 622 [TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 }, 623 [TCA_FLOWER_KEY_UDP_SRC] = { .type = NLA_U16 }, 624 [TCA_FLOWER_KEY_UDP_DST] = { .type = NLA_U16 }, 625 [TCA_FLOWER_KEY_VLAN_ID] = { .type = NLA_U16 }, 626 [TCA_FLOWER_KEY_VLAN_PRIO] = { .type = NLA_U8 }, 627 [TCA_FLOWER_KEY_VLAN_ETH_TYPE] = { .type = NLA_U16 }, 628 [TCA_FLOWER_KEY_ENC_KEY_ID] = { .type = NLA_U32 }, 629 [TCA_FLOWER_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 }, 630 [TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 }, 631 [TCA_FLOWER_KEY_ENC_IPV4_DST] = { .type = NLA_U32 }, 632 [TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 }, 633 [TCA_FLOWER_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) }, 634 [TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) }, 635 [TCA_FLOWER_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) }, 636 [TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) }, 637 [TCA_FLOWER_KEY_TCP_SRC_MASK] = { .type = NLA_U16 }, 638 [TCA_FLOWER_KEY_TCP_DST_MASK] = { .type = NLA_U16 }, 639 [TCA_FLOWER_KEY_UDP_SRC_MASK] = { .type = NLA_U16 }, 640 [TCA_FLOWER_KEY_UDP_DST_MASK] = { .type = NLA_U16 }, 641 [TCA_FLOWER_KEY_SCTP_SRC_MASK] = { .type = NLA_U16 }, 642 [TCA_FLOWER_KEY_SCTP_DST_MASK] = { .type = NLA_U16 }, 643 [TCA_FLOWER_KEY_SCTP_SRC] = { .type = NLA_U16 }, 644 [TCA_FLOWER_KEY_SCTP_DST] = { .type = NLA_U16 }, 645 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT] = { .type = NLA_U16 }, 646 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 }, 647 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NLA_U16 }, 648 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 }, 649 [TCA_FLOWER_KEY_FLAGS] = { .type = NLA_U32 }, 650 [TCA_FLOWER_KEY_FLAGS_MASK] = { .type = NLA_U32 }, 651 [TCA_FLOWER_KEY_ICMPV4_TYPE] = { .type = NLA_U8 }, 652 [TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 }, 653 [TCA_FLOWER_KEY_ICMPV4_CODE] = { .type = NLA_U8 }, 654 [TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 }, 655 [TCA_FLOWER_KEY_ICMPV6_TYPE] = { .type = NLA_U8 }, 656 [TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 }, 657 [TCA_FLOWER_KEY_ICMPV6_CODE] = { .type = NLA_U8 }, 658 [TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 }, 659 [TCA_FLOWER_KEY_ARP_SIP] = { .type = NLA_U32 }, 660 [TCA_FLOWER_KEY_ARP_SIP_MASK] = { .type = NLA_U32 }, 661 [TCA_FLOWER_KEY_ARP_TIP] = { .type = NLA_U32 }, 662 [TCA_FLOWER_KEY_ARP_TIP_MASK] = { .type = NLA_U32 }, 663 [TCA_FLOWER_KEY_ARP_OP] = { .type = NLA_U8 }, 664 [TCA_FLOWER_KEY_ARP_OP_MASK] = { .type = NLA_U8 }, 665 [TCA_FLOWER_KEY_ARP_SHA] = { .len = ETH_ALEN }, 666 [TCA_FLOWER_KEY_ARP_SHA_MASK] = { .len = ETH_ALEN }, 667 [TCA_FLOWER_KEY_ARP_THA] = { .len = ETH_ALEN }, 668 [TCA_FLOWER_KEY_ARP_THA_MASK] = { .len = ETH_ALEN }, 669 [TCA_FLOWER_KEY_MPLS_TTL] = { .type = NLA_U8 }, 670 [TCA_FLOWER_KEY_MPLS_BOS] = { .type = NLA_U8 }, 671 [TCA_FLOWER_KEY_MPLS_TC] = { .type = NLA_U8 }, 672 [TCA_FLOWER_KEY_MPLS_LABEL] = { .type = NLA_U32 }, 673 [TCA_FLOWER_KEY_MPLS_OPTS] = { .type = NLA_NESTED }, 674 [TCA_FLOWER_KEY_TCP_FLAGS] = { .type = NLA_U16 }, 675 [TCA_FLOWER_KEY_TCP_FLAGS_MASK] = { .type = NLA_U16 }, 676 [TCA_FLOWER_KEY_IP_TOS] = { .type = NLA_U8 }, 677 [TCA_FLOWER_KEY_IP_TOS_MASK] = { .type = NLA_U8 }, 678 [TCA_FLOWER_KEY_IP_TTL] = { .type = NLA_U8 }, 679 [TCA_FLOWER_KEY_IP_TTL_MASK] = { .type = NLA_U8 }, 680 [TCA_FLOWER_KEY_CVLAN_ID] = { .type = NLA_U16 }, 681 [TCA_FLOWER_KEY_CVLAN_PRIO] = { .type = NLA_U8 }, 682 [TCA_FLOWER_KEY_CVLAN_ETH_TYPE] = { .type = NLA_U16 }, 683 [TCA_FLOWER_KEY_ENC_IP_TOS] = { .type = NLA_U8 }, 684 [TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 }, 685 [TCA_FLOWER_KEY_ENC_IP_TTL] = { .type = NLA_U8 }, 686 [TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 }, 687 [TCA_FLOWER_KEY_ENC_OPTS] = { .type = NLA_NESTED }, 688 [TCA_FLOWER_KEY_ENC_OPTS_MASK] = { .type = NLA_NESTED }, 689 [TCA_FLOWER_KEY_CT_STATE] = { .type = NLA_U16 }, 690 [TCA_FLOWER_KEY_CT_STATE_MASK] = { .type = NLA_U16 }, 691 [TCA_FLOWER_KEY_CT_ZONE] = { .type = NLA_U16 }, 692 [TCA_FLOWER_KEY_CT_ZONE_MASK] = { .type = NLA_U16 }, 693 [TCA_FLOWER_KEY_CT_MARK] = { .type = NLA_U32 }, 694 [TCA_FLOWER_KEY_CT_MARK_MASK] = { .type = NLA_U32 }, 695 [TCA_FLOWER_KEY_CT_LABELS] = { .type = NLA_BINARY, 696 .len = 128 / BITS_PER_BYTE }, 697 [TCA_FLOWER_KEY_CT_LABELS_MASK] = { .type = NLA_BINARY, 698 .len = 128 / BITS_PER_BYTE }, 699 [TCA_FLOWER_FLAGS] = { .type = NLA_U32 }, 700 [TCA_FLOWER_KEY_HASH] = { .type = NLA_U32 }, 701 [TCA_FLOWER_KEY_HASH_MASK] = { .type = NLA_U32 }, 702 703 }; 704 705 static const struct nla_policy 706 enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = { 707 [TCA_FLOWER_KEY_ENC_OPTS_UNSPEC] = { 708 .strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN }, 709 [TCA_FLOWER_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED }, 710 [TCA_FLOWER_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED }, 711 [TCA_FLOWER_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED }, 712 }; 713 714 static const struct nla_policy 715 geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = { 716 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 }, 717 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 }, 718 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY, 719 .len = 128 }, 720 }; 721 722 static const struct nla_policy 723 vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = { 724 [TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP] = { .type = NLA_U32 }, 725 }; 726 727 static const struct nla_policy 728 erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = { 729 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER] = { .type = NLA_U8 }, 730 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX] = { .type = NLA_U32 }, 731 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] = { .type = NLA_U8 }, 732 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID] = { .type = NLA_U8 }, 733 }; 734 735 static const struct nla_policy 736 mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = { 737 [TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH] = { .type = NLA_U8 }, 738 [TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL] = { .type = NLA_U8 }, 739 [TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS] = { .type = NLA_U8 }, 740 [TCA_FLOWER_KEY_MPLS_OPT_LSE_TC] = { .type = NLA_U8 }, 741 [TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL] = { .type = NLA_U32 }, 742 }; 743 744 static void fl_set_key_val(struct nlattr **tb, 745 void *val, int val_type, 746 void *mask, int mask_type, int len) 747 { 748 if (!tb[val_type]) 749 return; 750 nla_memcpy(val, tb[val_type], len); 751 if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type]) 752 memset(mask, 0xff, len); 753 else 754 nla_memcpy(mask, tb[mask_type], len); 755 } 756 757 static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key, 758 struct fl_flow_key *mask, 759 struct netlink_ext_ack *extack) 760 { 761 fl_set_key_val(tb, &key->tp_range.tp_min.dst, 762 TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst, 763 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst)); 764 fl_set_key_val(tb, &key->tp_range.tp_max.dst, 765 TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst, 766 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst)); 767 fl_set_key_val(tb, &key->tp_range.tp_min.src, 768 TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src, 769 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src)); 770 fl_set_key_val(tb, &key->tp_range.tp_max.src, 771 TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src, 772 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src)); 773 774 if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst && 775 htons(key->tp_range.tp_max.dst) <= 776 htons(key->tp_range.tp_min.dst)) { 777 NL_SET_ERR_MSG_ATTR(extack, 778 tb[TCA_FLOWER_KEY_PORT_DST_MIN], 779 "Invalid destination port range (min must be strictly smaller than max)"); 780 return -EINVAL; 781 } 782 if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src && 783 htons(key->tp_range.tp_max.src) <= 784 htons(key->tp_range.tp_min.src)) { 785 NL_SET_ERR_MSG_ATTR(extack, 786 tb[TCA_FLOWER_KEY_PORT_SRC_MIN], 787 "Invalid source port range (min must be strictly smaller than max)"); 788 return -EINVAL; 789 } 790 791 return 0; 792 } 793 794 static int fl_set_key_mpls_lse(const struct nlattr *nla_lse, 795 struct flow_dissector_key_mpls *key_val, 796 struct flow_dissector_key_mpls *key_mask, 797 struct netlink_ext_ack *extack) 798 { 799 struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1]; 800 struct flow_dissector_mpls_lse *lse_mask; 801 struct flow_dissector_mpls_lse *lse_val; 802 u8 lse_index; 803 u8 depth; 804 int err; 805 806 err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse, 807 mpls_stack_entry_policy, extack); 808 if (err < 0) 809 return err; 810 811 if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) { 812 NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\""); 813 return -EINVAL; 814 } 815 816 depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]); 817 818 /* LSE depth starts at 1, for consistency with terminology used by 819 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets. 820 */ 821 if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) { 822 NL_SET_ERR_MSG_ATTR(extack, 823 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH], 824 "Invalid MPLS depth"); 825 return -EINVAL; 826 } 827 lse_index = depth - 1; 828 829 dissector_set_mpls_lse(key_val, lse_index); 830 dissector_set_mpls_lse(key_mask, lse_index); 831 832 lse_val = &key_val->ls[lse_index]; 833 lse_mask = &key_mask->ls[lse_index]; 834 835 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) { 836 lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]); 837 lse_mask->mpls_ttl = MPLS_TTL_MASK; 838 } 839 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) { 840 u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]); 841 842 if (bos & ~MPLS_BOS_MASK) { 843 NL_SET_ERR_MSG_ATTR(extack, 844 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS], 845 "Bottom Of Stack (BOS) must be 0 or 1"); 846 return -EINVAL; 847 } 848 lse_val->mpls_bos = bos; 849 lse_mask->mpls_bos = MPLS_BOS_MASK; 850 } 851 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) { 852 u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]); 853 854 if (tc & ~MPLS_TC_MASK) { 855 NL_SET_ERR_MSG_ATTR(extack, 856 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC], 857 "Traffic Class (TC) must be between 0 and 7"); 858 return -EINVAL; 859 } 860 lse_val->mpls_tc = tc; 861 lse_mask->mpls_tc = MPLS_TC_MASK; 862 } 863 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) { 864 u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]); 865 866 if (label & ~MPLS_LABEL_MASK) { 867 NL_SET_ERR_MSG_ATTR(extack, 868 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL], 869 "Label must be between 0 and 1048575"); 870 return -EINVAL; 871 } 872 lse_val->mpls_label = label; 873 lse_mask->mpls_label = MPLS_LABEL_MASK; 874 } 875 876 return 0; 877 } 878 879 static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts, 880 struct flow_dissector_key_mpls *key_val, 881 struct flow_dissector_key_mpls *key_mask, 882 struct netlink_ext_ack *extack) 883 { 884 struct nlattr *nla_lse; 885 int rem; 886 int err; 887 888 if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) { 889 NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts, 890 "NLA_F_NESTED is missing"); 891 return -EINVAL; 892 } 893 894 nla_for_each_nested(nla_lse, nla_mpls_opts, rem) { 895 if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) { 896 NL_SET_ERR_MSG_ATTR(extack, nla_lse, 897 "Invalid MPLS option type"); 898 return -EINVAL; 899 } 900 901 err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack); 902 if (err < 0) 903 return err; 904 } 905 if (rem) { 906 NL_SET_ERR_MSG(extack, 907 "Bytes leftover after parsing MPLS options"); 908 return -EINVAL; 909 } 910 911 return 0; 912 } 913 914 static int fl_set_key_mpls(struct nlattr **tb, 915 struct flow_dissector_key_mpls *key_val, 916 struct flow_dissector_key_mpls *key_mask, 917 struct netlink_ext_ack *extack) 918 { 919 struct flow_dissector_mpls_lse *lse_mask; 920 struct flow_dissector_mpls_lse *lse_val; 921 922 if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) { 923 if (tb[TCA_FLOWER_KEY_MPLS_TTL] || 924 tb[TCA_FLOWER_KEY_MPLS_BOS] || 925 tb[TCA_FLOWER_KEY_MPLS_TC] || 926 tb[TCA_FLOWER_KEY_MPLS_LABEL]) { 927 NL_SET_ERR_MSG_ATTR(extack, 928 tb[TCA_FLOWER_KEY_MPLS_OPTS], 929 "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute"); 930 return -EBADMSG; 931 } 932 933 return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS], 934 key_val, key_mask, extack); 935 } 936 937 lse_val = &key_val->ls[0]; 938 lse_mask = &key_mask->ls[0]; 939 940 if (tb[TCA_FLOWER_KEY_MPLS_TTL]) { 941 lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]); 942 lse_mask->mpls_ttl = MPLS_TTL_MASK; 943 dissector_set_mpls_lse(key_val, 0); 944 dissector_set_mpls_lse(key_mask, 0); 945 } 946 if (tb[TCA_FLOWER_KEY_MPLS_BOS]) { 947 u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]); 948 949 if (bos & ~MPLS_BOS_MASK) { 950 NL_SET_ERR_MSG_ATTR(extack, 951 tb[TCA_FLOWER_KEY_MPLS_BOS], 952 "Bottom Of Stack (BOS) must be 0 or 1"); 953 return -EINVAL; 954 } 955 lse_val->mpls_bos = bos; 956 lse_mask->mpls_bos = MPLS_BOS_MASK; 957 dissector_set_mpls_lse(key_val, 0); 958 dissector_set_mpls_lse(key_mask, 0); 959 } 960 if (tb[TCA_FLOWER_KEY_MPLS_TC]) { 961 u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]); 962 963 if (tc & ~MPLS_TC_MASK) { 964 NL_SET_ERR_MSG_ATTR(extack, 965 tb[TCA_FLOWER_KEY_MPLS_TC], 966 "Traffic Class (TC) must be between 0 and 7"); 967 return -EINVAL; 968 } 969 lse_val->mpls_tc = tc; 970 lse_mask->mpls_tc = MPLS_TC_MASK; 971 dissector_set_mpls_lse(key_val, 0); 972 dissector_set_mpls_lse(key_mask, 0); 973 } 974 if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) { 975 u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]); 976 977 if (label & ~MPLS_LABEL_MASK) { 978 NL_SET_ERR_MSG_ATTR(extack, 979 tb[TCA_FLOWER_KEY_MPLS_LABEL], 980 "Label must be between 0 and 1048575"); 981 return -EINVAL; 982 } 983 lse_val->mpls_label = label; 984 lse_mask->mpls_label = MPLS_LABEL_MASK; 985 dissector_set_mpls_lse(key_val, 0); 986 dissector_set_mpls_lse(key_mask, 0); 987 } 988 return 0; 989 } 990 991 static void fl_set_key_vlan(struct nlattr **tb, 992 __be16 ethertype, 993 int vlan_id_key, int vlan_prio_key, 994 struct flow_dissector_key_vlan *key_val, 995 struct flow_dissector_key_vlan *key_mask) 996 { 997 #define VLAN_PRIORITY_MASK 0x7 998 999 if (tb[vlan_id_key]) { 1000 key_val->vlan_id = 1001 nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK; 1002 key_mask->vlan_id = VLAN_VID_MASK; 1003 } 1004 if (tb[vlan_prio_key]) { 1005 key_val->vlan_priority = 1006 nla_get_u8(tb[vlan_prio_key]) & 1007 VLAN_PRIORITY_MASK; 1008 key_mask->vlan_priority = VLAN_PRIORITY_MASK; 1009 } 1010 key_val->vlan_tpid = ethertype; 1011 key_mask->vlan_tpid = cpu_to_be16(~0); 1012 } 1013 1014 static void fl_set_key_flag(u32 flower_key, u32 flower_mask, 1015 u32 *dissector_key, u32 *dissector_mask, 1016 u32 flower_flag_bit, u32 dissector_flag_bit) 1017 { 1018 if (flower_mask & flower_flag_bit) { 1019 *dissector_mask |= dissector_flag_bit; 1020 if (flower_key & flower_flag_bit) 1021 *dissector_key |= dissector_flag_bit; 1022 } 1023 } 1024 1025 static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key, 1026 u32 *flags_mask, struct netlink_ext_ack *extack) 1027 { 1028 u32 key, mask; 1029 1030 /* mask is mandatory for flags */ 1031 if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) { 1032 NL_SET_ERR_MSG(extack, "Missing flags mask"); 1033 return -EINVAL; 1034 } 1035 1036 key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS])); 1037 mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK])); 1038 1039 *flags_key = 0; 1040 *flags_mask = 0; 1041 1042 fl_set_key_flag(key, mask, flags_key, flags_mask, 1043 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT); 1044 fl_set_key_flag(key, mask, flags_key, flags_mask, 1045 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST, 1046 FLOW_DIS_FIRST_FRAG); 1047 1048 return 0; 1049 } 1050 1051 static void fl_set_key_ip(struct nlattr **tb, bool encap, 1052 struct flow_dissector_key_ip *key, 1053 struct flow_dissector_key_ip *mask) 1054 { 1055 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS; 1056 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL; 1057 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK; 1058 int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK; 1059 1060 fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)); 1061 fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)); 1062 } 1063 1064 static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key, 1065 int depth, int option_len, 1066 struct netlink_ext_ack *extack) 1067 { 1068 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1]; 1069 struct nlattr *class = NULL, *type = NULL, *data = NULL; 1070 struct geneve_opt *opt; 1071 int err, data_len = 0; 1072 1073 if (option_len > sizeof(struct geneve_opt)) 1074 data_len = option_len - sizeof(struct geneve_opt); 1075 1076 opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len]; 1077 memset(opt, 0xff, option_len); 1078 opt->length = data_len / 4; 1079 opt->r1 = 0; 1080 opt->r2 = 0; 1081 opt->r3 = 0; 1082 1083 /* If no mask has been prodived we assume an exact match. */ 1084 if (!depth) 1085 return sizeof(struct geneve_opt) + data_len; 1086 1087 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) { 1088 NL_SET_ERR_MSG(extack, "Non-geneve option type for mask"); 1089 return -EINVAL; 1090 } 1091 1092 err = nla_parse_nested_deprecated(tb, 1093 TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX, 1094 nla, geneve_opt_policy, extack); 1095 if (err < 0) 1096 return err; 1097 1098 /* We are not allowed to omit any of CLASS, TYPE or DATA 1099 * fields from the key. 1100 */ 1101 if (!option_len && 1102 (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] || 1103 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] || 1104 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) { 1105 NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data"); 1106 return -EINVAL; 1107 } 1108 1109 /* Omitting any of CLASS, TYPE or DATA fields is allowed 1110 * for the mask. 1111 */ 1112 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) { 1113 int new_len = key->enc_opts.len; 1114 1115 data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]; 1116 data_len = nla_len(data); 1117 if (data_len < 4) { 1118 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long"); 1119 return -ERANGE; 1120 } 1121 if (data_len % 4) { 1122 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long"); 1123 return -ERANGE; 1124 } 1125 1126 new_len += sizeof(struct geneve_opt) + data_len; 1127 BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX); 1128 if (new_len > FLOW_DIS_TUN_OPTS_MAX) { 1129 NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size"); 1130 return -ERANGE; 1131 } 1132 opt->length = data_len / 4; 1133 memcpy(opt->opt_data, nla_data(data), data_len); 1134 } 1135 1136 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) { 1137 class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]; 1138 opt->opt_class = nla_get_be16(class); 1139 } 1140 1141 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) { 1142 type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]; 1143 opt->type = nla_get_u8(type); 1144 } 1145 1146 return sizeof(struct geneve_opt) + data_len; 1147 } 1148 1149 static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key, 1150 int depth, int option_len, 1151 struct netlink_ext_ack *extack) 1152 { 1153 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1]; 1154 struct vxlan_metadata *md; 1155 int err; 1156 1157 md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len]; 1158 memset(md, 0xff, sizeof(*md)); 1159 1160 if (!depth) 1161 return sizeof(*md); 1162 1163 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) { 1164 NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask"); 1165 return -EINVAL; 1166 } 1167 1168 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla, 1169 vxlan_opt_policy, extack); 1170 if (err < 0) 1171 return err; 1172 1173 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) { 1174 NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp"); 1175 return -EINVAL; 1176 } 1177 1178 if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) { 1179 md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]); 1180 md->gbp &= VXLAN_GBP_MASK; 1181 } 1182 1183 return sizeof(*md); 1184 } 1185 1186 static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key, 1187 int depth, int option_len, 1188 struct netlink_ext_ack *extack) 1189 { 1190 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1]; 1191 struct erspan_metadata *md; 1192 int err; 1193 1194 md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len]; 1195 memset(md, 0xff, sizeof(*md)); 1196 md->version = 1; 1197 1198 if (!depth) 1199 return sizeof(*md); 1200 1201 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) { 1202 NL_SET_ERR_MSG(extack, "Non-erspan option type for mask"); 1203 return -EINVAL; 1204 } 1205 1206 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla, 1207 erspan_opt_policy, extack); 1208 if (err < 0) 1209 return err; 1210 1211 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) { 1212 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver"); 1213 return -EINVAL; 1214 } 1215 1216 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) 1217 md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]); 1218 1219 if (md->version == 1) { 1220 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) { 1221 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index"); 1222 return -EINVAL; 1223 } 1224 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) { 1225 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]; 1226 memset(&md->u, 0x00, sizeof(md->u)); 1227 md->u.index = nla_get_be32(nla); 1228 } 1229 } else if (md->version == 2) { 1230 if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] || 1231 !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) { 1232 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid"); 1233 return -EINVAL; 1234 } 1235 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) { 1236 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]; 1237 md->u.md2.dir = nla_get_u8(nla); 1238 } 1239 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) { 1240 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]; 1241 set_hwid(&md->u.md2, nla_get_u8(nla)); 1242 } 1243 } else { 1244 NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect"); 1245 return -EINVAL; 1246 } 1247 1248 return sizeof(*md); 1249 } 1250 1251 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key, 1252 struct fl_flow_key *mask, 1253 struct netlink_ext_ack *extack) 1254 { 1255 const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL; 1256 int err, option_len, key_depth, msk_depth = 0; 1257 1258 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS], 1259 TCA_FLOWER_KEY_ENC_OPTS_MAX, 1260 enc_opts_policy, extack); 1261 if (err) 1262 return err; 1263 1264 nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]); 1265 1266 if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) { 1267 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK], 1268 TCA_FLOWER_KEY_ENC_OPTS_MAX, 1269 enc_opts_policy, extack); 1270 if (err) 1271 return err; 1272 1273 nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]); 1274 msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]); 1275 } 1276 1277 nla_for_each_attr(nla_opt_key, nla_enc_key, 1278 nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) { 1279 switch (nla_type(nla_opt_key)) { 1280 case TCA_FLOWER_KEY_ENC_OPTS_GENEVE: 1281 if (key->enc_opts.dst_opt_type && 1282 key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) { 1283 NL_SET_ERR_MSG(extack, "Duplicate type for geneve options"); 1284 return -EINVAL; 1285 } 1286 option_len = 0; 1287 key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT; 1288 option_len = fl_set_geneve_opt(nla_opt_key, key, 1289 key_depth, option_len, 1290 extack); 1291 if (option_len < 0) 1292 return option_len; 1293 1294 key->enc_opts.len += option_len; 1295 /* At the same time we need to parse through the mask 1296 * in order to verify exact and mask attribute lengths. 1297 */ 1298 mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT; 1299 option_len = fl_set_geneve_opt(nla_opt_msk, mask, 1300 msk_depth, option_len, 1301 extack); 1302 if (option_len < 0) 1303 return option_len; 1304 1305 mask->enc_opts.len += option_len; 1306 if (key->enc_opts.len != mask->enc_opts.len) { 1307 NL_SET_ERR_MSG(extack, "Key and mask miss aligned"); 1308 return -EINVAL; 1309 } 1310 1311 if (msk_depth) 1312 nla_opt_msk = nla_next(nla_opt_msk, &msk_depth); 1313 break; 1314 case TCA_FLOWER_KEY_ENC_OPTS_VXLAN: 1315 if (key->enc_opts.dst_opt_type) { 1316 NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options"); 1317 return -EINVAL; 1318 } 1319 option_len = 0; 1320 key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT; 1321 option_len = fl_set_vxlan_opt(nla_opt_key, key, 1322 key_depth, option_len, 1323 extack); 1324 if (option_len < 0) 1325 return option_len; 1326 1327 key->enc_opts.len += option_len; 1328 /* At the same time we need to parse through the mask 1329 * in order to verify exact and mask attribute lengths. 1330 */ 1331 mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT; 1332 option_len = fl_set_vxlan_opt(nla_opt_msk, mask, 1333 msk_depth, option_len, 1334 extack); 1335 if (option_len < 0) 1336 return option_len; 1337 1338 mask->enc_opts.len += option_len; 1339 if (key->enc_opts.len != mask->enc_opts.len) { 1340 NL_SET_ERR_MSG(extack, "Key and mask miss aligned"); 1341 return -EINVAL; 1342 } 1343 1344 if (msk_depth) 1345 nla_opt_msk = nla_next(nla_opt_msk, &msk_depth); 1346 break; 1347 case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN: 1348 if (key->enc_opts.dst_opt_type) { 1349 NL_SET_ERR_MSG(extack, "Duplicate type for erspan options"); 1350 return -EINVAL; 1351 } 1352 option_len = 0; 1353 key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT; 1354 option_len = fl_set_erspan_opt(nla_opt_key, key, 1355 key_depth, option_len, 1356 extack); 1357 if (option_len < 0) 1358 return option_len; 1359 1360 key->enc_opts.len += option_len; 1361 /* At the same time we need to parse through the mask 1362 * in order to verify exact and mask attribute lengths. 1363 */ 1364 mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT; 1365 option_len = fl_set_erspan_opt(nla_opt_msk, mask, 1366 msk_depth, option_len, 1367 extack); 1368 if (option_len < 0) 1369 return option_len; 1370 1371 mask->enc_opts.len += option_len; 1372 if (key->enc_opts.len != mask->enc_opts.len) { 1373 NL_SET_ERR_MSG(extack, "Key and mask miss aligned"); 1374 return -EINVAL; 1375 } 1376 1377 if (msk_depth) 1378 nla_opt_msk = nla_next(nla_opt_msk, &msk_depth); 1379 break; 1380 default: 1381 NL_SET_ERR_MSG(extack, "Unknown tunnel option type"); 1382 return -EINVAL; 1383 } 1384 } 1385 1386 return 0; 1387 } 1388 1389 static int fl_set_key_ct(struct nlattr **tb, 1390 struct flow_dissector_key_ct *key, 1391 struct flow_dissector_key_ct *mask, 1392 struct netlink_ext_ack *extack) 1393 { 1394 if (tb[TCA_FLOWER_KEY_CT_STATE]) { 1395 if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) { 1396 NL_SET_ERR_MSG(extack, "Conntrack isn't enabled"); 1397 return -EOPNOTSUPP; 1398 } 1399 fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE, 1400 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK, 1401 sizeof(key->ct_state)); 1402 } 1403 if (tb[TCA_FLOWER_KEY_CT_ZONE]) { 1404 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) { 1405 NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled"); 1406 return -EOPNOTSUPP; 1407 } 1408 fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE, 1409 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK, 1410 sizeof(key->ct_zone)); 1411 } 1412 if (tb[TCA_FLOWER_KEY_CT_MARK]) { 1413 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) { 1414 NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled"); 1415 return -EOPNOTSUPP; 1416 } 1417 fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK, 1418 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK, 1419 sizeof(key->ct_mark)); 1420 } 1421 if (tb[TCA_FLOWER_KEY_CT_LABELS]) { 1422 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) { 1423 NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled"); 1424 return -EOPNOTSUPP; 1425 } 1426 fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS, 1427 mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK, 1428 sizeof(key->ct_labels)); 1429 } 1430 1431 return 0; 1432 } 1433 1434 static int fl_set_key(struct net *net, struct nlattr **tb, 1435 struct fl_flow_key *key, struct fl_flow_key *mask, 1436 struct netlink_ext_ack *extack) 1437 { 1438 __be16 ethertype; 1439 int ret = 0; 1440 1441 if (tb[TCA_FLOWER_INDEV]) { 1442 int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack); 1443 if (err < 0) 1444 return err; 1445 key->meta.ingress_ifindex = err; 1446 mask->meta.ingress_ifindex = 0xffffffff; 1447 } 1448 1449 fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST, 1450 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK, 1451 sizeof(key->eth.dst)); 1452 fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC, 1453 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK, 1454 sizeof(key->eth.src)); 1455 1456 if (tb[TCA_FLOWER_KEY_ETH_TYPE]) { 1457 ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]); 1458 1459 if (eth_type_vlan(ethertype)) { 1460 fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID, 1461 TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, 1462 &mask->vlan); 1463 1464 if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) { 1465 ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]); 1466 if (eth_type_vlan(ethertype)) { 1467 fl_set_key_vlan(tb, ethertype, 1468 TCA_FLOWER_KEY_CVLAN_ID, 1469 TCA_FLOWER_KEY_CVLAN_PRIO, 1470 &key->cvlan, &mask->cvlan); 1471 fl_set_key_val(tb, &key->basic.n_proto, 1472 TCA_FLOWER_KEY_CVLAN_ETH_TYPE, 1473 &mask->basic.n_proto, 1474 TCA_FLOWER_UNSPEC, 1475 sizeof(key->basic.n_proto)); 1476 } else { 1477 key->basic.n_proto = ethertype; 1478 mask->basic.n_proto = cpu_to_be16(~0); 1479 } 1480 } 1481 } else { 1482 key->basic.n_proto = ethertype; 1483 mask->basic.n_proto = cpu_to_be16(~0); 1484 } 1485 } 1486 1487 if (key->basic.n_proto == htons(ETH_P_IP) || 1488 key->basic.n_proto == htons(ETH_P_IPV6)) { 1489 fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO, 1490 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC, 1491 sizeof(key->basic.ip_proto)); 1492 fl_set_key_ip(tb, false, &key->ip, &mask->ip); 1493 } 1494 1495 if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) { 1496 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; 1497 mask->control.addr_type = ~0; 1498 fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC, 1499 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK, 1500 sizeof(key->ipv4.src)); 1501 fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST, 1502 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK, 1503 sizeof(key->ipv4.dst)); 1504 } else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) { 1505 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 1506 mask->control.addr_type = ~0; 1507 fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC, 1508 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK, 1509 sizeof(key->ipv6.src)); 1510 fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST, 1511 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK, 1512 sizeof(key->ipv6.dst)); 1513 } 1514 1515 if (key->basic.ip_proto == IPPROTO_TCP) { 1516 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC, 1517 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK, 1518 sizeof(key->tp.src)); 1519 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST, 1520 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK, 1521 sizeof(key->tp.dst)); 1522 fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS, 1523 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK, 1524 sizeof(key->tcp.flags)); 1525 } else if (key->basic.ip_proto == IPPROTO_UDP) { 1526 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC, 1527 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK, 1528 sizeof(key->tp.src)); 1529 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST, 1530 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK, 1531 sizeof(key->tp.dst)); 1532 } else if (key->basic.ip_proto == IPPROTO_SCTP) { 1533 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC, 1534 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK, 1535 sizeof(key->tp.src)); 1536 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST, 1537 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK, 1538 sizeof(key->tp.dst)); 1539 } else if (key->basic.n_proto == htons(ETH_P_IP) && 1540 key->basic.ip_proto == IPPROTO_ICMP) { 1541 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE, 1542 &mask->icmp.type, 1543 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK, 1544 sizeof(key->icmp.type)); 1545 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE, 1546 &mask->icmp.code, 1547 TCA_FLOWER_KEY_ICMPV4_CODE_MASK, 1548 sizeof(key->icmp.code)); 1549 } else if (key->basic.n_proto == htons(ETH_P_IPV6) && 1550 key->basic.ip_proto == IPPROTO_ICMPV6) { 1551 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE, 1552 &mask->icmp.type, 1553 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK, 1554 sizeof(key->icmp.type)); 1555 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE, 1556 &mask->icmp.code, 1557 TCA_FLOWER_KEY_ICMPV6_CODE_MASK, 1558 sizeof(key->icmp.code)); 1559 } else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) || 1560 key->basic.n_proto == htons(ETH_P_MPLS_MC)) { 1561 ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack); 1562 if (ret) 1563 return ret; 1564 } else if (key->basic.n_proto == htons(ETH_P_ARP) || 1565 key->basic.n_proto == htons(ETH_P_RARP)) { 1566 fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP, 1567 &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK, 1568 sizeof(key->arp.sip)); 1569 fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP, 1570 &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK, 1571 sizeof(key->arp.tip)); 1572 fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP, 1573 &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK, 1574 sizeof(key->arp.op)); 1575 fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA, 1576 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK, 1577 sizeof(key->arp.sha)); 1578 fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA, 1579 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK, 1580 sizeof(key->arp.tha)); 1581 } 1582 1583 if (key->basic.ip_proto == IPPROTO_TCP || 1584 key->basic.ip_proto == IPPROTO_UDP || 1585 key->basic.ip_proto == IPPROTO_SCTP) { 1586 ret = fl_set_key_port_range(tb, key, mask, extack); 1587 if (ret) 1588 return ret; 1589 } 1590 1591 if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] || 1592 tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) { 1593 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; 1594 mask->enc_control.addr_type = ~0; 1595 fl_set_key_val(tb, &key->enc_ipv4.src, 1596 TCA_FLOWER_KEY_ENC_IPV4_SRC, 1597 &mask->enc_ipv4.src, 1598 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK, 1599 sizeof(key->enc_ipv4.src)); 1600 fl_set_key_val(tb, &key->enc_ipv4.dst, 1601 TCA_FLOWER_KEY_ENC_IPV4_DST, 1602 &mask->enc_ipv4.dst, 1603 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK, 1604 sizeof(key->enc_ipv4.dst)); 1605 } 1606 1607 if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] || 1608 tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) { 1609 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 1610 mask->enc_control.addr_type = ~0; 1611 fl_set_key_val(tb, &key->enc_ipv6.src, 1612 TCA_FLOWER_KEY_ENC_IPV6_SRC, 1613 &mask->enc_ipv6.src, 1614 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK, 1615 sizeof(key->enc_ipv6.src)); 1616 fl_set_key_val(tb, &key->enc_ipv6.dst, 1617 TCA_FLOWER_KEY_ENC_IPV6_DST, 1618 &mask->enc_ipv6.dst, 1619 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK, 1620 sizeof(key->enc_ipv6.dst)); 1621 } 1622 1623 fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID, 1624 &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC, 1625 sizeof(key->enc_key_id.keyid)); 1626 1627 fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT, 1628 &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK, 1629 sizeof(key->enc_tp.src)); 1630 1631 fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT, 1632 &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK, 1633 sizeof(key->enc_tp.dst)); 1634 1635 fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip); 1636 1637 fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH, 1638 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK, 1639 sizeof(key->hash.hash)); 1640 1641 if (tb[TCA_FLOWER_KEY_ENC_OPTS]) { 1642 ret = fl_set_enc_opt(tb, key, mask, extack); 1643 if (ret) 1644 return ret; 1645 } 1646 1647 ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack); 1648 if (ret) 1649 return ret; 1650 1651 if (tb[TCA_FLOWER_KEY_FLAGS]) 1652 ret = fl_set_key_flags(tb, &key->control.flags, 1653 &mask->control.flags, extack); 1654 1655 return ret; 1656 } 1657 1658 static void fl_mask_copy(struct fl_flow_mask *dst, 1659 struct fl_flow_mask *src) 1660 { 1661 const void *psrc = fl_key_get_start(&src->key, src); 1662 void *pdst = fl_key_get_start(&dst->key, src); 1663 1664 memcpy(pdst, psrc, fl_mask_range(src)); 1665 dst->range = src->range; 1666 } 1667 1668 static const struct rhashtable_params fl_ht_params = { 1669 .key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */ 1670 .head_offset = offsetof(struct cls_fl_filter, ht_node), 1671 .automatic_shrinking = true, 1672 }; 1673 1674 static int fl_init_mask_hashtable(struct fl_flow_mask *mask) 1675 { 1676 mask->filter_ht_params = fl_ht_params; 1677 mask->filter_ht_params.key_len = fl_mask_range(mask); 1678 mask->filter_ht_params.key_offset += mask->range.start; 1679 1680 return rhashtable_init(&mask->ht, &mask->filter_ht_params); 1681 } 1682 1683 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member) 1684 #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member) 1685 1686 #define FL_KEY_IS_MASKED(mask, member) \ 1687 memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \ 1688 0, FL_KEY_MEMBER_SIZE(member)) \ 1689 1690 #define FL_KEY_SET(keys, cnt, id, member) \ 1691 do { \ 1692 keys[cnt].key_id = id; \ 1693 keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \ 1694 cnt++; \ 1695 } while(0); 1696 1697 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \ 1698 do { \ 1699 if (FL_KEY_IS_MASKED(mask, member)) \ 1700 FL_KEY_SET(keys, cnt, id, member); \ 1701 } while(0); 1702 1703 static void fl_init_dissector(struct flow_dissector *dissector, 1704 struct fl_flow_key *mask) 1705 { 1706 struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX]; 1707 size_t cnt = 0; 1708 1709 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1710 FLOW_DISSECTOR_KEY_META, meta); 1711 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control); 1712 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic); 1713 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1714 FLOW_DISSECTOR_KEY_ETH_ADDRS, eth); 1715 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1716 FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4); 1717 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1718 FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6); 1719 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1720 FLOW_DISSECTOR_KEY_PORTS, tp); 1721 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1722 FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range); 1723 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1724 FLOW_DISSECTOR_KEY_IP, ip); 1725 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1726 FLOW_DISSECTOR_KEY_TCP, tcp); 1727 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1728 FLOW_DISSECTOR_KEY_ICMP, icmp); 1729 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1730 FLOW_DISSECTOR_KEY_ARP, arp); 1731 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1732 FLOW_DISSECTOR_KEY_MPLS, mpls); 1733 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1734 FLOW_DISSECTOR_KEY_VLAN, vlan); 1735 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1736 FLOW_DISSECTOR_KEY_CVLAN, cvlan); 1737 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1738 FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id); 1739 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1740 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4); 1741 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1742 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6); 1743 if (FL_KEY_IS_MASKED(mask, enc_ipv4) || 1744 FL_KEY_IS_MASKED(mask, enc_ipv6)) 1745 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL, 1746 enc_control); 1747 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1748 FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp); 1749 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1750 FLOW_DISSECTOR_KEY_ENC_IP, enc_ip); 1751 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1752 FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts); 1753 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1754 FLOW_DISSECTOR_KEY_CT, ct); 1755 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1756 FLOW_DISSECTOR_KEY_HASH, hash); 1757 1758 skb_flow_dissector_init(dissector, keys, cnt); 1759 } 1760 1761 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head, 1762 struct fl_flow_mask *mask) 1763 { 1764 struct fl_flow_mask *newmask; 1765 int err; 1766 1767 newmask = kzalloc(sizeof(*newmask), GFP_KERNEL); 1768 if (!newmask) 1769 return ERR_PTR(-ENOMEM); 1770 1771 fl_mask_copy(newmask, mask); 1772 1773 if ((newmask->key.tp_range.tp_min.dst && 1774 newmask->key.tp_range.tp_max.dst) || 1775 (newmask->key.tp_range.tp_min.src && 1776 newmask->key.tp_range.tp_max.src)) 1777 newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE; 1778 1779 err = fl_init_mask_hashtable(newmask); 1780 if (err) 1781 goto errout_free; 1782 1783 fl_init_dissector(&newmask->dissector, &newmask->key); 1784 1785 INIT_LIST_HEAD_RCU(&newmask->filters); 1786 1787 refcount_set(&newmask->refcnt, 1); 1788 err = rhashtable_replace_fast(&head->ht, &mask->ht_node, 1789 &newmask->ht_node, mask_ht_params); 1790 if (err) 1791 goto errout_destroy; 1792 1793 spin_lock(&head->masks_lock); 1794 list_add_tail_rcu(&newmask->list, &head->masks); 1795 spin_unlock(&head->masks_lock); 1796 1797 return newmask; 1798 1799 errout_destroy: 1800 rhashtable_destroy(&newmask->ht); 1801 errout_free: 1802 kfree(newmask); 1803 1804 return ERR_PTR(err); 1805 } 1806 1807 static int fl_check_assign_mask(struct cls_fl_head *head, 1808 struct cls_fl_filter *fnew, 1809 struct cls_fl_filter *fold, 1810 struct fl_flow_mask *mask) 1811 { 1812 struct fl_flow_mask *newmask; 1813 int ret = 0; 1814 1815 rcu_read_lock(); 1816 1817 /* Insert mask as temporary node to prevent concurrent creation of mask 1818 * with same key. Any concurrent lookups with same key will return 1819 * -EAGAIN because mask's refcnt is zero. 1820 */ 1821 fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht, 1822 &mask->ht_node, 1823 mask_ht_params); 1824 if (!fnew->mask) { 1825 rcu_read_unlock(); 1826 1827 if (fold) { 1828 ret = -EINVAL; 1829 goto errout_cleanup; 1830 } 1831 1832 newmask = fl_create_new_mask(head, mask); 1833 if (IS_ERR(newmask)) { 1834 ret = PTR_ERR(newmask); 1835 goto errout_cleanup; 1836 } 1837 1838 fnew->mask = newmask; 1839 return 0; 1840 } else if (IS_ERR(fnew->mask)) { 1841 ret = PTR_ERR(fnew->mask); 1842 } else if (fold && fold->mask != fnew->mask) { 1843 ret = -EINVAL; 1844 } else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) { 1845 /* Mask was deleted concurrently, try again */ 1846 ret = -EAGAIN; 1847 } 1848 rcu_read_unlock(); 1849 return ret; 1850 1851 errout_cleanup: 1852 rhashtable_remove_fast(&head->ht, &mask->ht_node, 1853 mask_ht_params); 1854 return ret; 1855 } 1856 1857 static int fl_set_parms(struct net *net, struct tcf_proto *tp, 1858 struct cls_fl_filter *f, struct fl_flow_mask *mask, 1859 unsigned long base, struct nlattr **tb, 1860 struct nlattr *est, bool ovr, 1861 struct fl_flow_tmplt *tmplt, bool rtnl_held, 1862 struct netlink_ext_ack *extack) 1863 { 1864 int err; 1865 1866 err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held, 1867 extack); 1868 if (err < 0) 1869 return err; 1870 1871 if (tb[TCA_FLOWER_CLASSID]) { 1872 f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]); 1873 if (!rtnl_held) 1874 rtnl_lock(); 1875 tcf_bind_filter(tp, &f->res, base); 1876 if (!rtnl_held) 1877 rtnl_unlock(); 1878 } 1879 1880 err = fl_set_key(net, tb, &f->key, &mask->key, extack); 1881 if (err) 1882 return err; 1883 1884 fl_mask_update_range(mask); 1885 fl_set_masked_key(&f->mkey, &f->key, mask); 1886 1887 if (!fl_mask_fits_tmplt(tmplt, mask)) { 1888 NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template"); 1889 return -EINVAL; 1890 } 1891 1892 return 0; 1893 } 1894 1895 static int fl_ht_insert_unique(struct cls_fl_filter *fnew, 1896 struct cls_fl_filter *fold, 1897 bool *in_ht) 1898 { 1899 struct fl_flow_mask *mask = fnew->mask; 1900 int err; 1901 1902 err = rhashtable_lookup_insert_fast(&mask->ht, 1903 &fnew->ht_node, 1904 mask->filter_ht_params); 1905 if (err) { 1906 *in_ht = false; 1907 /* It is okay if filter with same key exists when 1908 * overwriting. 1909 */ 1910 return fold && err == -EEXIST ? 0 : err; 1911 } 1912 1913 *in_ht = true; 1914 return 0; 1915 } 1916 1917 static int fl_change(struct net *net, struct sk_buff *in_skb, 1918 struct tcf_proto *tp, unsigned long base, 1919 u32 handle, struct nlattr **tca, 1920 void **arg, bool ovr, bool rtnl_held, 1921 struct netlink_ext_ack *extack) 1922 { 1923 struct cls_fl_head *head = fl_head_dereference(tp); 1924 struct cls_fl_filter *fold = *arg; 1925 struct cls_fl_filter *fnew; 1926 struct fl_flow_mask *mask; 1927 struct nlattr **tb; 1928 bool in_ht; 1929 int err; 1930 1931 if (!tca[TCA_OPTIONS]) { 1932 err = -EINVAL; 1933 goto errout_fold; 1934 } 1935 1936 mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL); 1937 if (!mask) { 1938 err = -ENOBUFS; 1939 goto errout_fold; 1940 } 1941 1942 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL); 1943 if (!tb) { 1944 err = -ENOBUFS; 1945 goto errout_mask_alloc; 1946 } 1947 1948 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX, 1949 tca[TCA_OPTIONS], fl_policy, NULL); 1950 if (err < 0) 1951 goto errout_tb; 1952 1953 if (fold && handle && fold->handle != handle) { 1954 err = -EINVAL; 1955 goto errout_tb; 1956 } 1957 1958 fnew = kzalloc(sizeof(*fnew), GFP_KERNEL); 1959 if (!fnew) { 1960 err = -ENOBUFS; 1961 goto errout_tb; 1962 } 1963 INIT_LIST_HEAD(&fnew->hw_list); 1964 refcount_set(&fnew->refcnt, 1); 1965 1966 err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0); 1967 if (err < 0) 1968 goto errout; 1969 1970 if (tb[TCA_FLOWER_FLAGS]) { 1971 fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]); 1972 1973 if (!tc_flags_valid(fnew->flags)) { 1974 err = -EINVAL; 1975 goto errout; 1976 } 1977 } 1978 1979 err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr, 1980 tp->chain->tmplt_priv, rtnl_held, extack); 1981 if (err) 1982 goto errout; 1983 1984 err = fl_check_assign_mask(head, fnew, fold, mask); 1985 if (err) 1986 goto errout; 1987 1988 err = fl_ht_insert_unique(fnew, fold, &in_ht); 1989 if (err) 1990 goto errout_mask; 1991 1992 if (!tc_skip_hw(fnew->flags)) { 1993 err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack); 1994 if (err) 1995 goto errout_ht; 1996 } 1997 1998 if (!tc_in_hw(fnew->flags)) 1999 fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW; 2000 2001 spin_lock(&tp->lock); 2002 2003 /* tp was deleted concurrently. -EAGAIN will cause caller to lookup 2004 * proto again or create new one, if necessary. 2005 */ 2006 if (tp->deleting) { 2007 err = -EAGAIN; 2008 goto errout_hw; 2009 } 2010 2011 if (fold) { 2012 /* Fold filter was deleted concurrently. Retry lookup. */ 2013 if (fold->deleted) { 2014 err = -EAGAIN; 2015 goto errout_hw; 2016 } 2017 2018 fnew->handle = handle; 2019 2020 if (!in_ht) { 2021 struct rhashtable_params params = 2022 fnew->mask->filter_ht_params; 2023 2024 err = rhashtable_insert_fast(&fnew->mask->ht, 2025 &fnew->ht_node, 2026 params); 2027 if (err) 2028 goto errout_hw; 2029 in_ht = true; 2030 } 2031 2032 refcount_inc(&fnew->refcnt); 2033 rhashtable_remove_fast(&fold->mask->ht, 2034 &fold->ht_node, 2035 fold->mask->filter_ht_params); 2036 idr_replace(&head->handle_idr, fnew, fnew->handle); 2037 list_replace_rcu(&fold->list, &fnew->list); 2038 fold->deleted = true; 2039 2040 spin_unlock(&tp->lock); 2041 2042 fl_mask_put(head, fold->mask); 2043 if (!tc_skip_hw(fold->flags)) 2044 fl_hw_destroy_filter(tp, fold, rtnl_held, NULL); 2045 tcf_unbind_filter(tp, &fold->res); 2046 /* Caller holds reference to fold, so refcnt is always > 0 2047 * after this. 2048 */ 2049 refcount_dec(&fold->refcnt); 2050 __fl_put(fold); 2051 } else { 2052 if (handle) { 2053 /* user specifies a handle and it doesn't exist */ 2054 err = idr_alloc_u32(&head->handle_idr, fnew, &handle, 2055 handle, GFP_ATOMIC); 2056 2057 /* Filter with specified handle was concurrently 2058 * inserted after initial check in cls_api. This is not 2059 * necessarily an error if NLM_F_EXCL is not set in 2060 * message flags. Returning EAGAIN will cause cls_api to 2061 * try to update concurrently inserted rule. 2062 */ 2063 if (err == -ENOSPC) 2064 err = -EAGAIN; 2065 } else { 2066 handle = 1; 2067 err = idr_alloc_u32(&head->handle_idr, fnew, &handle, 2068 INT_MAX, GFP_ATOMIC); 2069 } 2070 if (err) 2071 goto errout_hw; 2072 2073 refcount_inc(&fnew->refcnt); 2074 fnew->handle = handle; 2075 list_add_tail_rcu(&fnew->list, &fnew->mask->filters); 2076 spin_unlock(&tp->lock); 2077 } 2078 2079 *arg = fnew; 2080 2081 kfree(tb); 2082 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work); 2083 return 0; 2084 2085 errout_ht: 2086 spin_lock(&tp->lock); 2087 errout_hw: 2088 fnew->deleted = true; 2089 spin_unlock(&tp->lock); 2090 if (!tc_skip_hw(fnew->flags)) 2091 fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL); 2092 if (in_ht) 2093 rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node, 2094 fnew->mask->filter_ht_params); 2095 errout_mask: 2096 fl_mask_put(head, fnew->mask); 2097 errout: 2098 __fl_put(fnew); 2099 errout_tb: 2100 kfree(tb); 2101 errout_mask_alloc: 2102 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work); 2103 errout_fold: 2104 if (fold) 2105 __fl_put(fold); 2106 return err; 2107 } 2108 2109 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last, 2110 bool rtnl_held, struct netlink_ext_ack *extack) 2111 { 2112 struct cls_fl_head *head = fl_head_dereference(tp); 2113 struct cls_fl_filter *f = arg; 2114 bool last_on_mask; 2115 int err = 0; 2116 2117 err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack); 2118 *last = list_empty(&head->masks); 2119 __fl_put(f); 2120 2121 return err; 2122 } 2123 2124 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg, 2125 bool rtnl_held) 2126 { 2127 struct cls_fl_head *head = fl_head_dereference(tp); 2128 unsigned long id = arg->cookie, tmp; 2129 struct cls_fl_filter *f; 2130 2131 arg->count = arg->skip; 2132 2133 idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) { 2134 /* don't return filters that are being deleted */ 2135 if (!refcount_inc_not_zero(&f->refcnt)) 2136 continue; 2137 if (arg->fn(tp, f, arg) < 0) { 2138 __fl_put(f); 2139 arg->stop = 1; 2140 break; 2141 } 2142 __fl_put(f); 2143 arg->count++; 2144 } 2145 arg->cookie = id; 2146 } 2147 2148 static struct cls_fl_filter * 2149 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add) 2150 { 2151 struct cls_fl_head *head = fl_head_dereference(tp); 2152 2153 spin_lock(&tp->lock); 2154 if (list_empty(&head->hw_filters)) { 2155 spin_unlock(&tp->lock); 2156 return NULL; 2157 } 2158 2159 if (!f) 2160 f = list_entry(&head->hw_filters, struct cls_fl_filter, 2161 hw_list); 2162 list_for_each_entry_continue(f, &head->hw_filters, hw_list) { 2163 if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) { 2164 spin_unlock(&tp->lock); 2165 return f; 2166 } 2167 } 2168 2169 spin_unlock(&tp->lock); 2170 return NULL; 2171 } 2172 2173 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, 2174 void *cb_priv, struct netlink_ext_ack *extack) 2175 { 2176 struct tcf_block *block = tp->chain->block; 2177 struct flow_cls_offload cls_flower = {}; 2178 struct cls_fl_filter *f = NULL; 2179 int err; 2180 2181 /* hw_filters list can only be changed by hw offload functions after 2182 * obtaining rtnl lock. Make sure it is not changed while reoffload is 2183 * iterating it. 2184 */ 2185 ASSERT_RTNL(); 2186 2187 while ((f = fl_get_next_hw_filter(tp, f, add))) { 2188 cls_flower.rule = 2189 flow_rule_alloc(tcf_exts_num_actions(&f->exts)); 2190 if (!cls_flower.rule) { 2191 __fl_put(f); 2192 return -ENOMEM; 2193 } 2194 2195 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, 2196 extack); 2197 cls_flower.command = add ? 2198 FLOW_CLS_REPLACE : FLOW_CLS_DESTROY; 2199 cls_flower.cookie = (unsigned long)f; 2200 cls_flower.rule->match.dissector = &f->mask->dissector; 2201 cls_flower.rule->match.mask = &f->mask->key; 2202 cls_flower.rule->match.key = &f->mkey; 2203 2204 err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts); 2205 if (err) { 2206 kfree(cls_flower.rule); 2207 if (tc_skip_sw(f->flags)) { 2208 NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action"); 2209 __fl_put(f); 2210 return err; 2211 } 2212 goto next_flow; 2213 } 2214 2215 cls_flower.classid = f->res.classid; 2216 2217 err = tc_setup_cb_reoffload(block, tp, add, cb, 2218 TC_SETUP_CLSFLOWER, &cls_flower, 2219 cb_priv, &f->flags, 2220 &f->in_hw_count); 2221 tc_cleanup_flow_action(&cls_flower.rule->action); 2222 kfree(cls_flower.rule); 2223 2224 if (err) { 2225 __fl_put(f); 2226 return err; 2227 } 2228 next_flow: 2229 __fl_put(f); 2230 } 2231 2232 return 0; 2233 } 2234 2235 static void fl_hw_add(struct tcf_proto *tp, void *type_data) 2236 { 2237 struct flow_cls_offload *cls_flower = type_data; 2238 struct cls_fl_filter *f = 2239 (struct cls_fl_filter *) cls_flower->cookie; 2240 struct cls_fl_head *head = fl_head_dereference(tp); 2241 2242 spin_lock(&tp->lock); 2243 list_add(&f->hw_list, &head->hw_filters); 2244 spin_unlock(&tp->lock); 2245 } 2246 2247 static void fl_hw_del(struct tcf_proto *tp, void *type_data) 2248 { 2249 struct flow_cls_offload *cls_flower = type_data; 2250 struct cls_fl_filter *f = 2251 (struct cls_fl_filter *) cls_flower->cookie; 2252 2253 spin_lock(&tp->lock); 2254 if (!list_empty(&f->hw_list)) 2255 list_del_init(&f->hw_list); 2256 spin_unlock(&tp->lock); 2257 } 2258 2259 static int fl_hw_create_tmplt(struct tcf_chain *chain, 2260 struct fl_flow_tmplt *tmplt) 2261 { 2262 struct flow_cls_offload cls_flower = {}; 2263 struct tcf_block *block = chain->block; 2264 2265 cls_flower.rule = flow_rule_alloc(0); 2266 if (!cls_flower.rule) 2267 return -ENOMEM; 2268 2269 cls_flower.common.chain_index = chain->index; 2270 cls_flower.command = FLOW_CLS_TMPLT_CREATE; 2271 cls_flower.cookie = (unsigned long) tmplt; 2272 cls_flower.rule->match.dissector = &tmplt->dissector; 2273 cls_flower.rule->match.mask = &tmplt->mask; 2274 cls_flower.rule->match.key = &tmplt->dummy_key; 2275 2276 /* We don't care if driver (any of them) fails to handle this 2277 * call. It serves just as a hint for it. 2278 */ 2279 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true); 2280 kfree(cls_flower.rule); 2281 2282 return 0; 2283 } 2284 2285 static void fl_hw_destroy_tmplt(struct tcf_chain *chain, 2286 struct fl_flow_tmplt *tmplt) 2287 { 2288 struct flow_cls_offload cls_flower = {}; 2289 struct tcf_block *block = chain->block; 2290 2291 cls_flower.common.chain_index = chain->index; 2292 cls_flower.command = FLOW_CLS_TMPLT_DESTROY; 2293 cls_flower.cookie = (unsigned long) tmplt; 2294 2295 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true); 2296 } 2297 2298 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain, 2299 struct nlattr **tca, 2300 struct netlink_ext_ack *extack) 2301 { 2302 struct fl_flow_tmplt *tmplt; 2303 struct nlattr **tb; 2304 int err; 2305 2306 if (!tca[TCA_OPTIONS]) 2307 return ERR_PTR(-EINVAL); 2308 2309 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL); 2310 if (!tb) 2311 return ERR_PTR(-ENOBUFS); 2312 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX, 2313 tca[TCA_OPTIONS], fl_policy, NULL); 2314 if (err) 2315 goto errout_tb; 2316 2317 tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL); 2318 if (!tmplt) { 2319 err = -ENOMEM; 2320 goto errout_tb; 2321 } 2322 tmplt->chain = chain; 2323 err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack); 2324 if (err) 2325 goto errout_tmplt; 2326 2327 fl_init_dissector(&tmplt->dissector, &tmplt->mask); 2328 2329 err = fl_hw_create_tmplt(chain, tmplt); 2330 if (err) 2331 goto errout_tmplt; 2332 2333 kfree(tb); 2334 return tmplt; 2335 2336 errout_tmplt: 2337 kfree(tmplt); 2338 errout_tb: 2339 kfree(tb); 2340 return ERR_PTR(err); 2341 } 2342 2343 static void fl_tmplt_destroy(void *tmplt_priv) 2344 { 2345 struct fl_flow_tmplt *tmplt = tmplt_priv; 2346 2347 fl_hw_destroy_tmplt(tmplt->chain, tmplt); 2348 kfree(tmplt); 2349 } 2350 2351 static int fl_dump_key_val(struct sk_buff *skb, 2352 void *val, int val_type, 2353 void *mask, int mask_type, int len) 2354 { 2355 int err; 2356 2357 if (!memchr_inv(mask, 0, len)) 2358 return 0; 2359 err = nla_put(skb, val_type, len, val); 2360 if (err) 2361 return err; 2362 if (mask_type != TCA_FLOWER_UNSPEC) { 2363 err = nla_put(skb, mask_type, len, mask); 2364 if (err) 2365 return err; 2366 } 2367 return 0; 2368 } 2369 2370 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key, 2371 struct fl_flow_key *mask) 2372 { 2373 if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst, 2374 TCA_FLOWER_KEY_PORT_DST_MIN, 2375 &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC, 2376 sizeof(key->tp_range.tp_min.dst)) || 2377 fl_dump_key_val(skb, &key->tp_range.tp_max.dst, 2378 TCA_FLOWER_KEY_PORT_DST_MAX, 2379 &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC, 2380 sizeof(key->tp_range.tp_max.dst)) || 2381 fl_dump_key_val(skb, &key->tp_range.tp_min.src, 2382 TCA_FLOWER_KEY_PORT_SRC_MIN, 2383 &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC, 2384 sizeof(key->tp_range.tp_min.src)) || 2385 fl_dump_key_val(skb, &key->tp_range.tp_max.src, 2386 TCA_FLOWER_KEY_PORT_SRC_MAX, 2387 &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC, 2388 sizeof(key->tp_range.tp_max.src))) 2389 return -1; 2390 2391 return 0; 2392 } 2393 2394 static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb, 2395 struct flow_dissector_key_mpls *mpls_key, 2396 struct flow_dissector_key_mpls *mpls_mask, 2397 u8 lse_index) 2398 { 2399 struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index]; 2400 struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index]; 2401 int err; 2402 2403 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH, 2404 lse_index + 1); 2405 if (err) 2406 return err; 2407 2408 if (lse_mask->mpls_ttl) { 2409 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL, 2410 lse_key->mpls_ttl); 2411 if (err) 2412 return err; 2413 } 2414 if (lse_mask->mpls_bos) { 2415 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS, 2416 lse_key->mpls_bos); 2417 if (err) 2418 return err; 2419 } 2420 if (lse_mask->mpls_tc) { 2421 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC, 2422 lse_key->mpls_tc); 2423 if (err) 2424 return err; 2425 } 2426 if (lse_mask->mpls_label) { 2427 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL, 2428 lse_key->mpls_label); 2429 if (err) 2430 return err; 2431 } 2432 2433 return 0; 2434 } 2435 2436 static int fl_dump_key_mpls_opts(struct sk_buff *skb, 2437 struct flow_dissector_key_mpls *mpls_key, 2438 struct flow_dissector_key_mpls *mpls_mask) 2439 { 2440 struct nlattr *opts; 2441 struct nlattr *lse; 2442 u8 lse_index; 2443 int err; 2444 2445 opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS); 2446 if (!opts) 2447 return -EMSGSIZE; 2448 2449 for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) { 2450 if (!(mpls_mask->used_lses & 1 << lse_index)) 2451 continue; 2452 2453 lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE); 2454 if (!lse) { 2455 err = -EMSGSIZE; 2456 goto err_opts; 2457 } 2458 2459 err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask, 2460 lse_index); 2461 if (err) 2462 goto err_opts_lse; 2463 nla_nest_end(skb, lse); 2464 } 2465 nla_nest_end(skb, opts); 2466 2467 return 0; 2468 2469 err_opts_lse: 2470 nla_nest_cancel(skb, lse); 2471 err_opts: 2472 nla_nest_cancel(skb, opts); 2473 2474 return err; 2475 } 2476 2477 static int fl_dump_key_mpls(struct sk_buff *skb, 2478 struct flow_dissector_key_mpls *mpls_key, 2479 struct flow_dissector_key_mpls *mpls_mask) 2480 { 2481 struct flow_dissector_mpls_lse *lse_mask; 2482 struct flow_dissector_mpls_lse *lse_key; 2483 int err; 2484 2485 if (!mpls_mask->used_lses) 2486 return 0; 2487 2488 lse_mask = &mpls_mask->ls[0]; 2489 lse_key = &mpls_key->ls[0]; 2490 2491 /* For backward compatibility, don't use the MPLS nested attributes if 2492 * the rule can be expressed using the old attributes. 2493 */ 2494 if (mpls_mask->used_lses & ~1 || 2495 (!lse_mask->mpls_ttl && !lse_mask->mpls_bos && 2496 !lse_mask->mpls_tc && !lse_mask->mpls_label)) 2497 return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask); 2498 2499 if (lse_mask->mpls_ttl) { 2500 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL, 2501 lse_key->mpls_ttl); 2502 if (err) 2503 return err; 2504 } 2505 if (lse_mask->mpls_tc) { 2506 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC, 2507 lse_key->mpls_tc); 2508 if (err) 2509 return err; 2510 } 2511 if (lse_mask->mpls_label) { 2512 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL, 2513 lse_key->mpls_label); 2514 if (err) 2515 return err; 2516 } 2517 if (lse_mask->mpls_bos) { 2518 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS, 2519 lse_key->mpls_bos); 2520 if (err) 2521 return err; 2522 } 2523 return 0; 2524 } 2525 2526 static int fl_dump_key_ip(struct sk_buff *skb, bool encap, 2527 struct flow_dissector_key_ip *key, 2528 struct flow_dissector_key_ip *mask) 2529 { 2530 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS; 2531 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL; 2532 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK; 2533 int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK; 2534 2535 if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) || 2536 fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl))) 2537 return -1; 2538 2539 return 0; 2540 } 2541 2542 static int fl_dump_key_vlan(struct sk_buff *skb, 2543 int vlan_id_key, int vlan_prio_key, 2544 struct flow_dissector_key_vlan *vlan_key, 2545 struct flow_dissector_key_vlan *vlan_mask) 2546 { 2547 int err; 2548 2549 if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask))) 2550 return 0; 2551 if (vlan_mask->vlan_id) { 2552 err = nla_put_u16(skb, vlan_id_key, 2553 vlan_key->vlan_id); 2554 if (err) 2555 return err; 2556 } 2557 if (vlan_mask->vlan_priority) { 2558 err = nla_put_u8(skb, vlan_prio_key, 2559 vlan_key->vlan_priority); 2560 if (err) 2561 return err; 2562 } 2563 return 0; 2564 } 2565 2566 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask, 2567 u32 *flower_key, u32 *flower_mask, 2568 u32 flower_flag_bit, u32 dissector_flag_bit) 2569 { 2570 if (dissector_mask & dissector_flag_bit) { 2571 *flower_mask |= flower_flag_bit; 2572 if (dissector_key & dissector_flag_bit) 2573 *flower_key |= flower_flag_bit; 2574 } 2575 } 2576 2577 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask) 2578 { 2579 u32 key, mask; 2580 __be32 _key, _mask; 2581 int err; 2582 2583 if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask))) 2584 return 0; 2585 2586 key = 0; 2587 mask = 0; 2588 2589 fl_get_key_flag(flags_key, flags_mask, &key, &mask, 2590 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT); 2591 fl_get_key_flag(flags_key, flags_mask, &key, &mask, 2592 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST, 2593 FLOW_DIS_FIRST_FRAG); 2594 2595 _key = cpu_to_be32(key); 2596 _mask = cpu_to_be32(mask); 2597 2598 err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key); 2599 if (err) 2600 return err; 2601 2602 return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask); 2603 } 2604 2605 static int fl_dump_key_geneve_opt(struct sk_buff *skb, 2606 struct flow_dissector_key_enc_opts *enc_opts) 2607 { 2608 struct geneve_opt *opt; 2609 struct nlattr *nest; 2610 int opt_off = 0; 2611 2612 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE); 2613 if (!nest) 2614 goto nla_put_failure; 2615 2616 while (enc_opts->len > opt_off) { 2617 opt = (struct geneve_opt *)&enc_opts->data[opt_off]; 2618 2619 if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS, 2620 opt->opt_class)) 2621 goto nla_put_failure; 2622 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE, 2623 opt->type)) 2624 goto nla_put_failure; 2625 if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA, 2626 opt->length * 4, opt->opt_data)) 2627 goto nla_put_failure; 2628 2629 opt_off += sizeof(struct geneve_opt) + opt->length * 4; 2630 } 2631 nla_nest_end(skb, nest); 2632 return 0; 2633 2634 nla_put_failure: 2635 nla_nest_cancel(skb, nest); 2636 return -EMSGSIZE; 2637 } 2638 2639 static int fl_dump_key_vxlan_opt(struct sk_buff *skb, 2640 struct flow_dissector_key_enc_opts *enc_opts) 2641 { 2642 struct vxlan_metadata *md; 2643 struct nlattr *nest; 2644 2645 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN); 2646 if (!nest) 2647 goto nla_put_failure; 2648 2649 md = (struct vxlan_metadata *)&enc_opts->data[0]; 2650 if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp)) 2651 goto nla_put_failure; 2652 2653 nla_nest_end(skb, nest); 2654 return 0; 2655 2656 nla_put_failure: 2657 nla_nest_cancel(skb, nest); 2658 return -EMSGSIZE; 2659 } 2660 2661 static int fl_dump_key_erspan_opt(struct sk_buff *skb, 2662 struct flow_dissector_key_enc_opts *enc_opts) 2663 { 2664 struct erspan_metadata *md; 2665 struct nlattr *nest; 2666 2667 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN); 2668 if (!nest) 2669 goto nla_put_failure; 2670 2671 md = (struct erspan_metadata *)&enc_opts->data[0]; 2672 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version)) 2673 goto nla_put_failure; 2674 2675 if (md->version == 1 && 2676 nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index)) 2677 goto nla_put_failure; 2678 2679 if (md->version == 2 && 2680 (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR, 2681 md->u.md2.dir) || 2682 nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID, 2683 get_hwid(&md->u.md2)))) 2684 goto nla_put_failure; 2685 2686 nla_nest_end(skb, nest); 2687 return 0; 2688 2689 nla_put_failure: 2690 nla_nest_cancel(skb, nest); 2691 return -EMSGSIZE; 2692 } 2693 2694 static int fl_dump_key_ct(struct sk_buff *skb, 2695 struct flow_dissector_key_ct *key, 2696 struct flow_dissector_key_ct *mask) 2697 { 2698 if (IS_ENABLED(CONFIG_NF_CONNTRACK) && 2699 fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE, 2700 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK, 2701 sizeof(key->ct_state))) 2702 goto nla_put_failure; 2703 2704 if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) && 2705 fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE, 2706 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK, 2707 sizeof(key->ct_zone))) 2708 goto nla_put_failure; 2709 2710 if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) && 2711 fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK, 2712 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK, 2713 sizeof(key->ct_mark))) 2714 goto nla_put_failure; 2715 2716 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) && 2717 fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS, 2718 &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK, 2719 sizeof(key->ct_labels))) 2720 goto nla_put_failure; 2721 2722 return 0; 2723 2724 nla_put_failure: 2725 return -EMSGSIZE; 2726 } 2727 2728 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type, 2729 struct flow_dissector_key_enc_opts *enc_opts) 2730 { 2731 struct nlattr *nest; 2732 int err; 2733 2734 if (!enc_opts->len) 2735 return 0; 2736 2737 nest = nla_nest_start_noflag(skb, enc_opt_type); 2738 if (!nest) 2739 goto nla_put_failure; 2740 2741 switch (enc_opts->dst_opt_type) { 2742 case TUNNEL_GENEVE_OPT: 2743 err = fl_dump_key_geneve_opt(skb, enc_opts); 2744 if (err) 2745 goto nla_put_failure; 2746 break; 2747 case TUNNEL_VXLAN_OPT: 2748 err = fl_dump_key_vxlan_opt(skb, enc_opts); 2749 if (err) 2750 goto nla_put_failure; 2751 break; 2752 case TUNNEL_ERSPAN_OPT: 2753 err = fl_dump_key_erspan_opt(skb, enc_opts); 2754 if (err) 2755 goto nla_put_failure; 2756 break; 2757 default: 2758 goto nla_put_failure; 2759 } 2760 nla_nest_end(skb, nest); 2761 return 0; 2762 2763 nla_put_failure: 2764 nla_nest_cancel(skb, nest); 2765 return -EMSGSIZE; 2766 } 2767 2768 static int fl_dump_key_enc_opt(struct sk_buff *skb, 2769 struct flow_dissector_key_enc_opts *key_opts, 2770 struct flow_dissector_key_enc_opts *msk_opts) 2771 { 2772 int err; 2773 2774 err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts); 2775 if (err) 2776 return err; 2777 2778 return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts); 2779 } 2780 2781 static int fl_dump_key(struct sk_buff *skb, struct net *net, 2782 struct fl_flow_key *key, struct fl_flow_key *mask) 2783 { 2784 if (mask->meta.ingress_ifindex) { 2785 struct net_device *dev; 2786 2787 dev = __dev_get_by_index(net, key->meta.ingress_ifindex); 2788 if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name)) 2789 goto nla_put_failure; 2790 } 2791 2792 if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST, 2793 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK, 2794 sizeof(key->eth.dst)) || 2795 fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC, 2796 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK, 2797 sizeof(key->eth.src)) || 2798 fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE, 2799 &mask->basic.n_proto, TCA_FLOWER_UNSPEC, 2800 sizeof(key->basic.n_proto))) 2801 goto nla_put_failure; 2802 2803 if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls)) 2804 goto nla_put_failure; 2805 2806 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID, 2807 TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan)) 2808 goto nla_put_failure; 2809 2810 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID, 2811 TCA_FLOWER_KEY_CVLAN_PRIO, 2812 &key->cvlan, &mask->cvlan) || 2813 (mask->cvlan.vlan_tpid && 2814 nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE, 2815 key->cvlan.vlan_tpid))) 2816 goto nla_put_failure; 2817 2818 if (mask->basic.n_proto) { 2819 if (mask->cvlan.vlan_tpid) { 2820 if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE, 2821 key->basic.n_proto)) 2822 goto nla_put_failure; 2823 } else if (mask->vlan.vlan_tpid) { 2824 if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE, 2825 key->basic.n_proto)) 2826 goto nla_put_failure; 2827 } 2828 } 2829 2830 if ((key->basic.n_proto == htons(ETH_P_IP) || 2831 key->basic.n_proto == htons(ETH_P_IPV6)) && 2832 (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO, 2833 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC, 2834 sizeof(key->basic.ip_proto)) || 2835 fl_dump_key_ip(skb, false, &key->ip, &mask->ip))) 2836 goto nla_put_failure; 2837 2838 if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS && 2839 (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC, 2840 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK, 2841 sizeof(key->ipv4.src)) || 2842 fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST, 2843 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK, 2844 sizeof(key->ipv4.dst)))) 2845 goto nla_put_failure; 2846 else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS && 2847 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC, 2848 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK, 2849 sizeof(key->ipv6.src)) || 2850 fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST, 2851 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK, 2852 sizeof(key->ipv6.dst)))) 2853 goto nla_put_failure; 2854 2855 if (key->basic.ip_proto == IPPROTO_TCP && 2856 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC, 2857 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK, 2858 sizeof(key->tp.src)) || 2859 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST, 2860 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK, 2861 sizeof(key->tp.dst)) || 2862 fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS, 2863 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK, 2864 sizeof(key->tcp.flags)))) 2865 goto nla_put_failure; 2866 else if (key->basic.ip_proto == IPPROTO_UDP && 2867 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC, 2868 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK, 2869 sizeof(key->tp.src)) || 2870 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST, 2871 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK, 2872 sizeof(key->tp.dst)))) 2873 goto nla_put_failure; 2874 else if (key->basic.ip_proto == IPPROTO_SCTP && 2875 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC, 2876 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK, 2877 sizeof(key->tp.src)) || 2878 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST, 2879 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK, 2880 sizeof(key->tp.dst)))) 2881 goto nla_put_failure; 2882 else if (key->basic.n_proto == htons(ETH_P_IP) && 2883 key->basic.ip_proto == IPPROTO_ICMP && 2884 (fl_dump_key_val(skb, &key->icmp.type, 2885 TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type, 2886 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK, 2887 sizeof(key->icmp.type)) || 2888 fl_dump_key_val(skb, &key->icmp.code, 2889 TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code, 2890 TCA_FLOWER_KEY_ICMPV4_CODE_MASK, 2891 sizeof(key->icmp.code)))) 2892 goto nla_put_failure; 2893 else if (key->basic.n_proto == htons(ETH_P_IPV6) && 2894 key->basic.ip_proto == IPPROTO_ICMPV6 && 2895 (fl_dump_key_val(skb, &key->icmp.type, 2896 TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type, 2897 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK, 2898 sizeof(key->icmp.type)) || 2899 fl_dump_key_val(skb, &key->icmp.code, 2900 TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code, 2901 TCA_FLOWER_KEY_ICMPV6_CODE_MASK, 2902 sizeof(key->icmp.code)))) 2903 goto nla_put_failure; 2904 else if ((key->basic.n_proto == htons(ETH_P_ARP) || 2905 key->basic.n_proto == htons(ETH_P_RARP)) && 2906 (fl_dump_key_val(skb, &key->arp.sip, 2907 TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip, 2908 TCA_FLOWER_KEY_ARP_SIP_MASK, 2909 sizeof(key->arp.sip)) || 2910 fl_dump_key_val(skb, &key->arp.tip, 2911 TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip, 2912 TCA_FLOWER_KEY_ARP_TIP_MASK, 2913 sizeof(key->arp.tip)) || 2914 fl_dump_key_val(skb, &key->arp.op, 2915 TCA_FLOWER_KEY_ARP_OP, &mask->arp.op, 2916 TCA_FLOWER_KEY_ARP_OP_MASK, 2917 sizeof(key->arp.op)) || 2918 fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA, 2919 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK, 2920 sizeof(key->arp.sha)) || 2921 fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA, 2922 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK, 2923 sizeof(key->arp.tha)))) 2924 goto nla_put_failure; 2925 2926 if ((key->basic.ip_proto == IPPROTO_TCP || 2927 key->basic.ip_proto == IPPROTO_UDP || 2928 key->basic.ip_proto == IPPROTO_SCTP) && 2929 fl_dump_key_port_range(skb, key, mask)) 2930 goto nla_put_failure; 2931 2932 if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS && 2933 (fl_dump_key_val(skb, &key->enc_ipv4.src, 2934 TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src, 2935 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK, 2936 sizeof(key->enc_ipv4.src)) || 2937 fl_dump_key_val(skb, &key->enc_ipv4.dst, 2938 TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst, 2939 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK, 2940 sizeof(key->enc_ipv4.dst)))) 2941 goto nla_put_failure; 2942 else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS && 2943 (fl_dump_key_val(skb, &key->enc_ipv6.src, 2944 TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src, 2945 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK, 2946 sizeof(key->enc_ipv6.src)) || 2947 fl_dump_key_val(skb, &key->enc_ipv6.dst, 2948 TCA_FLOWER_KEY_ENC_IPV6_DST, 2949 &mask->enc_ipv6.dst, 2950 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK, 2951 sizeof(key->enc_ipv6.dst)))) 2952 goto nla_put_failure; 2953 2954 if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID, 2955 &mask->enc_key_id, TCA_FLOWER_UNSPEC, 2956 sizeof(key->enc_key_id)) || 2957 fl_dump_key_val(skb, &key->enc_tp.src, 2958 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT, 2959 &mask->enc_tp.src, 2960 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK, 2961 sizeof(key->enc_tp.src)) || 2962 fl_dump_key_val(skb, &key->enc_tp.dst, 2963 TCA_FLOWER_KEY_ENC_UDP_DST_PORT, 2964 &mask->enc_tp.dst, 2965 TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK, 2966 sizeof(key->enc_tp.dst)) || 2967 fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) || 2968 fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts)) 2969 goto nla_put_failure; 2970 2971 if (fl_dump_key_ct(skb, &key->ct, &mask->ct)) 2972 goto nla_put_failure; 2973 2974 if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags)) 2975 goto nla_put_failure; 2976 2977 if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH, 2978 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK, 2979 sizeof(key->hash.hash))) 2980 goto nla_put_failure; 2981 2982 return 0; 2983 2984 nla_put_failure: 2985 return -EMSGSIZE; 2986 } 2987 2988 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh, 2989 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held) 2990 { 2991 struct cls_fl_filter *f = fh; 2992 struct nlattr *nest; 2993 struct fl_flow_key *key, *mask; 2994 bool skip_hw; 2995 2996 if (!f) 2997 return skb->len; 2998 2999 t->tcm_handle = f->handle; 3000 3001 nest = nla_nest_start_noflag(skb, TCA_OPTIONS); 3002 if (!nest) 3003 goto nla_put_failure; 3004 3005 spin_lock(&tp->lock); 3006 3007 if (f->res.classid && 3008 nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid)) 3009 goto nla_put_failure_locked; 3010 3011 key = &f->key; 3012 mask = &f->mask->key; 3013 skip_hw = tc_skip_hw(f->flags); 3014 3015 if (fl_dump_key(skb, net, key, mask)) 3016 goto nla_put_failure_locked; 3017 3018 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags)) 3019 goto nla_put_failure_locked; 3020 3021 spin_unlock(&tp->lock); 3022 3023 if (!skip_hw) 3024 fl_hw_update_stats(tp, f, rtnl_held); 3025 3026 if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count)) 3027 goto nla_put_failure; 3028 3029 if (tcf_exts_dump(skb, &f->exts)) 3030 goto nla_put_failure; 3031 3032 nla_nest_end(skb, nest); 3033 3034 if (tcf_exts_dump_stats(skb, &f->exts) < 0) 3035 goto nla_put_failure; 3036 3037 return skb->len; 3038 3039 nla_put_failure_locked: 3040 spin_unlock(&tp->lock); 3041 nla_put_failure: 3042 nla_nest_cancel(skb, nest); 3043 return -1; 3044 } 3045 3046 static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh, 3047 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held) 3048 { 3049 struct cls_fl_filter *f = fh; 3050 struct nlattr *nest; 3051 bool skip_hw; 3052 3053 if (!f) 3054 return skb->len; 3055 3056 t->tcm_handle = f->handle; 3057 3058 nest = nla_nest_start_noflag(skb, TCA_OPTIONS); 3059 if (!nest) 3060 goto nla_put_failure; 3061 3062 spin_lock(&tp->lock); 3063 3064 skip_hw = tc_skip_hw(f->flags); 3065 3066 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags)) 3067 goto nla_put_failure_locked; 3068 3069 spin_unlock(&tp->lock); 3070 3071 if (!skip_hw) 3072 fl_hw_update_stats(tp, f, rtnl_held); 3073 3074 if (tcf_exts_terse_dump(skb, &f->exts)) 3075 goto nla_put_failure; 3076 3077 nla_nest_end(skb, nest); 3078 3079 return skb->len; 3080 3081 nla_put_failure_locked: 3082 spin_unlock(&tp->lock); 3083 nla_put_failure: 3084 nla_nest_cancel(skb, nest); 3085 return -1; 3086 } 3087 3088 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv) 3089 { 3090 struct fl_flow_tmplt *tmplt = tmplt_priv; 3091 struct fl_flow_key *key, *mask; 3092 struct nlattr *nest; 3093 3094 nest = nla_nest_start_noflag(skb, TCA_OPTIONS); 3095 if (!nest) 3096 goto nla_put_failure; 3097 3098 key = &tmplt->dummy_key; 3099 mask = &tmplt->mask; 3100 3101 if (fl_dump_key(skb, net, key, mask)) 3102 goto nla_put_failure; 3103 3104 nla_nest_end(skb, nest); 3105 3106 return skb->len; 3107 3108 nla_put_failure: 3109 nla_nest_cancel(skb, nest); 3110 return -EMSGSIZE; 3111 } 3112 3113 static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q, 3114 unsigned long base) 3115 { 3116 struct cls_fl_filter *f = fh; 3117 3118 if (f && f->res.classid == classid) { 3119 if (cl) 3120 __tcf_bind_filter(q, &f->res, base); 3121 else 3122 __tcf_unbind_filter(q, &f->res); 3123 } 3124 } 3125 3126 static bool fl_delete_empty(struct tcf_proto *tp) 3127 { 3128 struct cls_fl_head *head = fl_head_dereference(tp); 3129 3130 spin_lock(&tp->lock); 3131 tp->deleting = idr_is_empty(&head->handle_idr); 3132 spin_unlock(&tp->lock); 3133 3134 return tp->deleting; 3135 } 3136 3137 static struct tcf_proto_ops cls_fl_ops __read_mostly = { 3138 .kind = "flower", 3139 .classify = fl_classify, 3140 .init = fl_init, 3141 .destroy = fl_destroy, 3142 .get = fl_get, 3143 .put = fl_put, 3144 .change = fl_change, 3145 .delete = fl_delete, 3146 .delete_empty = fl_delete_empty, 3147 .walk = fl_walk, 3148 .reoffload = fl_reoffload, 3149 .hw_add = fl_hw_add, 3150 .hw_del = fl_hw_del, 3151 .dump = fl_dump, 3152 .terse_dump = fl_terse_dump, 3153 .bind_class = fl_bind_class, 3154 .tmplt_create = fl_tmplt_create, 3155 .tmplt_destroy = fl_tmplt_destroy, 3156 .tmplt_dump = fl_tmplt_dump, 3157 .owner = THIS_MODULE, 3158 .flags = TCF_PROTO_OPS_DOIT_UNLOCKED, 3159 }; 3160 3161 static int __init cls_fl_init(void) 3162 { 3163 return register_tcf_proto_ops(&cls_fl_ops); 3164 } 3165 3166 static void __exit cls_fl_exit(void) 3167 { 3168 unregister_tcf_proto_ops(&cls_fl_ops); 3169 } 3170 3171 module_init(cls_fl_init); 3172 module_exit(cls_fl_exit); 3173 3174 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>"); 3175 MODULE_DESCRIPTION("Flower classifier"); 3176 MODULE_LICENSE("GPL v2"); 3177