// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/mpls.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include <net/erspan.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

#include <uapi/linux/netfilter/nf_conntrack_common.h>

struct fl_flow_key {
	struct flow_dissector_key_meta meta;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	struct flow_dissector_key_vlan cvlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_mpls mpls;
	struct flow_dissector_key_tcp tcp;
	struct flow_dissector_key_ip ip;
	struct flow_dissector_key_ip enc_ip;
	struct flow_dissector_key_enc_opts enc_opts;
	union {
		struct flow_dissector_key_ports tp;
		struct {
			struct flow_dissector_key_ports tp_min;
			struct flow_dissector_key_ports tp_max;
		};
	} tp_range;
	struct flow_dissector_key_ct ct;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	u32 flags;
	struct rhash_head ht_node;
	struct rhashtable ht;
	struct rhashtable_params filter_ht_params;
	struct flow_dissector dissector;
	struct list_head filters;
	struct rcu_work rwork;
	struct list_head list;
	refcount_t refcnt;
};

struct fl_flow_tmplt {
	struct fl_flow_key dummy_key;
	struct fl_flow_key mask;
	struct flow_dissector dissector;
	struct tcf_chain *chain;
};

struct cls_fl_head {
	struct rhashtable ht;
	spinlock_t masks_lock; /* Protect masks list */
	struct list_head masks;
	struct list_head hw_filters;
	struct rcu_work rwork;
	struct idr handle_idr;
};

struct cls_fl_filter {
	struct fl_flow_mask *mask;
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	struct list_head hw_list;
	u32 handle;
	u32 flags;
	u32 in_hw_count;
	struct rcu_work rwork;
	struct net_device *hw_dev;
	/* Flower classifier is unlocked, which means that its reference counter
	 * can be changed concurrently without any kind of external
	 * synchronization. Use atomic reference counter to be concurrency-safe.
	 */
	refcount_t refcnt;
	bool deleted;
};

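/* Two-level lookup structure: head->ht hashes the mask bytes themselves, so
 * every distinct mask exists exactly once as a struct fl_flow_mask. Each
 * mask then owns a second rhashtable (mask->ht) that hashes only the masked
 * byte range of each filter's key, so classification costs one hash lookup
 * per distinct mask in use rather than one per filter.
 */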
static const struct rhashtable_params mask_ht_params = {
	.key_offset = offsetof(struct fl_flow_mask, key),
	.key_len = sizeof(struct fl_flow_key),
	.head_offset = offsetof(struct fl_flow_mask, ht_node),
	.automatic_shrinking = true,
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last;

	for (i = 0; i < size; i++) {
		if (bytes[i]) {
			first = i;
			break;
		}
	}
	last = first;
	for (i = size - 1; i != first; i--) {
		if (bytes[i]) {
			last = i;
			break;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

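/* Example (illustrative offsets, not the real struct layout): if only
 * eth.dst is masked and its bytes happen to sit at offsets 18..23 inside
 * struct fl_flow_key, the scan above finds first = 18 and last = 23, and on
 * a 64-bit kernel the range becomes [16, 24). All later hashing and masking
 * then touches just that 8-byte window instead of the whole key, and can be
 * done long-at-a-time thanks to the __aligned(BITS_PER_LONG / 8) annotation
 * on struct fl_flow_key.
 */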
static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
			       struct fl_flow_mask *mask)
{
	const long *lmask = fl_key_get_start(&mask->key, mask);
	const long *ltmplt;
	int i;

	if (!tmplt)
		return true;
	ltmplt = fl_key_get_start(&tmplt->mask, mask);
	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
		if (~*ltmplt++ & *lmask++)
			return false;
	}
	return true;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

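/* Port ranges cannot be expressed as a bitmask, so they cannot be folded
 * into the hash lookup. Filters whose mask carries
 * TCA_FLOWER_MASK_FLAGS_RANGE are instead scanned linearly: for each
 * candidate the skb's port is compared against the filter's [min, max]
 * interval, and on success the filter's own min/max values are copied into
 * the lookup key (the skb has no such values) so that the subsequent hash
 * lookup can still match exactly.
 */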
static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	__be16 min_mask, max_mask, min_val, max_val;

	min_mask = htons(filter->mask->key.tp_range.tp_min.dst);
	max_mask = htons(filter->mask->key.tp_range.tp_max.dst);
	min_val = htons(filter->key.tp_range.tp_min.dst);
	max_val = htons(filter->key.tp_range.tp_max.dst);

	if (min_mask && max_mask) {
		if (htons(key->tp_range.tp.dst) < min_val ||
		    htons(key->tp_range.tp.dst) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
	}
	return true;
}

static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	__be16 min_mask, max_mask, min_val, max_val;

	min_mask = htons(filter->mask->key.tp_range.tp_min.src);
	max_mask = htons(filter->mask->key.tp_range.tp_max.src);
	min_val = htons(filter->key.tp_range.tp_min.src);
	max_val = htons(filter->key.tp_range.tp_max.src);

	if (min_mask && max_mask) {
		if (htons(key->tp_range.tp.src) < min_val ||
		    htons(key->tp_range.tp.src) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
	}
	return true;
}

static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
					 struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
				      mask->filter_ht_params);
}

static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
					     struct fl_flow_key *mkey,
					     struct fl_flow_key *key)
{
	struct cls_fl_filter *filter, *f;

	list_for_each_entry_rcu(filter, &mask->filters, list) {
		if (!fl_range_port_dst_cmp(filter, key, mkey))
			continue;

		if (!fl_range_port_src_cmp(filter, key, mkey))
			continue;

		f = __fl_lookup(mask, mkey);
		if (f)
			return f;
	}
	return NULL;
}

static struct cls_fl_filter *fl_lookup(struct fl_flow_mask *mask,
				       struct fl_flow_key *mkey,
				       struct fl_flow_key *key)
{
	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
		return fl_lookup_range(mask, mkey, key);

	return __fl_lookup(mask, mkey);
}

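/* Fast path, once per mask: zero the masked window of the on-stack key,
 * dissect only the fields this mask cares about, AND the result with the
 * mask, and do one hash lookup. The first match that is visible to software
 * wins; filters with TCA_CLS_FLAGS_SKIP_SW are skipped here.
 */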
static u16 fl_ct_info_to_flower_map[] = {
	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_NEW,
};

static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct fl_flow_key skb_mkey;
	struct fl_flow_key skb_key;
	struct fl_flow_mask *mask;
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(mask, &head->masks, list) {
		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
		fl_clear_masked_range(&skb_key, mask);

		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
		/* skb_flow_dissect() does not set n_proto in case of an
		 * unknown protocol, so do it here instead.
		 */
		skb_key.basic.n_proto = skb->protocol;
		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
				    fl_ct_info_to_flower_map,
				    ARRAY_SIZE(fl_ct_info_to_flower_map));
		skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);

		fl_set_masked_key(&skb_mkey, &skb_key, mask);

		f = fl_lookup(mask, &skb_mkey, &skb_key);
		if (f && !tc_skip_sw(f->flags)) {
			*res = f->res;
			return tcf_exts_exec(skb, &f->exts, res);
		}
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	spin_lock_init(&head->masks_lock);
	INIT_LIST_HEAD_RCU(&head->masks);
	INIT_LIST_HEAD(&head->hw_filters);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return rhashtable_init(&head->ht, &mask_ht_params);
}

static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
{
	/* temporary masks don't have their filters list and ht initialized */
	if (mask_init_done) {
		WARN_ON(!list_empty(&mask->filters));
		rhashtable_destroy(&mask->ht);
	}
	kfree(mask);
}

static void fl_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, true);
}

static void fl_uninit_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, false);
}

static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
{
	if (!refcount_dec_and_test(&mask->refcnt))
		return false;

	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);

	spin_lock(&head->masks_lock);
	list_del_rcu(&mask->list);
	spin_unlock(&head->masks_lock);

	tcf_queue_work(&mask->rwork, fl_mask_free_work);

	return true;
}

static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
{
	/* Flower classifier only changes root pointer during init and destroy.
	 * Users must obtain reference to tcf_proto instance before calling its
	 * API, so tp->root pointer is protected from concurrent call to
	 * fl_destroy() by reference counting.
	 */
	return rcu_dereference_raw(tp->root);
}

static void __fl_destroy_filter(struct cls_fl_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void fl_destroy_filter_work(struct work_struct *work)
{
	struct cls_fl_filter *f = container_of(to_rcu_work(work),
					       struct cls_fl_filter, rwork);

	__fl_destroy_filter(f);
}

static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
				 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_DESTROY;
	cls_flower.cookie = (unsigned long) f;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
			    &f->flags, &f->in_hw_count, rtnl_held);
}

static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct cls_fl_filter *f, bool rtnl_held,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};
	bool skip_sw = tc_skip_sw(f->flags);
	int err = 0;

	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
	if (!cls_flower.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_REPLACE;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.rule->match.dissector = &f->mask->dissector;
	cls_flower.rule->match.mask = &f->mask->key;
	cls_flower.rule->match.key = &f->mkey;
	cls_flower.classid = f->res.classid;

	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
	if (err) {
		kfree(cls_flower.rule);
		if (skip_sw) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
			return err;
		}
		return 0;
	}

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
	tc_cleanup_flow_action(&cls_flower.rule->action);
	kfree(cls_flower.rule);

	if (err) {
		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
		return err;
	}

	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
			       bool rtnl_held)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
	cls_flower.command = FLOW_CLS_STATS;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.classid = f->res.classid;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
			 rtnl_held);

	tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
			      cls_flower.stats.pkts,
			      cls_flower.stats.lastused);
}

static void __fl_put(struct cls_fl_filter *f)
{
	if (!refcount_dec_and_test(&f->refcnt))
		return;

	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
	else
		__fl_destroy_filter(f);
}

static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
{
	struct cls_fl_filter *f;

	rcu_read_lock();
	f = idr_find(&head->handle_idr, handle);
	if (f && !refcount_inc_not_zero(&f->refcnt))
		f = NULL;
	rcu_read_unlock();

	return f;
}

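/* Deletion protocol: a filter is first marked deleted and unlinked under
 * tp->lock, then its mask reference, hardware state and class binding are
 * released, and finally the caller's reference is dropped with __fl_put().
 * The actual kfree() happens only when the last reference goes away, via
 * tcf_queue_work() so concurrent RCU readers are done with the filter.
 */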
static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
		       bool *last, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	*last = false;

	spin_lock(&tp->lock);
	if (f->deleted) {
		spin_unlock(&tp->lock);
		return -ENOENT;
	}

	f->deleted = true;
	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
			       f->mask->filter_ht_params);
	idr_remove(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	spin_unlock(&tp->lock);

	*last = fl_mask_put(head, f->mask);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
	tcf_unbind_filter(tp, &f->res);
	__fl_put(f);

	return 0;
}

static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(to_rcu_work(work),
						struct cls_fl_head,
						rwork);

	rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct fl_flow_mask *mask, *next_mask;
	struct cls_fl_filter *f, *next;
	bool last;

	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
		list_for_each_entry_safe(f, next, &mask->filters, list) {
			__fl_delete(tp, f, &last, rtnl_held, extack);
			if (last)
				break;
		}
	}
	idr_destroy(&head->handle_idr);

	__module_get(THIS_MODULE);
	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}

static void fl_put(struct tcf_proto *tp, void *arg)
{
	struct cls_fl_filter *f = arg;

	__fl_put(f);
}

static void *fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	return __fl_get(head, handle);
}

static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_CT_STATE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_STATE_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
};

static const struct nla_policy
enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC]	= {
		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]		= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]	= { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]	= { .type = NLA_BINARY,
						    .len = 128 },
};

static const struct nla_policy
vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]	= { .type = NLA_U32 },
};

static const struct nla_policy
erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]	= { .type = NLA_U8 },
};

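/* Convention for all fl_set_key_*() helpers below: if the value attribute
 * is absent the field is left untouched; if the value is present but its
 * mask attribute is absent (or the key has no mask attribute at all,
 * signalled by TCA_FLOWER_UNSPEC), the mask is set to all-ones, i.e. an
 * exact match on the supplied value.
 */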
static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}

static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
				 struct fl_flow_key *mask,
				 struct netlink_ext_ack *extack)
{
	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
	fl_set_key_val(tb, &key->tp_range.tp_min.src,
		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
	fl_set_key_val(tb, &key->tp_range.tp_max.src,
		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));

	/* Ports are stored as __be16; byte-swapping both sides with htons()
	 * yields host-order values for the numeric min/max comparison.
	 */
	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
	    htons(key->tp_range.tp_max.dst) <=
	    htons(key->tp_range.tp_min.dst)) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
				    "Invalid destination port range (min must be strictly smaller than max)");
		return -EINVAL;
	}
	if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
	    htons(key->tp_range.tp_max.src) <=
	    htons(key->tp_range.tp_min.src)) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
				    "Invalid source port range (min must be strictly smaller than max)");
		return -EINVAL;
	}

	return 0;
}

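/* MPLS label stack entry fields and the widths enforced below: TTL is a
 * full byte, BOS a single bit, TC 3 bits, and the label 20 bits. The
 * MPLS_*_MASK constants are the corresponding all-ones masks, so supplying
 * any of these attributes always means an exact match on that field.
 */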
static int fl_set_key_mpls(struct nlattr **tb,
			   struct flow_dissector_key_mpls *key_val,
			   struct flow_dissector_key_mpls *key_mask,
			   struct netlink_ext_ack *extack)
{
	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
		key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
		key_mask->mpls_ttl = MPLS_TTL_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

		if (bos & ~MPLS_BOS_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_BOS],
					    "Bottom Of Stack (BOS) must be 0 or 1");
			return -EINVAL;
		}
		key_val->mpls_bos = bos;
		key_mask->mpls_bos = MPLS_BOS_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

		if (tc & ~MPLS_TC_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_TC],
					    "Traffic Class (TC) must be between 0 and 7");
			return -EINVAL;
		}
		key_val->mpls_tc = tc;
		key_mask->mpls_tc = MPLS_TC_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

		if (label & ~MPLS_LABEL_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_LABEL],
					    "Label must be between 0 and 1048575");
			return -EINVAL;
		}
		key_val->mpls_label = label;
		key_mask->mpls_label = MPLS_LABEL_MASK;
	}
	return 0;
}

static void fl_set_key_vlan(struct nlattr **tb,
			    __be16 ethertype,
			    int vlan_id_key, int vlan_prio_key,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[vlan_id_key]) {
		key_val->vlan_id =
			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[vlan_prio_key]) {
		key_val->vlan_priority =
			nla_get_u8(tb[vlan_prio_key]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
	key_val->vlan_tpid = ethertype;
	key_mask->vlan_tpid = cpu_to_be16(~0);
}

static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}

static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
			    u32 *flags_mask, struct netlink_ext_ack *extack)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
		NL_SET_ERR_MSG(extack, "Missing flags mask");
		return -EINVAL;
	}

	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	return 0;
}

static void fl_set_key_ip(struct nlattr **tb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
}

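/* Geneve options are TLVs: a 4-byte struct geneve_opt header (class, type,
 * flags/length) followed by option data in 4-byte multiples. Key and mask
 * options are accumulated back to back in key->enc_opts.data, so the
 * option_len computed for a key TLV is fed back in when parsing the
 * corresponding mask TLV.
 */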
static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
	struct nlattr *class = NULL, *type = NULL, *data = NULL;
	struct geneve_opt *opt;
	int err, data_len = 0;

	if (option_len > sizeof(struct geneve_opt))
		data_len = option_len - sizeof(struct geneve_opt);

	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
	memset(opt, 0xff, option_len);
	opt->length = data_len / 4;
	opt->r1 = 0;
	opt->r2 = 0;
	opt->r3 = 0;

	/* If no mask has been provided we assume an exact match. */
	if (!depth)
		return sizeof(struct geneve_opt) + data_len;

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb,
					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
					  nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	/* We are not allowed to omit any of CLASS, TYPE or DATA
	 * fields from the key.
	 */
	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	/* Omitting any of CLASS, TYPE or DATA fields is allowed
	 * for the mask.
	 */
	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
		int new_len = key->enc_opts.len;

		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
		data_len = nla_len(data);
		if (data_len < 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
			return -ERANGE;
		}
		if (data_len % 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
			return -ERANGE;
		}

		new_len += sizeof(struct geneve_opt) + data_len;
		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
			NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
			return -ERANGE;
		}
		opt->length = data_len / 4;
		memcpy(opt->opt_data, nla_data(data), data_len);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
		opt->opt_class = nla_get_be16(class);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
		opt->type = nla_get_u8(type);
	}

	return sizeof(struct geneve_opt) + data_len;
}

static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			    int depth, int option_len,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
	struct vxlan_metadata *md;
	int err;

	md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
		NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
			       vxlan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP])
		md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);

	return sizeof(*md);
}

static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
	struct erspan_metadata *md;
	int err;

	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));
	md->version = 1;

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
		NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
			       erspan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
		md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);

	if (md->version == 1) {
		if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
			return -EINVAL;
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
			md->u.index = nla_get_be32(nla);
		}
	} else if (md->version == 2) {
		if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
				    !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
			return -EINVAL;
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
			md->u.md2.dir = nla_get_u8(nla);
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
			set_hwid(&md->u.md2, nla_get_u8(nla));
		}
	} else {
		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
		return -EINVAL;
	}

	return sizeof(*md);
}

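/* fl_set_enc_opt() walks the key's nested option attributes and, in
 * lockstep, the mask's attributes: after each key TLV is parsed, the same
 * fl_set_*_opt() helper is re-run on the current mask attribute with the
 * key's option_len, and nla_next() advances the mask cursor. A depth of 0
 * (no mask supplied) makes the helpers emit an all-ones, exact-match mask.
 */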
static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
			  struct fl_flow_key *mask,
			  struct netlink_ext_ack *extack)
{
	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
	int err, option_len, key_depth, msk_depth = 0;

	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
					     enc_opts_policy, extack);
	if (err)
		return err;

	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);

	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
						     enc_opts_policy, extack);
		if (err)
			return err;

		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
	}

	nla_for_each_attr(nla_opt_key, nla_enc_key,
			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
		switch (nla_type(nla_opt_key)) {
		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
			if (key->enc_opts.dst_opt_type &&
			    key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
				return -EINVAL;
			}

			if (msk_depth)
				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
			option_len = fl_set_vxlan_opt(nla_opt_key, key,
						      key_depth, option_len,
						      extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
			option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
						      msk_depth, option_len,
						      extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
				return -EINVAL;
			}

			if (msk_depth)
				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
			option_len = fl_set_erspan_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
			option_len = fl_set_erspan_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
				return -EINVAL;
			}

			if (msk_depth)
				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
			return -EINVAL;
		}
	}

	return 0;
}

static int fl_set_key_ct(struct nlattr **tb,
			 struct flow_dissector_key_ct *key,
			 struct flow_dissector_key_ct *mask,
			 struct netlink_ext_ack *extack)
{
	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
			       sizeof(key->ct_state));
	}
	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
			       sizeof(key->ct_zone));
	}
	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
			       sizeof(key->ct_mark));
	}
	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
			       sizeof(key->ct_labels));
	}

	return 0;
}

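/* In fl_set_key() below, TCA_FLOWER_KEY_ETH_TYPE may name a VLAN TPID, in
 * which case the outer VLAN header is keyed and TCA_FLOWER_KEY_VLAN_ETH_TYPE
 * takes over as the encapsulated protocol; if that is again a VLAN TPID
 * (QinQ), the inner tag lands in cvlan and TCA_FLOWER_KEY_CVLAN_ETH_TYPE
 * finally supplies basic.n_proto. Otherwise the ethertype goes straight
 * into basic.n_proto as an exact match.
 */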
static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask,
		      struct netlink_ext_ack *extack)
{
	__be16 ethertype;
	int ret = 0;

	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);

		if (err < 0)
			return err;
		key->meta.ingress_ifindex = err;
		mask->meta.ingress_ifindex = 0xffffffff;
	}

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);

		if (eth_type_vlan(ethertype)) {
			fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
					TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
					&mask->vlan);

			if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
				ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
				if (eth_type_vlan(ethertype)) {
					fl_set_key_vlan(tb, ethertype,
							TCA_FLOWER_KEY_CVLAN_ID,
							TCA_FLOWER_KEY_CVLAN_PRIO,
							&key->cvlan, &mask->cvlan);
					fl_set_key_val(tb, &key->basic.n_proto,
						       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
						       &mask->basic.n_proto,
						       TCA_FLOWER_UNSPEC,
						       sizeof(key->basic.n_proto));
				} else {
					key->basic.n_proto = ethertype;
					mask->basic.n_proto = cpu_to_be16(~0);
				}
			}
		} else {
			key->basic.n_proto = ethertype;
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	}

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			       sizeof(key->tp.dst));
		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			       sizeof(key->tcp.flags));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
		   key->basic.ip_proto == IPPROTO_ICMP) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		   key->basic.ip_proto == IPPROTO_ICMPV6) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
		if (ret)
			return ret;
	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
		   key->basic.n_proto == htons(ETH_P_RARP)) {
		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
			       sizeof(key->arp.sip));
		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
			       sizeof(key->arp.tip));
		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
			       sizeof(key->arp.op));
		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
			       sizeof(key->arp.sha));
		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
			       sizeof(key->arp.tha));
	}

	if (key->basic.ip_proto == IPPROTO_TCP ||
	    key->basic.ip_proto == IPPROTO_UDP ||
	    key->basic.ip_proto == IPPROTO_SCTP) {
		ret = fl_set_key_port_range(tb, key, mask, extack);
		if (ret)
			return ret;
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
			       &mask->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			       sizeof(key->enc_ipv4.src));
		fl_set_key_val(tb, &key->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST,
			       &mask->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			       sizeof(key->enc_ipv4.dst));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
			       &mask->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
			       sizeof(key->enc_ipv6.src));
		fl_set_key_val(tb, &key->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST,
			       &mask->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
			       sizeof(key->enc_ipv6.dst));
	}

	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
		       sizeof(key->enc_key_id.keyid));

	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
		       sizeof(key->enc_tp.src));

	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
		       sizeof(key->enc_tp.dst));

	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);

	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
		ret = fl_set_enc_opt(tb, key, mask, extack);
		if (ret)
			return ret;
	}

	ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
	if (ret)
		return ret;

	if (tb[TCA_FLOWER_KEY_FLAGS])
		ret = fl_set_key_flags(tb, &key->control.flags,
				       &mask->control.flags, extack);

	return ret;
}

static void fl_mask_copy(struct fl_flow_mask *dst,
			 struct fl_flow_mask *src)
{
	const void *psrc = fl_key_get_start(&src->key, src);
	void *pdst = fl_key_get_start(&dst->key, src);

	memcpy(pdst, psrc, fl_mask_range(src));
	dst->range = src->range;
}

static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};

static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
{
	mask->filter_ht_params = fl_ht_params;
	mask->filter_ht_params.key_len = fl_mask_range(mask);
	mask->filter_ht_params.key_offset += mask->range.start;

	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
}

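/* Only fields with a non-zero mask are handed to the flow dissector, so the
 * fast path never dissects more of the packet than some filter can actually
 * match on. CONTROL and BASIC are always present; ENC_CONTROL is pulled in
 * as soon as either tunnel address family is masked.
 */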
#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)

#define FL_KEY_IS_MASKED(mask, member)						\
	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
		   0, FL_KEY_MEMBER_SIZE(member))				\

#define FL_KEY_SET(keys, cnt, id, member)					\
	do {									\
		keys[cnt].key_id = id;						\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
		cnt++;								\
	} while(0);

#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
	do {									\
		if (FL_KEY_IS_MASKED(mask, member))				\
			FL_KEY_SET(keys, cnt, id, member);			\
	} while(0);

static void fl_init_dissector(struct flow_dissector *dissector,
			      struct fl_flow_key *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_META, meta);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_PORTS, tp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_IP, ip);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_TCP, tcp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ICMP, icmp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ARP, arp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_MPLS, mpls);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_VLAN, vlan);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
	    FL_KEY_IS_MASKED(mask, enc_ipv6))
		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
			   enc_control);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_CT, ct);

	skb_flow_dissector_init(dissector, keys, cnt);
}

static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
					       struct fl_flow_mask *mask)
{
	struct fl_flow_mask *newmask;
	int err;

	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
	if (!newmask)
		return ERR_PTR(-ENOMEM);

	fl_mask_copy(newmask, mask);

	if ((newmask->key.tp_range.tp_min.dst &&
	     newmask->key.tp_range.tp_max.dst) ||
	    (newmask->key.tp_range.tp_min.src &&
	     newmask->key.tp_range.tp_max.src))
		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;

	err = fl_init_mask_hashtable(newmask);
	if (err)
		goto errout_free;

	fl_init_dissector(&newmask->dissector, &newmask->key);

	INIT_LIST_HEAD_RCU(&newmask->filters);

	refcount_set(&newmask->refcnt, 1);
	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
				      &newmask->ht_node, mask_ht_params);
	if (err)
		goto errout_destroy;

	spin_lock(&head->masks_lock);
	list_add_tail_rcu(&newmask->list, &head->masks);
	spin_unlock(&head->masks_lock);

	return newmask;

errout_destroy:
	rhashtable_destroy(&newmask->ht);
errout_free:
	kfree(newmask);

	return ERR_PTR(err);
}

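/* fl_check_assign_mask() resolves fnew->mask to a refcounted, shared
 * fl_flow_mask. The caller-supplied mask is inserted into head->ht as a
 * temporary placeholder so two concurrent inserts with the same new mask
 * cannot both create it; a racer that finds the placeholder (refcnt still
 * zero) or an ERR_PTR backs off with -EAGAIN and retries from cls_api.
 */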
static int fl_check_assign_mask(struct cls_fl_head *head,
				struct cls_fl_filter *fnew,
				struct cls_fl_filter *fold,
				struct fl_flow_mask *mask)
{
	struct fl_flow_mask *newmask;
	int ret = 0;

	rcu_read_lock();

	/* Insert mask as temporary node to prevent concurrent creation of mask
	 * with same key. Any concurrent lookups with same key will return
	 * -EAGAIN because mask's refcnt is zero.
	 */
	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
						       &mask->ht_node,
						       mask_ht_params);
	if (!fnew->mask) {
		rcu_read_unlock();

		if (fold) {
			ret = -EINVAL;
			goto errout_cleanup;
		}

		newmask = fl_create_new_mask(head, mask);
		if (IS_ERR(newmask)) {
			ret = PTR_ERR(newmask);
			goto errout_cleanup;
		}

		fnew->mask = newmask;
		return 0;
	} else if (IS_ERR(fnew->mask)) {
		ret = PTR_ERR(fnew->mask);
	} else if (fold && fold->mask != fnew->mask) {
		ret = -EINVAL;
	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
		/* Mask was deleted concurrently, try again */
		ret = -EAGAIN;
	}
	rcu_read_unlock();
	return ret;

errout_cleanup:
	rhashtable_remove_fast(&head->ht, &mask->ht_node,
			       mask_ht_params);
	return ret;
}

static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr,
			struct fl_flow_tmplt *tmplt, bool rtnl_held,
			struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
				extack);
	if (err < 0)
		return err;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		if (!rtnl_held)
			rtnl_lock();
		tcf_bind_filter(tp, &f->res, base);
		if (!rtnl_held)
			rtnl_unlock();
	}

	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
	if (err)
		return err;

	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	if (!fl_mask_fits_tmplt(tmplt, mask)) {
		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
		return -EINVAL;
	}

	return 0;
}

static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
			       struct cls_fl_filter *fold,
			       bool *in_ht)
{
	struct fl_flow_mask *mask = fnew->mask;
	int err;

	err = rhashtable_lookup_insert_fast(&mask->ht,
					    &fnew->ht_node,
					    mask->filter_ht_params);
	if (err) {
		*in_ht = false;
		/* It is okay if filter with same key exists when
		 * overwriting.
		 */
		return fold && err == -EEXIST ? 0 : err;
	}

	*in_ht = true;
	return 0;
}

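/* fl_change() runs without rtnl in the unlocked-classifier mode. The
 * sequence is: allocate and validate outside of any lock, offload to
 * hardware if requested, then take tp->lock only for the final linking
 * step. Any race detected there (tp being deleted, fold already gone, a
 * concurrently inserted handle) surfaces as -EAGAIN so cls_api can restart
 * the whole operation.
 */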
static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     void **arg, bool ovr, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct cls_fl_filter *fold = *arg;
	struct cls_fl_filter *fnew;
	struct fl_flow_mask *mask;
	struct nlattr **tb;
	bool in_ht;
	int err;

	if (!tca[TCA_OPTIONS]) {
		err = -EINVAL;
		goto errout_fold;
	}

	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
	if (!mask) {
		err = -ENOBUFS;
		goto errout_fold;
	}

	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
	if (!tb) {
		err = -ENOBUFS;
		goto errout_mask_alloc;
	}

	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
					  tca[TCA_OPTIONS], fl_policy, NULL);
	if (err < 0)
		goto errout_tb;

	if (fold && handle && fold->handle != handle) {
		err = -EINVAL;
		goto errout_tb;
	}

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew) {
		err = -ENOBUFS;
		goto errout_tb;
	}
	INIT_LIST_HEAD(&fnew->hw_list);
	refcount_set(&fnew->refcnt, 1);

	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
	if (err < 0)
		goto errout;

	if (tb[TCA_FLOWER_FLAGS]) {
		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);

		if (!tc_flags_valid(fnew->flags)) {
			err = -EINVAL;
			goto errout;
		}
	}

	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
			   tp->chain->tmplt_priv, rtnl_held, extack);
	if (err)
		goto errout;

	err = fl_check_assign_mask(head, fnew, fold, mask);
	if (err)
		goto errout;

	err = fl_ht_insert_unique(fnew, fold, &in_ht);
	if (err)
		goto errout_mask;

	if (!tc_skip_hw(fnew->flags)) {
		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
		if (err)
			goto errout_ht;
	}

	if (!tc_in_hw(fnew->flags))
		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	spin_lock(&tp->lock);

	/* tp was deleted concurrently. -EAGAIN will cause the caller to look
	 * up the proto again or create a new one, if necessary.
	 */
	if (tp->deleting) {
		err = -EAGAIN;
		goto errout_hw;
	}

	if (fold) {
		/* Fold filter was deleted concurrently. Retry lookup. */
		if (fold->deleted) {
			err = -EAGAIN;
			goto errout_hw;
		}

		fnew->handle = handle;

		if (!in_ht) {
			struct rhashtable_params params =
				fnew->mask->filter_ht_params;

			err = rhashtable_insert_fast(&fnew->mask->ht,
						     &fnew->ht_node,
						     params);
			if (err)
				goto errout_hw;
			in_ht = true;
		}

		refcount_inc(&fnew->refcnt);
		rhashtable_remove_fast(&fold->mask->ht,
				       &fold->ht_node,
				       fold->mask->filter_ht_params);
		idr_replace(&head->handle_idr, fnew, fnew->handle);
		list_replace_rcu(&fold->list, &fnew->list);
		fold->deleted = true;

		spin_unlock(&tp->lock);

		fl_mask_put(head, fold->mask);
		if (!tc_skip_hw(fold->flags))
			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
		tcf_unbind_filter(tp, &fold->res);
		/* Caller holds reference to fold, so refcnt is always > 0
		 * after this.
		 */
		refcount_dec(&fold->refcnt);
		__fl_put(fold);
	} else {
		if (handle) {
			/* user specifies a handle and it doesn't exist */
			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
					    handle, GFP_ATOMIC);

			/* Filter with specified handle was concurrently
			 * inserted after initial check in cls_api. This is not
			 * necessarily an error if NLM_F_EXCL is not set in
			 * message flags. Returning EAGAIN will cause cls_api to
			 * try to update concurrently inserted rule.
			 */
			if (err == -ENOSPC)
				err = -EAGAIN;
		} else {
			handle = 1;
			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
					    INT_MAX, GFP_ATOMIC);
		}
		if (err)
			goto errout_hw;

		refcount_inc(&fnew->refcnt);
		fnew->handle = handle;
		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
		spin_unlock(&tp->lock);
	}

	*arg = fnew;

	kfree(tb);
	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
	return 0;

errout_ht:
	spin_lock(&tp->lock);
errout_hw:
	fnew->deleted = true;
	spin_unlock(&tp->lock);
	if (!tc_skip_hw(fnew->flags))
		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
	if (in_ht)
		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
				       fnew->mask->filter_ht_params);
errout_mask:
	fl_mask_put(head, fnew->mask);
errout:
	__fl_put(fnew);
errout_tb:
	kfree(tb);
errout_mask_alloc:
	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
errout_fold:
	if (fold)
		__fl_put(fold);
	return err;
}

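/* Deletion and iteration below rely on the same refcount discipline:
 * a filter whose refcount cannot be raised (refcount_inc_not_zero()
 * fails) is concurrently being destroyed and must be skipped.
 */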
static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
		     bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct cls_fl_filter *f = arg;
	bool last_on_mask;
	int err = 0;

	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
	*last = list_empty(&head->masks);
	__fl_put(f);

	return err;
}

static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		    bool rtnl_held)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	unsigned long id = arg->cookie, tmp;
	struct cls_fl_filter *f;

	arg->count = arg->skip;

	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
		/* don't return filters that are being deleted */
		if (!refcount_inc_not_zero(&f->refcnt))
			continue;
		if (arg->fn(tp, f, arg) < 0) {
			__fl_put(f);
			arg->stop = 1;
			break;
		}
		__fl_put(f);
		arg->count++;
	}
	arg->cookie = id;
}

static struct cls_fl_filter *
fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	spin_lock(&tp->lock);
	if (list_empty(&head->hw_filters)) {
		spin_unlock(&tp->lock);
		return NULL;
	}

	if (!f)
		f = list_entry(&head->hw_filters, struct cls_fl_filter,
			       hw_list);
	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
			spin_unlock(&tp->lock);
			return f;
		}
	}

	spin_unlock(&tp->lock);
	return NULL;
}

static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			void *cb_priv, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};
	struct cls_fl_filter *f = NULL;
	int err;

	/* hw_filters list can only be changed by hw offload functions after
	 * obtaining rtnl lock. Make sure it is not changed while reoffload is
	 * iterating it.
	 */
	ASSERT_RTNL();

	while ((f = fl_get_next_hw_filter(tp, f, add))) {
		cls_flower.rule =
			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
		if (!cls_flower.rule) {
			__fl_put(f);
			return -ENOMEM;
		}

		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
					   extack);
		cls_flower.command = add ?
			FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
		cls_flower.cookie = (unsigned long)f;
		cls_flower.rule->match.dissector = &f->mask->dissector;
		cls_flower.rule->match.mask = &f->mask->key;
		cls_flower.rule->match.key = &f->mkey;

		err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
		if (err) {
			kfree(cls_flower.rule);
			if (tc_skip_sw(f->flags)) {
				NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
				__fl_put(f);
				return err;
			}
			goto next_flow;
		}

		cls_flower.classid = f->res.classid;

		err = tc_setup_cb_reoffload(block, tp, add, cb,
					    TC_SETUP_CLSFLOWER, &cls_flower,
					    cb_priv, &f->flags,
					    &f->in_hw_count);
		tc_cleanup_flow_action(&cls_flower.rule->action);
		kfree(cls_flower.rule);

		if (err) {
			__fl_put(f);
			return err;
		}
next_flow:
		__fl_put(f);
	}

	return 0;
}

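/* hw_add/hw_del notifications from the offload core maintain
 * head->hw_filters, the list fl_reoffload() walks above. An empty
 * f->hw_list means the filter never made it into hardware.
 */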
static void fl_hw_add(struct tcf_proto *tp, void *type_data)
{
	struct flow_cls_offload *cls_flower = type_data;
	struct cls_fl_filter *f =
		(struct cls_fl_filter *) cls_flower->cookie;
	struct cls_fl_head *head = fl_head_dereference(tp);

	spin_lock(&tp->lock);
	list_add(&f->hw_list, &head->hw_filters);
	spin_unlock(&tp->lock);
}

static void fl_hw_del(struct tcf_proto *tp, void *type_data)
{
	struct flow_cls_offload *cls_flower = type_data;
	struct cls_fl_filter *f =
		(struct cls_fl_filter *) cls_flower->cookie;

	spin_lock(&tp->lock);
	if (!list_empty(&f->hw_list))
		list_del_init(&f->hw_list);
	spin_unlock(&tp->lock);
}

static int fl_hw_create_tmplt(struct tcf_chain *chain,
			      struct fl_flow_tmplt *tmplt)
{
	struct flow_cls_offload cls_flower = {};
	struct tcf_block *block = chain->block;

	cls_flower.rule = flow_rule_alloc(0);
	if (!cls_flower.rule)
		return -ENOMEM;

	cls_flower.common.chain_index = chain->index;
	cls_flower.command = FLOW_CLS_TMPLT_CREATE;
	cls_flower.cookie = (unsigned long) tmplt;
	cls_flower.rule->match.dissector = &tmplt->dissector;
	cls_flower.rule->match.mask = &tmplt->mask;
	cls_flower.rule->match.key = &tmplt->dummy_key;

	/* We don't care if driver (any of them) fails to handle this
	 * call. It serves just as a hint for it.
	 */
	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
	kfree(cls_flower.rule);

	return 0;
}

static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
				struct fl_flow_tmplt *tmplt)
{
	struct flow_cls_offload cls_flower = {};
	struct tcf_block *block = chain->block;

	cls_flower.common.chain_index = chain->index;
	cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
	cls_flower.cookie = (unsigned long) tmplt;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
}

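/* A chain template pre-declares the mask every filter on the chain must
 * fit (see fl_mask_fits_tmplt()), which lets drivers size hardware
 * tables up front. Created from user space with something like
 * (illustrative):
 *
 *	tc chain add dev eth0 ingress protocol ip \
 *		flower dst_ip 0.0.0.0/16
 */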
static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
			     struct nlattr **tca,
			     struct netlink_ext_ack *extack)
{
	struct fl_flow_tmplt *tmplt;
	struct nlattr **tb;
	int err;

	if (!tca[TCA_OPTIONS])
		return ERR_PTR(-EINVAL);

	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
	if (!tb)
		return ERR_PTR(-ENOBUFS);
	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
					  tca[TCA_OPTIONS], fl_policy, NULL);
	if (err)
		goto errout_tb;

	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
	if (!tmplt) {
		err = -ENOMEM;
		goto errout_tb;
	}
	tmplt->chain = chain;
	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
	if (err)
		goto errout_tmplt;

	fl_init_dissector(&tmplt->dissector, &tmplt->mask);

	err = fl_hw_create_tmplt(chain, tmplt);
	if (err)
		goto errout_tmplt;

	kfree(tb);
	return tmplt;

errout_tmplt:
	kfree(tmplt);
errout_tb:
	kfree(tb);
	return ERR_PTR(err);
}

static void fl_tmplt_destroy(void *tmplt_priv)
{
	struct fl_flow_tmplt *tmplt = tmplt_priv;

	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
	kfree(tmplt);
}

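/* Dump helpers: each key is emitted as a value attribute plus an
 * optional mask attribute (TCA_FLOWER_UNSPEC suppresses the mask), and
 * keys whose mask is all zeroes were never configured and are skipped.
 * For example, an exact match on destination MAC dumps as
 * TCA_FLOWER_KEY_ETH_DST plus TCA_FLOWER_KEY_ETH_DST_MASK
 * (ff:ff:ff:ff:ff:ff).
 */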
static int fl_dump_key_val(struct sk_buff *skb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	int err;

	if (!memchr_inv(mask, 0, len))
		return 0;
	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;
	if (mask_type != TCA_FLOWER_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}
	return 0;
}

static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
				  struct fl_flow_key *mask)
{
	if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
			    TCA_FLOWER_KEY_PORT_DST_MIN,
			    &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_range.tp_min.dst)) ||
	    fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
			    TCA_FLOWER_KEY_PORT_DST_MAX,
			    &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_range.tp_max.dst)) ||
	    fl_dump_key_val(skb, &key->tp_range.tp_min.src,
			    TCA_FLOWER_KEY_PORT_SRC_MIN,
			    &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_range.tp_min.src)) ||
	    fl_dump_key_val(skb, &key->tp_range.tp_max.src,
			    TCA_FLOWER_KEY_PORT_SRC_MAX,
			    &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_range.tp_max.src)))
		return -1;

	return 0;
}

static int fl_dump_key_mpls(struct sk_buff *skb,
			    struct flow_dissector_key_mpls *mpls_key,
			    struct flow_dissector_key_mpls *mpls_mask)
{
	int err;

	if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
		return 0;
	if (mpls_mask->mpls_ttl) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
				 mpls_key->mpls_ttl);
		if (err)
			return err;
	}
	if (mpls_mask->mpls_tc) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
				 mpls_key->mpls_tc);
		if (err)
			return err;
	}
	if (mpls_mask->mpls_label) {
		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
				  mpls_key->mpls_label);
		if (err)
			return err;
	}
	if (mpls_mask->mpls_bos) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
				 mpls_key->mpls_bos);
		if (err)
			return err;
	}
	return 0;
}

static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
		return -1;

	return 0;
}

static int fl_dump_key_vlan(struct sk_buff *skb,
			    int vlan_id_key, int vlan_prio_key,
			    struct flow_dissector_key_vlan *vlan_key,
			    struct flow_dissector_key_vlan *vlan_mask)
{
	int err;

	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
		return 0;
	if (vlan_mask->vlan_id) {
		err = nla_put_u16(skb, vlan_id_key,
				  vlan_key->vlan_id);
		if (err)
			return err;
	}
	if (vlan_mask->vlan_priority) {
		err = nla_put_u8(skb, vlan_prio_key,
				 vlan_key->vlan_priority);
		if (err)
			return err;
	}
	return 0;
}

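/* Control flags are stored as FLOW_DIS_* bits internally but exposed
 * as TCA_FLOWER_KEY_FLAGS_* bits over netlink; the two helpers below
 * translate bit-for-bit between the two encodings.
 */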
static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
			    u32 *flower_key, u32 *flower_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (dissector_mask & dissector_flag_bit) {
		*flower_mask |= flower_flag_bit;
		if (dissector_key & dissector_flag_bit)
			*flower_key |= flower_flag_bit;
	}
}

static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
{
	u32 key, mask;
	__be32 _key, _mask;
	int err;

	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
		return 0;

	key = 0;
	mask = 0;

	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	_key = cpu_to_be32(key);
	_mask = cpu_to_be32(mask);

	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
	if (err)
		return err;

	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
}

static int fl_dump_key_geneve_opt(struct sk_buff *skb,
				  struct flow_dissector_key_enc_opts *enc_opts)
{
	struct geneve_opt *opt;
	struct nlattr *nest;
	int opt_off = 0;

	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
	if (!nest)
		goto nla_put_failure;

	while (enc_opts->len > opt_off) {
		opt = (struct geneve_opt *)&enc_opts->data[opt_off];

		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
				 opt->opt_class))
			goto nla_put_failure;
		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
			       opt->type))
			goto nla_put_failure;
		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
			    opt->length * 4, opt->opt_data))
			goto nla_put_failure;

		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
	}
	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
				 struct flow_dissector_key_enc_opts *enc_opts)
{
	struct vxlan_metadata *md;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
	if (!nest)
		goto nla_put_failure;

	md = (struct vxlan_metadata *)&enc_opts->data[0];
	if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int fl_dump_key_erspan_opt(struct sk_buff *skb,
				  struct flow_dissector_key_enc_opts *enc_opts)
{
	struct erspan_metadata *md;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
	if (!nest)
		goto nla_put_failure;

	md = (struct erspan_metadata *)&enc_opts->data[0];
	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
		goto nla_put_failure;

	if (md->version == 1 &&
	    nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
		goto nla_put_failure;

	if (md->version == 2 &&
	    (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
			md->u.md2.dir) ||
	     nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
			get_hwid(&md->u.md2))))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int fl_dump_key_ct(struct sk_buff *skb,
			  struct flow_dissector_key_ct *key,
			  struct flow_dissector_key_ct *mask)
{
	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
			    sizeof(key->ct_state)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
			    sizeof(key->ct_zone)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
			    sizeof(key->ct_mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
			    sizeof(key->ct_labels)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

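/* Tunnel options are dumped in a nest keyed by dst_opt_type; exactly
 * one of the GENEVE/VXLAN/ERSPAN sub-nests above is emitted, and an
 * unknown type is treated as a dump error.
 */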
static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
			       struct flow_dissector_key_enc_opts *enc_opts)
{
	struct nlattr *nest;
	int err;

	if (!enc_opts->len)
		return 0;

	nest = nla_nest_start_noflag(skb, enc_opt_type);
	if (!nest)
		goto nla_put_failure;

	switch (enc_opts->dst_opt_type) {
	case TUNNEL_GENEVE_OPT:
		err = fl_dump_key_geneve_opt(skb, enc_opts);
		if (err)
			goto nla_put_failure;
		break;
	case TUNNEL_VXLAN_OPT:
		err = fl_dump_key_vxlan_opt(skb, enc_opts);
		if (err)
			goto nla_put_failure;
		break;
	case TUNNEL_ERSPAN_OPT:
		err = fl_dump_key_erspan_opt(skb, enc_opts);
		if (err)
			goto nla_put_failure;
		break;
	default:
		goto nla_put_failure;
	}
	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int fl_dump_key_enc_opt(struct sk_buff *skb,
			       struct flow_dissector_key_enc_opts *key_opts,
			       struct flow_dissector_key_enc_opts *msk_opts)
{
	int err;

	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
	if (err)
		return err;

	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
}

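/* fl_dump_key() is the inverse of fl_set_key(): it walks every field of
 * the key/mask pair and re-emits the netlink attributes the filter was
 * created with. L3/L4-specific keys are only dumped when n_proto and
 * ip_proto say they are meaningful, mirroring the checks on the set
 * side.
 */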
static int fl_dump_key(struct sk_buff *skb, struct net *net,
		       struct fl_flow_key *key, struct fl_flow_key *mask)
{
	if (mask->meta.ingress_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;

	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
			     TCA_FLOWER_KEY_CVLAN_PRIO,
			     &key->cvlan, &mask->cvlan) ||
	    (mask->cvlan.vlan_tpid &&
	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
			  key->cvlan.vlan_tpid)))
		goto nla_put_failure;

	if (mask->basic.n_proto) {
		if (mask->cvlan.vlan_tpid) {
			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
					 key->basic.n_proto))
				goto nla_put_failure;
		} else if (mask->vlan.vlan_tpid) {
			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
					 key->basic.n_proto))
				goto nla_put_failure;
		}
	}

	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			     &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			     sizeof(key->basic.ip_proto)) ||
	     fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			     sizeof(key->tp.dst)) ||
	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			     sizeof(key->tcp.flags))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_SCTP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IP) &&
		 key->basic.ip_proto == IPPROTO_ICMP &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
		  key->basic.n_proto == htons(ETH_P_RARP)) &&
		 (fl_dump_key_val(skb, &key->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP_MASK,
				  sizeof(key->arp.sip)) ||
		  fl_dump_key_val(skb, &key->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP_MASK,
				  sizeof(key->arp.tip)) ||
		  fl_dump_key_val(skb, &key->arp.op,
				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
				  TCA_FLOWER_KEY_ARP_OP_MASK,
				  sizeof(key->arp.op)) ||
		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
				  sizeof(key->arp.sha)) ||
		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
				  sizeof(key->arp.tha))))
		goto nla_put_failure;

	if ((key->basic.ip_proto == IPPROTO_TCP ||
	     key->basic.ip_proto == IPPROTO_UDP ||
	     key->basic.ip_proto == IPPROTO_SCTP) &&
	     fl_dump_key_port_range(skb, key, mask))
		goto nla_put_failure;

	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			     sizeof(key->enc_ipv4.src)) ||
	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			     sizeof(key->enc_ipv4.dst))))
		goto nla_put_failure;
	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
				  sizeof(key->enc_ipv6.src)) ||
		  fl_dump_key_val(skb, &key->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST,
				  &mask->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
				  sizeof(key->enc_ipv6.dst))))
		goto nla_put_failure;

	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
			    sizeof(key->enc_key_id)) ||
	    fl_dump_key_val(skb, &key->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
			    &mask->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
			    sizeof(key->enc_tp.src)) ||
	    fl_dump_key_val(skb, &key->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
			    &mask->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
			    sizeof(key->enc_tp.dst)) ||
	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
		goto nla_put_failure;

	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
		goto nla_put_failure;

	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

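/* Dump a single filter. tp->lock is only held across the parts that can
 * change under a concurrent update (classid, key/mask, flags); hardware
 * stats are refreshed outside the lock because driver callbacks may
 * sleep.
 */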
static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct cls_fl_filter *f = fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;
	bool skip_hw;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	spin_lock(&tp->lock);

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure_locked;

	key = &f->key;
	mask = &f->mask->key;
	skip_hw = tc_skip_hw(f->flags);

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure_locked;

	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto nla_put_failure_locked;

	spin_unlock(&tp->lock);

	if (!skip_hw)
		fl_hw_update_stats(tp, f, rtnl_held);

	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure_locked:
	spin_unlock(&tp->lock);
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
{
	struct fl_flow_tmplt *tmplt = tmplt_priv;
	struct fl_flow_key *key, *mask;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	key = &tmplt->dummy_key;
	mask = &tmplt->mask;

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			  unsigned long base)
{
	struct cls_fl_filter *f = fh;

	if (f && f->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &f->res, base);
		else
			__tcf_unbind_filter(q, &f->res);
	}
}

static bool fl_delete_empty(struct tcf_proto *tp)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	spin_lock(&tp->lock);
	tp->deleting = idr_is_empty(&head->handle_idr);
	spin_unlock(&tp->lock);

	return tp->deleting;
}

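/* All ops are registered with TCF_PROTO_OPS_DOIT_UNLOCKED: the cls_api
 * core may call them without taking rtnl, so the implementations above
 * provide their own synchronization (tp->lock, refcounts and RCU).
 */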
static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.put		= fl_put,
	.change		= fl_change,
	.delete		= fl_delete,
	.delete_empty	= fl_delete_empty,
	.walk		= fl_walk,
	.reoffload	= fl_reoffload,
	.hw_add		= fl_hw_add,
	.hw_del		= fl_hw_del,
	.dump		= fl_dump,
	.bind_class	= fl_bind_class,
	.tmplt_create	= fl_tmplt_create,
	.tmplt_destroy	= fl_tmplt_destroy,
	.tmplt_dump	= fl_tmplt_dump,
	.owner		= THIS_MODULE,
	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
};

static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");