/*
 * net/sched/em_meta.c	Metadata ematch
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 *
 * ==========================================================================
 *
 *	The metadata ematch compares two meta objects where each object
 *	represents either a meta value stored in the kernel or a static
 *	value provided by userspace. The objects are not provided by
 *	userspace itself but rather a definition providing the information
 *	to build them. Every object is of a certain type which must be
 *	equal to the object it is being compared to.
 *
 *	The definition of an object consists of the type (meta type), an
 *	identifier (meta id) and additional type specific information.
 *	The meta id is either TCF_META_ID_VALUE for values provided by
 *	userspace or an index into the meta operations table consisting of
 *	function pointers to type specific meta data collectors returning
 *	the value of the requested meta value.
 *
 *	         lvalue                                   rvalue
 *	      +-----------+                           +-----------+
 *	      | type: INT |                           | type: INT |
 *	 def  | id: DEV   |                           | id: VALUE |
 *	      | data:     |                           | data: 3   |
 *	      +-----------+                           +-----------+
 *	            |                                       |
 *	            ---> meta_ops[INT][DEV](...)            |
 *	                      |                             |
 *	            -----------                             |
 *	            V                                       V
 *	      +-----------+                           +-----------+
 *	      | type: INT |                           | type: INT |
 *	 obj  | id: DEV   |                           | id: VALUE |
 *	      | data: 2   |<--data got filled out     | data: 3   |
 *	      +-----------+                           +-----------+
 *	            |                                       |
 *	            --------------> 2  equals 3 <--------------
 *
 *	This is a simplified schema; the complexity varies depending
 *	on the meta type. Obviously, the length of the data must also
 *	be provided for non-numeric types.
 *
 *	Additionally, type dependent modifiers such as shift operators
 *	or masks may be applied to extend the functionality. As of now,
 *	the variable length type supports shifting the byte string to
 *	the right, eating up any number of octets and thus supporting
 *	wildcard interface name comparisons such as "ppp%" matching
 *	ppp0..9.
 *
 * NOTE: Certain meta values depend on other subsystems and are
 *       only available if that subsystem is enabled in the kernel.
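 *
 *	Example: from userspace this ematch is normally used through the
 *	"basic" classifier. An illustrative invocation (the exact keyword
 *	syntax depends on the installed iproute2 version, see tc-ematch(8))
 *	could look like:
 *
 *		tc filter add dev eth0 parent 1: basic \
 *			match 'meta(priority eq 6)' classid 1:1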
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/if_vlan.h>
#include <linux/tc_ematch/tc_em_meta.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

struct meta_obj {
	unsigned long		value;
	unsigned int		len;
};

struct meta_value {
	struct tcf_meta_val	hdr;
	unsigned long		val;
	unsigned int		len;
};

struct meta_match {
	struct meta_value	lvalue;
	struct meta_value	rvalue;
};

static inline int meta_id(struct meta_value *v)
{
	return TCF_META_ID(v->hdr.kind);
}

static inline int meta_type(struct meta_value *v)
{
	return TCF_META_TYPE(v->hdr.kind);
}

#define META_COLLECTOR(FUNC) static void meta_##FUNC(struct sk_buff *skb, \
	struct tcf_pkt_info *info, struct meta_value *v, \
	struct meta_obj *dst, int *err)

/**************************************************************************
 * System status & misc
 **************************************************************************/

META_COLLECTOR(int_random)
{
	get_random_bytes(&dst->value, sizeof(dst->value));
}

/* avenrun[] holds the load averages as fixed point numbers (FIXED_1
 * corresponds to a load of 1.0); convert such a value to an integer
 * number of hundredths, e.g. a load of 0.81 is reported as 81.
 */
static inline unsigned long fixed_loadavg(int load)
{
	int rnd_load = load + (FIXED_1/200);
	int rnd_frac = ((rnd_load & (FIXED_1-1)) * 100) >> FSHIFT;

	return ((rnd_load >> FSHIFT) * 100) + rnd_frac;
}

META_COLLECTOR(int_loadavg_0)
{
	dst->value = fixed_loadavg(avenrun[0]);
}

META_COLLECTOR(int_loadavg_1)
{
	dst->value = fixed_loadavg(avenrun[1]);
}

META_COLLECTOR(int_loadavg_2)
{
	dst->value = fixed_loadavg(avenrun[2]);
}

/**************************************************************************
 * Device names & indices
 **************************************************************************/

static inline int int_dev(struct net_device *dev, struct meta_obj *dst)
{
	if (unlikely(dev == NULL))
		return -1;

	dst->value = dev->ifindex;
	return 0;
}

static inline int var_dev(struct net_device *dev, struct meta_obj *dst)
{
	if (unlikely(dev == NULL))
		return -1;

	dst->value = (unsigned long) dev->name;
	dst->len = strlen(dev->name);
	return 0;
}

META_COLLECTOR(int_dev)
{
	*err = int_dev(skb->dev, dst);
}

META_COLLECTOR(var_dev)
{
	*err = var_dev(skb->dev, dst);
}

/**************************************************************************
 * vlan tag
 **************************************************************************/

META_COLLECTOR(int_vlan_tag)
{
	unsigned short tag;

	if (skb_vlan_tag_present(skb))
		dst->value = skb_vlan_tag_get(skb);
	else if (!__vlan_get_tag(skb, &tag))
		dst->value = tag;
	else
		*err = -1;
}


/**************************************************************************
 * skb attributes
 **************************************************************************/

META_COLLECTOR(int_priority)
{
	dst->value = skb->priority;
}

META_COLLECTOR(int_protocol)
{
	/* Let userspace take care of the byte ordering */
	dst->value = tc_skb_protocol(skb);
}

META_COLLECTOR(int_pkttype)
{
	dst->value = skb->pkt_type;
}
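
/* For reference, META_COLLECTOR(FUNC) above expands to a collector of
 * roughly the form
 *
 *	static void meta_FUNC(struct sk_buff *skb, struct tcf_pkt_info *info,
 *			      struct meta_value *v, struct meta_obj *dst,
 *			      int *err)
 *
 * i.e. every collector shares the same signature, fills in dst->value
 * (and dst->len for variable length data) and reports failures through
 * *err rather than a return value.
 */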

META_COLLECTOR(int_pktlen)
{
	dst->value = skb->len;
}

META_COLLECTOR(int_datalen)
{
	dst->value = skb->data_len;
}

META_COLLECTOR(int_maclen)
{
	dst->value = skb->mac_len;
}

META_COLLECTOR(int_rxhash)
{
	dst->value = skb_get_hash(skb);
}

/**************************************************************************
 * Netfilter
 **************************************************************************/

META_COLLECTOR(int_mark)
{
	dst->value = skb->mark;
}

/**************************************************************************
 * Traffic Control
 **************************************************************************/

META_COLLECTOR(int_tcindex)
{
	dst->value = skb->tc_index;
}

/**************************************************************************
 * Routing
 **************************************************************************/

META_COLLECTOR(int_rtclassid)
{
	if (unlikely(skb_dst(skb) == NULL))
		*err = -1;
	else
#ifdef CONFIG_IP_ROUTE_CLASSID
		dst->value = skb_dst(skb)->tclassid;
#else
		dst->value = 0;
#endif
}

META_COLLECTOR(int_rtiif)
{
	if (unlikely(skb_rtable(skb) == NULL))
		*err = -1;
	else
		dst->value = inet_iif(skb);
}

/**************************************************************************
 * Socket Attributes
 **************************************************************************/

#define skip_nonlocal(skb) \
	(unlikely(skb->sk == NULL))

META_COLLECTOR(int_sk_family)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_family;
}

META_COLLECTOR(int_sk_state)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_state;
}

META_COLLECTOR(int_sk_reuse)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_reuse;
}

META_COLLECTOR(int_sk_bound_if)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	/* No error if bound_dev_if is 0, legal userspace check */
	dst->value = skb->sk->sk_bound_dev_if;
}

META_COLLECTOR(var_sk_bound_if)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}

	if (skb->sk->sk_bound_dev_if == 0) {
		dst->value = (unsigned long) "any";
		dst->len = 3;
	} else {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_index_rcu(sock_net(skb->sk),
					   skb->sk->sk_bound_dev_if);
		*err = var_dev(dev, dst);
		rcu_read_unlock();
	}
}

META_COLLECTOR(int_sk_refcnt)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = atomic_read(&skb->sk->sk_refcnt);
}

META_COLLECTOR(int_sk_rcvbuf)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_rcvbuf;
}

META_COLLECTOR(int_sk_shutdown)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_shutdown;
}

META_COLLECTOR(int_sk_proto)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_protocol;
}

META_COLLECTOR(int_sk_type)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_type;
}

META_COLLECTOR(int_sk_rmem_alloc)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk_rmem_alloc_get(sk);
}

META_COLLECTOR(int_sk_wmem_alloc)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk_wmem_alloc_get(sk);
}

META_COLLECTOR(int_sk_omem_alloc)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = atomic_read(&sk->sk_omem_alloc);
}

META_COLLECTOR(int_sk_rcv_qlen)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_receive_queue.qlen;
}

META_COLLECTOR(int_sk_snd_qlen)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_write_queue.qlen;
}

META_COLLECTOR(int_sk_wmem_queued)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_wmem_queued;
}

META_COLLECTOR(int_sk_fwd_alloc)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_forward_alloc;
}

META_COLLECTOR(int_sk_sndbuf)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_sndbuf;
}

META_COLLECTOR(int_sk_alloc)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = (__force int) sk->sk_allocation;
}

META_COLLECTOR(int_sk_hash)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_hash;
}

META_COLLECTOR(int_sk_lingertime)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_lingertime / HZ;
}

META_COLLECTOR(int_sk_err_qlen)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_error_queue.qlen;
}

META_COLLECTOR(int_sk_ack_bl)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_ack_backlog;
}

META_COLLECTOR(int_sk_max_ack_bl)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_max_ack_backlog;
}

META_COLLECTOR(int_sk_prio)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_priority;
}

META_COLLECTOR(int_sk_rcvlowat)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_rcvlowat;
}

META_COLLECTOR(int_sk_rcvtimeo)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_rcvtimeo / HZ;
}

META_COLLECTOR(int_sk_sndtimeo)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_sndtimeo / HZ;
}

META_COLLECTOR(int_sk_sendmsg_off)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_frag.offset;
}

META_COLLECTOR(int_sk_write_pend)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_write_pending;
}

/**************************************************************************
 * Meta value collectors assignment table
 **************************************************************************/

struct meta_ops {
	void		(*get)(struct sk_buff *, struct tcf_pkt_info *,
			       struct meta_value *, struct meta_obj *, int *);
};

#define META_ID(name) TCF_META_ID_##name
#define META_FUNC(name) { .get = meta_##name }

/* Meta value operations table listing all meta value collectors and
 * assigning them to a type and meta id. */
static struct meta_ops __meta_ops[TCF_META_TYPE_MAX + 1][TCF_META_ID_MAX + 1] = {
	[TCF_META_TYPE_VAR] = {
		[META_ID(DEV)]			= META_FUNC(var_dev),
		[META_ID(SK_BOUND_IF)]		= META_FUNC(var_sk_bound_if),
	},
	[TCF_META_TYPE_INT] = {
		[META_ID(RANDOM)]		= META_FUNC(int_random),
		[META_ID(LOADAVG_0)]		= META_FUNC(int_loadavg_0),
		[META_ID(LOADAVG_1)]		= META_FUNC(int_loadavg_1),
		[META_ID(LOADAVG_2)]		= META_FUNC(int_loadavg_2),
		[META_ID(DEV)]			= META_FUNC(int_dev),
		[META_ID(PRIORITY)]		= META_FUNC(int_priority),
		[META_ID(PROTOCOL)]		= META_FUNC(int_protocol),
		[META_ID(PKTTYPE)]		= META_FUNC(int_pkttype),
		[META_ID(PKTLEN)]		= META_FUNC(int_pktlen),
		[META_ID(DATALEN)]		= META_FUNC(int_datalen),
		[META_ID(MACLEN)]		= META_FUNC(int_maclen),
		[META_ID(NFMARK)]		= META_FUNC(int_mark),
		[META_ID(TCINDEX)]		= META_FUNC(int_tcindex),
		[META_ID(RTCLASSID)]		= META_FUNC(int_rtclassid),
		[META_ID(RTIIF)]		= META_FUNC(int_rtiif),
		[META_ID(SK_FAMILY)]		= META_FUNC(int_sk_family),
		[META_ID(SK_STATE)]		= META_FUNC(int_sk_state),
		[META_ID(SK_REUSE)]		= META_FUNC(int_sk_reuse),
		[META_ID(SK_BOUND_IF)]		= META_FUNC(int_sk_bound_if),
		[META_ID(SK_REFCNT)]		= META_FUNC(int_sk_refcnt),
		[META_ID(SK_RCVBUF)]		= META_FUNC(int_sk_rcvbuf),
		[META_ID(SK_SNDBUF)]		= META_FUNC(int_sk_sndbuf),
		[META_ID(SK_SHUTDOWN)]		= META_FUNC(int_sk_shutdown),
		[META_ID(SK_PROTO)]		= META_FUNC(int_sk_proto),
		[META_ID(SK_TYPE)]		= META_FUNC(int_sk_type),
		[META_ID(SK_RMEM_ALLOC)]	= META_FUNC(int_sk_rmem_alloc),
		[META_ID(SK_WMEM_ALLOC)]	= META_FUNC(int_sk_wmem_alloc),
		[META_ID(SK_OMEM_ALLOC)]	= META_FUNC(int_sk_omem_alloc),
		[META_ID(SK_WMEM_QUEUED)]	= META_FUNC(int_sk_wmem_queued),
		[META_ID(SK_RCV_QLEN)]		= META_FUNC(int_sk_rcv_qlen),
		[META_ID(SK_SND_QLEN)]		= META_FUNC(int_sk_snd_qlen),
		[META_ID(SK_ERR_QLEN)]		= META_FUNC(int_sk_err_qlen),
		[META_ID(SK_FORWARD_ALLOCS)]	= META_FUNC(int_sk_fwd_alloc),
		[META_ID(SK_ALLOCS)]		= META_FUNC(int_sk_alloc),
		[META_ID(SK_HASH)]		= META_FUNC(int_sk_hash),
		[META_ID(SK_LINGERTIME)]	= META_FUNC(int_sk_lingertime),
		[META_ID(SK_ACK_BACKLOG)]	= META_FUNC(int_sk_ack_bl),
		[META_ID(SK_MAX_ACK_BACKLOG)]	= META_FUNC(int_sk_max_ack_bl),
		[META_ID(SK_PRIO)]		= META_FUNC(int_sk_prio),
		[META_ID(SK_RCVLOWAT)]		= META_FUNC(int_sk_rcvlowat),
		[META_ID(SK_RCVTIMEO)]		= META_FUNC(int_sk_rcvtimeo),
		[META_ID(SK_SNDTIMEO)]		= META_FUNC(int_sk_sndtimeo),
		[META_ID(SK_SENDMSG_OFF)]	= META_FUNC(int_sk_sendmsg_off),
		[META_ID(SK_WRITE_PENDING)]	= META_FUNC(int_sk_write_pend),
		[META_ID(VLAN_TAG)]		= META_FUNC(int_vlan_tag),
		[META_ID(RXHASH)]		= META_FUNC(int_rxhash),
	}
};

static inline struct meta_ops *meta_ops(struct meta_value *val)
{
	return &__meta_ops[meta_type(val)][meta_id(val)];
}

/**************************************************************************
 * Type specific operations for TCF_META_TYPE_VAR
 **************************************************************************/

static int meta_var_compare(struct meta_obj *a, struct meta_obj *b)
{
	int r = a->len - b->len;

	if (r == 0)
		r = memcmp((void *) a->value, (void *) b->value, a->len);

	return r;
}

static int meta_var_change(struct meta_value *dst, struct nlattr *nla)
{
	int len = nla_len(nla);

	dst->val = (unsigned long)kmemdup(nla_data(nla), len, GFP_KERNEL);
	if (dst->val == 0UL)
		return -ENOMEM;
	dst->len = len;
	return 0;
}

static void meta_var_destroy(struct meta_value *v)
{
	kfree((void *) v->val);
}

/* Shifting a variable length value only truncates the length used for
 * the comparison, e.g. the device name "ppp0" with a shift of 1 is
 * compared as "ppp"; this implements the wildcard matching described
 * at the top of this file.
 */
static void meta_var_apply_extras(struct meta_value *v,
				  struct meta_obj *dst)
{
	int shift = v->hdr.shift;

	if (shift && shift < dst->len)
		dst->len -= shift;
}

static int meta_var_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
{
	if (v->val && v->len &&
	    nla_put(skb, tlv, v->len, (void *) v->val))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

/**************************************************************************
 * Type specific operations for TCF_META_TYPE_INT
 **************************************************************************/

static int meta_int_compare(struct meta_obj *a, struct meta_obj *b)
{
	/* Let gcc optimize it, the unlikely is not really based on
	 * some numbers but jump free code for mismatches seems
	 * more logical.
	 */
	if (unlikely(a->value == b->value))
		return 0;
	else if (a->value < b->value)
		return -1;
	else
		return 1;
}

static int meta_int_change(struct meta_value *dst, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(unsigned long)) {
		dst->val = *(unsigned long *) nla_data(nla);
		dst->len = sizeof(unsigned long);
	} else if (nla_len(nla) == sizeof(u32)) {
		dst->val = nla_get_u32(nla);
		dst->len = sizeof(u32);
	} else
		return -EINVAL;

	return 0;
}

static void meta_int_apply_extras(struct meta_value *v,
				  struct meta_obj *dst)
{
	if (v->hdr.shift)
		dst->value >>= v->hdr.shift;

	if (v->val)
		dst->value &= v->val;
}

static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
{
	if (v->len == sizeof(unsigned long)) {
		if (nla_put(skb, tlv, sizeof(unsigned long), &v->val))
			goto nla_put_failure;
	} else if (v->len == sizeof(u32)) {
		if (nla_put_u32(skb, tlv, v->val))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -1;
}

/**************************************************************************
 * Type specific operations table
 **************************************************************************/

struct meta_type_ops {
	void	(*destroy)(struct meta_value *);
	int	(*compare)(struct meta_obj *, struct meta_obj *);
	int	(*change)(struct meta_value *, struct nlattr *);
	void	(*apply_extras)(struct meta_value *, struct meta_obj *);
	int	(*dump)(struct sk_buff *, struct meta_value *, int);
};

static const struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = {
	[TCF_META_TYPE_VAR] = {
		.destroy = meta_var_destroy,
		.compare = meta_var_compare,
		.change = meta_var_change,
		.apply_extras = meta_var_apply_extras,
		.dump = meta_var_dump
	},
	[TCF_META_TYPE_INT] = {
		.compare = meta_int_compare,
		.change = meta_int_change,
		.apply_extras = meta_int_apply_extras,
		.dump = meta_int_dump
	}
};

static inline const struct meta_type_ops *meta_type_ops(struct meta_value *v)
{
	return &__meta_type_ops[meta_type(v)];
}

/**************************************************************************
 * Core
 **************************************************************************/

static int meta_get(struct sk_buff *skb, struct tcf_pkt_info *info,
		    struct meta_value *v, struct meta_obj *dst)
{
	int err = 0;

	if (meta_id(v) == TCF_META_ID_VALUE) {
		dst->value = v->val;
		dst->len = v->len;
		return 0;
	}

	meta_ops(v)->get(skb, info, v, dst, &err);
	if (err < 0)
		return err;

	if (meta_type_ops(v)->apply_extras)
		meta_type_ops(v)->apply_extras(v, dst);

	return 0;
}

static int em_meta_match(struct sk_buff *skb, struct tcf_ematch *m,
			 struct tcf_pkt_info *info)
{
	int r;
	struct meta_match *meta = (struct meta_match *) m->data;
	struct meta_obj l_value, r_value;

	if (meta_get(skb, info, &meta->lvalue, &l_value) < 0 ||
	    meta_get(skb, info, &meta->rvalue, &r_value) < 0)
		return 0;

	r = meta_type_ops(&meta->lvalue)->compare(&l_value, &r_value);

	switch (meta->lvalue.hdr.op) {
	case TCF_EM_OPND_EQ:
		return !r;
	case TCF_EM_OPND_LT:
		return r < 0;
	case TCF_EM_OPND_GT:
		return r > 0;
	}

	return 0;
}

static void meta_delete(struct meta_match *meta)
{
	if (meta) {
		const struct meta_type_ops *ops = meta_type_ops(&meta->lvalue);

		if (ops && ops->destroy) {
			ops->destroy(&meta->lvalue);
			ops->destroy(&meta->rvalue);
		}
	}

	kfree(meta);
}

static inline int meta_change_data(struct meta_value *dst, struct nlattr *nla)
{
	if (nla) {
		if (nla_len(nla) == 0)
			return -EINVAL;

		return meta_type_ops(dst)->change(dst, nla);
	}

	return 0;
}

static inline int meta_is_supported(struct meta_value *val)
{
	return !meta_id(val) || meta_ops(val)->get;
}

static const struct nla_policy meta_policy[TCA_EM_META_MAX + 1] = {
	[TCA_EM_META_HDR]	= { .len = sizeof(struct tcf_meta_hdr) },
};

static int em_meta_change(struct net *net, void *data, int len,
			  struct tcf_ematch *m)
{
	int err;
	struct nlattr *tb[TCA_EM_META_MAX + 1];
	struct tcf_meta_hdr *hdr;
	struct meta_match *meta = NULL;

	err = nla_parse(tb, TCA_EM_META_MAX, data, len, meta_policy);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	if (tb[TCA_EM_META_HDR] == NULL)
		goto errout;
	hdr = nla_data(tb[TCA_EM_META_HDR]);

	if (TCF_META_TYPE(hdr->left.kind) != TCF_META_TYPE(hdr->right.kind) ||
	    TCF_META_TYPE(hdr->left.kind) > TCF_META_TYPE_MAX ||
	    TCF_META_ID(hdr->left.kind) > TCF_META_ID_MAX ||
	    TCF_META_ID(hdr->right.kind) > TCF_META_ID_MAX)
		goto errout;

	meta = kzalloc(sizeof(*meta), GFP_KERNEL);
	if (meta == NULL) {
		err = -ENOMEM;
		goto errout;
	}

	memcpy(&meta->lvalue.hdr, &hdr->left, sizeof(hdr->left));
	memcpy(&meta->rvalue.hdr, &hdr->right, sizeof(hdr->right));

	if (!meta_is_supported(&meta->lvalue) ||
	    !meta_is_supported(&meta->rvalue)) {
		err = -EOPNOTSUPP;
		goto errout;
	}

	if (meta_change_data(&meta->lvalue, tb[TCA_EM_META_LVALUE]) < 0 ||
	    meta_change_data(&meta->rvalue, tb[TCA_EM_META_RVALUE]) < 0)
		goto errout;

	m->datalen = sizeof(*meta);
	m->data = (unsigned long) meta;

	err = 0;
errout:
	if (err && meta)
		meta_delete(meta);
	return err;
}

static void em_meta_destroy(struct tcf_ematch *m)
{
	if (m)
		meta_delete((struct meta_match *) m->data);
}

static int em_meta_dump(struct sk_buff *skb, struct tcf_ematch *em)
{
	struct meta_match *meta = (struct meta_match *) em->data;
	struct tcf_meta_hdr hdr;
	const struct meta_type_ops *ops;

	memset(&hdr, 0, sizeof(hdr));
	memcpy(&hdr.left, &meta->lvalue.hdr, sizeof(hdr.left));
	memcpy(&hdr.right, &meta->rvalue.hdr, sizeof(hdr.right));

	if (nla_put(skb, TCA_EM_META_HDR, sizeof(hdr), &hdr))
		goto nla_put_failure;

	ops = meta_type_ops(&meta->lvalue);
	if (ops->dump(skb, &meta->lvalue, TCA_EM_META_LVALUE) < 0 ||
	    ops->dump(skb, &meta->rvalue, TCA_EM_META_RVALUE) < 0)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -1;
}

static struct tcf_ematch_ops em_meta_ops = {
	.kind	  = TCF_EM_META,
	.change	  = em_meta_change,
	.match	  = em_meta_match,
	.destroy  = em_meta_destroy,
	.dump	  = em_meta_dump,
	.owner	  = THIS_MODULE,
	.link	  = LIST_HEAD_INIT(em_meta_ops.link)
};

static int __init init_em_meta(void)
{
	return tcf_em_register(&em_meta_ops);
}

static void __exit exit_em_meta(void)
{
	tcf_em_unregister(&em_meta_ops);
}

MODULE_LICENSE("GPL");

module_init(init_em_meta);
module_exit(exit_em_meta);

MODULE_ALIAS_TCF_EMATCH(TCF_EM_META);