/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed into hash tables of key nodes, each node
 *	carrying a set of 32bit key/mask pairs.  Nodes may reference
 *	next-level hash tables, and so on.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow either (provided
 *	you program it correctly), and it is general enough.  Its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems to represent the best middle point between speed and
 *	manageability, both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, such as cls_rsvp.c.
 *
 *	JHS: We should remove CONFIG_NET_CLS_IND from here eventually,
 *	when the meta match extension is made available.
 *
 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

struct tc_u_knode
{
	struct tc_u_knode *next;
	u32 handle;
	struct tc_u_hnode *ht_up;
	struct tcf_exts exts;
#ifdef CONFIG_NET_CLS_IND
	char indev[IFNAMSIZ];
#endif
	u8 fshift;			/* trailing zero bits of sel.hmask */
	struct tcf_result res;
	struct tc_u_hnode *ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt *pf;
#endif
#ifdef CONFIG_CLS_U32_MARK
	struct tc_u32_mark mark;
#endif
	struct tc_u32_sel sel;		/* must stay last: keys[] follows */
};

struct tc_u_hnode
{
	struct tc_u_hnode *next;
	u32 handle;
	u32 prio;
	struct tc_u_common *tp_c;
	int refcnt;
	unsigned divisor;
	struct tc_u_knode *ht[1];	/* really divisor+1 buckets */
};

struct tc_u_common
{
	struct tc_u_common *next;
	struct tc_u_hnode *hlist;
	struct Qdisc *q;
	int refcnt;
	u32 hgenerator;
};

static struct tcf_ext_map u32_ext_map = {
	.action = TCA_U32_ACT,
	.police = TCA_U32_POLICE
};

static struct tc_u_common *u32_list;

static __inline__ unsigned u32_hash_fold(u32 key, struct tc_u32_sel *sel, u8 fshift)
{
	unsigned h = (key & sel->hmask) >> fshift;

	return h;
}

static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		u8 *ptr;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root;
	u8 *ptr = skb->nh.raw;
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
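
	/*
	 * The match loop below walks the hash-table hierarchy iteratively:
	 * stack[] (bounded by TC_U32_MAXDEPTH, see the deadloop exit)
	 * replaces recursion, sdepth indexes it, off2 holds the extra
	 * offset computed by TC_U32_OFFSET/TC_U32_VAROFFSET selectors,
	 * and sel picks the bucket in the current hash table.
	 */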
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = ht->ht[sel];

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		n->pf->rcnt += 1;
		j = 0;
#endif

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->nfmark & n->mark.mask) != n->mark.val) {
			n = n->next;
			goto next_knode;
		} else {
			n->mark.success++;
		}
#endif

		for (i = n->sel.nkeys; i > 0; i--, key++) {

			if ((*(u32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) {
				n = n->next;
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			n->pf->kcnts[j] += 1;
			j++;
#endif
		}
		if (n->ht_down == NULL) {
check_terminal:
			if (n->sel.flags&TC_U32_TERMINAL) {

				*res = n->res;
#ifdef CONFIG_NET_CLS_IND
				if (!tcf_match_indev(skb, n->indev)) {
					n = n->next;
					goto next_knode;
				}
#endif
#ifdef CONFIG_CLS_U32_PERF
				n->pf->rhit += 1;
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = n->next;
					goto next_knode;
				}

				return r;
			}
			n = n->next;
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].ptr = ptr;
		sdepth++;

		ht = n->ht_down;
		sel = 0;
		if (ht->divisor)
			sel = ht->divisor & u32_hash_fold(*(u32*)(ptr+n->sel.hoff), &n->sel, n->fshift);

		if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags&TC_U32_VAROFFSET)
				off2 += ntohs(n->sel.offmask & *(u16*)(ptr+n->sel.offoff)) >> n->sel.offshift;
			off2 &= ~3;
		}
		if (n->sel.flags&TC_U32_EAT) {
			ptr += off2;
			off2 = 0;
		}

		if (ptr < skb->tail)
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = n->ht_up;
		ptr = stack[sdepth].ptr;
		goto check_terminal;
	}
	return -1;

deadloop:
	if (net_ratelimit())
		printk("cls_u32: dead loop\n");
	return -1;
}

static __inline__ struct tc_u_hnode *
u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = tp_c->hlist; ht; ht = ht->next)
		if (ht->handle == handle)
			break;

	return ht;
}

static __inline__ struct tc_u_knode *
u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = ht->ht[sel]; n; n = n->next)
		if (n->handle == handle)
			break;
out:
	return n;
}


static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = tp->root;
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return 0;

	if (TC_U32_KEY(handle) == 0)
		return (unsigned long)ht;

	return (unsigned long)u32_lookup_key(ht, handle);
}

static void u32_put(struct tcf_proto *tp, unsigned long f)
{
}

static u32 gen_new_htid(struct tc_u_common *tp_c)
{
	int i = 0x800;

	do {
		if (++tp_c->hgenerator == 0x7FF)
			tp_c->hgenerator = 1;
	} while (--i > 0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));

	return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
}
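
/*
 * Handle layout, per the TC_U32_* macros in <linux/pkt_cls.h>: bits
 * 31..20 select the hash table (htid), bits 19..12 the bucket within
 * it, and bits 11..0 the key node.  gen_new_htid() above always sets
 * bit 31, so generated htids start at 0x80100000; plain 0x80000000 is
 * used by u32_init() for the first root table.
 */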

static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	struct tc_u_common *tp_c;

	for (tp_c = u32_list; tp_c; tp_c = tp_c->next)
		if (tp_c->q == tp->q)
			break;

	root_ht = kmalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	memset(root_ht, 0, sizeof(*root_ht));
	root_ht->divisor = 0;
	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
	root_ht->prio = tp->prio;

	if (tp_c == NULL) {
		tp_c = kmalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		memset(tp_c, 0, sizeof(*tp_c));
		tp_c->q = tp->q;
		tp_c->next = u32_list;
		u32_list = tp_c;
	}

	tp_c->refcnt++;
	root_ht->next = tp_c->hlist;
	tp_c->hlist = root_ht;
	root_ht->tp_c = tp_c;

	tp->root = root_ht;
	tp->data = tp_c;
	return 0;
}

static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
{
	tcf_unbind_filter(tp, &n->res);
	tcf_exts_destroy(tp, &n->exts);
	if (n->ht_down)
		n->ht_down->refcnt--;
#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return 0;
}

static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key)
{
	struct tc_u_knode **kp;
	struct tc_u_hnode *ht = key->ht_up;

	if (ht) {
		for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) {
			if (*kp == key) {
				tcf_tree_lock(tp);
				*kp = key->next;
				tcf_tree_unlock(tp);

				u32_destroy_key(tp, key);
				return 0;
			}
		}
	}
	BUG_TRAP(0);
	return 0;
}

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_knode *n;
	unsigned h;

	for (h = 0; h <= ht->divisor; h++) {
		while ((n = ht->ht[h]) != NULL) {
			ht->ht[h] = n->next;

			u32_destroy_key(tp, n);
		}
	}
}

static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode **hn;

	BUG_TRAP(!ht->refcnt);

	u32_clear_hnode(tp, ht);

	for (hn = &tp_c->hlist; *hn; hn = &(*hn)->next) {
		if (*hn == ht) {
			*hn = ht->next;
			kfree(ht);
			return 0;
		}
	}

	BUG_TRAP(0);
	return -ENOENT;
}

static void u32_destroy(struct tcf_proto *tp)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = xchg(&tp->root, NULL);

	BUG_TRAP(root_ht != NULL);

	if (root_ht && --root_ht->refcnt == 0)
		u32_destroy_hnode(tp, root_ht);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;
		struct tc_u_common **tp_cp;

		for (tp_cp = &u32_list; *tp_cp; tp_cp = &(*tp_cp)->next) {
			if (*tp_cp == tp_c) {
				*tp_cp = tp_c->next;
				break;
			}
		}

		for (ht = tp_c->hlist; ht; ht = ht->next)
			u32_clear_hnode(tp, ht);

		while ((ht = tp_c->hlist) != NULL) {
			tp_c->hlist = ht->next;

			BUG_TRAP(ht->refcnt == 0);

			kfree(ht);
		}

		kfree(tp_c);
	}

	tp->data = NULL;
}

static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct tc_u_hnode *ht = (struct tc_u_hnode*)arg;

	if (ht == NULL)
		return 0;

	if (TC_U32_KEY(ht->handle))
		return u32_delete_key(tp, (struct tc_u_knode*)ht);

	if (tp->root == ht)
		return -EINVAL;

	if (--ht->refcnt == 0)
		u32_destroy_hnode(tp, ht);

	return 0;
}
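
/*
 * Automatic node ids start one past the largest id already present in
 * the target bucket (and never below 0x800), capped at 0xFFF; once the
 * cap is reached, gen_new_kid() can hand out a duplicate id, so
 * explicit handles are safer for very full buckets.
 */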

static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
	struct tc_u_knode *n;
	unsigned i = 0x7FF;

	for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
		if (i < TC_U32_NODE(n->handle))
			i = TC_U32_NODE(n->handle);
	i++;

	return handle | (i > 0xFFF ? 0xFFF : i);
}

static int u32_set_parms(struct tcf_proto *tp, unsigned long base,
			 struct tc_u_hnode *ht,
			 struct tc_u_knode *n, struct rtattr **tb,
			 struct rtattr *est)
{
	int err;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &u32_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_U32_LINK-1]) {
		u32 handle = *(u32*)RTA_DATA(tb[TCA_U32_LINK-1]);
		struct tc_u_hnode *ht_down = NULL;

		if (TC_U32_KEY(handle))
			goto errout;

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);

			if (ht_down == NULL)
				goto errout;
			ht_down->refcnt++;
		}

		tcf_tree_lock(tp);
		ht_down = xchg(&n->ht_down, ht_down);
		tcf_tree_unlock(tp);

		if (ht_down)
			ht_down->refcnt--;
	}
	if (tb[TCA_U32_CLASSID-1]) {
		n->res.classid = *(u32*)RTA_DATA(tb[TCA_U32_CLASSID-1]);
		tcf_bind_filter(tp, &n->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_U32_INDEV-1]) {
		/* don't shadow err: report the real tcf_change_indev() error */
		err = tcf_change_indev(tp, n->indev, tb[TCA_U32_INDEV-1]);
		if (err < 0)
			goto errout;
	}
#endif
	tcf_exts_change(tp, &n->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}

static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct rtattr **tca,
		      unsigned long *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_U32_MAX];
	u32 htid;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;
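
	/*
	 * Parse the nested TLVs into tb[]; rtattr_parse_nested() fills the
	 * table so that attribute type N lands in tb[N-1], which is why
	 * every lookup below subtracts one from the TCA_U32_* constant.
	 */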
	if (rtattr_parse_nested(tb, TCA_U32_MAX, opt) < 0)
		return -EINVAL;

	if ((n = (struct tc_u_knode*)*arg) != NULL) {
		if (TC_U32_KEY(n->handle) == 0)
			return -EINVAL;

		return u32_set_parms(tp, base, n->ht_up, n, tb, tca[TCA_RATE-1]);
	}

	if (tb[TCA_U32_DIVISOR-1]) {
		unsigned divisor = *(unsigned*)RTA_DATA(tb[TCA_U32_DIVISOR-1]);

		if (--divisor > 0x100)
			return -EINVAL;
		if (TC_U32_KEY(handle))
			return -EINVAL;
		if (handle == 0) {
			handle = gen_new_htid(tp->data);
			if (handle == 0)
				return -ENOMEM;
		}
		ht = kmalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		memset(ht, 0, sizeof(*ht) + divisor*sizeof(void*));
		ht->tp_c = tp_c;
		ht->refcnt = 0;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		ht->next = tp_c->hlist;
		tp_c->hlist = ht;
		*arg = (unsigned long)ht;
		return 0;
	}

	if (tb[TCA_U32_HASH-1]) {
		htid = *(unsigned*)RTA_DATA(tb[TCA_U32_HASH-1]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = tp->root;
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (ht == NULL)
				return -EINVAL;
		}
	} else {
		ht = tp->root;
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid))
		return -EINVAL;

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
			return -EINVAL;
		handle = htid | TC_U32_NODE(handle);
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL-1] == 0 ||
	    RTA_PAYLOAD(tb[TCA_U32_SEL-1]) < sizeof(struct tc_u32_sel))
		return -EINVAL;

	s = RTA_DATA(tb[TCA_U32_SEL-1]);

	n = kmalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
	if (n == NULL)
		return -ENOBUFS;

	memset(n, 0, sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key));
#ifdef CONFIG_CLS_U32_PERF
	n->pf = kmalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL);
	if (n->pf == NULL) {
		kfree(n);
		return -ENOBUFS;
	}
	memset(n->pf, 0, sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64));
#endif

	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
	n->ht_up = ht;
	n->handle = handle;
	{
		u8 i = 0;
		u32 mask = s->hmask;
		if (mask) {
			while (!(mask & 1)) {
				i++;
				mask >>= 1;
			}
		}
		n->fshift = i;
	}

#ifdef CONFIG_CLS_U32_MARK
	if (tb[TCA_U32_MARK-1]) {
		struct tc_u32_mark *mark;

		if (RTA_PAYLOAD(tb[TCA_U32_MARK-1]) < sizeof(struct tc_u32_mark)) {
#ifdef CONFIG_CLS_U32_PERF
			kfree(n->pf);
#endif
			kfree(n);
			return -EINVAL;
		}
		mark = RTA_DATA(tb[TCA_U32_MARK-1]);
		memcpy(&n->mark, mark, sizeof(struct tc_u32_mark));
		n->mark.success = 0;
	}
#endif

	err = u32_set_parms(tp, base, ht, n, tb, tca[TCA_RATE-1]);
	if (err == 0) {
		struct tc_u_knode **ins;
		for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
			if (TC_U32_NODE(handle) < TC_U32_NODE((*ins)->handle))
				break;

		n->next = *ins;
		wmb();
		*ins = n;

		*arg = (unsigned long)n;
		return 0;
	}
#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return err;
}

static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned h;

	if (arg->stop)
		return;
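
	/*
	 * Visit every hash table of this priority, then every key node in
	 * each bucket; arg->skip and arg->count let netlink dumps resume
	 * where the previous batch stopped.
	 */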
	for (ht = tp_c->hlist; ht; ht = ht->next) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, (unsigned long)ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = ht->ht[h]; n; n = n->next) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, (unsigned long)n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}

static int u32_dump(struct tcf_proto *tp, unsigned long fh,
		    struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = (struct tc_u_knode*)fh;
	unsigned char *b = skb->tail;
	struct rtattr *rta;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = (struct tc_u_hnode*)fh;
		u32 divisor = ht->divisor + 1;
		RTA_PUT(skb, TCA_U32_DIVISOR, 4, &divisor);
	} else {
		RTA_PUT(skb, TCA_U32_SEL,
			sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			&n->sel);
		if (n->ht_up) {
			u32 htid = n->handle & 0xFFFFF000;
			RTA_PUT(skb, TCA_U32_HASH, 4, &htid);
		}
		if (n->res.classid)
			RTA_PUT(skb, TCA_U32_CLASSID, 4, &n->res.classid);
		if (n->ht_down)
			RTA_PUT(skb, TCA_U32_LINK, 4, &n->ht_down->handle);

#ifdef CONFIG_CLS_U32_MARK
		if (n->mark.val || n->mark.mask)
			RTA_PUT(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark);
#endif

		if (tcf_exts_dump(skb, &n->exts, &u32_ext_map) < 0)
			goto rtattr_failure;

#ifdef CONFIG_NET_CLS_IND
		if (strlen(n->indev))
			RTA_PUT(skb, TCA_U32_INDEV, IFNAMSIZ, n->indev);
#endif
#ifdef CONFIG_CLS_U32_PERF
		RTA_PUT(skb, TCA_U32_PCNT,
			sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
			n->pf);
#endif
	}

	rta->rta_len = skb->tail - b;
	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts, &u32_ext_map) < 0)
			goto rtattr_failure;
	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static struct tcf_proto_ops cls_u32_ops = {
	.next		= NULL,
	.kind		= "u32",
	.classify	= u32_classify,
	.init		= u32_init,
	.destroy	= u32_destroy,
	.get		= u32_get,
	.put		= u32_put,
	.change		= u32_change,
	.delete		= u32_delete,
	.walk		= u32_walk,
	.dump		= u32_dump,
	.owner		= THIS_MODULE,
};

static int __init init_u32(void)
{
	printk("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	printk(" Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_POLICE
	printk(" OLD policer on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	printk(" input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	printk(" Actions configured\n");
#endif
	return register_tcf_proto_ops(&cls_u32_ops);
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");
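
/*
 * Illustrative userspace usage (iproute2 "tc"; the device, qdisc handle
 * and classid below are examples only):
 *
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 u32 \
 *		match ip dst 10.0.0.1/32 match ip protocol 6 0xff \
 *		flowid 1:10
 *
 * Each "match" becomes a val/mask key at some offset inside the
 * tc_u32_sel that u32_change() receives, and u32_classify() above
 * compares those keys against the packet headers.
 */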