/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

/*
 * 1. For now we assume that route tags < 256.
 *    This allows us to use direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */

struct route4_fastmap {
	struct route4_filter	*filter;
	u32			id;
	int			iif;
};

struct route4_head {
	struct route4_fastmap		fastmap[16];
	struct route4_bucket __rcu	*table[256 + 1];
	struct rcu_head			rcu;
};

struct route4_bucket {
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter __rcu	*ht[16 + 16 + 1];
	struct rcu_head			rcu;
};

struct route4_filter {
	struct route4_filter __rcu	*next;
	u32			id;
	int			iif;

	struct tcf_result	res;
	struct tcf_exts		exts;
	u32			handle;
	struct route4_bucket	*bkt;
	struct tcf_proto	*tp;
	union {
		struct work_struct	work;
		struct rcu_head		rcu;
	};
};

#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))

static inline int route4_fastmap_hash(u32 id, int iif)
{
	return id & 0xF;
}

static DEFINE_SPINLOCK(fastmap_lock);

static void
route4_reset_fastmap(struct route4_head *head)
{
	spin_lock_bh(&fastmap_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(&fastmap_lock);
}

static void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);

	/* fastmap updates must look atomic so that id, iif and
	 * filter stay consistent with each other.
	 */
	spin_lock_bh(&fastmap_lock);
	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
	spin_unlock_bh(&fastmap_lock);
}

static inline int route4_hash_to(u32 id)
{
	return id & 0xFF;
}

static inline int route4_hash_from(u32 id)
{
	return (id >> 16) & 0xF;
}

static inline int route4_hash_iif(int iif)
{
	return 16 + ((iif >> 16) & 0xF);
}

static inline int route4_hash_wild(void)
{
	return 32;
}

#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_has_actions(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}
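/* Classification consults the fastmap first; on a miss it walks the
 * bucket selected by the low 8 bits of the route tclassid, trying the
 * FROM chain, then the IIF chain, then the wildcard chain.  If nothing
 * matched, it retries once against the "to ANY" bucket at index 256.
 * Negative results are cached in the fastmap as ROUTE4_FAILURE.
 */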
static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = rcu_dereference_bh(tp->root);
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	dst = skb_dst(skb);
	if (!dst)
		goto failure;

	id = dst->tclassid;

	iif = inet_iif(skb);

	h = route4_fastmap_hash(id, iif);

	spin_lock(&fastmap_lock);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE) {
			spin_unlock(&fastmap_lock);
			goto failure;
		}

		*res = f->res;
		spin_unlock(&fastmap_lock);
		return 0;
	}
	spin_unlock(&fastmap_lock);

	h = route4_hash_to(id);

restart:
	b = rcu_dereference_bh(head->table[h]);
	if (b) {
		for (f = rcu_dereference_bh(b->ht[route4_hash_from(id)]);
		     f;
		     f = rcu_dereference_bh(f->next))
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = rcu_dereference_bh(b->ht[route4_hash_iif(iif)]);
		     f;
		     f = rcu_dereference_bh(f->next))
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = rcu_dereference_bh(b->ht[route4_hash_wild()]);
		     f;
		     f = rcu_dereference_bh(f->next))
			ROUTE4_APPLY_RESULT();
	}
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;
}

static inline u32 to_hash(u32 id)
{
	u32 h = id & 0xFF;

	if (id & 0x8000)
		h += 256;
	return h;
}

static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;
	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id & 0xF;
	}
	return 16 + (id & 0xF);
}

static void *route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned int h1, h2;

	h1 = to_hash(handle);
	if (h1 > 256)
		return NULL;

	h2 = from_hash(handle >> 16);
	if (h2 > 32)
		return NULL;

	b = rtnl_dereference(head->table[h1]);
	if (b) {
		for (f = rtnl_dereference(b->ht[h2]);
		     f;
		     f = rtnl_dereference(f->next))
			if (f->handle == handle)
				return f;
	}
	return NULL;
}

static int route4_init(struct tcf_proto *tp)
{
	struct route4_head *head;

	head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	rcu_assign_pointer(tp->root, head);
	return 0;
}
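/* Filters are freed in two stages: call_rcu() waits out readers in the
 * classify path, then the RCU callback hands the actual tcf_exts_destroy()
 * and kfree() to a workqueue, because releasing the attached actions may
 * sleep and must run under RTNL, neither of which is allowed in RCU
 * callback context.
 */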
static void route4_delete_filter_work(struct work_struct *work)
{
	struct route4_filter *f = container_of(work, struct route4_filter, work);

	rtnl_lock();
	tcf_exts_destroy(&f->exts);
	kfree(f);
	rtnl_unlock();
}

static void route4_delete_filter(struct rcu_head *head)
{
	struct route4_filter *f = container_of(head, struct route4_filter, rcu);

	INIT_WORK(&f->work, route4_delete_filter_work);
	tcf_queue_work(&f->work);
}

static void route4_destroy(struct tcf_proto *tp)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	int h1, h2;

	if (head == NULL)
		return;

	for (h1 = 0; h1 <= 256; h1++) {
		struct route4_bucket *b;

		b = rtnl_dereference(head->table[h1]);
		if (b) {
			for (h2 = 0; h2 <= 32; h2++) {
				struct route4_filter *f;

				while ((f = rtnl_dereference(b->ht[h2])) != NULL) {
					struct route4_filter *next;

					next = rtnl_dereference(f->next);
					RCU_INIT_POINTER(b->ht[h2], next);
					tcf_unbind_filter(tp, &f->res);
					call_rcu(&f->rcu, route4_delete_filter);
				}
			}
			RCU_INIT_POINTER(head->table[h1], NULL);
			kfree_rcu(b, rcu);
		}
	}
	kfree_rcu(head, rcu);
}

static int route4_delete(struct tcf_proto *tp, void *arg, bool *last)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_filter *f = arg;
	struct route4_filter __rcu **fp;
	struct route4_filter *nf;
	struct route4_bucket *b;
	unsigned int h = 0;
	int i, h1;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	fp = &b->ht[from_hash(h >> 16)];
	for (nf = rtnl_dereference(*fp); nf;
	     fp = &nf->next, nf = rtnl_dereference(*fp)) {
		if (nf == f) {
			/* unlink it */
			RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));

			/* Remove any fastmap lookups that might reference the
			 * filter; since we just unlinked it, it cannot get
			 * back into the fastmap.
			 */
			route4_reset_fastmap(head);

			/* Delete it */
			tcf_unbind_filter(tp, &f->res);
			call_rcu(&f->rcu, route4_delete_filter);

			/* Strip RTNL protected tree */
			for (i = 0; i <= 32; i++) {
				struct route4_filter *rt;

				rt = rtnl_dereference(b->ht[i]);
				if (rt)
					goto out;
			}

			/* OK, session has no flows */
			RCU_INIT_POINTER(head->table[to_hash(h)], NULL);
			kfree_rcu(b, rcu);
			break;
		}
	}

out:
	*last = true;
	for (h1 = 0; h1 <= 256; h1++) {
		if (rcu_access_pointer(head->table[h1])) {
			*last = false;
			break;
		}
	}

	return 0;
}

static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
	[TCA_ROUTE4_CLASSID]	= { .type = NLA_U32 },
	[TCA_ROUTE4_TO]		= { .type = NLA_U32 },
	[TCA_ROUTE4_FROM]	= { .type = NLA_U32 },
	[TCA_ROUTE4_IIF]	= { .type = NLA_U32 },
};
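/* A filter handle packs the whole match into 32 bits:
 *
 *   bits  0..7   "to" route tag
 *   bit   15     set when no "to" tag was given (to ANY)
 *   bits 16..31  "from" route tag, or (0x8000 | ifindex) for "fromdev",
 *                or 0xFFFF for "from ANY"
 *
 * For example, "to 5 from 3" yields handle 0x00030005, and
 * "to 1 fromdev DEV" with ifindex 2 yields 0x80020001.
 */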
static int route4_set_parms(struct net *net, struct tcf_proto *tp,
			    unsigned long base, struct route4_filter *f,
			    u32 handle, struct route4_head *head,
			    struct nlattr **tb, struct nlattr *est, int new,
			    bool ovr)
{
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr);
	if (err < 0)
		return err;

	if (tb[TCA_ROUTE4_TO]) {
		if (new && handle & 0x8000)
			return -EINVAL;
		to = nla_get_u32(tb[TCA_ROUTE4_TO]);
		if (to > 0xFF)
			return -EINVAL;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM]) {
		if (tb[TCA_ROUTE4_IIF])
			return -EINVAL;
		id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
		if (id > 0xFF)
			return -EINVAL;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF]) {
		id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
		if (id > 0x7FFF)
			return -EINVAL;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			return -EINVAL;
	}

	h1 = to_hash(nhandle);
	b = rtnl_dereference(head->table[h1]);
	if (!b) {
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			return -ENOBUFS;

		rcu_assign_pointer(head->table[h1], b);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);

		for (fp = rtnl_dereference(b->ht[h2]);
		     fp;
		     fp = rtnl_dereference(fp->next))
			if (fp->handle == f->handle)
				return -EEXIST;
	}

	if (tb[TCA_ROUTE4_TO])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM])
		f->id = to | id << 16;
	else if (tb[TCA_ROUTE4_IIF])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	f->tp = tp;

	if (tb[TCA_ROUTE4_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	return 0;
}

static int route4_change(struct net *net, struct sk_buff *in_skb,
			 struct tcf_proto *tp, unsigned long base, u32 handle,
			 struct nlattr **tca, void **arg, bool ovr)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_filter __rcu **fp;
	struct route4_filter *fold, *f1, *pfp, *f = NULL;
	struct route4_bucket *b;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ROUTE4_MAX + 1];
	unsigned int h, th;
	int err;
	bool new = true;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, route4_policy, NULL);
	if (err < 0)
		return err;

	fold = *arg;
	if (fold && handle && fold->handle != handle)
		return -EINVAL;

	err = -ENOBUFS;
	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (!f)
		goto errout;

	err = tcf_exts_init(&f->exts, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
	if (err < 0)
		goto errout;

	if (fold) {
		f->id = fold->id;
		f->iif = fold->iif;
		f->res = fold->res;
		f->handle = fold->handle;

		f->tp = fold->tp;
		f->bkt = fold->bkt;
		new = false;
	}

	err = route4_set_parms(net, tp, base, f, handle, head, tb,
			       tca[TCA_RATE], new, ovr);
	if (err < 0)
		goto errout;

	h = from_hash(f->handle >> 16);
	fp = &f->bkt->ht[h];
	for (pfp = rtnl_dereference(*fp);
	     (f1 = rtnl_dereference(*fp)) != NULL;
	     fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	netif_keep_dst(qdisc_dev(tp->q));
	rcu_assign_pointer(f->next, f1);
	rcu_assign_pointer(*fp, f);

	if (fold && fold->handle && f->handle != fold->handle) {
		th = to_hash(fold->handle);
		h = from_hash(fold->handle >> 16);
		b = rtnl_dereference(head->table[th]);
		if (b) {
			fp = &b->ht[h];
			for (pfp = rtnl_dereference(*fp); pfp;
			     fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
				if (pfp == fold) {
					rcu_assign_pointer(*fp, fold->next);
					break;
				}
			}
		}
	}

	route4_reset_fastmap(head);
	*arg = f;
	if (fold) {
		tcf_unbind_filter(tp, &fold->res);
		call_rcu(&fold->rcu, route4_delete_filter);
	}
	return 0;

errout:
	if (f)
		tcf_exts_destroy(&f->exts);
	kfree(f);
	return err;
}

static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	unsigned int h, h1;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = rtnl_dereference(head->table[h]);

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = rtnl_dereference(b->ht[h1]);
				     f;
				     f = rtnl_dereference(f->next)) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}
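/* Dump reverses the handle encoding: bit 15 clear means a "to" tag is
 * present, bit 31 set selects "fromdev" (unless the upper half is the
 * 0xFFFF wildcard, in which case nothing is dumped for it), otherwise
 * the upper half of f->id is the "from" tag.
 */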
static int route4_dump(struct net *net, struct tcf_proto *tp, void *fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = fh;
	struct nlattr *nest;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!(f->handle & 0x8000)) {
		id = f->id & 0xFF;
		if (nla_put_u32(skb, TCA_ROUTE4_TO, id))
			goto nla_put_failure;
	}
	if (f->handle & 0x80000000) {
		if ((f->handle >> 16) != 0xFFFF &&
		    nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif))
			goto nla_put_failure;
	} else {
		id = f->id >> 16;
		if (nla_put_u32(skb, TCA_ROUTE4_FROM, id))
			goto nla_put_failure;
	}
	if (f->res.classid &&
	    nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void route4_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct route4_filter *f = fh;

	if (f && f->res.classid == classid)
		f->res.class = cl;
}

static struct tcf_proto_ops cls_route4_ops __read_mostly = {
	.kind		= "route",
	.classify	= route4_classify,
	.init		= route4_init,
	.destroy	= route4_destroy,
	.get		= route4_get,
	.change		= route4_change,
	.delete		= route4_delete,
	.walk		= route4_walk,
	.dump		= route4_dump,
	.bind_class	= route4_bind_class,
	.owner		= THIS_MODULE,
};

static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");