/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

/*
 * 1. For now we assume that route tags < 256.
 *    This allows us to use direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */

struct route4_fastmap {
	struct route4_filter		*filter;
	u32				id;
	int				iif;
};

struct route4_head {
	struct route4_fastmap		fastmap[16];
	struct route4_bucket __rcu	*table[256 + 1];
	struct rcu_head			rcu;
};

struct route4_bucket {
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter __rcu	*ht[16 + 16 + 1];
	struct rcu_head			rcu;
};

struct route4_filter {
	struct route4_filter __rcu	*next;
	u32				id;
	int				iif;

	struct tcf_result		res;
	struct tcf_exts			exts;
	u32				handle;
	struct route4_bucket		*bkt;
	struct tcf_proto		*tp;
	struct rcu_work			rwork;
};

#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))

static inline int route4_fastmap_hash(u32 id, int iif)
{
	return id & 0xF;
}

static DEFINE_SPINLOCK(fastmap_lock);
static void
route4_reset_fastmap(struct route4_head *head)
{
	spin_lock_bh(&fastmap_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(&fastmap_lock);
}

static void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);

	/* fastmap updates must look atomic to align id, iif, filter */
	spin_lock_bh(&fastmap_lock);
	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
	spin_unlock_bh(&fastmap_lock);
}

static inline int route4_hash_to(u32 id)
{
	return id & 0xFF;
}

static inline int route4_hash_from(u32 id)
{
	return (id >> 16) & 0xF;
}

static inline int route4_hash_iif(int iif)
{
	return 16 + ((iif >> 16) & 0xF);
}

static inline int route4_hash_wild(void)
{
	return 32;
}
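
/*
 * Lookup keys come from the route: dst->tclassid carries the realms
 * assigned by the FIB (low 16 bits: destination realm, high 16 bits:
 * source realm), and iif is the incoming interface index.  Within the
 * matching "to" bucket, classification tries the FROM sub-bucket, then
 * the IIF sub-bucket, then the wildcard sub-bucket, and finally retries
 * the whole sequence in the wildcard-"to" bucket (table[256]); the
 * winning filter is cached in the 16-entry fastmap.
 *
 * A typical setup (a sketch; assumes realms 2 and 10 were already
 * configured via ip rule/ip route) would look something like:
 *
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 100 \
 *		route from 2 to 10 classid 1:10
 */
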
#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_has_actions(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}

static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = rcu_dereference_bh(tp->root);
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	dst = skb_dst(skb);
	if (!dst)
		goto failure;

	id = dst->tclassid;

	iif = inet_iif(skb);

	h = route4_fastmap_hash(id, iif);

	spin_lock(&fastmap_lock);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE) {
			spin_unlock(&fastmap_lock);
			goto failure;
		}

		*res = f->res;
		spin_unlock(&fastmap_lock);
		return 0;
	}
	spin_unlock(&fastmap_lock);

	h = route4_hash_to(id);

restart:
	b = rcu_dereference_bh(head->table[h]);
	if (b) {
		for (f = rcu_dereference_bh(b->ht[route4_hash_from(id)]);
		     f;
		     f = rcu_dereference_bh(f->next))
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = rcu_dereference_bh(b->ht[route4_hash_iif(iif)]);
		     f;
		     f = rcu_dereference_bh(f->next))
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = rcu_dereference_bh(b->ht[route4_hash_wild()]);
		     f;
		     f = rcu_dereference_bh(f->next))
			ROUTE4_APPLY_RESULT();
	}
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;
}

static inline u32 to_hash(u32 id)
{
	u32 h = id & 0xFF;

	if (id & 0x8000)
		h += 256;
	return h;
}

static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;
	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id & 0xF;
	}
	return 16 + (id & 0xF);
}

static void *route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned int h1, h2;

	h1 = to_hash(handle);
	if (h1 > 256)
		return NULL;

	h2 = from_hash(handle >> 16);
	if (h2 > 32)
		return NULL;

	b = rtnl_dereference(head->table[h1]);
	if (b) {
		for (f = rtnl_dereference(b->ht[h2]);
		     f;
		     f = rtnl_dereference(f->next))
			if (f->handle == handle)
				return f;
	}
	return NULL;
}

static int route4_init(struct tcf_proto *tp)
{
	struct route4_head *head;

	head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	rcu_assign_pointer(tp->root, head);
	return 0;
}
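
/*
 * Teardown is deferred: a filter is first unlinked under RTNL, then
 * freed only after an RCU grace period, so route4_classify() running
 * under rcu_read_lock_bh() can never see a half-destroyed filter.
 * tcf_queue_work() provides the grace period; the worker re-takes
 * rtnl_lock() around __route4_delete_filter() because destroying the
 * filter's extensions (actions) is done under RTNL here.
 */
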
static void __route4_delete_filter(struct route4_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void route4_delete_filter_work(struct work_struct *work)
{
	struct route4_filter *f = container_of(to_rcu_work(work),
					       struct route4_filter,
					       rwork);
	rtnl_lock();
	__route4_delete_filter(f);
	rtnl_unlock();
}

static void route4_queue_work(struct route4_filter *f)
{
	tcf_queue_work(&f->rwork, route4_delete_filter_work);
}

static void route4_destroy(struct tcf_proto *tp, bool rtnl_held,
			   struct netlink_ext_ack *extack)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	int h1, h2;

	if (head == NULL)
		return;

	for (h1 = 0; h1 <= 256; h1++) {
		struct route4_bucket *b;

		b = rtnl_dereference(head->table[h1]);
		if (b) {
			for (h2 = 0; h2 <= 32; h2++) {
				struct route4_filter *f;

				while ((f = rtnl_dereference(b->ht[h2])) != NULL) {
					struct route4_filter *next;

					next = rtnl_dereference(f->next);
					RCU_INIT_POINTER(b->ht[h2], next);
					tcf_unbind_filter(tp, &f->res);
					if (tcf_exts_get_net(&f->exts))
						route4_queue_work(f);
					else
						__route4_delete_filter(f);
				}
			}
			RCU_INIT_POINTER(head->table[h1], NULL);
			kfree_rcu(b, rcu);
		}
	}
	kfree_rcu(head, rcu);
}

static int route4_delete(struct tcf_proto *tp, void *arg, bool *last,
			 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_filter *f = arg;
	struct route4_filter __rcu **fp;
	struct route4_filter *nf;
	struct route4_bucket *b;
	unsigned int h = 0;
	int i, h1;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	fp = &b->ht[from_hash(h >> 16)];
	for (nf = rtnl_dereference(*fp); nf;
	     fp = &nf->next, nf = rtnl_dereference(*fp)) {
		if (nf == f) {
			/* unlink it */
			RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));

			/* Remove any fastmap lookups that might ref this
			 * filter; note we unlinked it above, so it cannot
			 * get back into the fastmap.
			 */
			route4_reset_fastmap(head);

			/* Delete it */
			tcf_unbind_filter(tp, &f->res);
			tcf_exts_get_net(&f->exts);
			tcf_queue_work(&f->rwork, route4_delete_filter_work);

			/* Strip RTNL protected tree */
			for (i = 0; i <= 32; i++) {
				struct route4_filter *rt;

				rt = rtnl_dereference(b->ht[i]);
				if (rt)
					goto out;
			}

			/* OK, session has no flows */
			RCU_INIT_POINTER(head->table[to_hash(h)], NULL);
			kfree_rcu(b, rcu);
			break;
		}
	}

out:
	*last = true;
	for (h1 = 0; h1 <= 256; h1++) {
		if (rcu_access_pointer(head->table[h1])) {
			*last = false;
			break;
		}
	}

	return 0;
}

static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
	[TCA_ROUTE4_CLASSID]	= { .type = NLA_U32 },
	[TCA_ROUTE4_TO]		= { .type = NLA_U32 },
	[TCA_ROUTE4_FROM]	= { .type = NLA_U32 },
	[TCA_ROUTE4_IIF]	= { .type = NLA_U32 },
};
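
/*
 * route4_set_parms() packs the whole match into the filter handle:
 *
 *	bits  0..7	"to" realm (valid when bit 15 is clear)
 *	bit  15		set when no "to" realm was supplied
 *	bits 16..31	"from" realm, or (iif | 0x8000) for "fromif",
 *			or 0xFFFF when neither was supplied
 *
 * E.g. "route from 2 to 10" yields handle 0x0002000a: to_hash() picks
 * bucket 10 and from_hash(handle >> 16) picks FROM sub-bucket 2.
 */
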
static int route4_set_parms(struct net *net, struct tcf_proto *tp,
			    unsigned long base, struct route4_filter *f,
			    u32 handle, struct route4_head *head,
			    struct nlattr **tb, struct nlattr *est, int new,
			    bool ovr, struct netlink_ext_ack *extack)
{
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, true, extack);
	if (err < 0)
		return err;

	if (tb[TCA_ROUTE4_TO]) {
		if (new && handle & 0x8000)
			return -EINVAL;
		to = nla_get_u32(tb[TCA_ROUTE4_TO]);
		if (to > 0xFF)
			return -EINVAL;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM]) {
		if (tb[TCA_ROUTE4_IIF])
			return -EINVAL;
		id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
		if (id > 0xFF)
			return -EINVAL;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF]) {
		id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
		if (id > 0x7FFF)
			return -EINVAL;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			return -EINVAL;
	}

	h1 = to_hash(nhandle);
	b = rtnl_dereference(head->table[h1]);
	if (!b) {
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			return -ENOBUFS;

		rcu_assign_pointer(head->table[h1], b);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);

		for (fp = rtnl_dereference(b->ht[h2]);
		     fp;
		     fp = rtnl_dereference(fp->next))
			if (fp->handle == f->handle)
				return -EEXIST;
	}

	if (tb[TCA_ROUTE4_TO])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM])
		f->id = to | id << 16;
	else if (tb[TCA_ROUTE4_IIF])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	f->tp = tp;

	if (tb[TCA_ROUTE4_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	return 0;
}

static int route4_change(struct net *net, struct sk_buff *in_skb,
			 struct tcf_proto *tp, unsigned long base, u32 handle,
			 struct nlattr **tca, void **arg, bool ovr,
			 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_filter __rcu **fp;
	struct route4_filter *fold, *f1, *pfp, *f = NULL;
	struct route4_bucket *b;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ROUTE4_MAX + 1];
	unsigned int h, th;
	int err;
	bool new = true;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, route4_policy, NULL);
	if (err < 0)
		return err;

	fold = *arg;
	if (fold && handle && fold->handle != handle)
		return -EINVAL;

	err = -ENOBUFS;
	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (!f)
		goto errout;

	err = tcf_exts_init(&f->exts, net, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
	if (err < 0)
		goto errout;

	if (fold) {
		f->id = fold->id;
		f->iif = fold->iif;
		f->res = fold->res;
		f->handle = fold->handle;

		f->tp = fold->tp;
		f->bkt = fold->bkt;
		new = false;
	}

	err = route4_set_parms(net, tp, base, f, handle, head, tb,
			       tca[TCA_RATE], new, ovr, extack);
	if (err < 0)
		goto errout;

	h = from_hash(f->handle >> 16);
	fp = &f->bkt->ht[h];
	for (pfp = rtnl_dereference(*fp);
	     (f1 = rtnl_dereference(*fp)) != NULL;
	     fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	tcf_block_netif_keep_dst(tp->chain->block);
	rcu_assign_pointer(f->next, f1);
	rcu_assign_pointer(*fp, f);

	if (fold && fold->handle && f->handle != fold->handle) {
		th = to_hash(fold->handle);
		h = from_hash(fold->handle >> 16);
		b = rtnl_dereference(head->table[th]);
		if (b) {
			fp = &b->ht[h];
			for (pfp = rtnl_dereference(*fp); pfp;
			     fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
				if (pfp == f) {
					*fp = f->next;
					break;
				}
			}
		}
	}

	route4_reset_fastmap(head);
	*arg = f;
	if (fold) {
		tcf_unbind_filter(tp, &fold->res);
		tcf_exts_get_net(&fold->exts);
		tcf_queue_work(&fold->rwork, route4_delete_filter_work);
	}
	return 0;

errout:
	if (f)
		tcf_exts_destroy(&f->exts);
	kfree(f);
	return err;
}

static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg,
			bool rtnl_held)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	unsigned int h, h1;

	if (head == NULL || arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = rtnl_dereference(head->table[h]);

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = rtnl_dereference(b->ht[h1]);
				     f;
				     f = rtnl_dereference(f->next)) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}
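
/*
 * Dumping reverses the handle encoding above: the low byte becomes
 * TCA_ROUTE4_TO unless bit 15 is set, and the high word becomes
 * TCA_ROUTE4_IIF when bit 31 is set (except for the 0xFFFF wildcard,
 * which dumps neither attribute) or TCA_ROUTE4_FROM otherwise.
 */
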
static int route4_dump(struct net *net, struct tcf_proto *tp, void *fh,
		       struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct route4_filter *f = fh;
	struct nlattr *nest;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!(f->handle & 0x8000)) {
		id = f->id & 0xFF;
		if (nla_put_u32(skb, TCA_ROUTE4_TO, id))
			goto nla_put_failure;
	}
	if (f->handle & 0x80000000) {
		if ((f->handle >> 16) != 0xFFFF &&
		    nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif))
			goto nla_put_failure;
	} else {
		id = f->id >> 16;
		if (nla_put_u32(skb, TCA_ROUTE4_FROM, id))
			goto nla_put_failure;
	}
	if (f->res.classid &&
	    nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void route4_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct route4_filter *f = fh;

	if (f && f->res.classid == classid)
		f->res.class = cl;
}

static struct tcf_proto_ops cls_route4_ops __read_mostly = {
	.kind		= "route",
	.classify	= route4_classify,
	.init		= route4_init,
	.destroy	= route4_destroy,
	.get		= route4_get,
	.change		= route4_change,
	.delete		= route4_delete,
	.walk		= route4_walk,
	.dump		= route4_dump,
	.bind_class	= route4_bind_class,
	.owner		= THIS_MODULE,
};

static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");