/*
 *	Linux INET6 implementation
 *	Forwarding Information Database
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	Changes:
 *	Yuji SEKIYA @USAGI:	Support default route on router node;
 *				remove ip6_null_entry from the top of
 *				routing table.
 *	Ville Nuorvala:		Fixed routing subtrees.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/init.h>
#include <linux/list.h>

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#endif

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>

#include <net/ip6_fib.h>
#include <net/ip6_route.h>

#define RT6_DEBUG 2

#if RT6_DEBUG >= 3
#define RT6_TRACE(x...) printk(KERN_DEBUG x)
#else
#define RT6_TRACE(x...) do { ; } while (0)
#endif

static struct kmem_cache * fib6_node_kmem __read_mostly;

enum fib_walk_state_t
{
#ifdef CONFIG_IPV6_SUBTREES
	FWS_S,
#endif
	FWS_L,
	FWS_R,
	FWS_C,
	FWS_U
};

struct fib6_cleaner_t
{
	struct fib6_walker_t w;
	struct net *net;
	int (*func)(struct rt6_info *, void *arg);
	void *arg;
};

static DEFINE_RWLOCK(fib6_walker_lock);

#ifdef CONFIG_IPV6_SUBTREES
#define FWS_INIT FWS_S
#else
#define FWS_INIT FWS_L
#endif

static void fib6_prune_clones(struct net *net, struct fib6_node *fn,
			      struct rt6_info *rt);
static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn);
static struct fib6_node *fib6_repair_tree(struct net *net, struct fib6_node *fn);
static int fib6_walk(struct fib6_walker_t *w);
static int fib6_walk_continue(struct fib6_walker_t *w);

/*
 *	A routing update causes an increase of the serial number on the
 *	affected subtree. This allows for cached routes to be asynchronously
 *	tested when modifications are made to the destination cache as a
 *	result of redirects, path MTU changes, etc.
 */

static __u32 rt_sernum;

static void fib6_gc_timer_cb(unsigned long arg);

static struct fib6_walker_t fib6_walker_list = {
	.prev	= &fib6_walker_list,
	.next	= &fib6_walker_list,
};

#define FOR_WALKERS(w) for ((w)=fib6_walker_list.next; (w) != &fib6_walker_list; (w)=(w)->next)

static inline void fib6_walker_link(struct fib6_walker_t *w)
{
	write_lock_bh(&fib6_walker_lock);
	w->next = fib6_walker_list.next;
	w->prev = &fib6_walker_list;
	w->next->prev = w;
	w->prev->next = w;
	write_unlock_bh(&fib6_walker_lock);
}

static inline void fib6_walker_unlink(struct fib6_walker_t *w)
{
	write_lock_bh(&fib6_walker_lock);
	w->next->prev = w->prev;
	w->prev->next = w->next;
	w->prev = w->next = w;
	write_unlock_bh(&fib6_walker_lock);
}

static __inline__ u32 fib6_new_sernum(void)
{
	u32 n = ++rt_sernum;
	if ((__s32)n <= 0)
		rt_sernum = n = 1;
	return n;
}

/*
 *	Auxiliary address test functions for the radix tree.
 *
 *	These assume a 32bit processor (although it will work on
 *	64bit processors)
 */
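/*
 * Note: fn_bit numbers prefix bits from the most significant bit of the
 * address, so bit 0 is the top bit of addr[0], bit 32 is the top bit of
 * addr[1], and so on.  addr_bit_set() below returns a non-zero __be32
 * when the given bit is set in the address.
 */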
/*
 *	test bit
 */

static __inline__ __be32 addr_bit_set(void *token, int fn_bit)
{
	__be32 *addr = token;

	return htonl(1 << ((~fn_bit)&0x1F)) & addr[fn_bit>>5];
}

static __inline__ struct fib6_node * node_alloc(void)
{
	struct fib6_node *fn;

	fn = kmem_cache_zalloc(fib6_node_kmem, GFP_ATOMIC);

	return fn;
}

static __inline__ void node_free(struct fib6_node * fn)
{
	kmem_cache_free(fib6_node_kmem, fn);
}

static __inline__ void rt6_release(struct rt6_info *rt)
{
	if (atomic_dec_and_test(&rt->rt6i_ref))
		dst_free(&rt->u.dst);
}

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
#define FIB_TABLE_HASHSZ 256
#else
#define FIB_TABLE_HASHSZ 1
#endif

static void fib6_link_table(struct net *net, struct fib6_table *tb)
{
	unsigned int h;

	/*
	 * Initialize table lock at a single place to give lockdep a key,
	 * tables aren't visible prior to being linked to the list.
	 */
	rwlock_init(&tb->tb6_lock);

	h = tb->tb6_id & (FIB_TABLE_HASHSZ - 1);

	/*
	 * No protection necessary, this is the only list mutation
	 * operation, tables never disappear once they exist.
	 */
	hlist_add_head_rcu(&tb->tb6_hlist, &net->ipv6.fib_table_hash[h]);
}

#ifdef CONFIG_IPV6_MULTIPLE_TABLES

static struct fib6_table *fib6_alloc_table(struct net *net, u32 id)
{
	struct fib6_table *table;

	table = kzalloc(sizeof(*table), GFP_ATOMIC);
	if (table != NULL) {
		table->tb6_id = id;
		table->tb6_root.leaf = net->ipv6.ip6_null_entry;
		table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
	}

	return table;
}

struct fib6_table *fib6_new_table(struct net *net, u32 id)
{
	struct fib6_table *tb;

	if (id == 0)
		id = RT6_TABLE_MAIN;
	tb = fib6_get_table(net, id);
	if (tb)
		return tb;

	tb = fib6_alloc_table(net, id);
	if (tb != NULL)
		fib6_link_table(net, tb);

	return tb;
}

struct fib6_table *fib6_get_table(struct net *net, u32 id)
{
	struct fib6_table *tb;
	struct hlist_head *head;
	struct hlist_node *node;
	unsigned int h;

	if (id == 0)
		id = RT6_TABLE_MAIN;
	h = id & (FIB_TABLE_HASHSZ - 1);
	rcu_read_lock();
	head = &net->ipv6.fib_table_hash[h];
	hlist_for_each_entry_rcu(tb, node, head, tb6_hlist) {
		if (tb->tb6_id == id) {
			rcu_read_unlock();
			return tb;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static void fib6_tables_init(struct net *net)
{
	fib6_link_table(net, net->ipv6.fib6_main_tbl);
	fib6_link_table(net, net->ipv6.fib6_local_tbl);
}
#else

struct fib6_table *fib6_new_table(struct net *net, u32 id)
{
	return fib6_get_table(net, id);
}

struct fib6_table *fib6_get_table(struct net *net, u32 id)
{
	return net->ipv6.fib6_main_tbl;
}

struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi *fl,
				   int flags, pol_lookup_t lookup)
{
	return (struct dst_entry *) lookup(net, net->ipv6.fib6_main_tbl, fl, flags);
}

static void fib6_tables_init(struct net *net)
{
	fib6_link_table(net, net->ipv6.fib6_main_tbl);
}

#endif
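/*
 * RTM_GETROUTE dump machinery.
 *
 * fib6_dump_node() below is the walker callback: it emits every route
 * hanging off the current leaf and returns 1 to suspend the walk when the
 * skb is full, remembering the resume point in w->leaf.
 *
 * inet6_dump_fib() keeps its state in the netlink callback:
 *	cb->args[0]	hash bucket being dumped
 *	cb->args[1]	index of the table within that bucket
 *	cb->args[2]	the allocated fib6_walker_t
 *	cb->args[3]	original cb->done, restored by fib6_dump_end()
 *	cb->args[4]	1 while a suspended walk of the current table exists
 */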
static int fib6_dump_node(struct fib6_walker_t *w)
{
	int res;
	struct rt6_info *rt;

	for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) {
		res = rt6_dump_route(rt, w->args);
		if (res < 0) {
			/* Frame is full, suspend walking */
			w->leaf = rt;
			return 1;
		}
		BUG_TRAP(res!=0);
	}
	w->leaf = NULL;
	return 0;
}

static void fib6_dump_end(struct netlink_callback *cb)
{
	struct fib6_walker_t *w = (void*)cb->args[2];

	if (w) {
		cb->args[2] = 0;
		kfree(w);
	}
	cb->done = (void*)cb->args[3];
	cb->args[1] = 3;
}

static int fib6_dump_done(struct netlink_callback *cb)
{
	fib6_dump_end(cb);
	return cb->done ? cb->done(cb) : 0;
}

static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
			   struct netlink_callback *cb)
{
	struct fib6_walker_t *w;
	int res;

	w = (void *)cb->args[2];
	w->root = &table->tb6_root;

	if (cb->args[4] == 0) {
		read_lock_bh(&table->tb6_lock);
		res = fib6_walk(w);
		read_unlock_bh(&table->tb6_lock);
		if (res > 0)
			cb->args[4] = 1;
	} else {
		read_lock_bh(&table->tb6_lock);
		res = fib6_walk_continue(w);
		read_unlock_bh(&table->tb6_lock);
		if (res != 0) {
			if (res < 0)
				fib6_walker_unlink(w);
			goto end;
		}
		fib6_walker_unlink(w);
		cb->args[4] = 0;
	}
end:
	return res;
}

static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int h, s_h;
	unsigned int e = 0, s_e;
	struct rt6_rtnl_dump_arg arg;
	struct fib6_walker_t *w;
	struct fib6_table *tb;
	struct hlist_node *node;
	struct hlist_head *head;
	int res = 0;

	s_h = cb->args[0];
	s_e = cb->args[1];

	w = (void *)cb->args[2];
	if (w == NULL) {
		/* New dump:
		 *
		 * 1. hook callback destructor.
		 */
		cb->args[3] = (long)cb->done;
		cb->done = fib6_dump_done;

		/*
		 * 2. allocate and initialize walker.
		 */
		w = kzalloc(sizeof(*w), GFP_ATOMIC);
		if (w == NULL)
			return -ENOMEM;
		w->func = fib6_dump_node;
		cb->args[2] = (long)w;
	}

	arg.skb = skb;
	arg.cb = cb;
	w->args = &arg;

	for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
		e = 0;
		head = &net->ipv6.fib_table_hash[h];
		hlist_for_each_entry(tb, node, head, tb6_hlist) {
			if (e < s_e)
				goto next;
			res = fib6_dump_table(tb, skb, cb);
			if (res != 0)
				goto out;
next:
			e++;
		}
	}
out:
	cb->args[1] = e;
	cb->args[0] = h;

	res = res < 0 ? res : skb->len;
	if (res <= 0)
		fib6_dump_end(cb);
	return res;
}

/*
 *	Routing Table
 *
 *	return the appropriate node for a routing tree "add" operation
 *	by either creating and inserting or by returning an existing
 *	node.
 */

static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
				     int addrlen, int plen,
				     int offset)
{
	struct fib6_node *fn, *in, *ln;
	struct fib6_node *pn = NULL;
	struct rt6key *key;
	int	bit;
	__be32	dir = 0;
	__u32	sernum = fib6_new_sernum();

	RT6_TRACE("fib6_add_1\n");

	/* insert node in tree */

	fn = root;

	do {
		key = (struct rt6key *)((u8 *)fn->leaf + offset);

		/*
		 *	Prefix match
		 */
		if (plen < fn->fn_bit ||
		    !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit))
			goto insert_above;

		/*
		 *	Exact match ?
		 */
		if (plen == fn->fn_bit) {
			/* clean up an intermediate node */
			if ((fn->fn_flags & RTN_RTINFO) == 0) {
				rt6_release(fn->leaf);
				fn->leaf = NULL;
			}

			fn->fn_sernum = sernum;

			return fn;
		}

		/*
		 *	We have more bits to go
		 */

		/* Try to walk down on tree. */
		fn->fn_sernum = sernum;
		dir = addr_bit_set(addr, fn->fn_bit);
		pn = fn;
		fn = dir ? fn->right: fn->left;
	} while (fn);

	/*
	 *	We walked to the bottom of tree.
	 *	Create new leaf node without children.
	 */

	ln = node_alloc();

	if (ln == NULL)
		return NULL;
	ln->fn_bit = plen;

	ln->parent = pn;
	ln->fn_sernum = sernum;

	if (dir)
		pn->right = ln;
	else
		pn->left  = ln;

	return ln;


insert_above:
	/*
	 * split since we don't have a common prefix anymore or
	 * we have a less significant route.
	 * we have to insert an intermediate node on the list;
	 * this new node will point to the one we need to create
	 * and to the current one.
	 */

	pn = fn->parent;

	/* find 1st bit in difference between the 2 addrs.

	   See comment in __ipv6_addr_diff: bit may be an invalid value,
	   but if it is >= plen, the value is ignored in any case.
	 */

	bit = __ipv6_addr_diff(addr, &key->addr, addrlen);

	/*
	 *		(intermediate)[in]
	 *	          /	   \
	 *	(new leaf node)[ln] (old node)[fn]
	 */
	if (plen > bit) {
		in = node_alloc();
		ln = node_alloc();

		if (in == NULL || ln == NULL) {
			if (in)
				node_free(in);
			if (ln)
				node_free(ln);
			return NULL;
		}

		/*
		 * new intermediate node.
		 * RTN_RTINFO will
		 * be off since an address that chooses one of
		 * the branches would not match less specific routes
		 * in the other branch
		 */

		in->fn_bit = bit;

		in->parent = pn;
		in->leaf = fn->leaf;
		atomic_inc(&in->leaf->rt6i_ref);

		in->fn_sernum = sernum;

		/* update parent pointer */
		if (dir)
			pn->right = in;
		else
			pn->left  = in;

		ln->fn_bit = plen;

		ln->parent = in;
		fn->parent = in;

		ln->fn_sernum = sernum;

		if (addr_bit_set(addr, bit)) {
			in->right = ln;
			in->left  = fn;
		} else {
			in->left  = ln;
			in->right = fn;
		}
	} else { /* plen <= bit */

		/*
		 *		(new leaf node)[ln]
		 *	          /	   \
		 *	     (old node)[fn] NULL
		 */

		ln = node_alloc();

		if (ln == NULL)
			return NULL;

		ln->fn_bit = plen;

		ln->parent = pn;

		ln->fn_sernum = sernum;

		if (dir)
			pn->right = ln;
		else
			pn->left = ln;

		if (addr_bit_set(&key->addr, plen))
			ln->right = fn;
		else
			ln->left  = fn;

		fn->parent = ln;
	}
	return ln;
}
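/*
 * Note on fib6_add_1() above: it returns the node that should carry the
 * new prefix, allocating leaf and intermediate nodes as needed.  In the
 * insert_above case, if the new prefix is longer than the first differing
 * bit (plen > bit) a common intermediate node is created with the old node
 * and the new leaf as children; otherwise (plen <= bit) the new leaf itself
 * becomes the parent of the old node.  NULL is returned only when node
 * allocation fails.
 */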
/*
 *	Insert routing information in a node.
 */

static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
			    struct nl_info *info)
{
	struct rt6_info *iter = NULL;
	struct rt6_info **ins;

	ins = &fn->leaf;

	for (iter = fn->leaf; iter; iter=iter->u.dst.rt6_next) {
		/*
		 *	Search for duplicates
		 */

		if (iter->rt6i_metric == rt->rt6i_metric) {
			/*
			 *	Same priority level
			 */

			if (iter->rt6i_dev == rt->rt6i_dev &&
			    iter->rt6i_idev == rt->rt6i_idev &&
			    ipv6_addr_equal(&iter->rt6i_gateway,
					    &rt->rt6i_gateway)) {
				if (!(iter->rt6i_flags&RTF_EXPIRES))
					return -EEXIST;
				iter->rt6i_expires = rt->rt6i_expires;
				if (!(rt->rt6i_flags&RTF_EXPIRES)) {
					iter->rt6i_flags &= ~RTF_EXPIRES;
					iter->rt6i_expires = 0;
				}
				return -EEXIST;
			}
		}

		if (iter->rt6i_metric > rt->rt6i_metric)
			break;

		ins = &iter->u.dst.rt6_next;
	}

	/* Reset round-robin state, if necessary */
	if (ins == &fn->leaf)
		fn->rr_ptr = NULL;

	/*
	 *	insert node
	 */

	rt->u.dst.rt6_next = iter;
	*ins = rt;
	rt->rt6i_node = fn;
	atomic_inc(&rt->rt6i_ref);
	inet6_rt_notify(RTM_NEWROUTE, rt, info);
	info->nl_net->ipv6.rt6_stats->fib_rt_entries++;

	if ((fn->fn_flags & RTN_RTINFO) == 0) {
		info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
		fn->fn_flags |= RTN_RTINFO;
	}

	return 0;
}

static __inline__ void fib6_start_gc(struct net *net, struct rt6_info *rt)
{
	if (!timer_pending(&net->ipv6.ip6_fib_timer) &&
	    (rt->rt6i_flags & (RTF_EXPIRES|RTF_CACHE)))
		mod_timer(&net->ipv6.ip6_fib_timer,
			  jiffies + net->ipv6.sysctl.ip6_rt_gc_interval);
}

void fib6_force_start_gc(struct net *net)
{
	if (!timer_pending(&net->ipv6.ip6_fib_timer))
		mod_timer(&net->ipv6.ip6_fib_timer,
			  jiffies + net->ipv6.sysctl.ip6_rt_gc_interval);
}
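/*
 * Note: fib6_start_gc() above arms the per-netns gc timer only for routes
 * that can actually expire (RTF_EXPIRES) or be aged out (RTF_CACHE clones);
 * fib6_force_start_gc() arms it unconditionally if it is not already
 * pending.
 */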
/*
 *	Add routing information to the routing tree.
 *	<destination addr>/<source addr>
 *	with source addr info in sub-trees
 */

int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
{
	struct fib6_node *fn, *pn = NULL;
	int err = -ENOMEM;

	fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr),
			rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst));

	if (fn == NULL)
		goto out;

	pn = fn;

#ifdef CONFIG_IPV6_SUBTREES
	if (rt->rt6i_src.plen) {
		struct fib6_node *sn;

		if (fn->subtree == NULL) {
			struct fib6_node *sfn;

			/*
			 * Create subtree.
			 *
			 *		fn[main tree]
			 *		|
			 *		sfn[subtree root]
			 *		   \
			 *		    sn[new leaf node]
			 */

			/* Create subtree root node */
			sfn = node_alloc();
			if (sfn == NULL)
				goto st_failure;

			sfn->leaf = info->nl_net->ipv6.ip6_null_entry;
			atomic_inc(&info->nl_net->ipv6.ip6_null_entry->rt6i_ref);
			sfn->fn_flags = RTN_ROOT;
			sfn->fn_sernum = fib6_new_sernum();

			/* Now add the first leaf node to new subtree */

			sn = fib6_add_1(sfn, &rt->rt6i_src.addr,
					sizeof(struct in6_addr), rt->rt6i_src.plen,
					offsetof(struct rt6_info, rt6i_src));

			if (sn == NULL) {
				/* If it fails, discard the just allocated
				   root, and then (in st_failure) the stale
				   node in the main tree.
				 */
				node_free(sfn);
				goto st_failure;
			}

			/* Now link new subtree to main tree */
			sfn->parent = fn;
			fn->subtree = sfn;
		} else {
			sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr,
					sizeof(struct in6_addr), rt->rt6i_src.plen,
					offsetof(struct rt6_info, rt6i_src));

			if (sn == NULL)
				goto st_failure;
		}

		if (fn->leaf == NULL) {
			fn->leaf = rt;
			atomic_inc(&rt->rt6i_ref);
		}
		fn = sn;
	}
#endif

	err = fib6_add_rt2node(fn, rt, info);

	if (err == 0) {
		fib6_start_gc(info->nl_net, rt);
		if (!(rt->rt6i_flags&RTF_CACHE))
			fib6_prune_clones(info->nl_net, pn, rt);
	}

out:
	if (err) {
#ifdef CONFIG_IPV6_SUBTREES
		/*
		 * If fib6_add_1 has cleared the old leaf pointer in the
		 * super-tree leaf node we have to find a new one for it.
		 */
		if (pn != fn && pn->leaf == rt) {
			pn->leaf = NULL;
			atomic_dec(&rt->rt6i_ref);
		}
		if (pn != fn && !pn->leaf && !(pn->fn_flags & RTN_RTINFO)) {
			pn->leaf = fib6_find_prefix(info->nl_net, pn);
#if RT6_DEBUG >= 2
			if (!pn->leaf) {
				BUG_TRAP(pn->leaf != NULL);
				pn->leaf = info->nl_net->ipv6.ip6_null_entry;
			}
#endif
			atomic_inc(&pn->leaf->rt6i_ref);
		}
#endif
		dst_free(&rt->u.dst);
	}
	return err;

#ifdef CONFIG_IPV6_SUBTREES
	/* Subtree creation failed, probably the main tree node
	   is an orphan. If it is, shoot it.
	 */
st_failure:
	if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
		fib6_repair_tree(info->nl_net, fn);
	dst_free(&rt->u.dst);
	return err;
#endif
}

/*
 *	Routing tree lookup
 *
 */

struct lookup_args {
	int		offset;		/* key offset on rt6_info	*/
	struct in6_addr	*addr;		/* search key			*/
};

static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
					struct lookup_args *args)
{
	struct fib6_node *fn;
	__be32 dir;

	if (unlikely(args->offset == 0))
		return NULL;

	/*
	 *	Descend on a tree
	 */

	fn = root;

	for (;;) {
		struct fib6_node *next;

		dir = addr_bit_set(args->addr, fn->fn_bit);

		next = dir ? fn->right : fn->left;

		if (next) {
			fn = next;
			continue;
		}

		break;
	}

	while(fn) {
		if (FIB6_SUBTREE(fn) || fn->fn_flags & RTN_RTINFO) {
			struct rt6key *key;

			key = (struct rt6key *) ((u8 *) fn->leaf +
						 args->offset);

			if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) {
#ifdef CONFIG_IPV6_SUBTREES
				if (fn->subtree)
					fn = fib6_lookup_1(fn->subtree, args + 1);
#endif
				if (!fn || fn->fn_flags & RTN_RTINFO)
					return fn;
			}
		}

		if (fn->fn_flags & RTN_ROOT)
			break;

		fn = fn->parent;
	}

	return NULL;
}
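/*
 * fib6_lookup() below performs a longest-match lookup on the destination
 * address and, when CONFIG_IPV6_SUBTREES is enabled and a source address
 * is given, continues into the per-destination source subtree.  The args[]
 * array is terminated by an entry with offset == 0, which fib6_lookup_1()
 * treats as a sentinel.
 */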
struct fib6_node * fib6_lookup(struct fib6_node *root, struct in6_addr *daddr,
			       struct in6_addr *saddr)
{
	struct fib6_node *fn;
	struct lookup_args args[] = {
		{
			.offset = offsetof(struct rt6_info, rt6i_dst),
			.addr = daddr,
		},
#ifdef CONFIG_IPV6_SUBTREES
		{
			.offset = offsetof(struct rt6_info, rt6i_src),
			.addr = saddr,
		},
#endif
		{
			.offset = 0,	/* sentinel */
		}
	};

	fn = fib6_lookup_1(root, daddr ? args : args + 1);

	if (fn == NULL || fn->fn_flags & RTN_TL_ROOT)
		fn = root;

	return fn;
}

/*
 *	Get node with specified destination prefix (and source prefix,
 *	if subtrees are used)
 */


static struct fib6_node * fib6_locate_1(struct fib6_node *root,
					struct in6_addr *addr,
					int plen, int offset)
{
	struct fib6_node *fn;

	for (fn = root; fn ; ) {
		struct rt6key *key = (struct rt6key *)((u8 *)fn->leaf + offset);

		/*
		 *	Prefix match
		 */
		if (plen < fn->fn_bit ||
		    !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit))
			return NULL;

		if (plen == fn->fn_bit)
			return fn;

		/*
		 *	We have more bits to go
		 */
		if (addr_bit_set(addr, fn->fn_bit))
			fn = fn->right;
		else
			fn = fn->left;
	}
	return NULL;
}

struct fib6_node * fib6_locate(struct fib6_node *root,
			       struct in6_addr *daddr, int dst_len,
			       struct in6_addr *saddr, int src_len)
{
	struct fib6_node *fn;

	fn = fib6_locate_1(root, daddr, dst_len,
			   offsetof(struct rt6_info, rt6i_dst));

#ifdef CONFIG_IPV6_SUBTREES
	if (src_len) {
		BUG_TRAP(saddr!=NULL);
		if (fn && fn->subtree)
			fn = fib6_locate_1(fn->subtree, saddr, src_len,
					   offsetof(struct rt6_info, rt6i_src));
	}
#endif

	if (fn && fn->fn_flags&RTN_RTINFO)
		return fn;

	return NULL;
}


/*
 *	Deletion
 *
 */

static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn)
{
	if (fn->fn_flags&RTN_ROOT)
		return net->ipv6.ip6_null_entry;

	while(fn) {
		if(fn->left)
			return fn->left->leaf;

		if(fn->right)
			return fn->right->leaf;

		fn = FIB6_SUBTREE(fn);
	}
	return NULL;
}

/*
 *	Called to trim the tree of intermediate nodes when possible. "fn"
 *	is the node we want to try and remove.
 */

static struct fib6_node *fib6_repair_tree(struct net *net,
					   struct fib6_node *fn)
{
	int children;
	int nstate;
	struct fib6_node *child, *pn;
	struct fib6_walker_t *w;
	int iter = 0;

	for (;;) {
		RT6_TRACE("fixing tree: plen=%d iter=%d\n", fn->fn_bit, iter);
		iter++;

		BUG_TRAP(!(fn->fn_flags&RTN_RTINFO));
		BUG_TRAP(!(fn->fn_flags&RTN_TL_ROOT));
		BUG_TRAP(fn->leaf==NULL);

		children = 0;
		child = NULL;
		if (fn->right) child = fn->right, children |= 1;
		if (fn->left) child = fn->left, children |= 2;

		if (children == 3 || FIB6_SUBTREE(fn)
#ifdef CONFIG_IPV6_SUBTREES
		    /* Subtree root (i.e. fn) may have one child */
		    || (children && fn->fn_flags&RTN_ROOT)
#endif
		    ) {
			fn->leaf = fib6_find_prefix(net, fn);
#if RT6_DEBUG >= 2
			if (fn->leaf==NULL) {
				BUG_TRAP(fn->leaf);
				fn->leaf = net->ipv6.ip6_null_entry;
			}
#endif
			atomic_inc(&fn->leaf->rt6i_ref);
			return fn->parent;
		}

		pn = fn->parent;
#ifdef CONFIG_IPV6_SUBTREES
		if (FIB6_SUBTREE(pn) == fn) {
			BUG_TRAP(fn->fn_flags&RTN_ROOT);
			FIB6_SUBTREE(pn) = NULL;
			nstate = FWS_L;
		} else {
			BUG_TRAP(!(fn->fn_flags&RTN_ROOT));
#endif
			if (pn->right == fn) pn->right = child;
			else if (pn->left == fn) pn->left = child;
#if RT6_DEBUG >= 2
			else BUG_TRAP(0);
#endif
			if (child)
				child->parent = pn;
			nstate = FWS_R;
#ifdef CONFIG_IPV6_SUBTREES
		}
#endif

		read_lock(&fib6_walker_lock);
		FOR_WALKERS(w) {
			if (child == NULL) {
				if (w->root == fn) {
					w->root = w->node = NULL;
					RT6_TRACE("W %p adjusted by delroot 1\n", w);
				} else if (w->node == fn) {
					RT6_TRACE("W %p adjusted by delnode 1, s=%d/%d\n", w, w->state, nstate);
					w->node = pn;
					w->state = nstate;
				}
			} else {
				if (w->root == fn) {
					w->root = child;
					RT6_TRACE("W %p adjusted by delroot 2\n", w);
				}
				if (w->node == fn) {
					w->node = child;
					if (children&2) {
						RT6_TRACE("W %p adjusted by delnode 2, s=%d\n", w, w->state);
						w->state = w->state>=FWS_R ? FWS_U : FWS_INIT;
					} else {
						RT6_TRACE("W %p adjusted by delnode 2, s=%d\n", w, w->state);
						w->state = w->state>=FWS_C ? FWS_U : FWS_INIT;
					}
				}
			}
		}
		read_unlock(&fib6_walker_lock);

		node_free(fn);
		if (pn->fn_flags&RTN_RTINFO || FIB6_SUBTREE(pn))
			return pn;

		rt6_release(pn->leaf);
		pn->leaf = NULL;
		fn = pn;
	}
}

static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
			   struct nl_info *info)
{
	struct fib6_walker_t *w;
	struct rt6_info *rt = *rtp;
	struct net *net = info->nl_net;

	RT6_TRACE("fib6_del_route\n");

	/* Unlink it */
	*rtp = rt->u.dst.rt6_next;
	rt->rt6i_node = NULL;
	net->ipv6.rt6_stats->fib_rt_entries--;
	net->ipv6.rt6_stats->fib_discarded_routes++;

	/* Reset round-robin state, if necessary */
	if (fn->rr_ptr == rt)
		fn->rr_ptr = NULL;

	/* Adjust walkers */
	read_lock(&fib6_walker_lock);
	FOR_WALKERS(w) {
		if (w->state == FWS_C && w->leaf == rt) {
			RT6_TRACE("walker %p adjusted by delroute\n", w);
			w->leaf = rt->u.dst.rt6_next;
			if (w->leaf == NULL)
				w->state = FWS_U;
		}
	}
	read_unlock(&fib6_walker_lock);

	rt->u.dst.rt6_next = NULL;

	/* If it was last route, expunge its radix tree node */
	if (fn->leaf == NULL) {
		fn->fn_flags &= ~RTN_RTINFO;
		net->ipv6.rt6_stats->fib_route_nodes--;
		fn = fib6_repair_tree(net, fn);
	}

	if (atomic_read(&rt->rt6i_ref) != 1) {
		/* This route is used as dummy address holder in some split
		 * nodes. It is not leaked, but it still holds other resources,
		 * which must be released in time. So, scan ascendant nodes
		 * and replace dummy references to this route with references
		 * to still alive ones.
		 */
		while (fn) {
			if (!(fn->fn_flags&RTN_RTINFO) && fn->leaf == rt) {
				fn->leaf = fib6_find_prefix(net, fn);
				atomic_inc(&fn->leaf->rt6i_ref);
				rt6_release(rt);
			}
			fn = fn->parent;
		}
		/* No more references are possible at this point. */
		BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
	}

	inet6_rt_notify(RTM_DELROUTE, rt, info);
	rt6_release(rt);
}

int fib6_del(struct rt6_info *rt, struct nl_info *info)
{
	struct net *net = info->nl_net;
	struct fib6_node *fn = rt->rt6i_node;
	struct rt6_info **rtp;

#if RT6_DEBUG >= 2
	if (rt->u.dst.obsolete>0) {
		BUG_TRAP(fn==NULL);
		return -ENOENT;
	}
#endif
	if (fn == NULL || rt == net->ipv6.ip6_null_entry)
		return -ENOENT;

	BUG_TRAP(fn->fn_flags&RTN_RTINFO);

	if (!(rt->rt6i_flags&RTF_CACHE)) {
		struct fib6_node *pn = fn;
#ifdef CONFIG_IPV6_SUBTREES
		/* clones of this route might be in another subtree */
		if (rt->rt6i_src.plen) {
			while (!(pn->fn_flags&RTN_ROOT))
				pn = pn->parent;
			pn = pn->parent;
		}
#endif
		fib6_prune_clones(info->nl_net, pn, rt);
	}

	/*
	 *	Walk the leaf entries looking for ourself
	 */

	for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->u.dst.rt6_next) {
		if (*rtp == rt) {
			fib6_del_route(fn, rtp, info);
			return 0;
		}
	}
	return -ENOENT;
}

/*
 *	Tree traversal function.
 *
 *	Certainly, it is not interrupt safe.
 *	However, it is internally reentrant wrt itself and fib6_add/fib6_del.
 *	It means that we can modify the tree during walking
 *	and use this function for garbage collection, clone pruning,
 *	cleaning tree when a device goes down etc. etc.
 *
 *	It guarantees that every node will be traversed,
 *	and that it will be traversed only once.
 *
 *	Callback function w->func may return:
 *	0 -> continue walking.
 *	positive value -> walking is suspended (used by tree dumps,
 *	and probably by gc, if it will be split to several slices)
 *	negative value -> terminate walking.
 *
 *	The function itself returns:
 *	0   -> walk is complete.
 *	>0  -> walk is incomplete (i.e. suspended)
 *	<0  -> walk is terminated by an error.
 */
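/*
 * Per-node state progression in fib6_walk_continue() below:
 *
 *	FWS_S	descend into the source subtree, if any (CONFIG_IPV6_SUBTREES)
 *	FWS_L	descend into the left child
 *	FWS_R	descend into the right child
 *	FWS_C	run w->func on this node's leaf chain
 *	FWS_U	move back up to the parent
 */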
static int fib6_walk_continue(struct fib6_walker_t *w)
{
	struct fib6_node *fn, *pn;

	for (;;) {
		fn = w->node;
		if (fn == NULL)
			return 0;

		if (w->prune && fn != w->root &&
		    fn->fn_flags&RTN_RTINFO && w->state < FWS_C) {
			w->state = FWS_C;
			w->leaf = fn->leaf;
		}
		switch (w->state) {
#ifdef CONFIG_IPV6_SUBTREES
		case FWS_S:
			if (FIB6_SUBTREE(fn)) {
				w->node = FIB6_SUBTREE(fn);
				continue;
			}
			w->state = FWS_L;
#endif
		case FWS_L:
			if (fn->left) {
				w->node = fn->left;
				w->state = FWS_INIT;
				continue;
			}
			w->state = FWS_R;
		case FWS_R:
			if (fn->right) {
				w->node = fn->right;
				w->state = FWS_INIT;
				continue;
			}
			w->state = FWS_C;
			w->leaf = fn->leaf;
		case FWS_C:
			if (w->leaf && fn->fn_flags&RTN_RTINFO) {
				int err = w->func(w);
				if (err)
					return err;
				continue;
			}
			w->state = FWS_U;
		case FWS_U:
			if (fn == w->root)
				return 0;
			pn = fn->parent;
			w->node = pn;
#ifdef CONFIG_IPV6_SUBTREES
			if (FIB6_SUBTREE(pn) == fn) {
				BUG_TRAP(fn->fn_flags&RTN_ROOT);
				w->state = FWS_L;
				continue;
			}
#endif
			if (pn->left == fn) {
				w->state = FWS_R;
				continue;
			}
			if (pn->right == fn) {
				w->state = FWS_C;
				w->leaf = w->node->leaf;
				continue;
			}
#if RT6_DEBUG >= 2
			BUG_TRAP(0);
#endif
		}
	}
}

static int fib6_walk(struct fib6_walker_t *w)
{
	int res;

	w->state = FWS_INIT;
	w->node = w->root;

	fib6_walker_link(w);
	res = fib6_walk_continue(w);
	if (res <= 0)
		fib6_walker_unlink(w);
	return res;
}

static int fib6_clean_node(struct fib6_walker_t *w)
{
	int res;
	struct rt6_info *rt;
	struct fib6_cleaner_t *c = container_of(w, struct fib6_cleaner_t, w);
	struct nl_info info = {
		.nl_net = c->net,
	};

	for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) {
		res = c->func(rt, c->arg);
		if (res < 0) {
			w->leaf = rt;
			res = fib6_del(rt, &info);
			if (res) {
#if RT6_DEBUG >= 2
				printk(KERN_DEBUG "fib6_clean_node: del failed: rt=%p@%p err=%d\n", rt, rt->rt6i_node, res);
#endif
				continue;
			}
			return 0;
		}
		BUG_TRAP(res==0);
	}
	w->leaf = rt;
	return 0;
}

/*
 *	Convenient frontend to tree walker.
 *
 *	func is called on each route.
 *		It may return -1 -> delete this route.
 *		              0  -> continue walking
 *
 *	prune==1 -> only immediate children of node (certainly,
 *	ignoring pure split nodes) will be scanned.
 */
static void fib6_clean_tree(struct net *net, struct fib6_node *root,
			    int (*func)(struct rt6_info *, void *arg),
			    int prune, void *arg)
{
	struct fib6_cleaner_t c;

	c.w.root = root;
	c.w.func = fib6_clean_node;
	c.w.prune = prune;
	c.func = func;
	c.arg = arg;
	c.net = net;

	fib6_walk(&c.w);
}

void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
		    int prune, void *arg)
{
	struct fib6_table *table;
	struct hlist_node *node;
	struct hlist_head *head;
	unsigned int h;

	rcu_read_lock();
	for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
		head = &net->ipv6.fib_table_hash[h];
		hlist_for_each_entry_rcu(table, node, head, tb6_hlist) {
			write_lock_bh(&table->tb6_lock);
			fib6_clean_tree(net, &table->tb6_root,
					func, prune, arg);
			write_unlock_bh(&table->tb6_lock);
		}
	}
	rcu_read_unlock();
}

static int fib6_prune_clone(struct rt6_info *rt, void *arg)
{
	if (rt->rt6i_flags & RTF_CACHE) {
		RT6_TRACE("pruning clone %p\n", rt);
		return -1;
	}

	return 0;
}

static void fib6_prune_clones(struct net *net, struct fib6_node *fn,
			      struct rt6_info *rt)
{
	fib6_clean_tree(net, fn, fib6_prune_clone, 1, rt);
}

/*
 *	Garbage collection
 */

static struct fib6_gc_args
{
	int			timeout;
	int			more;
} gc_args;

static int fib6_age(struct rt6_info *rt, void *arg)
{
	unsigned long now = jiffies;

	/*
	 *	check addrconf expiration here.
	 *	Routes are expired even if they are in use.
	 *
	 *	Also age clones. Note that clones are aged out
	 *	only if they are not in use now.
	 */

	if (rt->rt6i_flags&RTF_EXPIRES && rt->rt6i_expires) {
		if (time_after(now, rt->rt6i_expires)) {
			RT6_TRACE("expiring %p\n", rt);
			return -1;
		}
		gc_args.more++;
	} else if (rt->rt6i_flags & RTF_CACHE) {
		if (atomic_read(&rt->u.dst.__refcnt) == 0 &&
		    time_after_eq(now, rt->u.dst.lastuse + gc_args.timeout)) {
			RT6_TRACE("aging clone %p\n", rt);
			return -1;
		} else if ((rt->rt6i_flags & RTF_GATEWAY) &&
			   (!(rt->rt6i_nexthop->flags & NTF_ROUTER))) {
			RT6_TRACE("purging route %p via non-router but gateway\n",
				  rt);
			return -1;
		}
		gc_args.more++;
	}

	return 0;
}

static DEFINE_SPINLOCK(fib6_gc_lock);
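/*
 * fib6_run_gc() below: when 'expires' is ~0UL the call is opportunistic -
 * it only trylocks fib6_gc_lock and, if the lock is busy, reschedules the
 * timer one HZ ahead instead of waiting.  Any other value blocks on the
 * lock and uses 'expires' as the clone timeout (0 selects the
 * ip6_rt_gc_interval sysctl default).
 */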
void fib6_run_gc(unsigned long expires, struct net *net)
{
	if (expires != ~0UL) {
		spin_lock_bh(&fib6_gc_lock);
		gc_args.timeout = expires ? (int)expires :
			net->ipv6.sysctl.ip6_rt_gc_interval;
	} else {
		if (!spin_trylock_bh(&fib6_gc_lock)) {
			mod_timer(&net->ipv6.ip6_fib_timer, jiffies + HZ);
			return;
		}
		gc_args.timeout = net->ipv6.sysctl.ip6_rt_gc_interval;
	}

	gc_args.more = icmp6_dst_gc();

	fib6_clean_all(net, fib6_age, 0, NULL);

	if (gc_args.more)
		mod_timer(&net->ipv6.ip6_fib_timer,
			  round_jiffies(jiffies
					+ net->ipv6.sysctl.ip6_rt_gc_interval));
	else
		del_timer(&net->ipv6.ip6_fib_timer);
	spin_unlock_bh(&fib6_gc_lock);
}

static void fib6_gc_timer_cb(unsigned long arg)
{
	fib6_run_gc(0, (struct net *)arg);
}

static int fib6_net_init(struct net *net)
{
	setup_timer(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, (unsigned long)net);

	net->ipv6.rt6_stats = kzalloc(sizeof(*net->ipv6.rt6_stats), GFP_KERNEL);
	if (!net->ipv6.rt6_stats)
		goto out_timer;

	net->ipv6.fib_table_hash = kcalloc(FIB_TABLE_HASHSZ,
					   sizeof(*net->ipv6.fib_table_hash),
					   GFP_KERNEL);
	if (!net->ipv6.fib_table_hash)
		goto out_rt6_stats;

	net->ipv6.fib6_main_tbl = kzalloc(sizeof(*net->ipv6.fib6_main_tbl),
					  GFP_KERNEL);
	if (!net->ipv6.fib6_main_tbl)
		goto out_fib_table_hash;

	net->ipv6.fib6_main_tbl->tb6_id = RT6_TABLE_MAIN;
	net->ipv6.fib6_main_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry;
	net->ipv6.fib6_main_tbl->tb6_root.fn_flags =
		RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	net->ipv6.fib6_local_tbl = kzalloc(sizeof(*net->ipv6.fib6_local_tbl),
					   GFP_KERNEL);
	if (!net->ipv6.fib6_local_tbl)
		goto out_fib6_main_tbl;
	net->ipv6.fib6_local_tbl->tb6_id = RT6_TABLE_LOCAL;
	net->ipv6.fib6_local_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry;
	net->ipv6.fib6_local_tbl->tb6_root.fn_flags =
		RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
#endif
	fib6_tables_init(net);

	return 0;

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
out_fib6_main_tbl:
	kfree(net->ipv6.fib6_main_tbl);
#endif
out_fib_table_hash:
	kfree(net->ipv6.fib_table_hash);
out_rt6_stats:
	kfree(net->ipv6.rt6_stats);
out_timer:
	return -ENOMEM;
}

static void fib6_net_exit(struct net *net)
{
	rt6_ifdown(net, NULL);
	del_timer_sync(&net->ipv6.ip6_fib_timer);

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	kfree(net->ipv6.fib6_local_tbl);
#endif
	kfree(net->ipv6.fib6_main_tbl);
	kfree(net->ipv6.fib_table_hash);
	kfree(net->ipv6.rt6_stats);
}

static struct pernet_operations fib6_net_ops = {
	.init = fib6_net_init,
	.exit = fib6_net_exit,
};

int __init fib6_init(void)
{
	int ret = -ENOMEM;

	fib6_node_kmem = kmem_cache_create("fib6_nodes",
					   sizeof(struct fib6_node),
					   0, SLAB_HWCACHE_ALIGN,
					   NULL);
	if (!fib6_node_kmem)
		goto out;

	ret = register_pernet_subsys(&fib6_net_ops);
	if (ret)
		goto out_kmem_cache_create;

	ret = __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib);
	if (ret)
		goto out_unregister_subsys;
out:
	return ret;

out_unregister_subsys:
	unregister_pernet_subsys(&fib6_net_ops);
out_kmem_cache_create:
	kmem_cache_destroy(fib6_node_kmem);
	goto out;
}

void fib6_gc_cleanup(void)
{
	unregister_pernet_subsys(&fib6_net_ops);
	kmem_cache_destroy(fib6_node_kmem);
}