/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 Forwarding Information Base: semantics.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/slab.h>

#include <net/arp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/netlink.h>
#include <net/nexthop.h>

#include "fib_lookup.h"

static DEFINE_SPINLOCK(fib_info_lock);
static struct hlist_head *fib_info_hash;
static struct hlist_head *fib_info_laddrhash;
static unsigned int fib_info_hash_size;
static unsigned int fib_info_cnt;

#define DEVINDEX_HASHBITS 8
#define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS)
static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];

#ifdef CONFIG_IP_ROUTE_MULTIPATH

static DEFINE_SPINLOCK(fib_multipath_lock);

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh;				\
	for (nhsel = 0, nh = (fi)->fib_nh;				\
	     nhsel < (fi)->fib_nhs;					\
	     nh++, nhsel++)

#define change_nexthops(fi) {						\
	int nhsel; struct fib_nh *nexthop_nh;				\
	for (nhsel = 0,	nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	     nhsel < (fi)->fib_nhs;					\
	     nexthop_nh++, nhsel++)

#else /* CONFIG_IP_ROUTE_MULTIPATH */

/* Hopefully gcc will optimize away the dummy loop. */

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh = (fi)->fib_nh;		\
	for (nhsel = 0; nhsel < 1; nhsel++)

#define change_nexthops(fi) {						\
	int nhsel;							\
	struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	for (nhsel = 0; nhsel < 1; nhsel++)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */

#define endfor_nexthops(fi) }


const struct fib_prop fib_props[RTN_MAX + 1] = {
	[RTN_UNSPEC] = {
		.error	= 0,
		.scope	= RT_SCOPE_NOWHERE,
	},
	[RTN_UNICAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_LOCAL] = {
		.error	= 0,
		.scope	= RT_SCOPE_HOST,
	},
	[RTN_BROADCAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_LINK,
	},
	[RTN_ANYCAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_LINK,
	},
	[RTN_MULTICAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_BLACKHOLE] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_UNREACHABLE] = {
		.error	= -EHOSTUNREACH,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_PROHIBIT] = {
		.error	= -EACCES,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_THROW] = {
		.error	= -EAGAIN,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_NAT] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_NOWHERE,
	},
	[RTN_XRESOLVE] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_NOWHERE,
	},
};
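/*
 * Usage sketch for the nexthop iterators above (illustrative only):
 *
 *	for_nexthops(fi) {
 *		if (nh->nh_flags & RTNH_F_DEAD)
 *			continue;
 *		...
 *	} endfor_nexthops(fi);
 *
 * Each macro opens a brace scope that declares nhsel and the cursor
 * variable, which is why every loop must be closed with
 * endfor_nexthops(): it supplies the matching '}'.
 */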
static void free_nh_exceptions(struct fib_nh *nh)
{
	struct fnhe_hash_bucket *hash = nh->nh_exceptions;
	int i;

	for (i = 0; i < FNHE_HASH_SIZE; i++) {
		struct fib_nh_exception *fnhe;

		fnhe = rcu_dereference_protected(hash[i].chain, 1);
		while (fnhe) {
			struct fib_nh_exception *next;

			next = rcu_dereference_protected(fnhe->fnhe_next, 1);
			kfree(fnhe);

			fnhe = next;
		}
	}
	kfree(hash);
}

/* Release a nexthop info record */
static void free_fib_info_rcu(struct rcu_head *head)
{
	struct fib_info *fi = container_of(head, struct fib_info, rcu);

	change_nexthops(fi) {
		if (nexthop_nh->nh_dev)
			dev_put(nexthop_nh->nh_dev);
		if (nexthop_nh->nh_exceptions)
			free_nh_exceptions(nexthop_nh);
		if (nexthop_nh->nh_rth_output)
			dst_free(&nexthop_nh->nh_rth_output->dst);
		if (nexthop_nh->nh_rth_input)
			dst_free(&nexthop_nh->nh_rth_input->dst);
	} endfor_nexthops(fi);

	release_net(fi->fib_net);
	if (fi->fib_metrics != (u32 *) dst_default_metrics)
		kfree(fi->fib_metrics);
	kfree(fi);
}

void free_fib_info(struct fib_info *fi)
{
	if (fi->fib_dead == 0) {
		pr_warn("Freeing alive fib_info %p\n", fi);
		return;
	}
	fib_info_cnt--;
#ifdef CONFIG_IP_ROUTE_CLASSID
	change_nexthops(fi) {
		if (nexthop_nh->nh_tclassid)
			fi->fib_net->ipv4.fib_num_tclassid_users--;
	} endfor_nexthops(fi);
#endif
	call_rcu(&fi->rcu, free_fib_info_rcu);
}

void fib_release_info(struct fib_info *fi)
{
	spin_lock_bh(&fib_info_lock);
	if (fi && --fi->fib_treeref == 0) {
		hlist_del(&fi->fib_hash);
		if (fi->fib_prefsrc)
			hlist_del(&fi->fib_lhash);
		change_nexthops(fi) {
			if (!nexthop_nh->nh_dev)
				continue;
			hlist_del(&nexthop_nh->nh_hash);
		} endfor_nexthops(fi)
		fi->fib_dead = 1;
		fib_info_put(fi);
	}
	spin_unlock_bh(&fib_info_lock);
}

static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
{
	const struct fib_nh *onh = ofi->fib_nh;

	for_nexthops(fi) {
		if (nh->nh_oif != onh->nh_oif ||
		    nh->nh_gw  != onh->nh_gw ||
		    nh->nh_scope != onh->nh_scope ||
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		    nh->nh_weight != onh->nh_weight ||
#endif
#ifdef CONFIG_IP_ROUTE_CLASSID
		    nh->nh_tclassid != onh->nh_tclassid ||
#endif
		    ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_F_DEAD))
			return -1;
		onh++;
	} endfor_nexthops(fi);
	return 0;
}

static inline unsigned int fib_devindex_hashfn(unsigned int val)
{
	unsigned int mask = DEVINDEX_HASHSIZE - 1;

	return (val ^
		(val >> DEVINDEX_HASHBITS) ^
		(val >> (DEVINDEX_HASHBITS * 2))) & mask;
}

static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
{
	unsigned int mask = (fib_info_hash_size - 1);
	unsigned int val = fi->fib_nhs;

	val ^= (fi->fib_protocol << 8) | fi->fib_scope;
	val ^= (__force u32)fi->fib_prefsrc;
	val ^= fi->fib_priority;
	for_nexthops(fi) {
		val ^= fib_devindex_hashfn(nh->nh_oif);
	} endfor_nexthops(fi)

	return (val ^ (val >> 7) ^ (val >> 12)) & mask;
}
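/* Find an existing fib_info that is semantically identical to @nfi, so
 * that routes with the same properties can share one refcounted object.
 * Note that RTNH_F_DEAD is masked out of the flags comparison: a dead
 * nexthop alone does not make two infos different.
 */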
static struct fib_info *fib_find_info(const struct fib_info *nfi)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct fib_info *fi;
	unsigned int hash;

	hash = fib_info_hashfn(nfi);
	head = &fib_info_hash[hash];

	hlist_for_each_entry(fi, node, head, fib_hash) {
		if (!net_eq(fi->fib_net, nfi->fib_net))
			continue;
		if (fi->fib_nhs != nfi->fib_nhs)
			continue;
		if (nfi->fib_protocol == fi->fib_protocol &&
		    nfi->fib_scope == fi->fib_scope &&
		    nfi->fib_prefsrc == fi->fib_prefsrc &&
		    nfi->fib_priority == fi->fib_priority &&
		    memcmp(nfi->fib_metrics, fi->fib_metrics,
			   sizeof(u32) * RTAX_MAX) == 0 &&
		    ((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_F_DEAD) == 0 &&
		    (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0))
			return fi;
	}

	return NULL;
}

/* Check that the gateway is already configured.
 * Used only by the redirect accept routine.
 */
int ip_fib_check_default(__be32 gw, struct net_device *dev)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct fib_nh *nh;
	unsigned int hash;

	spin_lock(&fib_info_lock);

	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	hlist_for_each_entry(nh, node, head, nh_hash) {
		if (nh->nh_dev == dev &&
		    nh->nh_gw == gw &&
		    !(nh->nh_flags & RTNH_F_DEAD)) {
			spin_unlock(&fib_info_lock);
			return 0;
		}
	}

	spin_unlock(&fib_info_lock);

	return -1;
}

static inline size_t fib_nlmsg_size(struct fib_info *fi)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
			 + nla_total_size(4) /* RTA_TABLE */
			 + nla_total_size(4) /* RTA_DST */
			 + nla_total_size(4) /* RTA_PRIORITY */
			 + nla_total_size(4); /* RTA_PREFSRC */

	/* space for nested metrics */
	payload += nla_total_size((RTAX_MAX * nla_total_size(4)));

	if (fi->fib_nhs) {
		/* Also handles the special case fib_nhs == 1 */

		/* each nexthop is packed in an attribute */
		size_t nhsize = nla_total_size(sizeof(struct rtnexthop));

		/* may contain flow and gateway attribute */
		nhsize += 2 * nla_total_size(4);

		/* all nexthops are packed in a nested attribute */
		payload += nla_total_size(fi->fib_nhs * nhsize);
	}

	return payload;
}

void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
	       int dst_len, u32 tb_id, struct nl_info *info,
	       unsigned int nlm_flags)
{
	struct sk_buff *skb;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	int err = -ENOBUFS;

	skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_dump_info(skb, info->pid, seq, event, tb_id,
			    fa->fa_type, key, dst_len,
			    fa->fa_tos, fa->fa_info, nlm_flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, info->nl_net, info->pid, RTNLGRP_IPV4_ROUTE,
		    info->nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
}
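/*
 * Notification sketch (illustrative, not a definitive call site): a
 * table-update path such as a route insert is expected to report the
 * change roughly as
 *
 *	rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
 *		  &cfg->fc_nlinfo, NLM_F_CREATE);
 *
 * so that RTNLGRP_IPV4_ROUTE listeners see the new route.
 */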
/* Return the first fib alias matching TOS with
 * priority less than or equal to PRIO.
 */
struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio)
{
	if (fah) {
		struct fib_alias *fa;
		list_for_each_entry(fa, fah, fa_list) {
			if (fa->fa_tos > tos)
				continue;
			if (fa->fa_info->fib_priority >= prio ||
			    fa->fa_tos < tos)
				return fa;
		}
	}
	return NULL;
}

int fib_detect_death(struct fib_info *fi, int order,
		     struct fib_info **last_resort, int *last_idx, int dflt)
{
	struct neighbour *n;
	int state = NUD_NONE;

	n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev);
	if (n) {
		state = n->nud_state;
		neigh_release(n);
	}
	if (state == NUD_REACHABLE)
		return 0;
	if ((state & NUD_VALID) && order != dflt)
		return 0;
	if ((state & NUD_VALID) ||
	    (*last_idx < 0 && order > dflt)) {
		*last_resort = fi;
		*last_idx = order;
	}
	return 1;
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH

static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining)
{
	int nhs = 0;

	while (rtnh_ok(rtnh, remaining)) {
		nhs++;
		rtnh = rtnh_next(rtnh, &remaining);
	}

	/* leftover implies invalid nexthop configuration, discard it */
	return remaining > 0 ? 0 : nhs;
}

static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
		       int remaining, struct fib_config *cfg)
{
	change_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining))
			return -EINVAL;

		nexthop_nh->nh_flags =
			(cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
		nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
		nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
			if (nexthop_nh->nh_tclassid)
				fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
		}

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);

	return 0;
}

#endif
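/*
 * Wire format parsed by fib_get_nhs() above (a sketch of the nesting,
 * as encoded by userspace tools such as iproute2):
 *
 *	RTA_MULTIPATH
 *	    struct rtnexthop	(rtnh_flags, rtnh_hops = weight - 1,
 *				 rtnh_ifindex)
 *		RTA_GATEWAY	(optional, per nexthop)
 *		RTA_FLOW	(optional, with CONFIG_IP_ROUTE_CLASSID)
 *	    struct rtnexthop
 *		...
 */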
int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	struct rtnexthop *rtnh;
	int remaining;
#endif

	if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority)
		return 1;

	if (cfg->fc_oif || cfg->fc_gw) {
		if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
		    (!cfg->fc_gw  || cfg->fc_gw == fi->fib_nh->nh_gw))
			return 0;
		return 1;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (cfg->fc_mp == NULL)
		return 0;

	rtnh = cfg->fc_mp;
	remaining = cfg->fc_mp_len;

	for_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining))
			return -EINVAL;

		if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->nh_oif)
			return 1;

		/* rtnh_attrlen() cannot be negative; testing "< 0" here
		 * would make the gateway/flow comparison dead code.
		 */
		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla && nla_get_be32(nla) != nh->nh_gw)
				return 1;
#ifdef CONFIG_IP_ROUTE_CLASSID
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			if (nla && nla_get_u32(nla) != nh->nh_tclassid)
				return 1;
#endif
		}

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);
#endif
	return 0;
}
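/*
 * Matching sketch (illustrative): deleting a route with
 * "ip route del 10.0.0.0/8 via 192.0.2.1 dev eth0" produces a
 * fib_config whose fc_gw/fc_oif are compared against the first
 * nexthop above; only a fib_info agreeing on both may be removed.
 */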
/*
 * Picture
 * -------
 *
 * Nexthop semantics are very messy for historical reasons.
 * We have to take into account that:
 * a) the gateway can actually be a local interface address,
 *    so that a gatewayed route is direct.
 * b) the gateway must be an on-link address, possibly
 *    described not by an ifaddr but by a direct route.
 * c) if both gateway and interface are specified, they should not
 *    contradict each other.
 * d) if we use tunnel routes, the gateway could be not on-link.
 *
 * Attempting to reconcile all of these (alas, self-contradictory)
 * conditions results in pretty ugly and hairy code with obscure logic.
 *
 * I chose to generalize it instead, so that the size
 * of code does not increase practically, but it becomes
 * much more general.
 * Every prefix is assigned a "scope" value: "host" is a local address,
 * "link" is a direct route,
 * [ ... "site" ... "interior" ... ]
 * and "universe" is a true gateway route with global meaning.
 *
 * Every prefix refers to a set of "nexthop"s (gw, oif),
 * where gw must have narrower scope. This recursion stops
 * when gw has LOCAL scope or if "nexthop" is declared ONLINK,
 * which means that gw is forced to be on-link.
 *
 * The code is still hairy, but now it is apparently logically
 * consistent and very flexible. E.g. as a by-product it allows
 * independent exterior and interior routing processes to
 * coexist in peace.
 *
 * Normally it looks like this:
 *
 * {universe prefix}  -> (gw, oif) [scope link]
 *		  |
 *		  |-> {link prefix} -> (gw, oif) [scope local]
 *					|
 *					|-> {local prefix} (terminal node)
 */
static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
			struct fib_nh *nh)
{
	int err;
	struct net *net;
	struct net_device *dev;

	net = cfg->fc_nlinfo.nl_net;
	if (nh->nh_gw) {
		struct fib_result res;

		if (nh->nh_flags & RTNH_F_ONLINK) {

			if (cfg->fc_scope >= RT_SCOPE_LINK)
				return -EINVAL;
			if (inet_addr_type(net, nh->nh_gw) != RTN_UNICAST)
				return -EINVAL;
			dev = __dev_get_by_index(net, nh->nh_oif);
			if (!dev)
				return -ENODEV;
			if (!(dev->flags & IFF_UP))
				return -ENETDOWN;
			nh->nh_dev = dev;
			dev_hold(dev);
			nh->nh_scope = RT_SCOPE_LINK;
			return 0;
		}
		rcu_read_lock();
		{
			struct flowi4 fl4 = {
				.daddr = nh->nh_gw,
				.flowi4_scope = cfg->fc_scope + 1,
				.flowi4_oif = nh->nh_oif,
			};

			/* It is not necessary, but requires a bit of thinking */
			if (fl4.flowi4_scope < RT_SCOPE_LINK)
				fl4.flowi4_scope = RT_SCOPE_LINK;
			err = fib_lookup(net, &fl4, &res);
			if (err) {
				rcu_read_unlock();
				return err;
			}
		}
		err = -EINVAL;
		if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
			goto out;
		nh->nh_scope = res.scope;
		nh->nh_oif = FIB_RES_OIF(res);
		nh->nh_dev = dev = FIB_RES_DEV(res);
		if (!dev)
			goto out;
		dev_hold(dev);
		err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
	} else {
		struct in_device *in_dev;

		if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK))
			return -EINVAL;

		rcu_read_lock();
		err = -ENODEV;
		in_dev = inetdev_by_index(net, nh->nh_oif);
		if (in_dev == NULL)
			goto out;
		err = -ENETDOWN;
		if (!(in_dev->dev->flags & IFF_UP))
			goto out;
		nh->nh_dev = in_dev->dev;
		dev_hold(nh->nh_dev);
		nh->nh_scope = RT_SCOPE_HOST;
		err = 0;
	}
out:
	rcu_read_unlock();
	return err;
}

static inline unsigned int fib_laddr_hashfn(__be32 val)
{
	unsigned int mask = (fib_info_hash_size - 1);

	return ((__force u32)val ^
		((__force u32)val >> 7) ^
		((__force u32)val >> 14)) & mask;
}

static struct hlist_head *fib_info_hash_alloc(int bytes)
{
	if (bytes <= PAGE_SIZE)
		return kzalloc(bytes, GFP_KERNEL);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(bytes));
}

static void fib_info_hash_free(struct hlist_head *hash, int bytes)
{
	if (!hash)
		return;

	if (bytes <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long) hash, get_order(bytes));
}
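/* Rehash all fib_info entries from the old tables into @new_info_hash
 * and @new_laddrhash. Runs under fib_info_lock, so concurrent inserts
 * and removals are held off while the tables are swapped; the old
 * tables are freed after the swap, outside the lock.
 */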
static void fib_info_hash_move(struct hlist_head *new_info_hash,
			       struct hlist_head *new_laddrhash,
			       unsigned int new_size)
{
	struct hlist_head *old_info_hash, *old_laddrhash;
	unsigned int old_size = fib_info_hash_size;
	unsigned int i, bytes;

	spin_lock_bh(&fib_info_lock);
	old_info_hash = fib_info_hash;
	old_laddrhash = fib_info_laddrhash;
	fib_info_hash_size = new_size;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *head = &fib_info_hash[i];
		struct hlist_node *node, *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, node, n, head, fib_hash) {
			struct hlist_head *dest;
			unsigned int new_hash;

			hlist_del(&fi->fib_hash);

			new_hash = fib_info_hashfn(fi);
			dest = &new_info_hash[new_hash];
			hlist_add_head(&fi->fib_hash, dest);
		}
	}
	fib_info_hash = new_info_hash;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *lhead = &fib_info_laddrhash[i];
		struct hlist_node *node, *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, node, n, lhead, fib_lhash) {
			struct hlist_head *ldest;
			unsigned int new_hash;

			hlist_del(&fi->fib_lhash);

			new_hash = fib_laddr_hashfn(fi->fib_prefsrc);
			ldest = &new_laddrhash[new_hash];
			hlist_add_head(&fi->fib_lhash, ldest);
		}
	}
	fib_info_laddrhash = new_laddrhash;

	spin_unlock_bh(&fib_info_lock);

	bytes = old_size * sizeof(struct hlist_head *);
	fib_info_hash_free(old_info_hash, bytes);
	fib_info_hash_free(old_laddrhash, bytes);
}

__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
{
	nh->nh_saddr = inet_select_addr(nh->nh_dev,
					nh->nh_gw,
					nh->nh_parent->fib_scope);
	nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);

	return nh->nh_saddr;
}
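/* Build a fib_info from a parsed route request. On success the info is
 * hashed and its tree refcount taken; if a semantically identical info
 * already exists (see fib_find_info()), the new one is freed and the
 * existing one returned instead, so callers must use the return value.
 * Returns an ERR_PTR() on failure.
 */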
struct fib_info *fib_create_info(struct fib_config *cfg)
{
	int err;
	struct fib_info *fi = NULL;
	struct fib_info *ofi;
	int nhs = 1;
	struct net *net = cfg->fc_nlinfo.nl_net;

	if (cfg->fc_type > RTN_MAX)
		goto err_inval;

	/* Fast check to catch the most weird cases */
	if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
		goto err_inval;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (cfg->fc_mp) {
		nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
		if (nhs == 0)
			goto err_inval;
	}
#endif

	err = -ENOBUFS;
	if (fib_info_cnt >= fib_info_hash_size) {
		unsigned int new_size = fib_info_hash_size << 1;
		struct hlist_head *new_info_hash;
		struct hlist_head *new_laddrhash;
		unsigned int bytes;

		if (!new_size)
			new_size = 1;
		bytes = new_size * sizeof(struct hlist_head *);
		new_info_hash = fib_info_hash_alloc(bytes);
		new_laddrhash = fib_info_hash_alloc(bytes);
		if (!new_info_hash || !new_laddrhash) {
			fib_info_hash_free(new_info_hash, bytes);
			fib_info_hash_free(new_laddrhash, bytes);
		} else
			fib_info_hash_move(new_info_hash, new_laddrhash, new_size);

		if (!fib_info_hash_size)
			goto failure;
	}

	fi = kzalloc(sizeof(*fi) + nhs * sizeof(struct fib_nh), GFP_KERNEL);
	if (fi == NULL)
		goto failure;
	if (cfg->fc_mx) {
		fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
		if (!fi->fib_metrics)
			goto failure;
	} else
		fi->fib_metrics = (u32 *) dst_default_metrics;
	fib_info_cnt++;

	fi->fib_net = hold_net(net);
	fi->fib_protocol = cfg->fc_protocol;
	fi->fib_scope = cfg->fc_scope;
	fi->fib_flags = cfg->fc_flags;
	fi->fib_priority = cfg->fc_priority;
	fi->fib_prefsrc = cfg->fc_prefsrc;

	fi->fib_nhs = nhs;
	change_nexthops(fi) {
		nexthop_nh->nh_parent = fi;
	} endfor_nexthops(fi)

	if (cfg->fc_mx) {
		struct nlattr *nla;
		int remaining;

		nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
			int type = nla_type(nla);

			if (type) {
				u32 val;

				if (type > RTAX_MAX)
					goto err_inval;
				val = nla_get_u32(nla);
				if (type == RTAX_ADVMSS && val > 65535 - 40)
					val = 65535 - 40;
				if (type == RTAX_MTU && val > 65535 - 15)
					val = 65535 - 15;
				fi->fib_metrics[type - 1] = val;
			}
		}
	}

	if (cfg->fc_mp) {
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg);
		if (err != 0)
			goto failure;
		if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif)
			goto err_inval;
		if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
			goto err_inval;
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)
			goto err_inval;
#endif
#else
		goto err_inval;
#endif
	} else {
		struct fib_nh *nh = fi->fib_nh;

		nh->nh_oif = cfg->fc_oif;
		nh->nh_gw = cfg->fc_gw;
		nh->nh_flags = cfg->fc_flags;
#ifdef CONFIG_IP_ROUTE_CLASSID
		nh->nh_tclassid = cfg->fc_flow;
		if (nh->nh_tclassid)
			fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		nh->nh_weight = 1;
#endif
	}

	if (fib_props[cfg->fc_type].error) {
		if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp)
			goto err_inval;
		goto link_it;
	} else {
		switch (cfg->fc_type) {
		case RTN_UNICAST:
		case RTN_LOCAL:
		case RTN_BROADCAST:
		case RTN_ANYCAST:
		case RTN_MULTICAST:
			break;
		default:
			goto err_inval;
		}
	}

	if (cfg->fc_scope > RT_SCOPE_HOST)
		goto err_inval;

	if (cfg->fc_scope == RT_SCOPE_HOST) {
		struct fib_nh *nh = fi->fib_nh;

		/* Local address is added. */
		if (nhs != 1 || nh->nh_gw)
			goto err_inval;
		nh->nh_scope = RT_SCOPE_NOWHERE;
		nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
		err = -ENODEV;
		if (nh->nh_dev == NULL)
			goto failure;
	} else {
		change_nexthops(fi) {
			err = fib_check_nh(cfg, fi, nexthop_nh);
			if (err != 0)
				goto failure;
		} endfor_nexthops(fi)
	}

	if (fi->fib_prefsrc) {
		if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
		    fi->fib_prefsrc != cfg->fc_dst)
			if (inet_addr_type(net, fi->fib_prefsrc) != RTN_LOCAL)
				goto err_inval;
	}

	change_nexthops(fi) {
		fib_info_update_nh_saddr(net, nexthop_nh);
	} endfor_nexthops(fi)

link_it:
	ofi = fib_find_info(fi);
	if (ofi) {
		fi->fib_dead = 1;
		free_fib_info(fi);
		ofi->fib_treeref++;
		return ofi;
	}

	fi->fib_treeref++;
	atomic_inc(&fi->fib_clntref);
	spin_lock_bh(&fib_info_lock);
	hlist_add_head(&fi->fib_hash,
		       &fib_info_hash[fib_info_hashfn(fi)]);
	if (fi->fib_prefsrc) {
		struct hlist_head *head;

		head = &fib_info_laddrhash[fib_laddr_hashfn(fi->fib_prefsrc)];
		hlist_add_head(&fi->fib_lhash, head);
	}
	change_nexthops(fi) {
		struct hlist_head *head;
		unsigned int hash;

		if (!nexthop_nh->nh_dev)
			continue;
		hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex);
		head = &fib_info_devhash[hash];
		hlist_add_head(&nexthop_nh->nh_hash, head);
	} endfor_nexthops(fi)
	spin_unlock_bh(&fib_info_lock);
	return fi;

err_inval:
	err = -EINVAL;

failure:
	if (fi) {
		fi->fib_dead = 1;
		free_fib_info(fi);
	}

	return ERR_PTR(err);
}
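/* Fill @skb with one route message (header plus attributes) describing
 * @fi. Returns the message length on success or -EMSGSIZE if @skb runs
 * out of room; every attribute emitted here must be accounted for in
 * fib_nlmsg_size(), which is what the WARN_ON() in rtmsg_fib() checks.
 */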
int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
		  u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos,
		  struct fib_info *fi, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET;
	rtm->rtm_dst_len = dst_len;
	rtm->rtm_src_len = 0;
	rtm->rtm_tos = tos;
	if (tb_id < 256)
		rtm->rtm_table = tb_id;
	else
		rtm->rtm_table = RT_TABLE_COMPAT;
	if (nla_put_u32(skb, RTA_TABLE, tb_id))
		goto nla_put_failure;
	rtm->rtm_type = type;
	rtm->rtm_flags = fi->fib_flags;
	rtm->rtm_scope = fi->fib_scope;
	rtm->rtm_protocol = fi->fib_protocol;

	if (rtm->rtm_dst_len &&
	    nla_put_be32(skb, RTA_DST, dst))
		goto nla_put_failure;
	if (fi->fib_priority &&
	    nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
		goto nla_put_failure;
	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
		goto nla_put_failure;

	if (fi->fib_prefsrc &&
	    nla_put_be32(skb, RTA_PREFSRC, fi->fib_prefsrc))
		goto nla_put_failure;
	if (fi->fib_nhs == 1) {
		if (fi->fib_nh->nh_gw &&
		    nla_put_be32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw))
			goto nla_put_failure;
		if (fi->fib_nh->nh_oif &&
		    nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif))
			goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (fi->fib_nh[0].nh_tclassid &&
		    nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
			goto nla_put_failure;
#endif
	}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (fi->fib_nhs > 1) {
		struct rtnexthop *rtnh;
		struct nlattr *mp;

		mp = nla_nest_start(skb, RTA_MULTIPATH);
		if (mp == NULL)
			goto nla_put_failure;

		for_nexthops(fi) {
			rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
			if (rtnh == NULL)
				goto nla_put_failure;

			rtnh->rtnh_flags = nh->nh_flags & 0xFF;
			rtnh->rtnh_hops = nh->nh_weight - 1;
			rtnh->rtnh_ifindex = nh->nh_oif;

			if (nh->nh_gw &&
			    nla_put_be32(skb, RTA_GATEWAY, nh->nh_gw))
				goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
			if (nh->nh_tclassid &&
			    nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
				goto nla_put_failure;
#endif
			/* length of rtnetlink header + attributes */
			rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
		} endfor_nexthops(fi);

		nla_nest_end(skb, mp);
	}
#endif
	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
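/*
 * The sync helpers below walk fib_info_devhash and fib_info_laddrhash,
 * the side indexes (by nexthop device and by preferred source address)
 * that fib_create_info() fills in when an info is linked.
 */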
/*
 * Update FIB if:
 * - local address disappeared -> we must delete all the entries
 *   referring to it.
 * - device went down -> we must shutdown all nexthops going via it.
 */
int fib_sync_down_addr(struct net *net, __be32 local)
{
	int ret = 0;
	unsigned int hash = fib_laddr_hashfn(local);
	struct hlist_head *head = &fib_info_laddrhash[hash];
	struct hlist_node *node;
	struct fib_info *fi;

	if (fib_info_laddrhash == NULL || local == 0)
		return 0;

	hlist_for_each_entry(fi, node, head, fib_lhash) {
		if (!net_eq(fi->fib_net, net))
			continue;
		if (fi->fib_prefsrc == local) {
			fi->fib_flags |= RTNH_F_DEAD;
			ret++;
		}
	}
	return ret;
}

int fib_sync_down_dev(struct net_device *dev, int force)
{
	int ret = 0;
	int scope = RT_SCOPE_NOWHERE;
	struct fib_info *prev_fi = NULL;
	unsigned int hash = fib_devindex_hashfn(dev->ifindex);
	struct hlist_head *head = &fib_info_devhash[hash];
	struct hlist_node *node;
	struct fib_nh *nh;

	if (force)
		scope = -1;

	hlist_for_each_entry(nh, node, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int dead;

		BUG_ON(!fi->fib_nhs);
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;
		prev_fi = fi;
		dead = 0;
		change_nexthops(fi) {
			if (nexthop_nh->nh_flags & RTNH_F_DEAD)
				dead++;
			else if (nexthop_nh->nh_dev == dev &&
				 nexthop_nh->nh_scope != scope) {
				nexthop_nh->nh_flags |= RTNH_F_DEAD;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
				spin_lock_bh(&fib_multipath_lock);
				fi->fib_power -= nexthop_nh->nh_power;
				nexthop_nh->nh_power = 0;
				spin_unlock_bh(&fib_multipath_lock);
#endif
				dead++;
			}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
			if (force > 1 && nexthop_nh->nh_dev == dev) {
				dead = fi->fib_nhs;
				break;
			}
#endif
		} endfor_nexthops(fi)
		if (dead == fi->fib_nhs) {
			fi->fib_flags |= RTNH_F_DEAD;
			ret++;
		}
	}

	return ret;
}

/* Must be invoked inside of an RCU protected region. */
void fib_select_default(struct fib_result *res)
{
	struct fib_info *fi = NULL, *last_resort = NULL;
	struct list_head *fa_head = res->fa_head;
	struct fib_table *tb = res->table;
	int order = -1, last_idx = -1;
	struct fib_alias *fa;

	list_for_each_entry_rcu(fa, fa_head, fa_list) {
		struct fib_info *next_fi = fa->fa_info;

		if (next_fi->fib_scope != res->scope ||
		    fa->fa_type != RTN_UNICAST)
			continue;

		if (next_fi->fib_priority > res->fi->fib_priority)
			break;
		if (!next_fi->fib_nh[0].nh_gw ||
		    next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
			continue;

		fib_alias_accessed(fa);

		if (fi == NULL) {
			if (next_fi != res->fi)
				break;
		} else if (!fib_detect_death(fi, order, &last_resort,
					     &last_idx, tb->tb_default)) {
			fib_result_assign(res, fi);
			tb->tb_default = order;
			goto out;
		}
		fi = next_fi;
		order++;
	}

	if (order <= 0 || fi == NULL) {
		tb->tb_default = -1;
		goto out;
	}

	if (!fib_detect_death(fi, order, &last_resort, &last_idx,
			      tb->tb_default)) {
		fib_result_assign(res, fi);
		tb->tb_default = order;
		goto out;
	}

	if (last_idx >= 0)
		fib_result_assign(res, last_resort);
	tb->tb_default = last_idx;
out:
	return;
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH

/*
 * A dead device goes up. We wake up dead nexthops.
 * It makes sense only for multipath routes.
 */
int fib_sync_up(struct net_device *dev)
{
	struct fib_info *prev_fi;
	unsigned int hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct fib_nh *nh;
	int ret;

	if (!(dev->flags & IFF_UP))
		return 0;

	prev_fi = NULL;
	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	ret = 0;

	hlist_for_each_entry(nh, node, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int alive;

		BUG_ON(!fi->fib_nhs);
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;

		prev_fi = fi;
		alive = 0;
		change_nexthops(fi) {
			if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
				alive++;
				continue;
			}
			if (nexthop_nh->nh_dev == NULL ||
			    !(nexthop_nh->nh_dev->flags & IFF_UP))
				continue;
			if (nexthop_nh->nh_dev != dev ||
			    !__in_dev_get_rtnl(dev))
				continue;
			alive++;
			spin_lock_bh(&fib_multipath_lock);
			nexthop_nh->nh_power = 0;
			nexthop_nh->nh_flags &= ~RTNH_F_DEAD;
			spin_unlock_bh(&fib_multipath_lock);
		} endfor_nexthops(fi)

		if (alive > 0) {
			fi->fib_flags &= ~RTNH_F_DEAD;
			ret++;
		}
	}

	return ret;
}

/*
 * The algorithm is suboptimal, but it provides really
 * fair weighted route distribution.
 */
void fib_select_multipath(struct fib_result *res)
{
	struct fib_info *fi = res->fi;
	int w;

	spin_lock_bh(&fib_multipath_lock);
	if (fi->fib_power <= 0) {
		int power = 0;
		change_nexthops(fi) {
			if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
				power += nexthop_nh->nh_weight;
				nexthop_nh->nh_power = nexthop_nh->nh_weight;
			}
		} endfor_nexthops(fi);
		fi->fib_power = power;
		if (power <= 0) {
			spin_unlock_bh(&fib_multipath_lock);
			/* Race condition: route has just become dead. */
			res->nh_sel = 0;
			return;
		}
	}

	/* w should be a random number in [0..fi->fib_power-1];
	 * jiffies is a pretty bad approximation.
	 */
	w = jiffies % fi->fib_power;

	change_nexthops(fi) {
		if (!(nexthop_nh->nh_flags & RTNH_F_DEAD) &&
		    nexthop_nh->nh_power) {
			w -= nexthop_nh->nh_power;
			if (w <= 0) {
				nexthop_nh->nh_power--;
				fi->fib_power--;
				res->nh_sel = nhsel;
				spin_unlock_bh(&fib_multipath_lock);
				return;
			}
		}
	} endfor_nexthops(fi);

	/* Race condition: route has just become dead. */
	res->nh_sel = 0;
	spin_unlock_bh(&fib_multipath_lock);
}
#endif