// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	:	MSS actually. Also added the window
 *					clamper.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD;
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 *		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 *	Ilia Sotnikov		:	Removed TOS from hash calculations
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/socket.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/inet_dscp.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/nexthop.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <net/secure_seq.h>
#include <net/ip_tunnels.h>

#include "fib_lookup.h"

#define RT_GC_TIMEOUT (300*HZ)

#define DEFAULT_MIN_PMTU (512 + 20 + 20)
#define DEFAULT_MTU_EXPIRES (10 * 60 * HZ)
#define DEFAULT_MIN_ADVMSS 256
static int ip_rt_max_size;
static int ip_rt_redirect_number __read_mostly = 9;
static int ip_rt_redirect_load __read_mostly = HZ / 50;
static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly = HZ;
static int ip_rt_error_burst __read_mostly = 5 * HZ;

static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
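
/* Example (illustrative arithmetic, not part of the original source): with
 * HZ == 1000 the defaults above work out to
 *
 *	ip_rt_redirect_load    = 1000 / 50         =    20 jiffies (20 ms)
 *	ip_rt_redirect_silence = (1000 / 50) << 10 = 20480 jiffies (~20.5 s)
 *	ip_rt_error_cost       = 1000 jiffies (one ICMP error per second)
 *	ip_rt_error_burst      = 5000 jiffies (bursts of up to five errors)
 */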
/*
 *	Interface to generic destination cache.
 */

INDIRECT_CALLABLE_SCOPE
struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
INDIRECT_CALLABLE_SCOPE
unsigned int ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			      struct sk_buff *skb, u32 mtu,
			      bool confirm_neigh);
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
			   struct sk_buff *skb);
static void ipv4_dst_destroy(struct dst_entry *dst);

static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	WARN_ON(1);
	return NULL;
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr);
static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);

static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.check =		ipv4_dst_check,
	.default_advmss =	ipv4_default_advmss,
	.mtu =			ipv4_mtu,
	.cow_metrics =		ipv4_cow_metrics,
	.destroy =		ipv4_dst_destroy,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.redirect =		ip_do_redirect,
	.local_out =		__ip_local_out,
	.neigh_lookup =		ipv4_neigh_lookup,
	.confirm_neigh =	ipv4_confirm_neigh,
};

#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
EXPORT_SYMBOL(ip_tos2prio);

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)

#ifdef CONFIG_PROC_FS
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos)
		return NULL;
	return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
	.start  = rt_cache_seq_start,
	.next   = rt_cache_seq_next,
	.stop   = rt_cache_seq_stop,
	.show   = rt_cache_seq_show,
};
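
/* Example (illustrative): since the removal of the IPv4 routing cache,
 * reading /proc/net/rt_cache only ever produces the header line above:
 *
 *	$ cat /proc/net/rt_cache
 *	Iface	Destination	Gateway 	Flags	...	SpecDst
 *
 * The file is kept so legacy tools that parse it keep working; the per-cpu
 * statistics in /proc/net/stat/rt_cache (below) remain meaningful.
 */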
static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	(*pos)++;
	return NULL;
}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x "
		   "%08x %08x %08x %08x %08x %08x "
		   "%08x %08x %08x %08x\n",
		   dst_entries_get_slow(&ipv4_dst_ops),
		   0, /* st->in_hit */
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,

		   0, /* st->out_hit */
		   st->out_slow_tot,
		   st->out_slow_mc,

		   0, /* st->gc_total */
		   0, /* st->gc_ignored */
		   0, /* st->gc_goal_miss */
		   0, /* st->gc_dst_overflow */
		   0, /* st->in_hlist_search */
		   0  /* st->out_hlist_search */
		);
	return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
	.start  = rt_cpu_seq_start,
	.next   = rt_cpu_seq_next,
	.stop   = rt_cpu_seq_stop,
	.show   = rt_cpu_seq_show,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
	struct ip_rt_acct *dst, *src;
	unsigned int i, j;

	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
		for (j = 0; j < 256; j++) {
			dst[j].o_bytes   += src[j].o_bytes;
			dst[j].o_packets += src[j].o_packets;
			dst[j].i_bytes   += src[j].i_bytes;
			dst[j].i_packets += src[j].i_packets;
		}
	}

	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
	kfree(dst);
	return 0;
}
#endif

static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_create_seq("rt_cache", 0444, net->proc_net,
			      &rt_cache_seq_ops);
	if (!pde)
		goto err1;

	pde = proc_create_seq("rt_cache", 0444, net->proc_net_stat,
			      &rt_cpu_seq_ops);
	if (!pde)
		goto err2;

#ifdef CONFIG_IP_ROUTE_CLASSID
	pde = proc_create_single("rt_acct", 0, net->proc_net,
				 rt_acct_proc_show);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
	remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata =	{
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */
static inline bool rt_is_expired(const struct rtable *rth)
{
	return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
}

void rt_cache_flush(struct net *net)
{
	rt_genid_bump_ipv4(net);
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	const struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net_device *dev = dst->dev;
	struct neighbour *n;

	rcu_read_lock();

	if (likely(rt->rt_gw_family == AF_INET)) {
		n = ip_neigh_gw4(dev, rt->rt_gw4);
	} else if (rt->rt_gw_family == AF_INET6) {
		n = ip_neigh_gw6(dev, &rt->rt_gw6);
	} else {
		__be32 pkey;

		pkey = skb ? ip_hdr(skb)->daddr : *((__be32 *) daddr);
		n = ip_neigh_gw4(dev, pkey);
	}

	if (!IS_ERR(n) && !refcount_inc_not_zero(&n->refcnt))
		n = NULL;

	rcu_read_unlock();

	return n;
}

static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	const struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net_device *dev = dst->dev;
	const __be32 *pkey = daddr;

	if (rt->rt_gw_family == AF_INET) {
		pkey = (const __be32 *)&rt->rt_gw4;
	} else if (rt->rt_gw_family == AF_INET6) {
		return __ipv6_confirm_neigh_stub(dev, &rt->rt_gw6);
	} else if (!daddr ||
		   (rt->rt_flags &
		    (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL))) {
		return;
	}
	__ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
}

/* Hash tables of size 2048..262144 depending on RAM size.
 * Each bucket uses 8 bytes.
 */
static u32 ip_idents_mask __read_mostly;
static atomic_t *ip_idents __read_mostly;
static u32 *ip_tstamps __read_mostly;

/* In order to protect privacy, we add a perturbation to identifiers
 * if one generator is seldom used. This makes it hard for an attacker
 * to infer how many packets were sent between two points in time.
 */
static u32 ip_idents_reserve(u32 hash, int segs)
{
	u32 bucket, old, now = (u32)jiffies;
	atomic_t *p_id;
	u32 *p_tstamp;
	u32 delta = 0;

	bucket = hash & ip_idents_mask;
	p_tstamp = ip_tstamps + bucket;
	p_id = ip_idents + bucket;
	old = READ_ONCE(*p_tstamp);

	if (old != now && cmpxchg(p_tstamp, old, now) == old)
		delta = get_random_u32_below(now - old);

	/* If UBSAN reports an error here, please make sure your compiler
	 * supports -fno-strict-overflow before reporting it; that was a bug
	 * in UBSAN, and it has been fixed in GCC-8.
	 */
	return atomic_add_return(segs + delta, p_id) - segs;
}
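
/* Example (illustrative, simplified): if a bucket was last used 100 jiffies
 * ago and a 3-segment GSO packet needs IDs, ip_idents_reserve() advances the
 * generator by 3 plus a random delta in [0, 100), and hands back old + delta
 * as the first of three consecutive IDs:
 *
 *	id = atomic_add_return(3 + delta, p_id) - 3;	// IDs id .. id+2
 *
 * The random delta hides how many packets used the bucket while it was idle.
 */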
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
{
	u32 hash, id;

	/* Note the following code is not safe, but this is okay. */
	if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
		get_random_bytes(&net->ipv4.ip_id_key,
				 sizeof(net->ipv4.ip_id_key));

	hash = siphash_3u32((__force u32)iph->daddr,
			    (__force u32)iph->saddr,
			    iph->protocol,
			    &net->ipv4.ip_id_key);
	id = ip_idents_reserve(hash, segs);
	iph->id = htons(id);
}
EXPORT_SYMBOL(__ip_select_ident);

static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
			     const struct sock *sk, const struct iphdr *iph,
			     int oif, __u8 tos, u8 prot, u32 mark,
			     int flow_flags)
{
	__u8 scope = RT_SCOPE_UNIVERSE;

	if (sk) {
		oif = sk->sk_bound_dev_if;
		mark = READ_ONCE(sk->sk_mark);
		tos = ip_sock_rt_tos(sk);
		scope = ip_sock_rt_scope(sk);
		prot = inet_test_bit(HDRINCL, sk) ? IPPROTO_RAW :
						    sk->sk_protocol;
	}

	flowi4_init_output(fl4, oif, mark, tos & IPTOS_RT_MASK, scope,
			   prot, flow_flags, iph->daddr, iph->saddr, 0, 0,
			   sock_net_uid(net, sk));
}

static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
			       const struct sock *sk)
{
	const struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = ip_hdr(skb);
	int oif = skb->dev->ifindex;
	u8 prot = iph->protocol;
	u32 mark = skb->mark;
	__u8 tos = iph->tos;

	__build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
}
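
/* Example (illustrative sketch, not from this file): the flow key built by
 * __build_flow_key() is what callers feed into the output routing code:
 *
 *	struct flowi4 fl4;
 *	struct rtable *rt;
 *
 *	__build_flow_key(net, &fl4, NULL, ip_hdr(skb), skb->dev->ifindex,
 *			 ip_hdr(skb)->tos, ip_hdr(skb)->protocol,
 *			 skb->mark, 0);
 *	rt = __ip_route_output_key(net, &fl4);
 *	if (!IS_ERR(rt))
 *		ip_rt_put(rt);
 *
 * ipv4_update_pmtu() and ipv4_redirect() below follow exactly this pattern.
 */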
static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	flowi4_init_output(fl4, sk->sk_bound_dev_if, READ_ONCE(sk->sk_mark),
			   ip_sock_rt_tos(sk) & IPTOS_RT_MASK,
			   ip_sock_rt_scope(sk),
			   inet_test_bit(HDRINCL, sk) ?
				IPPROTO_RAW : sk->sk_protocol,
			   inet_sk_flowi_flags(sk),
			   daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
	rcu_read_unlock();
}

static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
				 const struct sk_buff *skb)
{
	if (skb)
		build_skb_flow_key(fl4, skb, sk);
	else
		build_sk_flow_key(fl4, sk);
}

static DEFINE_SPINLOCK(fnhe_lock);

static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
{
	struct rtable *rt;

	rt = rcu_dereference(fnhe->fnhe_rth_input);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
		dst_dev_put(&rt->dst);
		dst_release(&rt->dst);
	}
	rt = rcu_dereference(fnhe->fnhe_rth_output);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
		dst_dev_put(&rt->dst);
		dst_release(&rt->dst);
	}
}

static void fnhe_remove_oldest(struct fnhe_hash_bucket *hash)
{
	struct fib_nh_exception __rcu **fnhe_p, **oldest_p;
	struct fib_nh_exception *fnhe, *oldest = NULL;

	for (fnhe_p = &hash->chain; ; fnhe_p = &fnhe->fnhe_next) {
		fnhe = rcu_dereference_protected(*fnhe_p,
						 lockdep_is_held(&fnhe_lock));
		if (!fnhe)
			break;
		if (!oldest ||
		    time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp)) {
			oldest = fnhe;
			oldest_p = fnhe_p;
		}
	}
	fnhe_flush_routes(oldest);
	*oldest_p = oldest->fnhe_next;
	kfree_rcu(oldest, rcu);
}

static u32 fnhe_hashfun(__be32 daddr)
{
	static siphash_aligned_key_t fnhe_hash_key;
	u64 hval;

	net_get_random_once(&fnhe_hash_key, sizeof(fnhe_hash_key));
	hval = siphash_1u32((__force u32)daddr, &fnhe_hash_key);
	return hash_64(hval, FNHE_HASH_SHIFT);
}

static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
{
	rt->rt_pmtu = fnhe->fnhe_pmtu;
	rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
	rt->dst.expires = fnhe->fnhe_expires;

	if (fnhe->fnhe_gw) {
		rt->rt_flags |= RTCF_REDIRECTED;
		rt->rt_uses_gateway = 1;
		rt->rt_gw_family = AF_INET;
		rt->rt_gw4 = fnhe->fnhe_gw;
	}
}
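
/* Example (illustrative): nexthop exceptions live in a small per-nexthop
 * hash table of FNHE_HASH_SIZE buckets, each bucket a singly linked chain:
 *
 *	nhc->nhc_exceptions[fnhe_hashfun(daddr)]
 *		-> fnhe(daddr A) -> fnhe(daddr B) -> NULL
 *
 * A learned PMTU or ICMP redirect for a single destination is stored here
 * rather than in the FIB itself; update_or_create_fnhe() below maintains
 * the chain under fnhe_lock.
 */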
static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
				  __be32 gw, u32 pmtu, bool lock,
				  unsigned long expires)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe;
	struct rtable *rt;
	u32 genid, hval;
	unsigned int i;
	int depth;

	genid = fnhe_genid(dev_net(nhc->nhc_dev));
	hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference(nhc->nhc_exceptions);
	if (!hash) {
		hash = kcalloc(FNHE_HASH_SIZE, sizeof(*hash), GFP_ATOMIC);
		if (!hash)
			goto out_unlock;
		rcu_assign_pointer(nhc->nhc_exceptions, hash);
	}

	hash += hval;

	depth = 0;
	for (fnhe = rcu_dereference(hash->chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr)
			break;
		depth++;
	}

	if (fnhe) {
		if (fnhe->fnhe_genid != genid)
			fnhe->fnhe_genid = genid;
		if (gw)
			fnhe->fnhe_gw = gw;
		if (pmtu) {
			fnhe->fnhe_pmtu = pmtu;
			fnhe->fnhe_mtu_locked = lock;
		}
		fnhe->fnhe_expires = max(1UL, expires);
		/* Update all cached dsts too */
		rt = rcu_dereference(fnhe->fnhe_rth_input);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
		rt = rcu_dereference(fnhe->fnhe_rth_output);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
	} else {
		/* Randomize max depth to avoid some side-channel attacks. */
		int max_depth = FNHE_RECLAIM_DEPTH +
				get_random_u32_below(FNHE_RECLAIM_DEPTH);

		while (depth > max_depth) {
			fnhe_remove_oldest(hash);
			depth--;
		}

		fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
		if (!fnhe)
			goto out_unlock;

		fnhe->fnhe_next = hash->chain;

		fnhe->fnhe_genid = genid;
		fnhe->fnhe_daddr = daddr;
		fnhe->fnhe_gw = gw;
		fnhe->fnhe_pmtu = pmtu;
		fnhe->fnhe_mtu_locked = lock;
		fnhe->fnhe_expires = max(1UL, expires);

		rcu_assign_pointer(hash->chain, fnhe);

		/* Exception created; mark the cached routes for the nexthop
		 * stale, so anyone caching it rechecks if this exception
		 * applies to them.
		 */
		rt = rcu_dereference(nhc->nhc_rth_input);
		if (rt)
			rt->dst.obsolete = DST_OBSOLETE_KILL;

		for_each_possible_cpu(i) {
			struct rtable __rcu **prt;

			prt = per_cpu_ptr(nhc->nhc_pcpu_rth_output, i);
			rt = rcu_dereference(*prt);
			if (rt)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
		}
	}

	fnhe->fnhe_stamp = jiffies;

out_unlock:
	spin_unlock_bh(&fnhe_lock);
}

static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
			     bool kill_route)
{
	__be32 new_gw = icmp_hdr(skb)->un.gateway;
	__be32 old_gw = ip_hdr(skb)->saddr;
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct fib_result res;
	struct neighbour *n;
	struct net *net;

	switch (icmp_hdr(skb)->code & 7) {
	case ICMP_REDIR_NET:
	case ICMP_REDIR_NETTOS:
	case ICMP_REDIR_HOST:
	case ICMP_REDIR_HOSTTOS:
		break;

	default:
		return;
	}

	if (rt->rt_gw_family != AF_INET || rt->rt_gw4 != old_gw)
		return;

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev)
		return;

	net = dev_net(dev);
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
	    ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	n = __ipv4_neigh_lookup(rt->dst.dev, (__force u32)new_gw);
	if (!n)
		n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
	if (!IS_ERR(n)) {
		if (!(READ_ONCE(n->nud_state) & NUD_VALID)) {
			neigh_event_send(n, NULL);
		} else {
			if (fib_lookup(net, fl4, &res, 0) == 0) {
				struct fib_nh_common *nhc;

				fib_select_path(net, &res, fl4, skb);
				nhc = FIB_RES_NHC(res);
				update_or_create_fnhe(nhc, fl4->daddr, new_gw,
						      0, false,
						      jiffies + ip_rt_gc_timeout);
			}
			if (kill_route)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
			call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
		}
		neigh_release(n);
	}
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev)) {
		const struct iphdr *iph = (const struct iphdr *) skb->data;
		__be32 daddr = iph->daddr;
		__be32 saddr = iph->saddr;

		net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
				     "  Advised path = %pI4 -> %pI4\n",
				     &old_gw, dev->name, &new_gw,
				     &saddr, &daddr);
	}
#endif
	;
}
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt;
	struct flowi4 fl4;
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct net *net = dev_net(skb->dev);
	int oif = skb->dev->ifindex;
	u8 prot = iph->protocol;
	u32 mark = skb->mark;
	__u8 tos = iph->tos;

	rt = (struct rtable *) dst;

	__build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
	__ip_do_redirect(rt, skb, &fl4, true);
}

static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;
	struct dst_entry *ret = dst;

	if (rt) {
		if (dst->obsolete > 0) {
			ip_rt_put(rt);
			ret = NULL;
		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
			   rt->dst.expires) {
			ip_rt_put(rt);
			ret = NULL;
		}
	}
	return ret;
}

/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot the redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */
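
/* Example (illustrative arithmetic): with the defaults above and HZ == 1000,
 * redirects to one peer are spaced at ip_rt_redirect_load << n_redirects:
 * 40 ms after the first one, then 80 ms, 160 ms, ... up to ~5.12 s before
 * the ninth and final one. After ip_rt_redirect_number redirects we stay
 * silent until the peer triggers no redirects for ip_rt_redirect_silence
 * (~20.5 s), at which point the counters reset.
 */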
void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct in_device *in_dev;
	struct inet_peer *peer;
	struct net *net;
	int log_martians;
	int vif;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(rt->dst.dev);
	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
		rcu_read_unlock();
		return;
	}
	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
	vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
	rcu_read_unlock();

	net = dev_net(rt->dst.dev);
	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
	if (!peer) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
			  rt_nexthop(rt, ip_hdr(skb)->daddr));
		return;
	}

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
		peer->rate_tokens = 0;
		peer->n_redirects = 0;
	}

	/* Too many ignored redirects; do not send anything
	 * and set peer->rate_last to the last seen redirected packet.
	 */
	if (peer->n_redirects >= ip_rt_redirect_number) {
		peer->rate_last = jiffies;
		goto out_put_peer;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (peer->n_redirects == 0 ||
	    time_after(jiffies,
		       (peer->rate_last +
			(ip_rt_redirect_load << peer->n_redirects)))) {
		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);

		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
		peer->rate_last = jiffies;
		++peer->n_redirects;
		if (IS_ENABLED(CONFIG_IP_ROUTE_VERBOSE) && log_martians &&
		    peer->n_redirects == ip_rt_redirect_number)
			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
					     &ip_hdr(skb)->saddr, inet_iif(skb),
					     &ip_hdr(skb)->daddr, &gw);
	}
out_put_peer:
	inet_putpeer(peer);
}

static int ip_error(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct inet_peer *peer;
	unsigned long now;
	struct net *net;
	SKB_DR(reason);
	bool send;
	int code;

	if (netif_is_l3_master(skb->dev)) {
		dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
		if (!dev)
			goto out;
	}

	in_dev = __in_dev_get_rcu(dev);

	/* IP on this device is disabled. */
	if (!in_dev)
		goto out;

	net = dev_net(rt->dst.dev);
	if (!IN_DEV_FORWARD(in_dev)) {
		switch (rt->dst.error) {
		case EHOSTUNREACH:
			SKB_DR_SET(reason, IP_INADDRERRORS);
			__IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
			break;

		case ENETUNREACH:
			SKB_DR_SET(reason, IP_INNOROUTES);
			__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
			break;
		}
		goto out;
	}

	switch (rt->dst.error) {
	case EINVAL:
	default:
		goto out;
	case EHOSTUNREACH:
		code = ICMP_HOST_UNREACH;
		break;
	case ENETUNREACH:
		code = ICMP_NET_UNREACH;
		SKB_DR_SET(reason, IP_INNOROUTES);
		__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
		break;
	case EACCES:
		code = ICMP_PKT_FILTERED;
		break;
	}

	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
			       l3mdev_master_ifindex(skb->dev), 1);

	send = true;
	if (peer) {
		now = jiffies;
		peer->rate_tokens += now - peer->rate_last;
		if (peer->rate_tokens > ip_rt_error_burst)
			peer->rate_tokens = ip_rt_error_burst;
		peer->rate_last = now;
		if (peer->rate_tokens >= ip_rt_error_cost)
			peer->rate_tokens -= ip_rt_error_cost;
		else
			send = false;
		inet_putpeer(peer);
	}
	if (send)
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:	kfree_skb_reason(skb, reason);
	return 0;
}
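
/* Example (illustrative): ip_error() above is a classic token bucket.
 * Tokens accrue at one per elapsed jiffy, capped at ip_rt_error_burst
 * (5 * HZ); each ICMP error sent costs ip_rt_error_cost (HZ) tokens.
 * So a previously quiet peer can trigger a burst of five errors, then
 * roughly one per second thereafter.
 */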
static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
	struct dst_entry *dst = &rt->dst;
	struct net *net = dev_net(dst->dev);
	struct fib_result res;
	bool lock = false;
	u32 old_mtu;

	if (ip_mtu_locked(dst))
		return;

	old_mtu = ipv4_mtu(dst);
	if (old_mtu < mtu)
		return;

	if (mtu < net->ipv4.ip_rt_min_pmtu) {
		lock = true;
		mtu = min(old_mtu, net->ipv4.ip_rt_min_pmtu);
	}

	if (rt->rt_pmtu == mtu && !lock &&
	    time_before(jiffies, dst->expires - net->ipv4.ip_rt_mtu_expires / 2))
		return;

	rcu_read_lock();
	if (fib_lookup(net, fl4, &res, 0) == 0) {
		struct fib_nh_common *nhc;

		fib_select_path(net, &res, fl4, NULL);
		nhc = FIB_RES_NHC(res);
		update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
				      jiffies + net->ipv4.ip_rt_mtu_expires);
	}
	rcu_read_unlock();
}

static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			      struct sk_buff *skb, u32 mtu,
			      bool confirm_neigh)
{
	struct rtable *rt = (struct rtable *) dst;
	struct flowi4 fl4;

	ip_rt_build_flow_key(&fl4, sk, skb);

	/* Don't make lookup fail for bridged encapsulations */
	if (skb && netif_is_any_bridge_port(skb->dev))
		fl4.flowi4_oif = 0;

	__ip_rt_update_pmtu(rt, &fl4, mtu);
}

void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
		      int oif, u8 protocol)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	u32 mark = IP4_REPLY_MARK(net, skb->mark);

	__build_flow_key(net, &fl4, NULL, iph, oif, iph->tos, protocol, mark,
			 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_update_pmtu);

static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);

	if (!fl4.flowi4_mark)
		fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);

	rt = __ip_route_output_key(sock_net(sk), &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}

void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct dst_entry *odst = NULL;
	bool new = false;
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);

	if (!ip_sk_accept_pmtu(sk))
		goto out;

	odst = sk_dst_get(sk);

	if (sock_owned_by_user(sk) || !odst) {
		__ipv4_sk_update_pmtu(skb, sk, mtu);
		goto out;
	}

	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);

	rt = (struct rtable *)odst;
	if (odst->obsolete && !odst->ops->check(odst, 0)) {
		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	__ip_rt_update_pmtu((struct rtable *)xfrm_dst_path(&rt->dst), &fl4, mtu);

	if (!dst_check(&rt->dst, 0)) {
		if (new)
			dst_release(&rt->dst);

		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	if (new)
		sk_dst_set(sk, &rt->dst);

out:
	bh_unlock_sock(sk);
	dst_release(odst);
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);

void ipv4_redirect(struct sk_buff *skb, struct net *net,
		   int oif, u8 protocol)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(net, &fl4, NULL, iph, oif, iph->tos, protocol, 0, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_redirect);
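
/* Example (illustrative sketch): tunnel drivers are typical users of the
 * exported helpers above. On an ICMP error for an encapsulated packet, code
 * along these lines (cf. ipip_err() in net/ipv4/ipip.c) updates the nexthop
 * exception for the tunnel endpoint; t, info, type and code come from the
 * hypothetical caller's context:
 *
 *	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
 *		ipv4_update_pmtu(skb, net, info, t->parms.link, iph->protocol);
 *	else if (type == ICMP_REDIRECT)
 *		ipv4_redirect(skb, net, t->parms.link, iph->protocol);
 */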
void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct net *net = sock_net(sk);

	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_sk_redirect);

INDIRECT_CALLABLE_SCOPE struct dst_entry *ipv4_dst_check(struct dst_entry *dst,
							 u32 cookie)
{
	struct rtable *rt = (struct rtable *) dst;

	/* All IPV4 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 *
	 * When a PMTU/redirect information update invalidates a route,
	 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
	 * DST_OBSOLETE_DEAD.
	 */
	if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
		return NULL;
	return dst;
}
EXPORT_INDIRECT_CALLABLE(ipv4_dst_check);

static void ipv4_send_dest_unreach(struct sk_buff *skb)
{
	struct net_device *dev;
	struct ip_options opt;
	int res;

	/* Recompile ip options since IPCB may not be valid anymore.
	 * Also check we have a reasonable ipv4 header.
	 */
	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
	    ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
		return;

	memset(&opt, 0, sizeof(opt));
	if (ip_hdr(skb)->ihl > 5) {
		if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
			return;
		opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);

		rcu_read_lock();
		dev = skb->dev ? skb->dev : skb_rtable(skb)->dst.dev;
		res = __ip_options_compile(dev_net(dev), &opt, skb, NULL);
		rcu_read_unlock();

		if (res)
			return;
	}
	__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
}

static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	ipv4_send_dest_unreach(skb);

	rt = skb_rtable(skb);
	if (rt)
		dst_set_expires(&rt->dst, 0);
}

static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	pr_debug("%s: %pI4 -> %pI4, %s\n",
		 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
		 skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	WARN_ON(1);
	return 0;
}
/*
 * We do not cache the source address of the outgoing interface,
 * because it is used only by IP RR, TS and SRR options,
 * so it is out of the fast path.
 *
 * BTW remember: "addr" is allowed to be not aligned
 * in IP options!
 */

void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
{
	__be32 src;

	if (rt_is_output_route(rt))
		src = ip_hdr(skb)->saddr;
	else {
		struct fib_result res;
		struct iphdr *iph = ip_hdr(skb);
		struct flowi4 fl4 = {
			.daddr = iph->daddr,
			.saddr = iph->saddr,
			.flowi4_tos = RT_TOS(iph->tos),
			.flowi4_oif = rt->dst.dev->ifindex,
			.flowi4_iif = skb->dev->ifindex,
			.flowi4_mark = skb->mark,
		};

		rcu_read_lock();
		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
			src = fib_result_prefsrc(dev_net(rt->dst.dev), &res);
		else
			src = inet_select_addr(rt->dst.dev,
					       rt_nexthop(rt, iph->daddr),
					       RT_SCOPE_UNIVERSE);
		rcu_read_unlock();
	}
	memcpy(addr, &src, 4);
}

#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->dst.tclassid & 0xFFFF))
		rt->dst.tclassid |= tag & 0xFFFF;
	if (!(rt->dst.tclassid & 0xFFFF0000))
		rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif

static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
{
	struct net *net = dev_net(dst->dev);
	unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
	unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
				    net->ipv4.ip_rt_min_advmss);

	return min(advmss, IPV4_MAX_PMTU - header_size);
}

INDIRECT_CALLABLE_SCOPE unsigned int ipv4_mtu(const struct dst_entry *dst)
{
	return ip_dst_mtu_maybe_forward(dst, false);
}
EXPORT_INDIRECT_CALLABLE(ipv4_mtu);

static void ip_del_fnhe(struct fib_nh_common *nhc, __be32 daddr)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
	u32 hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference_protected(nhc->nhc_exceptions,
					 lockdep_is_held(&fnhe_lock));
	hash += hval;

	fnhe_p = &hash->chain;
	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
	while (fnhe) {
		if (fnhe->fnhe_daddr == daddr) {
			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
			/* set fnhe_daddr to 0 to ensure it won't bind with
			 * new dsts in rt_bind_exception().
			 */
			fnhe->fnhe_daddr = 0;
			fnhe_flush_routes(fnhe);
			kfree_rcu(fnhe, rcu);
			break;
		}
		fnhe_p = &fnhe->fnhe_next;
		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
						 lockdep_is_held(&fnhe_lock));
	}

	spin_unlock_bh(&fnhe_lock);
}

static struct fib_nh_exception *find_exception(struct fib_nh_common *nhc,
					       __be32 daddr)
{
	struct fnhe_hash_bucket *hash = rcu_dereference(nhc->nhc_exceptions);
	struct fib_nh_exception *fnhe;
	u32 hval;

	if (!hash)
		return NULL;

	hval = fnhe_hashfun(daddr);

	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr) {
			if (fnhe->fnhe_expires &&
			    time_after(jiffies, fnhe->fnhe_expires)) {
				ip_del_fnhe(nhc, daddr);
				break;
			}
			return fnhe;
		}
	}
	return NULL;
}
/* MTU selection:
 * 1. mtu on route is locked - use it
 * 2. mtu from nexthop exception
 * 3. mtu from egress device
 */

u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
{
	struct fib_nh_common *nhc = res->nhc;
	struct net_device *dev = nhc->nhc_dev;
	struct fib_info *fi = res->fi;
	u32 mtu = 0;

	if (READ_ONCE(dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu) ||
	    fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU))
		mtu = fi->fib_mtu;

	if (likely(!mtu)) {
		struct fib_nh_exception *fnhe;

		fnhe = find_exception(nhc, daddr);
		if (fnhe && !time_after_eq(jiffies, fnhe->fnhe_expires))
			mtu = fnhe->fnhe_pmtu;
	}

	if (likely(!mtu))
		mtu = min(READ_ONCE(dev->mtu), IP_MAX_MTU);

	return mtu - lwtunnel_headroom(nhc->nhc_lwtstate, mtu);
}

static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
			      __be32 daddr, const bool do_cache)
{
	bool ret = false;

	spin_lock_bh(&fnhe_lock);

	if (daddr == fnhe->fnhe_daddr) {
		struct rtable __rcu **porig;
		struct rtable *orig;
		int genid = fnhe_genid(dev_net(rt->dst.dev));

		if (rt_is_input_route(rt))
			porig = &fnhe->fnhe_rth_input;
		else
			porig = &fnhe->fnhe_rth_output;
		orig = rcu_dereference(*porig);

		if (fnhe->fnhe_genid != genid) {
			fnhe->fnhe_genid = genid;
			fnhe->fnhe_gw = 0;
			fnhe->fnhe_pmtu = 0;
			fnhe->fnhe_expires = 0;
			fnhe->fnhe_mtu_locked = false;
			fnhe_flush_routes(fnhe);
			orig = NULL;
		}
		fill_route_from_fnhe(rt, fnhe);
		if (!rt->rt_gw4) {
			rt->rt_gw4 = daddr;
			rt->rt_gw_family = AF_INET;
		}

		if (do_cache) {
			dst_hold(&rt->dst);
			rcu_assign_pointer(*porig, rt);
			if (orig) {
				dst_dev_put(&orig->dst);
				dst_release(&orig->dst);
			}
			ret = true;
		}

		fnhe->fnhe_stamp = jiffies;
	}
	spin_unlock_bh(&fnhe_lock);

	return ret;
}

static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
{
	struct rtable *orig, *prev, **p;
	bool ret = true;

	if (rt_is_input_route(rt)) {
		p = (struct rtable **)&nhc->nhc_rth_input;
	} else {
		p = (struct rtable **)raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
	}
	orig = *p;

	/* hold dst before doing cmpxchg() to avoid race condition
	 * on this dst
	 */
	dst_hold(&rt->dst);
	prev = cmpxchg(p, orig, rt);
	if (prev == orig) {
		if (orig) {
			rt_add_uncached_list(orig);
			dst_release(&orig->dst);
		}
	} else {
		dst_release(&rt->dst);
		ret = false;
	}

	return ret;
}

struct uncached_list {
	spinlock_t		lock;
	struct list_head	head;
	struct list_head	quarantine;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);

void rt_add_uncached_list(struct rtable *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);

	rt->dst.rt_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->dst.rt_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}

void rt_del_uncached_list(struct rtable *rt)
{
	if (!list_empty(&rt->dst.rt_uncached)) {
		struct uncached_list *ul = rt->dst.rt_uncached_list;

		spin_lock_bh(&ul->lock);
		list_del_init(&rt->dst.rt_uncached);
		spin_unlock_bh(&ul->lock);
	}
}
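
/* Example (illustrative): routes that could not be cached in a FIB nexthop
 * sit on a per-CPU uncached list so device teardown can still find them;
 * entries whose device has already been swapped for blackhole_netdev are
 * moved to the quarantine list by rt_flush_dev() below:
 *
 *	CPU0: rt_uncached_list = { head: rt1 -> rt2, quarantine: rt3 }
 */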
static void ipv4_dst_destroy(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;

	ip_dst_metrics_put(dst);
	rt_del_uncached_list(rt);
}

void rt_flush_dev(struct net_device *dev)
{
	struct rtable *rt, *safe;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

		if (list_empty(&ul->head))
			continue;

		spin_lock_bh(&ul->lock);
		list_for_each_entry_safe(rt, safe, &ul->head, dst.rt_uncached) {
			if (rt->dst.dev != dev)
				continue;
			rt->dst.dev = blackhole_netdev;
			netdev_ref_replace(dev, blackhole_netdev,
					   &rt->dst.dev_tracker, GFP_ATOMIC);
			list_move(&rt->dst.rt_uncached, &ul->quarantine);
		}
		spin_unlock_bh(&ul->lock);
	}
}

static bool rt_cache_valid(const struct rtable *rt)
{
	return	rt &&
		rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
		!rt_is_expired(rt);
}

static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
			   const struct fib_result *res,
			   struct fib_nh_exception *fnhe,
			   struct fib_info *fi, u16 type, u32 itag,
			   const bool do_cache)
{
	bool cached = false;

	if (fi) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);

		if (nhc->nhc_gw_family && nhc->nhc_scope == RT_SCOPE_LINK) {
			rt->rt_uses_gateway = 1;
			rt->rt_gw_family = nhc->nhc_gw_family;
			/* only INET and INET6 are supported */
			if (likely(nhc->nhc_gw_family == AF_INET))
				rt->rt_gw4 = nhc->nhc_gw.ipv4;
			else
				rt->rt_gw6 = nhc->nhc_gw.ipv6;
		}

		ip_dst_init_metrics(&rt->dst, fi->fib_metrics);

#ifdef CONFIG_IP_ROUTE_CLASSID
		if (nhc->nhc_family == AF_INET) {
			struct fib_nh *nh;

			nh = container_of(nhc, struct fib_nh, nh_common);
			rt->dst.tclassid = nh->nh_tclassid;
		}
#endif
		rt->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
		if (unlikely(fnhe))
			cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
		else if (do_cache)
			cached = rt_cache_route(nhc, rt);
		if (unlikely(!cached)) {
			/* Routes we intend to cache in nexthop exception or
			 * FIB nexthop have the DST_NOCACHE bit clear.
			 * However, if we are unsuccessful at storing this
			 * route into the cache we really need to set it.
			 */
			if (!rt->rt_gw4) {
				rt->rt_gw_family = AF_INET;
				rt->rt_gw4 = daddr;
			}
			rt_add_uncached_list(rt);
		}
	} else
		rt_add_uncached_list(rt);

#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, res->tclassid);
#endif
	set_class_tag(rt, itag);
#endif
}
struct rtable *rt_dst_alloc(struct net_device *dev,
			    unsigned int flags, u16 type,
			    bool noxfrm)
{
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_ops, dev, DST_OBSOLETE_FORCE_CHK,
		       (noxfrm ? DST_NOXFRM : 0));

	if (rt) {
		rt->rt_genid = rt_genid_ipv4(dev_net(dev));
		rt->rt_flags = flags;
		rt->rt_type = type;
		rt->rt_is_input = 0;
		rt->rt_iif = 0;
		rt->rt_pmtu = 0;
		rt->rt_mtu_locked = 0;
		rt->rt_uses_gateway = 0;
		rt->rt_gw_family = 0;
		rt->rt_gw4 = 0;

		rt->dst.output = ip_output;
		if (flags & RTCF_LOCAL)
			rt->dst.input = ip_local_deliver;
	}

	return rt;
}
EXPORT_SYMBOL(rt_dst_alloc);

struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt)
{
	struct rtable *new_rt;

	new_rt = dst_alloc(&ipv4_dst_ops, dev, DST_OBSOLETE_FORCE_CHK,
			   rt->dst.flags);

	if (new_rt) {
		new_rt->rt_genid = rt_genid_ipv4(dev_net(dev));
		new_rt->rt_flags = rt->rt_flags;
		new_rt->rt_type = rt->rt_type;
		new_rt->rt_is_input = rt->rt_is_input;
		new_rt->rt_iif = rt->rt_iif;
		new_rt->rt_pmtu = rt->rt_pmtu;
		new_rt->rt_mtu_locked = rt->rt_mtu_locked;
		new_rt->rt_gw_family = rt->rt_gw_family;
		if (rt->rt_gw_family == AF_INET)
			new_rt->rt_gw4 = rt->rt_gw4;
		else if (rt->rt_gw_family == AF_INET6)
			new_rt->rt_gw6 = rt->rt_gw6;

		new_rt->dst.input = rt->dst.input;
		new_rt->dst.output = rt->dst.output;
		new_rt->dst.error = rt->dst.error;
		new_rt->dst.lastuse = jiffies;
		new_rt->dst.lwtstate = lwtstate_get(rt->dst.lwtstate);
	}
	return new_rt;
}
EXPORT_SYMBOL(rt_dst_clone);

/* called in rcu_read_lock() section */
int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			  u8 tos, struct net_device *dev,
			  struct in_device *in_dev, u32 *itag)
{
	int err;

	/* Primary sanity checks. */
	if (!in_dev)
		return -EINVAL;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    skb->protocol != htons(ETH_P_IP))
		return -EINVAL;

	if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
		return -EINVAL;

	if (ipv4_is_zeronet(saddr)) {
		if (!ipv4_is_local_multicast(daddr) &&
		    ip_hdr(skb)->protocol != IPPROTO_IGMP)
			return -EINVAL;
	} else {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, itag);
		if (err < 0)
			return err;
	}
	return 0;
}

/* called in rcu_read_lock() section */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			     u8 tos, struct net_device *dev, int our)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	unsigned int flags = RTCF_MULTICAST;
	struct rtable *rth;
	u32 itag = 0;
	int err;

	err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
	if (err)
		return err;

	if (our)
		flags |= RTCF_LOCAL;

	if (IN_DEV_ORCONF(in_dev, NOPOLICY))
		IPCB(skb)->flags |= IPSKB_NOPOLICY;

	rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
			   false);
	if (!rth)
		return -ENOBUFS;

#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->dst.output = ip_rt_bug;
	rth->rt_is_input = 1;

#ifdef CONFIG_IP_MROUTE
	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
		rth->dst.input = ip_mr_input;
#endif
	RT_CACHE_STAT_INC(in_slow_mc);

	skb_dst_drop(skb);
	skb_dst_set(skb, &rth->dst);
	return 0;
}
static void ip_handle_martian_source(struct net_device *dev,
				     struct in_device *in_dev,
				     struct sk_buff *skb,
				     __be32 daddr,
				     __be32 saddr)
{
	RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 *	RFC1812 recommendation, if source is martian,
		 *	the only hint is MAC header.
		 */
		pr_warn("martian source %pI4 from %pI4, on dev %s\n",
			&daddr, &saddr, dev->name);
		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
			print_hex_dump(KERN_WARNING, "ll header: ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       skb_mac_header(skb),
				       dev->hard_header_len, false);
		}
	}
#endif
}

/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
			   const struct fib_result *res,
			   struct in_device *in_dev,
			   __be32 daddr, __be32 saddr, u32 tos)
{
	struct fib_nh_common *nhc = FIB_RES_NHC(*res);
	struct net_device *dev = nhc->nhc_dev;
	struct fib_nh_exception *fnhe;
	struct rtable *rth;
	int err;
	struct in_device *out_dev;
	bool do_cache;
	u32 itag = 0;

	/* get a working reference to the output device */
	out_dev = __in_dev_get_rcu(dev);
	if (!out_dev) {
		net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
		return -EINVAL;
	}

	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
				  in_dev->dev, in_dev, &itag);
	if (err < 0) {
		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
					 saddr);

		goto cleanup;
	}

	do_cache = res->fi && !itag;
	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
	    skb->protocol == htons(ETH_P_IP)) {
		__be32 gw;

		gw = nhc->nhc_gw_family == AF_INET ? nhc->nhc_gw.ipv4 : 0;
		if (IN_DEV_SHARED_MEDIA(out_dev) ||
		    inet_addr_onlink(out_dev, saddr, gw))
			IPCB(skb)->flags |= IPSKB_DOREDIRECT;
	}

	if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create route, if it is
		 * invalid for proxy arp. DNAT routes are always valid.
		 *
		 * The proxy arp feature has been extended to allow ARP
		 * replies back on the same interface, to support
		 * Private VLAN switch technologies. See arp.c.
		 */
		if (out_dev == in_dev &&
		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
			err = -EINVAL;
			goto cleanup;
		}
	}

	if (IN_DEV_ORCONF(in_dev, NOPOLICY))
		IPCB(skb)->flags |= IPSKB_NOPOLICY;

	fnhe = find_exception(nhc, daddr);
	if (do_cache) {
		if (fnhe)
			rth = rcu_dereference(fnhe->fnhe_rth_input);
		else
			rth = rcu_dereference(nhc->nhc_rth_input);
		if (rt_cache_valid(rth)) {
			skb_dst_set_noref(skb, &rth->dst);
			goto out;
		}
	}

	rth = rt_dst_alloc(out_dev->dev, 0, res->type,
			   IN_DEV_ORCONF(out_dev, NOXFRM));
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	rth->rt_is_input = 1;
	RT_CACHE_STAT_INC(in_slow_tot);

	rth->dst.input = ip_forward;

	rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
		       do_cache);
	lwtunnel_set_redirect(&rth->dst);
	skb_dst_set(skb, &rth->dst);
out:
	err = 0;
cleanup:
	return err;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
/* To make ICMP packets follow the right flow, the multipath hash is
 * calculated from the inner IP addresses.
 */
static void ip_multipath_l3_keys(const struct sk_buff *skb,
				 struct flow_keys *hash_keys)
{
	const struct iphdr *outer_iph = ip_hdr(skb);
	const struct iphdr *key_iph = outer_iph;
	const struct iphdr *inner_iph;
	const struct icmphdr *icmph;
	struct iphdr _inner_iph;
	struct icmphdr _icmph;

	if (likely(outer_iph->protocol != IPPROTO_ICMP))
		goto out;

	if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
		goto out;

	icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
				   &_icmph);
	if (!icmph)
		goto out;

	if (!icmp_is_err(icmph->type))
		goto out;

	inner_iph = skb_header_pointer(skb,
				       outer_iph->ihl * 4 + sizeof(_icmph),
				       sizeof(_inner_iph), &_inner_iph);
	if (!inner_iph)
		goto out;

	key_iph = inner_iph;
out:
	hash_keys->addrs.v4addrs.src = key_iph->saddr;
	hash_keys->addrs.v4addrs.dst = key_iph->daddr;
}

static u32 fib_multipath_custom_hash_outer(const struct net *net,
					   const struct sk_buff *skb,
					   bool *p_has_inner)
{
	u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
	struct flow_keys keys, hash_keys;

	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
		return 0;

	memset(&hash_keys, 0, sizeof(hash_keys));
	skb_flow_dissect_flow_keys(skb, &keys, FLOW_DISSECTOR_F_STOP_AT_ENCAP);

	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
		hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
		hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
		hash_keys.basic.ip_proto = keys.basic.ip_proto;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
		hash_keys.ports.src = keys.ports.src;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
		hash_keys.ports.dst = keys.ports.dst;

	*p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION);
	return flow_hash_from_keys(&hash_keys);
}
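
/* Example (illustrative shell session): the FIB_MULTIPATH_HASH_FIELD_* bits
 * tested above come from the fib_multipath_hash_fields sysctl, e.g.
 *
 *	# sysctl net.ipv4.fib_multipath_hash_policy=3
 *	# sysctl net.ipv4.fib_multipath_hash_fields=0x0037
 *
 * would select custom hashing over source/destination IP, IP protocol and
 * source/destination port, assuming the bit layout documented in
 * Documentation/networking/ip-sysctl.rst.
 */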
static u32 fib_multipath_custom_hash_inner(const struct net *net,
					   const struct sk_buff *skb,
					   bool has_inner)
{
	u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
	struct flow_keys keys, hash_keys;

	/* We assume the packet carries an encapsulation, but if none was
	 * encountered during dissection of the outer flow, then there is no
	 * point in calling the flow dissector again.
	 */
	if (!has_inner)
		return 0;

	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK))
		return 0;

	memset(&hash_keys, 0, sizeof(hash_keys));
	skb_flow_dissect_flow_keys(skb, &keys, 0);

	if (!(keys.control.flags & FLOW_DIS_ENCAPSULATION))
		return 0;

	if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
			hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
			hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
	} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
			hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
			hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
			hash_keys.tags.flow_label = keys.tags.flow_label;
	}

	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
		hash_keys.basic.ip_proto = keys.basic.ip_proto;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
		hash_keys.ports.src = keys.ports.src;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
		hash_keys.ports.dst = keys.ports.dst;

	return flow_hash_from_keys(&hash_keys);
}

static u32 fib_multipath_custom_hash_skb(const struct net *net,
					 const struct sk_buff *skb)
{
	u32 mhash, mhash_inner;
	bool has_inner = true;

	mhash = fib_multipath_custom_hash_outer(net, skb, &has_inner);
	mhash_inner = fib_multipath_custom_hash_inner(net, skb, has_inner);

	return jhash_2words(mhash, mhash_inner, 0);
}

static u32 fib_multipath_custom_hash_fl4(const struct net *net,
					 const struct flowi4 *fl4)
{
	u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
	struct flow_keys hash_keys;

	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
		return 0;

	memset(&hash_keys, 0, sizeof(hash_keys));
	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
		hash_keys.addrs.v4addrs.src = fl4->saddr;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
		hash_keys.addrs.v4addrs.dst = fl4->daddr;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
		hash_keys.basic.ip_proto = fl4->flowi4_proto;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
		hash_keys.ports.src = fl4->fl4_sport;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
		hash_keys.ports.dst = fl4->fl4_dport;

	return flow_hash_from_keys(&hash_keys);
}
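
/* Example (illustrative): fib_multipath_hash() below dispatches on the
 * net.ipv4.fib_multipath_hash_policy sysctl:
 *
 *	0 - L3 (source/destination address) hashing, the default
 *	1 - L4 five-tuple hashing
 *	2 - L3 hashing, falling back to inner headers for encapsulated traffic
 *	3 - custom field set taken from fib_multipath_hash_fields (see above)
 */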
/* if skb is set it will be used and fl4 can be NULL */
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
		       const struct sk_buff *skb, struct flow_keys *flkeys)
{
	u32 multipath_hash = fl4 ? fl4->flowi4_multipath_hash : 0;
	struct flow_keys hash_keys;
	u32 mhash = 0;

	switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
	case 0:
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		if (skb) {
			ip_multipath_l3_keys(skb, &hash_keys);
		} else {
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
		}
		mhash = flow_hash_from_keys(&hash_keys);
		break;
	case 1:
		/* skb is currently provided only when forwarding */
		if (skb) {
			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
			struct flow_keys keys;

			/* short-circuit if we already have L4 hash present */
			if (skb->l4_hash)
				return skb_get_hash_raw(skb) >> 1;

			memset(&hash_keys, 0, sizeof(hash_keys));

			if (!flkeys) {
				skb_flow_dissect_flow_keys(skb, &keys, flag);
				flkeys = &keys;
			}

			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
			hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
			hash_keys.ports.src = flkeys->ports.src;
			hash_keys.ports.dst = flkeys->ports.dst;
			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
		} else {
			memset(&hash_keys, 0, sizeof(hash_keys));
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
			hash_keys.ports.src = fl4->fl4_sport;
			hash_keys.ports.dst = fl4->fl4_dport;
			hash_keys.basic.ip_proto = fl4->flowi4_proto;
		}
		mhash = flow_hash_from_keys(&hash_keys);
		break;
	case 2:
		memset(&hash_keys, 0, sizeof(hash_keys));
		/* skb is currently provided only when forwarding */
		if (skb) {
			struct flow_keys keys;

			skb_flow_dissect_flow_keys(skb, &keys, 0);
			/* Inner can be v4 or v6 */
			if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
				hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
				hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
			} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
				hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
				hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
				hash_keys.tags.flow_label = keys.tags.flow_label;
				hash_keys.basic.ip_proto = keys.basic.ip_proto;
			} else {
				/* Same as case 0 */
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
				ip_multipath_l3_keys(skb, &hash_keys);
			}
		} else {
			/* Same as case 0 */
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
		}
		mhash = flow_hash_from_keys(&hash_keys);
		break;
	case 3:
		if (skb)
			mhash = fib_multipath_custom_hash_skb(net, skb);
		else
			mhash = fib_multipath_custom_hash_fl4(net, fl4);
		break;
	}

	if (multipath_hash)
		mhash = jhash_2words(mhash, multipath_hash, 0);

	return mhash >> 1;
}
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
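
/* For multipath routes, pick a next hop from the flow hash before the
 * input route is created; single-path results fall straight through to
 * __mkroute_input().
 */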
static int ip_mkroute_input(struct sk_buff *skb,
			    struct fib_result *res,
			    struct in_device *in_dev,
			    __be32 daddr, __be32 saddr, u32 tos,
			    struct flow_keys *hkeys)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res->fi && fib_info_num_path(res->fi) > 1) {
		int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);

		fib_select_multipath(res, h);
		IPCB(skb)->flags |= IPSKB_MULTIPATH;
	}
#endif

	/* create a routing cache entry */
	return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
}
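
/* The hint is an earlier packet from the same receive batch: the listified
 * receive path uses it so that a dst already validated for one packet can
 * simply be copied to the following packets with the same destination.
 */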
2231 */ 2232 2233 tun_info = skb_tunnel_info(skb); 2234 if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX)) 2235 fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id; 2236 else 2237 fl4.flowi4_tun_key.tun_id = 0; 2238 skb_dst_drop(skb); 2239 2240 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr)) 2241 goto martian_source; 2242 2243 res->fi = NULL; 2244 res->table = NULL; 2245 if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0)) 2246 goto brd_input; 2247 2248 /* Accept zero addresses only to limited broadcast; 2249 * I even do not know to fix it or not. Waiting for complains :-) 2250 */ 2251 if (ipv4_is_zeronet(saddr)) 2252 goto martian_source; 2253 2254 if (ipv4_is_zeronet(daddr)) 2255 goto martian_destination; 2256 2257 /* Following code try to avoid calling IN_DEV_NET_ROUTE_LOCALNET(), 2258 * and call it once if daddr or/and saddr are loopback addresses 2259 */ 2260 if (ipv4_is_loopback(daddr)) { 2261 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) 2262 goto martian_destination; 2263 } else if (ipv4_is_loopback(saddr)) { 2264 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) 2265 goto martian_source; 2266 } 2267 2268 /* 2269 * Now we are ready to route packet. 2270 */ 2271 fl4.flowi4_l3mdev = 0; 2272 fl4.flowi4_oif = 0; 2273 fl4.flowi4_iif = dev->ifindex; 2274 fl4.flowi4_mark = skb->mark; 2275 fl4.flowi4_tos = tos; 2276 fl4.flowi4_scope = RT_SCOPE_UNIVERSE; 2277 fl4.flowi4_flags = 0; 2278 fl4.daddr = daddr; 2279 fl4.saddr = saddr; 2280 fl4.flowi4_uid = sock_net_uid(net, NULL); 2281 fl4.flowi4_multipath_hash = 0; 2282 2283 if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) { 2284 flkeys = &_flkeys; 2285 } else { 2286 fl4.flowi4_proto = 0; 2287 fl4.fl4_sport = 0; 2288 fl4.fl4_dport = 0; 2289 } 2290 2291 err = fib_lookup(net, &fl4, res, 0); 2292 if (err != 0) { 2293 if (!IN_DEV_FORWARD(in_dev)) 2294 err = -EHOSTUNREACH; 2295 goto no_route; 2296 } 2297 2298 if (res->type == RTN_BROADCAST) { 2299 if (IN_DEV_BFORWARD(in_dev)) 2300 goto make_route; 2301 /* not do cache if bc_forwarding is enabled */ 2302 if (IPV4_DEVCONF_ALL_RO(net, BC_FORWARDING)) 2303 do_cache = false; 2304 goto brd_input; 2305 } 2306 2307 if (res->type == RTN_LOCAL) { 2308 err = fib_validate_source(skb, saddr, daddr, tos, 2309 0, dev, in_dev, &itag); 2310 if (err < 0) 2311 goto martian_source; 2312 goto local_input; 2313 } 2314 2315 if (!IN_DEV_FORWARD(in_dev)) { 2316 err = -EHOSTUNREACH; 2317 goto no_route; 2318 } 2319 if (res->type != RTN_UNICAST) 2320 goto martian_destination; 2321 2322 make_route: 2323 err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys); 2324 out: return err; 2325 2326 brd_input: 2327 if (skb->protocol != htons(ETH_P_IP)) 2328 goto e_inval; 2329 2330 if (!ipv4_is_zeronet(saddr)) { 2331 err = fib_validate_source(skb, saddr, 0, tos, 0, dev, 2332 in_dev, &itag); 2333 if (err < 0) 2334 goto martian_source; 2335 } 2336 flags |= RTCF_BROADCAST; 2337 res->type = RTN_BROADCAST; 2338 RT_CACHE_STAT_INC(in_brd); 2339 2340 local_input: 2341 if (IN_DEV_ORCONF(in_dev, NOPOLICY)) 2342 IPCB(skb)->flags |= IPSKB_NOPOLICY; 2343 2344 do_cache &= res->fi && !itag; 2345 if (do_cache) { 2346 struct fib_nh_common *nhc = FIB_RES_NHC(*res); 2347 2348 rth = rcu_dereference(nhc->nhc_rth_input); 2349 if (rt_cache_valid(rth)) { 2350 skb_dst_set_noref(skb, &rth->dst); 2351 err = 0; 2352 goto out; 2353 } 2354 } 2355 2356 rth = rt_dst_alloc(ip_rt_get_dev(net, res), 2357 flags | RTCF_LOCAL, res->type, false); 2358 if (!rth) 2359 goto e_nobufs; 2360 2361 rth->dst.output= ip_rt_bug; 2362 #ifdef 
local_input:
	if (IN_DEV_ORCONF(in_dev, NOPOLICY))
		IPCB(skb)->flags |= IPSKB_NOPOLICY;

	do_cache &= res->fi && !itag;
	if (do_cache) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);

		rth = rcu_dereference(nhc->nhc_rth_input);
		if (rt_cache_valid(rth)) {
			skb_dst_set_noref(skb, &rth->dst);
			err = 0;
			goto out;
		}
	}

	rth = rt_dst_alloc(ip_rt_get_dev(net, res),
			   flags | RTCF_LOCAL, res->type, false);
	if (!rth)
		goto e_nobufs;

	rth->dst.output = ip_rt_bug;
#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->rt_is_input = 1;

	RT_CACHE_STAT_INC(in_slow_tot);
	if (res->type == RTN_UNREACHABLE) {
		rth->dst.input = ip_error;
		rth->dst.error = -err;
		rth->rt_flags &= ~RTCF_LOCAL;
	}

	if (do_cache) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);

		rth->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
		if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
			WARN_ON(rth->dst.input == lwtunnel_input);
			rth->dst.lwtstate->orig_input = rth->dst.input;
			rth->dst.input = lwtunnel_input;
		}

		if (unlikely(!rt_cache_route(nhc, rth)))
			rt_add_uncached_list(rth);
	}
	skb_dst_set(skb, &rth->dst);
	err = 0;
	goto out;

no_route:
	RT_CACHE_STAT_INC(in_no_route);
	res->type = RTN_UNREACHABLE;
	res->fi = NULL;
	res->table = NULL;
	goto local_input;

	/*
	 *	Do not cache martian addresses: they should be logged (RFC1812)
	 */
martian_destination:
	RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev))
		net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
				     &daddr, &saddr, dev->name);
#endif

e_inval:
	err = -EINVAL;
	goto out;

e_nobufs:
	err = -ENOBUFS;
	goto out;

martian_source:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	goto out;
}
2436 */ 2437 if (ipv4_is_multicast(daddr)) { 2438 struct in_device *in_dev = __in_dev_get_rcu(dev); 2439 int our = 0; 2440 int err = -EINVAL; 2441 2442 if (!in_dev) 2443 return err; 2444 our = ip_check_mc_rcu(in_dev, daddr, saddr, 2445 ip_hdr(skb)->protocol); 2446 2447 /* check l3 master if no match yet */ 2448 if (!our && netif_is_l3_slave(dev)) { 2449 struct in_device *l3_in_dev; 2450 2451 l3_in_dev = __in_dev_get_rcu(skb->dev); 2452 if (l3_in_dev) 2453 our = ip_check_mc_rcu(l3_in_dev, daddr, saddr, 2454 ip_hdr(skb)->protocol); 2455 } 2456 2457 if (our 2458 #ifdef CONFIG_IP_MROUTE 2459 || 2460 (!ipv4_is_local_multicast(daddr) && 2461 IN_DEV_MFORWARD(in_dev)) 2462 #endif 2463 ) { 2464 err = ip_route_input_mc(skb, daddr, saddr, 2465 tos, dev, our); 2466 } 2467 return err; 2468 } 2469 2470 return ip_route_input_slow(skb, daddr, saddr, tos, dev, res); 2471 } 2472 2473 int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr, 2474 u8 tos, struct net_device *dev) 2475 { 2476 struct fib_result res; 2477 int err; 2478 2479 tos &= IPTOS_RT_MASK; 2480 rcu_read_lock(); 2481 err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res); 2482 rcu_read_unlock(); 2483 2484 return err; 2485 } 2486 EXPORT_SYMBOL(ip_route_input_noref); 2487 2488 /* called with rcu_read_lock() */ 2489 static struct rtable *__mkroute_output(const struct fib_result *res, 2490 const struct flowi4 *fl4, int orig_oif, 2491 struct net_device *dev_out, 2492 unsigned int flags) 2493 { 2494 struct fib_info *fi = res->fi; 2495 struct fib_nh_exception *fnhe; 2496 struct in_device *in_dev; 2497 u16 type = res->type; 2498 struct rtable *rth; 2499 bool do_cache; 2500 2501 in_dev = __in_dev_get_rcu(dev_out); 2502 if (!in_dev) 2503 return ERR_PTR(-EINVAL); 2504 2505 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev))) 2506 if (ipv4_is_loopback(fl4->saddr) && 2507 !(dev_out->flags & IFF_LOOPBACK) && 2508 !netif_is_l3_master(dev_out)) 2509 return ERR_PTR(-EINVAL); 2510 2511 if (ipv4_is_lbcast(fl4->daddr)) 2512 type = RTN_BROADCAST; 2513 else if (ipv4_is_multicast(fl4->daddr)) 2514 type = RTN_MULTICAST; 2515 else if (ipv4_is_zeronet(fl4->daddr)) 2516 return ERR_PTR(-EINVAL); 2517 2518 if (dev_out->flags & IFF_LOOPBACK) 2519 flags |= RTCF_LOCAL; 2520 2521 do_cache = true; 2522 if (type == RTN_BROADCAST) { 2523 flags |= RTCF_BROADCAST | RTCF_LOCAL; 2524 fi = NULL; 2525 } else if (type == RTN_MULTICAST) { 2526 flags |= RTCF_MULTICAST | RTCF_LOCAL; 2527 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr, 2528 fl4->flowi4_proto)) 2529 flags &= ~RTCF_LOCAL; 2530 else 2531 do_cache = false; 2532 /* If multicast route do not exist use 2533 * default one, but do not gateway in this case. 2534 * Yes, it is hack. 2535 */ 2536 if (fi && res->prefixlen < 4) 2537 fi = NULL; 2538 } else if ((type == RTN_LOCAL) && (orig_oif != 0) && 2539 (orig_oif != dev_out->ifindex)) { 2540 /* For local routes that require a particular output interface 2541 * we do not want to cache the result. Caching the result 2542 * causes incorrect behaviour when there are multiple source 2543 * addresses on the interface, the end result being that if the 2544 * intended recipient is waiting on that interface for the 2545 * packet he won't receive it because it will be delivered on 2546 * the loopback interface and the IP_PKTINFO ipi_ifindex will 2547 * be set to the loopback interface as well. 
2548 */ 2549 do_cache = false; 2550 } 2551 2552 fnhe = NULL; 2553 do_cache &= fi != NULL; 2554 if (fi) { 2555 struct fib_nh_common *nhc = FIB_RES_NHC(*res); 2556 struct rtable __rcu **prth; 2557 2558 fnhe = find_exception(nhc, fl4->daddr); 2559 if (!do_cache) 2560 goto add; 2561 if (fnhe) { 2562 prth = &fnhe->fnhe_rth_output; 2563 } else { 2564 if (unlikely(fl4->flowi4_flags & 2565 FLOWI_FLAG_KNOWN_NH && 2566 !(nhc->nhc_gw_family && 2567 nhc->nhc_scope == RT_SCOPE_LINK))) { 2568 do_cache = false; 2569 goto add; 2570 } 2571 prth = raw_cpu_ptr(nhc->nhc_pcpu_rth_output); 2572 } 2573 rth = rcu_dereference(*prth); 2574 if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst)) 2575 return rth; 2576 } 2577 2578 add: 2579 rth = rt_dst_alloc(dev_out, flags, type, 2580 IN_DEV_ORCONF(in_dev, NOXFRM)); 2581 if (!rth) 2582 return ERR_PTR(-ENOBUFS); 2583 2584 rth->rt_iif = orig_oif; 2585 2586 RT_CACHE_STAT_INC(out_slow_tot); 2587 2588 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) { 2589 if (flags & RTCF_LOCAL && 2590 !(dev_out->flags & IFF_LOOPBACK)) { 2591 rth->dst.output = ip_mc_output; 2592 RT_CACHE_STAT_INC(out_slow_mc); 2593 } 2594 #ifdef CONFIG_IP_MROUTE 2595 if (type == RTN_MULTICAST) { 2596 if (IN_DEV_MFORWARD(in_dev) && 2597 !ipv4_is_local_multicast(fl4->daddr)) { 2598 rth->dst.input = ip_mr_input; 2599 rth->dst.output = ip_mc_output; 2600 } 2601 } 2602 #endif 2603 } 2604 2605 rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache); 2606 lwtunnel_set_redirect(&rth->dst); 2607 2608 return rth; 2609 } 2610 2611 /* 2612 * Major route resolver routine. 2613 */ 2614 2615 struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4, 2616 const struct sk_buff *skb) 2617 { 2618 struct fib_result res = { 2619 .type = RTN_UNSPEC, 2620 .fi = NULL, 2621 .table = NULL, 2622 .tclassid = 0, 2623 }; 2624 struct rtable *rth; 2625 2626 fl4->flowi4_iif = LOOPBACK_IFINDEX; 2627 fl4->flowi4_tos &= IPTOS_RT_MASK; 2628 2629 rcu_read_lock(); 2630 rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb); 2631 rcu_read_unlock(); 2632 2633 return rth; 2634 } 2635 EXPORT_SYMBOL_GPL(ip_route_output_key_hash); 2636 2637 struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4, 2638 struct fib_result *res, 2639 const struct sk_buff *skb) 2640 { 2641 struct net_device *dev_out = NULL; 2642 int orig_oif = fl4->flowi4_oif; 2643 unsigned int flags = 0; 2644 struct rtable *rth; 2645 int err; 2646 2647 if (fl4->saddr) { 2648 if (ipv4_is_multicast(fl4->saddr) || 2649 ipv4_is_lbcast(fl4->saddr) || 2650 ipv4_is_zeronet(fl4->saddr)) { 2651 rth = ERR_PTR(-EINVAL); 2652 goto out; 2653 } 2654 2655 rth = ERR_PTR(-ENETUNREACH); 2656 2657 /* I removed check for oif == dev_out->oif here. 2658 * It was wrong for two reasons: 2659 * 1. ip_dev_find(net, saddr) can return wrong iface, if saddr 2660 * is assigned to multiple interfaces. 2661 * 2. Moreover, we are allowed to send packets with saddr 2662 * of another iface. --ANK 2663 */ 2664 2665 if (fl4->flowi4_oif == 0 && 2666 (ipv4_is_multicast(fl4->daddr) || 2667 ipv4_is_lbcast(fl4->daddr))) { 2668 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */ 2669 dev_out = __ip_dev_find(net, fl4->saddr, false); 2670 if (!dev_out) 2671 goto out; 2672 2673 /* Special hack: user can direct multicasts 2674 * and limited broadcast via necessary interface 2675 * without fiddling with IP_MULTICAST_IF or IP_PKTINFO. 2676 * This hack is not just for fun, it allows 2677 * vic,vat and friends to work. 

/*
 * Major route resolver routine.
 */

struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
					const struct sk_buff *skb)
{
	struct fib_result res = {
		.type		= RTN_UNSPEC,
		.fi		= NULL,
		.table		= NULL,
		.tclassid	= 0,
	};
	struct rtable *rth;

	fl4->flowi4_iif = LOOPBACK_IFINDEX;
	fl4->flowi4_tos &= IPTOS_RT_MASK;

	rcu_read_lock();
	rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
	rcu_read_unlock();

	return rth;
}
EXPORT_SYMBOL_GPL(ip_route_output_key_hash);
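
/* Output route resolution under rcu_read_lock(): validate a caller-supplied
 * source address, honour an explicit output interface if one was given,
 * otherwise consult the FIB, and finally build the rtable via
 * __mkroute_output().
 */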
struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
					    struct fib_result *res,
					    const struct sk_buff *skb)
{
	struct net_device *dev_out = NULL;
	int orig_oif = fl4->flowi4_oif;
	unsigned int flags = 0;
	struct rtable *rth;
	int err;

	if (fl4->saddr) {
		if (ipv4_is_multicast(fl4->saddr) ||
		    ipv4_is_lbcast(fl4->saddr) ||
		    ipv4_is_zeronet(fl4->saddr)) {
			rth = ERR_PTR(-EINVAL);
			goto out;
		}

		rth = ERR_PTR(-ENETUNREACH);

		/* I removed the check for oif == dev_out->oif here.
		 * It was wrong for two reasons:
		 * 1. ip_dev_find(net, saddr) can return the wrong iface, if
		 *    saddr is assigned to multiple interfaces.
		 * 2. Moreover, we are allowed to send packets with saddr
		 *    of another iface. --ANK
		 */

		if (fl4->flowi4_oif == 0 &&
		    (ipv4_is_multicast(fl4->daddr) ||
		     ipv4_is_lbcast(fl4->daddr))) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			dev_out = __ip_dev_find(net, fl4->saddr, false);
			if (!dev_out)
				goto out;

			/* Special hack: user can direct multicasts
			 * and limited broadcast via the necessary interface
			 * without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
			 * This hack is not just for fun, it allows
			 * vic, vat and friends to work.
			 * They bind the socket to loopback, set ttl to zero
			 * and expect that it will work.
			 * From the viewpoint of the routing cache they are broken,
			 * because we are not allowed to build a multicast path
			 * with a loopback source addr (look, the routing cache
			 * cannot know that ttl is zero, so that the packet
			 * will not leave this host and the route is valid).
			 * Luckily, this hack is a good workaround.
			 */

			fl4->flowi4_oif = dev_out->ifindex;
			goto make_route;
		}

		if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			if (!__ip_dev_find(net, fl4->saddr, false))
				goto out;
		}
	}

	if (fl4->flowi4_oif) {
		dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
		rth = ERR_PTR(-ENODEV);
		if (!dev_out)
			goto out;

		/* RACE: Check return value of inet_select_addr instead. */
		if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
			rth = ERR_PTR(-ENETUNREACH);
			goto out;
		}
		if (ipv4_is_local_multicast(fl4->daddr) ||
		    ipv4_is_lbcast(fl4->daddr) ||
		    fl4->flowi4_proto == IPPROTO_IGMP) {
			if (!fl4->saddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			goto make_route;
		}
		if (!fl4->saddr) {
			if (ipv4_is_multicast(fl4->daddr))
				fl4->saddr = inet_select_addr(dev_out, 0,
							      fl4->flowi4_scope);
			else if (!fl4->daddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_HOST);
		}
	}

	if (!fl4->daddr) {
		fl4->daddr = fl4->saddr;
		if (!fl4->daddr)
			fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
		dev_out = net->loopback_dev;
		fl4->flowi4_oif = LOOPBACK_IFINDEX;
		res->type = RTN_LOCAL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	err = fib_lookup(net, fl4, res, 0);
	if (err) {
		res->fi = NULL;
		res->table = NULL;
		if (fl4->flowi4_oif &&
		    (ipv4_is_multicast(fl4->daddr) || !fl4->flowi4_l3mdev)) {
			/* Apparently, the routing tables are wrong. Assume
			 * that the destination is on-link.
			 *
			 * WHY? DW.
			 * Because we are allowed to send to an iface
			 * even if it has NO routes and NO assigned
			 * addresses. When oif is specified, routing
			 * tables are looked up with only one purpose:
			 * to catch if the destination is gatewayed, rather
			 * than direct. Moreover, if MSG_DONTROUTE is set,
			 * we send the packet, ignoring both the routing
			 * tables and ifaddr state. --ANK
			 *
			 *
			 * We could make it even if oif is unknown,
			 * likely IPv6, but we do not.
			 */

			if (fl4->saddr == 0)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			res->type = RTN_UNICAST;
			goto make_route;
		}
		rth = ERR_PTR(err);
		goto out;
	}

	if (res->type == RTN_LOCAL) {
		if (!fl4->saddr) {
			if (res->fi->fib_prefsrc)
				fl4->saddr = res->fi->fib_prefsrc;
			else
				fl4->saddr = fl4->daddr;
		}

		/* L3 master device is the loopback for that domain */
		dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
			net->loopback_dev;

		/* make sure orig_oif points to fib result device even
		 * though packet rx/tx happens over loopback or l3mdev
		 */
		orig_oif = FIB_RES_OIF(*res);

		fl4->flowi4_oif = dev_out->ifindex;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	fib_select_path(net, res, fl4, skb);

	dev_out = FIB_RES_DEV(*res);

make_route:
	rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);

out:
	return rth;
}
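
/* A blackhole dst silently discards everything sent through it while still
 * looking like a valid route.  It is used, e.g., by xfrm when a packet must
 * be dropped because its IPsec states are not resolved yet, but a dst still
 * has to be handed back to the caller.
 */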
static struct dst_ops ipv4_dst_blackhole_ops = {
	.family			= AF_INET,
	.default_advmss		= ipv4_default_advmss,
	.neigh_lookup		= ipv4_neigh_lookup,
	.check			= dst_blackhole_check,
	.cow_metrics		= dst_blackhole_cow_metrics,
	.update_pmtu		= dst_blackhole_update_pmtu,
	.redirect		= dst_blackhole_redirect,
	.mtu			= dst_blackhole_mtu,
};

struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rtable *ort = (struct rtable *) dst_orig;
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, DST_OBSOLETE_DEAD, 0);
	if (rt) {
		struct dst_entry *new = &rt->dst;

		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard_out;

		new->dev = net->loopback_dev;
		netdev_hold(new->dev, &new->dev_tracker, GFP_ATOMIC);

		rt->rt_is_input = ort->rt_is_input;
		rt->rt_iif = ort->rt_iif;
		rt->rt_pmtu = ort->rt_pmtu;
		rt->rt_mtu_locked = ort->rt_mtu_locked;

		rt->rt_genid = rt_genid_ipv4(net);
		rt->rt_flags = ort->rt_flags;
		rt->rt_type = ort->rt_type;
		rt->rt_uses_gateway = ort->rt_uses_gateway;
		rt->rt_gw_family = ort->rt_gw_family;
		if (rt->rt_gw_family == AF_INET)
			rt->rt_gw4 = ort->rt_gw4;
		else if (rt->rt_gw_family == AF_INET6)
			rt->rt_gw6 = ort->rt_gw6;
	}

	dst_release(dst_orig);

	return rt ? &rt->dst : ERR_PTR(-ENOMEM);
}

struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
				    const struct sock *sk)
{
	struct rtable *rt = __ip_route_output_key(net, flp4);

	if (IS_ERR(rt))
		return rt;

	if (flp4->flowi4_proto) {
		flp4->flowi4_oif = rt->dst.dev->ifindex;
		rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
							flowi4_to_flowi(flp4),
							sk, 0);
	}

	return rt;
}
EXPORT_SYMBOL_GPL(ip_route_output_flow);
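
/* Build an RTM_NEWROUTE message describing @rt.  Shared by RTM_GETROUTE
 * replies and by the exception dump in fnhe_dump_bucket() below, which
 * passes a NULL fl4.
 */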
/* called with rcu_read_lock held */
static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
			struct rtable *rt, u32 table_id, struct flowi4 *fl4,
			struct sk_buff *skb, u32 portid, u32 seq,
			unsigned int flags)
{
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	unsigned long expires = 0;
	u32 error;
	u32 metrics[RTAX_MAX];

	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->rtm_family = AF_INET;
	r->rtm_dst_len = 32;
	r->rtm_src_len = 0;
	r->rtm_tos = fl4 ? fl4->flowi4_tos : 0;
	r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT;
	if (nla_put_u32(skb, RTA_TABLE, table_id))
		goto nla_put_failure;
	r->rtm_type = rt->rt_type;
	r->rtm_scope = RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;
	if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
		r->rtm_flags |= RTCF_DOREDIRECT;

	if (nla_put_in_addr(skb, RTA_DST, dst))
		goto nla_put_failure;
	if (src) {
		r->rtm_src_len = 32;
		if (nla_put_in_addr(skb, RTA_SRC, src))
			goto nla_put_failure;
	}
	if (rt->dst.dev &&
	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
		goto nla_put_failure;
	if (rt->dst.lwtstate &&
	    lwtunnel_fill_encap(skb, rt->dst.lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
		goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (rt->dst.tclassid &&
	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
		goto nla_put_failure;
#endif
	if (fl4 && !rt_is_input_route(rt) &&
	    fl4->saddr != src) {
		if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
			goto nla_put_failure;
	}
	if (rt->rt_uses_gateway) {
		if (rt->rt_gw_family == AF_INET &&
		    nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
			goto nla_put_failure;
		} else if (rt->rt_gw_family == AF_INET6) {
			int alen = sizeof(struct in6_addr);
			struct nlattr *nla;
			struct rtvia *via;

			nla = nla_reserve(skb, RTA_VIA, alen + 2);
			if (!nla)
				goto nla_put_failure;

			via = nla_data(nla);
			via->rtvia_family = AF_INET6;
			memcpy(via->rtvia_addr, &rt->rt_gw6, alen);
		}
	}

	expires = rt->dst.expires;
	if (expires) {
		unsigned long now = jiffies;

		if (time_before(now, expires))
			expires -= now;
		else
			expires = 0;
	}

	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
	if (rt->rt_pmtu && expires)
		metrics[RTAX_MTU - 1] = rt->rt_pmtu;
	if (rt->rt_mtu_locked && expires)
		metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
	if (rtnetlink_put_metrics(skb, metrics) < 0)
		goto nla_put_failure;

	if (fl4) {
		if (fl4->flowi4_mark &&
		    nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
			goto nla_put_failure;

		if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
		    nla_put_u32(skb, RTA_UID,
				from_kuid_munged(current_user_ns(),
						 fl4->flowi4_uid)))
			goto nla_put_failure;

		if (rt_is_input_route(rt)) {
#ifdef CONFIG_IP_MROUTE
			if (ipv4_is_multicast(dst) &&
			    !ipv4_is_local_multicast(dst) &&
			    IPV4_DEVCONF_ALL_RO(net, MC_FORWARDING)) {
				int err = ipmr_get_route(net, skb,
							 fl4->saddr, fl4->daddr,
							 r, portid);

				if (err <= 0) {
					if (err == 0)
						return 0;
					goto nla_put_failure;
				}
			} else
#endif
				if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif))
					goto nla_put_failure;
		}
	}

	error = rt->dst.error;

	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
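
/* Emit one RTM_NEWROUTE message per valid, unexpired exception in one
 * nexthop exception hash bucket, skipping entries before fa_start.
 */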
static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
			    struct netlink_callback *cb, u32 table_id,
			    struct fnhe_hash_bucket *bucket, int genid,
			    int *fa_index, int fa_start, unsigned int flags)
{
	int i;

	for (i = 0; i < FNHE_HASH_SIZE; i++) {
		struct fib_nh_exception *fnhe;

		for (fnhe = rcu_dereference(bucket[i].chain); fnhe;
		     fnhe = rcu_dereference(fnhe->fnhe_next)) {
			struct rtable *rt;
			int err;

			if (*fa_index < fa_start)
				goto next;

			if (fnhe->fnhe_genid != genid)
				goto next;

			if (fnhe->fnhe_expires &&
			    time_after(jiffies, fnhe->fnhe_expires))
				goto next;

			rt = rcu_dereference(fnhe->fnhe_rth_input);
			if (!rt)
				rt = rcu_dereference(fnhe->fnhe_rth_output);
			if (!rt)
				goto next;

			err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
					   table_id, NULL, skb,
					   NETLINK_CB(cb->skb).portid,
					   cb->nlh->nlmsg_seq, flags);
			if (err)
				return err;
next:
			(*fa_index)++;
		}
	}

	return 0;
}

int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
		       u32 table_id, struct fib_info *fi,
		       int *fa_index, int fa_start, unsigned int flags)
{
	struct net *net = sock_net(cb->skb->sk);
	int nhsel, genid = fnhe_genid(net);

	for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
		struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);
		struct fnhe_hash_bucket *bucket;
		int err;

		if (nhc->nhc_flags & RTNH_F_DEAD)
			continue;

		rcu_read_lock();
		bucket = rcu_dereference(nhc->nhc_exceptions);
		err = 0;
		if (bucket)
			err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
					       genid, fa_index, fa_start,
					       flags);
		rcu_read_unlock();
		if (err)
			return err;
	}

	return 0;
}

static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
						   u8 ip_proto, __be16 sport,
						   __be16 dport)
{
	struct sk_buff *skb;
	struct iphdr *iph;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return NULL;

	/* Reserve room for dummy headers, this skb can pass
	 * through a good chunk of the routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	iph = skb_put(skb, sizeof(struct iphdr));
	iph->protocol = ip_proto;
	iph->saddr = src;
	iph->daddr = dst;
	iph->version = 0x4;
	iph->frag_off = 0;
	iph->ihl = 0x5;
	skb_set_transport_header(skb, skb->len);

	switch (iph->protocol) {
	case IPPROTO_UDP: {
		struct udphdr *udph;

		udph = skb_put_zero(skb, sizeof(struct udphdr));
		udph->source = sport;
		udph->dest = dport;
		udph->len = htons(sizeof(struct udphdr));
		udph->check = 0;
		break;
	}
	case IPPROTO_TCP: {
		struct tcphdr *tcph;

		tcph = skb_put_zero(skb, sizeof(struct tcphdr));
		tcph->source = sport;
		tcph->dest = dport;
		tcph->doff = sizeof(struct tcphdr) / 4;
		tcph->rst = 1;
		tcph->check = ~tcp_v4_check(sizeof(struct tcphdr),
					    src, dst, 0);
		break;
	}
	case IPPROTO_ICMP: {
		struct icmphdr *icmph;

		icmph = skb_put_zero(skb, sizeof(struct icmphdr));
		icmph->type = ICMP_ECHO;
		icmph->code = 0;
	}
	}

	return skb;
}
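
/* Validate an RTM_GETROUTE request.  The full, strict checks only apply
 * when the requesting socket enabled NETLINK_GET_STRICT_CHK; legacy
 * sockets get the lenient deprecated parser.
 */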
static int inet_rtm_valid_getroute_req(struct sk_buff *skb,
				       const struct nlmsghdr *nlh,
				       struct nlattr **tb,
				       struct netlink_ext_ack *extack)
{
	struct rtmsg *rtm;
	int i, err;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
		NL_SET_ERR_MSG(extack,
			       "ipv4: Invalid header for route get request");
		return -EINVAL;
	}

	if (!netlink_strict_get_check(skb))
		return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
					      rtm_ipv4_policy, extack);

	rtm = nlmsg_data(nlh);
	if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
	    (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
	    rtm->rtm_table || rtm->rtm_protocol ||
	    rtm->rtm_scope || rtm->rtm_type) {
		NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for route get request");
		return -EINVAL;
	}

	if (rtm->rtm_flags & ~(RTM_F_NOTIFY |
			       RTM_F_LOOKUP_TABLE |
			       RTM_F_FIB_MATCH)) {
		NL_SET_ERR_MSG(extack, "ipv4: Unsupported rtm_flags for route get request");
		return -EINVAL;
	}

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
					    rtm_ipv4_policy, extack);
	if (err)
		return err;

	if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
	    (tb[RTA_DST] && !rtm->rtm_dst_len)) {
		NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
		return -EINVAL;
	}

	for (i = 0; i <= RTA_MAX; i++) {
		if (!tb[i])
			continue;

		switch (i) {
		case RTA_IIF:
		case RTA_OIF:
		case RTA_SRC:
		case RTA_DST:
		case RTA_IP_PROTO:
		case RTA_SPORT:
		case RTA_DPORT:
		case RTA_MARK:
		case RTA_UID:
			break;
		default:
			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in route get request");
			return -EINVAL;
		}
	}

	return 0;
}
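
/* RTM_GETROUTE handler - the kernel side of "ip route get", e.g.
 * "ip route get 8.8.8.8".  A dummy packet is built from the requested
 * addresses, ports and protocol, run through either the input (if RTA_IIF
 * is present) or the output route lookup, and the result is returned as
 * an RTM_NEWROUTE message.
 */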
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[RTA_MAX+1];
	u32 table_id = RT_TABLE_MAIN;
	__be16 sport = 0, dport = 0;
	struct fib_result res = {};
	u8 ip_proto = IPPROTO_UDP;
	struct rtable *rt = NULL;
	struct sk_buff *skb;
	struct rtmsg *rtm;
	struct flowi4 fl4 = {};
	__be32 dst = 0;
	__be32 src = 0;
	kuid_t uid;
	u32 iif;
	int err;
	int mark;

	err = inet_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
	if (err < 0)
		return err;

	rtm = nlmsg_data(nlh);
	src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
	if (tb[RTA_UID])
		uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
	else
		uid = (iif ? INVALID_UID : current_uid());

	if (tb[RTA_IP_PROTO]) {
		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
						  &ip_proto, AF_INET, extack);
		if (err)
			return err;
	}

	if (tb[RTA_SPORT])
		sport = nla_get_be16(tb[RTA_SPORT]);

	if (tb[RTA_DPORT])
		dport = nla_get_be16(tb[RTA_DPORT]);

	skb = inet_rtm_getroute_build_skb(src, dst, ip_proto, sport, dport);
	if (!skb)
		return -ENOBUFS;

	fl4.daddr = dst;
	fl4.saddr = src;
	fl4.flowi4_tos = rtm->rtm_tos & IPTOS_RT_MASK;
	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
	fl4.flowi4_mark = mark;
	fl4.flowi4_uid = uid;
	if (sport)
		fl4.fl4_sport = sport;
	if (dport)
		fl4.fl4_dport = dport;
	fl4.flowi4_proto = ip_proto;

	rcu_read_lock();

	if (iif) {
		struct net_device *dev;

		dev = dev_get_by_index_rcu(net, iif);
		if (!dev) {
			err = -ENODEV;
			goto errout_rcu;
		}

		fl4.flowi4_iif = iif; /* for rt_fill_info */
		skb->dev = dev;
		skb->mark = mark;
		err = ip_route_input_rcu(skb, dst, src,
					 rtm->rtm_tos & IPTOS_RT_MASK, dev,
					 &res);

		rt = skb_rtable(skb);
		if (err == 0 && rt->dst.error)
			err = -rt->dst.error;
	} else {
		fl4.flowi4_iif = LOOPBACK_IFINDEX;
		skb->dev = net->loopback_dev;
		rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
		err = 0;
		if (IS_ERR(rt))
			err = PTR_ERR(rt);
		else
			skb_dst_set(skb, &rt->dst);
	}

	if (err)
		goto errout_rcu;

	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
		table_id = res.table ? res.table->tb_id : 0;

	/* reset skb for netlink reply msg */
	skb_trim(skb, 0);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_header(skb);

	if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
		struct fib_rt_info fri;

		if (!res.fi) {
			err = fib_props[res.type].error;
			if (!err)
				err = -EHOSTUNREACH;
			goto errout_rcu;
		}
		fri.fi = res.fi;
		fri.tb_id = table_id;
		fri.dst = res.prefix;
		fri.dst_len = res.prefixlen;
		fri.dscp = inet_dsfield_to_dscp(fl4.flowi4_tos);
		fri.type = rt->rt_type;
		fri.offload = 0;
		fri.trap = 0;
		fri.offload_failed = 0;
		if (res.fa_head) {
			struct fib_alias *fa;

			hlist_for_each_entry_rcu(fa, res.fa_head, fa_list) {
				u8 slen = 32 - fri.dst_len;

				if (fa->fa_slen == slen &&
				    fa->tb_id == fri.tb_id &&
				    fa->fa_dscp == fri.dscp &&
				    fa->fa_info == res.fi &&
				    fa->fa_type == fri.type) {
					fri.offload = READ_ONCE(fa->offload);
					fri.trap = READ_ONCE(fa->trap);
					fri.offload_failed =
						READ_ONCE(fa->offload_failed);
					break;
				}
			}
		}
		err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
				    nlh->nlmsg_seq, RTM_NEWROUTE, &fri, 0);
	} else {
		err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
				   NETLINK_CB(in_skb).portid,
				   nlh->nlmsg_seq, 0);
	}
	if (err < 0)
		goto errout_rcu;

	rcu_read_unlock();

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);

errout_free:
	return err;
errout_rcu:
	rcu_read_unlock();
	kfree_skb(skb);
	goto errout_free;
}

void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(dev_net(in_dev->dev));
}

#ifdef CONFIG_SYSCTL
static int ip_rt_gc_interval __read_mostly = 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
static int ip_rt_gc_elasticity __read_mostly = 8;
static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;
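
/* net.ipv4.route.flush is write-only; writing any value, e.g.
 * "echo 1 > /proc/sys/net/ipv4/route/flush", invalidates all cached routes
 * and bumps the fnhe genid, which expires the accumulated exceptions.
 */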
static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
				     void *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net = (struct net *)__ctl->extra1;

	if (write) {
		rt_cache_flush(net);
		fnhe_genid_bump(net);
		return 0;
	}

	return -EINVAL;
}

static struct ctl_table ipv4_route_table[] = {
	{
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */

		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static const char ipv4_route_flush_procname[] = "flush";

static struct ctl_table ipv4_route_netns_table[] = {
	{
		.procname	= ipv4_route_flush_procname,
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv4_sysctl_rtcache_flush,
	},
	{
		.procname	= "min_pmtu",
		.data		= &init_net.ipv4.ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &ip_min_valid_pmtu,
	},
	{
		.procname	= "mtu_expires",
		.data		= &init_net.ipv4.ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &init_net.ipv4.ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ },
};
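
/* Register the per-netns route sysctls.  Namespaces other than init_net
 * get their own copy of the table, with the data pointers rebased from
 * init_net to the current netns; the "flush" entry instead carries its
 * netns in extra1.
 */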
static __net_init int sysctl_route_net_init(struct net *net)
{
	struct ctl_table *tbl;
	size_t table_size = ARRAY_SIZE(ipv4_route_netns_table);

	tbl = ipv4_route_netns_table;
	if (!net_eq(net, &init_net)) {
		int i;

		tbl = kmemdup(tbl, sizeof(ipv4_route_netns_table), GFP_KERNEL);
		if (!tbl)
			goto err_dup;

		/* Don't export non-whitelisted sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns) {
			if (tbl[0].procname != ipv4_route_flush_procname) {
				tbl[0].procname = NULL;
				table_size = 0;
			}
		}

		/* Update the variables to point into the current struct net,
		 * except for the first element, flush.
		 */
		for (i = 1; i < ARRAY_SIZE(ipv4_route_netns_table) - 1; i++)
			tbl[i].data += (void *)net - (void *)&init_net;
	}
	tbl[0].extra1 = net;

	net->ipv4.route_hdr = register_net_sysctl_sz(net, "net/ipv4/route",
						     tbl, table_size);
	if (!net->ipv4.route_hdr)
		goto err_reg;
	return 0;

err_reg:
	if (tbl != ipv4_route_netns_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}

static __net_exit void sysctl_route_net_exit(struct net *net)
{
	struct ctl_table *tbl;

	tbl = net->ipv4.route_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.route_hdr);
	BUG_ON(tbl == ipv4_route_netns_table);
	kfree(tbl);
}

static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
#endif

static __net_init int netns_ip_rt_init(struct net *net)
{
	/* Set default values for namespaceified sysctls */
	net->ipv4.ip_rt_min_pmtu = DEFAULT_MIN_PMTU;
	net->ipv4.ip_rt_mtu_expires = DEFAULT_MTU_EXPIRES;
	net->ipv4.ip_rt_min_advmss = DEFAULT_MIN_ADVMSS;
	return 0;
}

static struct pernet_operations __net_initdata ip_rt_ops = {
	.init = netns_ip_rt_init,
};

static __net_init int rt_genid_init(struct net *net)
{
	atomic_set(&net->ipv4.rt_genid, 0);
	atomic_set(&net->fnhe_genid, 0);
	atomic_set(&net->ipv4.dev_addr_genid, get_random_u32());
	return 0;
}

static __net_initdata struct pernet_operations rt_genid_ops = {
	.init = rt_genid_init,
};

static int __net_init ipv4_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv4.peers = bp;
	return 0;
}

static void __net_exit ipv4_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv4.peers;

	net->ipv4.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}

static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
	.init = ipv4_inetpeer_init,
	.exit = ipv4_inetpeer_exit,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_IP_ROUTE_CLASSID */
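
/* Boot-time initialisation of the IPv4 routing layer: the IP identifier
 * hash, the per-cpu uncached lists, the dst slab caches, /proc files,
 * the RTM_GETROUTE handler and the pernet subsystems defined above.
 */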
int __init ip_rt_init(void)
{
	void *idents_hash;
	int cpu;

	/* For modern hosts, this will use 2 MB of memory */
	idents_hash = alloc_large_system_hash("IP idents",
					      sizeof(*ip_idents) + sizeof(*ip_tstamps),
					      0,
					      16, /* one bucket per 64 KB */
					      HASH_ZERO,
					      NULL,
					      &ip_idents_mask,
					      2048,
					      256*1024);

	ip_idents = idents_hash;

	get_random_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents));

	ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents);

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		INIT_LIST_HEAD(&ul->quarantine);
		spin_lock_init(&ul->lock);
	}
#ifdef CONFIG_IP_ROUTE_CLASSID
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

	ipv4_dst_ops.kmem_cachep = KMEM_CACHE(rtable,
					      SLAB_HWCACHE_ALIGN | SLAB_PANIC);

	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	if (dst_entries_init(&ipv4_dst_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_ops counter\n");

	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");

	ipv4_dst_ops.gc_thresh = ~0;
	ip_rt_max_size = INT_MAX;

	devinet_init();
	ip_fib_init();

	if (ip_rt_proc_init())
		pr_err("Unable to create route proc files\n");
#ifdef CONFIG_XFRM
	xfrm_init();
	xfrm4_init();
#endif
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);

#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
	register_pernet_subsys(&ip_rt_ops);
	register_pernet_subsys(&rt_genid_ops);
	register_pernet_subsys(&ipv4_inetpeer_ops);
	return 0;
}

#ifdef CONFIG_SYSCTL
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
}
#endif