// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	:	MSS actually. Also added the window
 *					clamper.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 *		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 *	Ilia Sotnikov		:	Removed TOS from hash calculations
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/socket.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/inet_dscp.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/nexthop.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <net/secure_seq.h>
#include <net/ip_tunnels.h>

#include "fib_lookup.h"

#define RT_GC_TIMEOUT (300*HZ)

#define DEFAULT_MIN_PMTU (512 + 20 + 20)
#define DEFAULT_MTU_EXPIRES (10 * 60 * HZ)
#define DEFAULT_MIN_ADVMSS 256
static int ip_rt_max_size;
static int ip_rt_redirect_number __read_mostly = 9;
static int ip_rt_redirect_load __read_mostly = HZ / 50;
static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly = HZ;
static int ip_rt_error_burst __read_mostly = 5 * HZ;
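/* Note: the default ip_rt_redirect_silence equals ip_rt_redirect_load shifted
 * left by (ip_rt_redirect_number + 1), so the quiet period that resets the
 * redirect backoff roughly covers one full exponentially backed-off burst.
 */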

static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;

/*
 *	Interface to generic destination cache.
 */

INDIRECT_CALLABLE_SCOPE
struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
INDIRECT_CALLABLE_SCOPE
unsigned int ipv4_mtu(const struct dst_entry *dst);
static void ipv4_negative_advice(struct sock *sk,
				 struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			      struct sk_buff *skb, u32 mtu,
			      bool confirm_neigh);
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
			   struct sk_buff *skb);
static void ipv4_dst_destroy(struct dst_entry *dst);

static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	WARN_ON(1);
	return NULL;
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr);
static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);

static struct dst_ops ipv4_dst_ops = {
	.family = AF_INET,
	.check = ipv4_dst_check,
	.default_advmss = ipv4_default_advmss,
	.mtu = ipv4_mtu,
	.cow_metrics = ipv4_cow_metrics,
	.destroy = ipv4_dst_destroy,
	.negative_advice = ipv4_negative_advice,
	.link_failure = ipv4_link_failure,
	.update_pmtu = ip_rt_update_pmtu,
	.redirect = ip_do_redirect,
	.local_out = __ip_local_out,
	.neigh_lookup = ipv4_neigh_lookup,
	.confirm_neigh = ipv4_confirm_neigh,
};

#define ECN_OR_COST(class) TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
EXPORT_SYMBOL(ip_tos2prio);

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#ifndef CONFIG_PREEMPT_RT
#define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
#else
#define RT_CACHE_STAT_INC(field) this_cpu_inc(rt_cache_stat.field)
#endif

#ifdef CONFIG_PROC_FS
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos)
		return NULL;
	return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
	.start = rt_cache_seq_start,
	.next = rt_cache_seq_next,
	.stop = rt_cache_seq_stop,
	.show = rt_cache_seq_show,
};

static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

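/* The *pos cursor stores "last visited CPU + 1", so the walk over possible
 * CPUs can resume at the right place across successive seq_file reads.
 */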
static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	(*pos)++;
	return NULL;

}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{

}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x "
			"%08x %08x %08x %08x %08x %08x "
			"%08x %08x %08x %08x\n",
		   dst_entries_get_slow(&ipv4_dst_ops),
		   0, /* st->in_hit */
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,

		   0, /* st->out_hit */
		   st->out_slow_tot,
		   st->out_slow_mc,

		   0, /* st->gc_total */
		   0, /* st->gc_ignored */
		   0, /* st->gc_goal_miss */
		   0, /* st->gc_dst_overflow */
		   0, /* st->in_hlist_search */
		   0  /* st->out_hlist_search */
		);
	return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
	.start = rt_cpu_seq_start,
	.next = rt_cpu_seq_next,
	.stop = rt_cpu_seq_stop,
	.show = rt_cpu_seq_show,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
	struct ip_rt_acct *dst, *src;
	unsigned int i, j;

	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
		for (j = 0; j < 256; j++) {
			dst[j].o_bytes += src[j].o_bytes;
			dst[j].o_packets += src[j].o_packets;
			dst[j].i_bytes += src[j].i_bytes;
			dst[j].i_packets += src[j].i_packets;
		}
	}

	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
	kfree(dst);
	return 0;
}
#endif

static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_create_seq("rt_cache", 0444, net->proc_net,
			      &rt_cache_seq_ops);
	if (!pde)
		goto err1;

	pde = proc_create_seq("rt_cache", 0444, net->proc_net_stat,
			      &rt_cpu_seq_ops);
	if (!pde)
		goto err2;

#ifdef CONFIG_IP_ROUTE_CLASSID
	pde = proc_create_single("rt_acct", 0, net->proc_net,
				 rt_acct_proc_show);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
	remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata = {
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */

static inline bool rt_is_expired(const struct rtable *rth)
{
	bool res;

	rcu_read_lock();
	res = rth->rt_genid != rt_genid_ipv4(dev_net_rcu(rth->dst.dev));
	rcu_read_unlock();

	return res;
}

void rt_cache_flush(struct net *net)
{
	rt_genid_bump_ipv4(net);
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	const struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net_device *dev = dst_dev(dst);
	struct neighbour *n;

	rcu_read_lock();

	if (likely(rt->rt_gw_family == AF_INET)) {
		n = ip_neigh_gw4(dev, rt->rt_gw4);
	} else if (rt->rt_gw_family == AF_INET6) {
		n = ip_neigh_gw6(dev, &rt->rt_gw6);
	} else {
		__be32 pkey;

		pkey = skb ? ip_hdr(skb)->daddr : *((__be32 *) daddr);
		n = ip_neigh_gw4(dev, pkey);
	}

	if (!IS_ERR(n) && !refcount_inc_not_zero(&n->refcnt))
		n = NULL;

	rcu_read_unlock();

	return n;
}

static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	const struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net_device *dev = dst_dev(dst);
	const __be32 *pkey = daddr;

	if (rt->rt_gw_family == AF_INET) {
		pkey = (const __be32 *)&rt->rt_gw4;
	} else if (rt->rt_gw_family == AF_INET6) {
		return __ipv6_confirm_neigh_stub(dev, &rt->rt_gw6);
	} else if (!daddr ||
		   (rt->rt_flags &
		    (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL))) {
		return;
	}
	__ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
}

/* Hash tables of size 2048..262144 depending on RAM size.
 * Each bucket uses 8 bytes.
 */
static u32 ip_idents_mask __read_mostly;
static atomic_t *ip_idents __read_mostly;
static u32 *ip_tstamps __read_mostly;

/* In order to protect privacy, we add a perturbation to identifiers
 * if one generator is seldom used. This makes it hard for an attacker
 * to infer how many packets were sent between two points in time.
 */
static u32 ip_idents_reserve(u32 hash, int segs)
{
	u32 bucket, old, now = (u32)jiffies;
	atomic_t *p_id;
	u32 *p_tstamp;
	u32 delta = 0;

	bucket = hash & ip_idents_mask;
	p_tstamp = ip_tstamps + bucket;
	p_id = ip_idents + bucket;
	old = READ_ONCE(*p_tstamp);

	if (old != now && cmpxchg(p_tstamp, old, now) == old)
		delta = get_random_u32_below(now - old);

	/* If UBSAN reports an error here, first make sure your compiler
	 * supports -fno-strict-overflow: that was a bug in UBSAN, and it
	 * has been fixed in GCC 8.
	 */
	return atomic_add_return(segs + delta, p_id) - segs;
}

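/* Example: with segs == 3, atomic_add_return() advances the generator by
 * 3 + delta and the caller gets back the first ID of its block of three.
 * The random delta, applied only when the bucket was not used in the current
 * jiffy, hides how many IDs were consumed between two observations.
 */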
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
{
	u32 hash, id;

	/* Note the following code is not safe, but this is okay. */
	if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
		get_random_bytes(&net->ipv4.ip_id_key,
				 sizeof(net->ipv4.ip_id_key));

	hash = siphash_3u32((__force u32)iph->daddr,
			    (__force u32)iph->saddr,
			    iph->protocol,
			    &net->ipv4.ip_id_key);
	id = ip_idents_reserve(hash, segs);
	iph->id = htons(id);
}
EXPORT_SYMBOL(__ip_select_ident);

static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
			     const struct sock *sk, const struct iphdr *iph,
			     int oif, __u8 tos, u8 prot, u32 mark,
			     int flow_flags)
{
	__u8 scope = RT_SCOPE_UNIVERSE;

	if (sk) {
		oif = sk->sk_bound_dev_if;
		mark = READ_ONCE(sk->sk_mark);
		tos = ip_sock_rt_tos(sk);
		scope = ip_sock_rt_scope(sk);
		prot = inet_test_bit(HDRINCL, sk) ? IPPROTO_RAW :
						    sk->sk_protocol;
	}

	flowi4_init_output(fl4, oif, mark, tos & INET_DSCP_MASK, scope,
			   prot, flow_flags, iph->daddr, iph->saddr, 0, 0,
			   sock_net_uid(net, sk));
}

static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
			       const struct sock *sk)
{
	const struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = ip_hdr(skb);
	int oif = skb->dev->ifindex;
	u8 prot = iph->protocol;
	u32 mark = skb->mark;
	__u8 tos = iph->tos;

	__build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
}

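/* Like build_skb_flow_key(), but for locally generated traffic where only the
 * socket is known; a source route option redirects the key's daddr to the
 * first hop (faddr).
 */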
static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	flowi4_init_output(fl4, sk->sk_bound_dev_if, READ_ONCE(sk->sk_mark),
			   ip_sock_rt_tos(sk),
			   ip_sock_rt_scope(sk),
			   inet_test_bit(HDRINCL, sk) ?
				IPPROTO_RAW : sk->sk_protocol,
			   inet_sk_flowi_flags(sk),
			   daddr, inet->inet_saddr, 0, 0,
			   sk_uid(sk));
	rcu_read_unlock();
}

static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
				 const struct sk_buff *skb)
{
	if (skb)
		build_skb_flow_key(fl4, skb, sk);
	else
		build_sk_flow_key(fl4, sk);
}

static DEFINE_SPINLOCK(fnhe_lock);

static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
{
	struct rtable *rt;

	rt = rcu_dereference(fnhe->fnhe_rth_input);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
		dst_dev_put(&rt->dst);
		dst_release(&rt->dst);
	}
	rt = rcu_dereference(fnhe->fnhe_rth_output);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
		dst_dev_put(&rt->dst);
		dst_release(&rt->dst);
	}
}

static void fnhe_remove_oldest(struct fnhe_hash_bucket *hash)
{
	struct fib_nh_exception __rcu **fnhe_p, **oldest_p;
	struct fib_nh_exception *fnhe, *oldest = NULL;

	for (fnhe_p = &hash->chain; ; fnhe_p = &fnhe->fnhe_next) {
		fnhe = rcu_dereference_protected(*fnhe_p,
						 lockdep_is_held(&fnhe_lock));
		if (!fnhe)
			break;
		if (!oldest ||
		    time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp)) {
			oldest = fnhe;
			oldest_p = fnhe_p;
		}
	}
	fnhe_flush_routes(oldest);
	*oldest_p = oldest->fnhe_next;
	kfree_rcu(oldest, rcu);
}

static u32 fnhe_hashfun(__be32 daddr)
{
	static siphash_aligned_key_t fnhe_hash_key;
	u64 hval;

	net_get_random_once(&fnhe_hash_key, sizeof(fnhe_hash_key));
	hval = siphash_1u32((__force u32)daddr, &fnhe_hash_key);
	return hash_64(hval, FNHE_HASH_SHIFT);
}

static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
{
	rt->rt_pmtu = fnhe->fnhe_pmtu;
	rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
	rt->dst.expires = fnhe->fnhe_expires;

	if (fnhe->fnhe_gw) {
		rt->rt_flags |= RTCF_REDIRECTED;
		rt->rt_uses_gateway = 1;
		rt->rt_gw_family = AF_INET;
		rt->rt_gw4 = fnhe->fnhe_gw;
	}
}

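/* Nexthop exceptions (struct fib_nh_exception) hold per-destination state
 * learned from ICMP, such as a redirected gateway or a reduced path MTU, in
 * a small per-nexthop hash table. Updates are serialized by fnhe_lock;
 * lookups run under RCU.
 */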
static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
				  __be32 gw, u32 pmtu, bool lock,
				  unsigned long expires)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe;
	struct rtable *rt;
	u32 genid, hval;
	unsigned int i;
	int depth;

	genid = fnhe_genid(dev_net(nhc->nhc_dev));
	hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference(nhc->nhc_exceptions);
	if (!hash) {
		hash = kcalloc(FNHE_HASH_SIZE, sizeof(*hash), GFP_ATOMIC);
		if (!hash)
			goto out_unlock;
		rcu_assign_pointer(nhc->nhc_exceptions, hash);
	}

	hash += hval;

	depth = 0;
	for (fnhe = rcu_dereference(hash->chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr)
			break;
		depth++;
	}

	if (fnhe) {
		if (fnhe->fnhe_genid != genid)
			fnhe->fnhe_genid = genid;
		if (gw)
			fnhe->fnhe_gw = gw;
		if (pmtu) {
			fnhe->fnhe_pmtu = pmtu;
			fnhe->fnhe_mtu_locked = lock;
		}
		fnhe->fnhe_expires = max(1UL, expires);
		/* Update all cached dsts too */
		rt = rcu_dereference(fnhe->fnhe_rth_input);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
		rt = rcu_dereference(fnhe->fnhe_rth_output);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
	} else {
		/* Randomize max depth to avoid some side channel attacks. */
		int max_depth = FNHE_RECLAIM_DEPTH +
				get_random_u32_below(FNHE_RECLAIM_DEPTH);

		while (depth > max_depth) {
			fnhe_remove_oldest(hash);
			depth--;
		}

		fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
		if (!fnhe)
			goto out_unlock;

		fnhe->fnhe_next = hash->chain;

		fnhe->fnhe_genid = genid;
		fnhe->fnhe_daddr = daddr;
		fnhe->fnhe_gw = gw;
		fnhe->fnhe_pmtu = pmtu;
		fnhe->fnhe_mtu_locked = lock;
		fnhe->fnhe_expires = max(1UL, expires);

		rcu_assign_pointer(hash->chain, fnhe);

		/* Exception created; mark the cached routes for the nexthop
		 * stale, so anyone caching it rechecks if this exception
		 * applies to them.
		 */
		rt = rcu_dereference(nhc->nhc_rth_input);
		if (rt)
			WRITE_ONCE(rt->dst.obsolete, DST_OBSOLETE_KILL);

		for_each_possible_cpu(i) {
			struct rtable __rcu **prt;

			prt = per_cpu_ptr(nhc->nhc_pcpu_rth_output, i);
			rt = rcu_dereference(*prt);
			if (rt)
				WRITE_ONCE(rt->dst.obsolete, DST_OBSOLETE_KILL);
		}
	}

	fnhe->fnhe_stamp = jiffies;

out_unlock:
	spin_unlock_bh(&fnhe_lock);
}

static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
			     bool kill_route)
{
	__be32 new_gw = icmp_hdr(skb)->un.gateway;
	__be32 old_gw = ip_hdr(skb)->saddr;
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct fib_result res;
	struct neighbour *n;
	struct net *net;

	switch (icmp_hdr(skb)->code & 7) {
	case ICMP_REDIR_NET:
	case ICMP_REDIR_NETTOS:
	case ICMP_REDIR_HOST:
	case ICMP_REDIR_HOSTTOS:
		break;

	default:
		return;
	}

	if (rt->rt_gw_family != AF_INET || rt->rt_gw4 != old_gw)
		return;

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev)
		return;

	net = dev_net(dev);
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
	    ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	n = __ipv4_neigh_lookup(rt->dst.dev, (__force u32)new_gw);
	if (!n)
		n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
	if (!IS_ERR(n)) {
		if (!(READ_ONCE(n->nud_state) & NUD_VALID)) {
			neigh_event_send(n, NULL);
		} else {
			if (fib_lookup(net, fl4, &res, 0) == 0) {
				struct fib_nh_common *nhc;

				fib_select_path(net, &res, fl4, skb);
				nhc = FIB_RES_NHC(res);
				update_or_create_fnhe(nhc, fl4->daddr, new_gw,
						      0, false,
						      jiffies + ip_rt_gc_timeout);
			}
			if (kill_route)
				WRITE_ONCE(rt->dst.obsolete, DST_OBSOLETE_KILL);
			call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
		}
		neigh_release(n);
	}
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev)) {
		const struct iphdr *iph = (const struct iphdr *) skb->data;
		__be32 daddr = iph->daddr;
		__be32 saddr = iph->saddr;

		net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
				     " Advised path = %pI4 -> %pI4\n",
				     &old_gw, dev->name, &new_gw,
				     &saddr, &daddr);
	}
#endif
	;
}

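/* dst_ops->redirect handler: rebuild the flow key from the offending packet
 * (or the socket, if one is given) and apply the redirect, killing the
 * current cached route so that it gets looked up again.
 */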
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt;
	struct flowi4 fl4;
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct net *net = dev_net(skb->dev);
	int oif = skb->dev->ifindex;
	u8 prot = iph->protocol;
	u32 mark = skb->mark;
	__u8 tos = iph->tos;

	rt = dst_rtable(dst);

	__build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
	__ip_do_redirect(rt, skb, &fl4, true);
}

static void ipv4_negative_advice(struct sock *sk,
				 struct dst_entry *dst)
{
	struct rtable *rt = dst_rtable(dst);

	if ((READ_ONCE(dst->obsolete) > 0) ||
	    (rt->rt_flags & RTCF_REDIRECTED) ||
	    READ_ONCE(rt->dst.expires))
		sk_dst_reset(sk);
}

/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot the redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */

void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct in_device *in_dev;
	struct inet_peer *peer;
	struct net *net;
	int log_martians;
	int vif;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(rt->dst.dev);
	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
		rcu_read_unlock();
		return;
	}
	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
	vif = l3mdev_master_ifindex_rcu(rt->dst.dev);

	net = dev_net(rt->dst.dev);
	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif);
	if (!peer) {
		rcu_read_unlock();
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
			  rt_nexthop(rt, ip_hdr(skb)->daddr));
		return;
	}

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
		peer->rate_tokens = 0;
		peer->n_redirects = 0;
	}

	/* Too many ignored redirects; do not send anything.
	 * Set peer->rate_last to the time of the last seen redirected packet.
	 */
	if (peer->n_redirects >= ip_rt_redirect_number) {
		peer->rate_last = jiffies;
		goto out_unlock;
	}

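	/* With the defaults (load = HZ/50, number = 9) each subsequent
	 * redirect is sent no sooner than roughly 20ms << n_redirects after
	 * the previous one, so a host that keeps ignoring us costs at most
	 * ip_rt_redirect_number redirects per silence window.
	 */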
	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (peer->n_redirects == 0 ||
	    time_after(jiffies,
		       (peer->rate_last +
			(ip_rt_redirect_load << peer->n_redirects)))) {
		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);

		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
		peer->rate_last = jiffies;
		++peer->n_redirects;
		if (IS_ENABLED(CONFIG_IP_ROUTE_VERBOSE) && log_martians &&
		    peer->n_redirects == ip_rt_redirect_number)
			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
					     &ip_hdr(skb)->saddr, inet_iif(skb),
					     &ip_hdr(skb)->daddr, &gw);
	}
out_unlock:
	rcu_read_unlock();
}

static int ip_error(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct inet_peer *peer;
	unsigned long now;
	struct net *net;
	SKB_DR(reason);
	bool send;
	int code;

	if (netif_is_l3_master(skb->dev)) {
		dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
		if (!dev)
			goto out;
	}

	in_dev = __in_dev_get_rcu(dev);

	/* IP on this device is disabled. */
	if (!in_dev)
		goto out;

	net = dev_net(rt->dst.dev);
	if (!IN_DEV_FORWARD(in_dev)) {
		switch (rt->dst.error) {
		case EHOSTUNREACH:
			SKB_DR_SET(reason, IP_INADDRERRORS);
			__IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
			break;

		case ENETUNREACH:
			SKB_DR_SET(reason, IP_INNOROUTES);
			__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
			break;
		}
		goto out;
	}

	switch (rt->dst.error) {
	case EINVAL:
	default:
		goto out;
	case EHOSTUNREACH:
		code = ICMP_HOST_UNREACH;
		break;
	case ENETUNREACH:
		code = ICMP_NET_UNREACH;
		SKB_DR_SET(reason, IP_INNOROUTES);
		__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
		break;
	case EACCES:
		code = ICMP_PKT_FILTERED;
		break;
	}

	rcu_read_lock();
	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
			       l3mdev_master_ifindex_rcu(skb->dev));
	send = true;
	if (peer) {
		now = jiffies;
		peer->rate_tokens += now - peer->rate_last;
		if (peer->rate_tokens > ip_rt_error_burst)
			peer->rate_tokens = ip_rt_error_burst;
		peer->rate_last = now;
		if (peer->rate_tokens >= ip_rt_error_cost)
			peer->rate_tokens -= ip_rt_error_cost;
		else
			send = false;
	}
	rcu_read_unlock();

	if (send)
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:	kfree_skb_reason(skb, reason);
	return 0;
}

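/* Record a smaller path MTU learned for this flow as a nexthop exception.
 * Values below the ip_rt_min_pmtu sysctl are clamped to that floor and
 * locked. For multipath routes the exception is installed on every nexthop,
 * since the ICMP error may not hash to the path the flow actually uses.
 */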
static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
	struct dst_entry *dst = &rt->dst;
	struct fib_result res;
	bool lock = false;
	struct net *net;
	u32 old_mtu;

	if (ip_mtu_locked(dst))
		return;

	old_mtu = ipv4_mtu(dst);
	if (old_mtu < mtu)
		return;

	rcu_read_lock();
	net = dev_net_rcu(dst_dev(dst));
	if (mtu < net->ipv4.ip_rt_min_pmtu) {
		lock = true;
		mtu = min(old_mtu, net->ipv4.ip_rt_min_pmtu);
	}

	if (rt->rt_pmtu == mtu && !lock &&
	    time_before(jiffies, READ_ONCE(dst->expires) -
				 net->ipv4.ip_rt_mtu_expires / 2))
		goto out;

	if (fib_lookup(net, fl4, &res, 0) == 0) {
		struct fib_nh_common *nhc;

		fib_select_path(net, &res, fl4, NULL);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		if (fib_info_num_path(res.fi) > 1) {
			int nhsel;

			for (nhsel = 0; nhsel < fib_info_num_path(res.fi); nhsel++) {
				nhc = fib_info_nhc(res.fi, nhsel);
				update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
						      jiffies + net->ipv4.ip_rt_mtu_expires);
			}
			goto out;
		}
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
		nhc = FIB_RES_NHC(res);
		update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
				      jiffies + net->ipv4.ip_rt_mtu_expires);
	}
out:
	rcu_read_unlock();
}

static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			      struct sk_buff *skb, u32 mtu,
			      bool confirm_neigh)
{
	struct rtable *rt = dst_rtable(dst);
	struct flowi4 fl4;

	ip_rt_build_flow_key(&fl4, sk, skb);

	/* Don't make lookup fail for bridged encapsulations */
	if (skb && netif_is_any_bridge_port(skb->dev))
		fl4.flowi4_oif = 0;

	__ip_rt_update_pmtu(rt, &fl4, mtu);
}

void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
		      int oif, u8 protocol)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	u32 mark = IP4_REPLY_MARK(net, skb->mark);

	__build_flow_key(net, &fl4, NULL, iph, oif, iph->tos, protocol, mark,
			 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_update_pmtu);

static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);

	if (!fl4.flowi4_mark)
		fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);

	rt = __ip_route_output_key(sock_net(sk), &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}

void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct dst_entry *odst = NULL;
	bool new = false;
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);

	if (!ip_sk_accept_pmtu(sk))
		goto out;

	odst = sk_dst_get(sk);

	if (sock_owned_by_user(sk) || !odst) {
		__ipv4_sk_update_pmtu(skb, sk, mtu);
		goto out;
	}

	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);

	rt = dst_rtable(odst);
	if (READ_ONCE(odst->obsolete) && !odst->ops->check(odst, 0)) {
		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	__ip_rt_update_pmtu(dst_rtable(xfrm_dst_path(&rt->dst)), &fl4, mtu);

	if (!dst_check(&rt->dst, 0)) {
		if (new)
			dst_release(&rt->dst);

		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	if (new)
		sk_dst_set(sk, &rt->dst);

out:
	bh_unlock_sock(sk);
	dst_release(odst);
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);

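/* Protocol entry points for handling an incoming ICMP_REDIRECT: rebuild the
 * flow key from the quoted header and apply the redirect to the freshly
 * looked-up route without killing it (kill_route == false).
 */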
void ipv4_redirect(struct sk_buff *skb, struct net *net,
		   int oif, u8 protocol)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(net, &fl4, NULL, iph, oif, iph->tos, protocol, 0, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_redirect);

void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct net *net = sock_net(sk);

	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_sk_redirect);

INDIRECT_CALLABLE_SCOPE struct dst_entry *ipv4_dst_check(struct dst_entry *dst,
							 u32 cookie)
{
	struct rtable *rt = dst_rtable(dst);

	/* All IPV4 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 *
	 * When a PMTU/redirect information update invalidates a route,
	 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
	 * DST_OBSOLETE_DEAD.
	 */
	if (READ_ONCE(dst->obsolete) != DST_OBSOLETE_FORCE_CHK ||
	    rt_is_expired(rt))
		return NULL;
	return dst;
}
EXPORT_INDIRECT_CALLABLE(ipv4_dst_check);

static void ipv4_send_dest_unreach(struct sk_buff *skb)
{
	struct net_device *dev;
	struct ip_options opt;
	int res;

	/* Recompile ip options since IPCB may not be valid anymore.
	 * Also check we have a reasonable ipv4 header.
	 */
	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
	    ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
		return;

	memset(&opt, 0, sizeof(opt));
	if (ip_hdr(skb)->ihl > 5) {
		if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
			return;
		opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);

		rcu_read_lock();
		dev = skb->dev ? skb->dev : skb_rtable(skb)->dst.dev;
		res = __ip_options_compile(dev_net(dev), &opt, skb, NULL);
		rcu_read_unlock();

		if (res)
			return;
	}
	__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
}

static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	ipv4_send_dest_unreach(skb);

	rt = skb_rtable(skb);
	if (rt)
		dst_set_expires(&rt->dst, 0);
}

static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	pr_debug("%s: %pI4 -> %pI4, %s\n",
		 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
		 skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	WARN_ON(1);
	return 0;
}

/*
 * We do not cache the source address of the outgoing interface,
 * because it is used only by IP RR, TS and SRR options,
 * so it stays out of the fast path.
 *
 * BTW remember: "addr" is allowed to be not aligned
 * in IP options!
 */

void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
{
	__be32 src;

	if (rt_is_output_route(rt))
		src = ip_hdr(skb)->saddr;
	else {
		struct fib_result res;
		struct iphdr *iph = ip_hdr(skb);
		struct flowi4 fl4 = {
			.daddr = iph->daddr,
			.saddr = iph->saddr,
			.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(iph)),
			.flowi4_oif = rt->dst.dev->ifindex,
			.flowi4_iif = skb->dev->ifindex,
			.flowi4_mark = skb->mark,
		};

		rcu_read_lock();
		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
			src = fib_result_prefsrc(dev_net(rt->dst.dev), &res);
		else
			src = inet_select_addr(rt->dst.dev,
					       rt_nexthop(rt, iph->daddr),
					       RT_SCOPE_UNIVERSE);
		rcu_read_unlock();
	}
	memcpy(addr, &src, 4);
}

#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->dst.tclassid & 0xFFFF))
		rt->dst.tclassid |= tag & 0xFFFF;
	if (!(rt->dst.tclassid & 0xFFFF0000))
		rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif

static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
{
	unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
	unsigned int advmss;
	struct net *net;

	rcu_read_lock();
	net = dev_net_rcu(dst_dev(dst));
	advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
		       net->ipv4.ip_rt_min_advmss);
	rcu_read_unlock();

	return min(advmss, IPV4_MAX_PMTU - header_size);
}

INDIRECT_CALLABLE_SCOPE unsigned int ipv4_mtu(const struct dst_entry *dst)
{
	return ip_dst_mtu_maybe_forward(dst, false);
}
EXPORT_INDIRECT_CALLABLE(ipv4_mtu);

static void ip_del_fnhe(struct fib_nh_common *nhc, __be32 daddr)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
	u32 hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference_protected(nhc->nhc_exceptions,
					 lockdep_is_held(&fnhe_lock));
	hash += hval;

	fnhe_p = &hash->chain;
	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
	while (fnhe) {
		if (fnhe->fnhe_daddr == daddr) {
			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
			/* set fnhe_daddr to 0 to ensure it won't bind with
			 * new dsts in rt_bind_exception().
			 */
			fnhe->fnhe_daddr = 0;
			fnhe_flush_routes(fnhe);
			kfree_rcu(fnhe, rcu);
			break;
		}
		fnhe_p = &fnhe->fnhe_next;
		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
						 lockdep_is_held(&fnhe_lock));
	}

	spin_unlock_bh(&fnhe_lock);
}

static struct fib_nh_exception *find_exception(struct fib_nh_common *nhc,
					       __be32 daddr)
{
	struct fnhe_hash_bucket *hash = rcu_dereference(nhc->nhc_exceptions);
	struct fib_nh_exception *fnhe;
	u32 hval;

	if (!hash)
		return NULL;

	hval = fnhe_hashfun(daddr);

	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr) {
			if (fnhe->fnhe_expires &&
			    time_after(jiffies, fnhe->fnhe_expires)) {
				ip_del_fnhe(nhc, daddr);
				break;
			}
			return fnhe;
		}
	}
	return NULL;
}

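/* Note that find_exception() is also where stale entries are reaped: a hit
 * whose fnhe_expires has passed is unlinked via ip_del_fnhe() instead of
 * being returned.
 */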
/* MTU selection:
 * 1. mtu on route is locked - use it
 * 2. mtu from nexthop exception
 * 3. mtu from egress device
 */

u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
{
	struct fib_nh_common *nhc = res->nhc;
	struct net_device *dev = nhc->nhc_dev;
	struct fib_info *fi = res->fi;
	u32 mtu = 0;

	if (READ_ONCE(dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu) ||
	    fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU))
		mtu = fi->fib_mtu;

	if (likely(!mtu)) {
		struct fib_nh_exception *fnhe;

		fnhe = find_exception(nhc, daddr);
		if (fnhe && !time_after_eq(jiffies, fnhe->fnhe_expires))
			mtu = fnhe->fnhe_pmtu;
	}

	if (likely(!mtu))
		mtu = min(READ_ONCE(dev->mtu), IP_MAX_MTU);

	return mtu - lwtunnel_headroom(nhc->nhc_lwtstate, mtu);
}

static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
			      __be32 daddr, const bool do_cache)
{
	bool ret = false;

	spin_lock_bh(&fnhe_lock);

	if (daddr == fnhe->fnhe_daddr) {
		struct rtable __rcu **porig;
		struct rtable *orig;
		int genid = fnhe_genid(dev_net(rt->dst.dev));

		if (rt_is_input_route(rt))
			porig = &fnhe->fnhe_rth_input;
		else
			porig = &fnhe->fnhe_rth_output;
		orig = rcu_dereference(*porig);

		if (fnhe->fnhe_genid != genid) {
			fnhe->fnhe_genid = genid;
			fnhe->fnhe_gw = 0;
			fnhe->fnhe_pmtu = 0;
			fnhe->fnhe_expires = 0;
			fnhe->fnhe_mtu_locked = false;
			fnhe_flush_routes(fnhe);
			orig = NULL;
		}
		fill_route_from_fnhe(rt, fnhe);
		if (!rt->rt_gw4) {
			rt->rt_gw4 = daddr;
			rt->rt_gw_family = AF_INET;
		}

		if (do_cache) {
			dst_hold(&rt->dst);
			rcu_assign_pointer(*porig, rt);
			if (orig) {
				dst_dev_put(&orig->dst);
				dst_release(&orig->dst);
			}
			ret = true;
		}

		fnhe->fnhe_stamp = jiffies;
	}
	spin_unlock_bh(&fnhe_lock);

	return ret;
}

static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
{
	struct rtable *orig, *prev, **p;
	bool ret = true;

	if (rt_is_input_route(rt)) {
		p = (struct rtable **)&nhc->nhc_rth_input;
	} else {
		p = (struct rtable **)raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
	}
	orig = *p;

	/* hold dst before doing cmpxchg() to avoid race condition
	 * on this dst
	 */
	dst_hold(&rt->dst);
	prev = cmpxchg(p, orig, rt);
	if (prev == orig) {
		if (orig) {
			rt_add_uncached_list(orig);
			dst_release(&orig->dst);
		}
	} else {
		dst_release(&rt->dst);
		ret = false;
	}

	return ret;
}

struct uncached_list {
	spinlock_t lock;
	struct list_head head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);

void rt_add_uncached_list(struct rtable *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);

	rt->dst.rt_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->dst.rt_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}

void rt_del_uncached_list(struct rtable *rt)
{
	if (!list_empty(&rt->dst.rt_uncached)) {
		struct uncached_list *ul = rt->dst.rt_uncached_list;

		spin_lock_bh(&ul->lock);
		list_del_init(&rt->dst.rt_uncached);
		spin_unlock_bh(&ul->lock);
	}
}

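/* dst_ops->destroy: drop the metrics reference and unlink the route from the
 * per-CPU uncached list (a no-op if it was never added there).
 */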
static void ipv4_dst_destroy(struct dst_entry *dst)
{
	ip_dst_metrics_put(dst);
	rt_del_uncached_list(dst_rtable(dst));
}

void rt_flush_dev(struct net_device *dev)
{
	struct rtable *rt, *safe;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

		if (list_empty(&ul->head))
			continue;

		spin_lock_bh(&ul->lock);
		list_for_each_entry_safe(rt, safe, &ul->head, dst.rt_uncached) {
			if (rt->dst.dev != dev)
				continue;
			rt->dst.dev = blackhole_netdev;
			netdev_ref_replace(dev, blackhole_netdev,
					   &rt->dst.dev_tracker, GFP_ATOMIC);
			list_del_init(&rt->dst.rt_uncached);
		}
		spin_unlock_bh(&ul->lock);
	}
}

static bool rt_cache_valid(const struct rtable *rt)
{
	return rt &&
	       READ_ONCE(rt->dst.obsolete) == DST_OBSOLETE_FORCE_CHK &&
	       !rt_is_expired(rt);
}

static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
			   const struct fib_result *res,
			   struct fib_nh_exception *fnhe,
			   struct fib_info *fi, u16 type, u32 itag,
			   const bool do_cache)
{
	bool cached = false;

	if (fi) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);

		if (nhc->nhc_gw_family && nhc->nhc_scope == RT_SCOPE_LINK) {
			rt->rt_uses_gateway = 1;
			rt->rt_gw_family = nhc->nhc_gw_family;
			/* only INET and INET6 are supported */
			if (likely(nhc->nhc_gw_family == AF_INET))
				rt->rt_gw4 = nhc->nhc_gw.ipv4;
			else
				rt->rt_gw6 = nhc->nhc_gw.ipv6;
		}

		ip_dst_init_metrics(&rt->dst, fi->fib_metrics);

#ifdef CONFIG_IP_ROUTE_CLASSID
		if (nhc->nhc_family == AF_INET) {
			struct fib_nh *nh;

			nh = container_of(nhc, struct fib_nh, nh_common);
			rt->dst.tclassid = nh->nh_tclassid;
		}
#endif
		rt->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
		if (unlikely(fnhe))
			cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
		else if (do_cache)
			cached = rt_cache_route(nhc, rt);
		if (unlikely(!cached)) {
			/* Routes we intend to cache in nexthop exception or
			 * FIB nexthop have the DST_NOCACHE bit clear.
			 * However, if we are unsuccessful at storing this
			 * route into the cache we really need to set it.
			 */
			if (!rt->rt_gw4) {
				rt->rt_gw_family = AF_INET;
				rt->rt_gw4 = daddr;
			}
			rt_add_uncached_list(rt);
		}
	} else
		rt_add_uncached_list(rt);

#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, res->tclassid);
#endif
	set_class_tag(rt, itag);
#endif
}

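/* Allocate a struct rtable and initialize its IPv4-private fields. The dst
 * starts out as DST_OBSOLETE_FORCE_CHK, so every user is forced back through
 * ipv4_dst_check() to revalidate it.
 */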
struct rtable *rt_dst_alloc(struct net_device *dev,
			    unsigned int flags, u16 type,
			    bool noxfrm)
{
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_ops, dev, DST_OBSOLETE_FORCE_CHK,
		       (noxfrm ? DST_NOXFRM : 0));

	if (rt) {
		rt->rt_genid = rt_genid_ipv4(dev_net(dev));
		rt->rt_flags = flags;
		rt->rt_type = type;
		rt->rt_is_input = 0;
		rt->rt_iif = 0;
		rt->rt_pmtu = 0;
		rt->rt_mtu_locked = 0;
		rt->rt_uses_gateway = 0;
		rt->rt_gw_family = 0;
		rt->rt_gw4 = 0;

		rt->dst.output = ip_output;
		if (flags & RTCF_LOCAL)
			rt->dst.input = ip_local_deliver;
	}

	return rt;
}
EXPORT_SYMBOL(rt_dst_alloc);

struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt)
{
	struct rtable *new_rt;

	new_rt = dst_alloc(&ipv4_dst_ops, dev, DST_OBSOLETE_FORCE_CHK,
			   rt->dst.flags);

	if (new_rt) {
		new_rt->rt_genid = rt_genid_ipv4(dev_net(dev));
		new_rt->rt_flags = rt->rt_flags;
		new_rt->rt_type = rt->rt_type;
		new_rt->rt_is_input = rt->rt_is_input;
		new_rt->rt_iif = rt->rt_iif;
		new_rt->rt_pmtu = rt->rt_pmtu;
		new_rt->rt_mtu_locked = rt->rt_mtu_locked;
		new_rt->rt_gw_family = rt->rt_gw_family;
		if (rt->rt_gw_family == AF_INET)
			new_rt->rt_gw4 = rt->rt_gw4;
		else if (rt->rt_gw_family == AF_INET6)
			new_rt->rt_gw6 = rt->rt_gw6;

		new_rt->dst.input = READ_ONCE(rt->dst.input);
		new_rt->dst.output = READ_ONCE(rt->dst.output);
		new_rt->dst.error = rt->dst.error;
		new_rt->dst.lastuse = jiffies;
		new_rt->dst.lwtstate = lwtstate_get(rt->dst.lwtstate);
	}
	return new_rt;
}
EXPORT_SYMBOL(rt_dst_clone);

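/* ip_mc_validate_source() returns SKB_NOT_DROPPED_YET (i.e. 0) when the
 * source address passes the multicast sanity checks, otherwise the drop
 * reason the caller should feed to kfree_skb_reason().
 */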
/* called in rcu_read_lock() section */
enum skb_drop_reason
ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
		      dscp_t dscp, struct net_device *dev,
		      struct in_device *in_dev, u32 *itag)
{
	enum skb_drop_reason reason;

	/* Primary sanity checks. */
	if (!in_dev)
		return SKB_DROP_REASON_NOT_SPECIFIED;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
		return SKB_DROP_REASON_IP_INVALID_SOURCE;

	if (skb->protocol != htons(ETH_P_IP))
		return SKB_DROP_REASON_INVALID_PROTO;

	if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
		return SKB_DROP_REASON_IP_LOCALNET;

	if (ipv4_is_zeronet(saddr)) {
		if (!ipv4_is_local_multicast(daddr) &&
		    ip_hdr(skb)->protocol != IPPROTO_IGMP)
			return SKB_DROP_REASON_IP_INVALID_SOURCE;
	} else {
		reason = fib_validate_source_reason(skb, saddr, 0, dscp, 0,
						    dev, in_dev, itag);
		if (reason)
			return reason;
	}
	return SKB_NOT_DROPPED_YET;
}

/* called in rcu_read_lock() section */
static enum skb_drop_reason
ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
		  dscp_t dscp, struct net_device *dev, int our)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	unsigned int flags = RTCF_MULTICAST;
	enum skb_drop_reason reason;
	struct rtable *rth;
	u32 itag = 0;

	reason = ip_mc_validate_source(skb, daddr, saddr, dscp, dev, in_dev,
				       &itag);
	if (reason)
		return reason;

	if (our)
		flags |= RTCF_LOCAL;

	if (IN_DEV_ORCONF(in_dev, NOPOLICY))
		IPCB(skb)->flags |= IPSKB_NOPOLICY;

	rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
			   false);
	if (!rth)
		return SKB_DROP_REASON_NOMEM;

#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->dst.output = ip_rt_bug;
	rth->rt_is_input = 1;

#ifdef CONFIG_IP_MROUTE
	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
		rth->dst.input = ip_mr_input;
#endif
	RT_CACHE_STAT_INC(in_slow_mc);

	skb_dst_drop(skb);
	skb_dst_set(skb, &rth->dst);
	return SKB_NOT_DROPPED_YET;
}


static void ip_handle_martian_source(struct net_device *dev,
				     struct in_device *in_dev,
				     struct sk_buff *skb,
				     __be32 daddr,
				     __be32 saddr)
{
	RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 *	RFC1812 recommendation: if the source is martian,
		 *	the only hint is the MAC header.
		 */
		pr_warn("martian source %pI4 from %pI4, on dev %s\n",
			&daddr, &saddr, dev->name);
		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
			print_hex_dump(KERN_WARNING, "ll header: ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       skb_mac_header(skb),
				       dev->hard_header_len, false);
		}
	}
#endif
}

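/* Build (and normally cache) the forwarding route for a validated input
 * packet: validate the source against the FIB, decide whether an ICMP
 * redirect back out of the ingress interface is warranted, and attach the
 * resulting dst to the skb.
 */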
/* called in rcu_read_lock() section */
static enum skb_drop_reason
__mkroute_input(struct sk_buff *skb, const struct fib_result *res,
		struct in_device *in_dev, __be32 daddr,
		__be32 saddr, dscp_t dscp)
{
	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
	struct fib_nh_common *nhc = FIB_RES_NHC(*res);
	struct net_device *dev = nhc->nhc_dev;
	struct fib_nh_exception *fnhe;
	struct rtable *rth;
	int err;
	struct in_device *out_dev;
	bool do_cache;
	u32 itag = 0;

	/* get a working reference to the output device */
	out_dev = __in_dev_get_rcu(dev);
	if (!out_dev) {
		net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
		return reason;
	}

	err = fib_validate_source(skb, saddr, daddr, dscp, FIB_RES_OIF(*res),
				  in_dev->dev, in_dev, &itag);
	if (err < 0) {
		reason = -err;
		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
					 saddr);

		goto cleanup;
	}

	do_cache = res->fi && !itag;
	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
	    skb->protocol == htons(ETH_P_IP)) {
		__be32 gw;

		gw = nhc->nhc_gw_family == AF_INET ? nhc->nhc_gw.ipv4 : 0;
		if (IN_DEV_SHARED_MEDIA(out_dev) ||
		    inet_addr_onlink(out_dev, saddr, gw))
			IPCB(skb)->flags |= IPSKB_DOREDIRECT;
	}

	if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create a route, if it is
		 * invalid for proxy arp. DNAT routes are always valid.
		 *
		 * The proxy arp feature has been extended to allow ARP
		 * replies back to the same interface, to support
		 * Private VLAN switch technologies. See arp.c.
		 */
		if (out_dev == in_dev &&
		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
			reason = SKB_DROP_REASON_ARP_PVLAN_DISABLE;
			goto cleanup;
		}
	}

	if (IN_DEV_ORCONF(in_dev, NOPOLICY))
		IPCB(skb)->flags |= IPSKB_NOPOLICY;

	fnhe = find_exception(nhc, daddr);
	if (do_cache) {
		if (fnhe)
			rth = rcu_dereference(fnhe->fnhe_rth_input);
		else
			rth = rcu_dereference(nhc->nhc_rth_input);
		if (rt_cache_valid(rth)) {
			skb_dst_set_noref(skb, &rth->dst);
			goto out;
		}
	}

	rth = rt_dst_alloc(out_dev->dev, 0, res->type,
			   IN_DEV_ORCONF(out_dev, NOXFRM));
	if (!rth) {
		reason = SKB_DROP_REASON_NOMEM;
		goto cleanup;
	}

	rth->rt_is_input = 1;
	RT_CACHE_STAT_INC(in_slow_tot);

	rth->dst.input = ip_forward;

	rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
		       do_cache);
	lwtunnel_set_redirect(&rth->dst);
	skb_dst_set(skb, &rth->dst);
out:
	reason = SKB_NOT_DROPPED_YET;
cleanup:
	return reason;
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
/* To make ICMP packets follow the right flow, the multipath hash is
 * calculated from the inner IP addresses.
 */
static void ip_multipath_l3_keys(const struct sk_buff *skb,
				 struct flow_keys *hash_keys)
{
	const struct iphdr *outer_iph = ip_hdr(skb);
	const struct iphdr *key_iph = outer_iph;
	const struct iphdr *inner_iph;
	const struct icmphdr *icmph;
	struct iphdr _inner_iph;
	struct icmphdr _icmph;

	if (likely(outer_iph->protocol != IPPROTO_ICMP))
		goto out;

	if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
		goto out;

	icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
				   &_icmph);
	if (!icmph)
		goto out;

	if (!icmp_is_err(icmph->type))
		goto out;

	inner_iph = skb_header_pointer(skb,
				       outer_iph->ihl * 4 + sizeof(_icmph),
				       sizeof(_inner_iph), &_inner_iph);
	if (!inner_iph)
		goto out;

	key_iph = inner_iph;
out:
	hash_keys->addrs.v4addrs.src = key_iph->saddr;
	hash_keys->addrs.v4addrs.dst = key_iph->daddr;
}

static u32 fib_multipath_custom_hash_outer(const struct net *net,
					   const struct sk_buff *skb,
					   bool *p_has_inner)
{
	u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
	struct flow_keys keys, hash_keys;

	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
		return 0;

	memset(&hash_keys, 0, sizeof(hash_keys));
	skb_flow_dissect_flow_keys(skb, &keys, FLOW_DISSECTOR_F_STOP_AT_ENCAP);

	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
		hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
		hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
		hash_keys.basic.ip_proto = keys.basic.ip_proto;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
		hash_keys.ports.src = keys.ports.src;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
		hash_keys.ports.dst = keys.ports.dst;

	*p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION);
	return fib_multipath_hash_from_keys(net, &hash_keys);
}

static u32 fib_multipath_custom_hash_inner(const struct net *net,
					   const struct sk_buff *skb,
					   bool has_inner)
{
	u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
	struct flow_keys keys, hash_keys;

	/* We assume the packet carries an encapsulation, but if none was
	 * encountered during dissection of the outer flow, then there is no
	 * point in calling the flow dissector again.
	 */
	if (!has_inner)
		return 0;

	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK))
		return 0;

	memset(&hash_keys, 0, sizeof(hash_keys));
	skb_flow_dissect_flow_keys(skb, &keys, 0);

	if (!(keys.control.flags & FLOW_DIS_ENCAPSULATION))
		return 0;

	if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
			hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
			hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
	} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
			hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
			hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
			hash_keys.tags.flow_label = keys.tags.flow_label;
	}

	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
		hash_keys.basic.ip_proto = keys.basic.ip_proto;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
		hash_keys.ports.src = keys.ports.src;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
		hash_keys.ports.dst = keys.ports.dst;

	return fib_multipath_hash_from_keys(net, &hash_keys);
}

static u32 fib_multipath_custom_hash_skb(const struct net *net,
					 const struct sk_buff *skb)
{
	u32 mhash, mhash_inner;
	bool has_inner = true;

	mhash = fib_multipath_custom_hash_outer(net, skb, &has_inner);
	mhash_inner = fib_multipath_custom_hash_inner(net, skb, has_inner);

	return jhash_2words(mhash, mhash_inner, 0);
}

static u32 fib_multipath_custom_hash_fl4(const struct net *net,
					 const struct flowi4 *fl4)
{
	u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
	struct flow_keys hash_keys;

	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
		return 0;

	memset(&hash_keys, 0, sizeof(hash_keys));
	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
		hash_keys.addrs.v4addrs.src = fl4->saddr;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
		hash_keys.addrs.v4addrs.dst = fl4->daddr;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
		hash_keys.basic.ip_proto = fl4->flowi4_proto;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT) {
		if (fl4->flowi4_flags & FLOWI_FLAG_ANY_SPORT)
			hash_keys.ports.src = (__force __be16)get_random_u16();
		else
			hash_keys.ports.src = fl4->fl4_sport;
	}
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
		hash_keys.ports.dst = fl4->fl4_dport;

	return fib_multipath_hash_from_keys(net, &hash_keys);
}

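/* Hash policies, selected by the fib_multipath_hash_policy sysctl:
 *   0 - L3 (source and destination addresses)
 *   1 - L4 (five-tuple)
 *   2 - L3, or inner L3 for encapsulated packets
 *   3 - custom field set from the fib_multipath_hash_fields sysctl
 */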
fl4->flowi4_multipath_hash : 0; 2064 struct flow_keys hash_keys; 2065 u32 mhash = 0; 2066 2067 switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) { 2068 case 0: 2069 memset(&hash_keys, 0, sizeof(hash_keys)); 2070 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; 2071 if (skb) { 2072 ip_multipath_l3_keys(skb, &hash_keys); 2073 } else { 2074 hash_keys.addrs.v4addrs.src = fl4->saddr; 2075 hash_keys.addrs.v4addrs.dst = fl4->daddr; 2076 } 2077 mhash = fib_multipath_hash_from_keys(net, &hash_keys); 2078 break; 2079 case 1: 2080 /* skb is currently provided only when forwarding */ 2081 if (skb) { 2082 unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP; 2083 struct flow_keys keys; 2084 2085 /* short-circuit if we already have L4 hash present */ 2086 if (skb->l4_hash) 2087 return skb_get_hash_raw(skb) >> 1; 2088 2089 memset(&hash_keys, 0, sizeof(hash_keys)); 2090 2091 if (!flkeys) { 2092 skb_flow_dissect_flow_keys(skb, &keys, flag); 2093 flkeys = &keys; 2094 } 2095 2096 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; 2097 hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src; 2098 hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst; 2099 hash_keys.ports.src = flkeys->ports.src; 2100 hash_keys.ports.dst = flkeys->ports.dst; 2101 hash_keys.basic.ip_proto = flkeys->basic.ip_proto; 2102 } else { 2103 memset(&hash_keys, 0, sizeof(hash_keys)); 2104 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; 2105 hash_keys.addrs.v4addrs.src = fl4->saddr; 2106 hash_keys.addrs.v4addrs.dst = fl4->daddr; 2107 if (fl4->flowi4_flags & FLOWI_FLAG_ANY_SPORT) 2108 hash_keys.ports.src = (__force __be16)get_random_u16(); 2109 else 2110 hash_keys.ports.src = fl4->fl4_sport; 2111 hash_keys.ports.dst = fl4->fl4_dport; 2112 hash_keys.basic.ip_proto = fl4->flowi4_proto; 2113 } 2114 mhash = fib_multipath_hash_from_keys(net, &hash_keys); 2115 break; 2116 case 2: 2117 memset(&hash_keys, 0, sizeof(hash_keys)); 2118 /* skb is currently provided only when forwarding */ 2119 if (skb) { 2120 struct flow_keys keys; 2121 2122 skb_flow_dissect_flow_keys(skb, &keys, 0); 2123 /* Inner can be v4 or v6 */ 2124 if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 2125 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; 2126 hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src; 2127 hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst; 2128 } else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 2129 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 2130 hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src; 2131 hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst; 2132 hash_keys.tags.flow_label = keys.tags.flow_label; 2133 hash_keys.basic.ip_proto = keys.basic.ip_proto; 2134 } else { 2135 /* Same as case 0 */ 2136 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; 2137 ip_multipath_l3_keys(skb, &hash_keys); 2138 } 2139 } else { 2140 /* Same as case 0 */ 2141 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; 2142 hash_keys.addrs.v4addrs.src = fl4->saddr; 2143 hash_keys.addrs.v4addrs.dst = fl4->daddr; 2144 } 2145 mhash = fib_multipath_hash_from_keys(net, &hash_keys); 2146 break; 2147 case 3: 2148 if (skb) 2149 mhash = fib_multipath_custom_hash_skb(net, skb); 2150 else 2151 mhash = fib_multipath_custom_hash_fl4(net, fl4); 2152 break; 2153 } 2154 2155 if (multipath_hash) 2156 mhash = jhash_2words(mhash, multipath_hash, 0); 2157 2158 return mhash >> 1; 2159 } 2160 #endif /* CONFIG_IP_ROUTE_MULTIPATH */ 2161 2162 static enum 
skb_drop_reason
ip_mkroute_input(struct sk_buff *skb, struct fib_result *res,
		 struct in_device *in_dev, __be32 daddr,
		 __be32 saddr, dscp_t dscp, struct flow_keys *hkeys)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res->fi && fib_info_num_path(res->fi) > 1) {
		int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);

		fib_select_multipath(res, h, NULL);
		IPCB(skb)->flags |= IPSKB_MULTIPATH;
	}
#endif

	/* create a routing cache entry */
	return __mkroute_input(skb, res, in_dev, daddr, saddr, dscp);
}

/* Implements the same saddr-related checks as ip_route_input_slow(),
 * assuming daddr is valid and the destination is not a local broadcast one.
 * Uses the provided hint instead of performing a route lookup.
 */
enum skb_drop_reason
ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
		  dscp_t dscp, struct net_device *dev,
		  const struct sk_buff *hint)
{
	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct rtable *rt = skb_rtable(hint);
	struct net *net = dev_net(dev);
	u32 tag = 0;

	if (!in_dev)
		return reason;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr)) {
		reason = SKB_DROP_REASON_IP_INVALID_SOURCE;
		goto martian_source;
	}

	if (ipv4_is_zeronet(saddr)) {
		reason = SKB_DROP_REASON_IP_INVALID_SOURCE;
		goto martian_source;
	}

	if (ipv4_is_loopback(saddr) && !IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) {
		reason = SKB_DROP_REASON_IP_LOCALNET;
		goto martian_source;
	}

	if (rt->rt_type != RTN_LOCAL)
		goto skip_validate_source;

	reason = fib_validate_source_reason(skb, saddr, daddr, dscp, 0, dev,
					    in_dev, &tag);
	if (reason)
		goto martian_source;

skip_validate_source:
	skb_dst_copy(skb, hint);
	return SKB_NOT_DROPPED_YET;

martian_source:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	return reason;
}

/* get device for dst_alloc with local routes */
static struct net_device *ip_rt_get_dev(struct net *net,
					const struct fib_result *res)
{
	struct fib_nh_common *nhc = res->fi ? res->nhc : NULL;
	struct net_device *dev = NULL;

	if (nhc)
		dev = l3mdev_master_dev_rcu(nhc->nhc_dev);

	return dev ? : net->loopback_dev;
}

/*
 * NOTE. We drop all packets that have a local source address, because
 * every properly looped back packet must already have the correct
 * destination attached by the output routine.
 * Changes in the enforced policies must also be applied to
 * ip_route_use_hint().
 *
 * This approach solves two big problems:
 * 1. Non-simplex devices are handled properly.
 * 2. IP spoofing attempts are filtered with a 100% guarantee.
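 *
 * (As of this writing, the hint is supplied by the list-receive path:
 * ip_rcv_finish_core() passes the previous skb of a batch as @hint when
 * consecutive packets share daddr and TOS, so only the saddr checks of
 * ip_route_use_hint() above are repeated.)
 *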
 * called with rcu_read_lock()
 */

static enum skb_drop_reason
ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
		    dscp_t dscp, struct net_device *dev,
		    struct fib_result *res)
{
	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct flow_keys *flkeys = NULL, _flkeys;
	struct net *net = dev_net(dev);
	struct ip_tunnel_info *tun_info;
	int err = -EINVAL;
	unsigned int flags = 0;
	u32 itag = 0;
	struct rtable *rth;
	struct flowi4 fl4;
	bool do_cache = true;

	/* IP on this device is disabled. */

	if (!in_dev)
		goto out;

	/* Check for the most weird martians, which cannot be detected
	 * by fib_lookup.
	 */

	tun_info = skb_tunnel_info(skb);
	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
		fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
	else
		fl4.flowi4_tun_key.tun_id = 0;
	skb_dst_drop(skb);

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr)) {
		reason = SKB_DROP_REASON_IP_INVALID_SOURCE;
		goto martian_source;
	}

	res->fi = NULL;
	res->table = NULL;
	if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
		goto brd_input;

	/* Accept zero addresses only to limited broadcast;
	 * I do not even know whether to fix it or not. Waiting for complaints :-)
	 */
	if (ipv4_is_zeronet(saddr)) {
		reason = SKB_DROP_REASON_IP_INVALID_SOURCE;
		goto martian_source;
	}

	if (ipv4_is_zeronet(daddr)) {
		reason = SKB_DROP_REASON_IP_INVALID_DEST;
		goto martian_destination;
	}

	/* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
	 * calling it at most once when daddr and/or saddr are loopback addresses.
	 */
	if (ipv4_is_loopback(daddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) {
			reason = SKB_DROP_REASON_IP_LOCALNET;
			goto martian_destination;
		}
	} else if (ipv4_is_loopback(saddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) {
			reason = SKB_DROP_REASON_IP_LOCALNET;
			goto martian_source;
		}
	}

	/*
	 * Now we are ready to route the packet.
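	 *
	 * The flow key built below is what the FIB sees; e.g. a forwarded
	 * TCP segment is looked up roughly as { iif = dev->ifindex,
	 * mark = skb->mark, dscp, saddr, daddr }, plus the L4 ports when
	 * fib4_rules_early_flow_dissect() ran (values illustrative).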
	 */
	fl4.flowi4_l3mdev = 0;
	fl4.flowi4_oif = 0;
	fl4.flowi4_iif = dev->ifindex;
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_tos = inet_dscp_to_dsfield(dscp);
	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
	fl4.flowi4_flags = 0;
	fl4.daddr = daddr;
	fl4.saddr = saddr;
	fl4.flowi4_uid = sock_net_uid(net, NULL);
	fl4.flowi4_multipath_hash = 0;

	if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
		flkeys = &_flkeys;
	} else {
		fl4.flowi4_proto = 0;
		fl4.fl4_sport = 0;
		fl4.fl4_dport = 0;
	}

	err = fib_lookup(net, &fl4, res, 0);
	if (err != 0) {
		if (!IN_DEV_FORWARD(in_dev))
			err = -EHOSTUNREACH;
		goto no_route;
	}

	if (res->type == RTN_BROADCAST) {
		if (IN_DEV_BFORWARD(in_dev))
			goto make_route;
		/* do not cache if bc_forwarding is enabled */
		if (IPV4_DEVCONF_ALL_RO(net, BC_FORWARDING))
			do_cache = false;
		goto brd_input;
	}

	err = -EINVAL;
	if (res->type == RTN_LOCAL) {
		reason = fib_validate_source_reason(skb, saddr, daddr, dscp,
						    0, dev, in_dev, &itag);
		if (reason)
			goto martian_source;
		goto local_input;
	}

	if (!IN_DEV_FORWARD(in_dev)) {
		err = -EHOSTUNREACH;
		goto no_route;
	}
	if (res->type != RTN_UNICAST) {
		reason = SKB_DROP_REASON_IP_INVALID_DEST;
		goto martian_destination;
	}

make_route:
	reason = ip_mkroute_input(skb, res, in_dev, daddr, saddr, dscp,
				  flkeys);

out:
	return reason;

brd_input:
	if (skb->protocol != htons(ETH_P_IP)) {
		reason = SKB_DROP_REASON_INVALID_PROTO;
		goto out;
	}

	if (!ipv4_is_zeronet(saddr)) {
		reason = fib_validate_source_reason(skb, saddr, 0, dscp, 0,
						    dev, in_dev, &itag);
		if (reason)
			goto martian_source;
	}
	flags |= RTCF_BROADCAST;
	res->type = RTN_BROADCAST;
	RT_CACHE_STAT_INC(in_brd);

local_input:
	if (IN_DEV_ORCONF(in_dev, NOPOLICY))
		IPCB(skb)->flags |= IPSKB_NOPOLICY;

	do_cache &= res->fi && !itag;
	if (do_cache) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);

		rth = rcu_dereference(nhc->nhc_rth_input);
		if (rt_cache_valid(rth)) {
			skb_dst_set_noref(skb, &rth->dst);
			reason = SKB_NOT_DROPPED_YET;
			goto out;
		}
	}

	rth = rt_dst_alloc(ip_rt_get_dev(net, res),
			   flags | RTCF_LOCAL, res->type, false);
	if (!rth)
		goto e_nobufs;

	rth->dst.output = ip_rt_bug;
#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->rt_is_input = 1;

	RT_CACHE_STAT_INC(in_slow_tot);
	if (res->type == RTN_UNREACHABLE) {
		rth->dst.input = ip_error;
		rth->dst.error = -err;
		rth->rt_flags &= ~RTCF_LOCAL;
	}

	if (do_cache) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);

		rth->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
		if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
			WARN_ON(rth->dst.input == lwtunnel_input);
			rth->dst.lwtstate->orig_input = rth->dst.input;
			rth->dst.input = lwtunnel_input;
		}

		if (unlikely(!rt_cache_route(nhc, rth)))
			rt_add_uncached_list(rth);
	}
	skb_dst_set(skb, &rth->dst);
	reason = SKB_NOT_DROPPED_YET;
	goto out;

no_route:
	RT_CACHE_STAT_INC(in_no_route);
	res->type = RTN_UNREACHABLE;
	res->fi = NULL;
	res->table = NULL;
	goto local_input;
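
	/* Example, assuming net.ipv4.conf.all.log_martians=1: a packet to a
	 * zeronet destination reaches martian_destination below and triggers
	 * the rate-limited "martian destination" warning before the drop.
	 */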
	/*
	 * Do not cache martian addresses: they should be logged (RFC1812)
	 */
martian_destination:
	RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev))
		net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
				     &daddr, &saddr, dev->name);
#endif
	goto out;

e_nobufs:
	reason = SKB_DROP_REASON_NOMEM;
	goto out;

martian_source:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	goto out;
}

/* called with rcu_read_lock held */
static enum skb_drop_reason
ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
		   dscp_t dscp, struct net_device *dev,
		   struct fib_result *res)
{
	/* Multicast recognition logic is moved from the route cache to here.
	 * The problem was that too many Ethernet cards have broken/missing
	 * hardware multicast filters :-( As a result, a host on a multicast
	 * network acquires a lot of useless route cache entries, e.g. for
	 * SDR messages from all over the world. Now we try to get rid of
	 * them. Really, provided the software IP multicast filter is
	 * organized reasonably (at least, hashed), it does not result in a
	 * slowdown compared with route cache reject entries.
	 * Note that multicast routers are not affected, because a route
	 * cache entry is created eventually.
	 */
	if (ipv4_is_multicast(daddr)) {
		enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
		struct in_device *in_dev = __in_dev_get_rcu(dev);
		int our = 0;

		if (!in_dev)
			return reason;

		our = ip_check_mc_rcu(in_dev, daddr, saddr,
				      ip_hdr(skb)->protocol);

		/* check l3 master if no match yet */
		if (!our && netif_is_l3_slave(dev)) {
			struct in_device *l3_in_dev;

			l3_in_dev = __in_dev_get_rcu(skb->dev);
			if (l3_in_dev)
				our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
						      ip_hdr(skb)->protocol);
		}

		if (our
#ifdef CONFIG_IP_MROUTE
		    ||
		    (!ipv4_is_local_multicast(daddr) &&
		     IN_DEV_MFORWARD(in_dev))
#endif
		   ) {
			reason = ip_route_input_mc(skb, daddr, saddr, dscp,
						   dev, our);
		}
		return reason;
	}

	return ip_route_input_slow(skb, daddr, saddr, dscp, dev, res);
}

enum skb_drop_reason ip_route_input_noref(struct sk_buff *skb, __be32 daddr,
					  __be32 saddr, dscp_t dscp,
					  struct net_device *dev)
{
	enum skb_drop_reason reason;
	struct fib_result res;

	rcu_read_lock();
	reason = ip_route_input_rcu(skb, daddr, saddr, dscp, dev, &res);
	rcu_read_unlock();

	return reason;
}
EXPORT_SYMBOL(ip_route_input_noref);

/* called with rcu_read_lock() */
static struct rtable *__mkroute_output(const struct fib_result *res,
				       const struct flowi4 *fl4, int orig_oif,
				       struct net_device *dev_out,
				       unsigned int flags)
{
	struct fib_info *fi = res->fi;
	struct fib_nh_exception *fnhe;
	struct in_device *in_dev;
	u16 type = res->type;
	struct rtable *rth;
	bool do_cache;

	in_dev = __in_dev_get_rcu(dev_out);
	if (!in_dev)
		return ERR_PTR(-EINVAL);

	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
		if (ipv4_is_loopback(fl4->saddr) &&
		    !(dev_out->flags & IFF_LOOPBACK) &&
		    !netif_is_l3_master(dev_out))
			return ERR_PTR(-EINVAL);

	if (ipv4_is_lbcast(fl4->daddr)) {
		type = RTN_BROADCAST;

		/* reset fi to prevent gateway resolution */
		fi = NULL;
	} else if (ipv4_is_multicast(fl4->daddr)) {
		type = RTN_MULTICAST;
	} else if (ipv4_is_zeronet(fl4->daddr)) {
		return ERR_PTR(-EINVAL);
	}

	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	do_cache = true;
	if (type == RTN_BROADCAST) {
		flags |= RTCF_BROADCAST | RTCF_LOCAL;
	} else if (type == RTN_MULTICAST) {
		flags |= RTCF_MULTICAST | RTCF_LOCAL;
		if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
				     fl4->flowi4_proto))
			flags &= ~RTCF_LOCAL;
		else
			do_cache = false;
		/* If a multicast route does not exist, use the default one,
		 * but do not use a gateway in this case.
		 * Yes, it is a hack.
		 */
		if (fi && res->prefixlen < 4)
			fi = NULL;
	} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
		   (orig_oif != dev_out->ifindex)) {
		/* For local routes that require a particular output interface
		 * we do not want to cache the result.  Caching the result
		 * causes incorrect behaviour when there are multiple source
		 * addresses on the interface, the end result being that if the
		 * intended recipient is waiting on that interface for the
		 * packet, he won't receive it because it will be delivered on
		 * the loopback interface and the IP_PKTINFO ipi_ifindex will
		 * be set to the loopback interface as well.
		 */
		do_cache = false;
	}

	fnhe = NULL;
	do_cache &= fi != NULL;
	if (fi) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
		struct rtable __rcu **prth;

		fnhe = find_exception(nhc, fl4->daddr);
		if (!do_cache)
			goto add;
		if (fnhe) {
			prth = &fnhe->fnhe_rth_output;
		} else {
			if (unlikely(fl4->flowi4_flags &
				     FLOWI_FLAG_KNOWN_NH &&
				     !(nhc->nhc_gw_family &&
				       nhc->nhc_scope == RT_SCOPE_LINK))) {
				do_cache = false;
				goto add;
			}
			prth = raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
		}
		rth = rcu_dereference(*prth);
		if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
			return rth;
	}

add:
	rth = rt_dst_alloc(dev_out, flags, type,
			   IN_DEV_ORCONF(in_dev, NOXFRM));
	if (!rth)
		return ERR_PTR(-ENOBUFS);

	rth->rt_iif = orig_oif;

	RT_CACHE_STAT_INC(out_slow_tot);

	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
		if (flags & RTCF_LOCAL &&
		    !(dev_out->flags & IFF_LOOPBACK)) {
			rth->dst.output = ip_mc_output;
			RT_CACHE_STAT_INC(out_slow_mc);
		}
#ifdef CONFIG_IP_MROUTE
		if (type == RTN_MULTICAST) {
			if (IN_DEV_MFORWARD(in_dev) &&
			    !ipv4_is_local_multicast(fl4->daddr)) {
				rth->dst.input = ip_mr_input;
				rth->dst.output = ip_mr_output;
			}
		}
#endif
	}

	rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache);
	lwtunnel_set_redirect(&rth->dst);

	return rth;
}

/*
 * Major route resolver routine.
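 *
 * A minimal caller sketch (error handling elided; ip_route_output_key()
 * is the usual wrapper from <net/route.h>):
 *
 *	struct flowi4 fl4 = { .daddr = dst, .flowi4_proto = IPPROTO_UDP };
 *	struct rtable *rt = ip_route_output_key(net, &fl4);
 *
 *	if (!IS_ERR(rt)) {
 *		... use rt->dst, then ip_rt_put(rt);
 *	}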
 */

struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
					const struct sk_buff *skb)
{
	struct fib_result res = {
		.type = RTN_UNSPEC,
		.fi = NULL,
		.table = NULL,
		.tclassid = 0,
	};
	struct rtable *rth;

	fl4->flowi4_iif = LOOPBACK_IFINDEX;
	fl4->flowi4_tos &= INET_DSCP_MASK;

	rcu_read_lock();
	rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
	rcu_read_unlock();

	return rth;
}
EXPORT_SYMBOL_GPL(ip_route_output_key_hash);

struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
					    struct fib_result *res,
					    const struct sk_buff *skb)
{
	struct net_device *dev_out = NULL;
	int orig_oif = fl4->flowi4_oif;
	unsigned int flags = 0;
	struct rtable *rth;
	int err;

	if (fl4->saddr) {
		if (ipv4_is_multicast(fl4->saddr) ||
		    ipv4_is_lbcast(fl4->saddr)) {
			rth = ERR_PTR(-EINVAL);
			goto out;
		}

		rth = ERR_PTR(-ENETUNREACH);

		/* I removed the check for oif == dev_out->oif here.
		 * It was wrong for two reasons:
		 * 1. ip_dev_find(net, saddr) can return a wrong iface, if
		 *    saddr is assigned to multiple interfaces.
		 * 2. Moreover, we are allowed to send packets with the saddr
		 *    of another iface. --ANK
		 */

		if (fl4->flowi4_oif == 0 &&
		    (ipv4_is_multicast(fl4->daddr) ||
		     ipv4_is_lbcast(fl4->daddr))) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			dev_out = __ip_dev_find(net, fl4->saddr, false);
			if (!dev_out)
				goto out;

			/* Special hack: the user can direct multicasts
			 * and limited broadcast via the necessary interface
			 * without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
			 * This hack is not just for fun, it allows
			 * vic, vat and friends to work.
			 * They bind the socket to loopback, set ttl to zero
			 * and expect that it will work.
			 * From the viewpoint of the routing cache they are
			 * broken, because we are not allowed to build a
			 * multicast path with a loopback source address
			 * (look, the routing cache cannot know that ttl is
			 * zero, so that the packet will not leave this host
			 * and the route is valid).
			 * Luckily, this hack is a good workaround.
			 */

			fl4->flowi4_oif = dev_out->ifindex;
			goto make_route;
		}

		if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			if (!__ip_dev_find(net, fl4->saddr, false))
				goto out;
		}
	}


	if (fl4->flowi4_oif) {
		dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
		rth = ERR_PTR(-ENODEV);
		if (!dev_out)
			goto out;

		/* RACE: Check the return value of inet_select_addr() instead.
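		 * (The device can lose its last IPv4 address between the
		 * IFF_UP/__in_dev_get_rcu() test below and the
		 * inet_select_addr() calls, which then return 0; checking
		 * their return value would close that window.)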
		 */
		if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
			rth = ERR_PTR(-ENETUNREACH);
			goto out;
		}
		if (ipv4_is_local_multicast(fl4->daddr) ||
		    ipv4_is_lbcast(fl4->daddr) ||
		    fl4->flowi4_proto == IPPROTO_IGMP) {
			if (!fl4->saddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			goto make_route;
		}
		if (!fl4->saddr) {
			if (ipv4_is_multicast(fl4->daddr))
				fl4->saddr = inet_select_addr(dev_out, 0,
							      fl4->flowi4_scope);
			else if (!fl4->daddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_HOST);
		}
	}

	if (!fl4->daddr) {
		fl4->daddr = fl4->saddr;
		if (!fl4->daddr)
			fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
		dev_out = net->loopback_dev;
		fl4->flowi4_oif = LOOPBACK_IFINDEX;
		res->type = RTN_LOCAL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	err = fib_lookup(net, fl4, res, 0);
	if (err) {
		res->fi = NULL;
		res->table = NULL;
		if (fl4->flowi4_oif &&
		    (ipv4_is_multicast(fl4->daddr) || !fl4->flowi4_l3mdev)) {
			/* Apparently, routing tables are wrong. Assume that
			 * the destination is on link.
			 *
			 * WHY? DW.
			 * Because we are allowed to send to an iface
			 * even if it has NO routes and NO assigned
			 * addresses. When oif is specified, routing
			 * tables are looked up with only one purpose:
			 * to catch if the destination is gatewayed, rather
			 * than direct. Moreover, if MSG_DONTROUTE is set,
			 * we send the packet, ignoring both routing tables
			 * and ifaddr state. --ANK
			 *
			 * We could make it even if oif is unknown,
			 * likely IPv6, but we do not.
			 */

			if (fl4->saddr == 0)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			res->type = RTN_UNICAST;
			goto make_route;
		}
		rth = ERR_PTR(err);
		goto out;
	}

	if (res->type == RTN_LOCAL) {
		if (!fl4->saddr) {
			if (res->fi->fib_prefsrc)
				fl4->saddr = res->fi->fib_prefsrc;
			else
				fl4->saddr = fl4->daddr;
		}

		/* L3 master device is the loopback for that domain */
		dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ?
: 2853 net->loopback_dev; 2854 2855 /* make sure orig_oif points to fib result device even 2856 * though packet rx/tx happens over loopback or l3mdev 2857 */ 2858 orig_oif = FIB_RES_OIF(*res); 2859 2860 fl4->flowi4_oif = dev_out->ifindex; 2861 flags |= RTCF_LOCAL; 2862 goto make_route; 2863 } 2864 2865 fib_select_path(net, res, fl4, skb); 2866 2867 dev_out = FIB_RES_DEV(*res); 2868 2869 make_route: 2870 rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags); 2871 2872 out: 2873 return rth; 2874 } 2875 2876 static struct dst_ops ipv4_dst_blackhole_ops = { 2877 .family = AF_INET, 2878 .default_advmss = ipv4_default_advmss, 2879 .neigh_lookup = ipv4_neigh_lookup, 2880 .check = dst_blackhole_check, 2881 .cow_metrics = dst_blackhole_cow_metrics, 2882 .update_pmtu = dst_blackhole_update_pmtu, 2883 .redirect = dst_blackhole_redirect, 2884 .mtu = dst_blackhole_mtu, 2885 }; 2886 2887 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig) 2888 { 2889 struct rtable *ort = dst_rtable(dst_orig); 2890 struct rtable *rt; 2891 2892 rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, DST_OBSOLETE_DEAD, 0); 2893 if (rt) { 2894 struct dst_entry *new = &rt->dst; 2895 2896 new->__use = 1; 2897 new->input = dst_discard; 2898 new->output = dst_discard_out; 2899 2900 new->dev = net->loopback_dev; 2901 netdev_hold(new->dev, &new->dev_tracker, GFP_ATOMIC); 2902 2903 rt->rt_is_input = ort->rt_is_input; 2904 rt->rt_iif = ort->rt_iif; 2905 rt->rt_pmtu = ort->rt_pmtu; 2906 rt->rt_mtu_locked = ort->rt_mtu_locked; 2907 2908 rt->rt_genid = rt_genid_ipv4(net); 2909 rt->rt_flags = ort->rt_flags; 2910 rt->rt_type = ort->rt_type; 2911 rt->rt_uses_gateway = ort->rt_uses_gateway; 2912 rt->rt_gw_family = ort->rt_gw_family; 2913 if (rt->rt_gw_family == AF_INET) 2914 rt->rt_gw4 = ort->rt_gw4; 2915 else if (rt->rt_gw_family == AF_INET6) 2916 rt->rt_gw6 = ort->rt_gw6; 2917 } 2918 2919 dst_release(dst_orig); 2920 2921 return rt ? &rt->dst : ERR_PTR(-ENOMEM); 2922 } 2923 2924 struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4, 2925 const struct sock *sk) 2926 { 2927 struct rtable *rt = __ip_route_output_key(net, flp4); 2928 2929 if (IS_ERR(rt)) 2930 return rt; 2931 2932 if (flp4->flowi4_proto) { 2933 flp4->flowi4_oif = rt->dst.dev->ifindex; 2934 rt = dst_rtable(xfrm_lookup_route(net, &rt->dst, 2935 flowi4_to_flowi(flp4), 2936 sk, 0)); 2937 } 2938 2939 return rt; 2940 } 2941 EXPORT_SYMBOL_GPL(ip_route_output_flow); 2942 2943 /* called with rcu_read_lock held */ 2944 static int rt_fill_info(struct net *net, __be32 dst, __be32 src, 2945 struct rtable *rt, u32 table_id, dscp_t dscp, 2946 struct flowi4 *fl4, struct sk_buff *skb, u32 portid, 2947 u32 seq, unsigned int flags) 2948 { 2949 struct rtmsg *r; 2950 struct nlmsghdr *nlh; 2951 unsigned long expires = 0; 2952 u32 error; 2953 u32 metrics[RTAX_MAX]; 2954 2955 nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), flags); 2956 if (!nlh) 2957 return -EMSGSIZE; 2958 2959 r = nlmsg_data(nlh); 2960 r->rtm_family = AF_INET; 2961 r->rtm_dst_len = 32; 2962 r->rtm_src_len = 0; 2963 r->rtm_tos = inet_dscp_to_dsfield(dscp); 2964 r->rtm_table = table_id < 256 ? 
table_id : RT_TABLE_COMPAT; 2965 if (nla_put_u32(skb, RTA_TABLE, table_id)) 2966 goto nla_put_failure; 2967 r->rtm_type = rt->rt_type; 2968 r->rtm_scope = RT_SCOPE_UNIVERSE; 2969 r->rtm_protocol = RTPROT_UNSPEC; 2970 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED; 2971 if (rt->rt_flags & RTCF_NOTIFY) 2972 r->rtm_flags |= RTM_F_NOTIFY; 2973 if (IPCB(skb)->flags & IPSKB_DOREDIRECT) 2974 r->rtm_flags |= RTCF_DOREDIRECT; 2975 2976 if (nla_put_in_addr(skb, RTA_DST, dst)) 2977 goto nla_put_failure; 2978 if (src) { 2979 r->rtm_src_len = 32; 2980 if (nla_put_in_addr(skb, RTA_SRC, src)) 2981 goto nla_put_failure; 2982 } 2983 if (rt->dst.dev && 2984 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex)) 2985 goto nla_put_failure; 2986 if (lwtunnel_fill_encap(skb, rt->dst.lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0) 2987 goto nla_put_failure; 2988 #ifdef CONFIG_IP_ROUTE_CLASSID 2989 if (rt->dst.tclassid && 2990 nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid)) 2991 goto nla_put_failure; 2992 #endif 2993 if (fl4 && !rt_is_input_route(rt) && 2994 fl4->saddr != src) { 2995 if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr)) 2996 goto nla_put_failure; 2997 } 2998 if (rt->rt_uses_gateway) { 2999 if (rt->rt_gw_family == AF_INET && 3000 nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) { 3001 goto nla_put_failure; 3002 } else if (rt->rt_gw_family == AF_INET6) { 3003 int alen = sizeof(struct in6_addr); 3004 struct nlattr *nla; 3005 struct rtvia *via; 3006 3007 nla = nla_reserve(skb, RTA_VIA, alen + 2); 3008 if (!nla) 3009 goto nla_put_failure; 3010 3011 via = nla_data(nla); 3012 via->rtvia_family = AF_INET6; 3013 memcpy(via->rtvia_addr, &rt->rt_gw6, alen); 3014 } 3015 } 3016 3017 expires = READ_ONCE(rt->dst.expires); 3018 if (expires) { 3019 unsigned long now = jiffies; 3020 3021 if (time_before(now, expires)) 3022 expires -= now; 3023 else 3024 expires = 0; 3025 } 3026 3027 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics)); 3028 if (rt->rt_pmtu && expires) 3029 metrics[RTAX_MTU - 1] = rt->rt_pmtu; 3030 if (rt->rt_mtu_locked && expires) 3031 metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU); 3032 if (rtnetlink_put_metrics(skb, metrics) < 0) 3033 goto nla_put_failure; 3034 3035 if (fl4) { 3036 if (fl4->flowi4_mark && 3037 nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark)) 3038 goto nla_put_failure; 3039 3040 if (!uid_eq(fl4->flowi4_uid, INVALID_UID) && 3041 nla_put_u32(skb, RTA_UID, 3042 from_kuid_munged(current_user_ns(), 3043 fl4->flowi4_uid))) 3044 goto nla_put_failure; 3045 3046 if (rt_is_input_route(rt)) { 3047 #ifdef CONFIG_IP_MROUTE 3048 if (ipv4_is_multicast(dst) && 3049 !ipv4_is_local_multicast(dst) && 3050 IPV4_DEVCONF_ALL_RO(net, MC_FORWARDING)) { 3051 int err = ipmr_get_route(net, skb, 3052 fl4->saddr, fl4->daddr, 3053 r, portid); 3054 3055 if (err <= 0) { 3056 if (err == 0) 3057 return 0; 3058 goto nla_put_failure; 3059 } 3060 } else 3061 #endif 3062 if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif)) 3063 goto nla_put_failure; 3064 } 3065 } 3066 3067 error = rt->dst.error; 3068 3069 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0) 3070 goto nla_put_failure; 3071 3072 nlmsg_end(skb, nlh); 3073 return 0; 3074 3075 nla_put_failure: 3076 nlmsg_cancel(skb, nlh); 3077 return -EMSGSIZE; 3078 } 3079 3080 static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb, 3081 struct netlink_callback *cb, u32 table_id, 3082 struct fnhe_hash_bucket *bucket, int genid, 3083 int *fa_index, int fa_start, unsigned int flags) 3084 { 3085 int i; 3086 3087 for (i = 0; i < FNHE_HASH_SIZE; i++) { 3088 struct 
fib_nh_exception *fnhe;

		for (fnhe = rcu_dereference(bucket[i].chain); fnhe;
		     fnhe = rcu_dereference(fnhe->fnhe_next)) {
			struct rtable *rt;
			int err;

			if (*fa_index < fa_start)
				goto next;

			if (fnhe->fnhe_genid != genid)
				goto next;

			if (fnhe->fnhe_expires &&
			    time_after(jiffies, fnhe->fnhe_expires))
				goto next;

			rt = rcu_dereference(fnhe->fnhe_rth_input);
			if (!rt)
				rt = rcu_dereference(fnhe->fnhe_rth_output);
			if (!rt)
				goto next;

			err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
					   table_id, 0, NULL, skb,
					   NETLINK_CB(cb->skb).portid,
					   cb->nlh->nlmsg_seq, flags);
			if (err)
				return err;
next:
			(*fa_index)++;
		}
	}

	return 0;
}

int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
		       u32 table_id, struct fib_info *fi,
		       int *fa_index, int fa_start, unsigned int flags)
{
	struct net *net = sock_net(cb->skb->sk);
	int nhsel, genid = fnhe_genid(net);

	for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
		struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);
		struct fnhe_hash_bucket *bucket;
		int err;

		if (nhc->nhc_flags & RTNH_F_DEAD)
			continue;

		rcu_read_lock();
		bucket = rcu_dereference(nhc->nhc_exceptions);
		err = 0;
		if (bucket)
			err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
					       genid, fa_index, fa_start,
					       flags);
		rcu_read_unlock();
		if (err)
			return err;
	}

	return 0;
}

static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
						   u8 ip_proto, __be16 sport,
						   __be16 dport)
{
	struct sk_buff *skb;
	struct iphdr *iph;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return NULL;

	/* Reserve room for dummy headers; this skb can pass
	 * through a good chunk of the routing engine.
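	 *
	 * This is the skb behind "ip route get"; e.g.
	 *	ip route get 198.51.100.7 from 192.0.2.1 iif eth0 \
	 *		ipproto udp dport 53
	 * (addresses and device name illustrative) makes the lookup below
	 * behave as it would for a really received packet.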
3168 */ 3169 skb_reset_mac_header(skb); 3170 skb_reset_network_header(skb); 3171 skb->protocol = htons(ETH_P_IP); 3172 iph = skb_put(skb, sizeof(struct iphdr)); 3173 iph->protocol = ip_proto; 3174 iph->saddr = src; 3175 iph->daddr = dst; 3176 iph->version = 0x4; 3177 iph->frag_off = 0; 3178 iph->ihl = 0x5; 3179 skb_set_transport_header(skb, skb->len); 3180 3181 switch (iph->protocol) { 3182 case IPPROTO_UDP: { 3183 struct udphdr *udph; 3184 3185 udph = skb_put_zero(skb, sizeof(struct udphdr)); 3186 udph->source = sport; 3187 udph->dest = dport; 3188 udph->len = htons(sizeof(struct udphdr)); 3189 udph->check = 0; 3190 break; 3191 } 3192 case IPPROTO_TCP: { 3193 struct tcphdr *tcph; 3194 3195 tcph = skb_put_zero(skb, sizeof(struct tcphdr)); 3196 tcph->source = sport; 3197 tcph->dest = dport; 3198 tcph->doff = sizeof(struct tcphdr) / 4; 3199 tcph->rst = 1; 3200 tcph->check = ~tcp_v4_check(sizeof(struct tcphdr), 3201 src, dst, 0); 3202 break; 3203 } 3204 case IPPROTO_ICMP: { 3205 struct icmphdr *icmph; 3206 3207 icmph = skb_put_zero(skb, sizeof(struct icmphdr)); 3208 icmph->type = ICMP_ECHO; 3209 icmph->code = 0; 3210 } 3211 } 3212 3213 return skb; 3214 } 3215 3216 static int inet_rtm_valid_getroute_req(struct sk_buff *skb, 3217 const struct nlmsghdr *nlh, 3218 struct nlattr **tb, 3219 struct netlink_ext_ack *extack) 3220 { 3221 struct rtmsg *rtm; 3222 int i, err; 3223 3224 rtm = nlmsg_payload(nlh, sizeof(*rtm)); 3225 if (!rtm) { 3226 NL_SET_ERR_MSG(extack, 3227 "ipv4: Invalid header for route get request"); 3228 return -EINVAL; 3229 } 3230 3231 if (!netlink_strict_get_check(skb)) 3232 return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX, 3233 rtm_ipv4_policy, extack); 3234 3235 if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) || 3236 (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) || 3237 rtm->rtm_table || rtm->rtm_protocol || 3238 rtm->rtm_scope || rtm->rtm_type) { 3239 NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for route get request"); 3240 return -EINVAL; 3241 } 3242 3243 if (rtm->rtm_flags & ~(RTM_F_NOTIFY | 3244 RTM_F_LOOKUP_TABLE | 3245 RTM_F_FIB_MATCH)) { 3246 NL_SET_ERR_MSG(extack, "ipv4: Unsupported rtm_flags for route get request"); 3247 return -EINVAL; 3248 } 3249 3250 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX, 3251 rtm_ipv4_policy, extack); 3252 if (err) 3253 return err; 3254 3255 if ((tb[RTA_SRC] && !rtm->rtm_src_len) || 3256 (tb[RTA_DST] && !rtm->rtm_dst_len)) { 3257 NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4"); 3258 return -EINVAL; 3259 } 3260 3261 for (i = 0; i <= RTA_MAX; i++) { 3262 if (!tb[i]) 3263 continue; 3264 3265 switch (i) { 3266 case RTA_IIF: 3267 case RTA_OIF: 3268 case RTA_SRC: 3269 case RTA_DST: 3270 case RTA_IP_PROTO: 3271 case RTA_SPORT: 3272 case RTA_DPORT: 3273 case RTA_MARK: 3274 case RTA_UID: 3275 break; 3276 default: 3277 NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in route get request"); 3278 return -EINVAL; 3279 } 3280 } 3281 3282 return 0; 3283 } 3284 3285 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, 3286 struct netlink_ext_ack *extack) 3287 { 3288 struct net *net = sock_net(in_skb->sk); 3289 struct nlattr *tb[RTA_MAX+1]; 3290 u32 table_id = RT_TABLE_MAIN; 3291 __be16 sport = 0, dport = 0; 3292 struct fib_result res = {}; 3293 u8 ip_proto = IPPROTO_UDP; 3294 struct rtable *rt = NULL; 3295 struct sk_buff *skb; 3296 struct rtmsg *rtm; 3297 struct flowi4 fl4 = {}; 3298 __be32 dst = 0; 3299 __be32 src = 0; 3300 dscp_t dscp; 3301 kuid_t uid; 3302 
u32 iif; 3303 int err; 3304 int mark; 3305 3306 err = inet_rtm_valid_getroute_req(in_skb, nlh, tb, extack); 3307 if (err < 0) 3308 return err; 3309 3310 rtm = nlmsg_data(nlh); 3311 src = nla_get_in_addr_default(tb[RTA_SRC], 0); 3312 dst = nla_get_in_addr_default(tb[RTA_DST], 0); 3313 iif = nla_get_u32_default(tb[RTA_IIF], 0); 3314 mark = nla_get_u32_default(tb[RTA_MARK], 0); 3315 dscp = inet_dsfield_to_dscp(rtm->rtm_tos); 3316 if (tb[RTA_UID]) 3317 uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID])); 3318 else 3319 uid = (iif ? INVALID_UID : current_uid()); 3320 3321 if (tb[RTA_IP_PROTO]) { 3322 err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO], 3323 &ip_proto, AF_INET, extack); 3324 if (err) 3325 return err; 3326 } 3327 3328 if (tb[RTA_SPORT]) 3329 sport = nla_get_be16(tb[RTA_SPORT]); 3330 3331 if (tb[RTA_DPORT]) 3332 dport = nla_get_be16(tb[RTA_DPORT]); 3333 3334 skb = inet_rtm_getroute_build_skb(src, dst, ip_proto, sport, dport); 3335 if (!skb) 3336 return -ENOBUFS; 3337 3338 fl4.daddr = dst; 3339 fl4.saddr = src; 3340 fl4.flowi4_tos = inet_dscp_to_dsfield(dscp); 3341 fl4.flowi4_oif = nla_get_u32_default(tb[RTA_OIF], 0); 3342 fl4.flowi4_mark = mark; 3343 fl4.flowi4_uid = uid; 3344 if (sport) 3345 fl4.fl4_sport = sport; 3346 if (dport) 3347 fl4.fl4_dport = dport; 3348 fl4.flowi4_proto = ip_proto; 3349 3350 rcu_read_lock(); 3351 3352 if (iif) { 3353 struct net_device *dev; 3354 3355 dev = dev_get_by_index_rcu(net, iif); 3356 if (!dev) { 3357 err = -ENODEV; 3358 goto errout_rcu; 3359 } 3360 3361 fl4.flowi4_iif = iif; /* for rt_fill_info */ 3362 skb->dev = dev; 3363 skb->mark = mark; 3364 err = ip_route_input_rcu(skb, dst, src, dscp, dev, 3365 &res) ? -EINVAL : 0; 3366 3367 rt = skb_rtable(skb); 3368 if (err == 0 && rt->dst.error) 3369 err = -rt->dst.error; 3370 } else { 3371 fl4.flowi4_iif = LOOPBACK_IFINDEX; 3372 skb->dev = net->loopback_dev; 3373 rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb); 3374 err = 0; 3375 if (IS_ERR(rt)) 3376 err = PTR_ERR(rt); 3377 else 3378 skb_dst_set(skb, &rt->dst); 3379 } 3380 3381 if (err) 3382 goto errout_rcu; 3383 3384 if (rtm->rtm_flags & RTM_F_NOTIFY) 3385 rt->rt_flags |= RTCF_NOTIFY; 3386 3387 if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE) 3388 table_id = res.table ? 
res.table->tb_id : 0; 3389 3390 /* reset skb for netlink reply msg */ 3391 skb_trim(skb, 0); 3392 skb_reset_network_header(skb); 3393 skb_reset_transport_header(skb); 3394 skb_reset_mac_header(skb); 3395 3396 if (rtm->rtm_flags & RTM_F_FIB_MATCH) { 3397 struct fib_rt_info fri; 3398 3399 if (!res.fi) { 3400 err = fib_props[res.type].error; 3401 if (!err) 3402 err = -EHOSTUNREACH; 3403 goto errout_rcu; 3404 } 3405 fri.fi = res.fi; 3406 fri.tb_id = table_id; 3407 fri.dst = res.prefix; 3408 fri.dst_len = res.prefixlen; 3409 fri.dscp = res.dscp; 3410 fri.type = rt->rt_type; 3411 fri.offload = 0; 3412 fri.trap = 0; 3413 fri.offload_failed = 0; 3414 if (res.fa_head) { 3415 struct fib_alias *fa; 3416 3417 hlist_for_each_entry_rcu(fa, res.fa_head, fa_list) { 3418 u8 slen = 32 - fri.dst_len; 3419 3420 if (fa->fa_slen == slen && 3421 fa->tb_id == fri.tb_id && 3422 fa->fa_dscp == fri.dscp && 3423 fa->fa_info == res.fi && 3424 fa->fa_type == fri.type) { 3425 fri.offload = READ_ONCE(fa->offload); 3426 fri.trap = READ_ONCE(fa->trap); 3427 fri.offload_failed = 3428 READ_ONCE(fa->offload_failed); 3429 break; 3430 } 3431 } 3432 } 3433 err = fib_dump_info(skb, NETLINK_CB(in_skb).portid, 3434 nlh->nlmsg_seq, RTM_NEWROUTE, &fri, 0); 3435 } else { 3436 err = rt_fill_info(net, dst, src, rt, table_id, res.dscp, &fl4, 3437 skb, NETLINK_CB(in_skb).portid, 3438 nlh->nlmsg_seq, 0); 3439 } 3440 if (err < 0) 3441 goto errout_rcu; 3442 3443 rcu_read_unlock(); 3444 3445 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); 3446 3447 errout_free: 3448 return err; 3449 errout_rcu: 3450 rcu_read_unlock(); 3451 kfree_skb(skb); 3452 goto errout_free; 3453 } 3454 3455 void ip_rt_multicast_event(struct in_device *in_dev) 3456 { 3457 rt_cache_flush(dev_net(in_dev->dev)); 3458 } 3459 3460 #ifdef CONFIG_SYSCTL 3461 static int ip_rt_gc_interval __read_mostly = 60 * HZ; 3462 static int ip_rt_gc_min_interval __read_mostly = HZ / 2; 3463 static int ip_rt_gc_elasticity __read_mostly = 8; 3464 static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU; 3465 3466 static int ipv4_sysctl_rtcache_flush(const struct ctl_table *__ctl, int write, 3467 void *buffer, size_t *lenp, loff_t *ppos) 3468 { 3469 struct net *net = (struct net *)__ctl->extra1; 3470 3471 if (write) { 3472 rt_cache_flush(net); 3473 fnhe_genid_bump(net); 3474 return 0; 3475 } 3476 3477 return -EINVAL; 3478 } 3479 3480 static struct ctl_table ipv4_route_table[] = { 3481 { 3482 .procname = "gc_thresh", 3483 .data = &ipv4_dst_ops.gc_thresh, 3484 .maxlen = sizeof(int), 3485 .mode = 0644, 3486 .proc_handler = proc_dointvec, 3487 }, 3488 { 3489 .procname = "max_size", 3490 .data = &ip_rt_max_size, 3491 .maxlen = sizeof(int), 3492 .mode = 0644, 3493 .proc_handler = proc_dointvec, 3494 }, 3495 { 3496 /* Deprecated. 
Use gc_min_interval_ms */ 3497 3498 .procname = "gc_min_interval", 3499 .data = &ip_rt_gc_min_interval, 3500 .maxlen = sizeof(int), 3501 .mode = 0644, 3502 .proc_handler = proc_dointvec_jiffies, 3503 }, 3504 { 3505 .procname = "gc_min_interval_ms", 3506 .data = &ip_rt_gc_min_interval, 3507 .maxlen = sizeof(int), 3508 .mode = 0644, 3509 .proc_handler = proc_dointvec_ms_jiffies, 3510 }, 3511 { 3512 .procname = "gc_timeout", 3513 .data = &ip_rt_gc_timeout, 3514 .maxlen = sizeof(int), 3515 .mode = 0644, 3516 .proc_handler = proc_dointvec_jiffies, 3517 }, 3518 { 3519 .procname = "gc_interval", 3520 .data = &ip_rt_gc_interval, 3521 .maxlen = sizeof(int), 3522 .mode = 0644, 3523 .proc_handler = proc_dointvec_jiffies, 3524 }, 3525 { 3526 .procname = "redirect_load", 3527 .data = &ip_rt_redirect_load, 3528 .maxlen = sizeof(int), 3529 .mode = 0644, 3530 .proc_handler = proc_dointvec, 3531 }, 3532 { 3533 .procname = "redirect_number", 3534 .data = &ip_rt_redirect_number, 3535 .maxlen = sizeof(int), 3536 .mode = 0644, 3537 .proc_handler = proc_dointvec, 3538 }, 3539 { 3540 .procname = "redirect_silence", 3541 .data = &ip_rt_redirect_silence, 3542 .maxlen = sizeof(int), 3543 .mode = 0644, 3544 .proc_handler = proc_dointvec, 3545 }, 3546 { 3547 .procname = "error_cost", 3548 .data = &ip_rt_error_cost, 3549 .maxlen = sizeof(int), 3550 .mode = 0644, 3551 .proc_handler = proc_dointvec, 3552 }, 3553 { 3554 .procname = "error_burst", 3555 .data = &ip_rt_error_burst, 3556 .maxlen = sizeof(int), 3557 .mode = 0644, 3558 .proc_handler = proc_dointvec, 3559 }, 3560 { 3561 .procname = "gc_elasticity", 3562 .data = &ip_rt_gc_elasticity, 3563 .maxlen = sizeof(int), 3564 .mode = 0644, 3565 .proc_handler = proc_dointvec, 3566 }, 3567 }; 3568 3569 static const char ipv4_route_flush_procname[] = "flush"; 3570 3571 static struct ctl_table ipv4_route_netns_table[] = { 3572 { 3573 .procname = ipv4_route_flush_procname, 3574 .maxlen = sizeof(int), 3575 .mode = 0200, 3576 .proc_handler = ipv4_sysctl_rtcache_flush, 3577 }, 3578 { 3579 .procname = "min_pmtu", 3580 .data = &init_net.ipv4.ip_rt_min_pmtu, 3581 .maxlen = sizeof(int), 3582 .mode = 0644, 3583 .proc_handler = proc_dointvec_minmax, 3584 .extra1 = &ip_min_valid_pmtu, 3585 }, 3586 { 3587 .procname = "mtu_expires", 3588 .data = &init_net.ipv4.ip_rt_mtu_expires, 3589 .maxlen = sizeof(int), 3590 .mode = 0644, 3591 .proc_handler = proc_dointvec_jiffies, 3592 }, 3593 { 3594 .procname = "min_adv_mss", 3595 .data = &init_net.ipv4.ip_rt_min_advmss, 3596 .maxlen = sizeof(int), 3597 .mode = 0644, 3598 .proc_handler = proc_dointvec, 3599 }, 3600 }; 3601 3602 static __net_init int sysctl_route_net_init(struct net *net) 3603 { 3604 struct ctl_table *tbl; 3605 size_t table_size = ARRAY_SIZE(ipv4_route_netns_table); 3606 3607 tbl = ipv4_route_netns_table; 3608 if (!net_eq(net, &init_net)) { 3609 int i; 3610 3611 tbl = kmemdup(tbl, sizeof(ipv4_route_netns_table), GFP_KERNEL); 3612 if (!tbl) 3613 goto err_dup; 3614 3615 /* Don't export non-whitelisted sysctls to unprivileged users */ 3616 if (net->user_ns != &init_user_ns) { 3617 if (tbl[0].procname != ipv4_route_flush_procname) 3618 table_size = 0; 3619 } 3620 3621 /* Update the variables to point into the current struct net 3622 * except for the first element flush 3623 */ 3624 for (i = 1; i < table_size; i++) 3625 tbl[i].data += (void *)net - (void *)&init_net; 3626 } 3627 tbl[0].extra1 = net; 3628 3629 net->ipv4.route_hdr = register_net_sysctl_sz(net, "net/ipv4/route", 3630 tbl, table_size); 3631 if (!net->ipv4.route_hdr) 3632 goto 
err_reg; 3633 return 0; 3634 3635 err_reg: 3636 if (tbl != ipv4_route_netns_table) 3637 kfree(tbl); 3638 err_dup: 3639 return -ENOMEM; 3640 } 3641 3642 static __net_exit void sysctl_route_net_exit(struct net *net) 3643 { 3644 const struct ctl_table *tbl; 3645 3646 tbl = net->ipv4.route_hdr->ctl_table_arg; 3647 unregister_net_sysctl_table(net->ipv4.route_hdr); 3648 BUG_ON(tbl == ipv4_route_netns_table); 3649 kfree(tbl); 3650 } 3651 3652 static __net_initdata struct pernet_operations sysctl_route_ops = { 3653 .init = sysctl_route_net_init, 3654 .exit = sysctl_route_net_exit, 3655 }; 3656 #endif 3657 3658 static __net_init int netns_ip_rt_init(struct net *net) 3659 { 3660 /* Set default value for namespaceified sysctls */ 3661 net->ipv4.ip_rt_min_pmtu = DEFAULT_MIN_PMTU; 3662 net->ipv4.ip_rt_mtu_expires = DEFAULT_MTU_EXPIRES; 3663 net->ipv4.ip_rt_min_advmss = DEFAULT_MIN_ADVMSS; 3664 return 0; 3665 } 3666 3667 static struct pernet_operations __net_initdata ip_rt_ops = { 3668 .init = netns_ip_rt_init, 3669 }; 3670 3671 static __net_init int rt_genid_init(struct net *net) 3672 { 3673 atomic_set(&net->ipv4.rt_genid, 0); 3674 atomic_set(&net->fnhe_genid, 0); 3675 atomic_set(&net->ipv4.dev_addr_genid, get_random_u32()); 3676 return 0; 3677 } 3678 3679 static __net_initdata struct pernet_operations rt_genid_ops = { 3680 .init = rt_genid_init, 3681 }; 3682 3683 static int __net_init ipv4_inetpeer_init(struct net *net) 3684 { 3685 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL); 3686 3687 if (!bp) 3688 return -ENOMEM; 3689 inet_peer_base_init(bp); 3690 net->ipv4.peers = bp; 3691 return 0; 3692 } 3693 3694 static void __net_exit ipv4_inetpeer_exit(struct net *net) 3695 { 3696 struct inet_peer_base *bp = net->ipv4.peers; 3697 3698 net->ipv4.peers = NULL; 3699 inetpeer_invalidate_tree(bp); 3700 kfree(bp); 3701 } 3702 3703 static __net_initdata struct pernet_operations ipv4_inetpeer_ops = { 3704 .init = ipv4_inetpeer_init, 3705 .exit = ipv4_inetpeer_exit, 3706 }; 3707 3708 #ifdef CONFIG_IP_ROUTE_CLASSID 3709 struct ip_rt_acct __percpu *ip_rt_acct __read_mostly; 3710 #endif /* CONFIG_IP_ROUTE_CLASSID */ 3711 3712 static const struct rtnl_msg_handler ip_rt_rtnl_msg_handlers[] __initconst = { 3713 {.protocol = PF_INET, .msgtype = RTM_GETROUTE, 3714 .doit = inet_rtm_getroute, .flags = RTNL_FLAG_DOIT_UNLOCKED}, 3715 }; 3716 3717 int __init ip_rt_init(void) 3718 { 3719 void *idents_hash; 3720 int cpu; 3721 3722 /* For modern hosts, this will use 2 MB of memory */ 3723 idents_hash = alloc_large_system_hash("IP idents", 3724 sizeof(*ip_idents) + sizeof(*ip_tstamps), 3725 0, 3726 16, /* one bucket per 64 KB */ 3727 HASH_ZERO, 3728 NULL, 3729 &ip_idents_mask, 3730 2048, 3731 256*1024); 3732 3733 ip_idents = idents_hash; 3734 3735 get_random_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents)); 3736 3737 ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents); 3738 3739 for_each_possible_cpu(cpu) { 3740 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu); 3741 3742 INIT_LIST_HEAD(&ul->head); 3743 spin_lock_init(&ul->lock); 3744 } 3745 #ifdef CONFIG_IP_ROUTE_CLASSID 3746 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct)); 3747 if (!ip_rt_acct) 3748 panic("IP: failed to allocate ip_rt_acct\n"); 3749 #endif 3750 3751 ipv4_dst_ops.kmem_cachep = KMEM_CACHE(rtable, 3752 SLAB_HWCACHE_ALIGN | SLAB_PANIC); 3753 3754 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep; 3755 3756 if (dst_entries_init(&ipv4_dst_ops) < 0) 3757 panic("IP: 
failed to allocate ipv4_dst_ops counter\n"); 3758 3759 if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0) 3760 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n"); 3761 3762 ipv4_dst_ops.gc_thresh = ~0; 3763 ip_rt_max_size = INT_MAX; 3764 3765 devinet_init(); 3766 ip_fib_init(); 3767 3768 if (ip_rt_proc_init()) 3769 pr_err("Unable to create route proc files\n"); 3770 #ifdef CONFIG_XFRM 3771 xfrm_init(); 3772 xfrm4_init(); 3773 #endif 3774 rtnl_register_many(ip_rt_rtnl_msg_handlers); 3775 3776 #ifdef CONFIG_SYSCTL 3777 register_pernet_subsys(&sysctl_route_ops); 3778 #endif 3779 register_pernet_subsys(&ip_rt_ops); 3780 register_pernet_subsys(&rt_genid_ops); 3781 register_pernet_subsys(&ipv4_inetpeer_ops); 3782 return 0; 3783 } 3784 3785 #ifdef CONFIG_SYSCTL 3786 /* 3787 * We really need to sanitize the damn ipv4 init order, then all 3788 * this nonsense will go away. 3789 */ 3790 void __init ip_static_sysctl_init(void) 3791 { 3792 register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table); 3793 } 3794 #endif 3795