// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	:	MSS actually. Also added the window
 *					clamper.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD;
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 *		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 *	Ilia Sotnikov		:	Removed TOS from hash calculations
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/nexthop.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <net/secure_seq.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>

#include "fib_lookup.h"

#define RT_FL_TOS(oldflp4) \
	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))

#define RT_GC_TIMEOUT (300*HZ)

static int ip_rt_max_size;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
static u32 ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly	= 256;

static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
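
/* The defaults above are interrelated: ip_rt_redirect_silence equals
 * ip_rt_redirect_load << (ip_rt_redirect_number + 1), i.e. the silence
 * window sits just past the largest exponential-backoff delay that
 * ip_rt_send_redirect() can reach before it gives up on a peer, and
 * ip_rt_min_pmtu is 512 bytes of payload plus 20-byte IP and TCP headers.
 */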
/*
 *	Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int	 ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
					   struct sk_buff *skb, u32 mtu);
static void		 ip_do_redirect(struct dst_entry *dst, struct sock *sk,
					struct sk_buff *skb);
static void		 ipv4_dst_destroy(struct dst_entry *dst);

static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	WARN_ON(1);
	return NULL;
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr);
static void		 ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);

static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.check =		ipv4_dst_check,
	.default_advmss =	ipv4_default_advmss,
	.mtu =			ipv4_mtu,
	.cow_metrics =		ipv4_cow_metrics,
	.destroy =		ipv4_dst_destroy,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.redirect =		ip_do_redirect,
	.local_out =		__ip_local_out,
	.neigh_lookup =		ipv4_neigh_lookup,
	.confirm_neigh =	ipv4_confirm_neigh,
};

#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
EXPORT_SYMBOL(ip_tos2prio);
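
/* The table above is indexed by the four IPv4 TOS bits shifted right by
 * one (see rt_tos2priority() in include/net/route.h), so each even entry
 * is a plain TOS class and each odd ECN_OR_COST() entry covers the
 * codepoint with the low bit (historically "minimize cost") set.
 */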

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)

#ifdef CONFIG_PROC_FS
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos)
		return NULL;
	return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
	.start	= rt_cache_seq_start,
	.next	= rt_cache_seq_next,
	.stop	= rt_cache_seq_stop,
	.show	= rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cache_seq_ops);
}

static const struct file_operations rt_cache_seq_fops = {
	.open	 = rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};


static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq, "%08x  %08x %08x %08x %08x %08x %08x %08x "
			" %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   dst_entries_get_slow(&ipv4_dst_ops),
		   0, /* st->in_hit */
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,

		   0, /* st->out_hit */
		   st->out_slow_tot,
		   st->out_slow_mc,

		   0, /* st->gc_total */
		   0, /* st->gc_ignored */
		   0, /* st->gc_goal_miss */
		   0, /* st->gc_dst_overflow */
		   0, /* st->in_hlist_search */
		   0  /* st->out_hlist_search */
		);
	return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
	.start	= rt_cpu_seq_start,
	.next	= rt_cpu_seq_next,
	.stop	= rt_cpu_seq_stop,
	.show	= rt_cpu_seq_show,
};


static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
	.open	 = rt_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
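
/* A quick way to eyeball these counters, with illustrative values only:
 *
 *	# cat /proc/net/stat/rt_cache
 *	entries  in_hit in_slow_tot ...
 *	000000a3 00000000 0000157d ...
 *
 * One row per possible CPU, all fields in hex; "entries" repeats the
 * global dst count on every row, and the fields printed as literal zeros
 * in rt_cpu_seq_show() are leftovers from the removed routing cache.
 */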

#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
	struct ip_rt_acct *dst, *src;
	unsigned int i, j;

	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
		for (j = 0; j < 256; j++) {
			dst[j].o_bytes   += src[j].o_bytes;
			dst[j].o_packets += src[j].o_packets;
			dst[j].i_bytes   += src[j].i_bytes;
			dst[j].i_packets += src[j].i_packets;
		}
	}

	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
	kfree(dst);
	return 0;
}
#endif

static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_create("rt_cache", 0444, net->proc_net,
			  &rt_cache_seq_fops);
	if (!pde)
		goto err1;

	pde = proc_create("rt_cache", 0444,
			  net->proc_net_stat, &rt_cpu_seq_fops);
	if (!pde)
		goto err2;

#ifdef CONFIG_IP_ROUTE_CLASSID
	pde = proc_create_single("rt_acct", 0, net->proc_net,
			rt_acct_proc_show);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
	remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata =	{
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */

static inline bool rt_is_expired(const struct rtable *rth)
{
	return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
}

void rt_cache_flush(struct net *net)
{
	rt_genid_bump_ipv4(net);
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	const struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net_device *dev = dst->dev;
	struct neighbour *n;

	rcu_read_lock_bh();

	if (likely(rt->rt_gw_family == AF_INET)) {
		n = ip_neigh_gw4(dev, rt->rt_gw4);
	} else if (rt->rt_gw_family == AF_INET6) {
		n = ip_neigh_gw6(dev, &rt->rt_gw6);
	} else {
		__be32 pkey;

		pkey = skb ? ip_hdr(skb)->daddr : *((__be32 *) daddr);
		n = ip_neigh_gw4(dev, pkey);
	}

	if (n && !refcount_inc_not_zero(&n->refcnt))
		n = NULL;

	rcu_read_unlock_bh();

	return n;
}

static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	const struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net_device *dev = dst->dev;
	const __be32 *pkey = daddr;

	if (rt->rt_gw_family == AF_INET) {
		pkey = (const __be32 *)&rt->rt_gw4;
	} else if (rt->rt_gw_family == AF_INET6) {
		return __ipv6_confirm_neigh_stub(dev, &rt->rt_gw6);
	} else if (!daddr ||
		   (rt->rt_flags &
		    (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL))) {
		return;
	}
	__ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
}

#define IP_IDENTS_SZ 2048u

static atomic_t *ip_idents __read_mostly;
static u32 *ip_tstamps __read_mostly;

/* In order to protect privacy, we add a perturbation to identifiers
 * if one generator is seldom used. This makes it hard for an attacker
 * to infer how many packets were sent between two points in time.
 */
u32 ip_idents_reserve(u32 hash, int segs)
{
	u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
	atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
	u32 old = READ_ONCE(*p_tstamp);
	u32 now = (u32)jiffies;
	u32 new, delta = 0;

	if (old != now && cmpxchg(p_tstamp, old, now) == old)
		delta = prandom_u32_max(now - old);

	/* Do not use atomic_add_return() as it makes UBSAN unhappy */
	do {
		old = (u32)atomic_read(p_id);
		new = old + delta + segs;
	} while (atomic_cmpxchg(p_id, old, new) != old);

	return new - segs;
}
EXPORT_SYMBOL(ip_idents_reserve);
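
/* A worked example of the perturbation above, assuming HZ=1000: if the
 * generator slot selected by the hash was last used 100 jiffies ago,
 * prandom_u32_max(100) skips up to 99 extra IDs before reserving the
 * `segs` IDs for this packet, so sampling the counter before and after
 * an idle window does not reveal how many packets actually used it.
 */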

void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
{
	u32 hash, id;

	/* Note the following code is not safe, but this is okay. */
	if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
		get_random_bytes(&net->ipv4.ip_id_key,
				 sizeof(net->ipv4.ip_id_key));

	hash = siphash_3u32((__force u32)iph->daddr,
			    (__force u32)iph->saddr,
			    iph->protocol,
			    &net->ipv4.ip_id_key);
	id = ip_idents_reserve(hash, segs);
	iph->id = htons(id);
}
EXPORT_SYMBOL(__ip_select_ident);

static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
			     const struct sock *sk,
			     const struct iphdr *iph,
			     int oif, u8 tos,
			     u8 prot, u32 mark, int flow_flags)
{
	if (sk) {
		const struct inet_sock *inet = inet_sk(sk);

		oif = sk->sk_bound_dev_if;
		mark = sk->sk_mark;
		tos = RT_CONN_FLAGS(sk);
		prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
	}
	flowi4_init_output(fl4, oif, mark, tos,
			   RT_SCOPE_UNIVERSE, prot,
			   flow_flags,
			   iph->daddr, iph->saddr, 0, 0,
			   sock_net_uid(net, sk));
}
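
/* Note that when a socket is supplied, __build_flow_key() lets the
 * socket's bound device, mark, TOS and protocol override the values
 * taken from the packet header, so PMTU and redirect processing for a
 * connected socket keys on the same flow the socket itself would emit.
 */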

static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
			       const struct sock *sk)
{
	const struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = ip_hdr(skb);
	int oif = skb->dev->ifindex;
	u8 tos = RT_TOS(iph->tos);
	u8 prot = iph->protocol;
	u32 mark = skb->mark;

	__build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
}

static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
			   inet_sk_flowi_flags(sk),
			   daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
	rcu_read_unlock();
}

static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
				 const struct sk_buff *skb)
{
	if (skb)
		build_skb_flow_key(fl4, skb, sk);
	else
		build_sk_flow_key(fl4, sk);
}

static DEFINE_SPINLOCK(fnhe_lock);

static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
{
	struct rtable *rt;

	rt = rcu_dereference(fnhe->fnhe_rth_input);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
		dst_dev_put(&rt->dst);
		dst_release(&rt->dst);
	}
	rt = rcu_dereference(fnhe->fnhe_rth_output);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
		dst_dev_put(&rt->dst);
		dst_release(&rt->dst);
	}
}

static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
{
	struct fib_nh_exception *fnhe, *oldest;

	oldest = rcu_dereference(hash->chain);
	for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
			oldest = fnhe;
	}
	fnhe_flush_routes(oldest);
	return oldest;
}

static inline u32 fnhe_hashfun(__be32 daddr)
{
	static u32 fnhe_hashrnd __read_mostly;
	u32 hval;

	net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
	hval = jhash_1word((__force u32) daddr, fnhe_hashrnd);
	return hash_32(hval, FNHE_HASH_SHIFT);
}
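
/* Next-hop exceptions record per-destination learned state such as a
 * discovered PMTU or an ICMP-redirect gateway. They sit in a small
 * per-nexthop hash keyed by fnhe_hashfun(); once a bucket chain grows
 * past FNHE_RECLAIM_DEPTH, update_or_create_fnhe() below recycles the
 * entry with the oldest fnhe_stamp instead of allocating a new one.
 */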

static void fill_route_from_fnhe(struct rtable *rt,
				 struct fib_nh_exception *fnhe)
{
	rt->rt_pmtu = fnhe->fnhe_pmtu;
	rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
	rt->dst.expires = fnhe->fnhe_expires;

	if (fnhe->fnhe_gw) {
		rt->rt_flags |= RTCF_REDIRECTED;
		rt->rt_gw_family = AF_INET;
		rt->rt_gw4 = fnhe->fnhe_gw;
	}
}

static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
				  __be32 gw, u32 pmtu, bool lock,
				  unsigned long expires)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe;
	struct rtable *rt;
	u32 genid, hval;
	unsigned int i;
	int depth;

	genid = fnhe_genid(dev_net(nhc->nhc_dev));
	hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference(nhc->nhc_exceptions);
	if (!hash) {
		hash = kcalloc(FNHE_HASH_SIZE, sizeof(*hash), GFP_ATOMIC);
		if (!hash)
			goto out_unlock;
		rcu_assign_pointer(nhc->nhc_exceptions, hash);
	}

	hash += hval;

	depth = 0;
	for (fnhe = rcu_dereference(hash->chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr)
			break;
		depth++;
	}

	if (fnhe) {
		if (fnhe->fnhe_genid != genid)
			fnhe->fnhe_genid = genid;
		if (gw)
			fnhe->fnhe_gw = gw;
		if (pmtu) {
			fnhe->fnhe_pmtu = pmtu;
			fnhe->fnhe_mtu_locked = lock;
		}
		fnhe->fnhe_expires = max(1UL, expires);
		/* Update all cached dsts too */
		rt = rcu_dereference(fnhe->fnhe_rth_input);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
		rt = rcu_dereference(fnhe->fnhe_rth_output);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
	} else {
		if (depth > FNHE_RECLAIM_DEPTH)
			fnhe = fnhe_oldest(hash);
		else {
			fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
			if (!fnhe)
				goto out_unlock;

			fnhe->fnhe_next = hash->chain;
			rcu_assign_pointer(hash->chain, fnhe);
		}
		fnhe->fnhe_genid = genid;
		fnhe->fnhe_daddr = daddr;
		fnhe->fnhe_gw = gw;
		fnhe->fnhe_pmtu = pmtu;
		fnhe->fnhe_mtu_locked = lock;
		fnhe->fnhe_expires = max(1UL, expires);

		/* Exception created; mark the cached routes for the nexthop
		 * stale, so anyone caching it rechecks if this exception
		 * applies to them.
		 */
		rt = rcu_dereference(nhc->nhc_rth_input);
		if (rt)
			rt->dst.obsolete = DST_OBSOLETE_KILL;

		for_each_possible_cpu(i) {
			struct rtable __rcu **prt;

			prt = per_cpu_ptr(nhc->nhc_pcpu_rth_output, i);
			rt = rcu_dereference(*prt);
			if (rt)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
		}
	}

	fnhe->fnhe_stamp = jiffies;

out_unlock:
	spin_unlock_bh(&fnhe_lock);
}

static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb,
			     struct flowi4 *fl4, bool kill_route)
{
	__be32 new_gw = icmp_hdr(skb)->un.gateway;
	__be32 old_gw = ip_hdr(skb)->saddr;
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct fib_result res;
	struct neighbour *n;
	struct net *net;

	switch (icmp_hdr(skb)->code & 7) {
	case ICMP_REDIR_NET:
	case ICMP_REDIR_NETTOS:
	case ICMP_REDIR_HOST:
	case ICMP_REDIR_HOSTTOS:
		break;

	default:
		return;
	}

	if (rt->rt_gw_family != AF_INET || rt->rt_gw4 != old_gw)
		return;

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev)
		return;

	net = dev_net(dev);
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
	    ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) &&
		    ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
	if (!n)
		n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
	if (!IS_ERR(n)) {
		if (!(n->nud_state & NUD_VALID)) {
			neigh_event_send(n, NULL);
		} else {
			if (fib_lookup(net, fl4, &res, 0) == 0) {
				struct fib_nh_common *nhc = FIB_RES_NHC(res);

				update_or_create_fnhe(nhc, fl4->daddr, new_gw,
						0, false,
						jiffies + ip_rt_gc_timeout);
			}
			if (kill_route)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
			call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
		}
		neigh_release(n);
	}
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev)) {
		const struct iphdr *iph = (const struct iphdr *) skb->data;
		__be32 daddr = iph->daddr;
		__be32 saddr = iph->saddr;

		net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
				     "  Advised path = %pI4 -> %pI4\n",
				     &old_gw, dev->name, &new_gw,
				     &saddr, &daddr);
	}
#endif
	;
}

static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt;
	struct flowi4 fl4;
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct net *net = dev_net(skb->dev);
	int oif = skb->dev->ifindex;
	u8 tos = RT_TOS(iph->tos);
	u8 prot = iph->protocol;
	u32 mark = skb->mark;

	rt = (struct rtable *) dst;

	__build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
	__ip_do_redirect(rt, skb, &fl4, true);
}

static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;
	struct dst_entry *ret = dst;

	if (rt) {
		if (dst->obsolete > 0) {
			ip_rt_put(rt);
			ret = NULL;
		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
			   rt->dst.expires) {
			ip_rt_put(rt);
			ret = NULL;
		}
	}
	return ret;
}

/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   has forgotten the redirected route and start sending
 *	   redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */
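
/* A rough timeline of the backoff above, assuming HZ=1000 and the
 * defaults: the first redirect is sent immediately, the second no sooner
 * than 40ms later (ip_rt_redirect_load << 1), then 80ms, 160ms, ...,
 * doubling until the ninth (ip_rt_redirect_number); after that nothing
 * is sent until the peer stays quiet for ip_rt_redirect_silence
 * (~20.5s), which resets both counters.
 */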

void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct in_device *in_dev;
	struct inet_peer *peer;
	struct net *net;
	int log_martians;
	int vif;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(rt->dst.dev);
	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
		rcu_read_unlock();
		return;
	}
	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
	vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
	rcu_read_unlock();

	net = dev_net(rt->dst.dev);
	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
	if (!peer) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
			  rt_nexthop(rt, ip_hdr(skb)->daddr));
		return;
	}

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
		peer->rate_tokens = 0;
		peer->n_redirects = 0;
	}

	/* Too many ignored redirects; do not send anything.
	 * Set dst.rate_last to the last seen redirected packet.
	 */
	if (peer->n_redirects >= ip_rt_redirect_number) {
		peer->rate_last = jiffies;
		goto out_put_peer;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (peer->rate_tokens == 0 ||
	    time_after(jiffies,
		       (peer->rate_last +
			(ip_rt_redirect_load << peer->rate_tokens)))) {
		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);

		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
		peer->rate_last = jiffies;
		++peer->rate_tokens;
		++peer->n_redirects;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		if (log_martians &&
		    peer->rate_tokens == ip_rt_redirect_number)
			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
					     &ip_hdr(skb)->saddr, inet_iif(skb),
					     &ip_hdr(skb)->daddr, &gw);
#endif
	}
out_put_peer:
	inet_putpeer(peer);
}

static int ip_error(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct inet_peer *peer;
	unsigned long now;
	struct net *net;
	bool send;
	int code;

	if (netif_is_l3_master(skb->dev)) {
		dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
		if (!dev)
			goto out;
	}

	in_dev = __in_dev_get_rcu(dev);

	/* IP on this device is disabled. */
	if (!in_dev)
		goto out;

	net = dev_net(rt->dst.dev);
	if (!IN_DEV_FORWARD(in_dev)) {
		switch (rt->dst.error) {
		case EHOSTUNREACH:
			__IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
			break;

		case ENETUNREACH:
			__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
			break;
		}
		goto out;
	}

	switch (rt->dst.error) {
	case EINVAL:
	default:
		goto out;
	case EHOSTUNREACH:
		code = ICMP_HOST_UNREACH;
		break;
	case ENETUNREACH:
		code = ICMP_NET_UNREACH;
		__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
		break;
	case EACCES:
		code = ICMP_PKT_FILTERED;
		break;
	}

	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
			       l3mdev_master_ifindex(skb->dev), 1);

	send = true;
	if (peer) {
		now = jiffies;
		peer->rate_tokens += now - peer->rate_last;
		if (peer->rate_tokens > ip_rt_error_burst)
			peer->rate_tokens = ip_rt_error_burst;
		peer->rate_last = now;
		if (peer->rate_tokens >= ip_rt_error_cost)
			peer->rate_tokens -= ip_rt_error_cost;
		else
			send = false;
		inet_putpeer(peer);
	}
	if (send)
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:	kfree_skb(skb);
	return 0;
}
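
/* The peer state in ip_error() above behaves as a token bucket: tokens
 * accrue at one per jiffy up to ip_rt_error_burst (5 * HZ) and each ICMP
 * error costs ip_rt_error_cost (HZ), so at most five errors can go out
 * back to back and the sustained rate is roughly one per second for any
 * given source address and VRF.
 */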

static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
	struct dst_entry *dst = &rt->dst;
	u32 old_mtu = ipv4_mtu(dst);
	struct fib_result res;
	bool lock = false;

	if (ip_mtu_locked(dst))
		return;

	if (old_mtu < mtu)
		return;

	if (mtu < ip_rt_min_pmtu) {
		lock = true;
		mtu = min(old_mtu, ip_rt_min_pmtu);
	}

	if (rt->rt_pmtu == mtu && !lock &&
	    time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
		return;

	rcu_read_lock();
	if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
		struct fib_nh_common *nhc = FIB_RES_NHC(res);

		update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
				      jiffies + ip_rt_mtu_expires);
	}
	rcu_read_unlock();
}

static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			      struct sk_buff *skb, u32 mtu)
{
	struct rtable *rt = (struct rtable *) dst;
	struct flowi4 fl4;

	ip_rt_build_flow_key(&fl4, sk, skb);
	__ip_rt_update_pmtu(rt, &fl4, mtu);
}

void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
		      int oif, u8 protocol)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	u32 mark = IP4_REPLY_MARK(net, skb->mark);

	__build_flow_key(net, &fl4, NULL, iph, oif,
			 RT_TOS(iph->tos), protocol, mark, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_update_pmtu);

static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);

	if (!fl4.flowi4_mark)
		fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);

	rt = __ip_route_output_key(sock_net(sk), &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}

void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct dst_entry *odst = NULL;
	bool new = false;
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);

	if (!ip_sk_accept_pmtu(sk))
		goto out;

	odst = sk_dst_get(sk);

	if (sock_owned_by_user(sk) || !odst) {
		__ipv4_sk_update_pmtu(skb, sk, mtu);
		goto out;
	}

	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);

	rt = (struct rtable *)odst;
	if (odst->obsolete && !odst->ops->check(odst, 0)) {
		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	__ip_rt_update_pmtu((struct rtable *)xfrm_dst_path(&rt->dst), &fl4, mtu);

	if (!dst_check(&rt->dst, 0)) {
		if (new)
			dst_release(&rt->dst);

		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	if (new)
		sk_dst_set(sk, &rt->dst);

out:
	bh_unlock_sock(sk);
	dst_release(odst);
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);

void ipv4_redirect(struct sk_buff *skb, struct net *net,
		   int oif, u8 protocol)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(net, &fl4, NULL, iph, oif,
			 RT_TOS(iph->tos), protocol, 0, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_redirect);

void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct net *net = sock_net(sk);

	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_sk_redirect);

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct rtable *rt = (struct rtable *) dst;

	/* All IPV4 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 *
	 * When a PMTU/redirect information update invalidates a route,
	 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
	 * DST_OBSOLETE_DEAD.
	 */
	if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
		return NULL;
	return dst;
}
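
/* Typical caller pattern for the check above, as in ipv4_sk_update_pmtu():
 * a cached dst is revalidated with dst_check() before reuse, and because
 * every IPv4 dst starts out as DST_OBSOLETE_FORCE_CHK, a later
 * DST_OBSOLETE_KILL from the PMTU/redirect code makes the check return
 * NULL and forces the caller into a fresh route lookup.
 */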

static void ipv4_send_dest_unreach(struct sk_buff *skb)
{
	struct ip_options opt;
	int res;

	/* Recompile ip options since IPCB may not be valid anymore.
	 * Also check we have a reasonable ipv4 header.
	 */
	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
	    ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
		return;

	memset(&opt, 0, sizeof(opt));
	if (ip_hdr(skb)->ihl > 5) {
		if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
			return;
		opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);

		rcu_read_lock();
		res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
		rcu_read_unlock();

		if (res)
			return;
	}
	__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
}

static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	ipv4_send_dest_unreach(skb);

	rt = skb_rtable(skb);
	if (rt)
		dst_set_expires(&rt->dst, 0);
}

static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	pr_debug("%s: %pI4 -> %pI4, %s\n",
		 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
		 skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	WARN_ON(1);
	return 0;
}

/*
 * We do not cache the source address of the outgoing interface,
 * because it is used only by IP RR, TS and SRR options,
 * so it is out of the fast path.
 *
 * BTW remember: "addr" is allowed to be unaligned
 * in IP options!
 */

void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
{
	__be32 src;

	if (rt_is_output_route(rt))
		src = ip_hdr(skb)->saddr;
	else {
		struct fib_result res;
		struct iphdr *iph = ip_hdr(skb);
		struct flowi4 fl4 = {
			.daddr = iph->daddr,
			.saddr = iph->saddr,
			.flowi4_tos = RT_TOS(iph->tos),
			.flowi4_oif = rt->dst.dev->ifindex,
			.flowi4_iif = skb->dev->ifindex,
			.flowi4_mark = skb->mark,
		};

		rcu_read_lock();
		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
			src = fib_result_prefsrc(dev_net(rt->dst.dev), &res);
		else
			src = inet_select_addr(rt->dst.dev,
					       rt_nexthop(rt, iph->daddr),
					       RT_SCOPE_UNIVERSE);
		rcu_read_unlock();
	}
	memcpy(addr, &src, 4);
}

#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->dst.tclassid & 0xFFFF))
		rt->dst.tclassid |= tag & 0xFFFF;
	if (!(rt->dst.tclassid & 0xFFFF0000))
		rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif

static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
{
	unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
	unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
				    ip_rt_min_advmss);

	return min(advmss, IPV4_MAX_PMTU - header_size);
}

static unsigned int ipv4_mtu(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *) dst;
	unsigned int mtu = rt->rt_pmtu;

	if (!mtu || time_after_eq(jiffies, rt->dst.expires))
		mtu = dst_metric_raw(dst, RTAX_MTU);

	if (mtu)
		return mtu;

	mtu = READ_ONCE(dst->dev->mtu);

	if (unlikely(ip_mtu_locked(dst))) {
		if (rt->rt_gw_family && mtu > 576)
			mtu = 576;
	}

	mtu = min_t(unsigned int, mtu, IP_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
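
/* Worked example for ipv4_mtu() above: a route carrying a locked MTU
 * (e.g. from a "mtu lock" metric or a too-small ICMP hint) and a
 * gateway reports at most the classic 576-byte default when it has to
 * fall back to the device MTU; an explicit rt_pmtu or RTAX_MTU value,
 * when present and unexpired, is returned as-is.
 */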

static void ip_del_fnhe(struct fib_nh_common *nhc, __be32 daddr)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
	u32 hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference_protected(nhc->nhc_exceptions,
					 lockdep_is_held(&fnhe_lock));
	hash += hval;

	fnhe_p = &hash->chain;
	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
	while (fnhe) {
		if (fnhe->fnhe_daddr == daddr) {
			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
			/* set fnhe_daddr to 0 to ensure it won't bind with
			 * new dsts in rt_bind_exception().
			 */
			fnhe->fnhe_daddr = 0;
			fnhe_flush_routes(fnhe);
			kfree_rcu(fnhe, rcu);
			break;
		}
		fnhe_p = &fnhe->fnhe_next;
		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
						 lockdep_is_held(&fnhe_lock));
	}

	spin_unlock_bh(&fnhe_lock);
}

static struct fib_nh_exception *find_exception(struct fib_nh_common *nhc,
					       __be32 daddr)
{
	struct fnhe_hash_bucket *hash = rcu_dereference(nhc->nhc_exceptions);
	struct fib_nh_exception *fnhe;
	u32 hval;

	if (!hash)
		return NULL;

	hval = fnhe_hashfun(daddr);

	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr) {
			if (fnhe->fnhe_expires &&
			    time_after(jiffies, fnhe->fnhe_expires)) {
				ip_del_fnhe(nhc, daddr);
				break;
			}
			return fnhe;
		}
	}
	return NULL;
}

/* MTU selection:
 * 1. mtu on route is locked - use it
 * 2. mtu from nexthop exception
 * 3. mtu from egress device
 */

u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
{
	struct fib_nh_common *nhc = res->nhc;
	struct net_device *dev = nhc->nhc_dev;
	struct fib_info *fi = res->fi;
	u32 mtu = 0;

	if (dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu ||
	    fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU))
		mtu = fi->fib_mtu;

	if (likely(!mtu)) {
		struct fib_nh_exception *fnhe;

		fnhe = find_exception(nhc, daddr);
		if (fnhe && !time_after_eq(jiffies, fnhe->fnhe_expires))
			mtu = fnhe->fnhe_pmtu;
	}

	if (likely(!mtu))
		mtu = min(READ_ONCE(dev->mtu), IP_MAX_MTU);

	return mtu - lwtunnel_headroom(nhc->nhc_lwtstate, mtu);
}

static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
			      __be32 daddr, const bool do_cache)
{
	bool ret = false;

	spin_lock_bh(&fnhe_lock);

	if (daddr == fnhe->fnhe_daddr) {
		struct rtable __rcu **porig;
		struct rtable *orig;
		int genid = fnhe_genid(dev_net(rt->dst.dev));

		if (rt_is_input_route(rt))
			porig = &fnhe->fnhe_rth_input;
		else
			porig = &fnhe->fnhe_rth_output;
		orig = rcu_dereference(*porig);

		if (fnhe->fnhe_genid != genid) {
			fnhe->fnhe_genid = genid;
			fnhe->fnhe_gw = 0;
			fnhe->fnhe_pmtu = 0;
			fnhe->fnhe_expires = 0;
			fnhe->fnhe_mtu_locked = false;
			fnhe_flush_routes(fnhe);
			orig = NULL;
		}
		fill_route_from_fnhe(rt, fnhe);
		if (!rt->rt_gw4) {
			rt->rt_gw4 = daddr;
			rt->rt_gw_family = AF_INET;
		}

		if (do_cache) {
			dst_hold(&rt->dst);
			rcu_assign_pointer(*porig, rt);
			if (orig) {
				dst_dev_put(&orig->dst);
				dst_release(&orig->dst);
			}
			ret = true;
		}

		fnhe->fnhe_stamp = jiffies;
	}
	spin_unlock_bh(&fnhe_lock);

	return ret;
}

static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
{
	struct rtable *orig, *prev, **p;
	bool ret = true;

	if (rt_is_input_route(rt)) {
		p = (struct rtable **)&nhc->nhc_rth_input;
	} else {
		p = (struct rtable **)raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
	}
	orig = *p;

	/* hold dst before doing cmpxchg() to avoid race condition
	 * on this dst
	 */
	dst_hold(&rt->dst);
	prev = cmpxchg(p, orig, rt);
	if (prev == orig) {
		if (orig) {
			dst_dev_put(&orig->dst);
			dst_release(&orig->dst);
		}
	} else {
		dst_release(&rt->dst);
		ret = false;
	}

	return ret;
}

struct uncached_list {
	spinlock_t		lock;
	struct list_head	head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);

void rt_add_uncached_list(struct rtable *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);

	rt->rt_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->rt_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}

void rt_del_uncached_list(struct rtable *rt)
{
	if (!list_empty(&rt->rt_uncached)) {
		struct uncached_list *ul = rt->rt_uncached_list;

		spin_lock_bh(&ul->lock);
		list_del(&rt->rt_uncached);
		spin_unlock_bh(&ul->lock);
	}
}

static void ipv4_dst_destroy(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;

	ip_dst_metrics_put(dst);
	rt_del_uncached_list(rt);
}

void rt_flush_dev(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct rtable *rt;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

		spin_lock_bh(&ul->lock);
		list_for_each_entry(rt, &ul->head, rt_uncached) {
			if (rt->dst.dev != dev)
				continue;
			rt->dst.dev = net->loopback_dev;
			dev_hold(rt->dst.dev);
			dev_put(dev);
		}
		spin_unlock_bh(&ul->lock);
	}
}

static bool rt_cache_valid(const struct rtable *rt)
{
	return	rt &&
		rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
		!rt_is_expired(rt);
}
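
/* rt_set_nexthop() below copies nexthop state (gateway, metrics, classid,
 * lwtunnel state) from the FIB result into a freshly allocated rtable,
 * then tries to cache the route either in a matching exception entry or
 * in the nexthop's input slot / per-CPU output slot; whatever cannot be
 * cached is put on the per-CPU uncached list so rt_flush_dev() can still
 * find it when a device goes away.
 */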

static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
			   const struct fib_result *res,
			   struct fib_nh_exception *fnhe,
			   struct fib_info *fi, u16 type, u32 itag,
			   const bool do_cache)
{
	bool cached = false;

	if (fi) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);

		if (nhc->nhc_gw_family && nhc->nhc_scope == RT_SCOPE_LINK) {
			rt->rt_gw_family = nhc->nhc_gw_family;
			/* only INET and INET6 are supported */
			if (likely(nhc->nhc_gw_family == AF_INET))
				rt->rt_gw4 = nhc->nhc_gw.ipv4;
			else
				rt->rt_gw6 = nhc->nhc_gw.ipv6;
		}

		ip_dst_init_metrics(&rt->dst, fi->fib_metrics);

#ifdef CONFIG_IP_ROUTE_CLASSID
		if (nhc->nhc_family == AF_INET) {
			struct fib_nh *nh;

			nh = container_of(nhc, struct fib_nh, nh_common);
			rt->dst.tclassid = nh->nh_tclassid;
		}
#endif
		rt->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
		if (unlikely(fnhe))
			cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
		else if (do_cache)
			cached = rt_cache_route(nhc, rt);
		if (unlikely(!cached)) {
			/* Routes we intend to cache in nexthop exception or
			 * FIB nexthop have the DST_NOCACHE bit clear.
			 * However, if we are unsuccessful at storing this
			 * route into the cache we really need to set it.
			 */
			if (!rt->rt_gw4) {
				rt->rt_gw_family = AF_INET;
				rt->rt_gw4 = daddr;
			}
			rt_add_uncached_list(rt);
		}
	} else
		rt_add_uncached_list(rt);

#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, res->tclassid);
#endif
	set_class_tag(rt, itag);
#endif
}

struct rtable *rt_dst_alloc(struct net_device *dev,
			    unsigned int flags, u16 type,
			    bool nopolicy, bool noxfrm, bool will_cache)
{
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
		       (will_cache ? 0 : DST_HOST) |
		       (nopolicy ? DST_NOPOLICY : 0) |
		       (noxfrm ? DST_NOXFRM : 0));

	if (rt) {
		rt->rt_genid = rt_genid_ipv4(dev_net(dev));
		rt->rt_flags = flags;
		rt->rt_type = type;
		rt->rt_is_input = 0;
		rt->rt_iif = 0;
		rt->rt_pmtu = 0;
		rt->rt_mtu_locked = 0;
		rt->rt_gw_family = 0;
		rt->rt_gw4 = 0;
		INIT_LIST_HEAD(&rt->rt_uncached);

		rt->dst.output = ip_output;
		if (flags & RTCF_LOCAL)
			rt->dst.input = ip_local_deliver;
	}

	return rt;
}
EXPORT_SYMBOL(rt_dst_alloc);

struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt)
{
	struct rtable *new_rt;

	new_rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
			   rt->dst.flags);

	if (new_rt) {
		new_rt->rt_genid = rt_genid_ipv4(dev_net(dev));
		new_rt->rt_flags = rt->rt_flags;
		new_rt->rt_type = rt->rt_type;
		new_rt->rt_is_input = rt->rt_is_input;
		new_rt->rt_iif = rt->rt_iif;
		new_rt->rt_pmtu = rt->rt_pmtu;
		new_rt->rt_mtu_locked = rt->rt_mtu_locked;
		new_rt->rt_gw_family = rt->rt_gw_family;
		if (rt->rt_gw_family == AF_INET)
			new_rt->rt_gw4 = rt->rt_gw4;
		else if (rt->rt_gw_family == AF_INET6)
			new_rt->rt_gw6 = rt->rt_gw6;
		INIT_LIST_HEAD(&new_rt->rt_uncached);

		new_rt->dst.flags |= DST_HOST;
		new_rt->dst.input = rt->dst.input;
		new_rt->dst.output = rt->dst.output;
		new_rt->dst.error = rt->dst.error;
		new_rt->dst.lastuse = jiffies;
		new_rt->dst.lwtstate = lwtstate_get(rt->dst.lwtstate);
	}
	return new_rt;
}
EXPORT_SYMBOL(rt_dst_clone);

/* called in rcu_read_lock() section */
int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			  u8 tos, struct net_device *dev,
			  struct in_device *in_dev, u32 *itag)
{
	int err;

	/* Primary sanity checks. */
	if (!in_dev)
		return -EINVAL;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    skb->protocol != htons(ETH_P_IP))
		return -EINVAL;

	if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
		return -EINVAL;

	if (ipv4_is_zeronet(saddr)) {
		if (!ipv4_is_local_multicast(daddr) &&
		    ip_hdr(skb)->protocol != IPPROTO_IGMP)
			return -EINVAL;
	} else {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, itag);
		if (err < 0)
			return err;
	}
	return 0;
}

/* called in rcu_read_lock() section */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			     u8 tos, struct net_device *dev, int our)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	unsigned int flags = RTCF_MULTICAST;
	struct rtable *rth;
	u32 itag = 0;
	int err;

	err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
	if (err)
		return err;

	if (our)
		flags |= RTCF_LOCAL;

	rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
	if (!rth)
		return -ENOBUFS;

#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->dst.output = ip_rt_bug;
	rth->rt_is_input = 1;

#ifdef CONFIG_IP_MROUTE
	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
		rth->dst.input = ip_mr_input;
#endif
	RT_CACHE_STAT_INC(in_slow_mc);

	skb_dst_set(skb, &rth->dst);
	return 0;
}


static void ip_handle_martian_source(struct net_device *dev,
				     struct in_device *in_dev,
				     struct sk_buff *skb,
				     __be32 daddr,
				     __be32 saddr)
{
	RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 *	RFC1812 recommendation, if source is martian,
		 *	the only hint is MAC header.
		 */
		pr_warn("martian source %pI4 from %pI4, on dev %s\n",
			&daddr, &saddr, dev->name);
		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
			print_hex_dump(KERN_WARNING, "ll header: ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       skb_mac_header(skb),
				       dev->hard_header_len, false);
		}
	}
#endif
}

/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
			   const struct fib_result *res,
			   struct in_device *in_dev,
			   __be32 daddr, __be32 saddr, u32 tos)
{
	struct fib_nh_common *nhc = FIB_RES_NHC(*res);
	struct net_device *dev = nhc->nhc_dev;
	struct fib_nh_exception *fnhe;
	struct rtable *rth;
	int err;
	struct in_device *out_dev;
	bool do_cache;
	u32 itag = 0;

	/* get a working reference to the output device */
	out_dev = __in_dev_get_rcu(dev);
	if (!out_dev) {
		net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
		return -EINVAL;
	}

	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
				  in_dev->dev, in_dev, &itag);
	if (err < 0) {
		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
					 saddr);

		goto cleanup;
	}

	do_cache = res->fi && !itag;
	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
	    skb->protocol == htons(ETH_P_IP)) {
		__be32 gw;

		gw = nhc->nhc_gw_family == AF_INET ? nhc->nhc_gw.ipv4 : 0;
		if (IN_DEV_SHARED_MEDIA(out_dev) ||
		    inet_addr_onlink(out_dev, saddr, gw))
			IPCB(skb)->flags |= IPSKB_DOREDIRECT;
	}

	if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create a route if it is
		 * invalid for proxy arp. DNAT routes are always valid.
		 *
		 * The proxy arp feature has been extended to allow ARP
		 * replies back to the same interface, to support
		 * Private VLAN switch technologies. See arp.c.
		 */
		if (out_dev == in_dev &&
		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
			err = -EINVAL;
			goto cleanup;
		}
	}

	fnhe = find_exception(nhc, daddr);
	if (do_cache) {
		if (fnhe)
			rth = rcu_dereference(fnhe->fnhe_rth_input);
		else
			rth = rcu_dereference(nhc->nhc_rth_input);
		if (rt_cache_valid(rth)) {
			skb_dst_set_noref(skb, &rth->dst);
			goto out;
		}
	}

	rth = rt_dst_alloc(out_dev->dev, 0, res->type,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	rth->rt_is_input = 1;
	RT_CACHE_STAT_INC(in_slow_tot);

	rth->dst.input = ip_forward;

	rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
		       do_cache);
	lwtunnel_set_redirect(&rth->dst);
	skb_dst_set(skb, &rth->dst);
out:
	err = 0;
cleanup:
	return err;
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
/* To make ICMP packets follow the right flow, the multipath hash is
 * calculated from the inner IP addresses.
 */
static void ip_multipath_l3_keys(const struct sk_buff *skb,
				 struct flow_keys *hash_keys)
{
	const struct iphdr *outer_iph = ip_hdr(skb);
	const struct iphdr *key_iph = outer_iph;
	const struct iphdr *inner_iph;
	const struct icmphdr *icmph;
	struct iphdr _inner_iph;
	struct icmphdr _icmph;

	if (likely(outer_iph->protocol != IPPROTO_ICMP))
		goto out;

	if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
		goto out;

	icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
				   &_icmph);
	if (!icmph)
		goto out;

	if (icmph->type != ICMP_DEST_UNREACH &&
	    icmph->type != ICMP_REDIRECT &&
	    icmph->type != ICMP_TIME_EXCEEDED &&
	    icmph->type != ICMP_PARAMETERPROB)
		goto out;

	inner_iph = skb_header_pointer(skb,
				       outer_iph->ihl * 4 + sizeof(_icmph),
				       sizeof(_inner_iph), &_inner_iph);
	if (!inner_iph)
		goto out;

	key_iph = inner_iph;
out:
	hash_keys->addrs.v4addrs.src = key_iph->saddr;
	hash_keys->addrs.v4addrs.dst = key_iph->daddr;
}
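
/* Which keys feed the hash below is selected by the sysctl
 * net.ipv4.fib_multipath_hash_policy: 0 hashes the L3 addresses (using
 * the inner packet of an ICMP error, as prepared above), 1 adds L4 ports
 * and the protocol, and 2 hashes the L3 addresses recovered by the flow
 * dissector.
 */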
/* if skb is set it will be used and fl4 can be NULL */
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
		       const struct sk_buff *skb, struct flow_keys *flkeys)
{
	u32 multipath_hash = fl4 ? fl4->flowi4_multipath_hash : 0;
	struct flow_keys hash_keys;
	u32 mhash;

	switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
	case 0:
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		if (skb) {
			ip_multipath_l3_keys(skb, &hash_keys);
		} else {
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
		}
		break;
	case 1:
		/* skb is currently provided only when forwarding */
		if (skb) {
			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
			struct flow_keys keys;

			/* short-circuit if we already have L4 hash present */
			if (skb->l4_hash)
				return skb_get_hash_raw(skb) >> 1;

			memset(&hash_keys, 0, sizeof(hash_keys));

			if (!flkeys) {
				skb_flow_dissect_flow_keys(skb, &keys, flag);
				flkeys = &keys;
			}

			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
			hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
			hash_keys.ports.src = flkeys->ports.src;
			hash_keys.ports.dst = flkeys->ports.dst;
			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
		} else {
			memset(&hash_keys, 0, sizeof(hash_keys));
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
			hash_keys.ports.src = fl4->fl4_sport;
			hash_keys.ports.dst = fl4->fl4_dport;
			hash_keys.basic.ip_proto = fl4->flowi4_proto;
		}
		break;
	case 2:
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		/* skb is currently provided only when forwarding */
		if (skb) {
			struct flow_keys keys;

			skb_flow_dissect_flow_keys(skb, &keys, 0);

			hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
			hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
		} else {
			/* Same as case 0 */
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
		}
		break;
	}
	mhash = flow_hash_from_keys(&hash_keys);

	if (multipath_hash)
		mhash = jhash_2words(mhash, multipath_hash, 0);

	return mhash >> 1;
}
#endif /* CONFIG_IP_ROUTE_MULTIPATH */

static int ip_mkroute_input(struct sk_buff *skb,
			    struct fib_result *res,
			    struct in_device *in_dev,
			    __be32 daddr, __be32 saddr, u32 tos,
			    struct flow_keys *hkeys)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res->fi && fib_info_num_path(res->fi) > 1) {
		int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);

		fib_select_multipath(res, h);
	}
#endif

	/* create a routing cache entry */
	return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
}
2019 * called with rcu_read_lock()
2020 */
2021
2022 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2023 u8 tos, struct net_device *dev,
2024 struct fib_result *res)
2025 {
2026 struct in_device *in_dev = __in_dev_get_rcu(dev);
2027 struct flow_keys *flkeys = NULL, _flkeys;
2028 struct net *net = dev_net(dev);
2029 struct ip_tunnel_info *tun_info;
2030 int err = -EINVAL;
2031 unsigned int flags = 0;
2032 u32 itag = 0;
2033 struct rtable *rth;
2034 struct flowi4 fl4;
2035 bool do_cache = true;
2036
2037 /* IP on this device is disabled. */
2038
2039 if (!in_dev)
2040 goto out;
2041
2042 /* Check for the weirdest martians, which cannot be detected
2043 by fib_lookup.
2044 */
2045
2046 tun_info = skb_tunnel_info(skb);
2047 if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
2048 fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
2049 else
2050 fl4.flowi4_tun_key.tun_id = 0;
2051 skb_dst_drop(skb);
2052
2053 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
2054 goto martian_source;
2055
2056 res->fi = NULL;
2057 res->table = NULL;
2058 if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
2059 goto brd_input;
2060
2061 /* Accept zero addresses only for limited broadcast;
2062 * I do not even know whether to fix this or not. Waiting for complaints :-)
2063 */
2064 if (ipv4_is_zeronet(saddr))
2065 goto martian_source;
2066
2067 if (ipv4_is_zeronet(daddr))
2068 goto martian_destination;
2069
2070 /* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
2071 * calling it at most once when daddr and/or saddr is a loopback address
2072 */
2073 if (ipv4_is_loopback(daddr)) {
2074 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
2075 goto martian_destination;
2076 } else if (ipv4_is_loopback(saddr)) {
2077 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
2078 goto martian_source;
2079 }
2080
2081 /*
2082 * Now we are ready to route the packet.
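 *
 * The flow key filled in below is exactly what the FIB lookup sees:
 * { iif, mark, tos, scope, daddr, saddr, uid }, plus the L4 ports
 * when a fib rule asks for early flow dissection. A roughly
 * equivalent query can be issued from userspace (addresses and
 * device name are illustrative):
 *
 *	ip route get 203.0.113.7 from 198.51.100.2 iif eth0 mark 0x1
 *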
2083 */
2084 fl4.flowi4_oif = 0;
2085 fl4.flowi4_iif = dev->ifindex;
2086 fl4.flowi4_mark = skb->mark;
2087 fl4.flowi4_tos = tos;
2088 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
2089 fl4.flowi4_flags = 0;
2090 fl4.daddr = daddr;
2091 fl4.saddr = saddr;
2092 fl4.flowi4_uid = sock_net_uid(net, NULL);
2093
2094 if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
2095 flkeys = &_flkeys;
2096 } else {
2097 fl4.flowi4_proto = 0;
2098 fl4.fl4_sport = 0;
2099 fl4.fl4_dport = 0;
2100 }
2101
2102 err = fib_lookup(net, &fl4, res, 0);
2103 if (err != 0) {
2104 if (!IN_DEV_FORWARD(in_dev))
2105 err = -EHOSTUNREACH;
2106 goto no_route;
2107 }
2108
2109 if (res->type == RTN_BROADCAST) {
2110 if (IN_DEV_BFORWARD(in_dev))
2111 goto make_route;
2112 /* do not cache if bc_forwarding is enabled */
2113 if (IPV4_DEVCONF_ALL(net, BC_FORWARDING))
2114 do_cache = false;
2115 goto brd_input;
2116 }
2117
2118 if (res->type == RTN_LOCAL) {
2119 err = fib_validate_source(skb, saddr, daddr, tos,
2120 0, dev, in_dev, &itag);
2121 if (err < 0)
2122 goto martian_source;
2123 goto local_input;
2124 }
2125
2126 if (!IN_DEV_FORWARD(in_dev)) {
2127 err = -EHOSTUNREACH;
2128 goto no_route;
2129 }
2130 if (res->type != RTN_UNICAST)
2131 goto martian_destination;
2132
2133 make_route:
2134 err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys);
2135 out: return err;
2136
2137 brd_input:
2138 if (skb->protocol != htons(ETH_P_IP))
2139 goto e_inval;
2140
2141 if (!ipv4_is_zeronet(saddr)) {
2142 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
2143 in_dev, &itag);
2144 if (err < 0)
2145 goto martian_source;
2146 }
2147 flags |= RTCF_BROADCAST;
2148 res->type = RTN_BROADCAST;
2149 RT_CACHE_STAT_INC(in_brd);
2150
2151 local_input:
2152 do_cache &= res->fi && !itag;
2153 if (do_cache) {
2154 struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2155
2156 rth = rcu_dereference(nhc->nhc_rth_input);
2157 if (rt_cache_valid(rth)) {
2158 skb_dst_set_noref(skb, &rth->dst);
2159 err = 0;
2160 goto out;
2161 }
2162 }
2163
2164 rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
2165 flags | RTCF_LOCAL, res->type,
2166 IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
2167 if (!rth)
2168 goto e_nobufs;
2169
2170 rth->dst.output = ip_rt_bug;
2171 #ifdef CONFIG_IP_ROUTE_CLASSID
2172 rth->dst.tclassid = itag;
2173 #endif
2174 rth->rt_is_input = 1;
2175
2176 RT_CACHE_STAT_INC(in_slow_tot);
2177 if (res->type == RTN_UNREACHABLE) {
2178 rth->dst.input = ip_error;
2179 rth->dst.error = -err;
2180 rth->rt_flags &= ~RTCF_LOCAL;
2181 }
2182
2183 if (do_cache) {
2184 struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2185
2186 rth->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
2187 if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
2188 WARN_ON(rth->dst.input == lwtunnel_input);
2189 rth->dst.lwtstate->orig_input = rth->dst.input;
2190 rth->dst.input = lwtunnel_input;
2191 }
2192
2193 if (unlikely(!rt_cache_route(nhc, rth)))
2194 rt_add_uncached_list(rth);
2195 }
2196 skb_dst_set(skb, &rth->dst);
2197 err = 0;
2198 goto out;
2199
2200 no_route:
2201 RT_CACHE_STAT_INC(in_no_route);
2202 res->type = RTN_UNREACHABLE;
2203 res->fi = NULL;
2204 res->table = NULL;
2205 goto local_input;
2206
2207 /*
2208 * Do not cache martian addresses: they should be logged (RFC1812)
2209 */
2210 martian_destination:
2211 RT_CACHE_STAT_INC(in_martian_dst);
2212 #ifdef CONFIG_IP_ROUTE_VERBOSE
2213 if (IN_DEV_LOG_MARTIANS(in_dev))
2214 net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
2215 &daddr, &saddr, dev->name);
2216 #endif
2217
2218 e_inval:
2219 err = -EINVAL;
2220 goto out;
2221
2222 e_nobufs:
2223 err = -ENOBUFS;
2224 goto out;
2225
2226 martian_source:
2227 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2228 goto out;
2229 }
2230
2231 int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2232 u8 tos, struct net_device *dev)
2233 {
2234 struct fib_result res;
2235 int err;
2236
2237 tos &= IPTOS_RT_MASK;
2238 rcu_read_lock();
2239 err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
2240 rcu_read_unlock();
2241
2242 return err;
2243 }
2244 EXPORT_SYMBOL(ip_route_input_noref);
2245
2246 /* called with rcu_read_lock held */
2247 int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2248 u8 tos, struct net_device *dev, struct fib_result *res)
2249 {
2250 /* Multicast recognition logic was moved from the route cache to here.
2251 The problem was that too many Ethernet cards have broken/missing
2252 hardware multicast filters :-( As a result, a host on a multicast
2253 network may acquire a lot of useless route cache entries, e.g. for
2254 SDR messages from all over the world. Now we try to get rid of them.
2255 Really, provided the software IP multicast filter is organized
2256 reasonably (at least, hashed), it does not result in a slowdown
2257 compared with route cache reject entries.
2258 Note that multicast routers are not affected, because a
2259 route cache entry is created eventually.
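   For example, a host that joined 224.1.1.1 with
   setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq))
   will have ip_check_mc_rcu() report the group as ours below, and the
   packet is handed to ip_route_input_mc(); without a membership it is
   only routed when multicast forwarding is configured.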
2260 */
2261 if (ipv4_is_multicast(daddr)) {
2262 struct in_device *in_dev = __in_dev_get_rcu(dev);
2263 int our = 0;
2264 int err = -EINVAL;
2265
2266 if (!in_dev)
2267 return err;
2268 our = ip_check_mc_rcu(in_dev, daddr, saddr,
2269 ip_hdr(skb)->protocol);
2270
2271 /* check l3 master if no match yet */
2272 if (!our && netif_is_l3_slave(dev)) {
2273 struct in_device *l3_in_dev;
2274
2275 l3_in_dev = __in_dev_get_rcu(skb->dev);
2276 if (l3_in_dev)
2277 our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
2278 ip_hdr(skb)->protocol);
2279 }
2280
2281 if (our
2282 #ifdef CONFIG_IP_MROUTE
2283 ||
2284 (!ipv4_is_local_multicast(daddr) &&
2285 IN_DEV_MFORWARD(in_dev))
2286 #endif
2287 ) {
2288 err = ip_route_input_mc(skb, daddr, saddr,
2289 tos, dev, our);
2290 }
2291 return err;
2292 }
2293
2294 return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
2295 }
2296
2297 /* called with rcu_read_lock() */
2298 static struct rtable *__mkroute_output(const struct fib_result *res,
2299 const struct flowi4 *fl4, int orig_oif,
2300 struct net_device *dev_out,
2301 unsigned int flags)
2302 {
2303 struct fib_info *fi = res->fi;
2304 struct fib_nh_exception *fnhe;
2305 struct in_device *in_dev;
2306 u16 type = res->type;
2307 struct rtable *rth;
2308 bool do_cache;
2309
2310 in_dev = __in_dev_get_rcu(dev_out);
2311 if (!in_dev)
2312 return ERR_PTR(-EINVAL);
2313
2314 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
2315 if (ipv4_is_loopback(fl4->saddr) &&
2316 !(dev_out->flags & IFF_LOOPBACK) &&
2317 !netif_is_l3_master(dev_out))
2318 return ERR_PTR(-EINVAL);
2319
2320 if (ipv4_is_lbcast(fl4->daddr))
2321 type = RTN_BROADCAST;
2322 else if (ipv4_is_multicast(fl4->daddr))
2323 type = RTN_MULTICAST;
2324 else if (ipv4_is_zeronet(fl4->daddr))
2325 return ERR_PTR(-EINVAL);
2326
2327 if (dev_out->flags & IFF_LOOPBACK)
2328 flags |= RTCF_LOCAL;
2329
2330 do_cache = true;
2331 if (type == RTN_BROADCAST) {
2332 flags |= RTCF_BROADCAST | RTCF_LOCAL;
2333 fi = NULL;
2334 } else if (type == RTN_MULTICAST) {
2335 flags |= RTCF_MULTICAST | RTCF_LOCAL;
2336 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
2337 fl4->flowi4_proto))
2338 flags &= ~RTCF_LOCAL;
2339 else
2340 do_cache = false;
2341 /* If a multicast route does not exist, use
2342 * the default one, but do not gateway in this case.
2343 * Yes, it is a hack.
2344 */
2345 if (fi && res->prefixlen < 4)
2346 fi = NULL;
2347 } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
2348 (orig_oif != dev_out->ifindex)) {
2349 /* For local routes that require a particular output interface
2350 * we do not want to cache the result. Caching the result
2351 * causes incorrect behaviour when there are multiple source
2352 * addresses on the interface; the end result is that if the
2353 * intended recipient is waiting on that interface for the
2354 * packet it won't receive it, because it will be delivered on
2355 * the loopback interface and the IP_PKTINFO ipi_ifindex will
2356 * be set to the loopback interface as well.
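 *
 * One way to hit this case is a socket bound with SO_BINDTODEVICE
 * sending to an address that is local to the machine: orig_oif is
 * then the bound device, while delivery happens over loopback.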
2357 */ 2358 do_cache = false; 2359 } 2360 2361 fnhe = NULL; 2362 do_cache &= fi != NULL; 2363 if (fi) { 2364 struct fib_nh_common *nhc = FIB_RES_NHC(*res); 2365 struct rtable __rcu **prth; 2366 2367 fnhe = find_exception(nhc, fl4->daddr); 2368 if (!do_cache) 2369 goto add; 2370 if (fnhe) { 2371 prth = &fnhe->fnhe_rth_output; 2372 } else { 2373 if (unlikely(fl4->flowi4_flags & 2374 FLOWI_FLAG_KNOWN_NH && 2375 !(nhc->nhc_gw_family && 2376 nhc->nhc_scope == RT_SCOPE_LINK))) { 2377 do_cache = false; 2378 goto add; 2379 } 2380 prth = raw_cpu_ptr(nhc->nhc_pcpu_rth_output); 2381 } 2382 rth = rcu_dereference(*prth); 2383 if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst)) 2384 return rth; 2385 } 2386 2387 add: 2388 rth = rt_dst_alloc(dev_out, flags, type, 2389 IN_DEV_CONF_GET(in_dev, NOPOLICY), 2390 IN_DEV_CONF_GET(in_dev, NOXFRM), 2391 do_cache); 2392 if (!rth) 2393 return ERR_PTR(-ENOBUFS); 2394 2395 rth->rt_iif = orig_oif; 2396 2397 RT_CACHE_STAT_INC(out_slow_tot); 2398 2399 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) { 2400 if (flags & RTCF_LOCAL && 2401 !(dev_out->flags & IFF_LOOPBACK)) { 2402 rth->dst.output = ip_mc_output; 2403 RT_CACHE_STAT_INC(out_slow_mc); 2404 } 2405 #ifdef CONFIG_IP_MROUTE 2406 if (type == RTN_MULTICAST) { 2407 if (IN_DEV_MFORWARD(in_dev) && 2408 !ipv4_is_local_multicast(fl4->daddr)) { 2409 rth->dst.input = ip_mr_input; 2410 rth->dst.output = ip_mc_output; 2411 } 2412 } 2413 #endif 2414 } 2415 2416 rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache); 2417 lwtunnel_set_redirect(&rth->dst); 2418 2419 return rth; 2420 } 2421 2422 /* 2423 * Major route resolver routine. 2424 */ 2425 2426 struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4, 2427 const struct sk_buff *skb) 2428 { 2429 __u8 tos = RT_FL_TOS(fl4); 2430 struct fib_result res = { 2431 .type = RTN_UNSPEC, 2432 .fi = NULL, 2433 .table = NULL, 2434 .tclassid = 0, 2435 }; 2436 struct rtable *rth; 2437 2438 fl4->flowi4_iif = LOOPBACK_IFINDEX; 2439 fl4->flowi4_tos = tos & IPTOS_RT_MASK; 2440 fl4->flowi4_scope = ((tos & RTO_ONLINK) ? 2441 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE); 2442 2443 rcu_read_lock(); 2444 rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb); 2445 rcu_read_unlock(); 2446 2447 return rth; 2448 } 2449 EXPORT_SYMBOL_GPL(ip_route_output_key_hash); 2450 2451 struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4, 2452 struct fib_result *res, 2453 const struct sk_buff *skb) 2454 { 2455 struct net_device *dev_out = NULL; 2456 int orig_oif = fl4->flowi4_oif; 2457 unsigned int flags = 0; 2458 struct rtable *rth; 2459 int err = -ENETUNREACH; 2460 2461 if (fl4->saddr) { 2462 rth = ERR_PTR(-EINVAL); 2463 if (ipv4_is_multicast(fl4->saddr) || 2464 ipv4_is_lbcast(fl4->saddr) || 2465 ipv4_is_zeronet(fl4->saddr)) 2466 goto out; 2467 2468 /* I removed check for oif == dev_out->oif here. 2469 It was wrong for two reasons: 2470 1. ip_dev_find(net, saddr) can return wrong iface, if saddr 2471 is assigned to multiple interfaces. 2472 2. Moreover, we are allowed to send packets with saddr 2473 of another iface. --ANK 2474 */ 2475 2476 if (fl4->flowi4_oif == 0 && 2477 (ipv4_is_multicast(fl4->daddr) || 2478 ipv4_is_lbcast(fl4->daddr))) { 2479 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */ 2480 dev_out = __ip_dev_find(net, fl4->saddr, false); 2481 if (!dev_out) 2482 goto out; 2483 2484 /* Special hack: user can direct multicasts 2485 and limited broadcast via necessary interface 2486 without fiddling with IP_MULTICAST_IF or IP_PKTINFO. 
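	   (The explicit way is
	   setsockopt(fd, IPPROTO_IP, IP_MULTICAST_IF, &addr, sizeof(addr));
	   with addr holding a local address of the egress interface;
	   the branch below keeps applications that skip this working.)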
2487 This hack is not just for fun; it allows
2488 vic, vat and friends to work.
2489 They bind a socket to loopback, set ttl to zero
2490 and expect that it will work.
2491 From the viewpoint of the routing cache they are broken,
2492 because we are not allowed to build a multicast path
2493 with a loopback source addr (look, the routing cache
2494 cannot know that ttl is zero, so the packet
2495 will not leave this host and the route is valid).
2496 Luckily, this hack is a good workaround.
2497 */
2498
2499 fl4->flowi4_oif = dev_out->ifindex;
2500 goto make_route;
2501 }
2502
2503 if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2504 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2505 if (!__ip_dev_find(net, fl4->saddr, false))
2506 goto out;
2507 }
2508 }
2509
2510
2511 if (fl4->flowi4_oif) {
2512 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2513 rth = ERR_PTR(-ENODEV);
2514 if (!dev_out)
2515 goto out;
2516
2517 /* RACE: Check return value of inet_select_addr instead. */
2518 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2519 rth = ERR_PTR(-ENETUNREACH);
2520 goto out;
2521 }
2522 if (ipv4_is_local_multicast(fl4->daddr) ||
2523 ipv4_is_lbcast(fl4->daddr) ||
2524 fl4->flowi4_proto == IPPROTO_IGMP) {
2525 if (!fl4->saddr)
2526 fl4->saddr = inet_select_addr(dev_out, 0,
2527 RT_SCOPE_LINK);
2528 goto make_route;
2529 }
2530 if (!fl4->saddr) {
2531 if (ipv4_is_multicast(fl4->daddr))
2532 fl4->saddr = inet_select_addr(dev_out, 0,
2533 fl4->flowi4_scope);
2534 else if (!fl4->daddr)
2535 fl4->saddr = inet_select_addr(dev_out, 0,
2536 RT_SCOPE_HOST);
2537 }
2538 }
2539
2540 if (!fl4->daddr) {
2541 fl4->daddr = fl4->saddr;
2542 if (!fl4->daddr)
2543 fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2544 dev_out = net->loopback_dev;
2545 fl4->flowi4_oif = LOOPBACK_IFINDEX;
2546 res->type = RTN_LOCAL;
2547 flags |= RTCF_LOCAL;
2548 goto make_route;
2549 }
2550
2551 err = fib_lookup(net, fl4, res, 0);
2552 if (err) {
2553 res->fi = NULL;
2554 res->table = NULL;
2555 if (fl4->flowi4_oif &&
2556 (ipv4_is_multicast(fl4->daddr) ||
2557 !netif_index_is_l3_master(net, fl4->flowi4_oif))) {
2558 /* Apparently, the routing tables are wrong. Assume
2559 that the destination is on-link.

2561 WHY? DW.
2562 Because we are allowed to send to an iface
2563 even if it has NO routes and NO assigned
2564 addresses. When oif is specified, routing
2565 tables are looked up with only one purpose:
2566 to catch whether the destination is gatewayed, rather than
2567 direct. Moreover, if MSG_DONTROUTE is set,
2568 we send a packet, ignoring both routing tables
2569 and ifaddr state. --ANK


2572 We could make it even if oif is unknown,
2573 likely IPv6, but we do not.
2574 */
2575
2576 if (fl4->saddr == 0)
2577 fl4->saddr = inet_select_addr(dev_out, 0,
2578 RT_SCOPE_LINK);
2579 res->type = RTN_UNICAST;
2580 goto make_route;
2581 }
2582 rth = ERR_PTR(err);
2583 goto out;
2584 }
2585
2586 if (res->type == RTN_LOCAL) {
2587 if (!fl4->saddr) {
2588 if (res->fi->fib_prefsrc)
2589 fl4->saddr = res->fi->fib_prefsrc;
2590 else
2591 fl4->saddr = fl4->daddr;
2592 }
2593
2594 /* L3 master device is the loopback for that domain */
2595 dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ?
: 2596 net->loopback_dev; 2597 2598 /* make sure orig_oif points to fib result device even 2599 * though packet rx/tx happens over loopback or l3mdev 2600 */ 2601 orig_oif = FIB_RES_OIF(*res); 2602 2603 fl4->flowi4_oif = dev_out->ifindex; 2604 flags |= RTCF_LOCAL; 2605 goto make_route; 2606 } 2607 2608 fib_select_path(net, res, fl4, skb); 2609 2610 dev_out = FIB_RES_DEV(*res); 2611 fl4->flowi4_oif = dev_out->ifindex; 2612 2613 2614 make_route: 2615 rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags); 2616 2617 out: 2618 return rth; 2619 } 2620 2621 static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie) 2622 { 2623 return NULL; 2624 } 2625 2626 static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst) 2627 { 2628 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); 2629 2630 return mtu ? : dst->dev->mtu; 2631 } 2632 2633 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk, 2634 struct sk_buff *skb, u32 mtu) 2635 { 2636 } 2637 2638 static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk, 2639 struct sk_buff *skb) 2640 { 2641 } 2642 2643 static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst, 2644 unsigned long old) 2645 { 2646 return NULL; 2647 } 2648 2649 static struct dst_ops ipv4_dst_blackhole_ops = { 2650 .family = AF_INET, 2651 .check = ipv4_blackhole_dst_check, 2652 .mtu = ipv4_blackhole_mtu, 2653 .default_advmss = ipv4_default_advmss, 2654 .update_pmtu = ipv4_rt_blackhole_update_pmtu, 2655 .redirect = ipv4_rt_blackhole_redirect, 2656 .cow_metrics = ipv4_rt_blackhole_cow_metrics, 2657 .neigh_lookup = ipv4_neigh_lookup, 2658 }; 2659 2660 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig) 2661 { 2662 struct rtable *ort = (struct rtable *) dst_orig; 2663 struct rtable *rt; 2664 2665 rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0); 2666 if (rt) { 2667 struct dst_entry *new = &rt->dst; 2668 2669 new->__use = 1; 2670 new->input = dst_discard; 2671 new->output = dst_discard_out; 2672 2673 new->dev = net->loopback_dev; 2674 if (new->dev) 2675 dev_hold(new->dev); 2676 2677 rt->rt_is_input = ort->rt_is_input; 2678 rt->rt_iif = ort->rt_iif; 2679 rt->rt_pmtu = ort->rt_pmtu; 2680 rt->rt_mtu_locked = ort->rt_mtu_locked; 2681 2682 rt->rt_genid = rt_genid_ipv4(net); 2683 rt->rt_flags = ort->rt_flags; 2684 rt->rt_type = ort->rt_type; 2685 rt->rt_gw_family = ort->rt_gw_family; 2686 if (rt->rt_gw_family == AF_INET) 2687 rt->rt_gw4 = ort->rt_gw4; 2688 else if (rt->rt_gw_family == AF_INET6) 2689 rt->rt_gw6 = ort->rt_gw6; 2690 2691 INIT_LIST_HEAD(&rt->rt_uncached); 2692 } 2693 2694 dst_release(dst_orig); 2695 2696 return rt ? 
&rt->dst : ERR_PTR(-ENOMEM); 2697 } 2698 2699 struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4, 2700 const struct sock *sk) 2701 { 2702 struct rtable *rt = __ip_route_output_key(net, flp4); 2703 2704 if (IS_ERR(rt)) 2705 return rt; 2706 2707 if (flp4->flowi4_proto) 2708 rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst, 2709 flowi4_to_flowi(flp4), 2710 sk, 0); 2711 2712 return rt; 2713 } 2714 EXPORT_SYMBOL_GPL(ip_route_output_flow); 2715 2716 /* called with rcu_read_lock held */ 2717 static int rt_fill_info(struct net *net, __be32 dst, __be32 src, 2718 struct rtable *rt, u32 table_id, struct flowi4 *fl4, 2719 struct sk_buff *skb, u32 portid, u32 seq) 2720 { 2721 struct rtmsg *r; 2722 struct nlmsghdr *nlh; 2723 unsigned long expires = 0; 2724 u32 error; 2725 u32 metrics[RTAX_MAX]; 2726 2727 nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), 0); 2728 if (!nlh) 2729 return -EMSGSIZE; 2730 2731 r = nlmsg_data(nlh); 2732 r->rtm_family = AF_INET; 2733 r->rtm_dst_len = 32; 2734 r->rtm_src_len = 0; 2735 r->rtm_tos = fl4 ? fl4->flowi4_tos : 0; 2736 r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT; 2737 if (nla_put_u32(skb, RTA_TABLE, table_id)) 2738 goto nla_put_failure; 2739 r->rtm_type = rt->rt_type; 2740 r->rtm_scope = RT_SCOPE_UNIVERSE; 2741 r->rtm_protocol = RTPROT_UNSPEC; 2742 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED; 2743 if (rt->rt_flags & RTCF_NOTIFY) 2744 r->rtm_flags |= RTM_F_NOTIFY; 2745 if (IPCB(skb)->flags & IPSKB_DOREDIRECT) 2746 r->rtm_flags |= RTCF_DOREDIRECT; 2747 2748 if (nla_put_in_addr(skb, RTA_DST, dst)) 2749 goto nla_put_failure; 2750 if (src) { 2751 r->rtm_src_len = 32; 2752 if (nla_put_in_addr(skb, RTA_SRC, src)) 2753 goto nla_put_failure; 2754 } 2755 if (rt->dst.dev && 2756 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex)) 2757 goto nla_put_failure; 2758 #ifdef CONFIG_IP_ROUTE_CLASSID 2759 if (rt->dst.tclassid && 2760 nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid)) 2761 goto nla_put_failure; 2762 #endif 2763 if (fl4 && !rt_is_input_route(rt) && 2764 fl4->saddr != src) { 2765 if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr)) 2766 goto nla_put_failure; 2767 } 2768 if (rt->rt_gw_family == AF_INET && 2769 nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) { 2770 goto nla_put_failure; 2771 } else if (rt->rt_gw_family == AF_INET6) { 2772 int alen = sizeof(struct in6_addr); 2773 struct nlattr *nla; 2774 struct rtvia *via; 2775 2776 nla = nla_reserve(skb, RTA_VIA, alen + 2); 2777 if (!nla) 2778 goto nla_put_failure; 2779 2780 via = nla_data(nla); 2781 via->rtvia_family = AF_INET6; 2782 memcpy(via->rtvia_addr, &rt->rt_gw6, alen); 2783 } 2784 2785 expires = rt->dst.expires; 2786 if (expires) { 2787 unsigned long now = jiffies; 2788 2789 if (time_before(now, expires)) 2790 expires -= now; 2791 else 2792 expires = 0; 2793 } 2794 2795 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics)); 2796 if (rt->rt_pmtu && expires) 2797 metrics[RTAX_MTU - 1] = rt->rt_pmtu; 2798 if (rt->rt_mtu_locked && expires) 2799 metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU); 2800 if (rtnetlink_put_metrics(skb, metrics) < 0) 2801 goto nla_put_failure; 2802 2803 if (fl4) { 2804 if (fl4->flowi4_mark && 2805 nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark)) 2806 goto nla_put_failure; 2807 2808 if (!uid_eq(fl4->flowi4_uid, INVALID_UID) && 2809 nla_put_u32(skb, RTA_UID, 2810 from_kuid_munged(current_user_ns(), 2811 fl4->flowi4_uid))) 2812 goto nla_put_failure; 2813 2814 if (rt_is_input_route(rt)) { 2815 #ifdef CONFIG_IP_MROUTE 2816 if (ipv4_is_multicast(dst) && 
2817 !ipv4_is_local_multicast(dst) && 2818 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) { 2819 int err = ipmr_get_route(net, skb, 2820 fl4->saddr, fl4->daddr, 2821 r, portid); 2822 2823 if (err <= 0) { 2824 if (err == 0) 2825 return 0; 2826 goto nla_put_failure; 2827 } 2828 } else 2829 #endif 2830 if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif)) 2831 goto nla_put_failure; 2832 } 2833 } 2834 2835 error = rt->dst.error; 2836 2837 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0) 2838 goto nla_put_failure; 2839 2840 nlmsg_end(skb, nlh); 2841 return 0; 2842 2843 nla_put_failure: 2844 nlmsg_cancel(skb, nlh); 2845 return -EMSGSIZE; 2846 } 2847 2848 static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb, 2849 struct netlink_callback *cb, u32 table_id, 2850 struct fnhe_hash_bucket *bucket, int genid, 2851 int *fa_index, int fa_start) 2852 { 2853 int i; 2854 2855 for (i = 0; i < FNHE_HASH_SIZE; i++) { 2856 struct fib_nh_exception *fnhe; 2857 2858 for (fnhe = rcu_dereference(bucket[i].chain); fnhe; 2859 fnhe = rcu_dereference(fnhe->fnhe_next)) { 2860 struct rtable *rt; 2861 int err; 2862 2863 if (*fa_index < fa_start) 2864 goto next; 2865 2866 if (fnhe->fnhe_genid != genid) 2867 goto next; 2868 2869 if (fnhe->fnhe_expires && 2870 time_after(jiffies, fnhe->fnhe_expires)) 2871 goto next; 2872 2873 rt = rcu_dereference(fnhe->fnhe_rth_input); 2874 if (!rt) 2875 rt = rcu_dereference(fnhe->fnhe_rth_output); 2876 if (!rt) 2877 goto next; 2878 2879 err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt, 2880 table_id, NULL, skb, 2881 NETLINK_CB(cb->skb).portid, 2882 cb->nlh->nlmsg_seq); 2883 if (err) 2884 return err; 2885 next: 2886 (*fa_index)++; 2887 } 2888 } 2889 2890 return 0; 2891 } 2892 2893 int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb, 2894 u32 table_id, struct fib_info *fi, 2895 int *fa_index, int fa_start) 2896 { 2897 struct net *net = sock_net(cb->skb->sk); 2898 int nhsel, genid = fnhe_genid(net); 2899 2900 for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) { 2901 struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel); 2902 struct fnhe_hash_bucket *bucket; 2903 int err; 2904 2905 if (nhc->nhc_flags & RTNH_F_DEAD) 2906 continue; 2907 2908 rcu_read_lock(); 2909 bucket = rcu_dereference(nhc->nhc_exceptions); 2910 err = 0; 2911 if (bucket) 2912 err = fnhe_dump_bucket(net, skb, cb, table_id, bucket, 2913 genid, fa_index, fa_start); 2914 rcu_read_unlock(); 2915 if (err) 2916 return err; 2917 } 2918 2919 return 0; 2920 } 2921 2922 static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst, 2923 u8 ip_proto, __be16 sport, 2924 __be16 dport) 2925 { 2926 struct sk_buff *skb; 2927 struct iphdr *iph; 2928 2929 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 2930 if (!skb) 2931 return NULL; 2932 2933 /* Reserve room for dummy headers, this skb can pass 2934 * through good chunk of routing engine. 
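 *
 * For example, a query like
 *
 *	ip route get 10.0.0.1 ipproto udp sport 1000 dport 53
 *
 * (addresses and ports illustrative) lands here; the dummy transport
 * header built below lets fib rules and multipath hashing see the
 * ports during the lookup.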
2935 */ 2936 skb_reset_mac_header(skb); 2937 skb_reset_network_header(skb); 2938 skb->protocol = htons(ETH_P_IP); 2939 iph = skb_put(skb, sizeof(struct iphdr)); 2940 iph->protocol = ip_proto; 2941 iph->saddr = src; 2942 iph->daddr = dst; 2943 iph->version = 0x4; 2944 iph->frag_off = 0; 2945 iph->ihl = 0x5; 2946 skb_set_transport_header(skb, skb->len); 2947 2948 switch (iph->protocol) { 2949 case IPPROTO_UDP: { 2950 struct udphdr *udph; 2951 2952 udph = skb_put_zero(skb, sizeof(struct udphdr)); 2953 udph->source = sport; 2954 udph->dest = dport; 2955 udph->len = sizeof(struct udphdr); 2956 udph->check = 0; 2957 break; 2958 } 2959 case IPPROTO_TCP: { 2960 struct tcphdr *tcph; 2961 2962 tcph = skb_put_zero(skb, sizeof(struct tcphdr)); 2963 tcph->source = sport; 2964 tcph->dest = dport; 2965 tcph->doff = sizeof(struct tcphdr) / 4; 2966 tcph->rst = 1; 2967 tcph->check = ~tcp_v4_check(sizeof(struct tcphdr), 2968 src, dst, 0); 2969 break; 2970 } 2971 case IPPROTO_ICMP: { 2972 struct icmphdr *icmph; 2973 2974 icmph = skb_put_zero(skb, sizeof(struct icmphdr)); 2975 icmph->type = ICMP_ECHO; 2976 icmph->code = 0; 2977 } 2978 } 2979 2980 return skb; 2981 } 2982 2983 static int inet_rtm_valid_getroute_req(struct sk_buff *skb, 2984 const struct nlmsghdr *nlh, 2985 struct nlattr **tb, 2986 struct netlink_ext_ack *extack) 2987 { 2988 struct rtmsg *rtm; 2989 int i, err; 2990 2991 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) { 2992 NL_SET_ERR_MSG(extack, 2993 "ipv4: Invalid header for route get request"); 2994 return -EINVAL; 2995 } 2996 2997 if (!netlink_strict_get_check(skb)) 2998 return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX, 2999 rtm_ipv4_policy, extack); 3000 3001 rtm = nlmsg_data(nlh); 3002 if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) || 3003 (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) || 3004 rtm->rtm_table || rtm->rtm_protocol || 3005 rtm->rtm_scope || rtm->rtm_type) { 3006 NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for route get request"); 3007 return -EINVAL; 3008 } 3009 3010 if (rtm->rtm_flags & ~(RTM_F_NOTIFY | 3011 RTM_F_LOOKUP_TABLE | 3012 RTM_F_FIB_MATCH)) { 3013 NL_SET_ERR_MSG(extack, "ipv4: Unsupported rtm_flags for route get request"); 3014 return -EINVAL; 3015 } 3016 3017 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX, 3018 rtm_ipv4_policy, extack); 3019 if (err) 3020 return err; 3021 3022 if ((tb[RTA_SRC] && !rtm->rtm_src_len) || 3023 (tb[RTA_DST] && !rtm->rtm_dst_len)) { 3024 NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4"); 3025 return -EINVAL; 3026 } 3027 3028 for (i = 0; i <= RTA_MAX; i++) { 3029 if (!tb[i]) 3030 continue; 3031 3032 switch (i) { 3033 case RTA_IIF: 3034 case RTA_OIF: 3035 case RTA_SRC: 3036 case RTA_DST: 3037 case RTA_IP_PROTO: 3038 case RTA_SPORT: 3039 case RTA_DPORT: 3040 case RTA_MARK: 3041 case RTA_UID: 3042 break; 3043 default: 3044 NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in route get request"); 3045 return -EINVAL; 3046 } 3047 } 3048 3049 return 0; 3050 } 3051 3052 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, 3053 struct netlink_ext_ack *extack) 3054 { 3055 struct net *net = sock_net(in_skb->sk); 3056 struct nlattr *tb[RTA_MAX+1]; 3057 u32 table_id = RT_TABLE_MAIN; 3058 __be16 sport = 0, dport = 0; 3059 struct fib_result res = {}; 3060 u8 ip_proto = IPPROTO_UDP; 3061 struct rtable *rt = NULL; 3062 struct sk_buff *skb; 3063 struct rtmsg *rtm; 3064 struct flowi4 fl4 = {}; 3065 __be32 dst = 0; 3066 __be32 src = 0; 3067 kuid_t uid; 3068 
u32 iif; 3069 int err; 3070 int mark; 3071 3072 err = inet_rtm_valid_getroute_req(in_skb, nlh, tb, extack); 3073 if (err < 0) 3074 return err; 3075 3076 rtm = nlmsg_data(nlh); 3077 src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0; 3078 dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0; 3079 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0; 3080 mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0; 3081 if (tb[RTA_UID]) 3082 uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID])); 3083 else 3084 uid = (iif ? INVALID_UID : current_uid()); 3085 3086 if (tb[RTA_IP_PROTO]) { 3087 err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO], 3088 &ip_proto, AF_INET, extack); 3089 if (err) 3090 return err; 3091 } 3092 3093 if (tb[RTA_SPORT]) 3094 sport = nla_get_be16(tb[RTA_SPORT]); 3095 3096 if (tb[RTA_DPORT]) 3097 dport = nla_get_be16(tb[RTA_DPORT]); 3098 3099 skb = inet_rtm_getroute_build_skb(src, dst, ip_proto, sport, dport); 3100 if (!skb) 3101 return -ENOBUFS; 3102 3103 fl4.daddr = dst; 3104 fl4.saddr = src; 3105 fl4.flowi4_tos = rtm->rtm_tos; 3106 fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0; 3107 fl4.flowi4_mark = mark; 3108 fl4.flowi4_uid = uid; 3109 if (sport) 3110 fl4.fl4_sport = sport; 3111 if (dport) 3112 fl4.fl4_dport = dport; 3113 fl4.flowi4_proto = ip_proto; 3114 3115 rcu_read_lock(); 3116 3117 if (iif) { 3118 struct net_device *dev; 3119 3120 dev = dev_get_by_index_rcu(net, iif); 3121 if (!dev) { 3122 err = -ENODEV; 3123 goto errout_rcu; 3124 } 3125 3126 fl4.flowi4_iif = iif; /* for rt_fill_info */ 3127 skb->dev = dev; 3128 skb->mark = mark; 3129 err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos, 3130 dev, &res); 3131 3132 rt = skb_rtable(skb); 3133 if (err == 0 && rt->dst.error) 3134 err = -rt->dst.error; 3135 } else { 3136 fl4.flowi4_iif = LOOPBACK_IFINDEX; 3137 skb->dev = net->loopback_dev; 3138 rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb); 3139 err = 0; 3140 if (IS_ERR(rt)) 3141 err = PTR_ERR(rt); 3142 else 3143 skb_dst_set(skb, &rt->dst); 3144 } 3145 3146 if (err) 3147 goto errout_rcu; 3148 3149 if (rtm->rtm_flags & RTM_F_NOTIFY) 3150 rt->rt_flags |= RTCF_NOTIFY; 3151 3152 if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE) 3153 table_id = res.table ? 
res.table->tb_id : 0; 3154 3155 /* reset skb for netlink reply msg */ 3156 skb_trim(skb, 0); 3157 skb_reset_network_header(skb); 3158 skb_reset_transport_header(skb); 3159 skb_reset_mac_header(skb); 3160 3161 if (rtm->rtm_flags & RTM_F_FIB_MATCH) { 3162 if (!res.fi) { 3163 err = fib_props[res.type].error; 3164 if (!err) 3165 err = -EHOSTUNREACH; 3166 goto errout_rcu; 3167 } 3168 err = fib_dump_info(skb, NETLINK_CB(in_skb).portid, 3169 nlh->nlmsg_seq, RTM_NEWROUTE, table_id, 3170 rt->rt_type, res.prefix, res.prefixlen, 3171 fl4.flowi4_tos, res.fi, 0); 3172 } else { 3173 err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb, 3174 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq); 3175 } 3176 if (err < 0) 3177 goto errout_rcu; 3178 3179 rcu_read_unlock(); 3180 3181 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); 3182 3183 errout_free: 3184 return err; 3185 errout_rcu: 3186 rcu_read_unlock(); 3187 kfree_skb(skb); 3188 goto errout_free; 3189 } 3190 3191 void ip_rt_multicast_event(struct in_device *in_dev) 3192 { 3193 rt_cache_flush(dev_net(in_dev->dev)); 3194 } 3195 3196 #ifdef CONFIG_SYSCTL 3197 static int ip_rt_gc_interval __read_mostly = 60 * HZ; 3198 static int ip_rt_gc_min_interval __read_mostly = HZ / 2; 3199 static int ip_rt_gc_elasticity __read_mostly = 8; 3200 static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU; 3201 3202 static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write, 3203 void __user *buffer, 3204 size_t *lenp, loff_t *ppos) 3205 { 3206 struct net *net = (struct net *)__ctl->extra1; 3207 3208 if (write) { 3209 rt_cache_flush(net); 3210 fnhe_genid_bump(net); 3211 return 0; 3212 } 3213 3214 return -EINVAL; 3215 } 3216 3217 static struct ctl_table ipv4_route_table[] = { 3218 { 3219 .procname = "gc_thresh", 3220 .data = &ipv4_dst_ops.gc_thresh, 3221 .maxlen = sizeof(int), 3222 .mode = 0644, 3223 .proc_handler = proc_dointvec, 3224 }, 3225 { 3226 .procname = "max_size", 3227 .data = &ip_rt_max_size, 3228 .maxlen = sizeof(int), 3229 .mode = 0644, 3230 .proc_handler = proc_dointvec, 3231 }, 3232 { 3233 /* Deprecated. 
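(Writes here are interpreted in seconds, via proc_dointvec_jiffies, while the _ms entry below takes milliseconds; both update the same ip_rt_gc_min_interval.)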
Use gc_min_interval_ms */ 3234 3235 .procname = "gc_min_interval", 3236 .data = &ip_rt_gc_min_interval, 3237 .maxlen = sizeof(int), 3238 .mode = 0644, 3239 .proc_handler = proc_dointvec_jiffies, 3240 }, 3241 { 3242 .procname = "gc_min_interval_ms", 3243 .data = &ip_rt_gc_min_interval, 3244 .maxlen = sizeof(int), 3245 .mode = 0644, 3246 .proc_handler = proc_dointvec_ms_jiffies, 3247 }, 3248 { 3249 .procname = "gc_timeout", 3250 .data = &ip_rt_gc_timeout, 3251 .maxlen = sizeof(int), 3252 .mode = 0644, 3253 .proc_handler = proc_dointvec_jiffies, 3254 }, 3255 { 3256 .procname = "gc_interval", 3257 .data = &ip_rt_gc_interval, 3258 .maxlen = sizeof(int), 3259 .mode = 0644, 3260 .proc_handler = proc_dointvec_jiffies, 3261 }, 3262 { 3263 .procname = "redirect_load", 3264 .data = &ip_rt_redirect_load, 3265 .maxlen = sizeof(int), 3266 .mode = 0644, 3267 .proc_handler = proc_dointvec, 3268 }, 3269 { 3270 .procname = "redirect_number", 3271 .data = &ip_rt_redirect_number, 3272 .maxlen = sizeof(int), 3273 .mode = 0644, 3274 .proc_handler = proc_dointvec, 3275 }, 3276 { 3277 .procname = "redirect_silence", 3278 .data = &ip_rt_redirect_silence, 3279 .maxlen = sizeof(int), 3280 .mode = 0644, 3281 .proc_handler = proc_dointvec, 3282 }, 3283 { 3284 .procname = "error_cost", 3285 .data = &ip_rt_error_cost, 3286 .maxlen = sizeof(int), 3287 .mode = 0644, 3288 .proc_handler = proc_dointvec, 3289 }, 3290 { 3291 .procname = "error_burst", 3292 .data = &ip_rt_error_burst, 3293 .maxlen = sizeof(int), 3294 .mode = 0644, 3295 .proc_handler = proc_dointvec, 3296 }, 3297 { 3298 .procname = "gc_elasticity", 3299 .data = &ip_rt_gc_elasticity, 3300 .maxlen = sizeof(int), 3301 .mode = 0644, 3302 .proc_handler = proc_dointvec, 3303 }, 3304 { 3305 .procname = "mtu_expires", 3306 .data = &ip_rt_mtu_expires, 3307 .maxlen = sizeof(int), 3308 .mode = 0644, 3309 .proc_handler = proc_dointvec_jiffies, 3310 }, 3311 { 3312 .procname = "min_pmtu", 3313 .data = &ip_rt_min_pmtu, 3314 .maxlen = sizeof(int), 3315 .mode = 0644, 3316 .proc_handler = proc_dointvec_minmax, 3317 .extra1 = &ip_min_valid_pmtu, 3318 }, 3319 { 3320 .procname = "min_adv_mss", 3321 .data = &ip_rt_min_advmss, 3322 .maxlen = sizeof(int), 3323 .mode = 0644, 3324 .proc_handler = proc_dointvec, 3325 }, 3326 { } 3327 }; 3328 3329 static const char ipv4_route_flush_procname[] = "flush"; 3330 3331 static struct ctl_table ipv4_route_flush_table[] = { 3332 { 3333 .procname = ipv4_route_flush_procname, 3334 .maxlen = sizeof(int), 3335 .mode = 0200, 3336 .proc_handler = ipv4_sysctl_rtcache_flush, 3337 }, 3338 { }, 3339 }; 3340 3341 static __net_init int sysctl_route_net_init(struct net *net) 3342 { 3343 struct ctl_table *tbl; 3344 3345 tbl = ipv4_route_flush_table; 3346 if (!net_eq(net, &init_net)) { 3347 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL); 3348 if (!tbl) 3349 goto err_dup; 3350 3351 /* Don't export non-whitelisted sysctls to unprivileged users */ 3352 if (net->user_ns != &init_user_ns) { 3353 if (tbl[0].procname != ipv4_route_flush_procname) 3354 tbl[0].procname = NULL; 3355 } 3356 } 3357 tbl[0].extra1 = net; 3358 3359 net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl); 3360 if (!net->ipv4.route_hdr) 3361 goto err_reg; 3362 return 0; 3363 3364 err_reg: 3365 if (tbl != ipv4_route_flush_table) 3366 kfree(tbl); 3367 err_dup: 3368 return -ENOMEM; 3369 } 3370 3371 static __net_exit void sysctl_route_net_exit(struct net *net) 3372 { 3373 struct ctl_table *tbl; 3374 3375 tbl = net->ipv4.route_hdr->ctl_table_arg; 3376 
unregister_net_sysctl_table(net->ipv4.route_hdr); 3377 BUG_ON(tbl == ipv4_route_flush_table); 3378 kfree(tbl); 3379 } 3380 3381 static __net_initdata struct pernet_operations sysctl_route_ops = { 3382 .init = sysctl_route_net_init, 3383 .exit = sysctl_route_net_exit, 3384 }; 3385 #endif 3386 3387 static __net_init int rt_genid_init(struct net *net) 3388 { 3389 atomic_set(&net->ipv4.rt_genid, 0); 3390 atomic_set(&net->fnhe_genid, 0); 3391 atomic_set(&net->ipv4.dev_addr_genid, get_random_int()); 3392 return 0; 3393 } 3394 3395 static __net_initdata struct pernet_operations rt_genid_ops = { 3396 .init = rt_genid_init, 3397 }; 3398 3399 static int __net_init ipv4_inetpeer_init(struct net *net) 3400 { 3401 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL); 3402 3403 if (!bp) 3404 return -ENOMEM; 3405 inet_peer_base_init(bp); 3406 net->ipv4.peers = bp; 3407 return 0; 3408 } 3409 3410 static void __net_exit ipv4_inetpeer_exit(struct net *net) 3411 { 3412 struct inet_peer_base *bp = net->ipv4.peers; 3413 3414 net->ipv4.peers = NULL; 3415 inetpeer_invalidate_tree(bp); 3416 kfree(bp); 3417 } 3418 3419 static __net_initdata struct pernet_operations ipv4_inetpeer_ops = { 3420 .init = ipv4_inetpeer_init, 3421 .exit = ipv4_inetpeer_exit, 3422 }; 3423 3424 #ifdef CONFIG_IP_ROUTE_CLASSID 3425 struct ip_rt_acct __percpu *ip_rt_acct __read_mostly; 3426 #endif /* CONFIG_IP_ROUTE_CLASSID */ 3427 3428 int __init ip_rt_init(void) 3429 { 3430 int cpu; 3431 3432 ip_idents = kmalloc_array(IP_IDENTS_SZ, sizeof(*ip_idents), 3433 GFP_KERNEL); 3434 if (!ip_idents) 3435 panic("IP: failed to allocate ip_idents\n"); 3436 3437 prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents)); 3438 3439 ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL); 3440 if (!ip_tstamps) 3441 panic("IP: failed to allocate ip_tstamps\n"); 3442 3443 for_each_possible_cpu(cpu) { 3444 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu); 3445 3446 INIT_LIST_HEAD(&ul->head); 3447 spin_lock_init(&ul->lock); 3448 } 3449 #ifdef CONFIG_IP_ROUTE_CLASSID 3450 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct)); 3451 if (!ip_rt_acct) 3452 panic("IP: failed to allocate ip_rt_acct\n"); 3453 #endif 3454 3455 ipv4_dst_ops.kmem_cachep = 3456 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0, 3457 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 3458 3459 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep; 3460 3461 if (dst_entries_init(&ipv4_dst_ops) < 0) 3462 panic("IP: failed to allocate ipv4_dst_ops counter\n"); 3463 3464 if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0) 3465 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n"); 3466 3467 ipv4_dst_ops.gc_thresh = ~0; 3468 ip_rt_max_size = INT_MAX; 3469 3470 devinet_init(); 3471 ip_fib_init(); 3472 3473 if (ip_rt_proc_init()) 3474 pr_err("Unable to create route proc files\n"); 3475 #ifdef CONFIG_XFRM 3476 xfrm_init(); 3477 xfrm4_init(); 3478 #endif 3479 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, 3480 RTNL_FLAG_DOIT_UNLOCKED); 3481 3482 #ifdef CONFIG_SYSCTL 3483 register_pernet_subsys(&sysctl_route_ops); 3484 #endif 3485 register_pernet_subsys(&rt_genid_ops); 3486 register_pernet_subsys(&ipv4_inetpeer_ops); 3487 return 0; 3488 } 3489 3490 #ifdef CONFIG_SYSCTL 3491 /* 3492 * We really need to sanitize the damn ipv4 init order, then all 3493 * this nonsense will go away. 
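 * (Until then, the route sysctl table for init_net is registered below,
 * independently of the pernet flush table handled by
 * sysctl_route_net_init() above.)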
3494 */ 3495 void __init ip_static_sysctl_init(void) 3496 { 3497 register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table); 3498 } 3499 #endif 3500
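/*
 * Illustrative userspace exercises for the code above:
 *
 *	ip route get 203.0.113.1				- RTM_GETROUTE, output path
 *	ip route get 203.0.113.1 iif eth0 from 198.51.100.2	- input path
 *	sysctl -w net.ipv4.route.flush=1			- ipv4_sysctl_rtcache_flush()
 */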