/* xref: /linux/net/ipv4/route.c (revision 5c1672705a1a2389f5ad78e0fea6f08ed32d6f18) */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	:	MSS actually. Also added the window
 *					clamper.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD;
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 *		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 *	Ilia Sotnikov		:	Removed TOS from hash calculations
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/socket.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/inet_dscp.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/nexthop.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <net/secure_seq.h>
#include <net/ip_tunnels.h>

#include "fib_lookup.h"

#define RT_GC_TIMEOUT (300*HZ)

#define DEFAULT_MIN_PMTU (512 + 20 + 20)
#define DEFAULT_MTU_EXPIRES (10 * 60 * HZ)
#define DEFAULT_MIN_ADVMSS 256
static int ip_rt_max_size;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;

static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;

/*
 *	Interface to generic destination cache.
 */

INDIRECT_CALLABLE_SCOPE
struct dst_entry	*ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
INDIRECT_CALLABLE_SCOPE
unsigned int		ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
					   struct sk_buff *skb, u32 mtu,
					   bool confirm_neigh);
static void		 ip_do_redirect(struct dst_entry *dst, struct sock *sk,
					struct sk_buff *skb);
static void		ipv4_dst_destroy(struct dst_entry *dst);

static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	WARN_ON(1);
	return NULL;
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr);
static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);

static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.check =		ipv4_dst_check,
	.default_advmss =	ipv4_default_advmss,
	.mtu =			ipv4_mtu,
	.cow_metrics =		ipv4_cow_metrics,
	.destroy =		ipv4_dst_destroy,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.redirect =		ip_do_redirect,
	.local_out =		__ip_local_out,
	.neigh_lookup =		ipv4_neigh_lookup,
	.confirm_neigh =	ipv4_confirm_neigh,
};

#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
EXPORT_SYMBOL(ip_tos2prio);
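
/* Illustrative sketch (not upstream code): how the table above is
 * consumed. The four TOS bits select an index, two ToS codepoints per
 * entry, so e.g. IPTOS_LOWDELAY (0x10) maps to TC_PRIO_INTERACTIVE.
 * This mirrors the rt_tos2priority() helper in <net/route.h>.
 */
static __maybe_unused u8 example_tos_to_prio(u8 tos)
{
	/* IPTOS_TOS() masks with 0x1e, so the index is always 0..15 */
	return ip_tos2prio[IPTOS_TOS(tos) >> 1];
}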

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)

#ifdef CONFIG_PROC_FS
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos)
		return NULL;
	return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
	.start  = rt_cache_seq_start,
	.next   = rt_cache_seq_next,
	.stop   = rt_cache_seq_stop,
	.show   = rt_cache_seq_show,
};

static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	(*pos)++;
	return NULL;
}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "entries  in_hit   in_slow_tot in_slow_mc in_no_route in_brd   in_martian_dst in_martian_src out_hit  out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq, "%08x %08x %08x    %08x   %08x    %08x %08x       "
			"%08x       %08x %08x     %08x    %08x %08x   "
			"%08x     %08x        %08x        %08x\n",
		   dst_entries_get_slow(&ipv4_dst_ops),
		   0, /* st->in_hit */
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,

		   0, /* st->out_hit */
		   st->out_slow_tot,
		   st->out_slow_mc,

		   0, /* st->gc_total */
		   0, /* st->gc_ignored */
		   0, /* st->gc_goal_miss */
		   0, /* st->gc_dst_overflow */
		   0, /* st->in_hlist_search */
		   0  /* st->out_hlist_search */
		);
	return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
	.start  = rt_cpu_seq_start,
	.next   = rt_cpu_seq_next,
	.stop   = rt_cpu_seq_stop,
	.show   = rt_cpu_seq_show,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
	struct ip_rt_acct *dst, *src;
	unsigned int i, j;

	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
		for (j = 0; j < 256; j++) {
			dst[j].o_bytes   += src[j].o_bytes;
			dst[j].o_packets += src[j].o_packets;
			dst[j].i_bytes   += src[j].i_bytes;
			dst[j].i_packets += src[j].i_packets;
		}
	}

	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
	kfree(dst);
	return 0;
}
#endif

static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_create_seq("rt_cache", 0444, net->proc_net,
			      &rt_cache_seq_ops);
	if (!pde)
		goto err1;

	pde = proc_create_seq("rt_cache", 0444, net->proc_net_stat,
			      &rt_cpu_seq_ops);
	if (!pde)
		goto err2;

#ifdef CONFIG_IP_ROUTE_CLASSID
	pde = proc_create_single("rt_acct", 0, net->proc_net,
			rt_acct_proc_show);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
	remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata = {
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */

static inline bool rt_is_expired(const struct rtable *rth)
{
	return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
}

void rt_cache_flush(struct net *net)
{
	rt_genid_bump_ipv4(net);
}
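
/* Illustrative sketch (not upstream code): invalidation here is pure
 * generation counting. A cached rtable records the per-netns genid at
 * creation time; rt_cache_flush() merely bumps that genid, and every
 * older entry then fails its next rt_is_expired() check lazily.
 */
static __maybe_unused bool example_rt_still_valid(const struct rtable *rt,
						  struct net *net)
{
	return rt->rt_genid == rt_genid_ipv4(net);
}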

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	const struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net_device *dev = dst->dev;
	struct neighbour *n;

	rcu_read_lock();

	if (likely(rt->rt_gw_family == AF_INET)) {
		n = ip_neigh_gw4(dev, rt->rt_gw4);
	} else if (rt->rt_gw_family == AF_INET6) {
		n = ip_neigh_gw6(dev, &rt->rt_gw6);
	} else {
		__be32 pkey;

		pkey = skb ? ip_hdr(skb)->daddr : *((__be32 *) daddr);
		n = ip_neigh_gw4(dev, pkey);
	}

	if (!IS_ERR(n) && !refcount_inc_not_zero(&n->refcnt))
		n = NULL;

	rcu_read_unlock();

	return n;
}

static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	const struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net_device *dev = dst->dev;
	const __be32 *pkey = daddr;

	if (rt->rt_gw_family == AF_INET) {
		pkey = (const __be32 *)&rt->rt_gw4;
	} else if (rt->rt_gw_family == AF_INET6) {
		return __ipv6_confirm_neigh_stub(dev, &rt->rt_gw6);
	} else if (!daddr ||
		 (rt->rt_flags &
		  (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL))) {
		return;
	}
	__ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
}

/* Hash tables of size 2048..262144 depending on RAM size.
 * Each bucket uses 8 bytes.
 */
static u32 ip_idents_mask __read_mostly;
static atomic_t *ip_idents __read_mostly;
static u32 *ip_tstamps __read_mostly;

/* In order to protect privacy, we add a perturbation to identifiers
 * if one generator is seldom used. This makes it hard for an attacker
 * to infer how many packets were sent between two points in time.
 */
static u32 ip_idents_reserve(u32 hash, int segs)
{
	u32 bucket, old, now = (u32)jiffies;
	atomic_t *p_id;
	u32 *p_tstamp;
	u32 delta = 0;

	bucket = hash & ip_idents_mask;
	p_tstamp = ip_tstamps + bucket;
	p_id = ip_idents + bucket;
	old = READ_ONCE(*p_tstamp);

	if (old != now && cmpxchg(p_tstamp, old, now) == old)
		delta = get_random_u32_below(now - old);

	/* If UBSAN reports an error here, first make sure your compiler
	 * supports -fno-strict-overflow before filing a report: that was
	 * a bug in UBSAN itself, fixed in GCC 8.
	 */
	return atomic_add_return(segs + delta, p_id) - segs;
}

void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
{
	u32 hash, id;

	/* Note: the following initialization is racy, but that is okay. */
	if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
		get_random_bytes(&net->ipv4.ip_id_key,
				 sizeof(net->ipv4.ip_id_key));

	hash = siphash_3u32((__force u32)iph->daddr,
			    (__force u32)iph->saddr,
			    iph->protocol,
			    &net->ipv4.ip_id_key);
	id = ip_idents_reserve(hash, segs);
	iph->id = htons(id);
}
EXPORT_SYMBOL(__ip_select_ident);
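
/* Illustrative model (hypothetical names, not upstream code) of the
 * perturbation scheme above, reduced to a single bucket: when a
 * generator has been idle, advance it by a random amount bounded by
 * the idle time, so an observer cannot count the packets sent between
 * two probes by watching consecutive IP IDs.
 */
static __maybe_unused u16 example_ip_id_next(atomic_t *id, u32 *tstamp)
{
	u32 now = (u32)jiffies;
	u32 old = READ_ONCE(*tstamp);
	u32 delta = 0;

	if (old != now && cmpxchg(tstamp, old, now) == old)
		delta = get_random_u32_below(now - old);

	/* reserve one identifier, skipping 'delta' values at random */
	return (u16)(atomic_add_return(1 + delta, id) - 1);
}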

static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
			     const struct sock *sk, const struct iphdr *iph,
			     int oif, __u8 tos, u8 prot, u32 mark,
			     int flow_flags)
{
	__u8 scope = RT_SCOPE_UNIVERSE;

	if (sk) {
		oif = sk->sk_bound_dev_if;
		mark = READ_ONCE(sk->sk_mark);
		tos = ip_sock_rt_tos(sk);
		scope = ip_sock_rt_scope(sk);
		prot = inet_test_bit(HDRINCL, sk) ? IPPROTO_RAW :
						    sk->sk_protocol;
	}

	flowi4_init_output(fl4, oif, mark, tos & IPTOS_RT_MASK, scope,
			   prot, flow_flags, iph->daddr, iph->saddr, 0, 0,
			   sock_net_uid(net, sk));
}

static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
			       const struct sock *sk)
{
	const struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = ip_hdr(skb);
	int oif = skb->dev->ifindex;
	u8 prot = iph->protocol;
	u32 mark = skb->mark;
	__u8 tos = iph->tos;

	__build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
}

static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	flowi4_init_output(fl4, sk->sk_bound_dev_if, READ_ONCE(sk->sk_mark),
			   ip_sock_rt_tos(sk) & IPTOS_RT_MASK,
			   ip_sock_rt_scope(sk),
			   inet_test_bit(HDRINCL, sk) ?
				IPPROTO_RAW : sk->sk_protocol,
			   inet_sk_flowi_flags(sk),
			   daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
	rcu_read_unlock();
}

static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
				 const struct sk_buff *skb)
{
	if (skb)
		build_skb_flow_key(fl4, skb, sk);
	else
		build_sk_flow_key(fl4, sk);
}

static DEFINE_SPINLOCK(fnhe_lock);

static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
{
	struct rtable *rt;

	rt = rcu_dereference(fnhe->fnhe_rth_input);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
		dst_dev_put(&rt->dst);
		dst_release(&rt->dst);
	}
	rt = rcu_dereference(fnhe->fnhe_rth_output);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
		dst_dev_put(&rt->dst);
		dst_release(&rt->dst);
	}
}

static void fnhe_remove_oldest(struct fnhe_hash_bucket *hash)
{
	struct fib_nh_exception __rcu **fnhe_p, **oldest_p;
	struct fib_nh_exception *fnhe, *oldest = NULL;

	for (fnhe_p = &hash->chain; ; fnhe_p = &fnhe->fnhe_next) {
		fnhe = rcu_dereference_protected(*fnhe_p,
						 lockdep_is_held(&fnhe_lock));
		if (!fnhe)
			break;
		if (!oldest ||
		    time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp)) {
			oldest = fnhe;
			oldest_p = fnhe_p;
		}
	}
	fnhe_flush_routes(oldest);
	*oldest_p = oldest->fnhe_next;
	kfree_rcu(oldest, rcu);
}

static u32 fnhe_hashfun(__be32 daddr)
{
	static siphash_aligned_key_t fnhe_hash_key;
	u64 hval;

	net_get_random_once(&fnhe_hash_key, sizeof(fnhe_hash_key));
	hval = siphash_1u32((__force u32)daddr, &fnhe_hash_key);
	return hash_64(hval, FNHE_HASH_SHIFT);
}

static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
{
	rt->rt_pmtu = fnhe->fnhe_pmtu;
	rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
	rt->dst.expires = fnhe->fnhe_expires;

	if (fnhe->fnhe_gw) {
		rt->rt_flags |= RTCF_REDIRECTED;
		rt->rt_uses_gateway = 1;
		rt->rt_gw_family = AF_INET;
		rt->rt_gw4 = fnhe->fnhe_gw;
	}
}

static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
				  __be32 gw, u32 pmtu, bool lock,
				  unsigned long expires)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe;
	struct rtable *rt;
	u32 genid, hval;
	unsigned int i;
	int depth;

	genid = fnhe_genid(dev_net(nhc->nhc_dev));
	hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference(nhc->nhc_exceptions);
	if (!hash) {
		hash = kcalloc(FNHE_HASH_SIZE, sizeof(*hash), GFP_ATOMIC);
		if (!hash)
			goto out_unlock;
		rcu_assign_pointer(nhc->nhc_exceptions, hash);
	}

	hash += hval;

	depth = 0;
	for (fnhe = rcu_dereference(hash->chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr)
			break;
		depth++;
	}

	if (fnhe) {
		if (fnhe->fnhe_genid != genid)
			fnhe->fnhe_genid = genid;
		if (gw)
			fnhe->fnhe_gw = gw;
		if (pmtu) {
			fnhe->fnhe_pmtu = pmtu;
			fnhe->fnhe_mtu_locked = lock;
		}
		fnhe->fnhe_expires = max(1UL, expires);
		/* Update all cached dsts too */
		rt = rcu_dereference(fnhe->fnhe_rth_input);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
		rt = rcu_dereference(fnhe->fnhe_rth_output);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
	} else {
		/* Randomize max depth to avoid some side-channel attacks. */
		int max_depth = FNHE_RECLAIM_DEPTH +
				get_random_u32_below(FNHE_RECLAIM_DEPTH);

		while (depth > max_depth) {
			fnhe_remove_oldest(hash);
			depth--;
		}

		fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
		if (!fnhe)
			goto out_unlock;

		fnhe->fnhe_next = hash->chain;

		fnhe->fnhe_genid = genid;
		fnhe->fnhe_daddr = daddr;
		fnhe->fnhe_gw = gw;
		fnhe->fnhe_pmtu = pmtu;
		fnhe->fnhe_mtu_locked = lock;
		fnhe->fnhe_expires = max(1UL, expires);

		rcu_assign_pointer(hash->chain, fnhe);

		/* Exception created; mark the nexthop's cached routes
		 * stale, so anyone using them rechecks whether this
		 * exception applies.
		 */
		rt = rcu_dereference(nhc->nhc_rth_input);
		if (rt)
			rt->dst.obsolete = DST_OBSOLETE_KILL;

		for_each_possible_cpu(i) {
			struct rtable __rcu **prt;

			prt = per_cpu_ptr(nhc->nhc_pcpu_rth_output, i);
			rt = rcu_dereference(*prt);
			if (rt)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
		}
	}

	fnhe->fnhe_stamp = jiffies;

out_unlock:
	spin_unlock_bh(&fnhe_lock);
}
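
/* Illustrative sketch (hypothetical helper, not upstream code): the two
 * producers of exceptions in this file. A redirect stores a learned
 * gateway, a PMTU update stores a learned MTU, and both carry an expiry
 * so stale state heals itself. The real callers below use the per-netns
 * sysctl knobs rather than the compile-time defaults shown here.
 */
static __maybe_unused void example_learn_exception(struct fib_nh_common *nhc,
						   __be32 daddr, __be32 new_gw,
						   u32 pmtu)
{
	if (new_gw)	/* as in __ip_do_redirect() */
		update_or_create_fnhe(nhc, daddr, new_gw, 0, false,
				      jiffies + ip_rt_gc_timeout);
	if (pmtu)	/* as in __ip_rt_update_pmtu() */
		update_or_create_fnhe(nhc, daddr, 0, pmtu, false,
				      jiffies + DEFAULT_MTU_EXPIRES);
}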

static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
			     bool kill_route)
{
	__be32 new_gw = icmp_hdr(skb)->un.gateway;
	__be32 old_gw = ip_hdr(skb)->saddr;
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct fib_result res;
	struct neighbour *n;
	struct net *net;

	switch (icmp_hdr(skb)->code & 7) {
	case ICMP_REDIR_NET:
	case ICMP_REDIR_NETTOS:
	case ICMP_REDIR_HOST:
	case ICMP_REDIR_HOSTTOS:
		break;

	default:
		return;
	}

	if (rt->rt_gw_family != AF_INET || rt->rt_gw4 != old_gw)
		return;

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev)
		return;

	net = dev_net(dev);
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
	    ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	n = __ipv4_neigh_lookup(rt->dst.dev, (__force u32)new_gw);
	if (!n)
		n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
	if (!IS_ERR(n)) {
		if (!(READ_ONCE(n->nud_state) & NUD_VALID)) {
			neigh_event_send(n, NULL);
		} else {
			if (fib_lookup(net, fl4, &res, 0) == 0) {
				struct fib_nh_common *nhc;

				fib_select_path(net, &res, fl4, skb);
				nhc = FIB_RES_NHC(res);
				update_or_create_fnhe(nhc, fl4->daddr, new_gw,
						0, false,
						jiffies + ip_rt_gc_timeout);
			}
			if (kill_route)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
			call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
		}
		neigh_release(n);
	}
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev)) {
		const struct iphdr *iph = (const struct iphdr *) skb->data;
		__be32 daddr = iph->daddr;
		__be32 saddr = iph->saddr;

		net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
				     "  Advised path = %pI4 -> %pI4\n",
				     &old_gw, dev->name, &new_gw,
				     &saddr, &daddr);
	}
#endif
	;
}

static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt;
	struct flowi4 fl4;
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct net *net = dev_net(skb->dev);
	int oif = skb->dev->ifindex;
	u8 prot = iph->protocol;
	u32 mark = skb->mark;
	__u8 tos = iph->tos;

	rt = dst_rtable(dst);

	__build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
	__ip_do_redirect(rt, skb, &fl4, true);
}

static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = dst_rtable(dst);
	struct dst_entry *ret = dst;

	if (rt) {
		if (dst->obsolete > 0) {
			ip_rt_put(rt);
			ret = NULL;
		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
			   rt->dst.expires) {
			ip_rt_put(rt);
			ret = NULL;
		}
	}
	return ret;
}

/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot the redirected route and start sending redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */
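
/* Illustrative sketch (hypothetical helper, not upstream code) of the
 * eligibility test implied above: with the defaults (load = HZ/50,
 * number = 9), redirect N is allowed once jiffies passes
 * rate_last + (HZ/50 << N), so the required gap doubles each time
 * until the host is written off after nine ignored redirects.
 */
static __maybe_unused bool example_redirect_allowed(unsigned long rate_last,
						    u32 n_redirects)
{
	if (n_redirects >= ip_rt_redirect_number)
		return false;	/* host ignores us; stay silent */
	return n_redirects == 0 ||
	       time_after(jiffies,
			  rate_last + (ip_rt_redirect_load << n_redirects));
}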

void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct in_device *in_dev;
	struct inet_peer *peer;
	struct net *net;
	int log_martians;
	int vif;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(rt->dst.dev);
	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
		rcu_read_unlock();
		return;
	}
	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
	vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
	rcu_read_unlock();

	net = dev_net(rt->dst.dev);
	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
	if (!peer) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
			  rt_nexthop(rt, ip_hdr(skb)->daddr));
		return;
	}

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
		peer->rate_tokens = 0;
		peer->n_redirects = 0;
	}

	/* Too many ignored redirects; do not send anything.
	 * Set peer->rate_last to the time of the last seen redirected packet.
	 */
	if (peer->n_redirects >= ip_rt_redirect_number) {
		peer->rate_last = jiffies;
		goto out_put_peer;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (peer->n_redirects == 0 ||
	    time_after(jiffies,
		       (peer->rate_last +
			(ip_rt_redirect_load << peer->n_redirects)))) {
		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);

		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
		peer->rate_last = jiffies;
		++peer->n_redirects;
		if (IS_ENABLED(CONFIG_IP_ROUTE_VERBOSE) && log_martians &&
		    peer->n_redirects == ip_rt_redirect_number)
			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
					     &ip_hdr(skb)->saddr, inet_iif(skb),
					     &ip_hdr(skb)->daddr, &gw);
	}
out_put_peer:
	inet_putpeer(peer);
}

static int ip_error(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct inet_peer *peer;
	unsigned long now;
	struct net *net;
	SKB_DR(reason);
	bool send;
	int code;

	if (netif_is_l3_master(skb->dev)) {
		dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
		if (!dev)
			goto out;
	}

	in_dev = __in_dev_get_rcu(dev);

	/* IP on this device is disabled. */
	if (!in_dev)
		goto out;

	net = dev_net(rt->dst.dev);
	if (!IN_DEV_FORWARD(in_dev)) {
		switch (rt->dst.error) {
		case EHOSTUNREACH:
			SKB_DR_SET(reason, IP_INADDRERRORS);
			__IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
			break;

		case ENETUNREACH:
			SKB_DR_SET(reason, IP_INNOROUTES);
			__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
			break;
		}
		goto out;
	}

	switch (rt->dst.error) {
	case EINVAL:
	default:
		goto out;
	case EHOSTUNREACH:
		code = ICMP_HOST_UNREACH;
		break;
	case ENETUNREACH:
		code = ICMP_NET_UNREACH;
		SKB_DR_SET(reason, IP_INNOROUTES);
		__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
		break;
	case EACCES:
		code = ICMP_PKT_FILTERED;
		break;
	}

	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
			       l3mdev_master_ifindex(skb->dev), 1);

	send = true;
	if (peer) {
		now = jiffies;
		peer->rate_tokens += now - peer->rate_last;
		if (peer->rate_tokens > ip_rt_error_burst)
			peer->rate_tokens = ip_rt_error_burst;
		peer->rate_last = now;
		if (peer->rate_tokens >= ip_rt_error_cost)
			peer->rate_tokens -= ip_rt_error_cost;
		else
			send = false;
		inet_putpeer(peer);
	}
	if (send)
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:	kfree_skb_reason(skb, reason);
	return 0;
}

static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
	struct dst_entry *dst = &rt->dst;
	struct net *net = dev_net(dst->dev);
	struct fib_result res;
	bool lock = false;
	u32 old_mtu;

	if (ip_mtu_locked(dst))
		return;

	old_mtu = ipv4_mtu(dst);
	if (old_mtu < mtu)
		return;

	if (mtu < net->ipv4.ip_rt_min_pmtu) {
		lock = true;
		mtu = min(old_mtu, net->ipv4.ip_rt_min_pmtu);
	}

	if (rt->rt_pmtu == mtu && !lock &&
	    time_before(jiffies, dst->expires - net->ipv4.ip_rt_mtu_expires / 2))
		return;

	rcu_read_lock();
	if (fib_lookup(net, fl4, &res, 0) == 0) {
		struct fib_nh_common *nhc;

		fib_select_path(net, &res, fl4, NULL);
		nhc = FIB_RES_NHC(res);
		update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
				      jiffies + net->ipv4.ip_rt_mtu_expires);
	}
	rcu_read_unlock();
}

static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			      struct sk_buff *skb, u32 mtu,
			      bool confirm_neigh)
{
	struct rtable *rt = dst_rtable(dst);
	struct flowi4 fl4;

	ip_rt_build_flow_key(&fl4, sk, skb);

	/* Don't make lookup fail for bridged encapsulations */
	if (skb && netif_is_any_bridge_port(skb->dev))
		fl4.flowi4_oif = 0;

	__ip_rt_update_pmtu(rt, &fl4, mtu);
}

void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
		      int oif, u8 protocol)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	u32 mark = IP4_REPLY_MARK(net, skb->mark);

	__build_flow_key(net, &fl4, NULL, iph, oif, iph->tos, protocol, mark,
			 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
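
/* Illustrative sketch (hypothetical handler, not upstream code): how a
 * protocol's ICMP error handler typically feeds a "fragmentation
 * needed" message into the helper above; icmp_err() in icmp.c is a
 * real caller of this shape. "info" is the next-hop MTU taken from
 * the ICMP header.
 */
static __maybe_unused void example_frag_needed(struct sk_buff *skb,
					       struct net *net, u32 info)
{
	ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ICMP);
}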

static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);

	if (!fl4.flowi4_mark)
		fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);

	rt = __ip_route_output_key(sock_net(sk), &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}

void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct dst_entry *odst = NULL;
	bool new = false;
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);

	if (!ip_sk_accept_pmtu(sk))
		goto out;

	odst = sk_dst_get(sk);

	if (sock_owned_by_user(sk) || !odst) {
		__ipv4_sk_update_pmtu(skb, sk, mtu);
		goto out;
	}

	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);

	rt = dst_rtable(odst);
	if (odst->obsolete && !odst->ops->check(odst, 0)) {
		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	__ip_rt_update_pmtu(dst_rtable(xfrm_dst_path(&rt->dst)), &fl4, mtu);

	if (!dst_check(&rt->dst, 0)) {
		if (new)
			dst_release(&rt->dst);

		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	if (new)
		sk_dst_set(sk, &rt->dst);

out:
	bh_unlock_sock(sk);
	dst_release(odst);
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);

void ipv4_redirect(struct sk_buff *skb, struct net *net,
		   int oif, u8 protocol)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(net, &fl4, NULL, iph, oif, iph->tos, protocol, 0, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_redirect);

void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct net *net = sock_net(sk);

	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_sk_redirect);

INDIRECT_CALLABLE_SCOPE struct dst_entry *ipv4_dst_check(struct dst_entry *dst,
							 u32 cookie)
{
	struct rtable *rt = dst_rtable(dst);

	/* All IPv4 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 *
	 * When a PMTU/redirect information update invalidates a route,
	 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
	 * DST_OBSOLETE_DEAD.
	 */
	if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
		return NULL;
	return dst;
}
EXPORT_INDIRECT_CALLABLE(ipv4_dst_check);
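
/* Illustrative sketch (hypothetical caller, not upstream code): because
 * every IPv4 dst is born with DST_OBSOLETE_FORCE_CHK, dst_check()
 * always funnels into ipv4_dst_check() above, and a NULL result tells
 * the caller to drop its cached route and re-resolve.
 */
static __maybe_unused struct dst_entry *example_revalidate(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);	/* caller holds sk lock */

	if (dst && !dst_check(dst, 0)) {
		sk_dst_reset(sk);
		dst = NULL;
	}
	return dst;
}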

static void ipv4_send_dest_unreach(struct sk_buff *skb)
{
	struct net_device *dev;
	struct ip_options opt;
	int res;

	/* Recompile ip options since IPCB may not be valid anymore.
	 * Also check we have a reasonable ipv4 header.
	 */
	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
	    ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
		return;

	memset(&opt, 0, sizeof(opt));
	if (ip_hdr(skb)->ihl > 5) {
		if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
			return;
		opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);

		rcu_read_lock();
		dev = skb->dev ? skb->dev : skb_rtable(skb)->dst.dev;
		res = __ip_options_compile(dev_net(dev), &opt, skb, NULL);
		rcu_read_unlock();

		if (res)
			return;
	}
	__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
}

static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	ipv4_send_dest_unreach(skb);

	rt = skb_rtable(skb);
	if (rt)
		dst_set_expires(&rt->dst, 0);
}

static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	pr_debug("%s: %pI4 -> %pI4, %s\n",
		 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
		 skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	WARN_ON(1);
	return 0;
}

/*
 * We do not cache the source address of the outgoing interface,
 * because it is used only by IP RR, TS and SRR options,
 * so it is out of the fast path.
 *
 * Remember: "addr" is allowed to be unaligned
 * in IP options!
 */

void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
{
	__be32 src;

	if (rt_is_output_route(rt))
		src = ip_hdr(skb)->saddr;
	else {
		struct fib_result res;
		struct iphdr *iph = ip_hdr(skb);
		struct flowi4 fl4 = {
			.daddr = iph->daddr,
			.saddr = iph->saddr,
			.flowi4_tos = RT_TOS(iph->tos),
			.flowi4_oif = rt->dst.dev->ifindex,
			.flowi4_iif = skb->dev->ifindex,
			.flowi4_mark = skb->mark,
		};

		rcu_read_lock();
		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
			src = fib_result_prefsrc(dev_net(rt->dst.dev), &res);
		else
			src = inet_select_addr(rt->dst.dev,
					       rt_nexthop(rt, iph->daddr),
					       RT_SCOPE_UNIVERSE);
		rcu_read_unlock();
	}
	memcpy(addr, &src, 4);
}

#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->dst.tclassid & 0xFFFF))
		rt->dst.tclassid |= tag & 0xFFFF;
	if (!(rt->dst.tclassid & 0xFFFF0000))
		rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif

static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
{
	struct net *net = dev_net(dst->dev);
	unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
	unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
				    net->ipv4.ip_rt_min_advmss);

	return min(advmss, IPV4_MAX_PMTU - header_size);
}

INDIRECT_CALLABLE_SCOPE unsigned int ipv4_mtu(const struct dst_entry *dst)
{
	return ip_dst_mtu_maybe_forward(dst, false);
}
EXPORT_INDIRECT_CALLABLE(ipv4_mtu);

static void ip_del_fnhe(struct fib_nh_common *nhc, __be32 daddr)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
	u32 hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference_protected(nhc->nhc_exceptions,
					 lockdep_is_held(&fnhe_lock));
	hash += hval;

	fnhe_p = &hash->chain;
	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
	while (fnhe) {
		if (fnhe->fnhe_daddr == daddr) {
			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
			/* set fnhe_daddr to 0 to ensure it won't bind with
			 * new dsts in rt_bind_exception().
			 */
			fnhe->fnhe_daddr = 0;
			fnhe_flush_routes(fnhe);
			kfree_rcu(fnhe, rcu);
			break;
		}
		fnhe_p = &fnhe->fnhe_next;
		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
						 lockdep_is_held(&fnhe_lock));
	}

	spin_unlock_bh(&fnhe_lock);
}

static struct fib_nh_exception *find_exception(struct fib_nh_common *nhc,
					       __be32 daddr)
{
	struct fnhe_hash_bucket *hash = rcu_dereference(nhc->nhc_exceptions);
	struct fib_nh_exception *fnhe;
	u32 hval;

	if (!hash)
		return NULL;

	hval = fnhe_hashfun(daddr);

	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr) {
			if (fnhe->fnhe_expires &&
			    time_after(jiffies, fnhe->fnhe_expires)) {
				ip_del_fnhe(nhc, daddr);
				break;
			}
			return fnhe;
		}
	}
	return NULL;
}

/* MTU selection:
 * 1. mtu on route is locked - use it
 * 2. mtu from nexthop exception
 * 3. mtu from egress device
 */

u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
{
	struct fib_nh_common *nhc = res->nhc;
	struct net_device *dev = nhc->nhc_dev;
	struct fib_info *fi = res->fi;
	u32 mtu = 0;

	if (READ_ONCE(dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu) ||
	    fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU))
		mtu = fi->fib_mtu;

	if (likely(!mtu)) {
		struct fib_nh_exception *fnhe;

		fnhe = find_exception(nhc, daddr);
		if (fnhe && !time_after_eq(jiffies, fnhe->fnhe_expires))
			mtu = fnhe->fnhe_pmtu;
	}

	if (likely(!mtu))
		mtu = min(READ_ONCE(dev->mtu), IP_MAX_MTU);

	return mtu - lwtunnel_headroom(nhc->nhc_lwtstate, mtu);
}

static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
			      __be32 daddr, const bool do_cache)
{
	bool ret = false;

	spin_lock_bh(&fnhe_lock);

	if (daddr == fnhe->fnhe_daddr) {
		struct rtable __rcu **porig;
		struct rtable *orig;
		int genid = fnhe_genid(dev_net(rt->dst.dev));

		if (rt_is_input_route(rt))
			porig = &fnhe->fnhe_rth_input;
		else
			porig = &fnhe->fnhe_rth_output;
		orig = rcu_dereference(*porig);

		if (fnhe->fnhe_genid != genid) {
			fnhe->fnhe_genid = genid;
			fnhe->fnhe_gw = 0;
			fnhe->fnhe_pmtu = 0;
			fnhe->fnhe_expires = 0;
			fnhe->fnhe_mtu_locked = false;
			fnhe_flush_routes(fnhe);
			orig = NULL;
		}
		fill_route_from_fnhe(rt, fnhe);
		if (!rt->rt_gw4) {
			rt->rt_gw4 = daddr;
			rt->rt_gw_family = AF_INET;
		}

		if (do_cache) {
			dst_hold(&rt->dst);
			rcu_assign_pointer(*porig, rt);
			if (orig) {
				dst_dev_put(&orig->dst);
				dst_release(&orig->dst);
			}
			ret = true;
		}

		fnhe->fnhe_stamp = jiffies;
	}
	spin_unlock_bh(&fnhe_lock);

	return ret;
}

static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
{
	struct rtable *orig, *prev, **p;
	bool ret = true;

	if (rt_is_input_route(rt)) {
		p = (struct rtable **)&nhc->nhc_rth_input;
	} else {
		p = (struct rtable **)raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
	}
	orig = *p;

	/* hold dst before doing cmpxchg() to avoid race condition
	 * on this dst
	 */
	dst_hold(&rt->dst);
	prev = cmpxchg(p, orig, rt);
	if (prev == orig) {
		if (orig) {
			rt_add_uncached_list(orig);
			dst_release(&orig->dst);
		}
	} else {
		dst_release(&rt->dst);
		ret = false;
	}

	return ret;
}
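
/* Illustrative sketch (hypothetical names, not upstream code) of the
 * publish pattern used by rt_cache_route() above: take the reference
 * *before* cmpxchg() so the pointer can never be observed without a
 * reference backing it, and undo the hold if the race is lost.
 */
static __maybe_unused bool example_publish_route(struct rtable **slot,
						 struct rtable *old,
						 struct rtable *cand)
{
	dst_hold(&cand->dst);
	if (cmpxchg(slot, old, cand) == old)
		return true;
	dst_release(&cand->dst);	/* lost the race; drop our hold */
	return false;
}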

struct uncached_list {
	spinlock_t		lock;
	struct list_head	head;
	struct list_head	quarantine;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);

void rt_add_uncached_list(struct rtable *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);

	rt->dst.rt_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->dst.rt_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}

void rt_del_uncached_list(struct rtable *rt)
{
	if (!list_empty(&rt->dst.rt_uncached)) {
		struct uncached_list *ul = rt->dst.rt_uncached_list;

		spin_lock_bh(&ul->lock);
		list_del_init(&rt->dst.rt_uncached);
		spin_unlock_bh(&ul->lock);
	}
}

static void ipv4_dst_destroy(struct dst_entry *dst)
{
	ip_dst_metrics_put(dst);
	rt_del_uncached_list(dst_rtable(dst));
}

void rt_flush_dev(struct net_device *dev)
{
	struct rtable *rt, *safe;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

		if (list_empty(&ul->head))
			continue;

		spin_lock_bh(&ul->lock);
		list_for_each_entry_safe(rt, safe, &ul->head, dst.rt_uncached) {
			if (rt->dst.dev != dev)
				continue;
			rt->dst.dev = blackhole_netdev;
			netdev_ref_replace(dev, blackhole_netdev,
					   &rt->dst.dev_tracker, GFP_ATOMIC);
			list_move(&rt->dst.rt_uncached, &ul->quarantine);
		}
		spin_unlock_bh(&ul->lock);
	}
}

static bool rt_cache_valid(const struct rtable *rt)
{
	return	rt &&
		rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
		!rt_is_expired(rt);
}

static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
			   const struct fib_result *res,
			   struct fib_nh_exception *fnhe,
			   struct fib_info *fi, u16 type, u32 itag,
			   const bool do_cache)
{
	bool cached = false;

	if (fi) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);

		if (nhc->nhc_gw_family && nhc->nhc_scope == RT_SCOPE_LINK) {
			rt->rt_uses_gateway = 1;
			rt->rt_gw_family = nhc->nhc_gw_family;
			/* only INET and INET6 are supported */
			if (likely(nhc->nhc_gw_family == AF_INET))
				rt->rt_gw4 = nhc->nhc_gw.ipv4;
			else
				rt->rt_gw6 = nhc->nhc_gw.ipv6;
		}

		ip_dst_init_metrics(&rt->dst, fi->fib_metrics);

#ifdef CONFIG_IP_ROUTE_CLASSID
		if (nhc->nhc_family == AF_INET) {
			struct fib_nh *nh;

			nh = container_of(nhc, struct fib_nh, nh_common);
			rt->dst.tclassid = nh->nh_tclassid;
		}
#endif
		rt->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
		if (unlikely(fnhe))
			cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
		else if (do_cache)
			cached = rt_cache_route(nhc, rt);
		if (unlikely(!cached)) {
			/* Routes we intend to cache in nexthop exception or
			 * FIB nexthop have the DST_NOCACHE bit clear.
			 * However, if we are unsuccessful at storing this
			 * route into the cache we really need to set it.
			 */
			if (!rt->rt_gw4) {
				rt->rt_gw_family = AF_INET;
				rt->rt_gw4 = daddr;
			}
			rt_add_uncached_list(rt);
		}
	} else
		rt_add_uncached_list(rt);

#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, res->tclassid);
#endif
	set_class_tag(rt, itag);
#endif
}

struct rtable *rt_dst_alloc(struct net_device *dev,
			    unsigned int flags, u16 type,
			    bool noxfrm)
{
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_ops, dev, DST_OBSOLETE_FORCE_CHK,
		       (noxfrm ? DST_NOXFRM : 0));

	if (rt) {
		rt->rt_genid = rt_genid_ipv4(dev_net(dev));
		rt->rt_flags = flags;
		rt->rt_type = type;
		rt->rt_is_input = 0;
		rt->rt_iif = 0;
		rt->rt_pmtu = 0;
		rt->rt_mtu_locked = 0;
		rt->rt_uses_gateway = 0;
		rt->rt_gw_family = 0;
		rt->rt_gw4 = 0;

		rt->dst.output = ip_output;
		if (flags & RTCF_LOCAL)
			rt->dst.input = ip_local_deliver;
	}

	return rt;
}
EXPORT_SYMBOL(rt_dst_alloc);

struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt)
{
	struct rtable *new_rt;

	new_rt = dst_alloc(&ipv4_dst_ops, dev, DST_OBSOLETE_FORCE_CHK,
			   rt->dst.flags);

	if (new_rt) {
		new_rt->rt_genid = rt_genid_ipv4(dev_net(dev));
		new_rt->rt_flags = rt->rt_flags;
		new_rt->rt_type = rt->rt_type;
		new_rt->rt_is_input = rt->rt_is_input;
		new_rt->rt_iif = rt->rt_iif;
		new_rt->rt_pmtu = rt->rt_pmtu;
		new_rt->rt_mtu_locked = rt->rt_mtu_locked;
		new_rt->rt_gw_family = rt->rt_gw_family;
		if (rt->rt_gw_family == AF_INET)
			new_rt->rt_gw4 = rt->rt_gw4;
		else if (rt->rt_gw_family == AF_INET6)
			new_rt->rt_gw6 = rt->rt_gw6;

		new_rt->dst.input = rt->dst.input;
		new_rt->dst.output = rt->dst.output;
		new_rt->dst.error = rt->dst.error;
		new_rt->dst.lastuse = jiffies;
		new_rt->dst.lwtstate = lwtstate_get(rt->dst.lwtstate);
	}
	return new_rt;
}
EXPORT_SYMBOL(rt_dst_clone);

/* called in rcu_read_lock() section */
int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			  u8 tos, struct net_device *dev,
			  struct in_device *in_dev, u32 *itag)
{
	int err;

	/* Primary sanity checks. */
	if (!in_dev)
		return -EINVAL;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    skb->protocol != htons(ETH_P_IP))
		return -EINVAL;

	if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
		return -EINVAL;

	if (ipv4_is_zeronet(saddr)) {
		if (!ipv4_is_local_multicast(daddr) &&
		    ip_hdr(skb)->protocol != IPPROTO_IGMP)
			return -EINVAL;
	} else {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, itag);
		if (err < 0)
			return err;
	}
	return 0;
}

/* called in rcu_read_lock() section */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			     u8 tos, struct net_device *dev, int our)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	unsigned int flags = RTCF_MULTICAST;
	struct rtable *rth;
	u32 itag = 0;
	int err;

	err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
	if (err)
		return err;

	if (our)
		flags |= RTCF_LOCAL;

	if (IN_DEV_ORCONF(in_dev, NOPOLICY))
		IPCB(skb)->flags |= IPSKB_NOPOLICY;

	rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
			   false);
	if (!rth)
		return -ENOBUFS;

#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->dst.output = ip_rt_bug;
	rth->rt_is_input = 1;

#ifdef CONFIG_IP_MROUTE
	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
		rth->dst.input = ip_mr_input;
#endif
	RT_CACHE_STAT_INC(in_slow_mc);

	skb_dst_drop(skb);
	skb_dst_set(skb, &rth->dst);
	return 0;
}

static void ip_handle_martian_source(struct net_device *dev,
				     struct in_device *in_dev,
				     struct sk_buff *skb,
				     __be32 daddr,
				     __be32 saddr)
{
	RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 *	Per the RFC 1812 recommendation: if the source is
		 *	martian, the only hint we have is the MAC header.
		 */
		pr_warn("martian source %pI4 from %pI4, on dev %s\n",
			&daddr, &saddr, dev->name);
		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
			print_hex_dump(KERN_WARNING, "ll header: ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       skb_mac_header(skb),
				       dev->hard_header_len, false);
		}
	}
#endif
}

/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
			   const struct fib_result *res,
			   struct in_device *in_dev,
			   __be32 daddr, __be32 saddr, u32 tos)
{
	struct fib_nh_common *nhc = FIB_RES_NHC(*res);
	struct net_device *dev = nhc->nhc_dev;
	struct fib_nh_exception *fnhe;
	struct rtable *rth;
	int err;
	struct in_device *out_dev;
	bool do_cache;
	u32 itag = 0;

	/* get a working reference to the output device */
	out_dev = __in_dev_get_rcu(dev);
	if (!out_dev) {
		net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
		return -EINVAL;
	}

	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
				  in_dev->dev, in_dev, &itag);
	if (err < 0) {
		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
					 saddr);

		goto cleanup;
	}

	do_cache = res->fi && !itag;
	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
	    skb->protocol == htons(ETH_P_IP)) {
		__be32 gw;

		gw = nhc->nhc_gw_family == AF_INET ? nhc->nhc_gw.ipv4 : 0;
		if (IN_DEV_SHARED_MEDIA(out_dev) ||
		    inet_addr_onlink(out_dev, saddr, gw))
			IPCB(skb)->flags |= IPSKB_DOREDIRECT;
	}

	if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create a route if it is
		 * invalid for proxy ARP. DNAT routes are always valid.
		 *
		 * The proxy ARP feature has been extended to allow ARP
		 * replies back on the same interface, to support
		 * Private VLAN switch technologies. See arp.c.
		 */
		if (out_dev == in_dev &&
		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
			err = -EINVAL;
			goto cleanup;
		}
	}

	if (IN_DEV_ORCONF(in_dev, NOPOLICY))
		IPCB(skb)->flags |= IPSKB_NOPOLICY;

	fnhe = find_exception(nhc, daddr);
	if (do_cache) {
		if (fnhe)
			rth = rcu_dereference(fnhe->fnhe_rth_input);
		else
			rth = rcu_dereference(nhc->nhc_rth_input);
		if (rt_cache_valid(rth)) {
			skb_dst_set_noref(skb, &rth->dst);
			goto out;
		}
	}

	rth = rt_dst_alloc(out_dev->dev, 0, res->type,
			   IN_DEV_ORCONF(out_dev, NOXFRM));
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	rth->rt_is_input = 1;
	RT_CACHE_STAT_INC(in_slow_tot);

	rth->dst.input = ip_forward;

	rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
		       do_cache);
	lwtunnel_set_redirect(&rth->dst);
	skb_dst_set(skb, &rth->dst);
out:
	err = 0;
 cleanup:
	return err;
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
/* To make ICMP packets follow the right flow, the multipath hash is
 * calculated from the inner IP addresses.
 */
static void ip_multipath_l3_keys(const struct sk_buff *skb,
				 struct flow_keys *hash_keys)
{
	const struct iphdr *outer_iph = ip_hdr(skb);
	const struct iphdr *key_iph = outer_iph;
	const struct iphdr *inner_iph;
	const struct icmphdr *icmph;
	struct iphdr _inner_iph;
	struct icmphdr _icmph;

	if (likely(outer_iph->protocol != IPPROTO_ICMP))
		goto out;

	if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
		goto out;

	icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
				   &_icmph);
	if (!icmph)
		goto out;

	if (!icmp_is_err(icmph->type))
		goto out;

	inner_iph = skb_header_pointer(skb,
				       outer_iph->ihl * 4 + sizeof(_icmph),
				       sizeof(_inner_iph), &_inner_iph);
	if (!inner_iph)
		goto out;

	key_iph = inner_iph;
out:
	hash_keys->addrs.v4addrs.src = key_iph->saddr;
	hash_keys->addrs.v4addrs.dst = key_iph->daddr;
}
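
/* Illustrative sketch (hypothetical check, not upstream code): the point
 * of hashing the inner header is that an ICMP error embedding a datagram
 * of flow A->B hashes to the same value as A->B traffic itself, so both
 * ride the same multipath leg:
 */
static __maybe_unused bool example_icmp_follows_flow(const struct sk_buff *flow_pkt,
						     const struct sk_buff *icmp_err)
{
	struct flow_keys k1 = {}, k2 = {};

	ip_multipath_l3_keys(flow_pkt, &k1);	/* outer header of A->B */
	ip_multipath_l3_keys(icmp_err, &k2);	/* inner header, also A->B */

	return k1.addrs.v4addrs.src == k2.addrs.v4addrs.src &&
	       k1.addrs.v4addrs.dst == k2.addrs.v4addrs.dst;
}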

static u32 fib_multipath_custom_hash_outer(const struct net *net,
					   const struct sk_buff *skb,
					   bool *p_has_inner)
{
	u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
	struct flow_keys keys, hash_keys;

	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
		return 0;

	memset(&hash_keys, 0, sizeof(hash_keys));
	skb_flow_dissect_flow_keys(skb, &keys, FLOW_DISSECTOR_F_STOP_AT_ENCAP);

	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
		hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
		hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
		hash_keys.basic.ip_proto = keys.basic.ip_proto;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
		hash_keys.ports.src = keys.ports.src;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
		hash_keys.ports.dst = keys.ports.dst;

	*p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION);
	return flow_hash_from_keys(&hash_keys);
}

static u32 fib_multipath_custom_hash_inner(const struct net *net,
					   const struct sk_buff *skb,
					   bool has_inner)
{
	u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
	struct flow_keys keys, hash_keys;

	/* We assume the packet carries an encapsulation, but if none was
	 * encountered during dissection of the outer flow, then there is no
	 * point in calling the flow dissector again.
	 */
	if (!has_inner)
		return 0;

	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK))
		return 0;

	memset(&hash_keys, 0, sizeof(hash_keys));
	skb_flow_dissect_flow_keys(skb, &keys, 0);

	if (!(keys.control.flags & FLOW_DIS_ENCAPSULATION))
		return 0;

	if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
			hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
			hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
	} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
			hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
			hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
			hash_keys.tags.flow_label = keys.tags.flow_label;
	}

	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
		hash_keys.basic.ip_proto = keys.basic.ip_proto;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
		hash_keys.ports.src = keys.ports.src;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
		hash_keys.ports.dst = keys.ports.dst;

	return flow_hash_from_keys(&hash_keys);
}

static u32 fib_multipath_custom_hash_skb(const struct net *net,
					 const struct sk_buff *skb)
{
	u32 mhash, mhash_inner;
	bool has_inner = true;

	mhash = fib_multipath_custom_hash_outer(net, skb, &has_inner);
	mhash_inner = fib_multipath_custom_hash_inner(net, skb, has_inner);

	return jhash_2words(mhash, mhash_inner, 0);
}

static u32 fib_multipath_custom_hash_fl4(const struct net *net,
1998 					 const struct flowi4 *fl4)
1999 {
2000 	u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
2001 	struct flow_keys hash_keys;
2002 
2003 	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
2004 		return 0;
2005 
2006 	memset(&hash_keys, 0, sizeof(hash_keys));
2007 	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2008 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
2009 		hash_keys.addrs.v4addrs.src = fl4->saddr;
2010 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
2011 		hash_keys.addrs.v4addrs.dst = fl4->daddr;
2012 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
2013 		hash_keys.basic.ip_proto = fl4->flowi4_proto;
2014 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
2015 		hash_keys.ports.src = fl4->fl4_sport;
2016 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
2017 		hash_keys.ports.dst = fl4->fl4_dport;
2018 
2019 	return flow_hash_from_keys(&hash_keys);
2020 }
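
/* Example (a sketch, not the only configuration): with the custom policy
 * selected, the field mask decides what is hashed. Hashing on the outer
 * addresses plus the L4 ports corresponds to
 *
 *	hash_fields = FIB_MULTIPATH_HASH_FIELD_SRC_IP |
 *		      FIB_MULTIPATH_HASH_FIELD_DST_IP |
 *		      FIB_MULTIPATH_HASH_FIELD_SRC_PORT |
 *		      FIB_MULTIPATH_HASH_FIELD_DST_PORT;
 *
 * (set via the net.ipv4.fib_multipath_hash_fields sysctl), in which case
 * only addrs.v4addrs and ports are filled in before flow_hash_from_keys().
 */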
2021 
2022 /* If skb is set, it will be used and fl4 can be NULL. */
2023 int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
2024 		       const struct sk_buff *skb, struct flow_keys *flkeys)
2025 {
2026 	u32 multipath_hash = fl4 ? fl4->flowi4_multipath_hash : 0;
2027 	struct flow_keys hash_keys;
2028 	u32 mhash = 0;
2029 
2030 	switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
2031 	case 0:
2032 		memset(&hash_keys, 0, sizeof(hash_keys));
2033 		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2034 		if (skb) {
2035 			ip_multipath_l3_keys(skb, &hash_keys);
2036 		} else {
2037 			hash_keys.addrs.v4addrs.src = fl4->saddr;
2038 			hash_keys.addrs.v4addrs.dst = fl4->daddr;
2039 		}
2040 		mhash = flow_hash_from_keys(&hash_keys);
2041 		break;
2042 	case 1:
2043 		/* skb is currently provided only when forwarding */
2044 		if (skb) {
2045 			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
2046 			struct flow_keys keys;
2047 
2048 			/* short-circuit if we already have L4 hash present */
2049 			if (skb->l4_hash)
2050 				return skb_get_hash_raw(skb) >> 1;
2051 
2052 			memset(&hash_keys, 0, sizeof(hash_keys));
2053 
2054 			if (!flkeys) {
2055 				skb_flow_dissect_flow_keys(skb, &keys, flag);
2056 				flkeys = &keys;
2057 			}
2058 
2059 			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2060 			hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
2061 			hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
2062 			hash_keys.ports.src = flkeys->ports.src;
2063 			hash_keys.ports.dst = flkeys->ports.dst;
2064 			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2065 		} else {
2066 			memset(&hash_keys, 0, sizeof(hash_keys));
2067 			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2068 			hash_keys.addrs.v4addrs.src = fl4->saddr;
2069 			hash_keys.addrs.v4addrs.dst = fl4->daddr;
2070 			hash_keys.ports.src = fl4->fl4_sport;
2071 			hash_keys.ports.dst = fl4->fl4_dport;
2072 			hash_keys.basic.ip_proto = fl4->flowi4_proto;
2073 		}
2074 		mhash = flow_hash_from_keys(&hash_keys);
2075 		break;
2076 	case 2:
2077 		memset(&hash_keys, 0, sizeof(hash_keys));
2078 		/* skb is currently provided only when forwarding */
2079 		if (skb) {
2080 			struct flow_keys keys;
2081 
2082 			skb_flow_dissect_flow_keys(skb, &keys, 0);
2083 			/* Inner can be v4 or v6 */
2084 			if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2085 				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2086 				hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
2087 				hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
2088 			} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2089 				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2090 				hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
2091 				hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
2092 				hash_keys.tags.flow_label = keys.tags.flow_label;
2093 				hash_keys.basic.ip_proto = keys.basic.ip_proto;
2094 			} else {
2095 				/* Same as case 0 */
2096 				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2097 				ip_multipath_l3_keys(skb, &hash_keys);
2098 			}
2099 		} else {
2100 			/* Same as case 0 */
2101 			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2102 			hash_keys.addrs.v4addrs.src = fl4->saddr;
2103 			hash_keys.addrs.v4addrs.dst = fl4->daddr;
2104 		}
2105 		mhash = flow_hash_from_keys(&hash_keys);
2106 		break;
2107 	case 3:
2108 		if (skb)
2109 			mhash = fib_multipath_custom_hash_skb(net, skb);
2110 		else
2111 			mhash = fib_multipath_custom_hash_fl4(net, fl4);
2112 		break;
2113 	}
2114 
2115 	if (multipath_hash)
2116 		mhash = jhash_2words(mhash, multipath_hash, 0);
2117 
2118 	return mhash >> 1;
2119 }
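
/* Usage sketch (for illustration): the fib_multipath_hash_policy sysctl
 * selects L3 (0), L4 (1), inner L3 (2) or custom (3) hashing above; a
 * forwarding path then picks a nexthop roughly as follows, which is what
 * ip_mkroute_input() below does:
 *
 *	if (res->fi && fib_info_num_path(res->fi) > 1) {
 *		int h = fib_multipath_hash(net, NULL, skb, NULL);
 *
 *		fib_select_multipath(res, h);
 *	}
 */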
2120 #endif /* CONFIG_IP_ROUTE_MULTIPATH */
2121 
2122 static int ip_mkroute_input(struct sk_buff *skb,
2123 			    struct fib_result *res,
2124 			    struct in_device *in_dev,
2125 			    __be32 daddr, __be32 saddr, u32 tos,
2126 			    struct flow_keys *hkeys)
2127 {
2128 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2129 	if (res->fi && fib_info_num_path(res->fi) > 1) {
2130 		int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);
2131 
2132 		fib_select_multipath(res, h);
2133 		IPCB(skb)->flags |= IPSKB_MULTIPATH;
2134 	}
2135 #endif
2136 
2137 	/* create a routing cache entry */
2138 	return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
2139 }
2140 
2141 /* Implements the same saddr-related checks as ip_route_input_slow(),
2142  * assuming daddr is valid and the destination is not a local broadcast
2143  * address. Uses the provided hint instead of performing a route lookup.
2144  */
2145 int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2146 		      u8 tos, struct net_device *dev,
2147 		      const struct sk_buff *hint)
2148 {
2149 	struct in_device *in_dev = __in_dev_get_rcu(dev);
2150 	struct rtable *rt = skb_rtable(hint);
2151 	struct net *net = dev_net(dev);
2152 	int err = -EINVAL;
2153 	u32 tag = 0;
2154 
2155 	if (!in_dev)
2156 		return -EINVAL;
2157 
2158 	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
2159 		goto martian_source;
2160 
2161 	if (ipv4_is_zeronet(saddr))
2162 		goto martian_source;
2163 
2164 	if (ipv4_is_loopback(saddr) && !IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
2165 		goto martian_source;
2166 
2167 	if (rt->rt_type != RTN_LOCAL)
2168 		goto skip_validate_source;
2169 
2170 	tos &= IPTOS_RT_MASK;
2171 	err = fib_validate_source(skb, saddr, daddr, tos, 0, dev, in_dev, &tag);
2172 	if (err < 0)
2173 		goto martian_source;
2174 
2175 skip_validate_source:
2176 	skb_dst_copy(skb, hint);
2177 	return 0;
2178 
2179 martian_source:
2180 	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2181 	return err;
2182 }
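
/* Sketch of the intended use (hypothetical caller, as in the listified
 * receive path): when consecutive packets share their destination and
 * device, the previous skb's dst can be reused after revalidating saddr:
 *
 *	err = ip_route_use_hint(skb, daddr, saddr, tos, dev, hint);
 *	if (err)
 *		... fall back to a full ip_route_input_noref() lookup ...
 */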
2183 
2184 /* get device for dst_alloc with local routes */
2185 static struct net_device *ip_rt_get_dev(struct net *net,
2186 					const struct fib_result *res)
2187 {
2188 	struct fib_nh_common *nhc = res->fi ? res->nhc : NULL;
2189 	struct net_device *dev = NULL;
2190 
2191 	if (nhc)
2192 		dev = l3mdev_master_dev_rcu(nhc->nhc_dev);
2193 
2194 	return dev ? : net->loopback_dev;
2195 }
2196 
2197 /*
2198  *	NOTE. We drop all packets that have local source
2199  *	addresses, because every properly looped-back packet
2200  *	must already have the correct destination attached by the
2201  *	output routine. Changes to the enforced policies must also
2202  *	be applied to ip_route_use_hint().
2203  *
2204  *	This approach solves two big problems:
2205  *	1. Non-simplex devices are handled properly.
2206  *	2. IP spoofing attempts are filtered with a 100% guarantee.
2207  *	Called with rcu_read_lock().
2208  */
2209 
2210 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2211 			       u8 tos, struct net_device *dev,
2212 			       struct fib_result *res)
2213 {
2214 	struct in_device *in_dev = __in_dev_get_rcu(dev);
2215 	struct flow_keys *flkeys = NULL, _flkeys;
2216 	struct net    *net = dev_net(dev);
2217 	struct ip_tunnel_info *tun_info;
2218 	int		err = -EINVAL;
2219 	unsigned int	flags = 0;
2220 	u32		itag = 0;
2221 	struct rtable	*rth;
2222 	struct flowi4	fl4;
2223 	bool do_cache = true;
2224 
2225 	/* IP on this device is disabled. */
2226 
2227 	if (!in_dev)
2228 		goto out;
2229 
2230 	/* Check for the weirdest martians, which cannot be detected
2231 	 * by fib_lookup.
2232 	 */
2233 
2234 	tun_info = skb_tunnel_info(skb);
2235 	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
2236 		fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
2237 	else
2238 		fl4.flowi4_tun_key.tun_id = 0;
2239 	skb_dst_drop(skb);
2240 
2241 	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
2242 		goto martian_source;
2243 
2244 	res->fi = NULL;
2245 	res->table = NULL;
2246 	if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
2247 		goto brd_input;
2248 
2249 	/* Accept zero addresses only for limited broadcast;
2250 	 * I do not even know whether to fix this or not. Waiting for complaints :-)
2251 	 */
2252 	if (ipv4_is_zeronet(saddr))
2253 		goto martian_source;
2254 
2255 	if (ipv4_is_zeronet(daddr))
2256 		goto martian_destination;
2257 
2258 	/* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET()
2259 	 * more than once, calling it only if daddr and/or saddr is a loopback address.
2260 	 */
2261 	if (ipv4_is_loopback(daddr)) {
2262 		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
2263 			goto martian_destination;
2264 	} else if (ipv4_is_loopback(saddr)) {
2265 		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
2266 			goto martian_source;
2267 	}
2268 
2269 	/*
2270 	 *	Now we are ready to route packet.
2271 	 */
2272 	fl4.flowi4_l3mdev = 0;
2273 	fl4.flowi4_oif = 0;
2274 	fl4.flowi4_iif = dev->ifindex;
2275 	fl4.flowi4_mark = skb->mark;
2276 	fl4.flowi4_tos = tos;
2277 	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
2278 	fl4.flowi4_flags = 0;
2279 	fl4.daddr = daddr;
2280 	fl4.saddr = saddr;
2281 	fl4.flowi4_uid = sock_net_uid(net, NULL);
2282 	fl4.flowi4_multipath_hash = 0;
2283 
2284 	if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
2285 		flkeys = &_flkeys;
2286 	} else {
2287 		fl4.flowi4_proto = 0;
2288 		fl4.fl4_sport = 0;
2289 		fl4.fl4_dport = 0;
2290 	}
2291 
2292 	err = fib_lookup(net, &fl4, res, 0);
2293 	if (err != 0) {
2294 		if (!IN_DEV_FORWARD(in_dev))
2295 			err = -EHOSTUNREACH;
2296 		goto no_route;
2297 	}
2298 
2299 	if (res->type == RTN_BROADCAST) {
2300 		if (IN_DEV_BFORWARD(in_dev))
2301 			goto make_route;
2302 		/* do not cache if bc_forwarding is enabled */
2303 		if (IPV4_DEVCONF_ALL_RO(net, BC_FORWARDING))
2304 			do_cache = false;
2305 		goto brd_input;
2306 	}
2307 
2308 	if (res->type == RTN_LOCAL) {
2309 		err = fib_validate_source(skb, saddr, daddr, tos,
2310 					  0, dev, in_dev, &itag);
2311 		if (err < 0)
2312 			goto martian_source;
2313 		goto local_input;
2314 	}
2315 
2316 	if (!IN_DEV_FORWARD(in_dev)) {
2317 		err = -EHOSTUNREACH;
2318 		goto no_route;
2319 	}
2320 	if (res->type != RTN_UNICAST)
2321 		goto martian_destination;
2322 
2323 make_route:
2324 	err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys);
2325 out:	return err;
2326 
2327 brd_input:
2328 	if (skb->protocol != htons(ETH_P_IP))
2329 		goto e_inval;
2330 
2331 	if (!ipv4_is_zeronet(saddr)) {
2332 		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
2333 					  in_dev, &itag);
2334 		if (err < 0)
2335 			goto martian_source;
2336 	}
2337 	flags |= RTCF_BROADCAST;
2338 	res->type = RTN_BROADCAST;
2339 	RT_CACHE_STAT_INC(in_brd);
2340 
2341 local_input:
2342 	if (IN_DEV_ORCONF(in_dev, NOPOLICY))
2343 		IPCB(skb)->flags |= IPSKB_NOPOLICY;
2344 
2345 	do_cache &= res->fi && !itag;
2346 	if (do_cache) {
2347 		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2348 
2349 		rth = rcu_dereference(nhc->nhc_rth_input);
2350 		if (rt_cache_valid(rth)) {
2351 			skb_dst_set_noref(skb, &rth->dst);
2352 			err = 0;
2353 			goto out;
2354 		}
2355 	}
2356 
2357 	rth = rt_dst_alloc(ip_rt_get_dev(net, res),
2358 			   flags | RTCF_LOCAL, res->type, false);
2359 	if (!rth)
2360 		goto e_nobufs;
2361 
2362 	rth->dst.output = ip_rt_bug;
2363 #ifdef CONFIG_IP_ROUTE_CLASSID
2364 	rth->dst.tclassid = itag;
2365 #endif
2366 	rth->rt_is_input = 1;
2367 
2368 	RT_CACHE_STAT_INC(in_slow_tot);
2369 	if (res->type == RTN_UNREACHABLE) {
2370 		rth->dst.input = ip_error;
2371 		rth->dst.error = -err;
2372 		rth->rt_flags	&= ~RTCF_LOCAL;
2373 	}
2374 
2375 	if (do_cache) {
2376 		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2377 
2378 		rth->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
2379 		if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
2380 			WARN_ON(rth->dst.input == lwtunnel_input);
2381 			rth->dst.lwtstate->orig_input = rth->dst.input;
2382 			rth->dst.input = lwtunnel_input;
2383 		}
2384 
2385 		if (unlikely(!rt_cache_route(nhc, rth)))
2386 			rt_add_uncached_list(rth);
2387 	}
2388 	skb_dst_set(skb, &rth->dst);
2389 	err = 0;
2390 	goto out;
2391 
2392 no_route:
2393 	RT_CACHE_STAT_INC(in_no_route);
2394 	res->type = RTN_UNREACHABLE;
2395 	res->fi = NULL;
2396 	res->table = NULL;
2397 	goto local_input;
2398 
2399 	/*
2400 	 *	Do not cache martian addresses: they should be logged (RFC1812)
2401 	 */
2402 martian_destination:
2403 	RT_CACHE_STAT_INC(in_martian_dst);
2404 #ifdef CONFIG_IP_ROUTE_VERBOSE
2405 	if (IN_DEV_LOG_MARTIANS(in_dev))
2406 		net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
2407 				     &daddr, &saddr, dev->name);
2408 #endif
2409 
2410 e_inval:
2411 	err = -EINVAL;
2412 	goto out;
2413 
2414 e_nobufs:
2415 	err = -ENOBUFS;
2416 	goto out;
2417 
2418 martian_source:
2419 	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2420 	goto out;
2421 }
2422 
2423 /* called with rcu_read_lock held */
2424 static int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2425 			      u8 tos, struct net_device *dev, struct fib_result *res)
2426 {
2427 	/* Multicast recognition logic was moved from the route cache to here.
2428 	 * The problem was that too many Ethernet cards have broken/missing
2429 	 * hardware multicast filters :-( As a result, a host on a multicast
2430 	 * network acquires a lot of useless route cache entries, e.g. for
2431 	 * SDR messages from all over the world. Now we try to get rid of them.
2432 	 * Really, provided the software IP multicast filter is organized
2433 	 * reasonably (at least, hashed), it does not result in a slowdown
2434 	 * compared with route cache reject entries.
2435 	 * Note that multicast routers are not affected, because a
2436 	 * route cache entry is created eventually.
2437 	 */
2438 	if (ipv4_is_multicast(daddr)) {
2439 		struct in_device *in_dev = __in_dev_get_rcu(dev);
2440 		int our = 0;
2441 		int err = -EINVAL;
2442 
2443 		if (!in_dev)
2444 			return err;
2445 		our = ip_check_mc_rcu(in_dev, daddr, saddr,
2446 				      ip_hdr(skb)->protocol);
2447 
2448 		/* check l3 master if no match yet */
2449 		if (!our && netif_is_l3_slave(dev)) {
2450 			struct in_device *l3_in_dev;
2451 
2452 			l3_in_dev = __in_dev_get_rcu(skb->dev);
2453 			if (l3_in_dev)
2454 				our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
2455 						      ip_hdr(skb)->protocol);
2456 		}
2457 
2458 		if (our
2459 #ifdef CONFIG_IP_MROUTE
2460 			||
2461 		    (!ipv4_is_local_multicast(daddr) &&
2462 		     IN_DEV_MFORWARD(in_dev))
2463 #endif
2464 		   ) {
2465 			err = ip_route_input_mc(skb, daddr, saddr,
2466 						tos, dev, our);
2467 		}
2468 		return err;
2469 	}
2470 
2471 	return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
2472 }
2473 
2474 int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2475 			 u8 tos, struct net_device *dev)
2476 {
2477 	struct fib_result res;
2478 	int err;
2479 
2480 	tos &= IPTOS_RT_MASK;
2481 	rcu_read_lock();
2482 	err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
2483 	rcu_read_unlock();
2484 
2485 	return err;
2486 }
2487 EXPORT_SYMBOL(ip_route_input_noref);
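
/* Typical use (a sketch along the lines of the IPv4 receive path): route
 * an incoming packet from its header fields; on success, skb_dst(skb) is
 * set and the packet can be passed to dst_input():
 *
 *	const struct iphdr *iph = ip_hdr(skb);
 *	int err;
 *
 *	err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
 *				   iph->tos, skb->dev);
 *	if (err)
 *		... drop the packet ...
 */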
2488 
2489 /* called with rcu_read_lock() */
2490 static struct rtable *__mkroute_output(const struct fib_result *res,
2491 				       const struct flowi4 *fl4, int orig_oif,
2492 				       struct net_device *dev_out,
2493 				       unsigned int flags)
2494 {
2495 	struct fib_info *fi = res->fi;
2496 	struct fib_nh_exception *fnhe;
2497 	struct in_device *in_dev;
2498 	u16 type = res->type;
2499 	struct rtable *rth;
2500 	bool do_cache;
2501 
2502 	in_dev = __in_dev_get_rcu(dev_out);
2503 	if (!in_dev)
2504 		return ERR_PTR(-EINVAL);
2505 
2506 	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
2507 		if (ipv4_is_loopback(fl4->saddr) &&
2508 		    !(dev_out->flags & IFF_LOOPBACK) &&
2509 		    !netif_is_l3_master(dev_out))
2510 			return ERR_PTR(-EINVAL);
2511 
2512 	if (ipv4_is_lbcast(fl4->daddr))
2513 		type = RTN_BROADCAST;
2514 	else if (ipv4_is_multicast(fl4->daddr))
2515 		type = RTN_MULTICAST;
2516 	else if (ipv4_is_zeronet(fl4->daddr))
2517 		return ERR_PTR(-EINVAL);
2518 
2519 	if (dev_out->flags & IFF_LOOPBACK)
2520 		flags |= RTCF_LOCAL;
2521 
2522 	do_cache = true;
2523 	if (type == RTN_BROADCAST) {
2524 		flags |= RTCF_BROADCAST | RTCF_LOCAL;
2525 		fi = NULL;
2526 	} else if (type == RTN_MULTICAST) {
2527 		flags |= RTCF_MULTICAST | RTCF_LOCAL;
2528 		if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
2529 				     fl4->flowi4_proto))
2530 			flags &= ~RTCF_LOCAL;
2531 		else
2532 			do_cache = false;
2533 		/* If a multicast route does not exist, use the
2534 		 * default one, but do not use a gateway in this case.
2535 		 * Yes, it is a hack.
2536 		 */
2537 		if (fi && res->prefixlen < 4)
2538 			fi = NULL;
2539 	} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
2540 		   (orig_oif != dev_out->ifindex)) {
2541 		/* For local routes that require a particular output interface
2542 		 * we do not want to cache the result.  Caching the result
2543 		 * causes incorrect behaviour when there are multiple source
2544 		 * addresses on the interface, the end result being that if the
2545 		 * intended recipient is waiting on that interface for the
2546 		 * packet, they won't receive it because it will be delivered on
2547 		 * the loopback interface and the IP_PKTINFO ipi_ifindex will
2548 		 * be set to the loopback interface as well.
2549 		 */
2550 		do_cache = false;
2551 	}
2552 
2553 	fnhe = NULL;
2554 	do_cache &= fi != NULL;
2555 	if (fi) {
2556 		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2557 		struct rtable __rcu **prth;
2558 
2559 		fnhe = find_exception(nhc, fl4->daddr);
2560 		if (!do_cache)
2561 			goto add;
2562 		if (fnhe) {
2563 			prth = &fnhe->fnhe_rth_output;
2564 		} else {
2565 			if (unlikely(fl4->flowi4_flags &
2566 				     FLOWI_FLAG_KNOWN_NH &&
2567 				     !(nhc->nhc_gw_family &&
2568 				       nhc->nhc_scope == RT_SCOPE_LINK))) {
2569 				do_cache = false;
2570 				goto add;
2571 			}
2572 			prth = raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
2573 		}
2574 		rth = rcu_dereference(*prth);
2575 		if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
2576 			return rth;
2577 	}
2578 
2579 add:
2580 	rth = rt_dst_alloc(dev_out, flags, type,
2581 			   IN_DEV_ORCONF(in_dev, NOXFRM));
2582 	if (!rth)
2583 		return ERR_PTR(-ENOBUFS);
2584 
2585 	rth->rt_iif = orig_oif;
2586 
2587 	RT_CACHE_STAT_INC(out_slow_tot);
2588 
2589 	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2590 		if (flags & RTCF_LOCAL &&
2591 		    !(dev_out->flags & IFF_LOOPBACK)) {
2592 			rth->dst.output = ip_mc_output;
2593 			RT_CACHE_STAT_INC(out_slow_mc);
2594 		}
2595 #ifdef CONFIG_IP_MROUTE
2596 		if (type == RTN_MULTICAST) {
2597 			if (IN_DEV_MFORWARD(in_dev) &&
2598 			    !ipv4_is_local_multicast(fl4->daddr)) {
2599 				rth->dst.input = ip_mr_input;
2600 				rth->dst.output = ip_mc_output;
2601 			}
2602 		}
2603 #endif
2604 	}
2605 
2606 	rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache);
2607 	lwtunnel_set_redirect(&rth->dst);
2608 
2609 	return rth;
2610 }
2611 
2612 /*
2613  * Major route resolver routine.
2614  */
2615 
2616 struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
2617 					const struct sk_buff *skb)
2618 {
2619 	struct fib_result res = {
2620 		.type		= RTN_UNSPEC,
2621 		.fi		= NULL,
2622 		.table		= NULL,
2623 		.tclassid	= 0,
2624 	};
2625 	struct rtable *rth;
2626 
2627 	fl4->flowi4_iif = LOOPBACK_IFINDEX;
2628 	fl4->flowi4_tos &= IPTOS_RT_MASK;
2629 
2630 	rcu_read_lock();
2631 	rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
2632 	rcu_read_unlock();
2633 
2634 	return rth;
2635 }
2636 EXPORT_SYMBOL_GPL(ip_route_output_key_hash);
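
/* Usage sketch (hypothetical caller): resolve an output route for a
 * destination; the returned rtable holds a reference and is released
 * with ip_rt_put():
 *
 *	struct flowi4 fl4 = {
 *		.daddr = daddr,
 *	};
 *	struct rtable *rt;
 *
 *	rt = ip_route_output_key_hash(net, &fl4, NULL);
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *	...
 *	ip_rt_put(rt);
 */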
2637 
2638 struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
2639 					    struct fib_result *res,
2640 					    const struct sk_buff *skb)
2641 {
2642 	struct net_device *dev_out = NULL;
2643 	int orig_oif = fl4->flowi4_oif;
2644 	unsigned int flags = 0;
2645 	struct rtable *rth;
2646 	int err;
2647 
2648 	if (fl4->saddr) {
2649 		if (ipv4_is_multicast(fl4->saddr) ||
2650 		    ipv4_is_lbcast(fl4->saddr) ||
2651 		    ipv4_is_zeronet(fl4->saddr)) {
2652 			rth = ERR_PTR(-EINVAL);
2653 			goto out;
2654 		}
2655 
2656 		rth = ERR_PTR(-ENETUNREACH);
2657 
2658 		/* I removed the check for oif == dev_out->oif here.
2659 		 * It was wrong for two reasons:
2660 		 * 1. ip_dev_find(net, saddr) can return the wrong iface if saddr
2661 		 *    is assigned to multiple interfaces.
2662 		 * 2. Moreover, we are allowed to send packets with the saddr
2663 		 *    of another iface. --ANK
2664 		 */
2665 
2666 		if (fl4->flowi4_oif == 0 &&
2667 		    (ipv4_is_multicast(fl4->daddr) ||
2668 		     ipv4_is_lbcast(fl4->daddr))) {
2669 			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2670 			dev_out = __ip_dev_find(net, fl4->saddr, false);
2671 			if (!dev_out)
2672 				goto out;
2673 
2674 			/* Special hack: the user can direct multicasts
2675 			 * and limited broadcast via the necessary interface
2676 			 * without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2677 			 * This hack is not just for fun; it allows
2678 			 * vic, vat and friends to work.
2679 			 * They bind a socket to loopback, set ttl to zero
2680 			 * and expect that it will work.
2681 			 * From the viewpoint of the routing cache they are broken,
2682 			 * because we are not allowed to build a multicast path
2683 			 * with a loopback source addr (look, the routing cache
2684 			 * cannot know that ttl is zero, so the packet
2685 			 * will not leave this host and the route is valid).
2686 			 * Luckily, this hack is a good workaround.
2687 			 */
2688 
2689 			fl4->flowi4_oif = dev_out->ifindex;
2690 			goto make_route;
2691 		}
2692 
2693 		if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2694 			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2695 			if (!__ip_dev_find(net, fl4->saddr, false))
2696 				goto out;
2697 		}
2698 	}
2699 
2700 
2701 	if (fl4->flowi4_oif) {
2702 		dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2703 		rth = ERR_PTR(-ENODEV);
2704 		if (!dev_out)
2705 			goto out;
2706 
2707 		/* RACE: Check return value of inet_select_addr instead. */
2708 		if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2709 			rth = ERR_PTR(-ENETUNREACH);
2710 			goto out;
2711 		}
2712 		if (ipv4_is_local_multicast(fl4->daddr) ||
2713 		    ipv4_is_lbcast(fl4->daddr) ||
2714 		    fl4->flowi4_proto == IPPROTO_IGMP) {
2715 			if (!fl4->saddr)
2716 				fl4->saddr = inet_select_addr(dev_out, 0,
2717 							      RT_SCOPE_LINK);
2718 			goto make_route;
2719 		}
2720 		if (!fl4->saddr) {
2721 			if (ipv4_is_multicast(fl4->daddr))
2722 				fl4->saddr = inet_select_addr(dev_out, 0,
2723 							      fl4->flowi4_scope);
2724 			else if (!fl4->daddr)
2725 				fl4->saddr = inet_select_addr(dev_out, 0,
2726 							      RT_SCOPE_HOST);
2727 		}
2728 	}
2729 
2730 	if (!fl4->daddr) {
2731 		fl4->daddr = fl4->saddr;
2732 		if (!fl4->daddr)
2733 			fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2734 		dev_out = net->loopback_dev;
2735 		fl4->flowi4_oif = LOOPBACK_IFINDEX;
2736 		res->type = RTN_LOCAL;
2737 		flags |= RTCF_LOCAL;
2738 		goto make_route;
2739 	}
2740 
2741 	err = fib_lookup(net, fl4, res, 0);
2742 	if (err) {
2743 		res->fi = NULL;
2744 		res->table = NULL;
2745 		if (fl4->flowi4_oif &&
2746 		    (ipv4_is_multicast(fl4->daddr) || !fl4->flowi4_l3mdev)) {
2747 			/* Apparently, the routing tables are wrong. Assume
2748 			 * that the destination is on-link.
2749 			 *
2750 			 * WHY? DW.
2751 			 * Because we are allowed to send to an iface
2752 			 * even if it has NO routes and NO assigned
2753 			 * addresses. When oif is specified, routing
2754 			 * tables are looked up with only one purpose:
2755 			 * to catch whether the destination is gatewayed
2756 			 * rather than direct. Moreover, if MSG_DONTROUTE is
2757 			 * set, we send the packet, ignoring both routing
2758 			 * tables and ifaddr state. --ANK
2759 			 *
2760 			 *
2761 			 * We could do it even if oif is unknown
2762 			 * (as IPv6 likely does), but we do not.
2763 			 */
2764 
2765 			if (fl4->saddr == 0)
2766 				fl4->saddr = inet_select_addr(dev_out, 0,
2767 							      RT_SCOPE_LINK);
2768 			res->type = RTN_UNICAST;
2769 			goto make_route;
2770 		}
2771 		rth = ERR_PTR(err);
2772 		goto out;
2773 	}
2774 
2775 	if (res->type == RTN_LOCAL) {
2776 		if (!fl4->saddr) {
2777 			if (res->fi->fib_prefsrc)
2778 				fl4->saddr = res->fi->fib_prefsrc;
2779 			else
2780 				fl4->saddr = fl4->daddr;
2781 		}
2782 
2783 		/* L3 master device is the loopback for that domain */
2784 		dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
2785 			net->loopback_dev;
2786 
2787 		/* make sure orig_oif points to fib result device even
2788 		 * though packet rx/tx happens over loopback or l3mdev
2789 		 */
2790 		orig_oif = FIB_RES_OIF(*res);
2791 
2792 		fl4->flowi4_oif = dev_out->ifindex;
2793 		flags |= RTCF_LOCAL;
2794 		goto make_route;
2795 	}
2796 
2797 	fib_select_path(net, res, fl4, skb);
2798 
2799 	dev_out = FIB_RES_DEV(*res);
2800 
2801 make_route:
2802 	rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);
2803 
2804 out:
2805 	return rth;
2806 }
2807 
2808 static struct dst_ops ipv4_dst_blackhole_ops = {
2809 	.family			= AF_INET,
2810 	.default_advmss		= ipv4_default_advmss,
2811 	.neigh_lookup		= ipv4_neigh_lookup,
2812 	.check			= dst_blackhole_check,
2813 	.cow_metrics		= dst_blackhole_cow_metrics,
2814 	.update_pmtu		= dst_blackhole_update_pmtu,
2815 	.redirect		= dst_blackhole_redirect,
2816 	.mtu			= dst_blackhole_mtu,
2817 };
2818 
2819 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2820 {
2821 	struct rtable *ort = dst_rtable(dst_orig);
2822 	struct rtable *rt;
2823 
2824 	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, DST_OBSOLETE_DEAD, 0);
2825 	if (rt) {
2826 		struct dst_entry *new = &rt->dst;
2827 
2828 		new->__use = 1;
2829 		new->input = dst_discard;
2830 		new->output = dst_discard_out;
2831 
2832 		new->dev = net->loopback_dev;
2833 		netdev_hold(new->dev, &new->dev_tracker, GFP_ATOMIC);
2834 
2835 		rt->rt_is_input = ort->rt_is_input;
2836 		rt->rt_iif = ort->rt_iif;
2837 		rt->rt_pmtu = ort->rt_pmtu;
2838 		rt->rt_mtu_locked = ort->rt_mtu_locked;
2839 
2840 		rt->rt_genid = rt_genid_ipv4(net);
2841 		rt->rt_flags = ort->rt_flags;
2842 		rt->rt_type = ort->rt_type;
2843 		rt->rt_uses_gateway = ort->rt_uses_gateway;
2844 		rt->rt_gw_family = ort->rt_gw_family;
2845 		if (rt->rt_gw_family == AF_INET)
2846 			rt->rt_gw4 = ort->rt_gw4;
2847 		else if (rt->rt_gw_family == AF_INET6)
2848 			rt->rt_gw6 = ort->rt_gw6;
2849 	}
2850 
2851 	dst_release(dst_orig);
2852 
2853 	return rt ? &rt->dst : ERR_PTR(-ENOMEM);
2854 }
2855 
2856 struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2857 				    const struct sock *sk)
2858 {
2859 	struct rtable *rt = __ip_route_output_key(net, flp4);
2860 
2861 	if (IS_ERR(rt))
2862 		return rt;
2863 
2864 	if (flp4->flowi4_proto) {
2865 		flp4->flowi4_oif = rt->dst.dev->ifindex;
2866 		rt = dst_rtable(xfrm_lookup_route(net, &rt->dst,
2867 						  flowi4_to_flowi(flp4),
2868 						  sk, 0));
2869 	}
2870 
2871 	return rt;
2872 }
2873 EXPORT_SYMBOL_GPL(ip_route_output_flow);
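
/* Usage sketch: like __ip_route_output_key(), but when a protocol is set
 * the result is additionally passed through xfrm_lookup_route(), so
 * callers that may hit IPsec policies (e.g. connected sockets) would do
 * roughly:
 *
 *	rt = ip_route_output_flow(net, &fl4, sk);
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 */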
2874 
2875 /* called with rcu_read_lock held */
2876 static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2877 			struct rtable *rt, u32 table_id, struct flowi4 *fl4,
2878 			struct sk_buff *skb, u32 portid, u32 seq,
2879 			unsigned int flags)
2880 {
2881 	struct rtmsg *r;
2882 	struct nlmsghdr *nlh;
2883 	unsigned long expires = 0;
2884 	u32 error;
2885 	u32 metrics[RTAX_MAX];
2886 
2887 	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), flags);
2888 	if (!nlh)
2889 		return -EMSGSIZE;
2890 
2891 	r = nlmsg_data(nlh);
2892 	r->rtm_family	 = AF_INET;
2893 	r->rtm_dst_len	= 32;
2894 	r->rtm_src_len	= 0;
2895 	r->rtm_tos	= fl4 ? fl4->flowi4_tos : 0;
2896 	r->rtm_table	= table_id < 256 ? table_id : RT_TABLE_COMPAT;
2897 	if (nla_put_u32(skb, RTA_TABLE, table_id))
2898 		goto nla_put_failure;
2899 	r->rtm_type	= rt->rt_type;
2900 	r->rtm_scope	= RT_SCOPE_UNIVERSE;
2901 	r->rtm_protocol = RTPROT_UNSPEC;
2902 	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2903 	if (rt->rt_flags & RTCF_NOTIFY)
2904 		r->rtm_flags |= RTM_F_NOTIFY;
2905 	if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
2906 		r->rtm_flags |= RTCF_DOREDIRECT;
2907 
2908 	if (nla_put_in_addr(skb, RTA_DST, dst))
2909 		goto nla_put_failure;
2910 	if (src) {
2911 		r->rtm_src_len = 32;
2912 		if (nla_put_in_addr(skb, RTA_SRC, src))
2913 			goto nla_put_failure;
2914 	}
2915 	if (rt->dst.dev &&
2916 	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2917 		goto nla_put_failure;
2918 	if (rt->dst.lwtstate &&
2919 	    lwtunnel_fill_encap(skb, rt->dst.lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
2920 		goto nla_put_failure;
2921 #ifdef CONFIG_IP_ROUTE_CLASSID
2922 	if (rt->dst.tclassid &&
2923 	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2924 		goto nla_put_failure;
2925 #endif
2926 	if (fl4 && !rt_is_input_route(rt) &&
2927 	    fl4->saddr != src) {
2928 		if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
2929 			goto nla_put_failure;
2930 	}
2931 	if (rt->rt_uses_gateway) {
2932 		if (rt->rt_gw_family == AF_INET &&
2933 		    nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
2934 			goto nla_put_failure;
2935 		} else if (rt->rt_gw_family == AF_INET6) {
2936 			int alen = sizeof(struct in6_addr);
2937 			struct nlattr *nla;
2938 			struct rtvia *via;
2939 
2940 			nla = nla_reserve(skb, RTA_VIA, alen + 2);
2941 			if (!nla)
2942 				goto nla_put_failure;
2943 
2944 			via = nla_data(nla);
2945 			via->rtvia_family = AF_INET6;
2946 			memcpy(via->rtvia_addr, &rt->rt_gw6, alen);
2947 		}
2948 	}
2949 
2950 	expires = rt->dst.expires;
2951 	if (expires) {
2952 		unsigned long now = jiffies;
2953 
2954 		if (time_before(now, expires))
2955 			expires -= now;
2956 		else
2957 			expires = 0;
2958 	}
2959 
2960 	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
2961 	if (rt->rt_pmtu && expires)
2962 		metrics[RTAX_MTU - 1] = rt->rt_pmtu;
2963 	if (rt->rt_mtu_locked && expires)
2964 		metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
2965 	if (rtnetlink_put_metrics(skb, metrics) < 0)
2966 		goto nla_put_failure;
2967 
2968 	if (fl4) {
2969 		if (fl4->flowi4_mark &&
2970 		    nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
2971 			goto nla_put_failure;
2972 
2973 		if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
2974 		    nla_put_u32(skb, RTA_UID,
2975 				from_kuid_munged(current_user_ns(),
2976 						 fl4->flowi4_uid)))
2977 			goto nla_put_failure;
2978 
2979 		if (rt_is_input_route(rt)) {
2980 #ifdef CONFIG_IP_MROUTE
2981 			if (ipv4_is_multicast(dst) &&
2982 			    !ipv4_is_local_multicast(dst) &&
2983 			    IPV4_DEVCONF_ALL_RO(net, MC_FORWARDING)) {
2984 				int err = ipmr_get_route(net, skb,
2985 							 fl4->saddr, fl4->daddr,
2986 							 r, portid);
2987 
2988 				if (err <= 0) {
2989 					if (err == 0)
2990 						return 0;
2991 					goto nla_put_failure;
2992 				}
2993 			} else
2994 #endif
2995 				if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif))
2996 					goto nla_put_failure;
2997 		}
2998 	}
2999 
3000 	error = rt->dst.error;
3001 
3002 	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
3003 		goto nla_put_failure;
3004 
3005 	nlmsg_end(skb, nlh);
3006 	return 0;
3007 
3008 nla_put_failure:
3009 	nlmsg_cancel(skb, nlh);
3010 	return -EMSGSIZE;
3011 }
3012 
3013 static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
3014 			    struct netlink_callback *cb, u32 table_id,
3015 			    struct fnhe_hash_bucket *bucket, int genid,
3016 			    int *fa_index, int fa_start, unsigned int flags)
3017 {
3018 	int i;
3019 
3020 	for (i = 0; i < FNHE_HASH_SIZE; i++) {
3021 		struct fib_nh_exception *fnhe;
3022 
3023 		for (fnhe = rcu_dereference(bucket[i].chain); fnhe;
3024 		     fnhe = rcu_dereference(fnhe->fnhe_next)) {
3025 			struct rtable *rt;
3026 			int err;
3027 
3028 			if (*fa_index < fa_start)
3029 				goto next;
3030 
3031 			if (fnhe->fnhe_genid != genid)
3032 				goto next;
3033 
3034 			if (fnhe->fnhe_expires &&
3035 			    time_after(jiffies, fnhe->fnhe_expires))
3036 				goto next;
3037 
3038 			rt = rcu_dereference(fnhe->fnhe_rth_input);
3039 			if (!rt)
3040 				rt = rcu_dereference(fnhe->fnhe_rth_output);
3041 			if (!rt)
3042 				goto next;
3043 
3044 			err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
3045 					   table_id, NULL, skb,
3046 					   NETLINK_CB(cb->skb).portid,
3047 					   cb->nlh->nlmsg_seq, flags);
3048 			if (err)
3049 				return err;
3050 next:
3051 			(*fa_index)++;
3052 		}
3053 	}
3054 
3055 	return 0;
3056 }
3057 
3058 int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
3059 		       u32 table_id, struct fib_info *fi,
3060 		       int *fa_index, int fa_start, unsigned int flags)
3061 {
3062 	struct net *net = sock_net(cb->skb->sk);
3063 	int nhsel, genid = fnhe_genid(net);
3064 
3065 	for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
3066 		struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);
3067 		struct fnhe_hash_bucket *bucket;
3068 		int err;
3069 
3070 		if (nhc->nhc_flags & RTNH_F_DEAD)
3071 			continue;
3072 
3073 		rcu_read_lock();
3074 		bucket = rcu_dereference(nhc->nhc_exceptions);
3075 		err = 0;
3076 		if (bucket)
3077 			err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
3078 					       genid, fa_index, fa_start,
3079 					       flags);
3080 		rcu_read_unlock();
3081 		if (err)
3082 			return err;
3083 	}
3084 
3085 	return 0;
3086 }
3087 
3088 static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
3089 						   u8 ip_proto, __be16 sport,
3090 						   __be16 dport)
3091 {
3092 	struct sk_buff *skb;
3093 	struct iphdr *iph;
3094 
3095 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3096 	if (!skb)
3097 		return NULL;
3098 
3099 	/* Reserve room for dummy headers; this skb can pass
3100 	 * through a good chunk of the routing engine.
3101 	 */
3102 	skb_reset_mac_header(skb);
3103 	skb_reset_network_header(skb);
3104 	skb->protocol = htons(ETH_P_IP);
3105 	iph = skb_put(skb, sizeof(struct iphdr));
3106 	iph->protocol = ip_proto;
3107 	iph->saddr = src;
3108 	iph->daddr = dst;
3109 	iph->version = 0x4;
3110 	iph->frag_off = 0;
3111 	iph->ihl = 0x5;
3112 	skb_set_transport_header(skb, skb->len);
3113 
3114 	switch (iph->protocol) {
3115 	case IPPROTO_UDP: {
3116 		struct udphdr *udph;
3117 
3118 		udph = skb_put_zero(skb, sizeof(struct udphdr));
3119 		udph->source = sport;
3120 		udph->dest = dport;
3121 		udph->len = htons(sizeof(struct udphdr));
3122 		udph->check = 0;
3123 		break;
3124 	}
3125 	case IPPROTO_TCP: {
3126 		struct tcphdr *tcph;
3127 
3128 		tcph = skb_put_zero(skb, sizeof(struct tcphdr));
3129 		tcph->source	= sport;
3130 		tcph->dest	= dport;
3131 		tcph->doff	= sizeof(struct tcphdr) / 4;
3132 		tcph->rst = 1;
3133 		tcph->check = ~tcp_v4_check(sizeof(struct tcphdr),
3134 					    src, dst, 0);
3135 		break;
3136 	}
3137 	case IPPROTO_ICMP: {
3138 		struct icmphdr *icmph;
3139 
3140 		icmph = skb_put_zero(skb, sizeof(struct icmphdr));
3141 		icmph->type = ICMP_ECHO;
3142 		icmph->code = 0;
3143 	}
3144 	}
3145 
3146 	return skb;
3147 }
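
/* Example (sketch): for a route get request on a UDP flow, the dummy skb
 * built above carries an iphdr for (src, dst, IPPROTO_UDP) plus a
 * zero-checksum udphdr -- just enough for the routing code and hooks to
 * classify it:
 *
 *	skb = inet_rtm_getroute_build_skb(src, dst, IPPROTO_UDP,
 *					  sport, dport);
 */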
3148 
3149 static int inet_rtm_valid_getroute_req(struct sk_buff *skb,
3150 				       const struct nlmsghdr *nlh,
3151 				       struct nlattr **tb,
3152 				       struct netlink_ext_ack *extack)
3153 {
3154 	struct rtmsg *rtm;
3155 	int i, err;
3156 
3157 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
3158 		NL_SET_ERR_MSG(extack,
3159 			       "ipv4: Invalid header for route get request");
3160 		return -EINVAL;
3161 	}
3162 
3163 	if (!netlink_strict_get_check(skb))
3164 		return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
3165 					      rtm_ipv4_policy, extack);
3166 
3167 	rtm = nlmsg_data(nlh);
3168 	if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
3169 	    (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
3170 	    rtm->rtm_table || rtm->rtm_protocol ||
3171 	    rtm->rtm_scope || rtm->rtm_type) {
3172 		NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for route get request");
3173 		return -EINVAL;
3174 	}
3175 
3176 	if (rtm->rtm_flags & ~(RTM_F_NOTIFY |
3177 			       RTM_F_LOOKUP_TABLE |
3178 			       RTM_F_FIB_MATCH)) {
3179 		NL_SET_ERR_MSG(extack, "ipv4: Unsupported rtm_flags for route get request");
3180 		return -EINVAL;
3181 	}
3182 
3183 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
3184 					    rtm_ipv4_policy, extack);
3185 	if (err)
3186 		return err;
3187 
3188 	if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
3189 	    (tb[RTA_DST] && !rtm->rtm_dst_len)) {
3190 		NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
3191 		return -EINVAL;
3192 	}
3193 
3194 	for (i = 0; i <= RTA_MAX; i++) {
3195 		if (!tb[i])
3196 			continue;
3197 
3198 		switch (i) {
3199 		case RTA_IIF:
3200 		case RTA_OIF:
3201 		case RTA_SRC:
3202 		case RTA_DST:
3203 		case RTA_IP_PROTO:
3204 		case RTA_SPORT:
3205 		case RTA_DPORT:
3206 		case RTA_MARK:
3207 		case RTA_UID:
3208 			break;
3209 		default:
3210 			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in route get request");
3211 			return -EINVAL;
3212 		}
3213 	}
3214 
3215 	return 0;
3216 }
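
/* Example (sketch): a strict "ip route get" style request passes the
 * checks above with rtm_dst_len == 32, every other rtmsg field zero and
 * only attributes from the allowed set (RTA_DST, RTA_SRC, RTA_IIF,
 * RTA_OIF, RTA_IP_PROTO, RTA_SPORT, RTA_DPORT, RTA_MARK, RTA_UID):
 *
 *	struct rtmsg req = {
 *		.rtm_family  = AF_INET,
 *		.rtm_dst_len = 32,
 *	};
 */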
3217 
3218 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3219 			     struct netlink_ext_ack *extack)
3220 {
3221 	struct net *net = sock_net(in_skb->sk);
3222 	struct nlattr *tb[RTA_MAX+1];
3223 	u32 table_id = RT_TABLE_MAIN;
3224 	__be16 sport = 0, dport = 0;
3225 	struct fib_result res = {};
3226 	u8 ip_proto = IPPROTO_UDP;
3227 	struct rtable *rt = NULL;
3228 	struct sk_buff *skb;
3229 	struct rtmsg *rtm;
3230 	struct flowi4 fl4 = {};
3231 	__be32 dst = 0;
3232 	__be32 src = 0;
3233 	kuid_t uid;
3234 	u32 iif;
3235 	int err;
3236 	int mark;
3237 
3238 	err = inet_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
3239 	if (err < 0)
3240 		return err;
3241 
3242 	rtm = nlmsg_data(nlh);
3243 	src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
3244 	dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
3245 	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
3246 	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
3247 	if (tb[RTA_UID])
3248 		uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
3249 	else
3250 		uid = (iif ? INVALID_UID : current_uid());
3251 
3252 	if (tb[RTA_IP_PROTO]) {
3253 		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
3254 						  &ip_proto, AF_INET, extack);
3255 		if (err)
3256 			return err;
3257 	}
3258 
3259 	if (tb[RTA_SPORT])
3260 		sport = nla_get_be16(tb[RTA_SPORT]);
3261 
3262 	if (tb[RTA_DPORT])
3263 		dport = nla_get_be16(tb[RTA_DPORT]);
3264 
3265 	skb = inet_rtm_getroute_build_skb(src, dst, ip_proto, sport, dport);
3266 	if (!skb)
3267 		return -ENOBUFS;
3268 
3269 	fl4.daddr = dst;
3270 	fl4.saddr = src;
3271 	fl4.flowi4_tos = rtm->rtm_tos & IPTOS_RT_MASK;
3272 	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
3273 	fl4.flowi4_mark = mark;
3274 	fl4.flowi4_uid = uid;
3275 	if (sport)
3276 		fl4.fl4_sport = sport;
3277 	if (dport)
3278 		fl4.fl4_dport = dport;
3279 	fl4.flowi4_proto = ip_proto;
3280 
3281 	rcu_read_lock();
3282 
3283 	if (iif) {
3284 		struct net_device *dev;
3285 
3286 		dev = dev_get_by_index_rcu(net, iif);
3287 		if (!dev) {
3288 			err = -ENODEV;
3289 			goto errout_rcu;
3290 		}
3291 
3292 		fl4.flowi4_iif = iif; /* for rt_fill_info */
3293 		skb->dev	= dev;
3294 		skb->mark	= mark;
3295 		err = ip_route_input_rcu(skb, dst, src,
3296 					 rtm->rtm_tos & IPTOS_RT_MASK, dev,
3297 					 &res);
3298 
3299 		rt = skb_rtable(skb);
3300 		if (err == 0 && rt->dst.error)
3301 			err = -rt->dst.error;
3302 	} else {
3303 		fl4.flowi4_iif = LOOPBACK_IFINDEX;
3304 		skb->dev = net->loopback_dev;
3305 		rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
3306 		err = 0;
3307 		if (IS_ERR(rt))
3308 			err = PTR_ERR(rt);
3309 		else
3310 			skb_dst_set(skb, &rt->dst);
3311 	}
3312 
3313 	if (err)
3314 		goto errout_rcu;
3315 
3316 	if (rtm->rtm_flags & RTM_F_NOTIFY)
3317 		rt->rt_flags |= RTCF_NOTIFY;
3318 
3319 	if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
3320 		table_id = res.table ? res.table->tb_id : 0;
3321 
3322 	/* reset skb for netlink reply msg */
3323 	skb_trim(skb, 0);
3324 	skb_reset_network_header(skb);
3325 	skb_reset_transport_header(skb);
3326 	skb_reset_mac_header(skb);
3327 
3328 	if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
3329 		struct fib_rt_info fri;
3330 
3331 		if (!res.fi) {
3332 			err = fib_props[res.type].error;
3333 			if (!err)
3334 				err = -EHOSTUNREACH;
3335 			goto errout_rcu;
3336 		}
3337 		fri.fi = res.fi;
3338 		fri.tb_id = table_id;
3339 		fri.dst = res.prefix;
3340 		fri.dst_len = res.prefixlen;
3341 		fri.dscp = inet_dsfield_to_dscp(fl4.flowi4_tos);
3342 		fri.type = rt->rt_type;
3343 		fri.offload = 0;
3344 		fri.trap = 0;
3345 		fri.offload_failed = 0;
3346 		if (res.fa_head) {
3347 			struct fib_alias *fa;
3348 
3349 			hlist_for_each_entry_rcu(fa, res.fa_head, fa_list) {
3350 				u8 slen = 32 - fri.dst_len;
3351 
3352 				if (fa->fa_slen == slen &&
3353 				    fa->tb_id == fri.tb_id &&
3354 				    fa->fa_dscp == fri.dscp &&
3355 				    fa->fa_info == res.fi &&
3356 				    fa->fa_type == fri.type) {
3357 					fri.offload = READ_ONCE(fa->offload);
3358 					fri.trap = READ_ONCE(fa->trap);
3359 					fri.offload_failed =
3360 						READ_ONCE(fa->offload_failed);
3361 					break;
3362 				}
3363 			}
3364 		}
3365 		err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
3366 				    nlh->nlmsg_seq, RTM_NEWROUTE, &fri, 0);
3367 	} else {
3368 		err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
3369 				   NETLINK_CB(in_skb).portid,
3370 				   nlh->nlmsg_seq, 0);
3371 	}
3372 	if (err < 0)
3373 		goto errout_rcu;
3374 
3375 	rcu_read_unlock();
3376 
3377 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3378 
3379 errout_free:
3380 	return err;
3381 errout_rcu:
3382 	rcu_read_unlock();
3383 	kfree_skb(skb);
3384 	goto errout_free;
3385 }
3386 
3387 void ip_rt_multicast_event(struct in_device *in_dev)
3388 {
3389 	rt_cache_flush(dev_net(in_dev->dev));
3390 }
3391 
3392 #ifdef CONFIG_SYSCTL
3393 static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
3394 static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
3395 static int ip_rt_gc_elasticity __read_mostly	= 8;
3396 static int ip_min_valid_pmtu __read_mostly	= IPV4_MIN_MTU;
3397 
3398 static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
3399 		void *buffer, size_t *lenp, loff_t *ppos)
3400 {
3401 	struct net *net = (struct net *)__ctl->extra1;
3402 
3403 	if (write) {
3404 		rt_cache_flush(net);
3405 		fnhe_genid_bump(net);
3406 		return 0;
3407 	}
3408 
3409 	return -EINVAL;
3410 }
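
/* Any write to /proc/sys/net/ipv4/route/flush lands here: it flushes the
 * cache and bumps the fnhe genid so cached exceptions are invalidated,
 * e.g. (sketch):
 *
 *	echo 1 > /proc/sys/net/ipv4/route/flush
 *
 * Reads fail with -EINVAL; the entry is registered write-only (0200).
 */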
3411 
3412 static struct ctl_table ipv4_route_table[] = {
3413 	{
3414 		.procname	= "gc_thresh",
3415 		.data		= &ipv4_dst_ops.gc_thresh,
3416 		.maxlen		= sizeof(int),
3417 		.mode		= 0644,
3418 		.proc_handler	= proc_dointvec,
3419 	},
3420 	{
3421 		.procname	= "max_size",
3422 		.data		= &ip_rt_max_size,
3423 		.maxlen		= sizeof(int),
3424 		.mode		= 0644,
3425 		.proc_handler	= proc_dointvec,
3426 	},
3427 	{
3428 		/* Deprecated. Use gc_min_interval_ms */
3429 
3430 		.procname	= "gc_min_interval",
3431 		.data		= &ip_rt_gc_min_interval,
3432 		.maxlen		= sizeof(int),
3433 		.mode		= 0644,
3434 		.proc_handler	= proc_dointvec_jiffies,
3435 	},
3436 	{
3437 		.procname	= "gc_min_interval_ms",
3438 		.data		= &ip_rt_gc_min_interval,
3439 		.maxlen		= sizeof(int),
3440 		.mode		= 0644,
3441 		.proc_handler	= proc_dointvec_ms_jiffies,
3442 	},
3443 	{
3444 		.procname	= "gc_timeout",
3445 		.data		= &ip_rt_gc_timeout,
3446 		.maxlen		= sizeof(int),
3447 		.mode		= 0644,
3448 		.proc_handler	= proc_dointvec_jiffies,
3449 	},
3450 	{
3451 		.procname	= "gc_interval",
3452 		.data		= &ip_rt_gc_interval,
3453 		.maxlen		= sizeof(int),
3454 		.mode		= 0644,
3455 		.proc_handler	= proc_dointvec_jiffies,
3456 	},
3457 	{
3458 		.procname	= "redirect_load",
3459 		.data		= &ip_rt_redirect_load,
3460 		.maxlen		= sizeof(int),
3461 		.mode		= 0644,
3462 		.proc_handler	= proc_dointvec,
3463 	},
3464 	{
3465 		.procname	= "redirect_number",
3466 		.data		= &ip_rt_redirect_number,
3467 		.maxlen		= sizeof(int),
3468 		.mode		= 0644,
3469 		.proc_handler	= proc_dointvec,
3470 	},
3471 	{
3472 		.procname	= "redirect_silence",
3473 		.data		= &ip_rt_redirect_silence,
3474 		.maxlen		= sizeof(int),
3475 		.mode		= 0644,
3476 		.proc_handler	= proc_dointvec,
3477 	},
3478 	{
3479 		.procname	= "error_cost",
3480 		.data		= &ip_rt_error_cost,
3481 		.maxlen		= sizeof(int),
3482 		.mode		= 0644,
3483 		.proc_handler	= proc_dointvec,
3484 	},
3485 	{
3486 		.procname	= "error_burst",
3487 		.data		= &ip_rt_error_burst,
3488 		.maxlen		= sizeof(int),
3489 		.mode		= 0644,
3490 		.proc_handler	= proc_dointvec,
3491 	},
3492 	{
3493 		.procname	= "gc_elasticity",
3494 		.data		= &ip_rt_gc_elasticity,
3495 		.maxlen		= sizeof(int),
3496 		.mode		= 0644,
3497 		.proc_handler	= proc_dointvec,
3498 	},
3499 };
3500 
3501 static const char ipv4_route_flush_procname[] = "flush";
3502 
3503 static struct ctl_table ipv4_route_netns_table[] = {
3504 	{
3505 		.procname	= ipv4_route_flush_procname,
3506 		.maxlen		= sizeof(int),
3507 		.mode		= 0200,
3508 		.proc_handler	= ipv4_sysctl_rtcache_flush,
3509 	},
3510 	{
3511 		.procname       = "min_pmtu",
3512 		.data           = &init_net.ipv4.ip_rt_min_pmtu,
3513 		.maxlen         = sizeof(int),
3514 		.mode           = 0644,
3515 		.proc_handler   = proc_dointvec_minmax,
3516 		.extra1         = &ip_min_valid_pmtu,
3517 	},
3518 	{
3519 		.procname       = "mtu_expires",
3520 		.data           = &init_net.ipv4.ip_rt_mtu_expires,
3521 		.maxlen         = sizeof(int),
3522 		.mode           = 0644,
3523 		.proc_handler   = proc_dointvec_jiffies,
3524 	},
3525 	{
3526 		.procname   = "min_adv_mss",
3527 		.data       = &init_net.ipv4.ip_rt_min_advmss,
3528 		.maxlen     = sizeof(int),
3529 		.mode       = 0644,
3530 		.proc_handler   = proc_dointvec,
3531 	},
3532 };
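
/* These per-netns entries surface under /proc/sys/net/ipv4/route/, e.g.
 * min_pmtu and mtu_expires; sysctl_route_net_init() below duplicates the
 * table and re-points .data into each non-init netns.
 */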
3533 
3534 static __net_init int sysctl_route_net_init(struct net *net)
3535 {
3536 	struct ctl_table *tbl;
3537 	size_t table_size = ARRAY_SIZE(ipv4_route_netns_table);
3538 
3539 	tbl = ipv4_route_netns_table;
3540 	if (!net_eq(net, &init_net)) {
3541 		int i;
3542 
3543 		tbl = kmemdup(tbl, sizeof(ipv4_route_netns_table), GFP_KERNEL);
3544 		if (!tbl)
3545 			goto err_dup;
3546 
3547 		/* Don't export non-whitelisted sysctls to unprivileged users */
3548 		if (net->user_ns != &init_user_ns) {
3549 			if (tbl[0].procname != ipv4_route_flush_procname)
3550 				table_size = 0;
3551 		}
3552 
3553 		/* Update the variables to point into the current struct net
3554 		 * except for the first element, "flush".
3555 		 */
3556 		for (i = 1; i < table_size; i++)
3557 			tbl[i].data += (void *)net - (void *)&init_net;
3558 	}
3559 	tbl[0].extra1 = net;
3560 
3561 	net->ipv4.route_hdr = register_net_sysctl_sz(net, "net/ipv4/route",
3562 						     tbl, table_size);
3563 	if (!net->ipv4.route_hdr)
3564 		goto err_reg;
3565 	return 0;
3566 
3567 err_reg:
3568 	if (tbl != ipv4_route_netns_table)
3569 		kfree(tbl);
3570 err_dup:
3571 	return -ENOMEM;
3572 }
3573 
3574 static __net_exit void sysctl_route_net_exit(struct net *net)
3575 {
3576 	const struct ctl_table *tbl;
3577 
3578 	tbl = net->ipv4.route_hdr->ctl_table_arg;
3579 	unregister_net_sysctl_table(net->ipv4.route_hdr);
3580 	BUG_ON(tbl == ipv4_route_netns_table);
3581 	kfree(tbl);
3582 }
3583 
3584 static __net_initdata struct pernet_operations sysctl_route_ops = {
3585 	.init = sysctl_route_net_init,
3586 	.exit = sysctl_route_net_exit,
3587 };
3588 #endif
3589 
3590 static __net_init int netns_ip_rt_init(struct net *net)
3591 {
3592 	/* Set default values for the namespaced sysctls */
3593 	net->ipv4.ip_rt_min_pmtu = DEFAULT_MIN_PMTU;
3594 	net->ipv4.ip_rt_mtu_expires = DEFAULT_MTU_EXPIRES;
3595 	net->ipv4.ip_rt_min_advmss = DEFAULT_MIN_ADVMSS;
3596 	return 0;
3597 }
3598 
3599 static struct pernet_operations __net_initdata ip_rt_ops = {
3600 	.init = netns_ip_rt_init,
3601 };
3602 
3603 static __net_init int rt_genid_init(struct net *net)
3604 {
3605 	atomic_set(&net->ipv4.rt_genid, 0);
3606 	atomic_set(&net->fnhe_genid, 0);
3607 	atomic_set(&net->ipv4.dev_addr_genid, get_random_u32());
3608 	return 0;
3609 }
3610 
3611 static __net_initdata struct pernet_operations rt_genid_ops = {
3612 	.init = rt_genid_init,
3613 };
3614 
3615 static int __net_init ipv4_inetpeer_init(struct net *net)
3616 {
3617 	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3618 
3619 	if (!bp)
3620 		return -ENOMEM;
3621 	inet_peer_base_init(bp);
3622 	net->ipv4.peers = bp;
3623 	return 0;
3624 }
3625 
3626 static void __net_exit ipv4_inetpeer_exit(struct net *net)
3627 {
3628 	struct inet_peer_base *bp = net->ipv4.peers;
3629 
3630 	net->ipv4.peers = NULL;
3631 	inetpeer_invalidate_tree(bp);
3632 	kfree(bp);
3633 }
3634 
3635 static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
3636 	.init	=	ipv4_inetpeer_init,
3637 	.exit	=	ipv4_inetpeer_exit,
3638 };
3639 
3640 #ifdef CONFIG_IP_ROUTE_CLASSID
3641 struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
3642 #endif /* CONFIG_IP_ROUTE_CLASSID */
3643 
3644 int __init ip_rt_init(void)
3645 {
3646 	void *idents_hash;
3647 	int cpu;
3648 
3649 	/* For modern hosts, this will use 2 MB of memory */
3650 	idents_hash = alloc_large_system_hash("IP idents",
3651 					      sizeof(*ip_idents) + sizeof(*ip_tstamps),
3652 					      0,
3653 					      16, /* one bucket per 64 KB */
3654 					      HASH_ZERO,
3655 					      NULL,
3656 					      &ip_idents_mask,
3657 					      2048,
3658 					      256*1024);
3659 
3660 	ip_idents = idents_hash;
3661 
3662 	get_random_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents));
3663 
3664 	ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents);
3665 
3666 	for_each_possible_cpu(cpu) {
3667 		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
3668 
3669 		INIT_LIST_HEAD(&ul->head);
3670 		INIT_LIST_HEAD(&ul->quarantine);
3671 		spin_lock_init(&ul->lock);
3672 	}
3673 #ifdef CONFIG_IP_ROUTE_CLASSID
3674 	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
3675 	if (!ip_rt_acct)
3676 		panic("IP: failed to allocate ip_rt_acct\n");
3677 #endif
3678 
3679 	ipv4_dst_ops.kmem_cachep = KMEM_CACHE(rtable,
3680 					      SLAB_HWCACHE_ALIGN | SLAB_PANIC);
3681 
3682 	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3683 
3684 	if (dst_entries_init(&ipv4_dst_ops) < 0)
3685 		panic("IP: failed to allocate ipv4_dst_ops counter\n");
3686 
3687 	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3688 		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
3689 
3690 	ipv4_dst_ops.gc_thresh = ~0;
3691 	ip_rt_max_size = INT_MAX;
3692 
3693 	devinet_init();
3694 	ip_fib_init();
3695 
3696 	if (ip_rt_proc_init())
3697 		pr_err("Unable to create route proc files\n");
3698 #ifdef CONFIG_XFRM
3699 	xfrm_init();
3700 	xfrm4_init();
3701 #endif
3702 	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL,
3703 		      RTNL_FLAG_DOIT_UNLOCKED);
3704 
3705 #ifdef CONFIG_SYSCTL
3706 	register_pernet_subsys(&sysctl_route_ops);
3707 #endif
3708 	register_pernet_subsys(&ip_rt_ops);
3709 	register_pernet_subsys(&rt_genid_ops);
3710 	register_pernet_subsys(&ipv4_inetpeer_ops);
3711 	return 0;
3712 }
3713 
3714 #ifdef CONFIG_SYSCTL
3715 /*
3716  * We really need to sanitize the damn ipv4 init order, then all
3717  * this nonsense will go away.
3718  */
3719 void __init ip_static_sysctl_init(void)
3720 {
3721 	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
3722 }
3723 #endif
3724