/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	: 	MSS actually. Also added the window
 *					clamper.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 * 		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 * 	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 * 	Ilia Sotnikov		:	Removed TOS from hash calculations
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <net/dst.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

#define RT_FL_TOS(oldflp4) \
    ((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))

#define IP_MAX_MTU	0xFFF0

#define RT_GC_TIMEOUT (300*HZ)

static int ip_rt_max_size;
static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
static int ip_rt_gc_interval __read_mostly	= 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;
static int ip_rt_gc_elasticity __read_mostly	= 8;
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly	= 256;
static int rt_chain_length_max __read_mostly	= 20;
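
/*
 * All of the intervals above are expressed in jiffies.  A worked example,
 * assuming HZ=1000: ip_rt_gc_min_interval = HZ/2 is 500 ms, and
 * ip_rt_redirect_silence = (HZ/50) << 10 = 1024*HZ/50, i.e. roughly
 * 20 seconds independent of the exact HZ value.
 */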

/*
 *	Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int	 ipv4_default_mtu(const struct dst_entry *dst);
static void		 ipv4_dst_destroy(struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
static int rt_garbage_collect(struct dst_ops *ops);

static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			    int how)
{
}

static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	struct rtable *rt = (struct rtable *) dst;
	struct inet_peer *peer;
	u32 *p = NULL;

	if (!rt->peer)
		rt_bind_peer(rt, rt->rt_dst, 1);

	peer = rt->peer;
	if (peer) {
		u32 *old_p = __DST_METRICS_PTR(old);
		unsigned long prev, new;

		p = peer->metrics;
		if (inet_metrics_new(peer))
			memcpy(p, old_p, sizeof(u32) * RTAX_MAX);

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			p = __DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		} else {
			if (rt->fi) {
				fib_info_put(rt->fi);
				rt->fi = NULL;
			}
		}
	}
	return p;
}

static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.protocol =		cpu_to_be16(ETH_P_IP),
	.gc =			rt_garbage_collect,
	.check =		ipv4_dst_check,
	.default_advmss =	ipv4_default_advmss,
	.default_mtu =		ipv4_default_mtu,
	.cow_metrics =		ipv4_cow_metrics,
	.destroy =		ipv4_dst_destroy,
	.ifdown =		ipv4_dst_ifdown,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.local_out =		__ip_local_out,
};

#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
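
/*
 * Example lookup (see rt_tos2priority() in <net/route.h>): the table is
 * indexed by IPTOS_TOS(tos) >> 1, so a packet sent with TOS 0x10
 * (IPTOS_LOWDELAY) indexes slot 8 and is queued at TC_PRIO_INTERACTIVE.
 */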

/*
 * Route cache.
 */

/* The locking scheme is rather straightforward:
 *
 * 1) Read-Copy Update protects the buckets of the central route hash.
 * 2) Only writers remove entries, and they hold the lock
 *    as they look at rtable reference counts.
 * 3) Only readers acquire references to rtable entries;
 *    they do so with atomic increments and with the
 *    lock held.
 */

struct rt_hash_bucket {
	struct rtable __rcu	*chain;
};

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)
/*
 * Instead of using one spinlock for each rt_hash_bucket, we use a table
 * of spinlocks.  The size of this table is a power of two and depends on
 * the number of CPUs.
 * (on lockdep we have a quite big spinlock_t, so keep the size down there)
 */
#ifdef CONFIG_LOCKDEP
# define RT_HASH_LOCK_SZ	256
#else
# if NR_CPUS >= 32
#  define RT_HASH_LOCK_SZ	4096
# elif NR_CPUS >= 16
#  define RT_HASH_LOCK_SZ	2048
# elif NR_CPUS >= 8
#  define RT_HASH_LOCK_SZ	1024
# elif NR_CPUS >= 4
#  define RT_HASH_LOCK_SZ	512
# else
#  define RT_HASH_LOCK_SZ	256
# endif
#endif

static spinlock_t	*rt_hash_locks;
# define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]
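
/*
 * Lock striping in action: with RT_HASH_LOCK_SZ = 4096, buckets 1 and
 * 4097 map to the same spinlock (slot & 4095), so one lock guards many
 * buckets and the lock table stays small regardless of rt_hash_mask;
 * writers on unrelated buckets rarely contend in practice.
 */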

static __init void rt_hash_lock_init(void)
{
	int i;

	rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
			GFP_KERNEL);
	if (!rt_hash_locks)
		panic("IP: failed to allocate rt_hash_locks\n");

	for (i = 0; i < RT_HASH_LOCK_SZ; i++)
		spin_lock_init(&rt_hash_locks[i]);
}
#else
# define rt_hash_lock_addr(slot) NULL

static inline void rt_hash_lock_init(void)
{
}
#endif

static struct rt_hash_bucket 	*rt_hash_table __read_mostly;
static unsigned			rt_hash_mask __read_mostly;
static unsigned int		rt_hash_log  __read_mostly;

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)

static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
				   int genid)
{
	return jhash_3words((__force u32)daddr, (__force u32)saddr,
			    idx, genid)
		& rt_hash_mask;
}
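
/*
 * rt_hash_mask is (2^rt_hash_log - 1), so the jhash output is folded
 * down to a bucket index; e.g. with rt_hash_log = 17 the table has
 * 131072 buckets and the mask is 0x1ffff.  Mixing the per-netns genid
 * into the hash means a cache invalidation implicitly rehashes every
 * flow as well.
 */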

static inline int rt_genid(struct net *net)
{
	return atomic_read(&net->ipv4.rt_genid);
}

#ifdef CONFIG_PROC_FS
struct rt_cache_iter_state {
	struct seq_net_private p;
	int bucket;
	int genid;
};

static struct rtable *rt_cache_get_first(struct seq_file *seq)
{
	struct rt_cache_iter_state *st = seq->private;
	struct rtable *r = NULL;

	for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
		if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain))
			continue;
		rcu_read_lock_bh();
		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
		while (r) {
			if (dev_net(r->dst.dev) == seq_file_net(seq) &&
			    r->rt_genid == st->genid)
				return r;
			r = rcu_dereference_bh(r->dst.rt_next);
		}
		rcu_read_unlock_bh();
	}
	return r;
}

static struct rtable *__rt_cache_get_next(struct seq_file *seq,
					  struct rtable *r)
{
	struct rt_cache_iter_state *st = seq->private;

	r = rcu_dereference_bh(r->dst.rt_next);
	while (!r) {
		rcu_read_unlock_bh();
		do {
			if (--st->bucket < 0)
				return NULL;
		} while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain));
		rcu_read_lock_bh();
		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
	}
	return r;
}

static struct rtable *rt_cache_get_next(struct seq_file *seq,
					struct rtable *r)
{
	struct rt_cache_iter_state *st = seq->private;
	while ((r = __rt_cache_get_next(seq, r)) != NULL) {
		if (dev_net(r->dst.dev) != seq_file_net(seq))
			continue;
		if (r->rt_genid == st->genid)
			break;
	}
	return r;
}

static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
{
	struct rtable *r = rt_cache_get_first(seq);

	if (r)
		while (pos && (r = rt_cache_get_next(seq, r)))
			--pos;
	return pos ? NULL : r;
}

static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct rt_cache_iter_state *st = seq->private;
	if (*pos)
		return rt_cache_get_idx(seq, *pos - 1);
	st->genid = rt_genid(seq_file_net(seq));
	return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct rtable *r;

	if (v == SEQ_START_TOKEN)
		r = rt_cache_get_first(seq);
	else
		r = rt_cache_get_next(seq, v);
	++*pos;
	return r;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
	if (v && v != SEQ_START_TOKEN)
		rcu_read_unlock_bh();
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	else {
		struct rtable *r = v;
		int len;

		seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
			      "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
			r->dst.dev ? r->dst.dev->name : "*",
			(__force u32)r->rt_dst,
			(__force u32)r->rt_gateway,
			r->rt_flags, atomic_read(&r->dst.__refcnt),
			r->dst.__use, 0, (__force u32)r->rt_src,
			dst_metric_advmss(&r->dst) + 40,
			dst_metric(&r->dst, RTAX_WINDOW),
			(int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
			      dst_metric(&r->dst, RTAX_RTTVAR)),
			r->rt_key_tos,
			r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1,
			r->dst.hh ? (r->dst.hh->hh_output ==
				       dev_queue_xmit) : 0,
			r->rt_spec_dst, &len);

		seq_printf(seq, "%*s\n", 127 - len, "");
	}
	return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
	.start  = rt_cache_seq_start,
	.next   = rt_cache_seq_next,
	.stop   = rt_cache_seq_stop,
	.show   = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &rt_cache_seq_ops,
			sizeof(struct rt_cache_iter_state));
}

static const struct file_operations rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};


static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   dst_entries_get_slow(&ipv4_dst_ops),
		   st->in_hit,
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,

		   st->out_hit,
		   st->out_slow_tot,
		   st->out_slow_mc,

		   st->gc_total,
		   st->gc_ignored,
		   st->gc_goal_miss,
		   st->gc_dst_overflow,
		   st->in_hlist_search,
		   st->out_hlist_search
		);
	return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
	.start  = rt_cpu_seq_start,
	.next   = rt_cpu_seq_next,
	.stop   = rt_cpu_seq_stop,
	.show   = rt_cpu_seq_show,
};


static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
	struct ip_rt_acct *dst, *src;
	unsigned int i, j;

	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
		for (j = 0; j < 256; j++) {
			dst[j].o_bytes   += src[j].o_bytes;
			dst[j].o_packets += src[j].o_packets;
			dst[j].i_bytes   += src[j].i_bytes;
			dst[j].i_packets += src[j].i_packets;
		}
	}

	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
	kfree(dst);
	return 0;
}

static int rt_acct_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rt_acct_proc_show, NULL);
}

static const struct file_operations rt_acct_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= rt_acct_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif

static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
			&rt_cache_seq_fops);
	if (!pde)
		goto err1;

	pde = proc_create("rt_cache", S_IRUGO,
			  net->proc_net_stat, &rt_cpu_seq_fops);
	if (!pde)
		goto err2;

#ifdef CONFIG_IP_ROUTE_CLASSID
	pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
	remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata =  {
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */

static inline void rt_free(struct rtable *rt)
{
	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static inline void rt_drop(struct rtable *rt)
{
	ip_rt_put(rt);
	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static inline int rt_fast_clean(struct rtable *rth)
{
	/* Kill broadcast/multicast entries very aggressively, if they
	   collide in hash table with more useful entries */
	return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
		rt_is_input_route(rth) && rth->dst.rt_next;
}

static inline int rt_valuable(struct rtable *rth)
{
	return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
		(rth->peer && rth->peer->pmtu_expires);
}

static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
{
	unsigned long age;
	int ret = 0;

	if (atomic_read(&rth->dst.__refcnt))
		goto out;

	age = jiffies - rth->dst.lastuse;
	if ((age <= tmo1 && !rt_fast_clean(rth)) ||
	    (age <= tmo2 && rt_valuable(rth)))
		goto out;
	ret = 1;
out:	return ret;
}
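
/*
 * In other words: an entry still referenced never expires here; an
 * unreferenced entry survives while it is younger than tmo1 (or tmo2,
 * if it is "valuable"), and since the GC caller halves tmo per
 * surviving entry, crowded chains are pruned more aggressively.
 */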

/* Bits of score are:
 * 31: very valuable
 * 30: not quite useless
 * 29..0: usage counter
 */
static inline u32 rt_score(struct rtable *rt)
{
	u32 score = jiffies - rt->dst.lastuse;

	score = ~score & ~(3<<30);

	if (rt_valuable(rt))
		score |= (1<<31);

	if (rt_is_output_route(rt) ||
	    !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
		score |= (1<<30);

	return score;
}
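
/*
 * Worked example: an output route idle for 100 jiffies and not otherwise
 * valuable scores (~100 & ~(3<<30)) | (1<<30) -- bit 30 set, bit 31
 * clear.  Because rt_intern_hash() evicts the entry with the *lowest*
 * score, valuable and recently used entries are kept longest.
 */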

static inline bool rt_caching(const struct net *net)
{
	return net->ipv4.current_rt_cache_rebuild_count <=
		net->ipv4.sysctl_rt_cache_rebuild_count;
}

static inline bool compare_hash_inputs(const struct rtable *rt1,
				       const struct rtable *rt2)
{
	return ((((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
		((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
		(rt1->rt_iif ^ rt2->rt_iif)) == 0);
}

static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
{
	return (((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
		((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
		(rt1->rt_mark ^ rt2->rt_mark) |
		(rt1->rt_key_tos ^ rt2->rt_key_tos) |
		(rt1->rt_oif ^ rt2->rt_oif) |
		(rt1->rt_iif ^ rt2->rt_iif)) == 0;
}

static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
{
	return net_eq(dev_net(rt1->dst.dev), dev_net(rt2->dst.dev));
}

static inline int rt_is_expired(struct rtable *rth)
{
	return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
}

/*
 * Perform a full scan of the hash table and free all entries.
 * Can be called by a softirq or a process.
 * In the latter case, we want to reschedule if necessary.
 */
static void rt_do_flush(struct net *net, int process_context)
{
	unsigned int i;
	struct rtable *rth, *next;

	for (i = 0; i <= rt_hash_mask; i++) {
		struct rtable __rcu **pprev;
		struct rtable *list;

		if (process_context && need_resched())
			cond_resched();
		rth = rcu_dereference_raw(rt_hash_table[i].chain);
		if (!rth)
			continue;

		spin_lock_bh(rt_hash_lock_addr(i));

		list = NULL;
		pprev = &rt_hash_table[i].chain;
		rth = rcu_dereference_protected(*pprev,
			lockdep_is_held(rt_hash_lock_addr(i)));

		while (rth) {
			next = rcu_dereference_protected(rth->dst.rt_next,
				lockdep_is_held(rt_hash_lock_addr(i)));

			if (!net ||
			    net_eq(dev_net(rth->dst.dev), net)) {
				rcu_assign_pointer(*pprev, next);
				rcu_assign_pointer(rth->dst.rt_next, list);
				list = rth;
			} else {
				pprev = &rth->dst.rt_next;
			}
			rth = next;
		}

		spin_unlock_bh(rt_hash_lock_addr(i));

		for (; list; list = next) {
			next = rcu_dereference_protected(list->dst.rt_next, 1);
			rt_free(list);
		}
	}
}

/*
 * While freeing expired entries, we compute average chain length
 * and standard deviation, using fixed-point arithmetic.
 * This gives an estimation of rt_chain_length_max:
 *  rt_chain_length_max = max(elasticity, AVG + 4*SD)
 * We use 3 bits for the fractional part, and 29 (or 61) for magnitude.
 */

#define FRACT_BITS 3
#define ONE (1UL << FRACT_BITS)
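
/*
 * Fixed-point example: with FRACT_BITS = 3, ONE is 8, so the value 20
 * in this representation means 20/8 = 2.5.  has_noalias() below returns
 * ONE per distinct entry precisely so that slow_chain_length() can sum
 * in fixed point and shift the fraction away at the end.
 */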

/*
 * Given a hash chain and an item in this hash chain,
 * find if a previous entry has the same hash_inputs
 * (but differs on tos, mark or oif).
 * Returns 0 if an alias is found.
 * Returns ONE if rth has no alias before itself.
 */
static int has_noalias(const struct rtable *head, const struct rtable *rth)
{
	const struct rtable *aux = head;

	while (aux != rth) {
		if (compare_hash_inputs(aux, rth))
			return 0;
		aux = rcu_dereference_protected(aux->dst.rt_next, 1);
	}
	return ONE;
}

/*
 * Perturbation of rt_genid by a small quantity [1..256].
 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
 * many times (2^24) without reusing a recent rt_genid.
 * The Jenkins hash is strong enough that little changes of rt_genid are OK.
 */
static void rt_cache_invalidate(struct net *net)
{
	unsigned char shuffle;

	get_random_bytes(&shuffle, sizeof(shuffle));
	atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
}
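
/*
 * Note that no entry is freed here: bumping rt_genid makes every cached
 * rtable fail the rt_is_expired() check, so stale entries are skipped
 * by lookups and reaped lazily by the GC or an explicit flush.
 */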

/*
 * delay < 0  : invalidate cache (fast : entries will be deleted later)
 * delay >= 0 : invalidate & flush cache (can be long)
 */
void rt_cache_flush(struct net *net, int delay)
{
	rt_cache_invalidate(net);
	if (delay >= 0)
		rt_do_flush(net, !in_softirq());
}

/* Flush previously invalidated entries from the cache */
void rt_cache_flush_batch(struct net *net)
{
	rt_do_flush(net, !in_softirq());
}

static void rt_emergency_hash_rebuild(struct net *net)
{
	if (net_ratelimit())
		printk(KERN_WARNING "Route hash chain too long!\n");
	rt_cache_invalidate(net);
}

/*
   Short description of GC goals.

   We want to build an algorithm which keeps the routing cache at some
   equilibrium point, where the number of aged-off entries stays
   approximately equal to the number of newly generated ones.

   The current expiration strength is the variable "expire".
   We try to adjust it dynamically, so that when the network is idle,
   expire is large enough to keep enough warm entries, and when load
   increases, it shrinks to limit the cache size.
 */

static int rt_garbage_collect(struct dst_ops *ops)
{
	static unsigned long expire = RT_GC_TIMEOUT;
	static unsigned long last_gc;
	static int rover;
	static int equilibrium;
	struct rtable *rth;
	struct rtable __rcu **rthp;
	unsigned long now = jiffies;
	int goal;
	int entries = dst_entries_get_fast(&ipv4_dst_ops);

	/*
	 * Garbage collection is pretty expensive,
	 * do not make it too frequently.
	 */

	RT_CACHE_STAT_INC(gc_total);

	if (now - last_gc < ip_rt_gc_min_interval &&
	    entries < ip_rt_max_size) {
		RT_CACHE_STAT_INC(gc_ignored);
		goto out;
	}

	entries = dst_entries_get_slow(&ipv4_dst_ops);
	/* Calculate number of entries, which we want to expire now. */
	goal = entries - (ip_rt_gc_elasticity << rt_hash_log);
	if (goal <= 0) {
		if (equilibrium < ipv4_dst_ops.gc_thresh)
			equilibrium = ipv4_dst_ops.gc_thresh;
		goal = entries - equilibrium;
		if (goal > 0) {
			equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
			goal = entries - equilibrium;
		}
	} else {
		/* We are in dangerous area. Try to reduce cache really
		 * aggressively.
		 */
		goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
		equilibrium = entries - goal;
	}

	if (now - last_gc >= ip_rt_gc_min_interval)
		last_gc = now;

	if (goal <= 0) {
		equilibrium += goal;
		goto work_done;
	}

	do {
		int i, k;

		for (i = rt_hash_mask, k = rover; i >= 0; i--) {
			unsigned long tmo = expire;

			k = (k + 1) & rt_hash_mask;
			rthp = &rt_hash_table[k].chain;
			spin_lock_bh(rt_hash_lock_addr(k));
			while ((rth = rcu_dereference_protected(*rthp,
					lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) {
				if (!rt_is_expired(rth) &&
					!rt_may_expire(rth, tmo, expire)) {
					tmo >>= 1;
					rthp = &rth->dst.rt_next;
					continue;
				}
				*rthp = rth->dst.rt_next;
				rt_free(rth);
				goal--;
			}
			spin_unlock_bh(rt_hash_lock_addr(k));
			if (goal <= 0)
				break;
		}
		rover = k;

		if (goal <= 0)
			goto work_done;

		/* Goal is not achieved. We stop the process if:

		   - expire was reduced to zero. Otherwise, expire is halved.
		   - the table is not full.
		   - we are called from interrupt.
		   - the jiffies check is just a fallback/debug loop breaker.
		     We will not spin here for a long time in any case.
		 */

		RT_CACHE_STAT_INC(gc_goal_miss);

		if (expire == 0)
			break;

		expire >>= 1;

		if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
			goto out;
	} while (!in_softirq() && time_before_eq(jiffies, now));

	if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
		goto out;
	if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size)
		goto out;
	if (net_ratelimit())
		printk(KERN_WARNING "dst cache overflow\n");
	RT_CACHE_STAT_INC(gc_dst_overflow);
	return 1;

work_done:
	expire += ip_rt_gc_min_interval;
	if (expire > ip_rt_gc_timeout ||
	    dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh ||
	    dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh)
		expire = ip_rt_gc_timeout;
out:	return 0;
}

/*
 * Returns the number of entries in a hash chain that have distinct hash_inputs
 */
static int slow_chain_length(const struct rtable *head)
{
	int length = 0;
	const struct rtable *rth = head;

	while (rth) {
		length += has_noalias(head, rth);
		rth = rcu_dereference_protected(rth->dst.rt_next, 1);
	}
	return length >> FRACT_BITS;
}
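
/*
 * Since has_noalias() contributes ONE (1 << FRACT_BITS) per entry that
 * has no earlier alias, the final shift turns the fixed-point sum back
 * into a plain count of distinct hash inputs in the chain.
 */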

static struct rtable *rt_intern_hash(unsigned hash, struct rtable *rt,
				     struct sk_buff *skb, int ifindex)
{
	struct rtable	*rth, *cand;
	struct rtable __rcu **rthp, **candp;
	unsigned long	now;
	u32 		min_score;
	int		chain_length;
	int attempts = !in_softirq();

restart:
	chain_length = 0;
	min_score = ~(u32)0;
	cand = NULL;
	candp = NULL;
	now = jiffies;

	if (!rt_caching(dev_net(rt->dst.dev))) {
		/*
		 * If we're not caching, just tell the caller we
		 * were successful and don't touch the route.  The
		 * caller holds the sole reference to the cache entry, and
		 * it will be released when the caller is done with it.
		 * If we drop it here, the callers have no way to resolve routes
		 * when we're not caching.  Instead, just return rt, so
		 * the caller gets a single use out of the route.
		 * Note that we do rt_free on this new route entry, so that
		 * once its refcount hits zero, we are still able to reap it
		 * (Thanks Alexey)
		 * Note: To avoid expensive rcu stuff for this uncached dst,
		 * we set DST_NOCACHE so that dst_release() can free dst without
		 * waiting a grace period.
		 */

		rt->dst.flags |= DST_NOCACHE;
		if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
			int err = arp_bind_neighbour(&rt->dst);
			if (err) {
				if (net_ratelimit())
					printk(KERN_WARNING
					    "Neighbour table failure & not caching routes.\n");
				ip_rt_put(rt);
				return ERR_PTR(err);
			}
		}

		goto skip_hashing;
	}

	rthp = &rt_hash_table[hash].chain;

	spin_lock_bh(rt_hash_lock_addr(hash));
	while ((rth = rcu_dereference_protected(*rthp,
			lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
		if (rt_is_expired(rth)) {
			*rthp = rth->dst.rt_next;
			rt_free(rth);
			continue;
		}
		if (compare_keys(rth, rt) && compare_netns(rth, rt)) {
			/* Put it first */
			*rthp = rth->dst.rt_next;
			/*
			 * Since lookup is lockfree, the deletion
			 * must be visible to another weakly ordered CPU before
			 * the insertion at the start of the hash chain.
			 */
			rcu_assign_pointer(rth->dst.rt_next,
					   rt_hash_table[hash].chain);
			/*
			 * Since lookup is lockfree, the update writes
			 * must be ordered for consistency on SMP.
			 */
			rcu_assign_pointer(rt_hash_table[hash].chain, rth);

			dst_use(&rth->dst, now);
			spin_unlock_bh(rt_hash_lock_addr(hash));

			rt_drop(rt);
			if (skb)
				skb_dst_set(skb, &rth->dst);
			return rth;
		}

		if (!atomic_read(&rth->dst.__refcnt)) {
			u32 score = rt_score(rth);

			if (score <= min_score) {
				cand = rth;
				candp = rthp;
				min_score = score;
			}
		}

		chain_length++;

		rthp = &rth->dst.rt_next;
	}

	if (cand) {
		/* ip_rt_gc_elasticity used to be the average chain length;
		 * when it is exceeded, gc becomes really aggressive.
		 *
		 * The second limit is less certain. At the moment it allows
		 * only 2 entries per bucket. We will see.
		 */
		if (chain_length > ip_rt_gc_elasticity) {
			*candp = cand->dst.rt_next;
			rt_free(cand);
		}
	} else {
		if (chain_length > rt_chain_length_max &&
		    slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
			struct net *net = dev_net(rt->dst.dev);
			int num = ++net->ipv4.current_rt_cache_rebuild_count;
			if (!rt_caching(net)) {
				printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
					rt->dst.dev->name, num);
			}
			rt_emergency_hash_rebuild(net);
			spin_unlock_bh(rt_hash_lock_addr(hash));

			hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
					ifindex, rt_genid(net));
			goto restart;
		}
	}

	/* Try to bind route to arp only if it is output
	   route or unicast forwarding path.
	 */
	if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
		int err = arp_bind_neighbour(&rt->dst);
		if (err) {
			spin_unlock_bh(rt_hash_lock_addr(hash));

			if (err != -ENOBUFS) {
				rt_drop(rt);
				return ERR_PTR(err);
			}

			/* Neighbour tables are full and nothing
			   can be released. Try to shrink route cache,
			   it is most likely it holds some neighbour records.
			 */
			if (attempts-- > 0) {
				int saved_elasticity = ip_rt_gc_elasticity;
				int saved_int = ip_rt_gc_min_interval;
				ip_rt_gc_elasticity	= 1;
				ip_rt_gc_min_interval	= 0;
				rt_garbage_collect(&ipv4_dst_ops);
				ip_rt_gc_min_interval	= saved_int;
				ip_rt_gc_elasticity	= saved_elasticity;
				goto restart;
			}

			if (net_ratelimit())
				printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
			rt_drop(rt);
			return ERR_PTR(-ENOBUFS);
		}
	}

	rt->dst.rt_next = rt_hash_table[hash].chain;

	/*
	 * Since lookup is lockfree, we must make sure
	 * previous writes to rt are committed to memory
	 * before making rt visible to other CPUS.
	 */
	rcu_assign_pointer(rt_hash_table[hash].chain, rt);

	spin_unlock_bh(rt_hash_lock_addr(hash));

skip_hashing:
	if (skb)
		skb_dst_set(skb, &rt->dst);
	return rt;
}

static atomic_t __rt_peer_genid = ATOMIC_INIT(0);

static u32 rt_peer_genid(void)
{
	return atomic_read(&__rt_peer_genid);
}

void rt_bind_peer(struct rtable *rt, __be32 daddr, int create)
{
	struct inet_peer *peer;

	peer = inet_getpeer_v4(daddr, create);

	if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
		inet_putpeer(peer);
	else
		rt->rt_peer_genid = rt_peer_genid();
}
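
/*
 * The cmpxchg() above resolves the race where two CPUs bind a peer to
 * the same rtable concurrently: the loser sees a non-NULL rt->peer and
 * drops its own reference via inet_putpeer(), while the winner's peer
 * stays attached for the lifetime of the route.
 */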

/*
 * Peer allocation may fail only in serious out-of-memory conditions.  However
 * we still can generate some output.
 * Random ID selection looks a bit dangerous because we have no chances to
 * select an ID being unique in a reasonable period of time.
 * But a broken packet identifier may be better than no packet at all.
 */
static void ip_select_fb_ident(struct iphdr *iph)
{
	static DEFINE_SPINLOCK(ip_fb_id_lock);
	static u32 ip_fallback_id;
	u32 salt;

	spin_lock_bh(&ip_fb_id_lock);
	salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
	iph->id = htons(salt & 0xFFFF);
	ip_fallback_id = salt;
	spin_unlock_bh(&ip_fb_id_lock);
}

void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
	struct rtable *rt = (struct rtable *) dst;

	if (rt) {
		if (rt->peer == NULL)
			rt_bind_peer(rt, rt->rt_dst, 1);

		/* If a peer is attached to the destination, it is never
		   detached, so we need not grab a lock to dereference it.
		 */
		if (rt->peer) {
			iph->id = htons(inet_getid(rt->peer, more));
			return;
		}
	} else
		printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
		       __builtin_return_address(0));

	ip_select_fb_ident(iph);
}
EXPORT_SYMBOL(__ip_select_ident);

static void rt_del(unsigned hash, struct rtable *rt)
{
	struct rtable __rcu **rthp;
	struct rtable *aux;

	rthp = &rt_hash_table[hash].chain;
	spin_lock_bh(rt_hash_lock_addr(hash));
	ip_rt_put(rt);
	while ((aux = rcu_dereference_protected(*rthp,
			lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
		if (aux == rt || rt_is_expired(aux)) {
			*rthp = aux->dst.rt_next;
			rt_free(aux);
			continue;
		}
		rthp = &aux->dst.rt_next;
	}
	spin_unlock_bh(rt_hash_lock_addr(hash));
}

/* called in rcu_read_lock() section */
void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
		    __be32 saddr, struct net_device *dev)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct inet_peer *peer;
	struct net *net;

	if (!in_dev)
		return;

	net = dev_net(dev);
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
	    ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	peer = inet_getpeer_v4(daddr, 1);
	if (peer) {
		peer->redirect_learned.a4 = new_gw;

		inet_putpeer(peer);

		atomic_inc(&__rt_peer_genid);
	}
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
		printk(KERN_INFO "Redirect from %pI4 on %s about %pI4 ignored.\n"
			"  Advised path = %pI4 -> %pI4\n",
		       &old_gw, dev->name, &new_gw,
		       &saddr, &daddr);
#endif
	;
}

static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;
	struct dst_entry *ret = dst;

	if (rt) {
		if (dst->obsolete > 0) {
			ip_rt_put(rt);
			ret = NULL;
		} else if (rt->rt_flags & RTCF_REDIRECTED) {
			unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
						rt->rt_oif,
						rt_genid(dev_net(dst->dev)));
			rt_del(hash, rt);
			ret = NULL;
		} else if (rt->peer &&
			   rt->peer->pmtu_expires &&
			   time_after_eq(jiffies, rt->peer->pmtu_expires)) {
			unsigned long orig = rt->peer->pmtu_expires;

			if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
				dst_metric_set(dst, RTAX_MTU,
					       rt->peer->pmtu_orig);
		}
	}
	return ret;
}

/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot the redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */

void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct in_device *in_dev;
	struct inet_peer *peer;
	int log_martians;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(rt->dst.dev);
	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
		rcu_read_unlock();
		return;
	}
	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
	rcu_read_unlock();

	if (!rt->peer)
		rt_bind_peer(rt, rt->rt_dst, 1);
	peer = rt->peer;
	if (!peer) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
		return;
	}

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
		peer->rate_tokens = 0;

	/* Too many ignored redirects; do not send anything,
	 * set dst.rate_last to the last seen redirected packet.
	 */
	if (peer->rate_tokens >= ip_rt_redirect_number) {
		peer->rate_last = jiffies;
		return;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (peer->rate_tokens == 0 ||
	    time_after(jiffies,
		       (peer->rate_last +
			(ip_rt_redirect_load << peer->rate_tokens)))) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
		peer->rate_last = jiffies;
		++peer->rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		if (log_martians &&
		    peer->rate_tokens == ip_rt_redirect_number &&
		    net_ratelimit())
			printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
			       &ip_hdr(skb)->saddr, rt->rt_iif,
				&rt->rt_dst, &rt->rt_gateway);
#endif
	}
}

static int ip_error(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct inet_peer *peer;
	unsigned long now;
	bool send;
	int code;

	switch (rt->dst.error) {
		case EINVAL:
		default:
			goto out;
		case EHOSTUNREACH:
			code = ICMP_HOST_UNREACH;
			break;
		case ENETUNREACH:
			code = ICMP_NET_UNREACH;
			IP_INC_STATS_BH(dev_net(rt->dst.dev),
					IPSTATS_MIB_INNOROUTES);
			break;
		case EACCES:
			code = ICMP_PKT_FILTERED;
			break;
	}

	if (!rt->peer)
		rt_bind_peer(rt, rt->rt_dst, 1);
	peer = rt->peer;

	send = true;
	if (peer) {
		now = jiffies;
		peer->rate_tokens += now - peer->rate_last;
		if (peer->rate_tokens > ip_rt_error_burst)
			peer->rate_tokens = ip_rt_error_burst;
		peer->rate_last = now;
		if (peer->rate_tokens >= ip_rt_error_cost)
			peer->rate_tokens -= ip_rt_error_cost;
		else
			send = false;
	}
	if (send)
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:	kfree_skb(skb);
	return 0;
}

/*
 *	The last two values are not from the RFC but
 *	are needed for AMPRnet AX.25 paths.
 */

static const unsigned short mtu_plateau[] =
{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };

static inline unsigned short guess_mtu(unsigned short old_mtu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
		if (old_mtu > mtu_plateau[i])
			return mtu_plateau[i];
	return 68;
}
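
/*
 * Example: for a 1500 byte packet whose router reported no usable
 * next-hop MTU, guess_mtu(1500) walks the plateau table and returns
 * 1492 (the classic PPPoE value); the floor of 68 is the minimum IPv4
 * MTU required by RFC 791.
 */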

unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
				 unsigned short new_mtu,
				 struct net_device *dev)
{
	unsigned short old_mtu = ntohs(iph->tot_len);
	unsigned short est_mtu = 0;
	struct inet_peer *peer;

	peer = inet_getpeer_v4(iph->daddr, 1);
	if (peer) {
		unsigned short mtu = new_mtu;

		if (new_mtu < 68 || new_mtu >= old_mtu) {
			/* BSD 4.2 derived systems incorrectly adjust
			 * tot_len by the IP header length, and report
			 * a zero MTU in the ICMP message.
			 */
			if (mtu == 0 &&
			    old_mtu >= 68 + (iph->ihl << 2))
				old_mtu -= iph->ihl << 2;
			mtu = guess_mtu(old_mtu);
		}

		if (mtu < ip_rt_min_pmtu)
			mtu = ip_rt_min_pmtu;
		if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
			unsigned long pmtu_expires;

			pmtu_expires = jiffies + ip_rt_mtu_expires;
			if (!pmtu_expires)
				pmtu_expires = 1UL;

			est_mtu = mtu;
			peer->pmtu_learned = mtu;
			peer->pmtu_expires = pmtu_expires;
		}

		inet_putpeer(peer);

		atomic_inc(&__rt_peer_genid);
	}
	return est_mtu ? : new_mtu;
}

static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
{
	unsigned long expires = peer->pmtu_expires;

	if (time_before(jiffies, expires)) {
		u32 orig_dst_mtu = dst_mtu(dst);
		if (peer->pmtu_learned < orig_dst_mtu) {
			if (!peer->pmtu_orig)
				peer->pmtu_orig = dst_metric_raw(dst, RTAX_MTU);
			dst_metric_set(dst, RTAX_MTU, peer->pmtu_learned);
		}
	} else if (cmpxchg(&peer->pmtu_expires, expires, 0) == expires)
		dst_metric_set(dst, RTAX_MTU, peer->pmtu_orig);
}

static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
{
	struct rtable *rt = (struct rtable *) dst;
	struct inet_peer *peer;

	dst_confirm(dst);

	if (!rt->peer)
		rt_bind_peer(rt, rt->rt_dst, 1);
	peer = rt->peer;
	if (peer) {
		if (mtu < ip_rt_min_pmtu)
			mtu = ip_rt_min_pmtu;
		if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
			unsigned long pmtu_expires;

			pmtu_expires = jiffies + ip_rt_mtu_expires;
			if (!pmtu_expires)
				pmtu_expires = 1UL;

			peer->pmtu_learned = mtu;
			peer->pmtu_expires = pmtu_expires;

			atomic_inc(&__rt_peer_genid);
			rt->rt_peer_genid = rt_peer_genid();
		}
		check_peer_pmtu(dst, peer);
	}
}

static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
{
	struct rtable *rt = (struct rtable *) dst;
	__be32 orig_gw = rt->rt_gateway;

	dst_confirm(&rt->dst);

	neigh_release(rt->dst.neighbour);
	rt->dst.neighbour = NULL;

	rt->rt_gateway = peer->redirect_learned.a4;
	if (arp_bind_neighbour(&rt->dst) ||
	    !(rt->dst.neighbour->nud_state & NUD_VALID)) {
		if (rt->dst.neighbour)
			neigh_event_send(rt->dst.neighbour, NULL);
		rt->rt_gateway = orig_gw;
		return -EAGAIN;
	} else {
		rt->rt_flags |= RTCF_REDIRECTED;
		call_netevent_notifiers(NETEVENT_NEIGH_UPDATE,
					rt->dst.neighbour);
	}
	return 0;
}

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct rtable *rt = (struct rtable *) dst;

	if (rt_is_expired(rt))
		return NULL;
	if (rt->rt_peer_genid != rt_peer_genid()) {
		struct inet_peer *peer;

		if (!rt->peer)
			rt_bind_peer(rt, rt->rt_dst, 0);

		peer = rt->peer;
		if (peer && peer->pmtu_expires)
			check_peer_pmtu(dst, peer);

		if (peer && peer->redirect_learned.a4 &&
		    peer->redirect_learned.a4 != rt->rt_gateway) {
			if (check_peer_redir(dst, peer))
				return NULL;
		}

		rt->rt_peer_genid = rt_peer_genid();
	}
	return dst;
}

static void ipv4_dst_destroy(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *) dst;
	struct inet_peer *peer = rt->peer;

	if (rt->fi) {
		fib_info_put(rt->fi);
		rt->fi = NULL;
	}
	if (peer) {
		rt->peer = NULL;
		inet_putpeer(peer);
	}
}


static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);

	rt = skb_rtable(skb);
	if (rt &&
	    rt->peer &&
	    rt->peer->pmtu_expires) {
		unsigned long orig = rt->peer->pmtu_expires;

		if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
			dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
	}
}

static int ip_rt_bug(struct sk_buff *skb)
{
	printk(KERN_DEBUG "ip_rt_bug: %pI4 -> %pI4, %s\n",
		&ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
		skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	return 0;
}

/*
   We do not cache the source address of the outgoing interface,
   because it is used only by IP RR, TS and SRR options,
   so it is out of the fast path.

   BTW remember: "addr" is allowed to be unaligned
   in IP options!
 */

void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
{
	__be32 src;

	if (rt_is_output_route(rt))
		src = ip_hdr(skb)->saddr;
	else {
		struct fib_result res;
		struct flowi4 fl4;
		struct iphdr *iph;

		iph = ip_hdr(skb);

		memset(&fl4, 0, sizeof(fl4));
		fl4.daddr = iph->daddr;
		fl4.saddr = iph->saddr;
		fl4.flowi4_tos = iph->tos;
		fl4.flowi4_oif = rt->dst.dev->ifindex;
		fl4.flowi4_iif = skb->dev->ifindex;
		fl4.flowi4_mark = skb->mark;

		rcu_read_lock();
		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
			src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
		else
			src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
					RT_SCOPE_UNIVERSE);
		rcu_read_unlock();
	}
	memcpy(addr, &src, 4);
}

#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->dst.tclassid & 0xFFFF))
		rt->dst.tclassid |= tag & 0xFFFF;
	if (!(rt->dst.tclassid & 0xFFFF0000))
		rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif

static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
{
	unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);

	if (advmss == 0) {
		advmss = max_t(unsigned int, dst->dev->mtu - 40,
			       ip_rt_min_advmss);
		if (advmss > 65535 - 40)
			advmss = 65535 - 40;
	}
	return advmss;
}

static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst->dev->mtu;

	if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
		const struct rtable *rt = (const struct rtable *) dst;

		if (rt->rt_gateway != rt->rt_dst && mtu > 576)
			mtu = 576;
	}

	if (mtu > IP_MAX_MTU)
		mtu = IP_MAX_MTU;

	return mtu;
}
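
/*
 * The 576-byte clamp above mirrors the classic default MTU assumed for
 * destinations that are not directly connected (rt_gateway != rt_dst,
 * cf. RFC 1122) when the MTU metric is locked; otherwise the device MTU
 * is used, capped at IP_MAX_MTU.
 */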
1751 
1752 static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
1753 			    struct fib_info *fi)
1754 {
1755 	struct inet_peer *peer;
1756 	int create = 0;
1757 
1758 	/* If a peer entry exists for this destination, we must hook
1759 	 * it up in order to get at cached metrics.
1760 	 */
1761 	if (fl4 && (fl4->flowi4_flags & FLOWI_FLAG_PRECOW_METRICS))
1762 		create = 1;
1763 
1764 	rt->peer = peer = inet_getpeer_v4(rt->rt_dst, create);
1765 	if (peer) {
1766 		rt->rt_peer_genid = rt_peer_genid();
1767 		if (inet_metrics_new(peer))
1768 			memcpy(peer->metrics, fi->fib_metrics,
1769 			       sizeof(u32) * RTAX_MAX);
1770 		dst_init_metrics(&rt->dst, peer->metrics, false);
1771 
1772 		if (peer->pmtu_expires)
1773 			check_peer_pmtu(&rt->dst, peer);
1774 		if (peer->redirect_learned.a4 &&
1775 		    peer->redirect_learned.a4 != rt->rt_gateway) {
1776 			rt->rt_gateway = peer->redirect_learned.a4;
1777 			rt->rt_flags |= RTCF_REDIRECTED;
1778 		}
1779 	} else {
1780 		if (fi->fib_metrics != (u32 *) dst_default_metrics) {
1781 			rt->fi = fi;
1782 			atomic_inc(&fi->fib_clntref);
1783 		}
1784 		dst_init_metrics(&rt->dst, fi->fib_metrics, true);
1785 	}
1786 }
1787 
1788 static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *fl4,
1789 			   const struct fib_result *res,
1790 			   struct fib_info *fi, u16 type, u32 itag)
1791 {
1792 	struct dst_entry *dst = &rt->dst;
1793 
1794 	if (fi) {
1795 		if (FIB_RES_GW(*res) &&
1796 		    FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
1797 			rt->rt_gateway = FIB_RES_GW(*res);
1798 		rt_init_metrics(rt, fl4, fi);
1799 #ifdef CONFIG_IP_ROUTE_CLASSID
1800 		dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
1801 #endif
1802 	}
1803 
1804 	if (dst_mtu(dst) > IP_MAX_MTU)
1805 		dst_metric_set(dst, RTAX_MTU, IP_MAX_MTU);
1806 	if (dst_metric_raw(dst, RTAX_ADVMSS) > 65535 - 40)
1807 		dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);
1808 
1809 #ifdef CONFIG_IP_ROUTE_CLASSID
1810 #ifdef CONFIG_IP_MULTIPLE_TABLES
1811 	set_class_tag(rt, fib_rules_tclass(res));
1812 #endif
1813 	set_class_tag(rt, itag);
1814 #endif
1815 }
1816 
1817 static struct rtable *rt_dst_alloc(struct net_device *dev,
1818 				   bool nopolicy, bool noxfrm)
1819 {
1820 	return dst_alloc(&ipv4_dst_ops, dev, 1, -1,
1821 			 DST_HOST |
1822 			 (nopolicy ? DST_NOPOLICY : 0) |
1823 			 (noxfrm ? DST_NOXFRM : 0));
1824 }
1825 
1826 /* called in rcu_read_lock() section */
1827 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1828 				u8 tos, struct net_device *dev, int our)
1829 {
1830 	unsigned int hash;
1831 	struct rtable *rth;
1832 	__be32 spec_dst;
1833 	struct in_device *in_dev = __in_dev_get_rcu(dev);
1834 	u32 itag = 0;
1835 	int err;
1836 
1837 	/* Primary sanity checks. */
1838 
1839 	if (in_dev == NULL)
1840 		return -EINVAL;
1841 
1842 	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1843 	    ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP))
1844 		goto e_inval;
1845 
1846 	if (ipv4_is_zeronet(saddr)) {
1847 		if (!ipv4_is_local_multicast(daddr))
1848 			goto e_inval;
1849 		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
1850 	} else {
1851 		err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
1852 					  &itag);
1853 		if (err < 0)
1854 			goto e_err;
1855 	}
1856 	rth = rt_dst_alloc(init_net.loopback_dev,
1857 			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
1858 	if (!rth)
1859 		goto e_nobufs;
1860 
1861 #ifdef CONFIG_IP_ROUTE_CLASSID
1862 	rth->dst.tclassid = itag;
1863 #endif
1864 	rth->dst.output = ip_rt_bug;
1865 
1866 	rth->rt_key_dst	= daddr;
1867 	rth->rt_key_src	= saddr;
1868 	rth->rt_genid	= rt_genid(dev_net(dev));
1869 	rth->rt_flags	= RTCF_MULTICAST;
1870 	rth->rt_type	= RTN_MULTICAST;
1871 	rth->rt_key_tos	= tos;
1872 	rth->rt_dst	= daddr;
1873 	rth->rt_src	= saddr;
1874 	rth->rt_route_iif = dev->ifindex;
1875 	rth->rt_iif	= dev->ifindex;
1876 	rth->rt_oif	= 0;
1877 	rth->rt_mark    = skb->mark;
1878 	rth->rt_gateway	= daddr;
1879 	rth->rt_spec_dst= spec_dst;
1880 	rth->rt_peer_genid = 0;
1881 	rth->peer = NULL;
1882 	rth->fi = NULL;
1883 	if (our) {
1884 		rth->dst.input= ip_local_deliver;
1885 		rth->rt_flags |= RTCF_LOCAL;
1886 	}
1887 
1888 #ifdef CONFIG_IP_MROUTE
1889 	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
1890 		rth->dst.input = ip_mr_input;
1891 #endif
1892 	RT_CACHE_STAT_INC(in_slow_mc);
1893 
1894 	hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
1895 	rth = rt_intern_hash(hash, rth, skb, dev->ifindex);
1896 	return IS_ERR(rth) ? PTR_ERR(rth) : 0;
1899 
1900 e_nobufs:
1901 	return -ENOBUFS;
1902 e_inval:
1903 	return -EINVAL;
1904 e_err:
1905 	return err;
1906 }
1907 
1908 
1909 static void ip_handle_martian_source(struct net_device *dev,
1910 				     struct in_device *in_dev,
1911 				     struct sk_buff *skb,
1912 				     __be32 daddr,
1913 				     __be32 saddr)
1914 {
1915 	RT_CACHE_STAT_INC(in_martian_src);
1916 #ifdef CONFIG_IP_ROUTE_VERBOSE
1917 	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1918 		/*
1919 		 *	RFC 1812 recommendation: if the source is martian,
1920 		 *	the only hint is the MAC header.
1921 		 */
1922 		printk(KERN_WARNING "martian source %pI4 from %pI4, on dev %s\n",
1923 			&daddr, &saddr, dev->name);
1924 		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1925 			int i;
1926 			const unsigned char *p = skb_mac_header(skb);
1927 			printk(KERN_WARNING "ll header: ");
1928 			for (i = 0; i < dev->hard_header_len; i++, p++) {
1929 				printk("%02x", *p);
1930 				if (i < (dev->hard_header_len - 1))
1931 					printk(":");
1932 			}
1933 			printk("\n");
1934 		}
1935 	}
1936 #endif
1937 }
1938 
1939 /* called in rcu_read_lock() section */
1940 static int __mkroute_input(struct sk_buff *skb,
1941 			   const struct fib_result *res,
1942 			   struct in_device *in_dev,
1943 			   __be32 daddr, __be32 saddr, u32 tos,
1944 			   struct rtable **result)
1945 {
1946 	struct rtable *rth;
1947 	int err;
1948 	struct in_device *out_dev;
1949 	unsigned int flags = 0;
1950 	__be32 spec_dst;
1951 	u32 itag;
1952 
1953 	/* get a working reference to the output device */
1954 	out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
1955 	if (out_dev == NULL) {
1956 		if (net_ratelimit())
1957 			printk(KERN_CRIT "Bug in ip_route_input_slow(). Please report.\n");
1959 		return -EINVAL;
1960 	}
1961 
1962 
1963 	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
1964 				  in_dev->dev, &spec_dst, &itag);
1965 	if (err < 0) {
1966 		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1967 					 saddr);
1968 
1969 		goto cleanup;
1970 	}
1971 
1972 	if (err)
1973 		flags |= RTCF_DIRECTSRC;
1974 
1975 	if (out_dev == in_dev && err &&
1976 	    (IN_DEV_SHARED_MEDIA(out_dev) ||
1977 	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
1978 		flags |= RTCF_DOREDIRECT;
1979 
1980 	if (skb->protocol != htons(ETH_P_IP)) {
1981 		/* Not IP (i.e. ARP). Do not create a route if it is
1982 		 * invalid for proxy ARP. DNAT routes are always valid.
1983 		 *
1984 		 * The proxy ARP feature has been extended to allow ARP
1985 		 * replies back out the same interface, to support
1986 		 * Private VLAN switch technologies. See arp.c.
1987 		 */
1988 		if (out_dev == in_dev &&
1989 		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1990 			err = -EINVAL;
1991 			goto cleanup;
1992 		}
1993 	}
1994 
1995 	rth = rt_dst_alloc(out_dev->dev,
1996 			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
1997 			   IN_DEV_CONF_GET(out_dev, NOXFRM));
1998 	if (!rth) {
1999 		err = -ENOBUFS;
2000 		goto cleanup;
2001 	}
2002 
2003 	rth->rt_key_dst	= daddr;
2004 	rth->rt_key_src	= saddr;
2005 	rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
2006 	rth->rt_flags = flags;
2007 	rth->rt_type = res->type;
2008 	rth->rt_key_tos	= tos;
2009 	rth->rt_dst	= daddr;
2010 	rth->rt_src	= saddr;
2011 	rth->rt_route_iif = in_dev->dev->ifindex;
2012 	rth->rt_iif 	= in_dev->dev->ifindex;
2013 	rth->rt_oif 	= 0;
2014 	rth->rt_mark    = skb->mark;
2015 	rth->rt_gateway	= daddr;
2016 	rth->rt_spec_dst= spec_dst;
2017 	rth->rt_peer_genid = 0;
2018 	rth->peer = NULL;
2019 	rth->fi = NULL;
2020 
2021 	rth->dst.input = ip_forward;
2022 	rth->dst.output = ip_output;
2023 
2024 	rt_set_nexthop(rth, NULL, res, res->fi, res->type, itag);
2025 
2026 	*result = rth;
2027 	err = 0;
2028  cleanup:
2029 	return err;
2030 }
2031 
2032 static int ip_mkroute_input(struct sk_buff *skb,
2033 			    struct fib_result *res,
2034 			    const struct flowi4 *fl4,
2035 			    struct in_device *in_dev,
2036 			    __be32 daddr, __be32 saddr, u32 tos)
2037 {
2038 	struct rtable* rth = NULL;
2039 	int err;
2040 	unsigned hash;
2041 
2042 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2043 	if (res->fi && res->fi->fib_nhs > 1)
2044 		fib_select_multipath(res);
2045 #endif
2046 
2047 	/* create a routing cache entry */
2048 	err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
2049 	if (err)
2050 		return err;
2051 
2052 	/* put it into the cache */
2053 	hash = rt_hash(daddr, saddr, fl4->flowi4_iif,
2054 		       rt_genid(dev_net(rth->dst.dev)));
2055 	rth = rt_intern_hash(hash, rth, skb, fl4->flowi4_iif);
2056 	if (IS_ERR(rth))
2057 		return PTR_ERR(rth);
2058 	return 0;
2059 }
2060 
2061 /*
2062  *	NOTE. We drop all packets that have a local source
2063  *	address, because every properly looped-back packet
2064  *	must already have the correct destination attached by the output routine.
2065  *
2066  *	This approach solves two big problems:
2067  *	1. Non-simplex devices are handled properly.
2068  *	2. IP spoofing attempts are filtered with a 100% guarantee.
2069  *	Called with rcu_read_lock().
2070  */
2071 
2072 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2073 			       u8 tos, struct net_device *dev)
2074 {
2075 	struct fib_result res;
2076 	struct in_device *in_dev = __in_dev_get_rcu(dev);
2077 	struct flowi4	fl4;
2078 	unsigned	flags = 0;
2079 	u32		itag = 0;
2080 	struct rtable * rth;
2081 	unsigned	hash;
2082 	__be32		spec_dst;
2083 	int		err = -EINVAL;
2084 	struct net    * net = dev_net(dev);
2085 
2086 	/* IP on this device is disabled. */
2087 
2088 	if (!in_dev)
2089 		goto out;
2090 
2091 	/* Check for the weirdest martians, which cannot be detected
2092 	   by fib_lookup.
2093 	 */
2094 
2095 	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
2096 	    ipv4_is_loopback(saddr))
2097 		goto martian_source;
2098 
2099 	if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
2100 		goto brd_input;
2101 
2102 	/* Accept zero addresses only for limited broadcast;
2103 	 * I do not even know whether to fix this or not. Waiting for complaints :-)
2104 	 */
2105 	if (ipv4_is_zeronet(saddr))
2106 		goto martian_source;
2107 
2108 	if (ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr))
2109 		goto martian_destination;
2110 
2111 	/*
2112 	 *	Now we are ready to route the packet.
2113 	 */
2114 	fl4.flowi4_oif = 0;
2115 	fl4.flowi4_iif = dev->ifindex;
2116 	fl4.flowi4_mark = skb->mark;
2117 	fl4.flowi4_tos = tos;
2118 	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
2119 	fl4.daddr = daddr;
2120 	fl4.saddr = saddr;
2121 	err = fib_lookup(net, &fl4, &res);
2122 	if (err != 0) {
2123 		if (!IN_DEV_FORWARD(in_dev))
2124 			goto e_hostunreach;
2125 		goto no_route;
2126 	}
2127 
2128 	RT_CACHE_STAT_INC(in_slow_tot);
2129 
2130 	if (res.type == RTN_BROADCAST)
2131 		goto brd_input;
2132 
2133 	if (res.type == RTN_LOCAL) {
2134 		err = fib_validate_source(skb, saddr, daddr, tos,
2135 					  net->loopback_dev->ifindex,
2136 					  dev, &spec_dst, &itag);
2137 		if (err < 0)
2138 			goto martian_source_keep_err;
2139 		if (err)
2140 			flags |= RTCF_DIRECTSRC;
2141 		spec_dst = daddr;
2142 		goto local_input;
2143 	}
2144 
2145 	if (!IN_DEV_FORWARD(in_dev))
2146 		goto e_hostunreach;
2147 	if (res.type != RTN_UNICAST)
2148 		goto martian_destination;
2149 
2150 	err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos);
2151 out:	return err;
2152 
2153 brd_input:
2154 	if (skb->protocol != htons(ETH_P_IP))
2155 		goto e_inval;
2156 
2157 	if (ipv4_is_zeronet(saddr))
2158 		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
2159 	else {
2160 		err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
2161 					  &itag);
2162 		if (err < 0)
2163 			goto martian_source_keep_err;
2164 		if (err)
2165 			flags |= RTCF_DIRECTSRC;
2166 	}
2167 	flags |= RTCF_BROADCAST;
2168 	res.type = RTN_BROADCAST;
2169 	RT_CACHE_STAT_INC(in_brd);
2170 
2171 local_input:
2172 	rth = rt_dst_alloc(net->loopback_dev,
2173 			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
2174 	if (!rth)
2175 		goto e_nobufs;
2176 
2177 	rth->dst.input= ip_local_deliver;
2178 	rth->dst.output= ip_rt_bug;
2179 #ifdef CONFIG_IP_ROUTE_CLASSID
2180 	rth->dst.tclassid = itag;
2181 #endif
2182 
2183 	rth->rt_key_dst	= daddr;
2184 	rth->rt_key_src	= saddr;
2185 	rth->rt_genid = rt_genid(net);
2186 	rth->rt_flags 	= flags|RTCF_LOCAL;
2187 	rth->rt_type	= res.type;
2188 	rth->rt_key_tos	= tos;
2189 	rth->rt_dst	= daddr;
2190 	rth->rt_src	= saddr;
2194 	rth->rt_route_iif = dev->ifindex;
2195 	rth->rt_iif	= dev->ifindex;
2196 	rth->rt_oif	= 0;
2197 	rth->rt_mark    = skb->mark;
2198 	rth->rt_gateway	= daddr;
2199 	rth->rt_spec_dst= spec_dst;
2200 	rth->rt_peer_genid = 0;
2201 	rth->peer = NULL;
2202 	rth->fi = NULL;
2203 	if (res.type == RTN_UNREACHABLE) {
2204 		rth->dst.input= ip_error;
2205 		rth->dst.error= -err;
2206 		rth->rt_flags 	&= ~RTCF_LOCAL;
2207 	}
2208 	hash = rt_hash(daddr, saddr, fl4.flowi4_iif, rt_genid(net));
2209 	rth = rt_intern_hash(hash, rth, skb, fl4.flowi4_iif);
2210 	err = 0;
2211 	if (IS_ERR(rth))
2212 		err = PTR_ERR(rth);
2213 	goto out;
2214 
2215 no_route:
2216 	RT_CACHE_STAT_INC(in_no_route);
2217 	spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
2218 	res.type = RTN_UNREACHABLE;
2219 	if (err == -ESRCH)
2220 		err = -ENETUNREACH;
2221 	goto local_input;
2222 
2223 	/*
2224 	 *	Do not cache martian addresses: they should be logged (RFC1812)
2225 	 */
2226 martian_destination:
2227 	RT_CACHE_STAT_INC(in_martian_dst);
2228 #ifdef CONFIG_IP_ROUTE_VERBOSE
2229 	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
2230 		printk(KERN_WARNING "martian destination %pI4 from %pI4, dev %s\n",
2231 			&daddr, &saddr, dev->name);
2232 #endif
2233 
2234 e_hostunreach:
2235 	err = -EHOSTUNREACH;
2236 	goto out;
2237 
2238 e_inval:
2239 	err = -EINVAL;
2240 	goto out;
2241 
2242 e_nobufs:
2243 	err = -ENOBUFS;
2244 	goto out;
2245 
2246 martian_source:
2247 	err = -EINVAL;
2248 martian_source_keep_err:
2249 	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2250 	goto out;
2251 }
2252 
2253 int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2254 			   u8 tos, struct net_device *dev, bool noref)
2255 {
2256 	struct rtable * rth;
2257 	unsigned	hash;
2258 	int iif = dev->ifindex;
2259 	struct net *net;
2260 	int res;
2261 
2262 	net = dev_net(dev);
2263 
2264 	rcu_read_lock();
2265 
2266 	if (!rt_caching(net))
2267 		goto skip_cache;
2268 
2269 	tos &= IPTOS_RT_MASK;
2270 	hash = rt_hash(daddr, saddr, iif, rt_genid(net));
2271 
2272 	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
2273 	     rth = rcu_dereference(rth->dst.rt_next)) {
2274 		if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) |
2275 		     ((__force u32)rth->rt_key_src ^ (__force u32)saddr) |
2276 		     (rth->rt_iif ^ iif) |
2277 		     rth->rt_oif |
2278 		     (rth->rt_key_tos ^ tos)) == 0 &&
2279 		    rth->rt_mark == skb->mark &&
2280 		    net_eq(dev_net(rth->dst.dev), net) &&
2281 		    !rt_is_expired(rth)) {
2282 			if (noref) {
2283 				dst_use_noref(&rth->dst, jiffies);
2284 				skb_dst_set_noref(skb, &rth->dst);
2285 			} else {
2286 				dst_use(&rth->dst, jiffies);
2287 				skb_dst_set(skb, &rth->dst);
2288 			}
2289 			RT_CACHE_STAT_INC(in_hit);
2290 			rcu_read_unlock();
2291 			return 0;
2292 		}
2293 		RT_CACHE_STAT_INC(in_hlist_search);
2294 	}
2295 
2296 skip_cache:
2297 	/* Multicast recognition logic was moved from the route cache to here.
2298 	   The problem was that too many Ethernet cards have broken/missing
2299 	   hardware multicast filters :-( As a result, a host on a multicast
2300 	   network acquires a lot of useless route cache entries, a sort of
2301 	   SDR messages from all over the world. Now we try to get rid of them.
2302 	   Provided the software IP multicast filter is organized
2303 	   reasonably (at least, hashed), this does not result in a slowdown
2304 	   compared with route cache reject entries.
2305 	   Note that multicast routers are not affected, because a
2306 	   route cache entry is created eventually.
2307 	 */
2308 	if (ipv4_is_multicast(daddr)) {
2309 		struct in_device *in_dev = __in_dev_get_rcu(dev);
2310 
2311 		if (in_dev) {
2312 			int our = ip_check_mc_rcu(in_dev, daddr, saddr,
2313 						  ip_hdr(skb)->protocol);
2314 			if (our
2315 #ifdef CONFIG_IP_MROUTE
2316 				||
2317 			    (!ipv4_is_local_multicast(daddr) &&
2318 			     IN_DEV_MFORWARD(in_dev))
2319 #endif
2320 			   ) {
2321 				int res = ip_route_input_mc(skb, daddr, saddr,
2322 							    tos, dev, our);
2323 				rcu_read_unlock();
2324 				return res;
2325 			}
2326 		}
2327 		rcu_read_unlock();
2328 		return -EINVAL;
2329 	}
2330 	res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
2331 	rcu_read_unlock();
2332 	return res;
2333 }
2334 EXPORT_SYMBOL(ip_route_input_common);
2335 
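/* Example: a minimal sketch of how the receive path drives this lookup,
 * modeled on ip_rcv_finish() in net/ipv4/ip_input.c. ip_route_input() is
 * the wrapper from include/net/route.h that calls in here; the helper
 * and field names below are real, the surrounding function is illustrative.
 *
 *	static int example_rcv_finish(struct sk_buff *skb)
 *	{
 *		const struct iphdr *iph = ip_hdr(skb);
 *		int err;
 *
 *		if (!skb_dst(skb)) {
 *			err = ip_route_input(skb, iph->daddr, iph->saddr,
 *					     iph->tos, skb->dev);
 *			if (unlikely(err)) {
 *				kfree_skb(skb);
 *				return NET_RX_DROP;
 *			}
 *		}
 *		return dst_input(skb);	// ip_local_deliver or ip_forward
 *	}
 */
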
2336 /* called with rcu_read_lock() */
2337 static struct rtable *__mkroute_output(const struct fib_result *res,
2338 				       const struct flowi4 *fl4,
2339 				       __be32 orig_daddr, __be32 orig_saddr,
2340 				       int orig_oif, struct net_device *dev_out,
2341 				       unsigned int flags)
2342 {
2343 	struct fib_info *fi = res->fi;
2344 	u32 tos = RT_FL_TOS(fl4);
2345 	struct in_device *in_dev;
2346 	u16 type = res->type;
2347 	struct rtable *rth;
2348 
2349 	if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
2350 		return ERR_PTR(-EINVAL);
2351 
2352 	if (ipv4_is_lbcast(fl4->daddr))
2353 		type = RTN_BROADCAST;
2354 	else if (ipv4_is_multicast(fl4->daddr))
2355 		type = RTN_MULTICAST;
2356 	else if (ipv4_is_zeronet(fl4->daddr))
2357 		return ERR_PTR(-EINVAL);
2358 
2359 	if (dev_out->flags & IFF_LOOPBACK)
2360 		flags |= RTCF_LOCAL;
2361 
2362 	in_dev = __in_dev_get_rcu(dev_out);
2363 	if (!in_dev)
2364 		return ERR_PTR(-EINVAL);
2365 
2366 	if (type == RTN_BROADCAST) {
2367 		flags |= RTCF_BROADCAST | RTCF_LOCAL;
2368 		fi = NULL;
2369 	} else if (type == RTN_MULTICAST) {
2370 		flags |= RTCF_MULTICAST | RTCF_LOCAL;
2371 		if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
2372 				     fl4->flowi4_proto))
2373 			flags &= ~RTCF_LOCAL;
2374 		/* If a multicast route does not exist, use
2375 		 * the default one, but do not gateway in this case.
2376 		 * Yes, it is a hack.
2377 		 */
2378 		if (fi && res->prefixlen < 4)
2379 			fi = NULL;
2380 	}
2381 
2382 	rth = rt_dst_alloc(dev_out,
2383 			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
2384 			   IN_DEV_CONF_GET(in_dev, NOXFRM));
2385 	if (!rth)
2386 		return ERR_PTR(-ENOBUFS);
2387 
2388 	rth->dst.output = ip_output;
2389 
2390 	rth->rt_key_dst	= orig_daddr;
2391 	rth->rt_key_src	= orig_saddr;
2392 	rth->rt_genid = rt_genid(dev_net(dev_out));
2393 	rth->rt_flags	= flags;
2394 	rth->rt_type	= type;
2395 	rth->rt_key_tos	= tos;
2396 	rth->rt_dst	= fl4->daddr;
2397 	rth->rt_src	= fl4->saddr;
2398 	rth->rt_route_iif = 0;
2399 	rth->rt_iif	= orig_oif ? : dev_out->ifindex;
2400 	rth->rt_oif	= orig_oif;
2401 	rth->rt_mark    = fl4->flowi4_mark;
2402 	rth->rt_gateway = fl4->daddr;
2403 	rth->rt_spec_dst= fl4->saddr;
2404 	rth->rt_peer_genid = 0;
2405 	rth->peer = NULL;
2406 	rth->fi = NULL;
2407 
2408 	RT_CACHE_STAT_INC(out_slow_tot);
2409 
2410 	if (flags & RTCF_LOCAL) {
2411 		rth->dst.input = ip_local_deliver;
2412 		rth->rt_spec_dst = fl4->daddr;
2413 	}
2414 	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2415 		rth->rt_spec_dst = fl4->saddr;
2416 		if (flags & RTCF_LOCAL &&
2417 		    !(dev_out->flags & IFF_LOOPBACK)) {
2418 			rth->dst.output = ip_mc_output;
2419 			RT_CACHE_STAT_INC(out_slow_mc);
2420 		}
2421 #ifdef CONFIG_IP_MROUTE
2422 		if (type == RTN_MULTICAST) {
2423 			if (IN_DEV_MFORWARD(in_dev) &&
2424 			    !ipv4_is_local_multicast(fl4->daddr)) {
2425 				rth->dst.input = ip_mr_input;
2426 				rth->dst.output = ip_mc_output;
2427 			}
2428 		}
2429 #endif
2430 	}
2431 
2432 	rt_set_nexthop(rth, fl4, res, fi, type, 0);
2433 
2434 	return rth;
2435 }
2436 
2437 /*
2438  * Major route resolver routine.
2439  * called with rcu_read_lock();
2440  */
2441 
2442 static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
2443 {
2444 	struct net_device *dev_out = NULL;
2445 	u32 tos	= RT_FL_TOS(fl4);
2446 	unsigned int flags = 0;
2447 	struct fib_result res;
2448 	struct rtable *rth;
2449 	__be32 orig_daddr;
2450 	__be32 orig_saddr;
2451 	int orig_oif;
2452 
2453 	res.fi		= NULL;
2454 #ifdef CONFIG_IP_MULTIPLE_TABLES
2455 	res.r		= NULL;
2456 #endif
2457 
2458 	orig_daddr = fl4->daddr;
2459 	orig_saddr = fl4->saddr;
2460 	orig_oif = fl4->flowi4_oif;
2461 
2462 	fl4->flowi4_iif = net->loopback_dev->ifindex;
2463 	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
2464 	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
2465 			 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
2466 
2467 	rcu_read_lock();
2468 	if (fl4->saddr) {
2469 		rth = ERR_PTR(-EINVAL);
2470 		if (ipv4_is_multicast(fl4->saddr) ||
2471 		    ipv4_is_lbcast(fl4->saddr) ||
2472 		    ipv4_is_zeronet(fl4->saddr))
2473 			goto out;
2474 
2475 		/* I removed the check for oif == dev_out->oif here.
2476 		   It was wrong for two reasons:
2477 		   1. ip_dev_find(net, saddr) can return the wrong iface if saddr
2478 		      is assigned to multiple interfaces.
2479 		   2. Moreover, we are allowed to send packets with the saddr
2480 		      of another iface. --ANK
2481 		 */
2482 
2483 		if (fl4->flowi4_oif == 0 &&
2484 		    (ipv4_is_multicast(fl4->daddr) ||
2485 		     ipv4_is_lbcast(fl4->daddr))) {
2486 			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2487 			dev_out = __ip_dev_find(net, fl4->saddr, false);
2488 			if (dev_out == NULL)
2489 				goto out;
2490 
2491 			/* Special hack: the user can direct multicasts
2492 			   and limited broadcast via the necessary interface
2493 			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2494 			   This hack is not just for fun; it allows
2495 			   vic, vat and friends to work.
2496 			   They bind a socket to loopback, set the TTL to zero
2497 			   and expect that it will work.
2498 			   From the viewpoint of the routing cache they are broken,
2499 			   because we are not allowed to build a multicast path
2500 			   with a loopback source addr (the routing cache
2501 			   cannot know that the TTL is zero, so the packet
2502 			   will not leave this host and the route looks valid).
2503 			   Luckily, this hack is a good workaround.
2504 			 */
2505 
2506 			fl4->flowi4_oif = dev_out->ifindex;
2507 			goto make_route;
2508 		}
2509 
2510 		if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2511 			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2512 			if (!__ip_dev_find(net, fl4->saddr, false))
2513 				goto out;
2514 		}
2515 	}
2516 
2517 
2518 	if (fl4->flowi4_oif) {
2519 		dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2520 		rth = ERR_PTR(-ENODEV);
2521 		if (dev_out == NULL)
2522 			goto out;
2523 
2524 		/* RACE: Check return value of inet_select_addr instead. */
2525 		if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2526 			rth = ERR_PTR(-ENETUNREACH);
2527 			goto out;
2528 		}
2529 		if (ipv4_is_local_multicast(fl4->daddr) ||
2530 		    ipv4_is_lbcast(fl4->daddr)) {
2531 			if (!fl4->saddr)
2532 				fl4->saddr = inet_select_addr(dev_out, 0,
2533 							      RT_SCOPE_LINK);
2534 			goto make_route;
2535 		}
2536 		if (!fl4->saddr) {
2537 			if (ipv4_is_multicast(fl4->daddr))
2538 				fl4->saddr = inet_select_addr(dev_out, 0,
2539 							      fl4->flowi4_scope);
2540 			else if (!fl4->daddr)
2541 				fl4->saddr = inet_select_addr(dev_out, 0,
2542 							      RT_SCOPE_HOST);
2543 		}
2544 	}
2545 
2546 	if (!fl4->daddr) {
2547 		fl4->daddr = fl4->saddr;
2548 		if (!fl4->daddr)
2549 			fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2550 		dev_out = net->loopback_dev;
2551 		fl4->flowi4_oif = net->loopback_dev->ifindex;
2552 		res.type = RTN_LOCAL;
2553 		flags |= RTCF_LOCAL;
2554 		goto make_route;
2555 	}
2556 
2557 	if (fib_lookup(net, fl4, &res)) {
2558 		res.fi = NULL;
2559 		if (fl4->flowi4_oif) {
2560 			/* Apparently, the routing tables are wrong. Assume
2561 			   that the destination is on-link.
2562 
2563 			   WHY? DW.
2564 			   Because we are allowed to send to an iface
2565 			   even if it has NO routes and NO assigned
2566 			   addresses. When oif is specified, the routing
2567 			   tables are looked up for only one purpose:
2568 			   to catch whether the destination is gatewayed
2569 			   rather than direct. Moreover, if MSG_DONTROUTE
2570 			   is set, we send the packet, ignoring both the
2571 			   routing tables and the ifaddr state. --ANK
2572 
2573 			   We could do this even when oif is unknown,
2574 			   as IPv6 likely does, but we do not.
2576 			 */
2577 
2578 			if (fl4->saddr == 0)
2579 				fl4->saddr = inet_select_addr(dev_out, 0,
2580 							      RT_SCOPE_LINK);
2581 			res.type = RTN_UNICAST;
2582 			goto make_route;
2583 		}
2584 		rth = ERR_PTR(-ENETUNREACH);
2585 		goto out;
2586 	}
2587 
2588 	if (res.type == RTN_LOCAL) {
2589 		if (!fl4->saddr) {
2590 			if (res.fi->fib_prefsrc)
2591 				fl4->saddr = res.fi->fib_prefsrc;
2592 			else
2593 				fl4->saddr = fl4->daddr;
2594 		}
2595 		dev_out = net->loopback_dev;
2596 		fl4->flowi4_oif = dev_out->ifindex;
2597 		res.fi = NULL;
2598 		flags |= RTCF_LOCAL;
2599 		goto make_route;
2600 	}
2601 
2602 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2603 	if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0)
2604 		fib_select_multipath(&res);
2605 	else
2606 #endif
2607 	if (!res.prefixlen &&
2608 	    res.table->tb_num_default > 1 &&
2609 	    res.type == RTN_UNICAST && !fl4->flowi4_oif)
2610 		fib_select_default(&res);
2611 
2612 	if (!fl4->saddr)
2613 		fl4->saddr = FIB_RES_PREFSRC(net, res);
2614 
2615 	dev_out = FIB_RES_DEV(res);
2616 	fl4->flowi4_oif = dev_out->ifindex;
2617 
2618 
2619 make_route:
2620 	rth = __mkroute_output(&res, fl4, orig_daddr, orig_saddr, orig_oif,
2621 			       dev_out, flags);
2622 	if (!IS_ERR(rth)) {
2623 		unsigned int hash;
2624 
2625 		hash = rt_hash(orig_daddr, orig_saddr, orig_oif,
2626 			       rt_genid(dev_net(dev_out)));
2627 		rth = rt_intern_hash(hash, rth, NULL, orig_oif);
2628 	}
2629 
2630 out:
2631 	rcu_read_unlock();
2632 	return rth;
2633 }
2634 
2635 struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
2636 {
2637 	struct rtable *rth;
2638 	unsigned int hash;
2639 
2640 	if (!rt_caching(net))
2641 		goto slow_output;
2642 
2643 	hash = rt_hash(flp4->daddr, flp4->saddr, flp4->flowi4_oif, rt_genid(net));
2644 
2645 	rcu_read_lock_bh();
2646 	for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
2647 		rth = rcu_dereference_bh(rth->dst.rt_next)) {
2648 		if (rth->rt_key_dst == flp4->daddr &&
2649 		    rth->rt_key_src == flp4->saddr &&
2650 		    rt_is_output_route(rth) &&
2651 		    rth->rt_oif == flp4->flowi4_oif &&
2652 		    rth->rt_mark == flp4->flowi4_mark &&
2653 		    !((rth->rt_key_tos ^ flp4->flowi4_tos) &
2654 			    (IPTOS_RT_MASK | RTO_ONLINK)) &&
2655 		    net_eq(dev_net(rth->dst.dev), net) &&
2656 		    !rt_is_expired(rth)) {
2657 			dst_use(&rth->dst, jiffies);
2658 			RT_CACHE_STAT_INC(out_hit);
2659 			rcu_read_unlock_bh();
2660 			if (!flp4->saddr)
2661 				flp4->saddr = rth->rt_src;
2662 			if (!flp4->daddr)
2663 				flp4->daddr = rth->rt_dst;
2664 			return rth;
2665 		}
2666 		RT_CACHE_STAT_INC(out_hlist_search);
2667 	}
2668 	rcu_read_unlock_bh();
2669 
2670 slow_output:
2671 	return ip_route_output_slow(net, flp4);
2672 }
2673 EXPORT_SYMBOL_GPL(__ip_route_output_key);
2674 
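/* Example: a minimal sketch of an output-route lookup through the cache.
 * ip_route_output_key() is the include/net/route.h wrapper that lands
 * here; everything else in the function below is illustrative.
 *
 *	static int example_output_lookup(struct net *net, __be32 daddr)
 *	{
 *		struct flowi4 fl4 = {
 *			.daddr = daddr,
 *		};
 *		struct rtable *rt = ip_route_output_key(net, &fl4);
 *
 *		if (IS_ERR(rt))
 *			return PTR_ERR(rt);	// e.g. -ENETUNREACH
 *		// ... use rt->dst.dev, rt->rt_gateway, etc. ...
 *		ip_rt_put(rt);			// release the cache reference
 *		return 0;
 *	}
 */
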
2675 static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
2676 {
2677 	return NULL;
2678 }
2679 
2680 static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst)
2681 {
2682 	return 0;
2683 }
2684 
2685 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
2686 {
2687 }
2688 
2689 static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
2690 					  unsigned long old)
2691 {
2692 	return NULL;
2693 }
2694 
2695 static struct dst_ops ipv4_dst_blackhole_ops = {
2696 	.family			=	AF_INET,
2697 	.protocol		=	cpu_to_be16(ETH_P_IP),
2698 	.destroy		=	ipv4_dst_destroy,
2699 	.check			=	ipv4_blackhole_dst_check,
2700 	.default_mtu		=	ipv4_blackhole_default_mtu,
2701 	.default_advmss		=	ipv4_default_advmss,
2702 	.update_pmtu		=	ipv4_rt_blackhole_update_pmtu,
2703 	.cow_metrics		=	ipv4_rt_blackhole_cow_metrics,
2704 };
2705 
2706 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2707 {
2708 	struct rtable *rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, 0, 0);
2709 	struct rtable *ort = (struct rtable *) dst_orig;
2710 
2711 	if (rt) {
2712 		struct dst_entry *new = &rt->dst;
2713 
2714 		new->__use = 1;
2715 		new->input = dst_discard;
2716 		new->output = dst_discard;
2717 		dst_copy_metrics(new, &ort->dst);
2718 
2719 		new->dev = ort->dst.dev;
2720 		if (new->dev)
2721 			dev_hold(new->dev);
2722 
2723 		rt->rt_key_dst = ort->rt_key_dst;
2724 		rt->rt_key_src = ort->rt_key_src;
2725 		rt->rt_key_tos = ort->rt_key_tos;
2726 		rt->rt_route_iif = ort->rt_route_iif;
2727 		rt->rt_iif = ort->rt_iif;
2728 		rt->rt_oif = ort->rt_oif;
2729 		rt->rt_mark = ort->rt_mark;
2730 
2731 		rt->rt_genid = rt_genid(net);
2732 		rt->rt_flags = ort->rt_flags;
2733 		rt->rt_type = ort->rt_type;
2734 		rt->rt_dst = ort->rt_dst;
2735 		rt->rt_src = ort->rt_src;
2736 		rt->rt_gateway = ort->rt_gateway;
2737 		rt->rt_spec_dst = ort->rt_spec_dst;
2738 		rt->peer = ort->peer;
2739 		if (rt->peer)
2740 			atomic_inc(&rt->peer->refcnt);
2741 		rt->fi = ort->fi;
2742 		if (rt->fi)
2743 			atomic_inc(&rt->fi->fib_clntref);
2744 
2745 		dst_free(new);
2746 	}
2747 
2748 	dst_release(dst_orig);
2749 
2750 	return rt ? &rt->dst : ERR_PTR(-ENOMEM);
2751 }
2752 
2753 struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2754 				    struct sock *sk)
2755 {
2756 	struct rtable *rt = __ip_route_output_key(net, flp4);
2757 
2758 	if (IS_ERR(rt))
2759 		return rt;
2760 
2761 	if (flp4->flowi4_proto)
2762 		rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
2763 						   flowi4_to_flowi(flp4),
2764 						   sk, 0);
2765 
2766 	return rt;
2767 }
2768 EXPORT_SYMBOL_GPL(ip_route_output_flow);
2769 
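/* Example: a sketch of a socket-style lookup that goes through xfrm.
 * Setting flowi4_proto is what opts the result into the xfrm_lookup()
 * pass above. "sk", "net", "daddr" and "dport" are placeholders here;
 * the flowi4 field names (including the fl4_* port aliases) are real.
 *
 *	struct flowi4 fl4 = {
 *		.flowi4_oif   = sk->sk_bound_dev_if,
 *		.flowi4_mark  = sk->sk_mark,
 *		.flowi4_proto = IPPROTO_UDP,
 *		.daddr        = daddr,
 *		.fl4_dport    = dport,
 *	};
 *	struct rtable *rt = ip_route_output_flow(net, &fl4, sk);
 *
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 */
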
2770 static int rt_fill_info(struct net *net,
2771 			struct sk_buff *skb, u32 pid, u32 seq, int event,
2772 			int nowait, unsigned int flags)
2773 {
2774 	struct rtable *rt = skb_rtable(skb);
2775 	struct rtmsg *r;
2776 	struct nlmsghdr *nlh;
2777 	long expires;
2778 	u32 id = 0, ts = 0, tsage = 0, error;
2779 
2780 	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
2781 	if (nlh == NULL)
2782 		return -EMSGSIZE;
2783 
2784 	r = nlmsg_data(nlh);
2785 	r->rtm_family	 = AF_INET;
2786 	r->rtm_dst_len	= 32;
2787 	r->rtm_src_len	= 0;
2788 	r->rtm_tos	= rt->rt_key_tos;
2789 	r->rtm_table	= RT_TABLE_MAIN;
2790 	NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
2791 	r->rtm_type	= rt->rt_type;
2792 	r->rtm_scope	= RT_SCOPE_UNIVERSE;
2793 	r->rtm_protocol = RTPROT_UNSPEC;
2794 	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2795 	if (rt->rt_flags & RTCF_NOTIFY)
2796 		r->rtm_flags |= RTM_F_NOTIFY;
2797 
2798 	NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);
2799 
2800 	if (rt->rt_key_src) {
2801 		r->rtm_src_len = 32;
2802 		NLA_PUT_BE32(skb, RTA_SRC, rt->rt_key_src);
2803 	}
2804 	if (rt->dst.dev)
2805 		NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
2806 #ifdef CONFIG_IP_ROUTE_CLASSID
2807 	if (rt->dst.tclassid)
2808 		NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
2809 #endif
2810 	if (rt_is_input_route(rt))
2811 		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
2812 	else if (rt->rt_src != rt->rt_key_src)
2813 		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
2814 
2815 	if (rt->rt_dst != rt->rt_gateway)
2816 		NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
2817 
2818 	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
2819 		goto nla_put_failure;
2820 
2821 	if (rt->rt_mark)
2822 		NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark);
2823 
2824 	error = rt->dst.error;
2825 	expires = (rt->peer && rt->peer->pmtu_expires) ?
2826 		rt->peer->pmtu_expires - jiffies : 0;
2827 	if (rt->peer) {
2828 		inet_peer_refcheck(rt->peer);
2829 		id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
2830 		if (rt->peer->tcp_ts_stamp) {
2831 			ts = rt->peer->tcp_ts;
2832 			tsage = get_seconds() - rt->peer->tcp_ts_stamp;
2833 		}
2834 	}
2835 
2836 	if (rt_is_input_route(rt)) {
2837 #ifdef CONFIG_IP_MROUTE
2838 		__be32 dst = rt->rt_dst;
2839 
2840 		if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
2841 		    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2842 			int err = ipmr_get_route(net, skb,
2843 						 rt->rt_src, rt->rt_dst,
2844 						 r, nowait);
2845 			if (err <= 0) {
2846 				if (!nowait) {
2847 					if (err == 0)
2848 						return 0;
2849 					goto nla_put_failure;
2850 				} else {
2851 					if (err == -EMSGSIZE)
2852 						goto nla_put_failure;
2853 					error = err;
2854 				}
2855 			}
2856 		} else
2857 #endif
2858 			NLA_PUT_U32(skb, RTA_IIF, rt->rt_iif);
2859 	}
2860 
2861 	if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
2862 			       expires, error) < 0)
2863 		goto nla_put_failure;
2864 
2865 	return nlmsg_end(skb, nlh);
2866 
2867 nla_put_failure:
2868 	nlmsg_cancel(skb, nlh);
2869 	return -EMSGSIZE;
2870 }
2871 
2872 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
2873 {
2874 	struct net *net = sock_net(in_skb->sk);
2875 	struct rtmsg *rtm;
2876 	struct nlattr *tb[RTA_MAX+1];
2877 	struct rtable *rt = NULL;
2878 	__be32 dst = 0;
2879 	__be32 src = 0;
2880 	u32 iif;
2881 	int err;
2882 	int mark;
2883 	struct sk_buff *skb;
2884 
2885 	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
2886 	if (err < 0)
2887 		goto errout;
2888 
2889 	rtm = nlmsg_data(nlh);
2890 
2891 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2892 	if (skb == NULL) {
2893 		err = -ENOBUFS;
2894 		goto errout;
2895 	}
2896 
2897 	/* Reserve room for dummy headers, this skb can pass
2898 	/* Reserve room for dummy headers; this skb can pass
2899 	   through a good chunk of the routing engine.
2900 	skb_reset_mac_header(skb);
2901 	skb_reset_network_header(skb);
2902 
2903 	/* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
2904 	ip_hdr(skb)->protocol = IPPROTO_ICMP;
2905 	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2906 
2907 	src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
2908 	dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
2909 	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
2910 	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
2911 
2912 	if (iif) {
2913 		struct net_device *dev;
2914 
2915 		dev = __dev_get_by_index(net, iif);
2916 		if (dev == NULL) {
2917 			err = -ENODEV;
2918 			goto errout_free;
2919 		}
2920 
2921 		skb->protocol	= htons(ETH_P_IP);
2922 		skb->dev	= dev;
2923 		skb->mark	= mark;
2924 		local_bh_disable();
2925 		err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
2926 		local_bh_enable();
2927 
2928 		rt = skb_rtable(skb);
2929 		if (err == 0 && rt->dst.error)
2930 			err = -rt->dst.error;
2931 	} else {
2932 		struct flowi4 fl4 = {
2933 			.daddr = dst,
2934 			.saddr = src,
2935 			.flowi4_tos = rtm->rtm_tos,
2936 			.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
2937 			.flowi4_mark = mark,
2938 		};
2939 		rt = ip_route_output_key(net, &fl4);
2940 
2941 		err = 0;
2942 		if (IS_ERR(rt))
2943 			err = PTR_ERR(rt);
2944 	}
2945 
2946 	if (err)
2947 		goto errout_free;
2948 
2949 	skb_dst_set(skb, &rt->dst);
2950 	if (rtm->rtm_flags & RTM_F_NOTIFY)
2951 		rt->rt_flags |= RTCF_NOTIFY;
2952 
2953 	err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
2954 			   RTM_NEWROUTE, 0, 0);
2955 	if (err <= 0)
2956 		goto errout_free;
2957 
2958 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
2959 errout:
2960 	return err;
2961 
2962 errout_free:
2963 	kfree_skb(skb);
2964 	goto errout;
2965 }
2966 
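/* The handler above services RTM_GETROUTE, i.e. what iproute2 sends for
 * "ip route get". A rough userspace equivalent of the iif branch (the
 * addresses and device name are illustrative only):
 *
 *	$ ip route get 10.0.0.1 from 192.168.1.2 iif eth0
 *
 * The reply is a single RTM_NEWROUTE message built by rt_fill_info().
 */
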
2967 int ip_rt_dump(struct sk_buff *skb,  struct netlink_callback *cb)
2968 {
2969 	struct rtable *rt;
2970 	int h, s_h;
2971 	int idx, s_idx;
2972 	struct net *net;
2973 
2974 	net = sock_net(skb->sk);
2975 
2976 	s_h = cb->args[0];
2977 	if (s_h < 0)
2978 		s_h = 0;
2979 	s_idx = idx = cb->args[1];
2980 	for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
2981 		if (!rt_hash_table[h].chain)
2982 			continue;
2983 		rcu_read_lock_bh();
2984 		for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
2985 		     rt = rcu_dereference_bh(rt->dst.rt_next), idx++) {
2986 			if (!net_eq(dev_net(rt->dst.dev), net) || idx < s_idx)
2987 				continue;
2988 			if (rt_is_expired(rt))
2989 				continue;
2990 			skb_dst_set_noref(skb, &rt->dst);
2991 			if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
2992 					 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
2993 					 1, NLM_F_MULTI) <= 0) {
2994 				skb_dst_drop(skb);
2995 				rcu_read_unlock_bh();
2996 				goto done;
2997 			}
2998 			skb_dst_drop(skb);
2999 		}
3000 		rcu_read_unlock_bh();
3001 	}
3002 
3003 done:
3004 	cb->args[0] = h;
3005 	cb->args[1] = idx;
3006 	return skb->len;
3007 }
3008 
3009 void ip_rt_multicast_event(struct in_device *in_dev)
3010 {
3011 	rt_cache_flush(dev_net(in_dev->dev), 0);
3012 }
3013 
3014 #ifdef CONFIG_SYSCTL
3015 static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
3016 					void __user *buffer,
3017 					size_t *lenp, loff_t *ppos)
3018 {
3019 	if (write) {
3020 		int flush_delay;
3021 		ctl_table ctl;
3022 		struct net *net;
3023 
3024 		memcpy(&ctl, __ctl, sizeof(ctl));
3025 		ctl.data = &flush_delay;
3026 		proc_dointvec(&ctl, write, buffer, lenp, ppos);
3027 
3028 		net = (struct net *)__ctl->extra1;
3029 		rt_cache_flush(net, flush_delay);
3030 		return 0;
3031 	}
3032 
3033 	return -EINVAL;
3034 }
3035 
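/* Example: this write-only handler backs the per-netns "flush" file
 * registered via ipv4_route_flush_table below, so the cache can be
 * flushed from userspace:
 *
 *	# echo 0 > /proc/sys/net/ipv4/route/flush
 *
 * The written integer is passed to rt_cache_flush() (defined earlier in
 * this file) as the delay argument.
 */
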
3036 static ctl_table ipv4_route_table[] = {
3037 	{
3038 		.procname	= "gc_thresh",
3039 		.data		= &ipv4_dst_ops.gc_thresh,
3040 		.maxlen		= sizeof(int),
3041 		.mode		= 0644,
3042 		.proc_handler	= proc_dointvec,
3043 	},
3044 	{
3045 		.procname	= "max_size",
3046 		.data		= &ip_rt_max_size,
3047 		.maxlen		= sizeof(int),
3048 		.mode		= 0644,
3049 		.proc_handler	= proc_dointvec,
3050 	},
3051 	{
3052 		/*  Deprecated. Use gc_min_interval_ms */
3053 
3054 		.procname	= "gc_min_interval",
3055 		.data		= &ip_rt_gc_min_interval,
3056 		.maxlen		= sizeof(int),
3057 		.mode		= 0644,
3058 		.proc_handler	= proc_dointvec_jiffies,
3059 	},
3060 	{
3061 		.procname	= "gc_min_interval_ms",
3062 		.data		= &ip_rt_gc_min_interval,
3063 		.maxlen		= sizeof(int),
3064 		.mode		= 0644,
3065 		.proc_handler	= proc_dointvec_ms_jiffies,
3066 	},
3067 	{
3068 		.procname	= "gc_timeout",
3069 		.data		= &ip_rt_gc_timeout,
3070 		.maxlen		= sizeof(int),
3071 		.mode		= 0644,
3072 		.proc_handler	= proc_dointvec_jiffies,
3073 	},
3074 	{
3075 		.procname	= "gc_interval",
3076 		.data		= &ip_rt_gc_interval,
3077 		.maxlen		= sizeof(int),
3078 		.mode		= 0644,
3079 		.proc_handler	= proc_dointvec_jiffies,
3080 	},
3081 	{
3082 		.procname	= "redirect_load",
3083 		.data		= &ip_rt_redirect_load,
3084 		.maxlen		= sizeof(int),
3085 		.mode		= 0644,
3086 		.proc_handler	= proc_dointvec,
3087 	},
3088 	{
3089 		.procname	= "redirect_number",
3090 		.data		= &ip_rt_redirect_number,
3091 		.maxlen		= sizeof(int),
3092 		.mode		= 0644,
3093 		.proc_handler	= proc_dointvec,
3094 	},
3095 	{
3096 		.procname	= "redirect_silence",
3097 		.data		= &ip_rt_redirect_silence,
3098 		.maxlen		= sizeof(int),
3099 		.mode		= 0644,
3100 		.proc_handler	= proc_dointvec,
3101 	},
3102 	{
3103 		.procname	= "error_cost",
3104 		.data		= &ip_rt_error_cost,
3105 		.maxlen		= sizeof(int),
3106 		.mode		= 0644,
3107 		.proc_handler	= proc_dointvec,
3108 	},
3109 	{
3110 		.procname	= "error_burst",
3111 		.data		= &ip_rt_error_burst,
3112 		.maxlen		= sizeof(int),
3113 		.mode		= 0644,
3114 		.proc_handler	= proc_dointvec,
3115 	},
3116 	{
3117 		.procname	= "gc_elasticity",
3118 		.data		= &ip_rt_gc_elasticity,
3119 		.maxlen		= sizeof(int),
3120 		.mode		= 0644,
3121 		.proc_handler	= proc_dointvec,
3122 	},
3123 	{
3124 		.procname	= "mtu_expires",
3125 		.data		= &ip_rt_mtu_expires,
3126 		.maxlen		= sizeof(int),
3127 		.mode		= 0644,
3128 		.proc_handler	= proc_dointvec_jiffies,
3129 	},
3130 	{
3131 		.procname	= "min_pmtu",
3132 		.data		= &ip_rt_min_pmtu,
3133 		.maxlen		= sizeof(int),
3134 		.mode		= 0644,
3135 		.proc_handler	= proc_dointvec,
3136 	},
3137 	{
3138 		.procname	= "min_adv_mss",
3139 		.data		= &ip_rt_min_advmss,
3140 		.maxlen		= sizeof(int),
3141 		.mode		= 0644,
3142 		.proc_handler	= proc_dointvec,
3143 	},
3144 	{ }
3145 };
3146 
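/* Example: each entry above is exposed under /proc/sys/net/ipv4/route/
 * and may be read or set with sysctl(8); the value shown is illustrative:
 *
 *	# sysctl -w net.ipv4.route.gc_elasticity=4
 *	# sysctl net.ipv4.route.max_size
 */
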
3147 static struct ctl_table empty[1];
3148 
3149 static struct ctl_table ipv4_skeleton[] =
3150 {
3151 	{ .procname = "route",
3152 	  .mode = 0555, .child = ipv4_route_table},
3153 	{ .procname = "neigh",
3154 	  .mode = 0555, .child = empty},
3155 	{ }
3156 };
3157 
3158 static __net_initdata struct ctl_path ipv4_path[] = {
3159 	{ .procname = "net", },
3160 	{ .procname = "ipv4", },
3161 	{ },
3162 };
3163 
3164 static struct ctl_table ipv4_route_flush_table[] = {
3165 	{
3166 		.procname	= "flush",
3167 		.maxlen		= sizeof(int),
3168 		.mode		= 0200,
3169 		.proc_handler	= ipv4_sysctl_rtcache_flush,
3170 	},
3171 	{ },
3172 };
3173 
3174 static __net_initdata struct ctl_path ipv4_route_path[] = {
3175 	{ .procname = "net", },
3176 	{ .procname = "ipv4", },
3177 	{ .procname = "route", },
3178 	{ },
3179 };
3180 
3181 static __net_init int sysctl_route_net_init(struct net *net)
3182 {
3183 	struct ctl_table *tbl;
3184 
3185 	tbl = ipv4_route_flush_table;
3186 	if (!net_eq(net, &init_net)) {
3187 		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
3188 		if (tbl == NULL)
3189 			goto err_dup;
3190 	}
3191 	tbl[0].extra1 = net;
3192 
3193 	net->ipv4.route_hdr =
3194 		register_net_sysctl_table(net, ipv4_route_path, tbl);
3195 	if (net->ipv4.route_hdr == NULL)
3196 		goto err_reg;
3197 	return 0;
3198 
3199 err_reg:
3200 	if (tbl != ipv4_route_flush_table)
3201 		kfree(tbl);
3202 err_dup:
3203 	return -ENOMEM;
3204 }
3205 
3206 static __net_exit void sysctl_route_net_exit(struct net *net)
3207 {
3208 	struct ctl_table *tbl;
3209 
3210 	tbl = net->ipv4.route_hdr->ctl_table_arg;
3211 	unregister_net_sysctl_table(net->ipv4.route_hdr);
3212 	BUG_ON(tbl == ipv4_route_flush_table);
3213 	kfree(tbl);
3214 }
3215 
3216 static __net_initdata struct pernet_operations sysctl_route_ops = {
3217 	.init = sysctl_route_net_init,
3218 	.exit = sysctl_route_net_exit,
3219 };
3220 #endif
3221 
3222 static __net_init int rt_genid_init(struct net *net)
3223 {
3224 	get_random_bytes(&net->ipv4.rt_genid,
3225 			 sizeof(net->ipv4.rt_genid));
3226 	get_random_bytes(&net->ipv4.dev_addr_genid,
3227 			 sizeof(net->ipv4.dev_addr_genid));
3228 	return 0;
3229 }
3230 
3231 static __net_initdata struct pernet_operations rt_genid_ops = {
3232 	.init = rt_genid_init,
3233 };
3234 
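/* The per-netns rt_genid seeded above is stamped into every cache entry
 * (the rt_genid assignments throughout this file) and checked on each
 * lookup; bumping it invalidates all existing entries at once. A sketch
 * of the comparison, matching rt_is_expired() earlier in this file:
 *
 *	static inline int example_is_expired(const struct rtable *rth)
 *	{
 *		return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
 *	}
 */
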
3235 
3236 #ifdef CONFIG_IP_ROUTE_CLASSID
3237 struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
3238 #endif /* CONFIG_IP_ROUTE_CLASSID */
3239 
3240 static __initdata unsigned long rhash_entries;
3241 static int __init set_rhash_entries(char *str)
3242 {
3243 	if (!str)
3244 		return 0;
3245 	rhash_entries = simple_strtoul(str, &str, 0);
3246 	return 1;
3247 }
3248 __setup("rhash_entries=", set_rhash_entries);
3249 
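/* Example: the __setup() hook above parses a kernel command-line option,
 * so the route cache hash size can be pinned at boot:
 *
 *	linux ... rhash_entries=262144
 *
 * When the option is absent, rhash_entries stays 0 and
 * alloc_large_system_hash() in ip_rt_init() below sizes the table from
 * available memory.
 */
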
3250 int __init ip_rt_init(void)
3251 {
3252 	int rc = 0;
3253 
3254 #ifdef CONFIG_IP_ROUTE_CLASSID
3255 	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
3256 	if (!ip_rt_acct)
3257 		panic("IP: failed to allocate ip_rt_acct\n");
3258 #endif
3259 
3260 	ipv4_dst_ops.kmem_cachep =
3261 		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
3262 				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3263 
3264 	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3265 
3266 	if (dst_entries_init(&ipv4_dst_ops) < 0)
3267 		panic("IP: failed to allocate ipv4_dst_ops counter\n");
3268 
3269 	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3270 		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
3271 
3272 	rt_hash_table = (struct rt_hash_bucket *)
3273 		alloc_large_system_hash("IP route cache",
3274 					sizeof(struct rt_hash_bucket),
3275 					rhash_entries,
3276 					(totalram_pages >= 128 * 1024) ?
3277 					15 : 17,
3278 					0,
3279 					&rt_hash_log,
3280 					&rt_hash_mask,
3281 					rhash_entries ? 0 : 512 * 1024);
3282 	memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
3283 	rt_hash_lock_init();
3284 
3285 	ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
3286 	ip_rt_max_size = (rt_hash_mask + 1) * 16;
3287 
3288 	devinet_init();
3289 	ip_fib_init();
3290 
3291 	if (ip_rt_proc_init())
3292 		printk(KERN_ERR "Unable to create route proc files\n");
3293 #ifdef CONFIG_XFRM
3294 	xfrm_init();
3295 	xfrm4_init(ip_rt_max_size);
3296 #endif
3297 	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);
3298 
3299 #ifdef CONFIG_SYSCTL
3300 	register_pernet_subsys(&sysctl_route_ops);
3301 #endif
3302 	register_pernet_subsys(&rt_genid_ops);
3303 	return rc;
3304 }
3305 
3306 #ifdef CONFIG_SYSCTL
3307 /*
3308  * We really need to sanitize the damn ipv4 init order, then all
3309  * this nonsense will go away.
3310  */
3311 void __init ip_static_sysctl_init(void)
3312 {
3313 	register_sysctl_paths(ipv4_path, ipv4_skeleton);
3314 }
3315 #endif
3316