xref: /linux/net/ipv6/route.c (revision 9e9f60108423f18a99c9cc93ef7f23490ecc709b)
1 /*
2  *	Linux INET6 implementation
3  *	FIB front-end.
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *
8  *	This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  */
13 
14 /*	Changes:
15  *
16  *	YOSHIFUJI Hideaki @USAGI
17  *		reworked default router selection.
18  *		- respect outgoing interface
19  *		- select from (probably) reachable routers (i.e.
20  *		routers in REACHABLE, STALE, DELAY or PROBE states).
21  *		- always select the same router if it is (probably)
22  *		reachable.  otherwise, round-robin the list.
23  *	Ville Nuorvala
24  *		Fixed routing subtrees.
25  */
26 
27 #define pr_fmt(fmt) "IPv6: " fmt
28 
29 #include <linux/capability.h>
30 #include <linux/errno.h>
31 #include <linux/export.h>
32 #include <linux/types.h>
33 #include <linux/times.h>
34 #include <linux/socket.h>
35 #include <linux/sockios.h>
36 #include <linux/net.h>
37 #include <linux/route.h>
38 #include <linux/netdevice.h>
39 #include <linux/in6.h>
40 #include <linux/mroute6.h>
41 #include <linux/init.h>
42 #include <linux/if_arp.h>
43 #include <linux/proc_fs.h>
44 #include <linux/seq_file.h>
45 #include <linux/nsproxy.h>
46 #include <linux/slab.h>
47 #include <net/net_namespace.h>
48 #include <net/snmp.h>
49 #include <net/ipv6.h>
50 #include <net/ip6_fib.h>
51 #include <net/ip6_route.h>
52 #include <net/ndisc.h>
53 #include <net/addrconf.h>
54 #include <net/tcp.h>
55 #include <linux/rtnetlink.h>
56 #include <net/dst.h>
57 #include <net/xfrm.h>
58 #include <net/netevent.h>
59 #include <net/netlink.h>
60 #include <net/nexthop.h>
61 
62 #include <asm/uaccess.h>
63 
64 #ifdef CONFIG_SYSCTL
65 #include <linux/sysctl.h>
66 #endif
67 
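/* Result codes shared by rt6_check_neigh() and rt6_score_route() below:
 * the negative values reject a candidate next hop (hard failure, failed
 * probe, or a request to fall back to round-robin selection in
 * find_match()), while RT6_NUD_SUCCEED accepts it.
 */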
68 enum rt6_nud_state {
69 	RT6_NUD_FAIL_HARD = -3,
70 	RT6_NUD_FAIL_PROBE = -2,
71 	RT6_NUD_FAIL_DO_RR = -1,
72 	RT6_NUD_SUCCEED = 1
73 };
74 
75 static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
76 				    const struct in6_addr *dest);
77 static struct dst_entry	*ip6_dst_check(struct dst_entry *dst, u32 cookie);
78 static unsigned int	 ip6_default_advmss(const struct dst_entry *dst);
79 static unsigned int	 ip6_mtu(const struct dst_entry *dst);
80 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
81 static void		ip6_dst_destroy(struct dst_entry *);
82 static void		ip6_dst_ifdown(struct dst_entry *,
83 				       struct net_device *dev, int how);
84 static int		 ip6_dst_gc(struct dst_ops *ops);
85 
86 static int		ip6_pkt_discard(struct sk_buff *skb);
87 static int		ip6_pkt_discard_out(struct sock *sk, struct sk_buff *skb);
88 static int		ip6_pkt_prohibit(struct sk_buff *skb);
89 static int		ip6_pkt_prohibit_out(struct sock *sk, struct sk_buff *skb);
90 static void		ip6_link_failure(struct sk_buff *skb);
91 static void		ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
92 					   struct sk_buff *skb, u32 mtu);
93 static void		rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
94 					struct sk_buff *skb);
95 static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
96 
97 #ifdef CONFIG_IPV6_ROUTE_INFO
98 static struct rt6_info *rt6_add_route_info(struct net *net,
99 					   const struct in6_addr *prefix, int prefixlen,
100 					   const struct in6_addr *gwaddr, int ifindex,
101 					   unsigned int pref);
102 static struct rt6_info *rt6_get_route_info(struct net *net,
103 					   const struct in6_addr *prefix, int prefixlen,
104 					   const struct in6_addr *gwaddr, int ifindex);
105 #endif
106 
107 static void rt6_bind_peer(struct rt6_info *rt, int create)
108 {
109 	struct inet_peer_base *base;
110 	struct inet_peer *peer;
111 
112 	base = inetpeer_base_ptr(rt->_rt6i_peer);
113 	if (!base)
114 		return;
115 
116 	peer = inet_getpeer_v6(base, &rt->rt6i_dst.addr, create);
117 	if (peer) {
118 		if (!rt6_set_peer(rt, peer))
119 			inet_putpeer(peer);
120 	}
121 }
122 
123 static struct inet_peer *__rt6_get_peer(struct rt6_info *rt, int create)
124 {
125 	if (rt6_has_peer(rt))
126 		return rt6_peer_ptr(rt);
127 
128 	rt6_bind_peer(rt, create);
129 	return (rt6_has_peer(rt) ? rt6_peer_ptr(rt) : NULL);
130 }
131 
132 static struct inet_peer *rt6_get_peer_create(struct rt6_info *rt)
133 {
134 	return __rt6_get_peer(rt, 1);
135 }
136 
137 static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
138 {
139 	struct rt6_info *rt = (struct rt6_info *) dst;
140 	struct inet_peer *peer;
141 	u32 *p = NULL;
142 
143 	if (!(rt->dst.flags & DST_HOST))
144 		return NULL;
145 
146 	peer = rt6_get_peer_create(rt);
147 	if (peer) {
148 		u32 *old_p = __DST_METRICS_PTR(old);
149 		unsigned long prev, new;
150 
151 		p = peer->metrics;
152 		if (inet_metrics_new(peer) ||
153 		    (old & DST_METRICS_FORCE_OVERWRITE))
154 			memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
155 
156 		new = (unsigned long) p;
157 		prev = cmpxchg(&dst->_metrics, old, new);
158 
159 		if (prev != old) {
160 			p = __DST_METRICS_PTR(prev);
161 			if (prev & DST_METRICS_READ_ONLY)
162 				p = NULL;
163 		}
164 	}
165 	return p;
166 }
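/* Note on the cmpxchg() above: the peer's writable metrics are published
 * only if dst->_metrics still holds the old pointer.  If another CPU won
 * the race we return whatever it installed instead, or NULL if that value
 * is still marked read-only.
 */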
167 
168 static inline const void *choose_neigh_daddr(struct rt6_info *rt,
169 					     struct sk_buff *skb,
170 					     const void *daddr)
171 {
172 	struct in6_addr *p = &rt->rt6i_gateway;
173 
174 	if (!ipv6_addr_any(p))
175 		return (const void *) p;
176 	else if (skb)
177 		return &ipv6_hdr(skb)->daddr;
178 	return daddr;
179 }
180 
181 static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
182 					  struct sk_buff *skb,
183 					  const void *daddr)
184 {
185 	struct rt6_info *rt = (struct rt6_info *) dst;
186 	struct neighbour *n;
187 
188 	daddr = choose_neigh_daddr(rt, skb, daddr);
189 	n = __ipv6_neigh_lookup(dst->dev, daddr);
190 	if (n)
191 		return n;
192 	return neigh_create(&nd_tbl, daddr, dst->dev);
193 }
194 
195 static struct dst_ops ip6_dst_ops_template = {
196 	.family			=	AF_INET6,
197 	.protocol		=	cpu_to_be16(ETH_P_IPV6),
198 	.gc			=	ip6_dst_gc,
199 	.gc_thresh		=	1024,
200 	.check			=	ip6_dst_check,
201 	.default_advmss		=	ip6_default_advmss,
202 	.mtu			=	ip6_mtu,
203 	.cow_metrics		=	ipv6_cow_metrics,
204 	.destroy		=	ip6_dst_destroy,
205 	.ifdown			=	ip6_dst_ifdown,
206 	.negative_advice	=	ip6_negative_advice,
207 	.link_failure		=	ip6_link_failure,
208 	.update_pmtu		=	ip6_rt_update_pmtu,
209 	.redirect		=	rt6_do_redirect,
210 	.local_out		=	__ip6_local_out,
211 	.neigh_lookup		=	ip6_neigh_lookup,
212 };
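/* ip6_dst_ops_template is presumably copied into each network namespace:
 * ip6_dst_alloc() below allocates from net->ipv6.ip6_dst_ops, so the
 * gc_thresh and callbacks set here act as the per-netns defaults.
 */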
213 
214 static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
215 {
216 	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
217 
218 	return mtu ? : dst->dev->mtu;
219 }
220 
221 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
222 					 struct sk_buff *skb, u32 mtu)
223 {
224 }
225 
226 static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
227 				      struct sk_buff *skb)
228 {
229 }
230 
231 static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst,
232 					 unsigned long old)
233 {
234 	return NULL;
235 }
236 
237 static struct dst_ops ip6_dst_blackhole_ops = {
238 	.family			=	AF_INET6,
239 	.protocol		=	cpu_to_be16(ETH_P_IPV6),
240 	.destroy		=	ip6_dst_destroy,
241 	.check			=	ip6_dst_check,
242 	.mtu			=	ip6_blackhole_mtu,
243 	.default_advmss		=	ip6_default_advmss,
244 	.update_pmtu		=	ip6_rt_blackhole_update_pmtu,
245 	.redirect		=	ip6_rt_blackhole_redirect,
246 	.cow_metrics		=	ip6_rt_blackhole_cow_metrics,
247 	.neigh_lookup		=	ip6_neigh_lookup,
248 };
249 
250 static const u32 ip6_template_metrics[RTAX_MAX] = {
251 	[RTAX_HOPLIMIT - 1] = 0,
252 };
253 
254 static const struct rt6_info ip6_null_entry_template = {
255 	.dst = {
256 		.__refcnt	= ATOMIC_INIT(1),
257 		.__use		= 1,
258 		.obsolete	= DST_OBSOLETE_FORCE_CHK,
259 		.error		= -ENETUNREACH,
260 		.input		= ip6_pkt_discard,
261 		.output		= ip6_pkt_discard_out,
262 	},
263 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
264 	.rt6i_protocol  = RTPROT_KERNEL,
265 	.rt6i_metric	= ~(u32) 0,
266 	.rt6i_ref	= ATOMIC_INIT(1),
267 };
268 
269 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
270 
271 static const struct rt6_info ip6_prohibit_entry_template = {
272 	.dst = {
273 		.__refcnt	= ATOMIC_INIT(1),
274 		.__use		= 1,
275 		.obsolete	= DST_OBSOLETE_FORCE_CHK,
276 		.error		= -EACCES,
277 		.input		= ip6_pkt_prohibit,
278 		.output		= ip6_pkt_prohibit_out,
279 	},
280 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
281 	.rt6i_protocol  = RTPROT_KERNEL,
282 	.rt6i_metric	= ~(u32) 0,
283 	.rt6i_ref	= ATOMIC_INIT(1),
284 };
285 
286 static const struct rt6_info ip6_blk_hole_entry_template = {
287 	.dst = {
288 		.__refcnt	= ATOMIC_INIT(1),
289 		.__use		= 1,
290 		.obsolete	= DST_OBSOLETE_FORCE_CHK,
291 		.error		= -EINVAL,
292 		.input		= dst_discard,
293 		.output		= dst_discard_sk,
294 	},
295 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
296 	.rt6i_protocol  = RTPROT_KERNEL,
297 	.rt6i_metric	= ~(u32) 0,
298 	.rt6i_ref	= ATOMIC_INIT(1),
299 };
300 
301 #endif
302 
303 /* allocate dst with ip6_dst_ops */
304 static inline struct rt6_info *ip6_dst_alloc(struct net *net,
305 					     struct net_device *dev,
306 					     int flags,
307 					     struct fib6_table *table)
308 {
309 	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
310 					0, DST_OBSOLETE_FORCE_CHK, flags);
311 
312 	if (rt) {
313 		struct dst_entry *dst = &rt->dst;
314 
315 		memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
316 		rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
317 		INIT_LIST_HEAD(&rt->rt6i_siblings);
318 	}
319 	return rt;
320 }
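/* Note: the memset() above clears everything in struct rt6_info that
 * follows the embedded dst_entry, so all rt6i_* fields start out zeroed
 * before the peer pointer and the sibling list are set up.
 */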
321 
322 static void ip6_dst_destroy(struct dst_entry *dst)
323 {
324 	struct rt6_info *rt = (struct rt6_info *)dst;
325 	struct inet6_dev *idev = rt->rt6i_idev;
326 	struct dst_entry *from = dst->from;
327 
328 	if (!(rt->dst.flags & DST_HOST))
329 		dst_destroy_metrics_generic(dst);
330 
331 	if (idev) {
332 		rt->rt6i_idev = NULL;
333 		in6_dev_put(idev);
334 	}
335 
336 	dst->from = NULL;
337 	dst_release(from);
338 
339 	if (rt6_has_peer(rt)) {
340 		struct inet_peer *peer = rt6_peer_ptr(rt);
341 		inet_putpeer(peer);
342 	}
343 }
344 
345 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
346 			   int how)
347 {
348 	struct rt6_info *rt = (struct rt6_info *)dst;
349 	struct inet6_dev *idev = rt->rt6i_idev;
350 	struct net_device *loopback_dev =
351 		dev_net(dev)->loopback_dev;
352 
353 	if (dev != loopback_dev) {
354 		if (idev && idev->dev == dev) {
355 			struct inet6_dev *loopback_idev =
356 				in6_dev_get(loopback_dev);
357 			if (loopback_idev) {
358 				rt->rt6i_idev = loopback_idev;
359 				in6_dev_put(idev);
360 			}
361 		}
362 	}
363 }
364 
365 static bool rt6_check_expired(const struct rt6_info *rt)
366 {
367 	if (rt->rt6i_flags & RTF_EXPIRES) {
368 		if (time_after(jiffies, rt->dst.expires))
369 			return true;
370 	} else if (rt->dst.from) {
371 		return rt6_check_expired((struct rt6_info *) rt->dst.from);
372 	}
373 	return false;
374 }
375 
376 /* Multipath route selection:
377  *   Hash-based function using the packet header and flowlabel.
378  * Adapted from fib_info_hashfn()
379  */
380 static int rt6_info_hash_nhsfn(unsigned int candidate_count,
381 			       const struct flowi6 *fl6)
382 {
383 	unsigned int val = fl6->flowi6_proto;
384 
385 	val ^= ipv6_addr_hash(&fl6->daddr);
386 	val ^= ipv6_addr_hash(&fl6->saddr);
387 
388 	/* Works only if this is not encapsulated */
389 	switch (fl6->flowi6_proto) {
390 	case IPPROTO_UDP:
391 	case IPPROTO_TCP:
392 	case IPPROTO_SCTP:
393 		val ^= (__force u16)fl6->fl6_sport;
394 		val ^= (__force u16)fl6->fl6_dport;
395 		break;
396 
397 	case IPPROTO_ICMPV6:
398 		val ^= (__force u16)fl6->fl6_icmp_type;
399 		val ^= (__force u16)fl6->fl6_icmp_code;
400 		break;
401 	}
402 	/* RFC 6438 recommends using the flowlabel */
403 	val ^= (__force u32)fl6->flowlabel;
404 
405 	/* Perhaps we need to tune this function? */
406 	val = val ^ (val >> 7) ^ (val >> 12);
407 	return val % candidate_count;
408 }
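/* Illustrative example: with three equal-cost siblings (candidate_count
 * == 3) the mixed value is reduced with "val % 3", so packets of one flow
 * (same addresses, ports/ICMP fields and flowlabel) always pick the same
 * next hop, while distinct flows spread across all three.
 */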
409 
410 static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
411 					     struct flowi6 *fl6, int oif,
412 					     int strict)
413 {
414 	struct rt6_info *sibling, *next_sibling;
415 	int route_chosen;
416 
417 	route_chosen = rt6_info_hash_nhsfn(match->rt6i_nsiblings + 1, fl6);
418 	/* Don't change the route if route_chosen == 0
419 	 * (the siblings list does not include ourself)
420 	 */
421 	if (route_chosen)
422 		list_for_each_entry_safe(sibling, next_sibling,
423 				&match->rt6i_siblings, rt6i_siblings) {
424 			route_chosen--;
425 			if (route_chosen == 0) {
426 				if (rt6_score_route(sibling, oif, strict) < 0)
427 					break;
428 				match = sibling;
429 				break;
430 			}
431 		}
432 	return match;
433 }
434 
435 /*
436  *	Route lookup. Any table->tb6_lock is implied.
437  */
438 
439 static inline struct rt6_info *rt6_device_match(struct net *net,
440 						    struct rt6_info *rt,
441 						    const struct in6_addr *saddr,
442 						    int oif,
443 						    int flags)
444 {
445 	struct rt6_info *local = NULL;
446 	struct rt6_info *sprt;
447 
448 	if (!oif && ipv6_addr_any(saddr))
449 		goto out;
450 
451 	for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
452 		struct net_device *dev = sprt->dst.dev;
453 
454 		if (oif) {
455 			if (dev->ifindex == oif)
456 				return sprt;
457 			if (dev->flags & IFF_LOOPBACK) {
458 				if (!sprt->rt6i_idev ||
459 				    sprt->rt6i_idev->dev->ifindex != oif) {
460 					if (flags & RT6_LOOKUP_F_IFACE && oif)
461 						continue;
462 					if (local && (!oif ||
463 						      local->rt6i_idev->dev->ifindex == oif))
464 						continue;
465 				}
466 				local = sprt;
467 			}
468 		} else {
469 			if (ipv6_chk_addr(net, saddr, dev,
470 					  flags & RT6_LOOKUP_F_IFACE))
471 				return sprt;
472 		}
473 	}
474 
475 	if (oif) {
476 		if (local)
477 			return local;
478 
479 		if (flags & RT6_LOOKUP_F_IFACE)
480 			return net->ipv6.ip6_null_entry;
481 	}
482 out:
483 	return rt;
484 }
485 
486 #ifdef CONFIG_IPV6_ROUTER_PREF
487 struct __rt6_probe_work {
488 	struct work_struct work;
489 	struct in6_addr target;
490 	struct net_device *dev;
491 };
492 
493 static void rt6_probe_deferred(struct work_struct *w)
494 {
495 	struct in6_addr mcaddr;
496 	struct __rt6_probe_work *work =
497 		container_of(w, struct __rt6_probe_work, work);
498 
499 	addrconf_addr_solict_mult(&work->target, &mcaddr);
500 	ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL);
501 	dev_put(work->dev);
502 	kfree(w);
503 }
504 
505 static void rt6_probe(struct rt6_info *rt)
506 {
507 	struct neighbour *neigh;
508 	/*
509 	 * Okay, this does not seem to be appropriate
510 	 * for now; however, we need to check whether it
511 	 * really is so, aka Router Reachability Probing.
512 	 *
513 	 * A Router Reachability Probe MUST be rate-limited
514 	 * to no more than one per minute.
515 	 */
516 	if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
517 		return;
518 	rcu_read_lock_bh();
519 	neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
520 	if (neigh) {
521 		write_lock(&neigh->lock);
522 		if (neigh->nud_state & NUD_VALID)
523 			goto out;
524 	}
525 
526 	if (!neigh ||
527 	    time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
528 		struct __rt6_probe_work *work;
529 
530 		work = kmalloc(sizeof(*work), GFP_ATOMIC);
531 
532 		if (neigh && work)
533 			__neigh_set_probe_once(neigh);
534 
535 		if (neigh)
536 			write_unlock(&neigh->lock);
537 
538 		if (work) {
539 			INIT_WORK(&work->work, rt6_probe_deferred);
540 			work->target = rt->rt6i_gateway;
541 			dev_hold(rt->dst.dev);
542 			work->dev = rt->dst.dev;
543 			schedule_work(&work->work);
544 		}
545 	} else {
546 out:
547 		write_unlock(&neigh->lock);
548 	}
549 	rcu_read_unlock_bh();
550 }
551 #else
552 static inline void rt6_probe(struct rt6_info *rt)
553 {
554 }
555 #endif
556 
557 /*
558  * Default Router Selection (RFC 2461 6.3.6)
559  */
560 static inline int rt6_check_dev(struct rt6_info *rt, int oif)
561 {
562 	struct net_device *dev = rt->dst.dev;
563 	if (!oif || dev->ifindex == oif)
564 		return 2;
565 	if ((dev->flags & IFF_LOOPBACK) &&
566 	    rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
567 		return 1;
568 	return 0;
569 }
570 
571 static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
572 {
573 	struct neighbour *neigh;
574 	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
575 
576 	if (rt->rt6i_flags & RTF_NONEXTHOP ||
577 	    !(rt->rt6i_flags & RTF_GATEWAY))
578 		return RT6_NUD_SUCCEED;
579 
580 	rcu_read_lock_bh();
581 	neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
582 	if (neigh) {
583 		read_lock(&neigh->lock);
584 		if (neigh->nud_state & NUD_VALID)
585 			ret = RT6_NUD_SUCCEED;
586 #ifdef CONFIG_IPV6_ROUTER_PREF
587 		else if (!(neigh->nud_state & NUD_FAILED))
588 			ret = RT6_NUD_SUCCEED;
589 		else
590 			ret = RT6_NUD_FAIL_PROBE;
591 #endif
592 		read_unlock(&neigh->lock);
593 	} else {
594 		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
595 		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
596 	}
597 	rcu_read_unlock_bh();
598 
599 	return ret;
600 }
601 
602 static int rt6_score_route(struct rt6_info *rt, int oif,
603 			   int strict)
604 {
605 	int m;
606 
607 	m = rt6_check_dev(rt, oif);
608 	if (!m && (strict & RT6_LOOKUP_F_IFACE))
609 		return RT6_NUD_FAIL_HARD;
610 #ifdef CONFIG_IPV6_ROUTER_PREF
611 	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
612 #endif
613 	if (strict & RT6_LOOKUP_F_REACHABLE) {
614 		int n = rt6_check_neigh(rt);
615 		if (n < 0)
616 			return n;
617 	}
618 	return m;
619 }
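/* The score packs the interface match (0-2) into the low bits and, when
 * CONFIG_IPV6_ROUTER_PREF is enabled, the decoded RA preference above
 * them, so a preferred router on the requested interface wins when
 * find_match() compares scores.
 */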
620 
621 static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
622 				   int *mpri, struct rt6_info *match,
623 				   bool *do_rr)
624 {
625 	int m;
626 	bool match_do_rr = false;
627 
628 	if (rt6_check_expired(rt))
629 		goto out;
630 
631 	m = rt6_score_route(rt, oif, strict);
632 	if (m == RT6_NUD_FAIL_DO_RR) {
633 		match_do_rr = true;
634 		m = 0; /* lowest valid score */
635 	} else if (m == RT6_NUD_FAIL_HARD) {
636 		goto out;
637 	}
638 
639 	if (strict & RT6_LOOKUP_F_REACHABLE)
640 		rt6_probe(rt);
641 
642 	/* note that m can be RT6_NUD_FAIL_PROBE at this point */
643 	if (m > *mpri) {
644 		*do_rr = match_do_rr;
645 		*mpri = m;
646 		match = rt;
647 	}
648 out:
649 	return match;
650 }
651 
652 static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
653 				     struct rt6_info *rr_head,
654 				     u32 metric, int oif, int strict,
655 				     bool *do_rr)
656 {
657 	struct rt6_info *rt, *match;
658 	int mpri = -1;
659 
660 	match = NULL;
661 	for (rt = rr_head; rt && rt->rt6i_metric == metric;
662 	     rt = rt->dst.rt6_next)
663 		match = find_match(rt, oif, strict, &mpri, match, do_rr);
664 	for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
665 	     rt = rt->dst.rt6_next)
666 		match = find_match(rt, oif, strict, &mpri, match, do_rr);
667 
668 	return match;
669 }
670 
671 static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
672 {
673 	struct rt6_info *match, *rt0;
674 	struct net *net;
675 	bool do_rr = false;
676 
677 	rt0 = fn->rr_ptr;
678 	if (!rt0)
679 		fn->rr_ptr = rt0 = fn->leaf;
680 
681 	match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict,
682 			     &do_rr);
683 
684 	if (do_rr) {
685 		struct rt6_info *next = rt0->dst.rt6_next;
686 
687 		/* no entries matched; do round-robin */
688 		if (!next || next->rt6i_metric != rt0->rt6i_metric)
689 			next = fn->leaf;
690 
691 		if (next != rt0)
692 			fn->rr_ptr = next;
693 	}
694 
695 	net = dev_net(rt0->dst.dev);
696 	return match ? match : net->ipv6.ip6_null_entry;
697 }
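/* When do_rr is set, fn->rr_ptr is advanced to the next route of equal
 * metric (wrapping to fn->leaf), so subsequent lookups start from a
 * different router and the default routers are used round-robin.
 */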
698 
699 #ifdef CONFIG_IPV6_ROUTE_INFO
700 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
701 		  const struct in6_addr *gwaddr)
702 {
703 	struct net *net = dev_net(dev);
704 	struct route_info *rinfo = (struct route_info *) opt;
705 	struct in6_addr prefix_buf, *prefix;
706 	unsigned int pref;
707 	unsigned long lifetime;
708 	struct rt6_info *rt;
709 
710 	if (len < sizeof(struct route_info)) {
711 		return -EINVAL;
712 	}
713 
714 	/* Sanity check for prefix_len and length */
715 	if (rinfo->length > 3) {
716 		return -EINVAL;
717 	} else if (rinfo->prefix_len > 128) {
718 		return -EINVAL;
719 	} else if (rinfo->prefix_len > 64) {
720 		if (rinfo->length < 2) {
721 			return -EINVAL;
722 		}
723 	} else if (rinfo->prefix_len > 0) {
724 		if (rinfo->length < 1) {
725 			return -EINVAL;
726 		}
727 	}
728 
729 	pref = rinfo->route_pref;
730 	if (pref == ICMPV6_ROUTER_PREF_INVALID)
731 		return -EINVAL;
732 
733 	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
734 
735 	if (rinfo->length == 3)
736 		prefix = (struct in6_addr *)rinfo->prefix;
737 	else {
738 		/* ipv6_addr_prefix() copies only prefix_len bits, so this is safe */
739 		ipv6_addr_prefix(&prefix_buf,
740 				 (struct in6_addr *)rinfo->prefix,
741 				 rinfo->prefix_len);
742 		prefix = &prefix_buf;
743 	}
744 
745 	if (rinfo->prefix_len == 0)
746 		rt = rt6_get_dflt_router(gwaddr, dev);
747 	else
748 		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
749 					gwaddr, dev->ifindex);
750 
751 	if (rt && !lifetime) {
752 		ip6_del_rt(rt);
753 		rt = NULL;
754 	}
755 
756 	if (!rt && lifetime)
757 		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
758 					pref);
759 	else if (rt)
760 		rt->rt6i_flags = RTF_ROUTEINFO |
761 				 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
762 
763 	if (rt) {
764 		if (!addrconf_finite_timeout(lifetime))
765 			rt6_clean_expires(rt);
766 		else
767 			rt6_set_expires(rt, jiffies + HZ * lifetime);
768 
769 		ip6_rt_put(rt);
770 	}
771 	return 0;
772 }
773 #endif
774 
775 static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
776 					struct in6_addr *saddr)
777 {
778 	struct fib6_node *pn;
779 	while (1) {
780 		if (fn->fn_flags & RTN_TL_ROOT)
781 			return NULL;
782 		pn = fn->parent;
783 		if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn)
784 			fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr);
785 		else
786 			fn = pn;
787 		if (fn->fn_flags & RTN_RTINFO)
788 			return fn;
789 	}
790 }
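/* fib6_backtrack() walks back up the trie after a dead end: it climbs to
 * the parent node, descends into a source-address subtree when one is
 * attached, and returns the first node it finds that actually carries
 * routes (RTN_RTINFO), or NULL once the table root is reached.
 */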
791 
792 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
793 					     struct fib6_table *table,
794 					     struct flowi6 *fl6, int flags)
795 {
796 	struct fib6_node *fn;
797 	struct rt6_info *rt;
798 
799 	read_lock_bh(&table->tb6_lock);
800 	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
801 restart:
802 	rt = fn->leaf;
803 	rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
804 	if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
805 		rt = rt6_multipath_select(rt, fl6, fl6->flowi6_oif, flags);
806 	if (rt == net->ipv6.ip6_null_entry) {
807 		fn = fib6_backtrack(fn, &fl6->saddr);
808 		if (fn)
809 			goto restart;
810 	}
811 	dst_use(&rt->dst, jiffies);
812 	read_unlock_bh(&table->tb6_lock);
813 	return rt;
814 
815 }
816 
817 struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
818 				    int flags)
819 {
820 	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
821 }
822 EXPORT_SYMBOL_GPL(ip6_route_lookup);
823 
824 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
825 			    const struct in6_addr *saddr, int oif, int strict)
826 {
827 	struct flowi6 fl6 = {
828 		.flowi6_oif = oif,
829 		.daddr = *daddr,
830 	};
831 	struct dst_entry *dst;
832 	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
833 
834 	if (saddr) {
835 		memcpy(&fl6.saddr, saddr, sizeof(*saddr));
836 		flags |= RT6_LOOKUP_F_HAS_SADDR;
837 	}
838 
839 	dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
840 	if (dst->error == 0)
841 		return (struct rt6_info *) dst;
842 
843 	dst_release(dst);
844 
845 	return NULL;
846 }
847 EXPORT_SYMBOL(rt6_lookup);
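/* Illustrative use only (not taken from a real caller): a user that needs
 * the route towards a destination on a known interface might do
 *
 *	struct rt6_info *rt = rt6_lookup(net, &daddr, NULL, dev->ifindex, 0);
 *	if (rt) {
 *		... inspect rt->dst.dev or rt->rt6i_gateway ...
 *		ip6_rt_put(rt);
 *	}
 *
 * rt6_lookup() returns NULL on failure and holds a reference on success,
 * which the caller must drop with ip6_rt_put().
 */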
848 
849 /* ip6_ins_rt is called with table->tb6_lock NOT held.
850    It takes a new route entry; if the addition fails for any reason,
851    the route is freed. In any case, if the caller does not hold a
852    reference, the route may be destroyed.
853  */
854 
855 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
856 			struct nlattr *mx, int mx_len)
857 {
858 	int err;
859 	struct fib6_table *table;
860 
861 	table = rt->rt6i_table;
862 	write_lock_bh(&table->tb6_lock);
863 	err = fib6_add(&table->tb6_root, rt, info, mx, mx_len);
864 	write_unlock_bh(&table->tb6_lock);
865 
866 	return err;
867 }
868 
869 int ip6_ins_rt(struct rt6_info *rt)
870 {
871 	struct nl_info info = {
872 		.nl_net = dev_net(rt->dst.dev),
873 	};
874 	return __ip6_ins_rt(rt, &info, NULL, 0);
875 }
876 
877 static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
878 				      const struct in6_addr *daddr,
879 				      const struct in6_addr *saddr)
880 {
881 	struct rt6_info *rt;
882 
883 	/*
884 	 *	Clone the route.
885 	 */
886 
887 	rt = ip6_rt_copy(ort, daddr);
888 
889 	if (rt) {
890 		if (ort->rt6i_dst.plen != 128 &&
891 		    ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
892 			rt->rt6i_flags |= RTF_ANYCAST;
893 
894 		rt->rt6i_flags |= RTF_CACHE;
895 
896 #ifdef CONFIG_IPV6_SUBTREES
897 		if (rt->rt6i_src.plen && saddr) {
898 			rt->rt6i_src.addr = *saddr;
899 			rt->rt6i_src.plen = 128;
900 		}
901 #endif
902 	}
903 
904 	return rt;
905 }
906 
907 static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
908 					const struct in6_addr *daddr)
909 {
910 	struct rt6_info *rt = ip6_rt_copy(ort, daddr);
911 
912 	if (rt)
913 		rt->rt6i_flags |= RTF_CACHE;
914 	return rt;
915 }
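/* Both helpers above make per-destination RTF_CACHE clones of a FIB route.
 * rt6_alloc_cow(), used by ip6_pol_route() for routes without a gateway,
 * may also mark the clone RTF_ANYCAST and (with CONFIG_IPV6_SUBTREES) pin
 * the source key to a /128; rt6_alloc_clone() merely copies the route and
 * tags it RTF_CACHE.
 */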
916 
917 static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
918 				      struct flowi6 *fl6, int flags)
919 {
920 	struct fib6_node *fn, *saved_fn;
921 	struct rt6_info *rt, *nrt;
922 	int strict = 0;
923 	int attempts = 3;
924 	int err;
925 
926 	strict |= flags & RT6_LOOKUP_F_IFACE;
927 	if (net->ipv6.devconf_all->forwarding == 0)
928 		strict |= RT6_LOOKUP_F_REACHABLE;
929 
930 redo_fib6_lookup_lock:
931 	read_lock_bh(&table->tb6_lock);
932 
933 	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
934 	saved_fn = fn;
935 
936 redo_rt6_select:
937 	rt = rt6_select(fn, oif, strict);
938 	if (rt->rt6i_nsiblings)
939 		rt = rt6_multipath_select(rt, fl6, oif, strict);
940 	if (rt == net->ipv6.ip6_null_entry) {
941 		fn = fib6_backtrack(fn, &fl6->saddr);
942 		if (fn)
943 			goto redo_rt6_select;
944 		else if (strict & RT6_LOOKUP_F_REACHABLE) {
945 			/* also consider unreachable route */
946 			strict &= ~RT6_LOOKUP_F_REACHABLE;
947 			fn = saved_fn;
948 			goto redo_rt6_select;
949 		} else {
950 			dst_hold(&rt->dst);
951 			read_unlock_bh(&table->tb6_lock);
952 			goto out2;
953 		}
954 	}
955 
956 	dst_hold(&rt->dst);
957 	read_unlock_bh(&table->tb6_lock);
958 
959 	if (rt->rt6i_flags & RTF_CACHE)
960 		goto out2;
961 
962 	if (!(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY)))
963 		nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
964 	else if (!(rt->dst.flags & DST_HOST))
965 		nrt = rt6_alloc_clone(rt, &fl6->daddr);
966 	else
967 		goto out2;
968 
969 	ip6_rt_put(rt);
970 	rt = nrt ? : net->ipv6.ip6_null_entry;
971 
972 	dst_hold(&rt->dst);
973 	if (nrt) {
974 		err = ip6_ins_rt(nrt);
975 		if (!err)
976 			goto out2;
977 	}
978 
979 	if (--attempts <= 0)
980 		goto out2;
981 
982 	/*
983 	 * Race condition! In the gap while table->tb6_lock was
984 	 * released, someone could have inserted this route. Look it up again.
985 	 */
986 	ip6_rt_put(rt);
987 	goto redo_fib6_lookup_lock;
988 
989 out2:
990 	rt->dst.lastuse = jiffies;
991 	rt->dst.__use++;
992 
993 	return rt;
994 }
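/* Summary of ip6_pol_route(): select the best route for the flow
 * (retrying without RT6_LOOKUP_F_REACHABLE if nothing reachable matches),
 * and, unless the result is already an RTF_CACHE entry, create a clone
 * and insert it into the table.  If that insert races with a concurrent
 * insertion the whole lookup is redone, at most three times.
 */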
995 
996 static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
997 					    struct flowi6 *fl6, int flags)
998 {
999 	return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
1000 }
1001 
1002 static struct dst_entry *ip6_route_input_lookup(struct net *net,
1003 						struct net_device *dev,
1004 						struct flowi6 *fl6, int flags)
1005 {
1006 	if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
1007 		flags |= RT6_LOOKUP_F_IFACE;
1008 
1009 	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
1010 }
1011 
1012 void ip6_route_input(struct sk_buff *skb)
1013 {
1014 	const struct ipv6hdr *iph = ipv6_hdr(skb);
1015 	struct net *net = dev_net(skb->dev);
1016 	int flags = RT6_LOOKUP_F_HAS_SADDR;
1017 	struct flowi6 fl6 = {
1018 		.flowi6_iif = skb->dev->ifindex,
1019 		.daddr = iph->daddr,
1020 		.saddr = iph->saddr,
1021 		.flowlabel = ip6_flowinfo(iph),
1022 		.flowi6_mark = skb->mark,
1023 		.flowi6_proto = iph->nexthdr,
1024 	};
1025 
1026 	skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
1027 }
1028 
1029 static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
1030 					     struct flowi6 *fl6, int flags)
1031 {
1032 	return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
1033 }
1034 
1035 struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
1036 				    struct flowi6 *fl6)
1037 {
1038 	int flags = 0;
1039 
1040 	fl6->flowi6_iif = LOOPBACK_IFINDEX;
1041 
1042 	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
1043 		flags |= RT6_LOOKUP_F_IFACE;
1044 
1045 	if (!ipv6_addr_any(&fl6->saddr))
1046 		flags |= RT6_LOOKUP_F_HAS_SADDR;
1047 	else if (sk)
1048 		flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
1049 
1050 	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
1051 }
1052 EXPORT_SYMBOL(ip6_route_output);
1053 
1054 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
1055 {
1056 	struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
1057 	struct dst_entry *new = NULL;
1058 
1059 	rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0);
1060 	if (rt) {
1061 		new = &rt->dst;
1062 
1063 		memset(new + 1, 0, sizeof(*rt) - sizeof(*new));
1064 		rt6_init_peer(rt, net->ipv6.peers);
1065 
1066 		new->__use = 1;
1067 		new->input = dst_discard;
1068 		new->output = dst_discard_sk;
1069 
1070 		if (dst_metrics_read_only(&ort->dst))
1071 			new->_metrics = ort->dst._metrics;
1072 		else
1073 			dst_copy_metrics(new, &ort->dst);
1074 		rt->rt6i_idev = ort->rt6i_idev;
1075 		if (rt->rt6i_idev)
1076 			in6_dev_hold(rt->rt6i_idev);
1077 
1078 		rt->rt6i_gateway = ort->rt6i_gateway;
1079 		rt->rt6i_flags = ort->rt6i_flags;
1080 		rt->rt6i_metric = 0;
1081 
1082 		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
1083 #ifdef CONFIG_IPV6_SUBTREES
1084 		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1085 #endif
1086 
1087 		dst_free(new);
1088 	}
1089 
1090 	dst_release(dst_orig);
1091 	return new ? new : ERR_PTR(-ENOMEM);
1092 }
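/* The blackhole copy above keeps the identity of the original route (keys,
 * gateway, flags, metrics) but uses ip6_dst_blackhole_ops, so its input
 * and output handlers simply discard packets, while PMTU updates and
 * redirects become no-ops (see the ops near the top of this file).
 */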
1093 
1094 /*
1095  *	Destination cache support functions
1096  */
1097 
1098 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
1099 {
1100 	struct rt6_info *rt;
1101 
1102 	rt = (struct rt6_info *) dst;
1103 
1104 	/* All IPv6 dsts are created with ->obsolete set to
1105 	 * DST_OBSOLETE_FORCE_CHK, which always forces validation calls
1106 	 * down into this function.
1107 	 */
1108 	if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
1109 		return NULL;
1110 
1111 	if (rt6_check_expired(rt))
1112 		return NULL;
1113 
1114 	return dst;
1115 }
1116 
1117 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
1118 {
1119 	struct rt6_info *rt = (struct rt6_info *) dst;
1120 
1121 	if (rt) {
1122 		if (rt->rt6i_flags & RTF_CACHE) {
1123 			if (rt6_check_expired(rt)) {
1124 				ip6_del_rt(rt);
1125 				dst = NULL;
1126 			}
1127 		} else {
1128 			dst_release(dst);
1129 			dst = NULL;
1130 		}
1131 	}
1132 	return dst;
1133 }
1134 
1135 static void ip6_link_failure(struct sk_buff *skb)
1136 {
1137 	struct rt6_info *rt;
1138 
1139 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
1140 
1141 	rt = (struct rt6_info *) skb_dst(skb);
1142 	if (rt) {
1143 		if (rt->rt6i_flags & RTF_CACHE) {
1144 			dst_hold(&rt->dst);
1145 			if (ip6_del_rt(rt))
1146 				dst_free(&rt->dst);
1147 		} else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
1148 			rt->rt6i_node->fn_sernum = -1;
1149 		}
1150 	}
1151 }
1152 
1153 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1154 			       struct sk_buff *skb, u32 mtu)
1155 {
1156 	struct rt6_info *rt6 = (struct rt6_info *)dst;
1157 
1158 	dst_confirm(dst);
1159 	if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
1160 		struct net *net = dev_net(dst->dev);
1161 
1162 		rt6->rt6i_flags |= RTF_MODIFIED;
1163 		if (mtu < IPV6_MIN_MTU) {
1164 			u32 features = dst_metric(dst, RTAX_FEATURES);
1165 			mtu = IPV6_MIN_MTU;
1166 			features |= RTAX_FEATURE_ALLFRAG;
1167 			dst_metric_set(dst, RTAX_FEATURES, features);
1168 		}
1169 		dst_metric_set(dst, RTAX_MTU, mtu);
1170 		rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
1171 	}
1172 }
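/* Note on the clamp above: the path MTU is never recorded below
 * IPV6_MIN_MTU (1280 bytes); instead RTAX_FEATURE_ALLFRAG is set so that
 * upper layers insert a fragment header, which is how IPv6 honours
 * reported MTUs smaller than the protocol minimum.
 */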
1173 
1174 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
1175 		     int oif, u32 mark)
1176 {
1177 	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1178 	struct dst_entry *dst;
1179 	struct flowi6 fl6;
1180 
1181 	memset(&fl6, 0, sizeof(fl6));
1182 	fl6.flowi6_oif = oif;
1183 	fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
1184 	fl6.daddr = iph->daddr;
1185 	fl6.saddr = iph->saddr;
1186 	fl6.flowlabel = ip6_flowinfo(iph);
1187 
1188 	dst = ip6_route_output(net, NULL, &fl6);
1189 	if (!dst->error)
1190 		ip6_rt_update_pmtu(dst, NULL, skb, ntohl(mtu));
1191 	dst_release(dst);
1192 }
1193 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
1194 
1195 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
1196 {
1197 	ip6_update_pmtu(skb, sock_net(sk), mtu,
1198 			sk->sk_bound_dev_if, sk->sk_mark);
1199 }
1200 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
1201 
1202 /* Handle redirects */
1203 struct ip6rd_flowi {
1204 	struct flowi6 fl6;
1205 	struct in6_addr gateway;
1206 };
1207 
1208 static struct rt6_info *__ip6_route_redirect(struct net *net,
1209 					     struct fib6_table *table,
1210 					     struct flowi6 *fl6,
1211 					     int flags)
1212 {
1213 	struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
1214 	struct rt6_info *rt;
1215 	struct fib6_node *fn;
1216 
1217 	/* Get the "current" route for this destination and
1218 	 * check if the redirect has come from the appropriate router.
1219 	 *
1220 	 * RFC 4861 specifies that redirects should only be
1221 	 * accepted if they come from the nexthop to the target.
1222 	 * Due to the way the routes are chosen, this notion
1223 	 * is a bit fuzzy and one might need to check all possible
1224 	 * routes.
1225 	 */
1226 
1227 	read_lock_bh(&table->tb6_lock);
1228 	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1229 restart:
1230 	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1231 		if (rt6_check_expired(rt))
1232 			continue;
1233 		if (rt->dst.error)
1234 			break;
1235 		if (!(rt->rt6i_flags & RTF_GATEWAY))
1236 			continue;
1237 		if (fl6->flowi6_oif != rt->dst.dev->ifindex)
1238 			continue;
1239 		if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
1240 			continue;
1241 		break;
1242 	}
1243 
1244 	if (!rt)
1245 		rt = net->ipv6.ip6_null_entry;
1246 	else if (rt->dst.error) {
1247 		rt = net->ipv6.ip6_null_entry;
1248 	} else if (rt == net->ipv6.ip6_null_entry) {
1249 		fn = fib6_backtrack(fn, &fl6->saddr);
1250 		if (fn)
1251 			goto restart;
1252 	}
1253 
1254 	dst_hold(&rt->dst);
1255 
1256 	read_unlock_bh(&table->tb6_lock);
1257 
1258 	return rt;
1259 }
1260 
1261 static struct dst_entry *ip6_route_redirect(struct net *net,
1262 					const struct flowi6 *fl6,
1263 					const struct in6_addr *gateway)
1264 {
1265 	int flags = RT6_LOOKUP_F_HAS_SADDR;
1266 	struct ip6rd_flowi rdfl;
1267 
1268 	rdfl.fl6 = *fl6;
1269 	rdfl.gateway = *gateway;
1270 
1271 	return fib6_rule_lookup(net, &rdfl.fl6,
1272 				flags, __ip6_route_redirect);
1273 }
1274 
1275 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
1276 {
1277 	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1278 	struct dst_entry *dst;
1279 	struct flowi6 fl6;
1280 
1281 	memset(&fl6, 0, sizeof(fl6));
1282 	fl6.flowi6_iif = LOOPBACK_IFINDEX;
1283 	fl6.flowi6_oif = oif;
1284 	fl6.flowi6_mark = mark;
1285 	fl6.daddr = iph->daddr;
1286 	fl6.saddr = iph->saddr;
1287 	fl6.flowlabel = ip6_flowinfo(iph);
1288 
1289 	dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr);
1290 	rt6_do_redirect(dst, NULL, skb);
1291 	dst_release(dst);
1292 }
1293 EXPORT_SYMBOL_GPL(ip6_redirect);
1294 
1295 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
1296 			    u32 mark)
1297 {
1298 	const struct ipv6hdr *iph = ipv6_hdr(skb);
1299 	const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
1300 	struct dst_entry *dst;
1301 	struct flowi6 fl6;
1302 
1303 	memset(&fl6, 0, sizeof(fl6));
1304 	fl6.flowi6_iif = LOOPBACK_IFINDEX;
1305 	fl6.flowi6_oif = oif;
1306 	fl6.flowi6_mark = mark;
1307 	fl6.daddr = msg->dest;
1308 	fl6.saddr = iph->daddr;
1309 
1310 	dst = ip6_route_redirect(net, &fl6, &iph->saddr);
1311 	rt6_do_redirect(dst, NULL, skb);
1312 	dst_release(dst);
1313 }
1314 
1315 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
1316 {
1317 	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);
1318 }
1319 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
1320 
1321 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
1322 {
1323 	struct net_device *dev = dst->dev;
1324 	unsigned int mtu = dst_mtu(dst);
1325 	struct net *net = dev_net(dev);
1326 
1327 	mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1328 
1329 	if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
1330 		mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
1331 
1332 	/*
1333 	 * The maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and the
1334 	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
1335 	 * IPV6_MAXPLEN is also valid and means: "any MSS,
1336 	 * rely only on PMTU discovery".
1337 	 */
1338 	if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
1339 		mtu = IPV6_MAXPLEN;
1340 	return mtu;
1341 }
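/* Worked example: on a standard Ethernet device with an MTU of 1500, the
 * advertised MSS becomes 1500 - sizeof(struct ipv6hdr) (40) -
 * sizeof(struct tcphdr) (20) = 1440 bytes, unless the ip6_rt_min_advmss
 * sysctl demands a higher floor.
 */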
1342 
1343 static unsigned int ip6_mtu(const struct dst_entry *dst)
1344 {
1345 	struct inet6_dev *idev;
1346 	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
1347 
1348 	if (mtu)
1349 		goto out;
1350 
1351 	mtu = IPV6_MIN_MTU;
1352 
1353 	rcu_read_lock();
1354 	idev = __in6_dev_get(dst->dev);
1355 	if (idev)
1356 		mtu = idev->cnf.mtu6;
1357 	rcu_read_unlock();
1358 
1359 out:
1360 	return min_t(unsigned int, mtu, IP6_MAX_MTU);
1361 }
1362 
1363 static struct dst_entry *icmp6_dst_gc_list;
1364 static DEFINE_SPINLOCK(icmp6_dst_lock);
1365 
1366 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1367 				  struct flowi6 *fl6)
1368 {
1369 	struct dst_entry *dst;
1370 	struct rt6_info *rt;
1371 	struct inet6_dev *idev = in6_dev_get(dev);
1372 	struct net *net = dev_net(dev);
1373 
1374 	if (unlikely(!idev))
1375 		return ERR_PTR(-ENODEV);
1376 
1377 	rt = ip6_dst_alloc(net, dev, 0, NULL);
1378 	if (unlikely(!rt)) {
1379 		in6_dev_put(idev);
1380 		dst = ERR_PTR(-ENOMEM);
1381 		goto out;
1382 	}
1383 
1384 	rt->dst.flags |= DST_HOST;
1385 	rt->dst.output  = ip6_output;
1386 	atomic_set(&rt->dst.__refcnt, 1);
1387 	rt->rt6i_gateway  = fl6->daddr;
1388 	rt->rt6i_dst.addr = fl6->daddr;
1389 	rt->rt6i_dst.plen = 128;
1390 	rt->rt6i_idev     = idev;
1391 	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
1392 
1393 	spin_lock_bh(&icmp6_dst_lock);
1394 	rt->dst.next = icmp6_dst_gc_list;
1395 	icmp6_dst_gc_list = &rt->dst;
1396 	spin_unlock_bh(&icmp6_dst_lock);
1397 
1398 	fib6_force_start_gc(net);
1399 
1400 	dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
1401 
1402 out:
1403 	return dst;
1404 }
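/* ICMPv6 dsts allocated above never enter the FIB; they are chained on
 * icmp6_dst_gc_list and reclaimed by icmp6_dst_gc() once unreferenced,
 * or swept by icmp6_clean_all() below.
 */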
1405 
1406 int icmp6_dst_gc(void)
1407 {
1408 	struct dst_entry *dst, **pprev;
1409 	int more = 0;
1410 
1411 	spin_lock_bh(&icmp6_dst_lock);
1412 	pprev = &icmp6_dst_gc_list;
1413 
1414 	while ((dst = *pprev) != NULL) {
1415 		if (!atomic_read(&dst->__refcnt)) {
1416 			*pprev = dst->next;
1417 			dst_free(dst);
1418 		} else {
1419 			pprev = &dst->next;
1420 			++more;
1421 		}
1422 	}
1423 
1424 	spin_unlock_bh(&icmp6_dst_lock);
1425 
1426 	return more;
1427 }
1428 
1429 static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
1430 			    void *arg)
1431 {
1432 	struct dst_entry *dst, **pprev;
1433 
1434 	spin_lock_bh(&icmp6_dst_lock);
1435 	pprev = &icmp6_dst_gc_list;
1436 	while ((dst = *pprev) != NULL) {
1437 		struct rt6_info *rt = (struct rt6_info *) dst;
1438 		if (func(rt, arg)) {
1439 			*pprev = dst->next;
1440 			dst_free(dst);
1441 		} else {
1442 			pprev = &dst->next;
1443 		}
1444 	}
1445 	spin_unlock_bh(&icmp6_dst_lock);
1446 }
1447 
1448 static int ip6_dst_gc(struct dst_ops *ops)
1449 {
1450 	struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
1451 	int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
1452 	int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
1453 	int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
1454 	int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
1455 	unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
1456 	int entries;
1457 
1458 	entries = dst_entries_get_fast(ops);
1459 	if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
1460 	    entries <= rt_max_size)
1461 		goto out;
1462 
1463 	net->ipv6.ip6_rt_gc_expire++;
1464 	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
1465 	entries = dst_entries_get_slow(ops);
1466 	if (entries < ops->gc_thresh)
1467 		net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
1468 out:
1469 	net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
1470 	return entries > rt_max_size;
1471 }
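/* The collector above only does real work once the minimum GC interval
 * has elapsed or the entry count exceeds ip6_rt_max_size.  The value in
 * ip6_rt_gc_expire is what gets handed to fib6_run_gc(); it is reset to
 * half the GC timeout once the cache drains below gc_thresh and decays a
 * little via the elasticity sysctl on every call.
 */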
1472 
1473 /*
1474  *	Route addition and removal
1475  */
1476 
1477 int ip6_route_add(struct fib6_config *cfg)
1478 {
1479 	int err;
1480 	struct net *net = cfg->fc_nlinfo.nl_net;
1481 	struct rt6_info *rt = NULL;
1482 	struct net_device *dev = NULL;
1483 	struct inet6_dev *idev = NULL;
1484 	struct fib6_table *table;
1485 	int addr_type;
1486 
1487 	if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
1488 		return -EINVAL;
1489 #ifndef CONFIG_IPV6_SUBTREES
1490 	if (cfg->fc_src_len)
1491 		return -EINVAL;
1492 #endif
1493 	if (cfg->fc_ifindex) {
1494 		err = -ENODEV;
1495 		dev = dev_get_by_index(net, cfg->fc_ifindex);
1496 		if (!dev)
1497 			goto out;
1498 		idev = in6_dev_get(dev);
1499 		if (!idev)
1500 			goto out;
1501 	}
1502 
1503 	if (cfg->fc_metric == 0)
1504 		cfg->fc_metric = IP6_RT_PRIO_USER;
1505 
1506 	err = -ENOBUFS;
1507 	if (cfg->fc_nlinfo.nlh &&
1508 	    !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
1509 		table = fib6_get_table(net, cfg->fc_table);
1510 		if (!table) {
1511 			pr_warn("NLM_F_CREATE should be specified when creating new route\n");
1512 			table = fib6_new_table(net, cfg->fc_table);
1513 		}
1514 	} else {
1515 		table = fib6_new_table(net, cfg->fc_table);
1516 	}
1517 
1518 	if (!table)
1519 		goto out;
1520 
1521 	rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table);
1522 
1523 	if (!rt) {
1524 		err = -ENOMEM;
1525 		goto out;
1526 	}
1527 
1528 	if (cfg->fc_flags & RTF_EXPIRES)
1529 		rt6_set_expires(rt, jiffies +
1530 				clock_t_to_jiffies(cfg->fc_expires));
1531 	else
1532 		rt6_clean_expires(rt);
1533 
1534 	if (cfg->fc_protocol == RTPROT_UNSPEC)
1535 		cfg->fc_protocol = RTPROT_BOOT;
1536 	rt->rt6i_protocol = cfg->fc_protocol;
1537 
1538 	addr_type = ipv6_addr_type(&cfg->fc_dst);
1539 
1540 	if (addr_type & IPV6_ADDR_MULTICAST)
1541 		rt->dst.input = ip6_mc_input;
1542 	else if (cfg->fc_flags & RTF_LOCAL)
1543 		rt->dst.input = ip6_input;
1544 	else
1545 		rt->dst.input = ip6_forward;
1546 
1547 	rt->dst.output = ip6_output;
1548 
1549 	ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
1550 	rt->rt6i_dst.plen = cfg->fc_dst_len;
1551 	if (rt->rt6i_dst.plen == 128) {
1552 		rt->dst.flags |= DST_HOST;
1553 		dst_metrics_set_force_overwrite(&rt->dst);
1554 	}
1555 
1556 #ifdef CONFIG_IPV6_SUBTREES
1557 	ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
1558 	rt->rt6i_src.plen = cfg->fc_src_len;
1559 #endif
1560 
1561 	rt->rt6i_metric = cfg->fc_metric;
1562 
1563 	/* We cannot add true routes via loopback here;
1564 	   they would result in kernel looping. Promote them to reject routes.
1565 	 */
1566 	if ((cfg->fc_flags & RTF_REJECT) ||
1567 	    (dev && (dev->flags & IFF_LOOPBACK) &&
1568 	     !(addr_type & IPV6_ADDR_LOOPBACK) &&
1569 	     !(cfg->fc_flags & RTF_LOCAL))) {
1570 		/* hold loopback dev/idev if we haven't done so. */
1571 		if (dev != net->loopback_dev) {
1572 			if (dev) {
1573 				dev_put(dev);
1574 				in6_dev_put(idev);
1575 			}
1576 			dev = net->loopback_dev;
1577 			dev_hold(dev);
1578 			idev = in6_dev_get(dev);
1579 			if (!idev) {
1580 				err = -ENODEV;
1581 				goto out;
1582 			}
1583 		}
1584 		rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1585 		switch (cfg->fc_type) {
1586 		case RTN_BLACKHOLE:
1587 			rt->dst.error = -EINVAL;
1588 			rt->dst.output = dst_discard_sk;
1589 			rt->dst.input = dst_discard;
1590 			break;
1591 		case RTN_PROHIBIT:
1592 			rt->dst.error = -EACCES;
1593 			rt->dst.output = ip6_pkt_prohibit_out;
1594 			rt->dst.input = ip6_pkt_prohibit;
1595 			break;
1596 		case RTN_THROW:
1597 		default:
1598 			rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
1599 					: -ENETUNREACH;
1600 			rt->dst.output = ip6_pkt_discard_out;
1601 			rt->dst.input = ip6_pkt_discard;
1602 			break;
1603 		}
1604 		goto install_route;
1605 	}
1606 
1607 	if (cfg->fc_flags & RTF_GATEWAY) {
1608 		const struct in6_addr *gw_addr;
1609 		int gwa_type;
1610 
1611 		gw_addr = &cfg->fc_gateway;
1612 		rt->rt6i_gateway = *gw_addr;
1613 		gwa_type = ipv6_addr_type(gw_addr);
1614 
1615 		if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1616 			struct rt6_info *grt;
1617 
1618 			/* IPv6 strictly forbids using non-link-local
1619 			   addresses as nexthop addresses.
1620 			   Otherwise, the router will not be able to send redirects.
1621 			   It is very good, but in some (rare!) circumstances
1622 			   (SIT, PtP, NBMA NOARP links) it is handy to allow
1623 			   some exceptions. --ANK
1624 			 */
1625 			err = -EINVAL;
1626 			if (!(gwa_type & IPV6_ADDR_UNICAST))
1627 				goto out;
1628 
1629 			grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
1630 
1631 			err = -EHOSTUNREACH;
1632 			if (!grt)
1633 				goto out;
1634 			if (dev) {
1635 				if (dev != grt->dst.dev) {
1636 					ip6_rt_put(grt);
1637 					goto out;
1638 				}
1639 			} else {
1640 				dev = grt->dst.dev;
1641 				idev = grt->rt6i_idev;
1642 				dev_hold(dev);
1643 				in6_dev_hold(grt->rt6i_idev);
1644 			}
1645 			if (!(grt->rt6i_flags & RTF_GATEWAY))
1646 				err = 0;
1647 			ip6_rt_put(grt);
1648 
1649 			if (err)
1650 				goto out;
1651 		}
1652 		err = -EINVAL;
1653 		if (!dev || (dev->flags & IFF_LOOPBACK))
1654 			goto out;
1655 	}
1656 
1657 	err = -ENODEV;
1658 	if (!dev)
1659 		goto out;
1660 
1661 	if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
1662 		if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
1663 			err = -EINVAL;
1664 			goto out;
1665 		}
1666 		rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
1667 		rt->rt6i_prefsrc.plen = 128;
1668 	} else
1669 		rt->rt6i_prefsrc.plen = 0;
1670 
1671 	rt->rt6i_flags = cfg->fc_flags;
1672 
1673 install_route:
1674 	rt->dst.dev = dev;
1675 	rt->rt6i_idev = idev;
1676 	rt->rt6i_table = table;
1677 
1678 	cfg->fc_nlinfo.nl_net = dev_net(dev);
1679 
1680 	return __ip6_ins_rt(rt, &cfg->fc_nlinfo, cfg->fc_mx, cfg->fc_mx_len);
1681 
1682 out:
1683 	if (dev)
1684 		dev_put(dev);
1685 	if (idev)
1686 		in6_dev_put(idev);
1687 	if (rt)
1688 		dst_free(&rt->dst);
1689 	return err;
1690 }
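/* Illustrative only (not a real caller): adding a simple gateway route
 * goes through a struct fib6_config, much as rt6_add_route_info() further
 * below does.  The identifiers on the right-hand side are made up for the
 * example:
 *
 *	struct fib6_config cfg = {
 *		.fc_table	= RT6_TABLE_MAIN,
 *		.fc_metric	= IP6_RT_PRIO_USER,
 *		.fc_ifindex	= dev->ifindex,
 *		.fc_dst_len	= 64,
 *		.fc_flags	= RTF_GATEWAY | RTF_UP,
 *		.fc_nlinfo.nl_net = net,
 *	};
 *	cfg.fc_dst = prefix;
 *	cfg.fc_gateway = gw_addr;
 *	err = ip6_route_add(&cfg);
 */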
1691 
1692 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
1693 {
1694 	int err;
1695 	struct fib6_table *table;
1696 	struct net *net = dev_net(rt->dst.dev);
1697 
1698 	if (rt == net->ipv6.ip6_null_entry) {
1699 		err = -ENOENT;
1700 		goto out;
1701 	}
1702 
1703 	table = rt->rt6i_table;
1704 	write_lock_bh(&table->tb6_lock);
1705 	err = fib6_del(rt, info);
1706 	write_unlock_bh(&table->tb6_lock);
1707 
1708 out:
1709 	ip6_rt_put(rt);
1710 	return err;
1711 }
1712 
1713 int ip6_del_rt(struct rt6_info *rt)
1714 {
1715 	struct nl_info info = {
1716 		.nl_net = dev_net(rt->dst.dev),
1717 	};
1718 	return __ip6_del_rt(rt, &info);
1719 }
1720 
1721 static int ip6_route_del(struct fib6_config *cfg)
1722 {
1723 	struct fib6_table *table;
1724 	struct fib6_node *fn;
1725 	struct rt6_info *rt;
1726 	int err = -ESRCH;
1727 
1728 	table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
1729 	if (!table)
1730 		return err;
1731 
1732 	read_lock_bh(&table->tb6_lock);
1733 
1734 	fn = fib6_locate(&table->tb6_root,
1735 			 &cfg->fc_dst, cfg->fc_dst_len,
1736 			 &cfg->fc_src, cfg->fc_src_len);
1737 
1738 	if (fn) {
1739 		for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1740 			if (cfg->fc_ifindex &&
1741 			    (!rt->dst.dev ||
1742 			     rt->dst.dev->ifindex != cfg->fc_ifindex))
1743 				continue;
1744 			if (cfg->fc_flags & RTF_GATEWAY &&
1745 			    !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
1746 				continue;
1747 			if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
1748 				continue;
1749 			dst_hold(&rt->dst);
1750 			read_unlock_bh(&table->tb6_lock);
1751 
1752 			return __ip6_del_rt(rt, &cfg->fc_nlinfo);
1753 		}
1754 	}
1755 	read_unlock_bh(&table->tb6_lock);
1756 
1757 	return err;
1758 }
1759 
1760 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
1761 {
1762 	struct net *net = dev_net(skb->dev);
1763 	struct netevent_redirect netevent;
1764 	struct rt6_info *rt, *nrt = NULL;
1765 	struct ndisc_options ndopts;
1766 	struct inet6_dev *in6_dev;
1767 	struct neighbour *neigh;
1768 	struct rd_msg *msg;
1769 	int optlen, on_link;
1770 	u8 *lladdr;
1771 
1772 	optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
1773 	optlen -= sizeof(*msg);
1774 
1775 	if (optlen < 0) {
1776 		net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
1777 		return;
1778 	}
1779 
1780 	msg = (struct rd_msg *)icmp6_hdr(skb);
1781 
1782 	if (ipv6_addr_is_multicast(&msg->dest)) {
1783 		net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
1784 		return;
1785 	}
1786 
1787 	on_link = 0;
1788 	if (ipv6_addr_equal(&msg->dest, &msg->target)) {
1789 		on_link = 1;
1790 	} else if (ipv6_addr_type(&msg->target) !=
1791 		   (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
1792 		net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
1793 		return;
1794 	}
1795 
1796 	in6_dev = __in6_dev_get(skb->dev);
1797 	if (!in6_dev)
1798 		return;
1799 	if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
1800 		return;
1801 
1802 	/* RFC2461 8.1:
1803 	 *	The IP source address of the Redirect MUST be the same as the current
1804 	 *	first-hop router for the specified ICMP Destination Address.
1805 	 */
1806 
1807 	if (!ndisc_parse_options(msg->opt, optlen, &ndopts)) {
1808 		net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
1809 		return;
1810 	}
1811 
1812 	lladdr = NULL;
1813 	if (ndopts.nd_opts_tgt_lladdr) {
1814 		lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
1815 					     skb->dev);
1816 		if (!lladdr) {
1817 			net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
1818 			return;
1819 		}
1820 	}
1821 
1822 	rt = (struct rt6_info *) dst;
1823 	if (rt == net->ipv6.ip6_null_entry) {
1824 		net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
1825 		return;
1826 	}
1827 
1828 	/* Redirect received -> path was valid.
1829 	 * Look, redirects are sent only in response to data packets,
1830 	 * so this nexthop is apparently reachable. --ANK
1831 	 */
1832 	dst_confirm(&rt->dst);
1833 
1834 	neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
1835 	if (!neigh)
1836 		return;
1837 
1838 	/*
1839 	 *	We have finally decided to accept it.
1840 	 */
1841 
1842 	neigh_update(neigh, lladdr, NUD_STALE,
1843 		     NEIGH_UPDATE_F_WEAK_OVERRIDE|
1844 		     NEIGH_UPDATE_F_OVERRIDE|
1845 		     (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
1846 				     NEIGH_UPDATE_F_ISROUTER))
1847 		     );
1848 
1849 	nrt = ip6_rt_copy(rt, &msg->dest);
1850 	if (!nrt)
1851 		goto out;
1852 
1853 	nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
1854 	if (on_link)
1855 		nrt->rt6i_flags &= ~RTF_GATEWAY;
1856 
1857 	nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
1858 
1859 	if (ip6_ins_rt(nrt))
1860 		goto out;
1861 
1862 	netevent.old = &rt->dst;
1863 	netevent.new = &nrt->dst;
1864 	netevent.daddr = &msg->dest;
1865 	netevent.neigh = neigh;
1866 	call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
1867 
1868 	if (rt->rt6i_flags & RTF_CACHE) {
1869 		rt = (struct rt6_info *) dst_clone(&rt->dst);
1870 		ip6_del_rt(rt);
1871 	}
1872 
1873 out:
1874 	neigh_release(neigh);
1875 }
1876 
1877 /*
1878  *	Misc support functions
1879  */
1880 
1881 static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
1882 				    const struct in6_addr *dest)
1883 {
1884 	struct net *net = dev_net(ort->dst.dev);
1885 	struct rt6_info *rt = ip6_dst_alloc(net, ort->dst.dev, 0,
1886 					    ort->rt6i_table);
1887 
1888 	if (rt) {
1889 		rt->dst.input = ort->dst.input;
1890 		rt->dst.output = ort->dst.output;
1891 		rt->dst.flags |= DST_HOST;
1892 
1893 		rt->rt6i_dst.addr = *dest;
1894 		rt->rt6i_dst.plen = 128;
1895 		dst_copy_metrics(&rt->dst, &ort->dst);
1896 		rt->dst.error = ort->dst.error;
1897 		rt->rt6i_idev = ort->rt6i_idev;
1898 		if (rt->rt6i_idev)
1899 			in6_dev_hold(rt->rt6i_idev);
1900 		rt->dst.lastuse = jiffies;
1901 
1902 		if (ort->rt6i_flags & RTF_GATEWAY)
1903 			rt->rt6i_gateway = ort->rt6i_gateway;
1904 		else
1905 			rt->rt6i_gateway = *dest;
1906 		rt->rt6i_flags = ort->rt6i_flags;
1907 		rt6_set_from(rt, ort);
1908 		rt->rt6i_metric = 0;
1909 
1910 #ifdef CONFIG_IPV6_SUBTREES
1911 		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1912 #endif
1913 		memcpy(&rt->rt6i_prefsrc, &ort->rt6i_prefsrc, sizeof(struct rt6key));
1914 		rt->rt6i_table = ort->rt6i_table;
1915 	}
1916 	return rt;
1917 }
1918 
1919 #ifdef CONFIG_IPV6_ROUTE_INFO
1920 static struct rt6_info *rt6_get_route_info(struct net *net,
1921 					   const struct in6_addr *prefix, int prefixlen,
1922 					   const struct in6_addr *gwaddr, int ifindex)
1923 {
1924 	struct fib6_node *fn;
1925 	struct rt6_info *rt = NULL;
1926 	struct fib6_table *table;
1927 
1928 	table = fib6_get_table(net, RT6_TABLE_INFO);
1929 	if (!table)
1930 		return NULL;
1931 
1932 	read_lock_bh(&table->tb6_lock);
1933 	fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0);
1934 	if (!fn)
1935 		goto out;
1936 
1937 	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1938 		if (rt->dst.dev->ifindex != ifindex)
1939 			continue;
1940 		if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
1941 			continue;
1942 		if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
1943 			continue;
1944 		dst_hold(&rt->dst);
1945 		break;
1946 	}
1947 out:
1948 	read_unlock_bh(&table->tb6_lock);
1949 	return rt;
1950 }
1951 
1952 static struct rt6_info *rt6_add_route_info(struct net *net,
1953 					   const struct in6_addr *prefix, int prefixlen,
1954 					   const struct in6_addr *gwaddr, int ifindex,
1955 					   unsigned int pref)
1956 {
1957 	struct fib6_config cfg = {
1958 		.fc_table	= RT6_TABLE_INFO,
1959 		.fc_metric	= IP6_RT_PRIO_USER,
1960 		.fc_ifindex	= ifindex,
1961 		.fc_dst_len	= prefixlen,
1962 		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
1963 				  RTF_UP | RTF_PREF(pref),
1964 		.fc_nlinfo.portid = 0,
1965 		.fc_nlinfo.nlh = NULL,
1966 		.fc_nlinfo.nl_net = net,
1967 	};
1968 
1969 	cfg.fc_dst = *prefix;
1970 	cfg.fc_gateway = *gwaddr;
1971 
1972 	/* We should treat it as a default route if prefix length is 0. */
1973 	if (!prefixlen)
1974 		cfg.fc_flags |= RTF_DEFAULT;
1975 
1976 	ip6_route_add(&cfg);
1977 
1978 	return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
1979 }
1980 #endif
1981 
1982 struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
1983 {
1984 	struct rt6_info *rt;
1985 	struct fib6_table *table;
1986 
1987 	table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
1988 	if (!table)
1989 		return NULL;
1990 
1991 	read_lock_bh(&table->tb6_lock);
1992 	for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
1993 		if (dev == rt->dst.dev &&
1994 		    ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
1995 		    ipv6_addr_equal(&rt->rt6i_gateway, addr))
1996 			break;
1997 	}
1998 	if (rt)
1999 		dst_hold(&rt->dst);
2000 	read_unlock_bh(&table->tb6_lock);
2001 	return rt;
2002 }
2003 
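/* Add a default route for a router learned via Router Advertisement, with
 * the given preference and an expiry, and return the installed entry.
 */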
2004 struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
2005 				     struct net_device *dev,
2006 				     unsigned int pref)
2007 {
2008 	struct fib6_config cfg = {
2009 		.fc_table	= RT6_TABLE_DFLT,
2010 		.fc_metric	= IP6_RT_PRIO_USER,
2011 		.fc_ifindex	= dev->ifindex,
2012 		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
2013 				  RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
2014 		.fc_nlinfo.portid = 0,
2015 		.fc_nlinfo.nlh = NULL,
2016 		.fc_nlinfo.nl_net = dev_net(dev),
2017 	};
2018 
2019 	cfg.fc_gateway = *gwaddr;
2020 
2021 	ip6_route_add(&cfg);
2022 
2023 	return rt6_get_dflt_router(gwaddr, dev);
2024 }
2025 
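/* Flush RA-learned (RTF_DEFAULT/RTF_ADDRCONF) routes, except on interfaces
 * configured with accept_ra == 2.
 */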
2026 void rt6_purge_dflt_routers(struct net *net)
2027 {
2028 	struct rt6_info *rt;
2029 	struct fib6_table *table;
2030 
2031 	/* NOTE: Keep consistent with rt6_get_dflt_router */
2032 	table = fib6_get_table(net, RT6_TABLE_DFLT);
2033 	if (!table)
2034 		return;
2035 
2036 restart:
2037 	read_lock_bh(&table->tb6_lock);
2038 	for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
2039 		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
2040 		    (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
2041 			dst_hold(&rt->dst);
2042 			read_unlock_bh(&table->tb6_lock);
2043 			ip6_del_rt(rt);
2044 			goto restart;
2045 		}
2046 	}
2047 	read_unlock_bh(&table->tb6_lock);
2048 }
2049 
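/* Convert the legacy struct in6_rtmsg used by the SIOCADDRT/SIOCDELRT
 * ioctls into a struct fib6_config targeting the main table.
 */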
2050 static void rtmsg_to_fib6_config(struct net *net,
2051 				 struct in6_rtmsg *rtmsg,
2052 				 struct fib6_config *cfg)
2053 {
2054 	memset(cfg, 0, sizeof(*cfg));
2055 
2056 	cfg->fc_table = RT6_TABLE_MAIN;
2057 	cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
2058 	cfg->fc_metric = rtmsg->rtmsg_metric;
2059 	cfg->fc_expires = rtmsg->rtmsg_info;
2060 	cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
2061 	cfg->fc_src_len = rtmsg->rtmsg_src_len;
2062 	cfg->fc_flags = rtmsg->rtmsg_flags;
2063 
2064 	cfg->fc_nlinfo.nl_net = net;
2065 
2066 	cfg->fc_dst = rtmsg->rtmsg_dst;
2067 	cfg->fc_src = rtmsg->rtmsg_src;
2068 	cfg->fc_gateway = rtmsg->rtmsg_gateway;
2069 }
2070 
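/* Handle the SIOCADDRT/SIOCDELRT route ioctls.  Requires CAP_NET_ADMIN in
 * the namespace's owning user namespace; the add/delete runs under RTNL.
 */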
2071 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
2072 {
2073 	struct fib6_config cfg;
2074 	struct in6_rtmsg rtmsg;
2075 	int err;
2076 
2077 	switch (cmd) {
2078 	case SIOCADDRT:		/* Add a route */
2079 	case SIOCDELRT:		/* Delete a route */
2080 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2081 			return -EPERM;
2082 		err = copy_from_user(&rtmsg, arg,
2083 				     sizeof(struct in6_rtmsg));
2084 		if (err)
2085 			return -EFAULT;
2086 
2087 		rtmsg_to_fib6_config(net, &rtmsg, &cfg);
2088 
2089 		rtnl_lock();
2090 		switch (cmd) {
2091 		case SIOCADDRT:
2092 			err = ip6_route_add(&cfg);
2093 			break;
2094 		case SIOCDELRT:
2095 			err = ip6_route_del(&cfg);
2096 			break;
2097 		default:
2098 			err = -EINVAL;
2099 		}
2100 		rtnl_unlock();
2101 
2102 		return err;
2103 	}
2104 
2105 	return -EINVAL;
2106 }
2107 
2108 /*
2109  *	Drop the packet on the floor
2110  */
2111 
2112 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
2113 {
2114 	int type;
2115 	struct dst_entry *dst = skb_dst(skb);
2116 	switch (ipstats_mib_noroutes) {
2117 	case IPSTATS_MIB_INNOROUTES:
2118 		type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
2119 		if (type == IPV6_ADDR_ANY) {
2120 			IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2121 				      IPSTATS_MIB_INADDRERRORS);
2122 			break;
2123 		}
2124 		/* FALLTHROUGH */
2125 	case IPSTATS_MIB_OUTNOROUTES:
2126 		IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2127 			      ipstats_mib_noroutes);
2128 		break;
2129 	}
2130 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
2131 	kfree_skb(skb);
2132 	return 0;
2133 }
2134 
2135 static int ip6_pkt_discard(struct sk_buff *skb)
2136 {
2137 	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
2138 }
2139 
2140 static int ip6_pkt_discard_out(struct sock *sk, struct sk_buff *skb)
2141 {
2142 	skb->dev = skb_dst(skb)->dev;
2143 	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
2144 }
2145 
2146 static int ip6_pkt_prohibit(struct sk_buff *skb)
2147 {
2148 	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
2149 }
2150 
2151 static int ip6_pkt_prohibit_out(struct sock *sk, struct sk_buff *skb)
2152 {
2153 	skb->dev = skb_dst(skb)->dev;
2154 	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
2155 }
2156 
2157 /*
2158  *	Allocate a dst for a local (unicast / anycast) address.
2159  */
2160 
2161 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2162 				    const struct in6_addr *addr,
2163 				    bool anycast)
2164 {
2165 	struct net *net = dev_net(idev->dev);
2166 	struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
2167 					    DST_NOCOUNT, NULL);
2168 	if (!rt)
2169 		return ERR_PTR(-ENOMEM);
2170 
2171 	in6_dev_hold(idev);
2172 
2173 	rt->dst.flags |= DST_HOST;
2174 	rt->dst.input = ip6_input;
2175 	rt->dst.output = ip6_output;
2176 	rt->rt6i_idev = idev;
2177 
2178 	rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
2179 	if (anycast)
2180 		rt->rt6i_flags |= RTF_ANYCAST;
2181 	else
2182 		rt->rt6i_flags |= RTF_LOCAL;
2183 
2184 	rt->rt6i_gateway  = *addr;
2185 	rt->rt6i_dst.addr = *addr;
2186 	rt->rt6i_dst.plen = 128;
2187 	rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
2188 
2189 	atomic_set(&rt->dst.__refcnt, 1);
2190 
2191 	return rt;
2192 }
2193 
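/* Select a source address for daddr: prefer the route's preferred source
 * address if one is set, otherwise fall back to ipv6_dev_get_saddr() on
 * the route's device.
 */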
2194 int ip6_route_get_saddr(struct net *net,
2195 			struct rt6_info *rt,
2196 			const struct in6_addr *daddr,
2197 			unsigned int prefs,
2198 			struct in6_addr *saddr)
2199 {
2200 	struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt);
2201 	int err = 0;
2202 	if (rt->rt6i_prefsrc.plen)
2203 		*saddr = rt->rt6i_prefsrc.addr;
2204 	else
2205 		err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
2206 					 daddr, prefs, saddr);
2207 	return err;
2208 }
2209 
2210 /* Remove a deleted address from prefsrc entries. */
2211 struct arg_dev_net_ip {
2212 	struct net_device *dev;
2213 	struct net *net;
2214 	struct in6_addr *addr;
2215 };
2216 
2217 static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
2218 {
2219 	struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
2220 	struct net *net = ((struct arg_dev_net_ip *)arg)->net;
2221 	struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
2222 
2223 	if (((void *)rt->dst.dev == dev || !dev) &&
2224 	    rt != net->ipv6.ip6_null_entry &&
2225 	    ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
2226 		/* remove prefsrc entry */
2227 		rt->rt6i_prefsrc.plen = 0;
2228 	}
2229 	return 0;
2230 }
2231 
2232 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
2233 {
2234 	struct net *net = dev_net(ifp->idev->dev);
2235 	struct arg_dev_net_ip adni = {
2236 		.dev = ifp->idev->dev,
2237 		.net = net,
2238 		.addr = &ifp->addr,
2239 	};
2240 	fib6_clean_all(net, fib6_remove_prefsrc, &adni);
2241 }
2242 
2243 #define RTF_RA_ROUTER		(RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
2244 #define RTF_CACHE_GATEWAY	(RTF_GATEWAY | RTF_CACHE)
2245 
2246 /* Remove routers and update dst entries when a gateway turns into a host. */
2247 static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
2248 {
2249 	struct in6_addr *gateway = (struct in6_addr *)arg;
2250 
2251 	if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) ||
2252 	     ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) &&
2253 	     ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
2254 		return -1;
2255 	}
2256 	return 0;
2257 }
2258 
2259 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
2260 {
2261 	fib6_clean_all(net, fib6_clean_tohost, gateway);
2262 }
2263 
2264 struct arg_dev_net {
2265 	struct net_device *dev;
2266 	struct net *net;
2267 };
2268 
2269 static int fib6_ifdown(struct rt6_info *rt, void *arg)
2270 {
2271 	const struct arg_dev_net *adn = arg;
2272 	const struct net_device *dev = adn->dev;
2273 
2274 	if ((rt->dst.dev == dev || !dev) &&
2275 	    rt != adn->net->ipv6.ip6_null_entry)
2276 		return -1;
2277 
2278 	return 0;
2279 }
2280 
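/* Remove all routes that use dev; with dev == NULL, every route except the
 * null entry is removed.  icmp6_clean_all() applies the same test to the
 * ICMPv6 sockets' cached dsts.
 */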
2281 void rt6_ifdown(struct net *net, struct net_device *dev)
2282 {
2283 	struct arg_dev_net adn = {
2284 		.dev = dev,
2285 		.net = net,
2286 	};
2287 
2288 	fib6_clean_all(net, fib6_ifdown, &adn);
2289 	icmp6_clean_all(fib6_ifdown, &adn);
2290 }
2291 
2292 struct rt6_mtu_change_arg {
2293 	struct net_device *dev;
2294 	unsigned int mtu;
2295 };
2296 
2297 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
2298 {
2299 	struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
2300 	struct inet6_dev *idev;
2301 
2302 	/* In IPv6, PMTU discovery is not optional,
2303 	   so locking RTAX_MTU cannot disable it.
2304 	   We still use the lock to block changes
2305 	   caused by addrconf/ndisc.
2306 	*/
2307 
2308 	idev = __in6_dev_get(arg->dev);
2309 	if (!idev)
2310 		return 0;
2311 
2312 	/* For an administrative MTU increase, there is no way to discover
2313 	   an IPv6 PMTU increase, so the PMTU must be updated here.
2314 	   Since RFC 1981 doesn't cover administrative MTU increases,
2315 	   updating the PMTU on increase is a MUST (e.g. for jumbo frames).
2316 	 */
2317 	/*
2318 	   If the new MTU is less than the route PMTU, the new MTU will be
2319 	   the lowest MTU in the path; update the route PMTU to reflect the
2320 	   decrease.  If the new MTU is greater than the route PMTU, and the
2321 	   old MTU was the lowest MTU in the path, update the route PMTU to
2322 	   reflect the increase.  In that case, if another node in the path
2323 	   still has the lowest MTU, a Packet Too Big message will trigger
2324 	   PMTU discovery again.
2325 	 */
2326 	if (rt->dst.dev == arg->dev &&
2327 	    !dst_metric_locked(&rt->dst, RTAX_MTU) &&
2328 	    (dst_mtu(&rt->dst) >= arg->mtu ||
2329 	     (dst_mtu(&rt->dst) < arg->mtu &&
2330 	      dst_mtu(&rt->dst) == idev->cnf.mtu6))) {
2331 		dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
2332 	}
2333 	return 0;
2334 }
2335 
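/* Propagate a device MTU change to the routes using that device;
 * rt6_mtu_change_route() above decides per route whether RTAX_MTU is
 * updated.
 */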
2336 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
2337 {
2338 	struct rt6_mtu_change_arg arg = {
2339 		.dev = dev,
2340 		.mtu = mtu,
2341 	};
2342 
2343 	fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
2344 }
2345 
2346 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2347 	[RTA_GATEWAY]           = { .len = sizeof(struct in6_addr) },
2348 	[RTA_OIF]               = { .type = NLA_U32 },
2349 	[RTA_IIF]		= { .type = NLA_U32 },
2350 	[RTA_PRIORITY]          = { .type = NLA_U32 },
2351 	[RTA_METRICS]           = { .type = NLA_NESTED },
2352 	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
2353 };
2354 
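/* Parse an RTM_NEWROUTE/RTM_DELROUTE netlink request into a
 * struct fib6_config.
 */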
2355 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2356 			      struct fib6_config *cfg)
2357 {
2358 	struct rtmsg *rtm;
2359 	struct nlattr *tb[RTA_MAX+1];
2360 	int err;
2361 
2362 	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2363 	if (err < 0)
2364 		goto errout;
2365 
2366 	err = -EINVAL;
2367 	rtm = nlmsg_data(nlh);
2368 	memset(cfg, 0, sizeof(*cfg));
2369 
2370 	cfg->fc_table = rtm->rtm_table;
2371 	cfg->fc_dst_len = rtm->rtm_dst_len;
2372 	cfg->fc_src_len = rtm->rtm_src_len;
2373 	cfg->fc_flags = RTF_UP;
2374 	cfg->fc_protocol = rtm->rtm_protocol;
2375 	cfg->fc_type = rtm->rtm_type;
2376 
2377 	if (rtm->rtm_type == RTN_UNREACHABLE ||
2378 	    rtm->rtm_type == RTN_BLACKHOLE ||
2379 	    rtm->rtm_type == RTN_PROHIBIT ||
2380 	    rtm->rtm_type == RTN_THROW)
2381 		cfg->fc_flags |= RTF_REJECT;
2382 
2383 	if (rtm->rtm_type == RTN_LOCAL)
2384 		cfg->fc_flags |= RTF_LOCAL;
2385 
2386 	cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
2387 	cfg->fc_nlinfo.nlh = nlh;
2388 	cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
2389 
2390 	if (tb[RTA_GATEWAY]) {
2391 		nla_memcpy(&cfg->fc_gateway, tb[RTA_GATEWAY], 16);
2392 		cfg->fc_flags |= RTF_GATEWAY;
2393 	}
2394 
2395 	if (tb[RTA_DST]) {
2396 		int plen = (rtm->rtm_dst_len + 7) >> 3;
2397 
2398 		if (nla_len(tb[RTA_DST]) < plen)
2399 			goto errout;
2400 
2401 		nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
2402 	}
2403 
2404 	if (tb[RTA_SRC]) {
2405 		int plen = (rtm->rtm_src_len + 7) >> 3;
2406 
2407 		if (nla_len(tb[RTA_SRC]) < plen)
2408 			goto errout;
2409 
2410 		nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
2411 	}
2412 
2413 	if (tb[RTA_PREFSRC])
2414 		nla_memcpy(&cfg->fc_prefsrc, tb[RTA_PREFSRC], 16);
2415 
2416 	if (tb[RTA_OIF])
2417 		cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
2418 
2419 	if (tb[RTA_PRIORITY])
2420 		cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
2421 
2422 	if (tb[RTA_METRICS]) {
2423 		cfg->fc_mx = nla_data(tb[RTA_METRICS]);
2424 		cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
2425 	}
2426 
2427 	if (tb[RTA_TABLE])
2428 		cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
2429 
2430 	if (tb[RTA_MULTIPATH]) {
2431 		cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
2432 		cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
2433 	}
2434 
2435 	err = 0;
2436 errout:
2437 	return err;
2438 }
2439 
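/* Add (add != 0) or delete each nexthop of an RTA_MULTIPATH request as an
 * individual route.  A failed add rolls back the nexthops installed so
 * far; a failed delete keeps going and reports the last error.
 */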
2440 static int ip6_route_multipath(struct fib6_config *cfg, int add)
2441 {
2442 	struct fib6_config r_cfg;
2443 	struct rtnexthop *rtnh;
2444 	int remaining;
2445 	int attrlen;
2446 	int err = 0, last_err = 0;
2447 
2448 beginning:
2449 	rtnh = (struct rtnexthop *)cfg->fc_mp;
2450 	remaining = cfg->fc_mp_len;
2451 
2452 	/* Parse a Multipath Entry */
2453 	while (rtnh_ok(rtnh, remaining)) {
2454 		memcpy(&r_cfg, cfg, sizeof(*cfg));
2455 		if (rtnh->rtnh_ifindex)
2456 			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
2457 
2458 		attrlen = rtnh_attrlen(rtnh);
2459 		if (attrlen > 0) {
2460 			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
2461 
2462 			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
2463 			if (nla) {
2464 				nla_memcpy(&r_cfg.fc_gateway, nla, 16);
2465 				r_cfg.fc_flags |= RTF_GATEWAY;
2466 			}
2467 		}
2468 		err = add ? ip6_route_add(&r_cfg) : ip6_route_del(&r_cfg);
2469 		if (err) {
2470 			last_err = err;
2471 			/* If we are trying to remove a route, do not stop the
2472 			 * loop when ip6_route_del() fails (because the nexthop
2473 			 * is already gone); we should try to remove all nexthops.
2474 			 */
2475 			if (add) {
2476 				/* If add fails, we should try to delete all
2477 				 * next hops that have been already added.
2478 				 */
2479 				add = 0;
2480 				goto beginning;
2481 			}
2482 		}
2483 		/* Because each route is added as if it were a single route, we
2484 		 * remove this flag after the first nexthop (if there is a
2485 		 * collision, we have already failed to add the first nexthop:
2486 		 * fib6_add_rt2node() has rejected it).
2487 		 */
2488 		cfg->fc_nlinfo.nlh->nlmsg_flags &= ~NLM_F_EXCL;
2489 		rtnh = rtnh_next(rtnh, &remaining);
2490 	}
2491 
2492 	return last_err;
2493 }
2494 
2495 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
2496 {
2497 	struct fib6_config cfg;
2498 	int err;
2499 
2500 	err = rtm_to_fib6_config(skb, nlh, &cfg);
2501 	if (err < 0)
2502 		return err;
2503 
2504 	if (cfg.fc_mp)
2505 		return ip6_route_multipath(&cfg, 0);
2506 	else
2507 		return ip6_route_del(&cfg);
2508 }
2509 
2510 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
2511 {
2512 	struct fib6_config cfg;
2513 	int err;
2514 
2515 	err = rtm_to_fib6_config(skb, nlh, &cfg);
2516 	if (err < 0)
2517 		return err;
2518 
2519 	if (cfg.fc_mp)
2520 		return ip6_route_multipath(&cfg, 1);
2521 	else
2522 		return ip6_route_add(&cfg);
2523 }
2524 
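/* Worst-case size of an RTM_NEWROUTE message, used when allocating
 * notification skbs.
 */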
2525 static inline size_t rt6_nlmsg_size(void)
2526 {
2527 	return NLMSG_ALIGN(sizeof(struct rtmsg))
2528 	       + nla_total_size(16) /* RTA_SRC */
2529 	       + nla_total_size(16) /* RTA_DST */
2530 	       + nla_total_size(16) /* RTA_GATEWAY */
2531 	       + nla_total_size(16) /* RTA_PREFSRC */
2532 	       + nla_total_size(4) /* RTA_TABLE */
2533 	       + nla_total_size(4) /* RTA_IIF */
2534 	       + nla_total_size(4) /* RTA_OIF */
2535 	       + nla_total_size(4) /* RTA_PRIORITY */
2536 	       + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
2537 	       + nla_total_size(sizeof(struct rta_cacheinfo));
2538 }
2539 
2540 static int rt6_fill_node(struct net *net,
2541 			 struct sk_buff *skb, struct rt6_info *rt,
2542 			 struct in6_addr *dst, struct in6_addr *src,
2543 			 int iif, int type, u32 portid, u32 seq,
2544 			 int prefix, int nowait, unsigned int flags)
2545 {
2546 	struct rtmsg *rtm;
2547 	struct nlmsghdr *nlh;
2548 	long expires;
2549 	u32 table;
2550 
2551 	if (prefix) {	/* user wants prefix routes only */
2552 		if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
2553 			/* success since this is not a prefix route */
2554 			return 1;
2555 		}
2556 	}
2557 
2558 	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
2559 	if (!nlh)
2560 		return -EMSGSIZE;
2561 
2562 	rtm = nlmsg_data(nlh);
2563 	rtm->rtm_family = AF_INET6;
2564 	rtm->rtm_dst_len = rt->rt6i_dst.plen;
2565 	rtm->rtm_src_len = rt->rt6i_src.plen;
2566 	rtm->rtm_tos = 0;
2567 	if (rt->rt6i_table)
2568 		table = rt->rt6i_table->tb6_id;
2569 	else
2570 		table = RT6_TABLE_UNSPEC;
2571 	rtm->rtm_table = table;
2572 	if (nla_put_u32(skb, RTA_TABLE, table))
2573 		goto nla_put_failure;
2574 	if (rt->rt6i_flags & RTF_REJECT) {
2575 		switch (rt->dst.error) {
2576 		case -EINVAL:
2577 			rtm->rtm_type = RTN_BLACKHOLE;
2578 			break;
2579 		case -EACCES:
2580 			rtm->rtm_type = RTN_PROHIBIT;
2581 			break;
2582 		case -EAGAIN:
2583 			rtm->rtm_type = RTN_THROW;
2584 			break;
2585 		default:
2586 			rtm->rtm_type = RTN_UNREACHABLE;
2587 			break;
2588 		}
2589 	}
2590 	else if (rt->rt6i_flags & RTF_LOCAL)
2591 		rtm->rtm_type = RTN_LOCAL;
2592 	else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
2593 		rtm->rtm_type = RTN_LOCAL;
2594 	else
2595 		rtm->rtm_type = RTN_UNICAST;
2596 	rtm->rtm_flags = 0;
2597 	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2598 	rtm->rtm_protocol = rt->rt6i_protocol;
2599 	if (rt->rt6i_flags & RTF_DYNAMIC)
2600 		rtm->rtm_protocol = RTPROT_REDIRECT;
2601 	else if (rt->rt6i_flags & RTF_ADDRCONF) {
2602 		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO))
2603 			rtm->rtm_protocol = RTPROT_RA;
2604 		else
2605 			rtm->rtm_protocol = RTPROT_KERNEL;
2606 	}
2607 
2608 	if (rt->rt6i_flags & RTF_CACHE)
2609 		rtm->rtm_flags |= RTM_F_CLONED;
2610 
2611 	if (dst) {
2612 		if (nla_put(skb, RTA_DST, 16, dst))
2613 			goto nla_put_failure;
2614 		rtm->rtm_dst_len = 128;
2615 	} else if (rtm->rtm_dst_len)
2616 		if (nla_put(skb, RTA_DST, 16, &rt->rt6i_dst.addr))
2617 			goto nla_put_failure;
2618 #ifdef CONFIG_IPV6_SUBTREES
2619 	if (src) {
2620 		if (nla_put(skb, RTA_SRC, 16, src))
2621 			goto nla_put_failure;
2622 		rtm->rtm_src_len = 128;
2623 	} else if (rtm->rtm_src_len &&
2624 		   nla_put(skb, RTA_SRC, 16, &rt->rt6i_src.addr))
2625 		goto nla_put_failure;
2626 #endif
2627 	if (iif) {
2628 #ifdef CONFIG_IPV6_MROUTE
2629 		if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
2630 			int err = ip6mr_get_route(net, skb, rtm, nowait);
2631 			if (err <= 0) {
2632 				if (!nowait) {
2633 					if (err == 0)
2634 						return 0;
2635 					goto nla_put_failure;
2636 				} else {
2637 					if (err == -EMSGSIZE)
2638 						goto nla_put_failure;
2639 				}
2640 			}
2641 		} else
2642 #endif
2643 			if (nla_put_u32(skb, RTA_IIF, iif))
2644 				goto nla_put_failure;
2645 	} else if (dst) {
2646 		struct in6_addr saddr_buf;
2647 		if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
2648 		    nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
2649 			goto nla_put_failure;
2650 	}
2651 
2652 	if (rt->rt6i_prefsrc.plen) {
2653 		struct in6_addr saddr_buf;
2654 		saddr_buf = rt->rt6i_prefsrc.addr;
2655 		if (nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
2656 			goto nla_put_failure;
2657 	}
2658 
2659 	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
2660 		goto nla_put_failure;
2661 
2662 	if (rt->rt6i_flags & RTF_GATEWAY) {
2663 		if (nla_put(skb, RTA_GATEWAY, 16, &rt->rt6i_gateway) < 0)
2664 			goto nla_put_failure;
2665 	}
2666 
2667 	if (rt->dst.dev &&
2668 	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2669 		goto nla_put_failure;
2670 	if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
2671 		goto nla_put_failure;
2672 
2673 	expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;
2674 
2675 	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
2676 		goto nla_put_failure;
2677 
2678 	return nlmsg_end(skb, nlh);
2679 
2680 nla_put_failure:
2681 	nlmsg_cancel(skb, nlh);
2682 	return -EMSGSIZE;
2683 }
2684 
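/* Dump callback: emit one RTM_NEWROUTE record for rt, honouring
 * RTM_F_PREFIX requests that restrict the dump to prefix routes.
 */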
2685 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
2686 {
2687 	struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
2688 	int prefix;
2689 
2690 	if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
2691 		struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
2692 		prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
2693 	} else
2694 		prefix = 0;
2695 
2696 	return rt6_fill_node(arg->net,
2697 		     arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
2698 		     NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
2699 		     prefix, 0, NLM_F_MULTI);
2700 }
2701 
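/* RTM_GETROUTE handler: resolve the requested source/destination (and
 * optional iif/oif/mark) to a route and reply with a single RTM_NEWROUTE
 * message.
 */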
2702 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
2703 {
2704 	struct net *net = sock_net(in_skb->sk);
2705 	struct nlattr *tb[RTA_MAX+1];
2706 	struct rt6_info *rt;
2707 	struct sk_buff *skb;
2708 	struct rtmsg *rtm;
2709 	struct flowi6 fl6;
2710 	int err, iif = 0, oif = 0;
2711 
2712 	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2713 	if (err < 0)
2714 		goto errout;
2715 
2716 	err = -EINVAL;
2717 	memset(&fl6, 0, sizeof(fl6));
2718 
2719 	if (tb[RTA_SRC]) {
2720 		if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
2721 			goto errout;
2722 
2723 		fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
2724 	}
2725 
2726 	if (tb[RTA_DST]) {
2727 		if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
2728 			goto errout;
2729 
2730 		fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
2731 	}
2732 
2733 	if (tb[RTA_IIF])
2734 		iif = nla_get_u32(tb[RTA_IIF]);
2735 
2736 	if (tb[RTA_OIF])
2737 		oif = nla_get_u32(tb[RTA_OIF]);
2738 
2739 	if (tb[RTA_MARK])
2740 		fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
2741 
2742 	if (iif) {
2743 		struct net_device *dev;
2744 		int flags = 0;
2745 
2746 		dev = __dev_get_by_index(net, iif);
2747 		if (!dev) {
2748 			err = -ENODEV;
2749 			goto errout;
2750 		}
2751 
2752 		fl6.flowi6_iif = iif;
2753 
2754 		if (!ipv6_addr_any(&fl6.saddr))
2755 			flags |= RT6_LOOKUP_F_HAS_SADDR;
2756 
2757 		rt = (struct rt6_info *)ip6_route_input_lookup(net, dev, &fl6,
2758 							       flags);
2759 	} else {
2760 		fl6.flowi6_oif = oif;
2761 
2762 		rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
2763 	}
2764 
2765 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2766 	if (!skb) {
2767 		ip6_rt_put(rt);
2768 		err = -ENOBUFS;
2769 		goto errout;
2770 	}
2771 
2772 	/* Reserve room for dummy headers; this skb can pass
2773 	   through a good chunk of the routing engine.
2774 	 */
2775 	skb_reset_mac_header(skb);
2776 	skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
2777 
2778 	skb_dst_set(skb, &rt->dst);
2779 
2780 	err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
2781 			    RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
2782 			    nlh->nlmsg_seq, 0, 0, 0);
2783 	if (err < 0) {
2784 		kfree_skb(skb);
2785 		goto errout;
2786 	}
2787 
2788 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2789 errout:
2790 	return err;
2791 }
2792 
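/* Notify RTNLGRP_IPV6_ROUTE listeners about a route change. */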
2793 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
2794 {
2795 	struct sk_buff *skb;
2796 	struct net *net = info->nl_net;
2797 	u32 seq;
2798 	int err;
2799 
2800 	err = -ENOBUFS;
2801 	seq = info->nlh ? info->nlh->nlmsg_seq : 0;
2802 
2803 	skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
2804 	if (!skb)
2805 		goto errout;
2806 
2807 	err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
2808 				event, info->portid, seq, 0, 0, 0);
2809 	if (err < 0) {
2810 		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
2811 		WARN_ON(err == -EMSGSIZE);
2812 		kfree_skb(skb);
2813 		goto errout;
2814 	}
2815 	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
2816 		    info->nlh, gfp_any());
2817 	return;
2818 errout:
2819 	if (err < 0)
2820 		rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
2821 }
2822 
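/* When the loopback device registers, point the per-netns special routes
 * (null entry, and with multiple tables also prohibit/blackhole) at it.
 */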
2823 static int ip6_route_dev_notify(struct notifier_block *this,
2824 				unsigned long event, void *ptr)
2825 {
2826 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2827 	struct net *net = dev_net(dev);
2828 
2829 	if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
2830 		net->ipv6.ip6_null_entry->dst.dev = dev;
2831 		net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
2832 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2833 		net->ipv6.ip6_prohibit_entry->dst.dev = dev;
2834 		net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
2835 		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
2836 		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
2837 #endif
2838 	}
2839 
2840 	return NOTIFY_OK;
2841 }
2842 
2843 /*
2844  *	/proc
2845  */
2846 
2847 #ifdef CONFIG_PROC_FS
2848 
2849 static const struct file_operations ipv6_route_proc_fops = {
2850 	.owner		= THIS_MODULE,
2851 	.open		= ipv6_route_open,
2852 	.read		= seq_read,
2853 	.llseek		= seq_lseek,
2854 	.release	= seq_release_net,
2855 };
2856 
2857 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
2858 {
2859 	struct net *net = (struct net *)seq->private;
2860 	seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
2861 		   net->ipv6.rt6_stats->fib_nodes,
2862 		   net->ipv6.rt6_stats->fib_route_nodes,
2863 		   net->ipv6.rt6_stats->fib_rt_alloc,
2864 		   net->ipv6.rt6_stats->fib_rt_entries,
2865 		   net->ipv6.rt6_stats->fib_rt_cache,
2866 		   dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
2867 		   net->ipv6.rt6_stats->fib_discarded_routes);
2868 
2869 	return 0;
2870 }
2871 
2872 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
2873 {
2874 	return single_open_net(inode, file, rt6_stats_seq_show);
2875 }
2876 
2877 static const struct file_operations rt6_stats_seq_fops = {
2878 	.owner	 = THIS_MODULE,
2879 	.open	 = rt6_stats_seq_open,
2880 	.read	 = seq_read,
2881 	.llseek	 = seq_lseek,
2882 	.release = single_release_net,
2883 };
2884 #endif	/* CONFIG_PROC_FS */
2885 
2886 #ifdef CONFIG_SYSCTL
2887 
2888 static
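/* Handler for the write-only "flush" sysctl: a write triggers an immediate
 * fib6 garbage-collection run.
 */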
2889 int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
2890 			      void __user *buffer, size_t *lenp, loff_t *ppos)
2891 {
2892 	struct net *net;
2893 	int delay;
2894 	if (!write)
2895 		return -EINVAL;
2896 
2897 	net = (struct net *)ctl->extra1;
2898 	delay = net->ipv6.sysctl.flush_delay;
2899 	proc_dointvec(ctl, write, buffer, lenp, ppos);
2900 	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
2901 	return 0;
2902 }
2903 
2904 struct ctl_table ipv6_route_table_template[] = {
2905 	{
2906 		.procname	=	"flush",
2907 		.data		=	&init_net.ipv6.sysctl.flush_delay,
2908 		.maxlen		=	sizeof(int),
2909 		.mode		=	0200,
2910 		.proc_handler	=	ipv6_sysctl_rtcache_flush
2911 	},
2912 	{
2913 		.procname	=	"gc_thresh",
2914 		.data		=	&ip6_dst_ops_template.gc_thresh,
2915 		.maxlen		=	sizeof(int),
2916 		.mode		=	0644,
2917 		.proc_handler	=	proc_dointvec,
2918 	},
2919 	{
2920 		.procname	=	"max_size",
2921 		.data		=	&init_net.ipv6.sysctl.ip6_rt_max_size,
2922 		.maxlen		=	sizeof(int),
2923 		.mode		=	0644,
2924 		.proc_handler	=	proc_dointvec,
2925 	},
2926 	{
2927 		.procname	=	"gc_min_interval",
2928 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2929 		.maxlen		=	sizeof(int),
2930 		.mode		=	0644,
2931 		.proc_handler	=	proc_dointvec_jiffies,
2932 	},
2933 	{
2934 		.procname	=	"gc_timeout",
2935 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_timeout,
2936 		.maxlen		=	sizeof(int),
2937 		.mode		=	0644,
2938 		.proc_handler	=	proc_dointvec_jiffies,
2939 	},
2940 	{
2941 		.procname	=	"gc_interval",
2942 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_interval,
2943 		.maxlen		=	sizeof(int),
2944 		.mode		=	0644,
2945 		.proc_handler	=	proc_dointvec_jiffies,
2946 	},
2947 	{
2948 		.procname	=	"gc_elasticity",
2949 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
2950 		.maxlen		=	sizeof(int),
2951 		.mode		=	0644,
2952 		.proc_handler	=	proc_dointvec,
2953 	},
2954 	{
2955 		.procname	=	"mtu_expires",
2956 		.data		=	&init_net.ipv6.sysctl.ip6_rt_mtu_expires,
2957 		.maxlen		=	sizeof(int),
2958 		.mode		=	0644,
2959 		.proc_handler	=	proc_dointvec_jiffies,
2960 	},
2961 	{
2962 		.procname	=	"min_adv_mss",
2963 		.data		=	&init_net.ipv6.sysctl.ip6_rt_min_advmss,
2964 		.maxlen		=	sizeof(int),
2965 		.mode		=	0644,
2966 		.proc_handler	=	proc_dointvec,
2967 	},
2968 	{
2969 		.procname	=	"gc_min_interval_ms",
2970 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2971 		.maxlen		=	sizeof(int),
2972 		.mode		=	0644,
2973 		.proc_handler	=	proc_dointvec_ms_jiffies,
2974 	},
2975 	{ }
2976 };
2977 
2978 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
2979 {
2980 	struct ctl_table *table;
2981 
2982 	table = kmemdup(ipv6_route_table_template,
2983 			sizeof(ipv6_route_table_template),
2984 			GFP_KERNEL);
2985 
2986 	if (table) {
2987 		table[0].data = &net->ipv6.sysctl.flush_delay;
2988 		table[0].extra1 = net;
2989 		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
2990 		table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
2991 		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2992 		table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
2993 		table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
2994 		table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
2995 		table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
2996 		table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
2997 		table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2998 
2999 		/* Don't export sysctls to unprivileged users */
3000 		if (net->user_ns != &init_user_ns)
3001 			table[0].procname = NULL;
3002 	}
3003 
3004 	return table;
3005 }
3006 #endif
3007 
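/* Per-namespace setup: clone the dst_ops template and the special
 * null/prohibit/blackhole routes, and set the default route sysctls.
 */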
3008 static int __net_init ip6_route_net_init(struct net *net)
3009 {
3010 	int ret = -ENOMEM;
3011 
3012 	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
3013 	       sizeof(net->ipv6.ip6_dst_ops));
3014 
3015 	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
3016 		goto out_ip6_dst_ops;
3017 
3018 	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
3019 					   sizeof(*net->ipv6.ip6_null_entry),
3020 					   GFP_KERNEL);
3021 	if (!net->ipv6.ip6_null_entry)
3022 		goto out_ip6_dst_entries;
3023 	net->ipv6.ip6_null_entry->dst.path =
3024 		(struct dst_entry *)net->ipv6.ip6_null_entry;
3025 	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3026 	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
3027 			 ip6_template_metrics, true);
3028 
3029 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3030 	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
3031 					       sizeof(*net->ipv6.ip6_prohibit_entry),
3032 					       GFP_KERNEL);
3033 	if (!net->ipv6.ip6_prohibit_entry)
3034 		goto out_ip6_null_entry;
3035 	net->ipv6.ip6_prohibit_entry->dst.path =
3036 		(struct dst_entry *)net->ipv6.ip6_prohibit_entry;
3037 	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3038 	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
3039 			 ip6_template_metrics, true);
3040 
3041 	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
3042 					       sizeof(*net->ipv6.ip6_blk_hole_entry),
3043 					       GFP_KERNEL);
3044 	if (!net->ipv6.ip6_blk_hole_entry)
3045 		goto out_ip6_prohibit_entry;
3046 	net->ipv6.ip6_blk_hole_entry->dst.path =
3047 		(struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
3048 	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3049 	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
3050 			 ip6_template_metrics, true);
3051 #endif
3052 
3053 	net->ipv6.sysctl.flush_delay = 0;
3054 	net->ipv6.sysctl.ip6_rt_max_size = 4096;
3055 	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
3056 	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
3057 	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
3058 	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
3059 	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
3060 	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
3061 
3062 	net->ipv6.ip6_rt_gc_expire = 30*HZ;
3063 
3064 	ret = 0;
3065 out:
3066 	return ret;
3067 
3068 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3069 out_ip6_prohibit_entry:
3070 	kfree(net->ipv6.ip6_prohibit_entry);
3071 out_ip6_null_entry:
3072 	kfree(net->ipv6.ip6_null_entry);
3073 #endif
3074 out_ip6_dst_entries:
3075 	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
3076 out_ip6_dst_ops:
3077 	goto out;
3078 }
3079 
3080 static void __net_exit ip6_route_net_exit(struct net *net)
3081 {
3082 	kfree(net->ipv6.ip6_null_entry);
3083 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3084 	kfree(net->ipv6.ip6_prohibit_entry);
3085 	kfree(net->ipv6.ip6_blk_hole_entry);
3086 #endif
3087 	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
3088 }
3089 
3090 static int __net_init ip6_route_net_init_late(struct net *net)
3091 {
3092 #ifdef CONFIG_PROC_FS
3093 	proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
3094 	proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops);
3095 #endif
3096 	return 0;
3097 }
3098 
3099 static void __net_exit ip6_route_net_exit_late(struct net *net)
3100 {
3101 #ifdef CONFIG_PROC_FS
3102 	remove_proc_entry("ipv6_route", net->proc_net);
3103 	remove_proc_entry("rt6_stats", net->proc_net);
3104 #endif
3105 }
3106 
3107 static struct pernet_operations ip6_route_net_ops = {
3108 	.init = ip6_route_net_init,
3109 	.exit = ip6_route_net_exit,
3110 };
3111 
3112 static int __net_init ipv6_inetpeer_init(struct net *net)
3113 {
3114 	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3115 
3116 	if (!bp)
3117 		return -ENOMEM;
3118 	inet_peer_base_init(bp);
3119 	net->ipv6.peers = bp;
3120 	return 0;
3121 }
3122 
3123 static void __net_exit ipv6_inetpeer_exit(struct net *net)
3124 {
3125 	struct inet_peer_base *bp = net->ipv6.peers;
3126 
3127 	net->ipv6.peers = NULL;
3128 	inetpeer_invalidate_tree(bp);
3129 	kfree(bp);
3130 }
3131 
3132 static struct pernet_operations ipv6_inetpeer_ops = {
3133 	.init	=	ipv6_inetpeer_init,
3134 	.exit	=	ipv6_inetpeer_exit,
3135 };
3136 
3137 static struct pernet_operations ip6_route_net_late_ops = {
3138 	.init = ip6_route_net_init_late,
3139 	.exit = ip6_route_net_exit_late,
3140 };
3141 
3142 static struct notifier_block ip6_route_dev_notifier = {
3143 	.notifier_call = ip6_route_dev_notify,
3144 	.priority = 0,
3145 };
3146 
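/* Subsystem init: create the rt6_info slab cache, register the pernet
 * operations, fib6/xfrm6/fib6-rules hooks, the rtnetlink handlers and the
 * netdevice notifier.
 */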
3147 int __init ip6_route_init(void)
3148 {
3149 	int ret;
3150 
3151 	ret = -ENOMEM;
3152 	ip6_dst_ops_template.kmem_cachep =
3153 		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
3154 				  SLAB_HWCACHE_ALIGN, NULL);
3155 	if (!ip6_dst_ops_template.kmem_cachep)
3156 		goto out;
3157 
3158 	ret = dst_entries_init(&ip6_dst_blackhole_ops);
3159 	if (ret)
3160 		goto out_kmem_cache;
3161 
3162 	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
3163 	if (ret)
3164 		goto out_dst_entries;
3165 
3166 	ret = register_pernet_subsys(&ip6_route_net_ops);
3167 	if (ret)
3168 		goto out_register_inetpeer;
3169 
3170 	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
3171 
3172 	/* The loopback device is registered before this portion of code runs,
3173 	 * so the loopback reference in rt6_info is not taken automatically;
3174 	 * do it manually for init_net. */
3175 	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
3176 	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3177 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3178 	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
3179 	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3180 	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
3181 	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3182 #endif
3183 	ret = fib6_init();
3184 	if (ret)
3185 		goto out_register_subsys;
3186 
3187 	ret = xfrm6_init();
3188 	if (ret)
3189 		goto out_fib6_init;
3190 
3191 	ret = fib6_rules_init();
3192 	if (ret)
3193 		goto xfrm6_init;
3194 
3195 	ret = register_pernet_subsys(&ip6_route_net_late_ops);
3196 	if (ret)
3197 		goto fib6_rules_init;
3198 
3199 	ret = -ENOBUFS;
3200 	if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
3201 	    __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
3202 	    __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
3203 		goto out_register_late_subsys;
3204 
3205 	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
3206 	if (ret)
3207 		goto out_register_late_subsys;
3208 
3209 out:
3210 	return ret;
3211 
3212 out_register_late_subsys:
3213 	unregister_pernet_subsys(&ip6_route_net_late_ops);
3214 fib6_rules_init:
3215 	fib6_rules_cleanup();
3216 xfrm6_init:
3217 	xfrm6_fini();
3218 out_fib6_init:
3219 	fib6_gc_cleanup();
3220 out_register_subsys:
3221 	unregister_pernet_subsys(&ip6_route_net_ops);
3222 out_register_inetpeer:
3223 	unregister_pernet_subsys(&ipv6_inetpeer_ops);
3224 out_dst_entries:
3225 	dst_entries_destroy(&ip6_dst_blackhole_ops);
3226 out_kmem_cache:
3227 	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
3228 	goto out;
3229 }
3230 
3231 void ip6_route_cleanup(void)
3232 {
3233 	unregister_netdevice_notifier(&ip6_route_dev_notifier);
3234 	unregister_pernet_subsys(&ip6_route_net_late_ops);
3235 	fib6_rules_cleanup();
3236 	xfrm6_fini();
3237 	fib6_gc_cleanup();
3238 	unregister_pernet_subsys(&ipv6_inetpeer_ops);
3239 	unregister_pernet_subsys(&ip6_route_net_ops);
3240 	dst_entries_destroy(&ip6_dst_blackhole_ops);
3241 	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
3242 }
3243