xref: /linux/net/ipv6/route.c (revision a7ddedc84c59a645ef970b992f7cda5bffc70cc0)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	Linux INET6 implementation
4  *	FIB front-end.
5  *
6  *	Authors:
7  *	Pedro Roque		<roque@di.fc.ul.pt>
8  */
9 
10 /*	Changes:
11  *
12  *	YOSHIFUJI Hideaki @USAGI
13  *		reworked default router selection.
14  *		- respect outgoing interface
15  *		- select from (probably) reachable routers (i.e.
16  *		routers in REACHABLE, STALE, DELAY or PROBE states).
17  *		- always select the same router if it is (probably)
18  *		reachable.  otherwise, round-robin the list.
19  *	Ville Nuorvala
20  *		Fixed routing subtrees.
21  */
22 
23 #define pr_fmt(fmt) "IPv6: " fmt
24 
25 #include <linux/capability.h>
26 #include <linux/errno.h>
27 #include <linux/export.h>
28 #include <linux/types.h>
29 #include <linux/times.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/route.h>
34 #include <linux/netdevice.h>
35 #include <linux/in6.h>
36 #include <linux/mroute6.h>
37 #include <linux/init.h>
38 #include <linux/if_arp.h>
39 #include <linux/proc_fs.h>
40 #include <linux/seq_file.h>
41 #include <linux/nsproxy.h>
42 #include <linux/slab.h>
43 #include <linux/jhash.h>
44 #include <linux/siphash.h>
45 #include <net/net_namespace.h>
46 #include <net/snmp.h>
47 #include <net/ipv6.h>
48 #include <net/ip6_fib.h>
49 #include <net/ip6_route.h>
50 #include <net/ndisc.h>
51 #include <net/addrconf.h>
52 #include <net/tcp.h>
53 #include <linux/rtnetlink.h>
54 #include <net/dst.h>
55 #include <net/dst_metadata.h>
56 #include <net/xfrm.h>
57 #include <net/netevent.h>
58 #include <net/netlink.h>
59 #include <net/rtnh.h>
60 #include <net/lwtunnel.h>
61 #include <net/ip_tunnels.h>
62 #include <net/l3mdev.h>
63 #include <net/ip.h>
64 #include <linux/uaccess.h>
65 #include <linux/btf_ids.h>
66 
67 #ifdef CONFIG_SYSCTL
68 #include <linux/sysctl.h>
69 #endif
70 
71 static int ip6_rt_type_to_error(u8 fib6_type);
72 
73 #define CREATE_TRACE_POINTS
74 #include <trace/events/fib6.h>
75 EXPORT_TRACEPOINT_SYMBOL_GPL(fib6_table_lookup);
76 #undef CREATE_TRACE_POINTS
77 
78 enum rt6_nud_state {
79 	RT6_NUD_FAIL_HARD = -3,
80 	RT6_NUD_FAIL_PROBE = -2,
81 	RT6_NUD_FAIL_DO_RR = -1,
82 	RT6_NUD_SUCCEED = 1
83 };
84 
85 INDIRECT_CALLABLE_SCOPE
86 struct dst_entry	*ip6_dst_check(struct dst_entry *dst, u32 cookie);
87 static unsigned int	 ip6_default_advmss(const struct dst_entry *dst);
88 INDIRECT_CALLABLE_SCOPE
89 unsigned int		ip6_mtu(const struct dst_entry *dst);
90 static void		ip6_negative_advice(struct sock *sk,
91 					    struct dst_entry *dst);
92 static void		ip6_dst_destroy(struct dst_entry *);
93 static void		ip6_dst_ifdown(struct dst_entry *,
94 				       struct net_device *dev);
95 static void		 ip6_dst_gc(struct dst_ops *ops);
96 
97 static int		ip6_pkt_discard(struct sk_buff *skb);
98 static int		ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
99 static int		ip6_pkt_prohibit(struct sk_buff *skb);
100 static int		ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
101 static void		ip6_link_failure(struct sk_buff *skb);
102 static void		ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
103 					   struct sk_buff *skb, u32 mtu,
104 					   bool confirm_neigh);
105 static void		rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
106 					struct sk_buff *skb);
107 static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
108 			   int strict);
109 static size_t rt6_nlmsg_size(struct fib6_info *f6i);
110 static int rt6_fill_node(struct net *net, struct sk_buff *skb,
111 			 struct fib6_info *rt, struct dst_entry *dst,
112 			 struct in6_addr *dest, struct in6_addr *src,
113 			 int iif, int type, u32 portid, u32 seq,
114 			 unsigned int flags);
115 static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
116 					   const struct in6_addr *daddr,
117 					   const struct in6_addr *saddr);
118 
119 #ifdef CONFIG_IPV6_ROUTE_INFO
120 static struct fib6_info *rt6_add_route_info(struct net *net,
121 					   const struct in6_addr *prefix, int prefixlen,
122 					   const struct in6_addr *gwaddr,
123 					   struct net_device *dev,
124 					   unsigned int pref);
125 static struct fib6_info *rt6_get_route_info(struct net *net,
126 					   const struct in6_addr *prefix, int prefixlen,
127 					   const struct in6_addr *gwaddr,
128 					   struct net_device *dev);
129 #endif
130 
131 struct uncached_list {
132 	spinlock_t		lock;
133 	struct list_head	head;
134 };
135 
136 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
137 
138 void rt6_uncached_list_add(struct rt6_info *rt)
139 {
140 	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
141 
142 	rt->dst.rt_uncached_list = ul;
143 
144 	spin_lock_bh(&ul->lock);
145 	list_add_tail(&rt->dst.rt_uncached, &ul->head);
146 	spin_unlock_bh(&ul->lock);
147 }
148 
149 void rt6_uncached_list_del(struct rt6_info *rt)
150 {
151 	if (!list_empty(&rt->dst.rt_uncached)) {
152 		struct uncached_list *ul = rt->dst.rt_uncached_list;
153 
154 		spin_lock_bh(&ul->lock);
155 		list_del_init(&rt->dst.rt_uncached);
156 		spin_unlock_bh(&ul->lock);
157 	}
158 }
159 
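/* A device is going away: walk every per-cpu uncached list and re-point any
 * rt6_info that still refers to it (via rt6i_idev or dst.dev) at
 * blackhole_netdev, then drop the handled entries from the list.
 */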
160 static void rt6_uncached_list_flush_dev(struct net_device *dev)
161 {
162 	int cpu;
163 
164 	for_each_possible_cpu(cpu) {
165 		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
166 		struct rt6_info *rt, *safe;
167 
168 		if (list_empty(&ul->head))
169 			continue;
170 
171 		spin_lock_bh(&ul->lock);
172 		list_for_each_entry_safe(rt, safe, &ul->head, dst.rt_uncached) {
173 			struct inet6_dev *rt_idev = rt->rt6i_idev;
174 			struct net_device *rt_dev = rt->dst.dev;
175 			bool handled = false;
176 
177 			if (rt_idev && rt_idev->dev == dev) {
178 				rt->rt6i_idev = in6_dev_get(blackhole_netdev);
179 				in6_dev_put(rt_idev);
180 				handled = true;
181 			}
182 
183 			if (rt_dev == dev) {
184 				rt->dst.dev = blackhole_netdev;
185 				netdev_ref_replace(rt_dev, blackhole_netdev,
186 						   &rt->dst.dev_tracker,
187 						   GFP_ATOMIC);
188 				handled = true;
189 			}
190 			if (handled)
191 				list_del_init(&rt->dst.rt_uncached);
192 		}
193 		spin_unlock_bh(&ul->lock);
194 	}
195 }
196 
197 static inline const void *choose_neigh_daddr(const struct in6_addr *p,
198 					     struct sk_buff *skb,
199 					     const void *daddr)
200 {
201 	if (!ipv6_addr_any(p))
202 		return (const void *) p;
203 	else if (skb)
204 		return &ipv6_hdr(skb)->daddr;
205 	return daddr;
206 }
207 
208 struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
209 				   struct net_device *dev,
210 				   struct sk_buff *skb,
211 				   const void *daddr)
212 {
213 	struct neighbour *n;
214 
215 	daddr = choose_neigh_daddr(gw, skb, daddr);
216 	n = __ipv6_neigh_lookup(dev, daddr);
217 	if (n)
218 		return n;
219 
220 	n = neigh_create(&nd_tbl, daddr, dev);
221 	return IS_ERR(n) ? NULL : n;
222 }
223 
224 static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
225 					      struct sk_buff *skb,
226 					      const void *daddr)
227 {
228 	const struct rt6_info *rt = dst_rt6_info(dst);
229 
230 	return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any),
231 				dst_dev(dst), skb, daddr);
232 }
233 
234 static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
235 {
236 	const struct rt6_info *rt = dst_rt6_info(dst);
237 	struct net_device *dev = dst_dev(dst);
238 
239 	daddr = choose_neigh_daddr(rt6_nexthop(rt, &in6addr_any), NULL, daddr);
240 	if (!daddr)
241 		return;
242 	if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
243 		return;
244 	if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
245 		return;
246 	__ipv6_confirm_neigh(dev, daddr);
247 }
248 
249 static struct dst_ops ip6_dst_ops_template = {
250 	.family			=	AF_INET6,
251 	.gc			=	ip6_dst_gc,
252 	.gc_thresh		=	1024,
253 	.check			=	ip6_dst_check,
254 	.default_advmss		=	ip6_default_advmss,
255 	.mtu			=	ip6_mtu,
256 	.cow_metrics		=	dst_cow_metrics_generic,
257 	.destroy		=	ip6_dst_destroy,
258 	.ifdown			=	ip6_dst_ifdown,
259 	.negative_advice	=	ip6_negative_advice,
260 	.link_failure		=	ip6_link_failure,
261 	.update_pmtu		=	ip6_rt_update_pmtu,
262 	.redirect		=	rt6_do_redirect,
263 	.local_out		=	__ip6_local_out,
264 	.neigh_lookup		=	ip6_dst_neigh_lookup,
265 	.confirm_neigh		=	ip6_confirm_neigh,
266 };
267 
268 static struct dst_ops ip6_dst_blackhole_ops = {
269 	.family			= AF_INET6,
270 	.default_advmss		= ip6_default_advmss,
271 	.neigh_lookup		= ip6_dst_neigh_lookup,
272 	.check			= ip6_dst_check,
273 	.destroy		= ip6_dst_destroy,
274 	.cow_metrics		= dst_cow_metrics_generic,
275 	.update_pmtu		= dst_blackhole_update_pmtu,
276 	.redirect		= dst_blackhole_redirect,
277 	.mtu			= dst_blackhole_mtu,
278 };
279 
280 static const u32 ip6_template_metrics[RTAX_MAX] = {
281 	[RTAX_HOPLIMIT - 1] = 0,
282 };
283 
284 static const struct fib6_info fib6_null_entry_template = {
285 	.fib6_flags	= (RTF_REJECT | RTF_NONEXTHOP),
286 	.fib6_protocol  = RTPROT_KERNEL,
287 	.fib6_metric	= ~(u32)0,
288 	.fib6_ref	= REFCOUNT_INIT(1),
289 	.fib6_type	= RTN_UNREACHABLE,
290 	.fib6_metrics	= (struct dst_metrics *)&dst_default_metrics,
291 };
292 
293 static const struct rt6_info ip6_null_entry_template = {
294 	.dst = {
295 		.__rcuref	= RCUREF_INIT(1),
296 		.__use		= 1,
297 		.obsolete	= DST_OBSOLETE_FORCE_CHK,
298 		.error		= -ENETUNREACH,
299 		.input		= ip6_pkt_discard,
300 		.output		= ip6_pkt_discard_out,
301 	},
302 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
303 };
304 
305 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
306 
307 static const struct rt6_info ip6_prohibit_entry_template = {
308 	.dst = {
309 		.__rcuref	= RCUREF_INIT(1),
310 		.__use		= 1,
311 		.obsolete	= DST_OBSOLETE_FORCE_CHK,
312 		.error		= -EACCES,
313 		.input		= ip6_pkt_prohibit,
314 		.output		= ip6_pkt_prohibit_out,
315 	},
316 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
317 };
318 
319 static const struct rt6_info ip6_blk_hole_entry_template = {
320 	.dst = {
321 		.__rcuref	= RCUREF_INIT(1),
322 		.__use		= 1,
323 		.obsolete	= DST_OBSOLETE_FORCE_CHK,
324 		.error		= -EINVAL,
325 		.input		= dst_discard,
326 		.output		= dst_discard_out,
327 	},
328 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
329 };
330 
331 #endif
332 
333 static void rt6_info_init(struct rt6_info *rt)
334 {
335 	memset_after(rt, 0, dst);
336 }
337 
338 /* allocate dst with ip6_dst_ops */
339 struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
340 			       int flags)
341 {
342 	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
343 					DST_OBSOLETE_FORCE_CHK, flags);
344 
345 	if (rt) {
346 		rt6_info_init(rt);
347 		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
348 	}
349 
350 	return rt;
351 }
352 EXPORT_SYMBOL(ip6_dst_alloc);
353 
354 static void ip6_dst_destroy(struct dst_entry *dst)
355 {
356 	struct rt6_info *rt = dst_rt6_info(dst);
357 	struct fib6_info *from;
358 	struct inet6_dev *idev;
359 
360 	ip_dst_metrics_put(dst);
361 	rt6_uncached_list_del(rt);
362 
363 	idev = rt->rt6i_idev;
364 	if (idev) {
365 		rt->rt6i_idev = NULL;
366 		in6_dev_put(idev);
367 	}
368 
369 	from = unrcu_pointer(xchg(&rt->from, NULL));
370 	fib6_info_release(from);
371 }
372 
373 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
374 {
375 	struct rt6_info *rt = dst_rt6_info(dst);
376 	struct inet6_dev *idev = rt->rt6i_idev;
377 	struct fib6_info *from;
378 
379 	if (idev && idev->dev != blackhole_netdev) {
380 		struct inet6_dev *blackhole_idev = in6_dev_get(blackhole_netdev);
381 
382 		if (blackhole_idev) {
383 			rt->rt6i_idev = blackhole_idev;
384 			in6_dev_put(idev);
385 		}
386 	}
387 	from = unrcu_pointer(xchg(&rt->from, NULL));
388 	fib6_info_release(from);
389 }
390 
391 static bool __rt6_check_expired(const struct rt6_info *rt)
392 {
393 	if (rt->rt6i_flags & RTF_EXPIRES)
394 		return time_after(jiffies, READ_ONCE(rt->dst.expires));
395 	return false;
396 }
397 
398 static bool rt6_check_expired(const struct rt6_info *rt)
399 {
400 	struct fib6_info *from;
401 
402 	from = rcu_dereference(rt->from);
403 
404 	if (rt->rt6i_flags & RTF_EXPIRES) {
405 		if (time_after(jiffies, READ_ONCE(rt->dst.expires)))
406 			return true;
407 	} else if (from) {
408 		return READ_ONCE(rt->dst.obsolete) != DST_OBSOLETE_FORCE_CHK ||
409 			fib6_check_expired(from);
410 	}
411 	return false;
412 }
413 
414 static struct fib6_info *
415 rt6_multipath_first_sibling_rcu(const struct fib6_info *rt)
416 {
417 	struct fib6_info *iter;
418 	struct fib6_node *fn;
419 
420 	fn = rcu_dereference(rt->fib6_node);
421 	if (!fn)
422 		goto out;
423 	iter = rcu_dereference(fn->leaf);
424 	if (!iter)
425 		goto out;
426 
427 	while (iter) {
428 		if (iter->fib6_metric == rt->fib6_metric &&
429 		    rt6_qualify_for_ecmp(iter))
430 			return iter;
431 		iter = rcu_dereference(iter->fib6_next);
432 	}
433 
434 out:
435 	return NULL;
436 }
437 
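/* Pick which sibling/nexthop of a multipath route to use: compute the flow
 * hash if it has not been computed yet, then select the first sibling whose
 * upper bound covers the hash and whose nexthop scores acceptably for
 * oif/strict. Updates res in place; runs under rcu_read_lock().
 */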
438 void fib6_select_path(const struct net *net, struct fib6_result *res,
439 		      struct flowi6 *fl6, int oif, bool have_oif_match,
440 		      const struct sk_buff *skb, int strict)
441 {
442 	struct fib6_info *first, *match = res->f6i;
443 	struct fib6_info *sibling;
444 	int hash;
445 
446 	if (!match->nh && (!match->fib6_nsiblings || have_oif_match))
447 		goto out;
448 
449 	if (match->nh && have_oif_match && res->nh)
450 		return;
451 
452 	if (skb)
453 		IP6CB(skb)->flags |= IP6SKB_MULTIPATH;
454 
455 	/* We might have already computed the hash for ICMPv6 errors. In such
456 	 * a case it will always be non-zero. Otherwise now is the time to do it.
457 	 */
458 	if (!fl6->mp_hash &&
459 	    (!match->nh || nexthop_is_multipath(match->nh)))
460 		fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);
461 
462 	if (unlikely(match->nh)) {
463 		nexthop_path_fib6_result(res, fl6->mp_hash);
464 		return;
465 	}
466 
467 	first = rt6_multipath_first_sibling_rcu(match);
468 	if (!first)
469 		goto out;
470 
471 	hash = fl6->mp_hash;
472 	if (hash <= atomic_read(&first->fib6_nh->fib_nh_upper_bound)) {
473 		if (rt6_score_route(first->fib6_nh, first->fib6_flags, oif,
474 				    strict) >= 0)
475 			match = first;
476 		goto out;
477 	}
478 
479 	list_for_each_entry_rcu(sibling, &first->fib6_siblings,
480 				fib6_siblings) {
481 		const struct fib6_nh *nh = sibling->fib6_nh;
482 		int nh_upper_bound;
483 
484 		nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound);
485 		if (hash > nh_upper_bound)
486 			continue;
487 		if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0)
488 			break;
489 		match = sibling;
490 		break;
491 	}
492 
493 out:
494 	res->f6i = match;
495 	res->nh = match->fib6_nh;
496 }
497 
498 /*
499  *	Route lookup. rcu_read_lock() should be held.
500  */
501 
502 static bool __rt6_device_match(struct net *net, const struct fib6_nh *nh,
503 			       const struct in6_addr *saddr, int oif, int flags)
504 {
505 	const struct net_device *dev;
506 
507 	if (nh->fib_nh_flags & RTNH_F_DEAD)
508 		return false;
509 
510 	dev = nh->fib_nh_dev;
511 	if (oif) {
512 		if (dev->ifindex == oif)
513 			return true;
514 	} else {
515 		if (ipv6_chk_addr(net, saddr, dev,
516 				  flags & RT6_LOOKUP_F_IFACE))
517 			return true;
518 	}
519 
520 	return false;
521 }
522 
523 struct fib6_nh_dm_arg {
524 	struct net		*net;
525 	const struct in6_addr	*saddr;
526 	int			oif;
527 	int			flags;
528 	struct fib6_nh		*nh;
529 };
530 
531 static int __rt6_nh_dev_match(struct fib6_nh *nh, void *_arg)
532 {
533 	struct fib6_nh_dm_arg *arg = _arg;
534 
535 	arg->nh = nh;
536 	return __rt6_device_match(arg->net, nh, arg->saddr, arg->oif,
537 				  arg->flags);
538 }
539 
540 /* returns fib6_nh from nexthop or NULL */
541 static struct fib6_nh *rt6_nh_dev_match(struct net *net, struct nexthop *nh,
542 					struct fib6_result *res,
543 					const struct in6_addr *saddr,
544 					int oif, int flags)
545 {
546 	struct fib6_nh_dm_arg arg = {
547 		.net   = net,
548 		.saddr = saddr,
549 		.oif   = oif,
550 		.flags = flags,
551 	};
552 
553 	if (nexthop_is_blackhole(nh))
554 		return NULL;
555 
556 	if (nexthop_for_each_fib6_nh(nh, __rt6_nh_dev_match, &arg))
557 		return arg.nh;
558 
559 	return NULL;
560 }
561 
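/* Resolve which fib6_info/fib6_nh in this leaf chain matches the requested
 * output interface and/or source address. When a strict interface match is
 * required but none is found, the result is set to the null entry.
 */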
562 static void rt6_device_match(struct net *net, struct fib6_result *res,
563 			     const struct in6_addr *saddr, int oif, int flags)
564 {
565 	struct fib6_info *f6i = res->f6i;
566 	struct fib6_info *spf6i;
567 	struct fib6_nh *nh;
568 
569 	if (!oif && ipv6_addr_any(saddr)) {
570 		if (unlikely(f6i->nh)) {
571 			nh = nexthop_fib6_nh(f6i->nh);
572 			if (nexthop_is_blackhole(f6i->nh))
573 				goto out_blackhole;
574 		} else {
575 			nh = f6i->fib6_nh;
576 		}
577 		if (!(nh->fib_nh_flags & RTNH_F_DEAD))
578 			goto out;
579 	}
580 
581 	for (spf6i = f6i; spf6i; spf6i = rcu_dereference(spf6i->fib6_next)) {
582 		bool matched = false;
583 
584 		if (unlikely(spf6i->nh)) {
585 			nh = rt6_nh_dev_match(net, spf6i->nh, res, saddr,
586 					      oif, flags);
587 			if (nh)
588 				matched = true;
589 		} else {
590 			nh = spf6i->fib6_nh;
591 			if (__rt6_device_match(net, nh, saddr, oif, flags))
592 				matched = true;
593 		}
594 		if (matched) {
595 			res->f6i = spf6i;
596 			goto out;
597 		}
598 	}
599 
600 	if (oif && flags & RT6_LOOKUP_F_IFACE) {
601 		res->f6i = net->ipv6.fib6_null_entry;
602 		nh = res->f6i->fib6_nh;
603 		goto out;
604 	}
605 
606 	if (unlikely(f6i->nh)) {
607 		nh = nexthop_fib6_nh(f6i->nh);
608 		if (nexthop_is_blackhole(f6i->nh))
609 			goto out_blackhole;
610 	} else {
611 		nh = f6i->fib6_nh;
612 	}
613 
614 	if (nh->fib_nh_flags & RTNH_F_DEAD) {
615 		res->f6i = net->ipv6.fib6_null_entry;
616 		nh = res->f6i->fib6_nh;
617 	}
618 out:
619 	res->nh = nh;
620 	res->fib6_type = res->f6i->fib6_type;
621 	res->fib6_flags = res->f6i->fib6_flags;
622 	return;
623 
624 out_blackhole:
625 	res->fib6_flags |= RTF_REJECT;
626 	res->fib6_type = RTN_BLACKHOLE;
627 	res->nh = nh;
628 }
629 
630 #ifdef CONFIG_IPV6_ROUTER_PREF
631 struct __rt6_probe_work {
632 	struct work_struct work;
633 	struct in6_addr target;
634 	struct net_device *dev;
635 	netdevice_tracker dev_tracker;
636 };
637 
638 static void rt6_probe_deferred(struct work_struct *w)
639 {
640 	struct in6_addr mcaddr;
641 	struct __rt6_probe_work *work =
642 		container_of(w, struct __rt6_probe_work, work);
643 
644 	addrconf_addr_solict_mult(&work->target, &mcaddr);
645 	ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
646 	netdev_put(work->dev, &work->dev_tracker);
647 	kfree(work);
648 }
649 
650 static void rt6_probe(struct fib6_nh *fib6_nh)
651 {
652 	struct __rt6_probe_work *work = NULL;
653 	const struct in6_addr *nh_gw;
654 	unsigned long last_probe;
655 	struct neighbour *neigh;
656 	struct net_device *dev;
657 	struct inet6_dev *idev;
658 
659 	/*
660 	 * Okay, this does not seem to be appropriate
661 	 * for now; however, we need to check if it
662 	 * is really so, aka Router Reachability Probing.
663 	 *
664 	 * Router Reachability Probe MUST be rate-limited
665 	 * to no more than one per minute.
666 	 */
667 	if (!fib6_nh->fib_nh_gw_family)
668 		return;
669 
670 	nh_gw = &fib6_nh->fib_nh_gw6;
671 	dev = fib6_nh->fib_nh_dev;
672 	rcu_read_lock();
673 	last_probe = READ_ONCE(fib6_nh->last_probe);
674 	idev = __in6_dev_get(dev);
675 	if (!idev)
676 		goto out;
677 	neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
678 	if (neigh) {
679 		if (READ_ONCE(neigh->nud_state) & NUD_VALID)
680 			goto out;
681 
682 		write_lock_bh(&neigh->lock);
683 		if (!(neigh->nud_state & NUD_VALID) &&
684 		    time_after(jiffies,
685 			       neigh->updated +
686 			       READ_ONCE(idev->cnf.rtr_probe_interval))) {
687 			work = kmalloc(sizeof(*work), GFP_ATOMIC);
688 			if (work)
689 				__neigh_set_probe_once(neigh);
690 		}
691 		write_unlock_bh(&neigh->lock);
692 	} else if (time_after(jiffies, last_probe +
693 				       READ_ONCE(idev->cnf.rtr_probe_interval))) {
694 		work = kmalloc(sizeof(*work), GFP_ATOMIC);
695 	}
696 
697 	if (!work || cmpxchg(&fib6_nh->last_probe,
698 			     last_probe, jiffies) != last_probe) {
699 		kfree(work);
700 	} else {
701 		INIT_WORK(&work->work, rt6_probe_deferred);
702 		work->target = *nh_gw;
703 		netdev_hold(dev, &work->dev_tracker, GFP_ATOMIC);
704 		work->dev = dev;
705 		schedule_work(&work->work);
706 	}
707 
708 out:
709 	rcu_read_unlock();
710 }
711 #else
712 static inline void rt6_probe(struct fib6_nh *fib6_nh)
713 {
714 }
715 #endif
716 
717 /*
718  * Default Router Selection (RFC 2461 6.3.6)
719  */
720 static enum rt6_nud_state rt6_check_neigh(const struct fib6_nh *fib6_nh)
721 {
722 	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
723 	struct neighbour *neigh;
724 
725 	rcu_read_lock();
726 	neigh = __ipv6_neigh_lookup_noref(fib6_nh->fib_nh_dev,
727 					  &fib6_nh->fib_nh_gw6);
728 	if (neigh) {
729 		u8 nud_state = READ_ONCE(neigh->nud_state);
730 
731 		if (nud_state & NUD_VALID)
732 			ret = RT6_NUD_SUCCEED;
733 #ifdef CONFIG_IPV6_ROUTER_PREF
734 		else if (!(nud_state & NUD_FAILED))
735 			ret = RT6_NUD_SUCCEED;
736 		else
737 			ret = RT6_NUD_FAIL_PROBE;
738 #endif
739 	} else {
740 		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
741 		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
742 	}
743 	rcu_read_unlock();
744 
745 	return ret;
746 }
747 
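/* Score a nexthop for default router selection: 2 when the output interface
 * matches (or no oif is given), plus the router preference bits from
 * fib6_flags, plus a neighbour reachability check when RT6_LOOKUP_F_REACHABLE
 * is set. Negative return values are RT6_NUD_* failure codes.
 */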
748 static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
749 			   int strict)
750 {
751 	int m = 0;
752 
753 	if (!oif || nh->fib_nh_dev->ifindex == oif)
754 		m = 2;
755 
756 	if (!m && (strict & RT6_LOOKUP_F_IFACE))
757 		return RT6_NUD_FAIL_HARD;
758 #ifdef CONFIG_IPV6_ROUTER_PREF
759 	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(fib6_flags)) << 2;
760 #endif
761 	if ((strict & RT6_LOOKUP_F_REACHABLE) &&
762 	    !(fib6_flags & RTF_NONEXTHOP) && nh->fib_nh_gw_family) {
763 		int n = rt6_check_neigh(nh);
764 		if (n < 0)
765 			return n;
766 	}
767 	return m;
768 }
769 
770 static bool find_match(struct fib6_nh *nh, u32 fib6_flags,
771 		       int oif, int strict, int *mpri, bool *do_rr)
772 {
773 	bool match_do_rr = false;
774 	bool rc = false;
775 	int m;
776 
777 	if (nh->fib_nh_flags & RTNH_F_DEAD)
778 		goto out;
779 
780 	if (ip6_ignore_linkdown(nh->fib_nh_dev) &&
781 	    nh->fib_nh_flags & RTNH_F_LINKDOWN &&
782 	    !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
783 		goto out;
784 
785 	m = rt6_score_route(nh, fib6_flags, oif, strict);
786 	if (m == RT6_NUD_FAIL_DO_RR) {
787 		match_do_rr = true;
788 		m = 0; /* lowest valid score */
789 	} else if (m == RT6_NUD_FAIL_HARD) {
790 		goto out;
791 	}
792 
793 	if (strict & RT6_LOOKUP_F_REACHABLE)
794 		rt6_probe(nh);
795 
796 	/* note that m can be RT6_NUD_FAIL_PROBE at this point */
797 	if (m > *mpri) {
798 		*do_rr = match_do_rr;
799 		*mpri = m;
800 		rc = true;
801 	}
802 out:
803 	return rc;
804 }
805 
806 struct fib6_nh_frl_arg {
807 	u32		flags;
808 	int		oif;
809 	int		strict;
810 	int		*mpri;
811 	bool		*do_rr;
812 	struct fib6_nh	*nh;
813 };
814 
815 static int rt6_nh_find_match(struct fib6_nh *nh, void *_arg)
816 {
817 	struct fib6_nh_frl_arg *arg = _arg;
818 
819 	arg->nh = nh;
820 	return find_match(nh, arg->flags, arg->oif, arg->strict,
821 			  arg->mpri, arg->do_rr);
822 }
823 
824 static void __find_rr_leaf(struct fib6_info *f6i_start,
825 			   struct fib6_info *nomatch, u32 metric,
826 			   struct fib6_result *res, struct fib6_info **cont,
827 			   int oif, int strict, bool *do_rr, int *mpri)
828 {
829 	struct fib6_info *f6i;
830 
831 	for (f6i = f6i_start;
832 	     f6i && f6i != nomatch;
833 	     f6i = rcu_dereference(f6i->fib6_next)) {
834 		bool matched = false;
835 		struct fib6_nh *nh;
836 
837 		if (cont && f6i->fib6_metric != metric) {
838 			*cont = f6i;
839 			return;
840 		}
841 
842 		if (fib6_check_expired(f6i))
843 			continue;
844 
845 		if (unlikely(f6i->nh)) {
846 			struct fib6_nh_frl_arg arg = {
847 				.flags  = f6i->fib6_flags,
848 				.oif    = oif,
849 				.strict = strict,
850 				.mpri   = mpri,
851 				.do_rr  = do_rr
852 			};
853 
854 			if (nexthop_is_blackhole(f6i->nh)) {
855 				res->fib6_flags = RTF_REJECT;
856 				res->fib6_type = RTN_BLACKHOLE;
857 				res->f6i = f6i;
858 				res->nh = nexthop_fib6_nh(f6i->nh);
859 				return;
860 			}
861 			if (nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_find_match,
862 						     &arg)) {
863 				matched = true;
864 				nh = arg.nh;
865 			}
866 		} else {
867 			nh = f6i->fib6_nh;
868 			if (find_match(nh, f6i->fib6_flags, oif, strict,
869 				       mpri, do_rr))
870 				matched = true;
871 		}
872 		if (matched) {
873 			res->f6i = f6i;
874 			res->nh = nh;
875 			res->fib6_flags = f6i->fib6_flags;
876 			res->fib6_type = f6i->fib6_type;
877 		}
878 	}
879 }
880 
881 static void find_rr_leaf(struct fib6_node *fn, struct fib6_info *leaf,
882 			 struct fib6_info *rr_head, int oif, int strict,
883 			 bool *do_rr, struct fib6_result *res)
884 {
885 	u32 metric = rr_head->fib6_metric;
886 	struct fib6_info *cont = NULL;
887 	int mpri = -1;
888 
889 	__find_rr_leaf(rr_head, NULL, metric, res, &cont,
890 		       oif, strict, do_rr, &mpri);
891 
892 	__find_rr_leaf(leaf, rr_head, metric, res, &cont,
893 		       oif, strict, do_rr, &mpri);
894 
895 	if (res->f6i || !cont)
896 		return;
897 
898 	__find_rr_leaf(cont, NULL, metric, res, NULL,
899 		       oif, strict, do_rr, &mpri);
900 }
901 
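/* Select the best route from this fib6_node for the given oif/strict flags,
 * round-robining between routes of equal metric when requested; falls back
 * to the net's fib6_null_entry when nothing matches.
 */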
902 static void rt6_select(struct net *net, struct fib6_node *fn, int oif,
903 		       struct fib6_result *res, int strict)
904 {
905 	struct fib6_info *leaf = rcu_dereference(fn->leaf);
906 	struct fib6_info *rt0;
907 	bool do_rr = false;
908 	int key_plen;
909 
910 	/* make sure this function or its helpers set f6i */
911 	res->f6i = NULL;
912 
913 	if (!leaf || leaf == net->ipv6.fib6_null_entry)
914 		goto out;
915 
916 	rt0 = rcu_dereference(fn->rr_ptr);
917 	if (!rt0)
918 		rt0 = leaf;
919 
920 	/* Double check to make sure fn is not an intermediate node
921 	 * and fn->leaf does not point to its child's leaf
922 	 * (This might happen if all routes under fn are deleted from
923 	 * the tree and fib6_repair_tree() is called on the node.)
924 	 */
925 	key_plen = rt0->fib6_dst.plen;
926 #ifdef CONFIG_IPV6_SUBTREES
927 	if (rt0->fib6_src.plen)
928 		key_plen = rt0->fib6_src.plen;
929 #endif
930 	if (fn->fn_bit != key_plen)
931 		goto out;
932 
933 	find_rr_leaf(fn, leaf, rt0, oif, strict, &do_rr, res);
934 	if (do_rr) {
935 		struct fib6_info *next = rcu_dereference(rt0->fib6_next);
936 
937 		/* no entries matched; do round-robin */
938 		if (!next || next->fib6_metric != rt0->fib6_metric)
939 			next = leaf;
940 
941 		if (next != rt0) {
942 			spin_lock_bh(&leaf->fib6_table->tb6_lock);
943 			/* make sure next is not being deleted from the tree */
944 			if (next->fib6_node)
945 				rcu_assign_pointer(fn->rr_ptr, next);
946 			spin_unlock_bh(&leaf->fib6_table->tb6_lock);
947 		}
948 	}
949 
950 out:
951 	if (!res->f6i) {
952 		res->f6i = net->ipv6.fib6_null_entry;
953 		res->nh = res->f6i->fib6_nh;
954 		res->fib6_flags = res->f6i->fib6_flags;
955 		res->fib6_type = res->f6i->fib6_type;
956 	}
957 }
958 
959 static bool rt6_is_gw_or_nonexthop(const struct fib6_result *res)
960 {
961 	return (res->f6i->fib6_flags & RTF_NONEXTHOP) ||
962 	       res->nh->fib_nh_gw_family;
963 }
964 
965 #ifdef CONFIG_IPV6_ROUTE_INFO
966 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
967 		  const struct in6_addr *gwaddr)
968 {
969 	struct net *net = dev_net(dev);
970 	struct route_info *rinfo = (struct route_info *) opt;
971 	struct in6_addr prefix_buf, *prefix;
972 	struct fib6_table *table;
973 	unsigned int pref;
974 	unsigned long lifetime;
975 	struct fib6_info *rt;
976 
977 	if (len < sizeof(struct route_info)) {
978 		return -EINVAL;
979 	}
980 
981 	/* Sanity check for prefix_len and length */
982 	if (rinfo->length > 3) {
983 		return -EINVAL;
984 	} else if (rinfo->prefix_len > 128) {
985 		return -EINVAL;
986 	} else if (rinfo->prefix_len > 64) {
987 		if (rinfo->length < 2) {
988 			return -EINVAL;
989 		}
990 	} else if (rinfo->prefix_len > 0) {
991 		if (rinfo->length < 1) {
992 			return -EINVAL;
993 		}
994 	}
995 
996 	pref = rinfo->route_pref;
997 	if (pref == ICMPV6_ROUTER_PREF_INVALID)
998 		return -EINVAL;
999 
1000 	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
1001 
1002 	if (rinfo->length == 3)
1003 		prefix = (struct in6_addr *)rinfo->prefix;
1004 	else {
1005 		/* this function is safe */
1006 		ipv6_addr_prefix(&prefix_buf,
1007 				 (struct in6_addr *)rinfo->prefix,
1008 				 rinfo->prefix_len);
1009 		prefix = &prefix_buf;
1010 	}
1011 
1012 	if (rinfo->prefix_len == 0)
1013 		rt = rt6_get_dflt_router(net, gwaddr, dev);
1014 	else
1015 		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
1016 					gwaddr, dev);
1017 
1018 	if (rt && !lifetime) {
1019 		ip6_del_rt(net, rt, false);
1020 		rt = NULL;
1021 	}
1022 
1023 	if (!rt && lifetime)
1024 		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
1025 					dev, pref);
1026 	else if (rt)
1027 		rt->fib6_flags = RTF_ROUTEINFO |
1028 				 (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
1029 
1030 	if (rt) {
1031 		table = rt->fib6_table;
1032 		spin_lock_bh(&table->tb6_lock);
1033 
1034 		if (!addrconf_finite_timeout(lifetime)) {
1035 			fib6_clean_expires(rt);
1036 			fib6_remove_gc_list(rt);
1037 		} else {
1038 			fib6_set_expires(rt, jiffies + HZ * lifetime);
1039 			fib6_add_gc_list(rt);
1040 		}
1041 
1042 		spin_unlock_bh(&table->tb6_lock);
1043 
1044 		fib6_info_release(rt);
1045 	}
1046 	return 0;
1047 }
1048 #endif
1049 
1050 /*
1051  *	Misc support functions
1052  */
1053 
1054 /* called with rcu_lock held */
1055 static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
1056 {
1057 	struct net_device *dev = res->nh->fib_nh_dev;
1058 
1059 	if (res->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) {
1060 		/* for copies of local routes, dst->dev needs to be the device
1061 		 * itself if it is a master device, the master device if the
1062 		 * device is enslaved, and the loopback device as the default
1063 		 */
1064 		if (netif_is_l3_slave(dev) &&
1065 		    !rt6_need_strict(&res->f6i->fib6_dst.addr))
1066 			dev = l3mdev_master_dev_rcu(dev);
1067 		else if (!netif_is_l3_master(dev))
1068 			dev = dev_net(dev)->loopback_dev;
1069 		/* the last case is netif_is_l3_master(dev) being true, in
1070 		 * which case we want dev itself to be returned
1071 		 */
1072 	}
1073 
1074 	return dev;
1075 }
1076 
1077 static const int fib6_prop[RTN_MAX + 1] = {
1078 	[RTN_UNSPEC]	= 0,
1079 	[RTN_UNICAST]	= 0,
1080 	[RTN_LOCAL]	= 0,
1081 	[RTN_BROADCAST]	= 0,
1082 	[RTN_ANYCAST]	= 0,
1083 	[RTN_MULTICAST]	= 0,
1084 	[RTN_BLACKHOLE]	= -EINVAL,
1085 	[RTN_UNREACHABLE] = -EHOSTUNREACH,
1086 	[RTN_PROHIBIT]	= -EACCES,
1087 	[RTN_THROW]	= -EAGAIN,
1088 	[RTN_NAT]	= -EINVAL,
1089 	[RTN_XRESOLVE]	= -EINVAL,
1090 };
1091 
1092 static int ip6_rt_type_to_error(u8 fib6_type)
1093 {
1094 	return fib6_prop[fib6_type];
1095 }
1096 
1097 static unsigned short fib6_info_dst_flags(struct fib6_info *rt)
1098 {
1099 	unsigned short flags = 0;
1100 
1101 	if (rt->dst_nocount)
1102 		flags |= DST_NOCOUNT;
1103 	if (rt->dst_nopolicy)
1104 		flags |= DST_NOPOLICY;
1105 
1106 	return flags;
1107 }
1108 
1109 static void ip6_rt_init_dst_reject(struct rt6_info *rt, u8 fib6_type)
1110 {
1111 	rt->dst.error = ip6_rt_type_to_error(fib6_type);
1112 
1113 	switch (fib6_type) {
1114 	case RTN_BLACKHOLE:
1115 		rt->dst.output = dst_discard_out;
1116 		rt->dst.input = dst_discard;
1117 		break;
1118 	case RTN_PROHIBIT:
1119 		rt->dst.output = ip6_pkt_prohibit_out;
1120 		rt->dst.input = ip6_pkt_prohibit;
1121 		break;
1122 	case RTN_THROW:
1123 	case RTN_UNREACHABLE:
1124 	default:
1125 		rt->dst.output = ip6_pkt_discard_out;
1126 		rt->dst.input = ip6_pkt_discard;
1127 		break;
1128 	}
1129 }
1130 
1131 static void ip6_rt_init_dst(struct rt6_info *rt, const struct fib6_result *res)
1132 {
1133 	struct fib6_info *f6i = res->f6i;
1134 
1135 	if (res->fib6_flags & RTF_REJECT) {
1136 		ip6_rt_init_dst_reject(rt, res->fib6_type);
1137 		return;
1138 	}
1139 
1140 	rt->dst.error = 0;
1141 	rt->dst.output = ip6_output;
1142 
1143 	if (res->fib6_type == RTN_LOCAL || res->fib6_type == RTN_ANYCAST) {
1144 		rt->dst.input = ip6_input;
1145 	} else if (ipv6_addr_type(&f6i->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
1146 		rt->dst.input = ip6_mc_input;
1147 		rt->dst.output = ip6_mr_output;
1148 	} else {
1149 		rt->dst.input = ip6_forward;
1150 	}
1151 
1152 	if (res->nh->fib_nh_lws) {
1153 		rt->dst.lwtstate = lwtstate_get(res->nh->fib_nh_lws);
1154 		lwtunnel_set_redirect(&rt->dst);
1155 	}
1156 
1157 	rt->dst.lastuse = jiffies;
1158 }
1159 
1160 /* Caller must already hold reference to @from */
1161 static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
1162 {
1163 	rt->rt6i_flags &= ~RTF_EXPIRES;
1164 	rcu_assign_pointer(rt->from, from);
1165 	ip_dst_init_metrics(&rt->dst, from->fib6_metrics);
1166 }
1167 
1168 /* Caller must already hold reference to f6i in result */
1169 static void ip6_rt_copy_init(struct rt6_info *rt, const struct fib6_result *res)
1170 {
1171 	const struct fib6_nh *nh = res->nh;
1172 	const struct net_device *dev = nh->fib_nh_dev;
1173 	struct fib6_info *f6i = res->f6i;
1174 
1175 	ip6_rt_init_dst(rt, res);
1176 
1177 	rt->rt6i_dst = f6i->fib6_dst;
1178 	rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL;
1179 	rt->rt6i_flags = res->fib6_flags;
1180 	if (nh->fib_nh_gw_family) {
1181 		rt->rt6i_gateway = nh->fib_nh_gw6;
1182 		rt->rt6i_flags |= RTF_GATEWAY;
1183 	}
1184 	rt6_set_from(rt, f6i);
1185 #ifdef CONFIG_IPV6_SUBTREES
1186 	rt->rt6i_src = f6i->fib6_src;
1187 #endif
1188 }
1189 
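/* Walk back up the tree from fn, descending into a parent's source subtree
 * when one exists, until a node carrying route info (RTN_RTINFO) is found;
 * returns NULL once the top-level root is reached.
 */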
1190 static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
1191 					struct in6_addr *saddr)
1192 {
1193 	struct fib6_node *pn, *sn;
1194 	while (1) {
1195 		if (fn->fn_flags & RTN_TL_ROOT)
1196 			return NULL;
1197 		pn = rcu_dereference(fn->parent);
1198 		sn = FIB6_SUBTREE(pn);
1199 		if (sn && sn != fn)
1200 			fn = fib6_node_lookup(sn, NULL, saddr);
1201 		else
1202 			fn = pn;
1203 		if (fn->fn_flags & RTN_RTINFO)
1204 			return fn;
1205 	}
1206 }
1207 
1208 static bool ip6_hold_safe(struct net *net, struct rt6_info **prt)
1209 {
1210 	struct rt6_info *rt = *prt;
1211 
1212 	if (dst_hold_safe(&rt->dst))
1213 		return true;
1214 	if (net) {
1215 		rt = net->ipv6.ip6_null_entry;
1216 		dst_hold(&rt->dst);
1217 	} else {
1218 		rt = NULL;
1219 	}
1220 	*prt = rt;
1221 	return false;
1222 }
1223 
1224 /* called with rcu_lock held */
1225 static struct rt6_info *ip6_create_rt_rcu(const struct fib6_result *res)
1226 {
1227 	struct net_device *dev = res->nh->fib_nh_dev;
1228 	struct fib6_info *f6i = res->f6i;
1229 	unsigned short flags;
1230 	struct rt6_info *nrt;
1231 
1232 	if (!fib6_info_hold_safe(f6i))
1233 		goto fallback;
1234 
1235 	flags = fib6_info_dst_flags(f6i);
1236 	nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
1237 	if (!nrt) {
1238 		fib6_info_release(f6i);
1239 		goto fallback;
1240 	}
1241 
1242 	ip6_rt_copy_init(nrt, res);
1243 	return nrt;
1244 
1245 fallback:
1246 	nrt = dev_net(dev)->ipv6.ip6_null_entry;
1247 	dst_hold(&nrt->dst);
1248 	return nrt;
1249 }
1250 
1251 INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_lookup(struct net *net,
1252 					     struct fib6_table *table,
1253 					     struct flowi6 *fl6,
1254 					     const struct sk_buff *skb,
1255 					     int flags)
1256 {
1257 	struct fib6_result res = {};
1258 	struct fib6_node *fn;
1259 	struct rt6_info *rt;
1260 
1261 	rcu_read_lock();
1262 	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1263 restart:
1264 	res.f6i = rcu_dereference(fn->leaf);
1265 	if (!res.f6i)
1266 		res.f6i = net->ipv6.fib6_null_entry;
1267 	else
1268 		rt6_device_match(net, &res, &fl6->saddr, fl6->flowi6_oif,
1269 				 flags);
1270 
1271 	if (res.f6i == net->ipv6.fib6_null_entry) {
1272 		fn = fib6_backtrack(fn, &fl6->saddr);
1273 		if (fn)
1274 			goto restart;
1275 
1276 		rt = net->ipv6.ip6_null_entry;
1277 		dst_hold(&rt->dst);
1278 		goto out;
1279 	} else if (res.fib6_flags & RTF_REJECT) {
1280 		goto do_create;
1281 	}
1282 
1283 	fib6_select_path(net, &res, fl6, fl6->flowi6_oif,
1284 			 fl6->flowi6_oif != 0, skb, flags);
1285 
1286 	/* Search through exception table */
1287 	rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
1288 	if (rt) {
1289 		if (ip6_hold_safe(net, &rt))
1290 			dst_use_noref(&rt->dst, jiffies);
1291 	} else {
1292 do_create:
1293 		rt = ip6_create_rt_rcu(&res);
1294 	}
1295 
1296 out:
1297 	trace_fib6_table_lookup(net, &res, table, fl6);
1298 
1299 	rcu_read_unlock();
1300 
1301 	return rt;
1302 }
1303 
1304 struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
1305 				   const struct sk_buff *skb, int flags)
1306 {
1307 	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup);
1308 }
1309 EXPORT_SYMBOL_GPL(ip6_route_lookup);
1310 
1311 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
1312 			    const struct in6_addr *saddr, int oif,
1313 			    const struct sk_buff *skb, int strict)
1314 {
1315 	struct flowi6 fl6 = {
1316 		.flowi6_oif = oif,
1317 		.daddr = *daddr,
1318 	};
1319 	struct dst_entry *dst;
1320 	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
1321 
1322 	if (saddr) {
1323 		memcpy(&fl6.saddr, saddr, sizeof(*saddr));
1324 		flags |= RT6_LOOKUP_F_HAS_SADDR;
1325 	}
1326 
1327 	dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
1328 	if (dst->error == 0)
1329 		return dst_rt6_info(dst);
1330 
1331 	dst_release(dst);
1332 
1333 	return NULL;
1334 }
1335 EXPORT_SYMBOL(rt6_lookup);
1336 
1337 /* ip6_ins_rt is called with FREE table->tb6_lock.
1338  * It takes a new route entry; if the addition fails for any reason, the
1339  * route is released.
1340  * Caller must hold dst before calling it.
1341  */
1342 
1343 static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info,
1344 			struct netlink_ext_ack *extack)
1345 {
1346 	int err;
1347 	struct fib6_table *table;
1348 
1349 	table = rt->fib6_table;
1350 	spin_lock_bh(&table->tb6_lock);
1351 	err = fib6_add(&table->tb6_root, rt, info, extack);
1352 	spin_unlock_bh(&table->tb6_lock);
1353 
1354 	return err;
1355 }
1356 
1357 int ip6_ins_rt(struct net *net, struct fib6_info *rt)
1358 {
1359 	struct nl_info info = {	.nl_net = net, };
1360 
1361 	return __ip6_ins_rt(rt, &info, NULL);
1362 }
1363 
1364 static struct rt6_info *ip6_rt_cache_alloc(const struct fib6_result *res,
1365 					   const struct in6_addr *daddr,
1366 					   const struct in6_addr *saddr)
1367 {
1368 	struct fib6_info *f6i = res->f6i;
1369 	struct net_device *dev;
1370 	struct rt6_info *rt;
1371 
1372 	/*
1373 	 *	Clone the route.
1374 	 */
1375 
1376 	if (!fib6_info_hold_safe(f6i))
1377 		return NULL;
1378 
1379 	dev = ip6_rt_get_dev_rcu(res);
1380 	rt = ip6_dst_alloc(dev_net(dev), dev, 0);
1381 	if (!rt) {
1382 		fib6_info_release(f6i);
1383 		return NULL;
1384 	}
1385 
1386 	ip6_rt_copy_init(rt, res);
1387 	rt->rt6i_flags |= RTF_CACHE;
1388 	rt->rt6i_dst.addr = *daddr;
1389 	rt->rt6i_dst.plen = 128;
1390 
1391 	if (!rt6_is_gw_or_nonexthop(res)) {
1392 		if (f6i->fib6_dst.plen != 128 &&
1393 		    ipv6_addr_equal(&f6i->fib6_dst.addr, daddr))
1394 			rt->rt6i_flags |= RTF_ANYCAST;
1395 #ifdef CONFIG_IPV6_SUBTREES
1396 		if (rt->rt6i_src.plen && saddr) {
1397 			rt->rt6i_src.addr = *saddr;
1398 			rt->rt6i_src.plen = 128;
1399 		}
1400 #endif
1401 	}
1402 
1403 	return rt;
1404 }
1405 
1406 static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
1407 {
1408 	struct fib6_info *f6i = res->f6i;
1409 	unsigned short flags = fib6_info_dst_flags(f6i);
1410 	struct net_device *dev;
1411 	struct rt6_info *pcpu_rt;
1412 
1413 	if (!fib6_info_hold_safe(f6i))
1414 		return NULL;
1415 
1416 	rcu_read_lock();
1417 	dev = ip6_rt_get_dev_rcu(res);
1418 	pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags | DST_NOCOUNT);
1419 	rcu_read_unlock();
1420 	if (!pcpu_rt) {
1421 		fib6_info_release(f6i);
1422 		return NULL;
1423 	}
1424 	ip6_rt_copy_init(pcpu_rt, res);
1425 	pcpu_rt->rt6i_flags |= RTF_PCPU;
1426 
1427 	if (f6i->nh)
1428 		pcpu_rt->sernum = rt_genid_ipv6(dev_net(dev));
1429 
1430 	return pcpu_rt;
1431 }
1432 
1433 static bool rt6_is_valid(const struct rt6_info *rt6)
1434 {
1435 	return rt6->sernum == rt_genid_ipv6(dev_net(rt6->dst.dev));
1436 }
1437 
1438 /* It should be called with rcu_read_lock() acquired */
1439 static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
1440 {
1441 	struct rt6_info *pcpu_rt;
1442 
1443 	pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu);
1444 
1445 	if (pcpu_rt && pcpu_rt->sernum && !rt6_is_valid(pcpu_rt)) {
1446 		struct rt6_info *prev, **p;
1447 
1448 		p = this_cpu_ptr(res->nh->rt6i_pcpu);
1449 		/* Paired with READ_ONCE() in __fib6_drop_pcpu_from() */
1450 		prev = xchg(p, NULL);
1451 		if (prev) {
1452 			dst_dev_put(&prev->dst);
1453 			dst_release(&prev->dst);
1454 		}
1455 
1456 		pcpu_rt = NULL;
1457 	}
1458 
1459 	return pcpu_rt;
1460 }
1461 
1462 static struct rt6_info *rt6_make_pcpu_route(struct net *net,
1463 					    const struct fib6_result *res)
1464 {
1465 	struct rt6_info *pcpu_rt, *prev, **p;
1466 
1467 	pcpu_rt = ip6_rt_pcpu_alloc(res);
1468 	if (!pcpu_rt)
1469 		return NULL;
1470 
1471 	p = this_cpu_ptr(res->nh->rt6i_pcpu);
1472 	prev = cmpxchg(p, NULL, pcpu_rt);
1473 	BUG_ON(prev);
1474 
1475 	if (res->f6i->fib6_destroying) {
1476 		struct fib6_info *from;
1477 
1478 		from = unrcu_pointer(xchg(&pcpu_rt->from, NULL));
1479 		fib6_info_release(from);
1480 	}
1481 
1482 	return pcpu_rt;
1483 }
1484 
1485 /* exception hash table implementation
1486  */
1487 static DEFINE_SPINLOCK(rt6_exception_lock);
1488 
1489 /* Remove rt6_ex from hash table and free the memory
1490  * Caller must hold rt6_exception_lock
1491  */
1492 static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
1493 				 struct rt6_exception *rt6_ex)
1494 {
1495 	struct net *net;
1496 
1497 	if (!bucket || !rt6_ex)
1498 		return;
1499 
1500 	net = dev_net(rt6_ex->rt6i->dst.dev);
1501 	net->ipv6.rt6_stats->fib_rt_cache--;
1502 
1503 	/* completely purge the exception to allow releasing the held resources:
1504 	 * some [sk] cache may keep the dst around for an unlimited time
1505 	 */
1506 	dst_dev_put(&rt6_ex->rt6i->dst);
1507 
1508 	hlist_del_rcu(&rt6_ex->hlist);
1509 	dst_release(&rt6_ex->rt6i->dst);
1510 	kfree_rcu(rt6_ex, rcu);
1511 	WARN_ON_ONCE(!bucket->depth);
1512 	bucket->depth--;
1513 }
1514 
1515 /* Remove oldest rt6_ex in bucket and free the memory
1516  * Caller must hold rt6_exception_lock
1517  */
1518 static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
1519 {
1520 	struct rt6_exception *rt6_ex, *oldest = NULL;
1521 
1522 	if (!bucket)
1523 		return;
1524 
1525 	hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
1526 		if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
1527 			oldest = rt6_ex;
1528 	}
1529 	rt6_remove_exception(bucket, oldest);
1530 }
1531 
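/* Hash (dst, src) with a boot-time random siphash key and fold the result
 * down to FIB6_EXCEPTION_BUCKET_SIZE_SHIFT bits to pick an exception bucket.
 */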
1532 static u32 rt6_exception_hash(const struct in6_addr *dst,
1533 			      const struct in6_addr *src)
1534 {
1535 	static siphash_aligned_key_t rt6_exception_key;
1536 	struct {
1537 		struct in6_addr dst;
1538 		struct in6_addr src;
1539 	} __aligned(SIPHASH_ALIGNMENT) combined = {
1540 		.dst = *dst,
1541 	};
1542 	u64 val;
1543 
1544 	net_get_random_once(&rt6_exception_key, sizeof(rt6_exception_key));
1545 
1546 #ifdef CONFIG_IPV6_SUBTREES
1547 	if (src)
1548 		combined.src = *src;
1549 #endif
1550 	val = siphash(&combined, sizeof(combined), &rt6_exception_key);
1551 
1552 	return hash_64(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
1553 }
1554 
1555 /* Helper function to find the cached rt in the hash table
1556  * and update bucket pointer to point to the bucket for this
1557  * (daddr, saddr) pair
1558  * Caller must hold rt6_exception_lock
1559  */
1560 static struct rt6_exception *
1561 __rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
1562 			      const struct in6_addr *daddr,
1563 			      const struct in6_addr *saddr)
1564 {
1565 	struct rt6_exception *rt6_ex;
1566 	u32 hval;
1567 
1568 	if (!(*bucket) || !daddr)
1569 		return NULL;
1570 
1571 	hval = rt6_exception_hash(daddr, saddr);
1572 	*bucket += hval;
1573 
1574 	hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
1575 		struct rt6_info *rt6 = rt6_ex->rt6i;
1576 		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);
1577 
1578 #ifdef CONFIG_IPV6_SUBTREES
1579 		if (matched && saddr)
1580 			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
1581 #endif
1582 		if (matched)
1583 			return rt6_ex;
1584 	}
1585 	return NULL;
1586 }
1587 
1588 /* Helper function to find the cached rt in the hash table
1589  * and update bucket pointer to point to the bucket for this
1590  * (daddr, saddr) pair
1591  * Caller must hold rcu_read_lock()
1592  */
1593 static struct rt6_exception *
1594 __rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
1595 			 const struct in6_addr *daddr,
1596 			 const struct in6_addr *saddr)
1597 {
1598 	struct rt6_exception *rt6_ex;
1599 	u32 hval;
1600 
1601 	WARN_ON_ONCE(!rcu_read_lock_held());
1602 
1603 	if (!(*bucket) || !daddr)
1604 		return NULL;
1605 
1606 	hval = rt6_exception_hash(daddr, saddr);
1607 	*bucket += hval;
1608 
1609 	hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
1610 		struct rt6_info *rt6 = rt6_ex->rt6i;
1611 		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);
1612 
1613 #ifdef CONFIG_IPV6_SUBTREES
1614 		if (matched && saddr)
1615 			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
1616 #endif
1617 		if (matched)
1618 			return rt6_ex;
1619 	}
1620 	return NULL;
1621 }
1622 
1623 static unsigned int fib6_mtu(const struct fib6_result *res)
1624 {
1625 	const struct fib6_nh *nh = res->nh;
1626 	unsigned int mtu;
1627 
1628 	if (res->f6i->fib6_pmtu) {
1629 		mtu = res->f6i->fib6_pmtu;
1630 	} else {
1631 		struct net_device *dev = nh->fib_nh_dev;
1632 		struct inet6_dev *idev;
1633 
1634 		rcu_read_lock();
1635 		idev = __in6_dev_get(dev);
1636 		mtu = READ_ONCE(idev->cnf.mtu6);
1637 		rcu_read_unlock();
1638 	}
1639 
1640 	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
1641 
1642 	return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
1643 }
1644 
1645 #define FIB6_EXCEPTION_BUCKET_FLUSHED  0x1UL
1646 
1647 /* used when the flushed bit is not relevant, only access to the bucket
1648  * (i.e., all bucket users except rt6_insert_exception);
1649  *
1650  * called under rcu lock; sometimes called with rt6_exception_lock held
1651  */
1652 static
1653 struct rt6_exception_bucket *fib6_nh_get_excptn_bucket(const struct fib6_nh *nh,
1654 						       spinlock_t *lock)
1655 {
1656 	struct rt6_exception_bucket *bucket;
1657 
1658 	if (lock)
1659 		bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1660 						   lockdep_is_held(lock));
1661 	else
1662 		bucket = rcu_dereference(nh->rt6i_exception_bucket);
1663 
1664 	/* remove bucket flushed bit if set */
1665 	if (bucket) {
1666 		unsigned long p = (unsigned long)bucket;
1667 
1668 		p &= ~FIB6_EXCEPTION_BUCKET_FLUSHED;
1669 		bucket = (struct rt6_exception_bucket *)p;
1670 	}
1671 
1672 	return bucket;
1673 }
1674 
1675 static bool fib6_nh_excptn_bucket_flushed(struct rt6_exception_bucket *bucket)
1676 {
1677 	unsigned long p = (unsigned long)bucket;
1678 
1679 	return !!(p & FIB6_EXCEPTION_BUCKET_FLUSHED);
1680 }
1681 
1682 /* called with rt6_exception_lock held */
1683 static void fib6_nh_excptn_bucket_set_flushed(struct fib6_nh *nh,
1684 					      spinlock_t *lock)
1685 {
1686 	struct rt6_exception_bucket *bucket;
1687 	unsigned long p;
1688 
1689 	bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1690 					   lockdep_is_held(lock));
1691 
1692 	p = (unsigned long)bucket;
1693 	p |= FIB6_EXCEPTION_BUCKET_FLUSHED;
1694 	bucket = (struct rt6_exception_bucket *)p;
1695 	rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
1696 }
1697 
1698 static int rt6_insert_exception(struct rt6_info *nrt,
1699 				const struct fib6_result *res)
1700 {
1701 	struct net *net = dev_net(nrt->dst.dev);
1702 	struct rt6_exception_bucket *bucket;
1703 	struct fib6_info *f6i = res->f6i;
1704 	struct in6_addr *src_key = NULL;
1705 	struct rt6_exception *rt6_ex;
1706 	struct fib6_nh *nh = res->nh;
1707 	int max_depth;
1708 	int err = 0;
1709 
1710 	spin_lock_bh(&rt6_exception_lock);
1711 
1712 	bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1713 					  lockdep_is_held(&rt6_exception_lock));
1714 	if (!bucket) {
1715 		bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
1716 				 GFP_ATOMIC);
1717 		if (!bucket) {
1718 			err = -ENOMEM;
1719 			goto out;
1720 		}
1721 		rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
1722 	} else if (fib6_nh_excptn_bucket_flushed(bucket)) {
1723 		err = -EINVAL;
1724 		goto out;
1725 	}
1726 
1727 #ifdef CONFIG_IPV6_SUBTREES
1728 	/* fib6_src.plen != 0 indicates f6i is in subtree
1729 	 * and exception table is indexed by a hash of
1730 	 * both fib6_dst and fib6_src.
1731 	 * Otherwise, the exception table is indexed by
1732 	 * a hash of only fib6_dst.
1733 	 */
1734 	if (f6i->fib6_src.plen)
1735 		src_key = &nrt->rt6i_src.addr;
1736 #endif
1737 	/* rt6_mtu_change() might lower mtu on f6i.
1738 	 * Only insert this exception route if its mtu
1739 	 * is less than f6i's mtu value.
1740 	 */
1741 	if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(res)) {
1742 		err = -EINVAL;
1743 		goto out;
1744 	}
1745 
1746 	rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
1747 					       src_key);
1748 	if (rt6_ex)
1749 		rt6_remove_exception(bucket, rt6_ex);
1750 
1751 	rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);
1752 	if (!rt6_ex) {
1753 		err = -ENOMEM;
1754 		goto out;
1755 	}
1756 	rt6_ex->rt6i = nrt;
1757 	rt6_ex->stamp = jiffies;
1758 	hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
1759 	bucket->depth++;
1760 	net->ipv6.rt6_stats->fib_rt_cache++;
1761 
1762 	/* Randomize max depth to avoid some side-channel attacks. */
1763 	max_depth = FIB6_MAX_DEPTH + get_random_u32_below(FIB6_MAX_DEPTH);
1764 	while (bucket->depth > max_depth)
1765 		rt6_exception_remove_oldest(bucket);
1766 
1767 out:
1768 	spin_unlock_bh(&rt6_exception_lock);
1769 
1770 	/* Update fn->fn_sernum to invalidate all cached dst */
1771 	if (!err) {
1772 		spin_lock_bh(&f6i->fib6_table->tb6_lock);
1773 		fib6_update_sernum(net, f6i);
1774 		fib6_add_gc_list(f6i);
1775 		spin_unlock_bh(&f6i->fib6_table->tb6_lock);
1776 		fib6_force_start_gc(net);
1777 	}
1778 
1779 	return err;
1780 }
1781 
1782 static void fib6_nh_flush_exceptions(struct fib6_nh *nh, struct fib6_info *from)
1783 {
1784 	struct rt6_exception_bucket *bucket;
1785 	struct rt6_exception *rt6_ex;
1786 	struct hlist_node *tmp;
1787 	int i;
1788 
1789 	spin_lock_bh(&rt6_exception_lock);
1790 
1791 	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1792 	if (!bucket)
1793 		goto out;
1794 
1795 	/* Prevent rt6_insert_exception() from recreating the bucket list */
1796 	if (!from)
1797 		fib6_nh_excptn_bucket_set_flushed(nh, &rt6_exception_lock);
1798 
1799 	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1800 		hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist) {
1801 			if (!from ||
1802 			    rcu_access_pointer(rt6_ex->rt6i->from) == from)
1803 				rt6_remove_exception(bucket, rt6_ex);
1804 		}
1805 		WARN_ON_ONCE(!from && bucket->depth);
1806 		bucket++;
1807 	}
1808 out:
1809 	spin_unlock_bh(&rt6_exception_lock);
1810 }
1811 
1812 static int rt6_nh_flush_exceptions(struct fib6_nh *nh, void *arg)
1813 {
1814 	struct fib6_info *f6i = arg;
1815 
1816 	fib6_nh_flush_exceptions(nh, f6i);
1817 
1818 	return 0;
1819 }
1820 
1821 void rt6_flush_exceptions(struct fib6_info *f6i)
1822 {
1823 	if (f6i->nh) {
1824 		rcu_read_lock();
1825 		nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_flush_exceptions, f6i);
1826 		rcu_read_unlock();
1827 	} else {
1828 		fib6_nh_flush_exceptions(f6i->fib6_nh, f6i);
1829 	}
1830 }
1831 
1832 /* Find cached rt in the hash table inside the passed-in rt
1833  * Caller has to hold rcu_read_lock()
1834  */
1835 static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
1836 					   const struct in6_addr *daddr,
1837 					   const struct in6_addr *saddr)
1838 {
1839 	const struct in6_addr *src_key = NULL;
1840 	struct rt6_exception_bucket *bucket;
1841 	struct rt6_exception *rt6_ex;
1842 	struct rt6_info *ret = NULL;
1843 
1844 #ifdef CONFIG_IPV6_SUBTREES
1845 	/* fib6_src.plen != 0 indicates f6i is in subtree
1846 	 * and exception table is indexed by a hash of
1847 	 * both fib6_dst and fib6_src.
1848 	 * However, the src addr used to create the hash
1849 	 * might not be exactly the passed in saddr which
1850 	 * is a /128 addr from the flow.
1851 	 * So we need to use f6i->fib6_src to redo lookup
1852 	 * if the passed in saddr does not find anything.
1853 	 * (See the logic in ip6_rt_cache_alloc() on how
1854 	 * rt->rt6i_src is updated.)
1855 	 */
1856 	if (res->f6i->fib6_src.plen)
1857 		src_key = saddr;
1858 find_ex:
1859 #endif
1860 	bucket = fib6_nh_get_excptn_bucket(res->nh, NULL);
1861 	rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
1862 
1863 	if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
1864 		ret = rt6_ex->rt6i;
1865 
1866 #ifdef CONFIG_IPV6_SUBTREES
1867 	/* Use fib6_src as src_key and redo lookup */
1868 	if (!ret && src_key && src_key != &res->f6i->fib6_src.addr) {
1869 		src_key = &res->f6i->fib6_src.addr;
1870 		goto find_ex;
1871 	}
1872 #endif
1873 
1874 	return ret;
1875 }
1876 
1877 /* Remove the passed in cached rt from the hash table that contains it */
1878 static int fib6_nh_remove_exception(const struct fib6_nh *nh, int plen,
1879 				    const struct rt6_info *rt)
1880 {
1881 	const struct in6_addr *src_key = NULL;
1882 	struct rt6_exception_bucket *bucket;
1883 	struct rt6_exception *rt6_ex;
1884 	int err;
1885 
1886 	if (!rcu_access_pointer(nh->rt6i_exception_bucket))
1887 		return -ENOENT;
1888 
1889 	spin_lock_bh(&rt6_exception_lock);
1890 	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1891 
1892 #ifdef CONFIG_IPV6_SUBTREES
1893 	/* rt6i_src.plen != 0 indicates 'from' is in subtree
1894 	 * and exception table is indexed by a hash of
1895 	 * both rt6i_dst and rt6i_src.
1896 	 * Otherwise, the exception table is indexed by
1897 	 * a hash of only rt6i_dst.
1898 	 */
1899 	if (plen)
1900 		src_key = &rt->rt6i_src.addr;
1901 #endif
1902 	rt6_ex = __rt6_find_exception_spinlock(&bucket,
1903 					       &rt->rt6i_dst.addr,
1904 					       src_key);
1905 	if (rt6_ex) {
1906 		rt6_remove_exception(bucket, rt6_ex);
1907 		err = 0;
1908 	} else {
1909 		err = -ENOENT;
1910 	}
1911 
1912 	spin_unlock_bh(&rt6_exception_lock);
1913 	return err;
1914 }
1915 
1916 struct fib6_nh_excptn_arg {
1917 	struct rt6_info	*rt;
1918 	int		plen;
1919 };
1920 
1921 static int rt6_nh_remove_exception_rt(struct fib6_nh *nh, void *_arg)
1922 {
1923 	struct fib6_nh_excptn_arg *arg = _arg;
1924 	int err;
1925 
1926 	err = fib6_nh_remove_exception(nh, arg->plen, arg->rt);
1927 	if (err == 0)
1928 		return 1;
1929 
1930 	return 0;
1931 }
1932 
1933 static int rt6_remove_exception_rt(struct rt6_info *rt)
1934 {
1935 	struct fib6_info *from;
1936 
1937 	from = rcu_dereference(rt->from);
1938 	if (!from || !(rt->rt6i_flags & RTF_CACHE))
1939 		return -EINVAL;
1940 
1941 	if (from->nh) {
1942 		struct fib6_nh_excptn_arg arg = {
1943 			.rt = rt,
1944 			.plen = from->fib6_src.plen
1945 		};
1946 		int rc;
1947 
1948 		/* rc = 1 means an entry was found */
1949 		rc = nexthop_for_each_fib6_nh(from->nh,
1950 					      rt6_nh_remove_exception_rt,
1951 					      &arg);
1952 		return rc ? 0 : -ENOENT;
1953 	}
1954 
1955 	return fib6_nh_remove_exception(from->fib6_nh,
1956 					from->fib6_src.plen, rt);
1957 }
1958 
1959 /* Find rt6_ex which contains the passed in rt cache and
1960  * refresh its stamp
1961  */
1962 static void fib6_nh_update_exception(const struct fib6_nh *nh, int plen,
1963 				     const struct rt6_info *rt)
1964 {
1965 	const struct in6_addr *src_key = NULL;
1966 	struct rt6_exception_bucket *bucket;
1967 	struct rt6_exception *rt6_ex;
1968 
1969 	bucket = fib6_nh_get_excptn_bucket(nh, NULL);
1970 #ifdef CONFIG_IPV6_SUBTREES
1971 	/* rt6i_src.plen != 0 indicates 'from' is in subtree
1972 	 * and exception table is indexed by a hash of
1973 	 * both rt6i_dst and rt6i_src.
1974 	 * Otherwise, the exception table is indexed by
1975 	 * a hash of only rt6i_dst.
1976 	 */
1977 	if (plen)
1978 		src_key = &rt->rt6i_src.addr;
1979 #endif
1980 	rt6_ex = __rt6_find_exception_rcu(&bucket, &rt->rt6i_dst.addr, src_key);
1981 	if (rt6_ex)
1982 		rt6_ex->stamp = jiffies;
1983 }
1984 
1985 struct fib6_nh_match_arg {
1986 	const struct net_device *dev;
1987 	const struct in6_addr	*gw;
1988 	struct fib6_nh		*match;
1989 };
1990 
1991 /* determine if fib6_nh has given device and gateway */
1992 static int fib6_nh_find_match(struct fib6_nh *nh, void *_arg)
1993 {
1994 	struct fib6_nh_match_arg *arg = _arg;
1995 
1996 	if (arg->dev != nh->fib_nh_dev ||
1997 	    (arg->gw && !nh->fib_nh_gw_family) ||
1998 	    (!arg->gw && nh->fib_nh_gw_family) ||
1999 	    (arg->gw && !ipv6_addr_equal(arg->gw, &nh->fib_nh_gw6)))
2000 		return 0;
2001 
2002 	arg->match = nh;
2003 
2004 	/* found a match, break the loop */
2005 	return 1;
2006 }
2007 
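/* Refresh the stamp on the exception entry backing a cached route,
 * resolving the owning fib6_nh first (walking the nexthop group when
 * the parent fib6_info uses one).
 */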
2008 static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
2009 {
2010 	struct fib6_info *from;
2011 	struct fib6_nh *fib6_nh;
2012 
2013 	rcu_read_lock();
2014 
2015 	from = rcu_dereference(rt->from);
2016 	if (!from || !(rt->rt6i_flags & RTF_CACHE))
2017 		goto unlock;
2018 
2019 	if (from->nh) {
2020 		struct fib6_nh_match_arg arg = {
2021 			.dev = rt->dst.dev,
2022 			.gw = &rt->rt6i_gateway,
2023 		};
2024 
2025 		nexthop_for_each_fib6_nh(from->nh, fib6_nh_find_match, &arg);
2026 
2027 		if (!arg.match)
2028 			goto unlock;
2029 		fib6_nh = arg.match;
2030 	} else {
2031 		fib6_nh = from->fib6_nh;
2032 	}
2033 	fib6_nh_update_exception(fib6_nh, from->fib6_src.plen, rt);
2034 unlock:
2035 	rcu_read_unlock();
2036 }
2037 
2038 static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
2039 					 struct rt6_info *rt, int mtu)
2040 {
2041 	/* If the new MTU is lower than the route PMTU, this new MTU will be the
2042 	 * lowest MTU in the path: always allow updating the route PMTU to
2043 	 * reflect PMTU decreases.
2044 	 *
2045 	 * If the new MTU is higher, and the route PMTU is equal to the local
2046 	 * MTU, this means the old MTU is the lowest in the path, so allow
2047 	 * updating it: if other nodes now have lower MTUs, PMTU discovery will
2048 	 * handle this.
2049 	 */
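	/* Worked example (illustrative): with a route PMTU of 1280 and the
	 * device MTU raised from 1500 to 1400, neither test below holds
	 * (1280 < 1400, and 1280 differs from the local MTU), so the
	 * discovered PMTU of 1280 is kept.
	 */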
2050 
2051 	if (dst_mtu(&rt->dst) >= mtu)
2052 		return true;
2053 
2054 	if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
2055 		return true;
2056 
2057 	return false;
2058 }
2059 
2060 static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
2061 				       const struct fib6_nh *nh, int mtu)
2062 {
2063 	struct rt6_exception_bucket *bucket;
2064 	struct rt6_exception *rt6_ex;
2065 	int i;
2066 
2067 	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
2068 	if (!bucket)
2069 		return;
2070 
2071 	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
2072 		hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
2073 			struct rt6_info *entry = rt6_ex->rt6i;
2074 
2075 			/* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
2076 			 * route), the metrics of its rt->from have already
2077 			 * been updated.
2078 			 */
2079 			if (dst_metric_raw(&entry->dst, RTAX_MTU) &&
2080 			    rt6_mtu_change_route_allowed(idev, entry, mtu))
2081 				dst_metric_set(&entry->dst, RTAX_MTU, mtu);
2082 		}
2083 		bucket++;
2084 	}
2085 }
2086 
2087 #define RTF_CACHE_GATEWAY	(RTF_GATEWAY | RTF_CACHE)
2088 
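/* Drop all exception entries that are both RTF_GATEWAY and RTF_CACHE
 * and whose gateway equals @gateway.
 */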
2089 static void fib6_nh_exceptions_clean_tohost(const struct fib6_nh *nh,
2090 					    const struct in6_addr *gateway)
2091 {
2092 	struct rt6_exception_bucket *bucket;
2093 	struct rt6_exception *rt6_ex;
2094 	struct hlist_node *tmp;
2095 	int i;
2096 
2097 	if (!rcu_access_pointer(nh->rt6i_exception_bucket))
2098 		return;
2099 
2100 	spin_lock_bh(&rt6_exception_lock);
2101 	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
2102 	if (bucket) {
2103 		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
2104 			hlist_for_each_entry_safe(rt6_ex, tmp,
2105 						  &bucket->chain, hlist) {
2106 				struct rt6_info *entry = rt6_ex->rt6i;
2107 
2108 				if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
2109 				    RTF_CACHE_GATEWAY &&
2110 				    ipv6_addr_equal(gateway,
2111 						    &entry->rt6i_gateway)) {
2112 					rt6_remove_exception(bucket, rt6_ex);
2113 				}
2114 			}
2115 			bucket++;
2116 		}
2117 	}
2118 
2119 	spin_unlock_bh(&rt6_exception_lock);
2120 }
2121 
2122 static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
2123 				      struct rt6_exception *rt6_ex,
2124 				      struct fib6_gc_args *gc_args,
2125 				      unsigned long now)
2126 {
2127 	struct rt6_info *rt = rt6_ex->rt6i;
2128 
2129 	/* we are pruning and obsoleting aged-out and non-gateway exceptions
2130 	 * even if others still hold references to them, so that on the next
2131 	 * dst_check() such references can be dropped.
2132 	 * EXPIRES exceptions - e.g. pmtu-generated ones - are pruned when
2133 	 * expired, independently of their aging, as per RFC 8201 section 4.
2134 	 */
2135 	if (!(rt->rt6i_flags & RTF_EXPIRES)) {
2136 		if (time_after_eq(now, READ_ONCE(rt->dst.lastuse) +
2137 				       gc_args->timeout)) {
2138 			pr_debug("aging clone %p\n", rt);
2139 			rt6_remove_exception(bucket, rt6_ex);
2140 			return;
2141 		}
2142 	} else if (time_after(jiffies, READ_ONCE(rt->dst.expires))) {
2143 		pr_debug("purging expired route %p\n", rt);
2144 		rt6_remove_exception(bucket, rt6_ex);
2145 		return;
2146 	}
2147 
2148 	if (rt->rt6i_flags & RTF_GATEWAY) {
2149 		struct neighbour *neigh;
2150 
2151 		neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
2152 
2153 		if (!(neigh && (neigh->flags & NTF_ROUTER))) {
2154 			pr_debug("purging route %p via non-router but gateway\n",
2155 				 rt);
2156 			rt6_remove_exception(bucket, rt6_ex);
2157 			return;
2158 		}
2159 	}
2160 
2161 	gc_args->more++;
2162 }
2163 
2164 static void fib6_nh_age_exceptions(const struct fib6_nh *nh,
2165 				   struct fib6_gc_args *gc_args,
2166 				   unsigned long now)
2167 {
2168 	struct rt6_exception_bucket *bucket;
2169 	struct rt6_exception *rt6_ex;
2170 	struct hlist_node *tmp;
2171 	int i;
2172 
2173 	if (!rcu_access_pointer(nh->rt6i_exception_bucket))
2174 		return;
2175 
2176 	rcu_read_lock_bh();
2177 	spin_lock(&rt6_exception_lock);
2178 	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
2179 	if (bucket) {
2180 		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
2181 			hlist_for_each_entry_safe(rt6_ex, tmp,
2182 						  &bucket->chain, hlist) {
2183 				rt6_age_examine_exception(bucket, rt6_ex,
2184 							  gc_args, now);
2185 			}
2186 			bucket++;
2187 		}
2188 	}
2189 	spin_unlock(&rt6_exception_lock);
2190 	rcu_read_unlock_bh();
2191 }
2192 
2193 struct fib6_nh_age_excptn_arg {
2194 	struct fib6_gc_args	*gc_args;
2195 	unsigned long		now;
2196 };
2197 
2198 static int rt6_nh_age_exceptions(struct fib6_nh *nh, void *_arg)
2199 {
2200 	struct fib6_nh_age_excptn_arg *arg = _arg;
2201 
2202 	fib6_nh_age_exceptions(nh, arg->gc_args, arg->now);
2203 	return 0;
2204 }
2205 
2206 void rt6_age_exceptions(struct fib6_info *f6i,
2207 			struct fib6_gc_args *gc_args,
2208 			unsigned long now)
2209 {
2210 	if (f6i->nh) {
2211 		struct fib6_nh_age_excptn_arg arg = {
2212 			.gc_args = gc_args,
2213 			.now = now
2214 		};
2215 
2216 		nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_age_exceptions,
2217 					 &arg);
2218 	} else {
2219 		fib6_nh_age_exceptions(f6i->fib6_nh, gc_args, now);
2220 	}
2221 }
2222 
2223 /* must be called with rcu lock held */
2224 int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif,
2225 		      struct flowi6 *fl6, struct fib6_result *res, int strict)
2226 {
2227 	struct fib6_node *fn, *saved_fn;
2228 
2229 	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
2230 	saved_fn = fn;
2231 
2232 redo_rt6_select:
2233 	rt6_select(net, fn, oif, res, strict);
2234 	if (res->f6i == net->ipv6.fib6_null_entry) {
2235 		fn = fib6_backtrack(fn, &fl6->saddr);
2236 		if (fn)
2237 			goto redo_rt6_select;
2238 		else if (strict & RT6_LOOKUP_F_REACHABLE) {
2239 			/* also consider unreachable route */
2240 			strict &= ~RT6_LOOKUP_F_REACHABLE;
2241 			fn = saved_fn;
2242 			goto redo_rt6_select;
2243 		}
2244 	}
2245 
2246 	trace_fib6_table_lookup(net, res, table, fl6);
2247 
2248 	return 0;
2249 }
2250 
2251 struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
2252 			       int oif, struct flowi6 *fl6,
2253 			       const struct sk_buff *skb, int flags)
2254 {
2255 	struct fib6_result res = {};
2256 	struct rt6_info *rt = NULL;
2257 	int strict = 0;
2258 
2259 	WARN_ON_ONCE((flags & RT6_LOOKUP_F_DST_NOREF) &&
2260 		     !rcu_read_lock_held());
2261 
2262 	strict |= flags & RT6_LOOKUP_F_IFACE;
2263 	strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
2264 	if (READ_ONCE(net->ipv6.devconf_all->forwarding) == 0)
2265 		strict |= RT6_LOOKUP_F_REACHABLE;
2266 
2267 	rcu_read_lock();
2268 
2269 	fib6_table_lookup(net, table, oif, fl6, &res, strict);
2270 	if (res.f6i == net->ipv6.fib6_null_entry)
2271 		goto out;
2272 
2273 	fib6_select_path(net, &res, fl6, oif, false, skb, strict);
2274 
2275 	/* Search through the exception table */
2276 	rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
2277 	if (rt) {
2278 		goto out;
2279 	} else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
2280 			    !res.nh->fib_nh_gw_family)) {
2281 		/* Create an RTF_CACHE clone which will not be
2282 		 * owned by the fib6 tree.  It is for the special case where
2283 		 * the daddr in the skb during the neighbor look-up is different
2284 		 * from the fl6->daddr used to look up the route here.
2285 		 */
2286 		rt = ip6_rt_cache_alloc(&res, &fl6->daddr, NULL);
2287 
2288 		if (rt) {
2289 			/* 1 refcnt is taken during ip6_rt_cache_alloc().
2290 			 * As rt6_uncached_list_add() does not consume refcnt,
2291 			 * this refcnt is always returned to the caller even
2292 			 * if caller sets RT6_LOOKUP_F_DST_NOREF flag.
2293 			 */
2294 			rt6_uncached_list_add(rt);
2295 			rcu_read_unlock();
2296 
2297 			return rt;
2298 		}
2299 	} else {
2300 		/* Get a percpu copy */
2301 		local_bh_disable();
2302 		rt = rt6_get_pcpu_route(&res);
2303 
2304 		if (!rt)
2305 			rt = rt6_make_pcpu_route(net, &res);
2306 
2307 		local_bh_enable();
2308 	}
2309 out:
2310 	if (!rt)
2311 		rt = net->ipv6.ip6_null_entry;
2312 	if (!(flags & RT6_LOOKUP_F_DST_NOREF))
2313 		ip6_hold_safe(net, &rt);
2314 	rcu_read_unlock();
2315 
2316 	return rt;
2317 }
2318 EXPORT_SYMBOL_GPL(ip6_pol_route);
2319 
2320 INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_input(struct net *net,
2321 					    struct fib6_table *table,
2322 					    struct flowi6 *fl6,
2323 					    const struct sk_buff *skb,
2324 					    int flags)
2325 {
2326 	return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags);
2327 }
2328 
2329 struct dst_entry *ip6_route_input_lookup(struct net *net,
2330 					 struct net_device *dev,
2331 					 struct flowi6 *fl6,
2332 					 const struct sk_buff *skb,
2333 					 int flags)
2334 {
2335 	if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
2336 		flags |= RT6_LOOKUP_F_IFACE;
2337 
2338 	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input);
2339 }
2340 EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
2341 
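/* Extract the L3 keys used for multipath hashing. For ICMPv6 error
 * messages, the keys are taken from the offending packet embedded in
 * the error, so that errors hash onto the same path as the flow they
 * report on.
 */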
2342 static void ip6_multipath_l3_keys(const struct sk_buff *skb,
2343 				  struct flow_keys *keys,
2344 				  struct flow_keys *flkeys)
2345 {
2346 	const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
2347 	const struct ipv6hdr *key_iph = outer_iph;
2348 	struct flow_keys *_flkeys = flkeys;
2349 	const struct ipv6hdr *inner_iph;
2350 	const struct icmp6hdr *icmph;
2351 	struct ipv6hdr _inner_iph;
2352 	struct icmp6hdr _icmph;
2353 
2354 	if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
2355 		goto out;
2356 
2357 	icmph = skb_header_pointer(skb, skb_transport_offset(skb),
2358 				   sizeof(_icmph), &_icmph);
2359 	if (!icmph)
2360 		goto out;
2361 
2362 	if (!icmpv6_is_err(icmph->icmp6_type))
2363 		goto out;
2364 
2365 	inner_iph = skb_header_pointer(skb,
2366 				       skb_transport_offset(skb) + sizeof(*icmph),
2367 				       sizeof(_inner_iph), &_inner_iph);
2368 	if (!inner_iph)
2369 		goto out;
2370 
2371 	key_iph = inner_iph;
2372 	_flkeys = NULL;
2373 out:
2374 	if (_flkeys) {
2375 		keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src;
2376 		keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst;
2377 		keys->tags.flow_label = _flkeys->tags.flow_label;
2378 		keys->basic.ip_proto = _flkeys->basic.ip_proto;
2379 	} else {
2380 		keys->addrs.v6addrs.src = key_iph->saddr;
2381 		keys->addrs.v6addrs.dst = key_iph->daddr;
2382 		keys->tags.flow_label = ip6_flowlabel(key_iph);
2383 		keys->basic.ip_proto = key_iph->nexthdr;
2384 	}
2385 }
2386 
2387 static u32 rt6_multipath_custom_hash_outer(const struct net *net,
2388 					   const struct sk_buff *skb,
2389 					   bool *p_has_inner)
2390 {
2391 	u32 hash_fields = ip6_multipath_hash_fields(net);
2392 	struct flow_keys keys, hash_keys;
2393 
2394 	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
2395 		return 0;
2396 
2397 	memset(&hash_keys, 0, sizeof(hash_keys));
2398 	skb_flow_dissect_flow_keys(skb, &keys, FLOW_DISSECTOR_F_STOP_AT_ENCAP);
2399 
2400 	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2401 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
2402 		hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
2403 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
2404 		hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
2405 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
2406 		hash_keys.basic.ip_proto = keys.basic.ip_proto;
2407 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
2408 		hash_keys.tags.flow_label = keys.tags.flow_label;
2409 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
2410 		hash_keys.ports.src = keys.ports.src;
2411 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
2412 		hash_keys.ports.dst = keys.ports.dst;
2413 
2414 	*p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION);
2415 	return fib_multipath_hash_from_keys(net, &hash_keys);
2416 }
2417 
2418 static u32 rt6_multipath_custom_hash_inner(const struct net *net,
2419 					   const struct sk_buff *skb,
2420 					   bool has_inner)
2421 {
2422 	u32 hash_fields = ip6_multipath_hash_fields(net);
2423 	struct flow_keys keys, hash_keys;
2424 
2425 	/* We assume the packet carries an encapsulation, but if none was
2426 	 * encountered during dissection of the outer flow, then there is no
2427 	 * point in calling the flow dissector again.
2428 	 */
2429 	if (!has_inner)
2430 		return 0;
2431 
2432 	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK))
2433 		return 0;
2434 
2435 	memset(&hash_keys, 0, sizeof(hash_keys));
2436 	skb_flow_dissect_flow_keys(skb, &keys, 0);
2437 
2438 	if (!(keys.control.flags & FLOW_DIS_ENCAPSULATION))
2439 		return 0;
2440 
2441 	if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2442 		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2443 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
2444 			hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
2445 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
2446 			hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
2447 	} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2448 		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2449 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
2450 			hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
2451 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
2452 			hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
2453 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
2454 			hash_keys.tags.flow_label = keys.tags.flow_label;
2455 	}
2456 
2457 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
2458 		hash_keys.basic.ip_proto = keys.basic.ip_proto;
2459 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
2460 		hash_keys.ports.src = keys.ports.src;
2461 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
2462 		hash_keys.ports.dst = keys.ports.dst;
2463 
2464 	return fib_multipath_hash_from_keys(net, &hash_keys);
2465 }
2466 
2467 static u32 rt6_multipath_custom_hash_skb(const struct net *net,
2468 					 const struct sk_buff *skb)
2469 {
2470 	u32 mhash, mhash_inner;
2471 	bool has_inner = true;
2472 
2473 	mhash = rt6_multipath_custom_hash_outer(net, skb, &has_inner);
2474 	mhash_inner = rt6_multipath_custom_hash_inner(net, skb, has_inner);
2475 
2476 	return jhash_2words(mhash, mhash_inner, 0);
2477 }
2478 
2479 static u32 rt6_multipath_custom_hash_fl6(const struct net *net,
2480 					 const struct flowi6 *fl6)
2481 {
2482 	u32 hash_fields = ip6_multipath_hash_fields(net);
2483 	struct flow_keys hash_keys;
2484 
2485 	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
2486 		return 0;
2487 
2488 	memset(&hash_keys, 0, sizeof(hash_keys));
2489 	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2490 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
2491 		hash_keys.addrs.v6addrs.src = fl6->saddr;
2492 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
2493 		hash_keys.addrs.v6addrs.dst = fl6->daddr;
2494 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
2495 		hash_keys.basic.ip_proto = fl6->flowi6_proto;
2496 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
2497 		hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2498 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT) {
2499 		if (fl6->flowi6_flags & FLOWI_FLAG_ANY_SPORT)
2500 			hash_keys.ports.src = (__force __be16)get_random_u16();
2501 		else
2502 			hash_keys.ports.src = fl6->fl6_sport;
2503 	}
2504 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
2505 		hash_keys.ports.dst = fl6->fl6_dport;
2506 
2507 	return fib_multipath_hash_from_keys(net, &hash_keys);
2508 }
2509 
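/* Hash policies (net.ipv6.fib_multipath_hash_policy):
 *   0 - L3: addresses, flow label and next header
 *   1 - L4: addresses, ports and next header
 *   2 - L3, taken from the inner packet for encapsulated traffic
 *   3 - custom set of fields from fib_multipath_hash_fields
 */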
2510 /* If skb is set it will be used and fl6 can be NULL. */
2511 u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
2512 		       const struct sk_buff *skb, struct flow_keys *flkeys)
2513 {
2514 	struct flow_keys hash_keys;
2515 	u32 mhash = 0;
2516 
2517 	switch (ip6_multipath_hash_policy(net)) {
2518 	case 0:
2519 		memset(&hash_keys, 0, sizeof(hash_keys));
2520 		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2521 		if (skb) {
2522 			ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
2523 		} else {
2524 			hash_keys.addrs.v6addrs.src = fl6->saddr;
2525 			hash_keys.addrs.v6addrs.dst = fl6->daddr;
2526 			hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2527 			hash_keys.basic.ip_proto = fl6->flowi6_proto;
2528 		}
2529 		mhash = fib_multipath_hash_from_keys(net, &hash_keys);
2530 		break;
2531 	case 1:
2532 		if (skb) {
2533 			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
2534 			struct flow_keys keys;
2535 
2536 			/* short-circuit if we already have L4 hash present */
2537 			if (skb->l4_hash)
2538 				return skb_get_hash_raw(skb) >> 1;
2539 
2540 			memset(&hash_keys, 0, sizeof(hash_keys));
2541 
2542 			if (!flkeys) {
2543 				skb_flow_dissect_flow_keys(skb, &keys, flag);
2544 				flkeys = &keys;
2545 			}
2546 			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2547 			hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
2548 			hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
2549 			hash_keys.ports.src = flkeys->ports.src;
2550 			hash_keys.ports.dst = flkeys->ports.dst;
2551 			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2552 		} else {
2553 			memset(&hash_keys, 0, sizeof(hash_keys));
2554 			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2555 			hash_keys.addrs.v6addrs.src = fl6->saddr;
2556 			hash_keys.addrs.v6addrs.dst = fl6->daddr;
2557 			if (fl6->flowi6_flags & FLOWI_FLAG_ANY_SPORT)
2558 				hash_keys.ports.src = (__force __be16)get_random_u16();
2559 			else
2560 				hash_keys.ports.src = fl6->fl6_sport;
2561 			hash_keys.ports.dst = fl6->fl6_dport;
2562 			hash_keys.basic.ip_proto = fl6->flowi6_proto;
2563 		}
2564 		mhash = fib_multipath_hash_from_keys(net, &hash_keys);
2565 		break;
2566 	case 2:
2567 		memset(&hash_keys, 0, sizeof(hash_keys));
2568 		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2569 		if (skb) {
2570 			struct flow_keys keys;
2571 
2572 			if (!flkeys) {
2573 				skb_flow_dissect_flow_keys(skb, &keys, 0);
2574 				flkeys = &keys;
2575 			}
2576 
2577 			/* Inner can be v4 or v6 */
2578 			if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2579 				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2580 				hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
2581 				hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
2582 			} else if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2583 				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2584 				hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
2585 				hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
2586 				hash_keys.tags.flow_label = flkeys->tags.flow_label;
2587 				hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2588 			} else {
2589 				/* Same as case 0 */
2590 				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2591 				ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
2592 			}
2593 		} else {
2594 			/* Same as case 0 */
2595 			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2596 			hash_keys.addrs.v6addrs.src = fl6->saddr;
2597 			hash_keys.addrs.v6addrs.dst = fl6->daddr;
2598 			hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2599 			hash_keys.basic.ip_proto = fl6->flowi6_proto;
2600 		}
2601 		mhash = fib_multipath_hash_from_keys(net, &hash_keys);
2602 		break;
2603 	case 3:
2604 		if (skb)
2605 			mhash = rt6_multipath_custom_hash_skb(net, skb);
2606 		else
2607 			mhash = rt6_multipath_custom_hash_fl6(net, fl6);
2608 		break;
2609 	}
2610 
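	/* Shift so the result fits in 31 bits: multipath selection compares
	 * the hash against signed per-nexthop upper bounds.
	 */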
2611 	return mhash >> 1;
2612 }
2613 
2614 /* Called with rcu held */
2615 void ip6_route_input(struct sk_buff *skb)
2616 {
2617 	const struct ipv6hdr *iph = ipv6_hdr(skb);
2618 	struct net *net = dev_net(skb->dev);
2619 	int flags = RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_DST_NOREF;
2620 	struct ip_tunnel_info *tun_info;
2621 	struct flowi6 fl6 = {
2622 		.flowi6_iif = skb->dev->ifindex,
2623 		.daddr = iph->daddr,
2624 		.saddr = iph->saddr,
2625 		.flowlabel = ip6_flowinfo(iph),
2626 		.flowi6_mark = skb->mark,
2627 		.flowi6_proto = iph->nexthdr,
2628 	};
2629 	struct flow_keys *flkeys = NULL, _flkeys;
2630 
2631 	tun_info = skb_tunnel_info(skb);
2632 	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
2633 		fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
2634 
2635 	if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys))
2636 		flkeys = &_flkeys;
2637 
2638 	if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
2639 		fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);
2640 	skb_dst_drop(skb);
2641 	skb_dst_set_noref(skb, ip6_route_input_lookup(net, skb->dev,
2642 						      &fl6, skb, flags));
2643 }
2644 
2645 INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_output(struct net *net,
2646 					     struct fib6_table *table,
2647 					     struct flowi6 *fl6,
2648 					     const struct sk_buff *skb,
2649 					     int flags)
2650 {
2651 	return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
2652 }
2653 
2654 static struct dst_entry *ip6_route_output_flags_noref(struct net *net,
2655 						      const struct sock *sk,
2656 						      struct flowi6 *fl6,
2657 						      int flags)
2658 {
2659 	bool any_src;
2660 
2661 	if (ipv6_addr_type(&fl6->daddr) &
2662 	    (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) {
2663 		struct dst_entry *dst;
2664 
2665 		/* This function does not take refcnt on the dst */
2666 		dst = l3mdev_link_scope_lookup(net, fl6);
2667 		if (dst)
2668 			return dst;
2669 	}
2670 
2671 	fl6->flowi6_iif = LOOPBACK_IFINDEX;
2672 
2673 	flags |= RT6_LOOKUP_F_DST_NOREF;
2674 	any_src = ipv6_addr_any(&fl6->saddr);
2675 	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
2676 	    (fl6->flowi6_oif && any_src))
2677 		flags |= RT6_LOOKUP_F_IFACE;
2678 
2679 	if (!any_src)
2680 		flags |= RT6_LOOKUP_F_HAS_SADDR;
2681 	else if (sk)
2682 		flags |= rt6_srcprefs2flags(READ_ONCE(inet6_sk(sk)->srcprefs));
2683 
2684 	return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output);
2685 }
2686 
2687 struct dst_entry *ip6_route_output_flags(struct net *net,
2688 					 const struct sock *sk,
2689 					 struct flowi6 *fl6,
2690 					 int flags)
2691 {
2692 	struct dst_entry *dst;
2693 	struct rt6_info *rt6;
2694 
2695 	rcu_read_lock();
2696 	dst = ip6_route_output_flags_noref(net, sk, fl6, flags);
2697 	rt6 = dst_rt6_info(dst);
2698 	/* For dst cached in uncached_list, refcnt is already taken. */
2699 	if (list_empty(&rt6->dst.rt_uncached) && !dst_hold_safe(dst)) {
2700 		dst = &net->ipv6.ip6_null_entry->dst;
2701 		dst_hold(dst);
2702 	}
2703 	rcu_read_unlock();
2704 
2705 	return dst;
2706 }
2707 EXPORT_SYMBOL_GPL(ip6_route_output_flags);
2708 
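/* Clone @dst_orig into a blackhole dst that keeps its metrics but
 * discards all traffic; presumably for callers (e.g. xfrm) that must
 * neutralize a route without freeing it immediately.
 */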
2709 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2710 {
2711 	struct rt6_info *rt, *ort = dst_rt6_info(dst_orig);
2712 	struct net_device *loopback_dev = net->loopback_dev;
2713 	struct dst_entry *new = NULL;
2714 
2715 	rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev,
2716 		       DST_OBSOLETE_DEAD, 0);
2717 	if (rt) {
2718 		rt6_info_init(rt);
2719 		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
2720 
2721 		new = &rt->dst;
2722 		new->__use = 1;
2723 		new->input = dst_discard;
2724 		new->output = dst_discard_out;
2725 
2726 		dst_copy_metrics(new, &ort->dst);
2727 
2728 		rt->rt6i_idev = in6_dev_get(loopback_dev);
2729 		rt->rt6i_gateway = ort->rt6i_gateway;
2730 		rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
2731 
2732 		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
2733 #ifdef CONFIG_IPV6_SUBTREES
2734 		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
2735 #endif
2736 	}
2737 
2738 	dst_release(dst_orig);
2739 	return new ? new : ERR_PTR(-ENOMEM);
2740 }
2741 
2742 /*
2743  *	Destination cache support functions
2744  */
2745 
2746 static bool fib6_check(struct fib6_info *f6i, u32 cookie)
2747 {
2748 	u32 rt_cookie = 0;
2749 
2750 	if (!fib6_get_cookie_safe(f6i, &rt_cookie) || rt_cookie != cookie)
2751 		return false;
2752 
2753 	if (fib6_check_expired(f6i))
2754 		return false;
2755 
2756 	return true;
2757 }
2758 
2759 static struct dst_entry *rt6_check(struct rt6_info *rt,
2760 				   struct fib6_info *from,
2761 				   u32 cookie)
2762 {
2763 	u32 rt_cookie = 0;
2764 
2765 	if (!from || !fib6_get_cookie_safe(from, &rt_cookie) ||
2766 	    rt_cookie != cookie)
2767 		return NULL;
2768 
2769 	if (rt6_check_expired(rt))
2770 		return NULL;
2771 
2772 	return &rt->dst;
2773 }
2774 
2775 static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt,
2776 					    struct fib6_info *from,
2777 					    u32 cookie)
2778 {
2779 	if (!__rt6_check_expired(rt) &&
2780 	    READ_ONCE(rt->dst.obsolete) == DST_OBSOLETE_FORCE_CHK &&
2781 	    fib6_check(from, cookie))
2782 		return &rt->dst;
2783 	return NULL;
2784 }
2785 
2786 INDIRECT_CALLABLE_SCOPE struct dst_entry *ip6_dst_check(struct dst_entry *dst,
2787 							u32 cookie)
2788 {
2789 	struct dst_entry *dst_ret;
2790 	struct fib6_info *from;
2791 	struct rt6_info *rt;
2792 
2793 	rt = dst_rt6_info(dst);
2794 
2795 	if (rt->sernum)
2796 		return rt6_is_valid(rt) ? dst : NULL;
2797 
2798 	rcu_read_lock();
2799 
2800 	/* All IPv6 dsts are created with ->obsolete set to the value
2801 	 * DST_OBSOLETE_FORCE_CHK, which always forces validation calls
2802 	 * down into this function.
2803 	 */
2804 
2805 	from = rcu_dereference(rt->from);
2806 
2807 	if (from && (rt->rt6i_flags & RTF_PCPU ||
2808 	    unlikely(!list_empty(&rt->dst.rt_uncached))))
2809 		dst_ret = rt6_dst_from_check(rt, from, cookie);
2810 	else
2811 		dst_ret = rt6_check(rt, from, cookie);
2812 
2813 	rcu_read_unlock();
2814 
2815 	return dst_ret;
2816 }
2817 EXPORT_INDIRECT_CALLABLE(ip6_dst_check);
2818 
2819 static void ip6_negative_advice(struct sock *sk,
2820 				struct dst_entry *dst)
2821 {
2822 	struct rt6_info *rt = dst_rt6_info(dst);
2823 
2824 	if (rt->rt6i_flags & RTF_CACHE) {
2825 		rcu_read_lock();
2826 		if (rt6_check_expired(rt)) {
2827 			/* rt/dst can not be destroyed yet,
2828 			 * because of rcu_read_lock()
2829 			 */
2830 			sk_dst_reset(sk);
2831 			rt6_remove_exception_rt(rt);
2832 		}
2833 		rcu_read_unlock();
2834 		return;
2835 	}
2836 	sk_dst_reset(sk);
2837 }
2838 
2839 static void ip6_link_failure(struct sk_buff *skb)
2840 {
2841 	struct rt6_info *rt;
2842 
2843 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
2844 
2845 	rt = dst_rt6_info(skb_dst(skb));
2846 	if (rt) {
2847 		rcu_read_lock();
2848 		if (rt->rt6i_flags & RTF_CACHE) {
2849 			rt6_remove_exception_rt(rt);
2850 		} else {
2851 			struct fib6_info *from;
2852 			struct fib6_node *fn;
2853 
2854 			from = rcu_dereference(rt->from);
2855 			if (from) {
2856 				fn = rcu_dereference(from->fib6_node);
2857 				if (fn && (rt->rt6i_flags & RTF_DEFAULT))
2858 					WRITE_ONCE(fn->fn_sernum, -1);
2859 			}
2860 		}
2861 		rcu_read_unlock();
2862 	}
2863 }
2864 
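/* Arm (or shorten) the expiry on a cached route. dst_set_expires()
 * only ever moves the expiry earlier, so a sooner expiry inherited
 * from the parent fib6_info is preserved.
 */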
2865 static void rt6_update_expires(struct rt6_info *rt0, int timeout)
2866 {
2867 	if (!(rt0->rt6i_flags & RTF_EXPIRES)) {
2868 		struct fib6_info *from;
2869 
2870 		rcu_read_lock();
2871 		from = rcu_dereference(rt0->from);
2872 		if (from)
2873 			WRITE_ONCE(rt0->dst.expires, from->expires);
2874 		rcu_read_unlock();
2875 	}
2876 
2877 	dst_set_expires(&rt0->dst, timeout);
2878 	rt0->rt6i_flags |= RTF_EXPIRES;
2879 }
2880 
2881 static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
2882 {
2883 	struct net *net = dev_net(rt->dst.dev);
2884 
2885 	dst_metric_set(&rt->dst, RTAX_MTU, mtu);
2886 	rt->rt6i_flags |= RTF_MODIFIED;
2887 	rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
2888 }
2889 
2890 static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
2891 {
2892 	return !(rt->rt6i_flags & RTF_CACHE) &&
2893 		(rt->rt6i_flags & RTF_PCPU || rcu_access_pointer(rt->from));
2894 }
2895 
2896 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
2897 				 const struct ipv6hdr *iph, u32 mtu,
2898 				 bool confirm_neigh)
2899 {
2900 	const struct in6_addr *daddr, *saddr;
2901 	struct rt6_info *rt6 = dst_rt6_info(dst);
2902 
2903 	/* Note: do *NOT* check dst_metric_locked(dst, RTAX_MTU).
2904 	 * IPv6 pmtu discovery isn't optional, so 'mtu lock' cannot disable it.
2905 	 * [see also comment in rt6_mtu_change_route()]
2906 	 */
2907 
2908 	if (iph) {
2909 		daddr = &iph->daddr;
2910 		saddr = &iph->saddr;
2911 	} else if (sk) {
2912 		daddr = &sk->sk_v6_daddr;
2913 		saddr = &inet6_sk(sk)->saddr;
2914 	} else {
2915 		daddr = NULL;
2916 		saddr = NULL;
2917 	}
2918 
2919 	if (confirm_neigh)
2920 		dst_confirm_neigh(dst, daddr);
2921 
2922 	if (mtu < IPV6_MIN_MTU)
2923 		return;
2924 	if (mtu >= dst_mtu(dst))
2925 		return;
2926 
2927 	if (!rt6_cache_allowed_for_pmtu(rt6)) {
2928 		rt6_do_update_pmtu(rt6, mtu);
2929 		/* update rt6_ex->stamp for cache */
2930 		if (rt6->rt6i_flags & RTF_CACHE)
2931 			rt6_update_exception_stamp_rt(rt6);
2932 	} else if (daddr) {
2933 		struct fib6_result res = {};
2934 		struct rt6_info *nrt6;
2935 
2936 		rcu_read_lock();
2937 		res.f6i = rcu_dereference(rt6->from);
2938 		if (!res.f6i)
2939 			goto out_unlock;
2940 
2941 		res.fib6_flags = res.f6i->fib6_flags;
2942 		res.fib6_type = res.f6i->fib6_type;
2943 
2944 		if (res.f6i->nh) {
2945 			struct fib6_nh_match_arg arg = {
2946 				.dev = dst_dev_rcu(dst),
2947 				.gw = &rt6->rt6i_gateway,
2948 			};
2949 
2950 			nexthop_for_each_fib6_nh(res.f6i->nh,
2951 						 fib6_nh_find_match, &arg);
2952 
2953 			/* fib6_info uses a nexthop that has no fib6_nh
2954 			 * matching dst->dev + gw. Should be impossible.
2955 			 */
2956 			if (!arg.match)
2957 				goto out_unlock;
2958 
2959 			res.nh = arg.match;
2960 		} else {
2961 			res.nh = res.f6i->fib6_nh;
2962 		}
2963 
2964 		nrt6 = ip6_rt_cache_alloc(&res, daddr, saddr);
2965 		if (nrt6) {
2966 			rt6_do_update_pmtu(nrt6, mtu);
2967 			if (rt6_insert_exception(nrt6, &res))
2968 				dst_release_immediate(&nrt6->dst);
2969 		}
2970 out_unlock:
2971 		rcu_read_unlock();
2972 	}
2973 }
2974 
2975 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
2976 			       struct sk_buff *skb, u32 mtu,
2977 			       bool confirm_neigh)
2978 {
2979 	__ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu,
2980 			     confirm_neigh);
2981 }
2982 
2983 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
2984 		     int oif, u32 mark, kuid_t uid)
2985 {
2986 	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
2987 	struct dst_entry *dst;
2988 	struct flowi6 fl6 = {
2989 		.flowi6_oif = oif,
2990 		.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark),
2991 		.daddr = iph->daddr,
2992 		.saddr = iph->saddr,
2993 		.flowlabel = ip6_flowinfo(iph),
2994 		.flowi6_uid = uid,
2995 	};
2996 
2997 	dst = ip6_route_output(net, NULL, &fl6);
2998 	if (!dst->error)
2999 		__ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu), true);
3000 	dst_release(dst);
3001 }
3002 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
3003 
3004 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
3005 {
3006 	int oif = sk->sk_bound_dev_if;
3007 	struct dst_entry *dst;
3008 
3009 	if (!oif && skb->dev)
3010 		oif = l3mdev_master_ifindex(skb->dev);
3011 
3012 	ip6_update_pmtu(skb, sock_net(sk), mtu, oif, READ_ONCE(sk->sk_mark),
3013 			sk_uid(sk));
3014 
3015 	dst = __sk_dst_get(sk);
3016 	if (!dst || !READ_ONCE(dst->obsolete) ||
3017 	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
3018 		return;
3019 
3020 	bh_lock_sock(sk);
3021 	if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
3022 		ip6_datagram_dst_update(sk, false);
3023 	bh_unlock_sock(sk);
3024 }
3025 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
3026 
3027 void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
3028 			   const struct flowi6 *fl6)
3029 {
3030 #ifdef CONFIG_IPV6_SUBTREES
3031 	struct ipv6_pinfo *np = inet6_sk(sk);
3032 #endif
3033 
3034 	ip6_dst_store(sk, dst,
3035 		      ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr) ?
3036 		      &sk->sk_v6_daddr : NULL,
3037 #ifdef CONFIG_IPV6_SUBTREES
3038 		      ipv6_addr_equal(&fl6->saddr, &np->saddr) ?
3039 		      &np->saddr :
3040 #endif
3041 		      NULL);
3042 }
3043 
3044 static bool ip6_redirect_nh_match(const struct fib6_result *res,
3045 				  struct flowi6 *fl6,
3046 				  const struct in6_addr *gw,
3047 				  struct rt6_info **ret)
3048 {
3049 	const struct fib6_nh *nh = res->nh;
3050 
3051 	if (nh->fib_nh_flags & RTNH_F_DEAD || !nh->fib_nh_gw_family ||
3052 	    fl6->flowi6_oif != nh->fib_nh_dev->ifindex)
3053 		return false;
3054 
3055 	/* rt_cache's gateway might be different from its 'parent'
3056 	 * in the case of an ip redirect.
3057 	 * So we keep searching in the exception table if the gateway
3058 	 * is different.
3059 	 */
3060 	if (!ipv6_addr_equal(gw, &nh->fib_nh_gw6)) {
3061 		struct rt6_info *rt_cache;
3062 
3063 		rt_cache = rt6_find_cached_rt(res, &fl6->daddr, &fl6->saddr);
3064 		if (rt_cache &&
3065 		    ipv6_addr_equal(gw, &rt_cache->rt6i_gateway)) {
3066 			*ret = rt_cache;
3067 			return true;
3068 		}
3069 		return false;
3070 	}
3071 	return true;
3072 }
3073 
3074 struct fib6_nh_rd_arg {
3075 	struct fib6_result	*res;
3076 	struct flowi6		*fl6;
3077 	const struct in6_addr	*gw;
3078 	struct rt6_info		**ret;
3079 };
3080 
3081 static int fib6_nh_redirect_match(struct fib6_nh *nh, void *_arg)
3082 {
3083 	struct fib6_nh_rd_arg *arg = _arg;
3084 
3085 	arg->res->nh = nh;
3086 	return ip6_redirect_nh_match(arg->res, arg->fl6, arg->gw, arg->ret);
3087 }
3088 
3089 /* Handle redirects */
3090 struct ip6rd_flowi {
3091 	struct flowi6 fl6;
3092 	struct in6_addr gateway;
3093 };
3094 
3095 INDIRECT_CALLABLE_SCOPE struct rt6_info *__ip6_route_redirect(struct net *net,
3096 					     struct fib6_table *table,
3097 					     struct flowi6 *fl6,
3098 					     const struct sk_buff *skb,
3099 					     int flags)
3100 {
3101 	struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
3102 	struct rt6_info *ret = NULL;
3103 	struct fib6_result res = {};
3104 	struct fib6_nh_rd_arg arg = {
3105 		.res = &res,
3106 		.fl6 = fl6,
3107 		.gw  = &rdfl->gateway,
3108 		.ret = &ret
3109 	};
3110 	struct fib6_info *rt;
3111 	struct fib6_node *fn;
3112 
3113 	/* Get the "current" route for this destination and
3114 	 * check if the redirect has come from the appropriate router.
3115 	 *
3116 	 * RFC 4861 specifies that redirects should only be
3117 	 * accepted if they come from the nexthop to the target.
3118 	 * Due to the way the routes are chosen, this notion
3119 	 * is a bit fuzzy and one might need to check all possible
3120 	 * routes.
3121 	 */
3122 
3123 	rcu_read_lock();
3124 	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
3125 restart:
3126 	for_each_fib6_node_rt_rcu(fn) {
3127 		res.f6i = rt;
3128 		if (fib6_check_expired(rt))
3129 			continue;
3130 		if (rt->fib6_flags & RTF_REJECT)
3131 			break;
3132 		if (unlikely(rt->nh)) {
3133 			if (nexthop_is_blackhole(rt->nh))
3134 				continue;
3135 			/* on match, res->nh is filled in and potentially ret */
3136 			if (nexthop_for_each_fib6_nh(rt->nh,
3137 						     fib6_nh_redirect_match,
3138 						     &arg))
3139 				goto out;
3140 		} else {
3141 			res.nh = rt->fib6_nh;
3142 			if (ip6_redirect_nh_match(&res, fl6, &rdfl->gateway,
3143 						  &ret))
3144 				goto out;
3145 		}
3146 	}
3147 
3148 	if (!rt)
3149 		rt = net->ipv6.fib6_null_entry;
3150 	else if (rt->fib6_flags & RTF_REJECT) {
3151 		ret = net->ipv6.ip6_null_entry;
3152 		goto out;
3153 	}
3154 
3155 	if (rt == net->ipv6.fib6_null_entry) {
3156 		fn = fib6_backtrack(fn, &fl6->saddr);
3157 		if (fn)
3158 			goto restart;
3159 	}
3160 
3161 	res.f6i = rt;
3162 	res.nh = rt->fib6_nh;
3163 out:
3164 	if (ret) {
3165 		ip6_hold_safe(net, &ret);
3166 	} else {
3167 		res.fib6_flags = res.f6i->fib6_flags;
3168 		res.fib6_type = res.f6i->fib6_type;
3169 		ret = ip6_create_rt_rcu(&res);
3170 	}
3171 
3172 	rcu_read_unlock();
3173 
3174 	trace_fib6_table_lookup(net, &res, table, fl6);
3175 	return ret;
3176 }
3177 
3178 static struct dst_entry *ip6_route_redirect(struct net *net,
3179 					    const struct flowi6 *fl6,
3180 					    const struct sk_buff *skb,
3181 					    const struct in6_addr *gateway)
3182 {
3183 	int flags = RT6_LOOKUP_F_HAS_SADDR;
3184 	struct ip6rd_flowi rdfl;
3185 
3186 	rdfl.fl6 = *fl6;
3187 	rdfl.gateway = *gateway;
3188 
3189 	return fib6_rule_lookup(net, &rdfl.fl6, skb,
3190 				flags, __ip6_route_redirect);
3191 }
3192 
3193 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
3194 		  kuid_t uid)
3195 {
3196 	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
3197 	struct dst_entry *dst;
3198 	struct flowi6 fl6 = {
3199 		.flowi6_iif = LOOPBACK_IFINDEX,
3200 		.flowi6_oif = oif,
3201 		.flowi6_mark = mark,
3202 		.daddr = iph->daddr,
3203 		.saddr = iph->saddr,
3204 		.flowlabel = ip6_flowinfo(iph),
3205 		.flowi6_uid = uid,
3206 	};
3207 
3208 	dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
3209 	rt6_do_redirect(dst, NULL, skb);
3210 	dst_release(dst);
3211 }
3212 EXPORT_SYMBOL_GPL(ip6_redirect);
3213 
3214 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif)
3215 {
3216 	const struct ipv6hdr *iph = ipv6_hdr(skb);
3217 	const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
3218 	struct dst_entry *dst;
3219 	struct flowi6 fl6 = {
3220 		.flowi6_iif = LOOPBACK_IFINDEX,
3221 		.flowi6_oif = oif,
3222 		.daddr = msg->dest,
3223 		.saddr = iph->daddr,
3224 		.flowi6_uid = sock_net_uid(net, NULL),
3225 	};
3226 
3227 	dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
3228 	rt6_do_redirect(dst, NULL, skb);
3229 	dst_release(dst);
3230 }
3231 
3232 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
3233 {
3234 	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if,
3235 		     READ_ONCE(sk->sk_mark), sk_uid(sk));
3236 }
3237 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
3238 
3239 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
3240 {
3241 	unsigned int mtu = dst_mtu(dst);
3242 	struct net *net;
3243 
3244 	mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
3245 
3246 	rcu_read_lock();
3247 
3248 	net = dst_dev_net_rcu(dst);
3249 	if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
3250 		mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
3251 
3252 	rcu_read_unlock();
3253 
3254 	/*
3255 	 * The maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and the
3256 	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
3257 	 * IPV6_MAXPLEN is also valid and means: "any MSS,
3258 	 * rely only on pmtu discovery"
3259 	 */
3260 	if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
3261 		mtu = IPV6_MAXPLEN;
3262 	return mtu;
3263 }
3264 
3265 INDIRECT_CALLABLE_SCOPE unsigned int ip6_mtu(const struct dst_entry *dst)
3266 {
3267 	return ip6_dst_mtu_maybe_forward(dst, false);
3268 }
3269 EXPORT_INDIRECT_CALLABLE(ip6_mtu);
3270 
3271 /* MTU selection:
3272  * 1. mtu on route is locked - use it
3273  * 2. mtu from nexthop exception
3274  * 3. mtu from egress device
3275  *
3276  * based on ip6_dst_mtu_forward and exception logic of
3277  * rt6_find_cached_rt; called with rcu_read_lock
3278  */
3279 u32 ip6_mtu_from_fib6(const struct fib6_result *res,
3280 		      const struct in6_addr *daddr,
3281 		      const struct in6_addr *saddr)
3282 {
3283 	const struct fib6_nh *nh = res->nh;
3284 	struct fib6_info *f6i = res->f6i;
3285 	struct inet6_dev *idev;
3286 	struct rt6_info *rt;
3287 	u32 mtu = 0;
3288 
3289 	if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {
3290 		mtu = f6i->fib6_pmtu;
3291 		if (mtu)
3292 			goto out;
3293 	}
3294 
3295 	rt = rt6_find_cached_rt(res, daddr, saddr);
3296 	if (unlikely(rt)) {
3297 		mtu = dst_metric_raw(&rt->dst, RTAX_MTU);
3298 	} else {
3299 		struct net_device *dev = nh->fib_nh_dev;
3300 
3301 		mtu = IPV6_MIN_MTU;
3302 		idev = __in6_dev_get(dev);
3303 		if (idev)
3304 			mtu = max_t(u32, mtu, READ_ONCE(idev->cnf.mtu6));
3305 	}
3306 
3307 	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
3308 out:
3309 	return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
3310 }
3311 
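/* Allocate a standalone dst for ICMPv6/NDISC output. The route is
 * never inserted into the FIB; it only lives on the uncached list.
 */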
3312 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
3313 				  struct flowi6 *fl6)
3314 {
3315 	struct dst_entry *dst;
3316 	struct rt6_info *rt;
3317 	struct inet6_dev *idev = in6_dev_get(dev);
3318 	struct net *net = dev_net(dev);
3319 
3320 	if (unlikely(!idev))
3321 		return ERR_PTR(-ENODEV);
3322 
3323 	rt = ip6_dst_alloc(net, dev, 0);
3324 	if (unlikely(!rt)) {
3325 		in6_dev_put(idev);
3326 		dst = ERR_PTR(-ENOMEM);
3327 		goto out;
3328 	}
3329 
3330 	rt->dst.input = ip6_input;
3331 	rt->dst.output  = ip6_output;
3332 	rt->rt6i_gateway  = fl6->daddr;
3333 	rt->rt6i_dst.addr = fl6->daddr;
3334 	rt->rt6i_dst.plen = 128;
3335 	rt->rt6i_idev     = idev;
3336 	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
3337 
3338 	/* Add this dst into uncached_list so that rt6_disable_ip() can
3339 	 * properly release the net_device.
3340 	 */
3341 	rt6_uncached_list_add(rt);
3342 
3343 	dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
3344 
3345 out:
3346 	return dst;
3347 }
3348 
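/* Shrink the dst cache under pressure. ip6_rt_gc_expire is bumped on
 * each run past the min interval (ageing entries faster the more often
 * GC fires), reset to half the GC timeout once the entry count drops
 * below gc_thresh, and decayed by 1/2^elasticity on every call.
 */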
3349 static void ip6_dst_gc(struct dst_ops *ops)
3350 {
3351 	struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
3352 	int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
3353 	int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
3354 	int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
3355 	unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
3356 	unsigned int val;
3357 	int entries;
3358 
3359 	if (time_after(rt_last_gc + rt_min_interval, jiffies))
3360 		goto out;
3361 
3362 	fib6_run_gc(atomic_inc_return(&net->ipv6.ip6_rt_gc_expire), net, true);
3363 	entries = dst_entries_get_slow(ops);
3364 	if (entries < ops->gc_thresh)
3365 		atomic_set(&net->ipv6.ip6_rt_gc_expire, rt_gc_timeout >> 1);
3366 out:
3367 	val = atomic_read(&net->ipv6.ip6_rt_gc_expire);
3368 	atomic_set(&net->ipv6.ip6_rt_gc_expire, val - (val >> rt_elasticity));
3369 }
3370 
3371 static int ip6_nh_lookup_table(struct net *net, struct fib6_config *cfg,
3372 			       const struct in6_addr *gw_addr, u32 tbid,
3373 			       int flags, struct fib6_result *res)
3374 {
3375 	struct flowi6 fl6 = {
3376 		.flowi6_oif = cfg->fc_ifindex,
3377 		.daddr = *gw_addr,
3378 		.saddr = cfg->fc_prefsrc,
3379 	};
3380 	struct fib6_table *table;
3381 	int err;
3382 
3383 	table = fib6_get_table(net, tbid);
3384 	if (!table)
3385 		return -EINVAL;
3386 
3387 	if (!ipv6_addr_any(&cfg->fc_prefsrc))
3388 		flags |= RT6_LOOKUP_F_HAS_SADDR;
3389 
3390 	flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;
3391 
3392 	err = fib6_table_lookup(net, table, cfg->fc_ifindex, &fl6, res, flags);
3393 	if (!err && res->f6i != net->ipv6.fib6_null_entry)
3394 		fib6_select_path(net, res, &fl6, cfg->fc_ifindex,
3395 				 cfg->fc_ifindex != 0, NULL, flags);
3396 
3397 	return err;
3398 }
3399 
3400 static int ip6_route_check_nh_onlink(struct net *net,
3401 				     struct fib6_config *cfg,
3402 				     const struct net_device *dev,
3403 				     struct netlink_ext_ack *extack)
3404 {
3405 	u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
3406 	const struct in6_addr *gw_addr = &cfg->fc_gateway;
3407 	struct fib6_result res = {};
3408 	int err;
3409 
3410 	err = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0, &res);
3411 	if (!err && !(res.fib6_flags & RTF_REJECT) &&
3412 	    /* ignore match if it is the default route */
3413 	    !ipv6_addr_any(&res.f6i->fib6_dst.addr) &&
3414 	    (res.fib6_type != RTN_UNICAST || dev != res.nh->fib_nh_dev)) {
3415 		NL_SET_ERR_MSG(extack,
3416 			       "Nexthop has invalid gateway or device mismatch");
3417 		err = -EINVAL;
3418 	}
3419 
3420 	return err;
3421 }
3422 
3423 static int ip6_route_check_nh(struct net *net,
3424 			      struct fib6_config *cfg,
3425 			      struct net_device **_dev,
3426 			      netdevice_tracker *dev_tracker,
3427 			      struct inet6_dev **idev)
3428 {
3429 	const struct in6_addr *gw_addr = &cfg->fc_gateway;
3430 	struct net_device *dev = _dev ? *_dev : NULL;
3431 	int flags = RT6_LOOKUP_F_IFACE;
3432 	struct fib6_result res = {};
3433 	int err = -EHOSTUNREACH;
3434 
3435 	if (cfg->fc_table) {
3436 		err = ip6_nh_lookup_table(net, cfg, gw_addr,
3437 					  cfg->fc_table, flags, &res);
3438 		/* The gw_addr route must not itself require a gateway or be a
3439 		 * reject route. If a device is given, it must match the result.
3440 		 */
3441 		if (err || res.fib6_flags & RTF_REJECT ||
3442 		    res.nh->fib_nh_gw_family ||
3443 		    (dev && dev != res.nh->fib_nh_dev))
3444 			err = -EHOSTUNREACH;
3445 	}
3446 
3447 	if (err < 0) {
3448 		struct flowi6 fl6 = {
3449 			.flowi6_oif = cfg->fc_ifindex,
3450 			.daddr = *gw_addr,
3451 		};
3452 
3453 		err = fib6_lookup(net, cfg->fc_ifindex, &fl6, &res, flags);
3454 		if (err || res.fib6_flags & RTF_REJECT ||
3455 		    res.nh->fib_nh_gw_family)
3456 			err = -EHOSTUNREACH;
3457 
3458 		if (err)
3459 			return err;
3460 
3461 		fib6_select_path(net, &res, &fl6, cfg->fc_ifindex,
3462 				 cfg->fc_ifindex != 0, NULL, flags);
3463 	}
3464 
3465 	err = 0;
3466 	if (dev) {
3467 		if (dev != res.nh->fib_nh_dev)
3468 			err = -EHOSTUNREACH;
3469 	} else {
3470 		*_dev = dev = res.nh->fib_nh_dev;
3471 		netdev_hold(dev, dev_tracker, GFP_ATOMIC);
3472 		*idev = in6_dev_get(dev);
3473 	}
3474 
3475 	return err;
3476 }
3477 
3478 static int ip6_validate_gw(struct net *net, struct fib6_config *cfg,
3479 			   struct net_device **_dev,
3480 			   netdevice_tracker *dev_tracker,
3481 			   struct inet6_dev **idev,
3482 			   struct netlink_ext_ack *extack)
3483 {
3484 	const struct in6_addr *gw_addr = &cfg->fc_gateway;
3485 	int gwa_type = ipv6_addr_type(gw_addr);
3486 	bool skip_dev = !(gwa_type & IPV6_ADDR_LINKLOCAL);
3487 	const struct net_device *dev = *_dev;
3488 	bool need_addr_check = !dev;
3489 	int err = -EINVAL;
3490 
3491 	/* if gw_addr is local, we will fail to detect this while the
3492 	 * address is still TENTATIVE (DAD in progress). rt6_lookup()
3493 	 * will return already-added prefix route via interface that
3494 	 * prefix route was assigned to, which might be non-loopback.
3495 	 */
3496 	if (dev &&
3497 	    ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
3498 		NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
3499 		goto out;
3500 	}
3501 
3502 	if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) {
3503 		/* IPv6 strictly inhibits using non-link-local
3504 		 * addresses as nexthop addresses.
3505 		 * Otherwise, the router will not be able to send redirects.
3506 		 * This is very good, but in some (rare!) circumstances
3507 		 * (SIT, PtP, NBMA NOARP links) it is handy to allow
3508 		 * some exceptions. --ANK
3509 		 * We allow IPv4-mapped nexthops to support RFC4798-type
3510 		 * addressing
3511 		 */
3512 		if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) {
3513 			NL_SET_ERR_MSG(extack, "Invalid gateway address");
3514 			goto out;
3515 		}
3516 
3517 		rcu_read_lock();
3518 
3519 		if (cfg->fc_flags & RTNH_F_ONLINK)
3520 			err = ip6_route_check_nh_onlink(net, cfg, dev, extack);
3521 		else
3522 			err = ip6_route_check_nh(net, cfg, _dev, dev_tracker,
3523 						 idev);
3524 
3525 		rcu_read_unlock();
3526 
3527 		if (err)
3528 			goto out;
3529 	}
3530 
3531 	/* reload in case device was changed */
3532 	dev = *_dev;
3533 
3534 	err = -EINVAL;
3535 	if (!dev) {
3536 		NL_SET_ERR_MSG(extack, "Egress device not specified");
3537 		goto out;
3538 	} else if (dev->flags & IFF_LOOPBACK) {
3539 		NL_SET_ERR_MSG(extack,
3540 			       "Egress device can not be loopback device for this route");
3541 		goto out;
3542 	}
3543 
3544 	/* if we did not check gw_addr above, do so now that the
3545 	 * egress device has been resolved.
3546 	 */
3547 	if (need_addr_check &&
3548 	    ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
3549 		NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
3550 		goto out;
3551 	}
3552 
3553 	err = 0;
3554 out:
3555 	return err;
3556 }
3557 
3558 static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type)
3559 {
3560 	if ((flags & RTF_REJECT) ||
3561 	    (dev && (dev->flags & IFF_LOOPBACK) &&
3562 	     !(addr_type & IPV6_ADDR_LOOPBACK) &&
3563 	     !(flags & (RTF_ANYCAST | RTF_LOCAL))))
3564 		return true;
3565 
3566 	return false;
3567 }
3568 
3569 int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
3570 		 struct fib6_config *cfg, gfp_t gfp_flags,
3571 		 struct netlink_ext_ack *extack)
3572 {
3573 	netdevice_tracker *dev_tracker = &fib6_nh->fib_nh_dev_tracker;
3574 	struct net_device *dev = NULL;
3575 	struct inet6_dev *idev = NULL;
3576 	int addr_type;
3577 	int err;
3578 
3579 	fib6_nh->fib_nh_family = AF_INET6;
3580 #ifdef CONFIG_IPV6_ROUTER_PREF
3581 	fib6_nh->last_probe = jiffies;
3582 #endif
3583 	if (cfg->fc_is_fdb) {
3584 		fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
3585 		fib6_nh->fib_nh_gw_family = AF_INET6;
3586 		return 0;
3587 	}
3588 
3589 	err = -ENODEV;
3590 	if (cfg->fc_ifindex) {
3591 		dev = netdev_get_by_index(net, cfg->fc_ifindex,
3592 					  dev_tracker, gfp_flags);
3593 		if (!dev)
3594 			goto out;
3595 		idev = in6_dev_get(dev);
3596 		if (!idev)
3597 			goto out;
3598 	}
3599 
3600 	if (cfg->fc_flags & RTNH_F_ONLINK) {
3601 		if (!dev) {
3602 			NL_SET_ERR_MSG(extack,
3603 				       "Nexthop device required for onlink");
3604 			goto out;
3605 		}
3606 
3607 		if (!(dev->flags & IFF_UP)) {
3608 			NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3609 			err = -ENETDOWN;
3610 			goto out;
3611 		}
3612 
3613 		fib6_nh->fib_nh_flags |= RTNH_F_ONLINK;
3614 	}
3615 
3616 	fib6_nh->fib_nh_weight = 1;
3617 
3618 	/* We cannot add true routes via loopback here, as they
3619 	 * would result in kernel looping; promote them to reject routes.
3620 	 */
3621 	addr_type = ipv6_addr_type(&cfg->fc_dst);
3622 	if (fib6_is_reject(cfg->fc_flags, dev, addr_type)) {
3623 		/* hold loopback dev/idev if we haven't done so. */
3624 		if (dev != net->loopback_dev) {
3625 			if (dev) {
3626 				netdev_put(dev, dev_tracker);
3627 				in6_dev_put(idev);
3628 			}
3629 			dev = net->loopback_dev;
3630 			netdev_hold(dev, dev_tracker, gfp_flags);
3631 			idev = in6_dev_get(dev);
3632 			if (!idev) {
3633 				err = -ENODEV;
3634 				goto out;
3635 			}
3636 		}
3637 		goto pcpu_alloc;
3638 	}
3639 
3640 	if (cfg->fc_flags & RTF_GATEWAY) {
3641 		err = ip6_validate_gw(net, cfg, &dev, dev_tracker,
3642 				      &idev, extack);
3643 		if (err)
3644 			goto out;
3645 
3646 		fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
3647 		fib6_nh->fib_nh_gw_family = AF_INET6;
3648 	}
3649 
3650 	err = -ENODEV;
3651 	if (!dev)
3652 		goto out;
3653 
3654 	if (!idev || idev->cnf.disable_ipv6) {
3655 		NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
3656 		err = -EACCES;
3657 		goto out;
3658 	}
3659 
3660 	if (!(dev->flags & IFF_UP) && !cfg->fc_ignore_dev_down) {
3661 		NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3662 		err = -ENETDOWN;
3663 		goto out;
3664 	}
3665 
3666 	if (!(cfg->fc_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
3667 	    !netif_carrier_ok(dev))
3668 		fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
3669 
3670 	err = fib_nh_common_init(net, &fib6_nh->nh_common, cfg->fc_encap,
3671 				 cfg->fc_encap_type, cfg, gfp_flags, extack);
3672 	if (err)
3673 		goto out;
3674 
3675 pcpu_alloc:
3676 	fib6_nh->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, gfp_flags);
3677 	if (!fib6_nh->rt6i_pcpu) {
3678 		err = -ENOMEM;
3679 		goto out;
3680 	}
3681 
3682 	fib6_nh->fib_nh_dev = dev;
3683 	fib6_nh->fib_nh_oif = dev->ifindex;
3684 	err = 0;
3685 out:
3686 	if (idev)
3687 		in6_dev_put(idev);
3688 
3689 	if (err) {
3690 		fib_nh_common_release(&fib6_nh->nh_common);
3691 		fib6_nh->nh_common.nhc_pcpu_rth_output = NULL;
3692 		fib6_nh->fib_nh_lws = NULL;
3693 		netdev_put(dev, dev_tracker);
3694 	}
3695 
3696 	return err;
3697 }
3698 
3699 void fib6_nh_release(struct fib6_nh *fib6_nh)
3700 {
3701 	struct rt6_exception_bucket *bucket;
3702 
3703 	rcu_read_lock();
3704 
3705 	fib6_nh_flush_exceptions(fib6_nh, NULL);
3706 	bucket = fib6_nh_get_excptn_bucket(fib6_nh, NULL);
3707 	if (bucket) {
3708 		rcu_assign_pointer(fib6_nh->rt6i_exception_bucket, NULL);
3709 		kfree(bucket);
3710 	}
3711 
3712 	rcu_read_unlock();
3713 
3714 	fib6_nh_release_dsts(fib6_nh);
3715 	free_percpu(fib6_nh->rt6i_pcpu);
3716 
3717 	fib_nh_common_release(&fib6_nh->nh_common);
3718 }
3719 
3720 void fib6_nh_release_dsts(struct fib6_nh *fib6_nh)
3721 {
3722 	int cpu;
3723 
3724 	if (!fib6_nh->rt6i_pcpu)
3725 		return;
3726 
3727 	for_each_possible_cpu(cpu) {
3728 		struct rt6_info *pcpu_rt, **ppcpu_rt;
3729 
3730 		ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
3731 		pcpu_rt = xchg(ppcpu_rt, NULL);
3732 		if (pcpu_rt) {
3733 			dst_dev_put(&pcpu_rt->dst);
3734 			dst_release(&pcpu_rt->dst);
3735 		}
3736 	}
3737 }
3738 
3739 static int fib6_config_validate(struct fib6_config *cfg,
3740 				struct netlink_ext_ack *extack)
3741 {
3742 	/* RTF_PCPU is an internal flag; can not be set by userspace */
3743 	if (cfg->fc_flags & RTF_PCPU) {
3744 		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
3745 		goto errout;
3746 	}
3747 
3748 	/* RTF_CACHE is an internal flag; can not be set by userspace */
3749 	if (cfg->fc_flags & RTF_CACHE) {
3750 		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
3751 		goto errout;
3752 	}
3753 
3754 	if (cfg->fc_type > RTN_MAX) {
3755 		NL_SET_ERR_MSG(extack, "Invalid route type");
3756 		goto errout;
3757 	}
3758 
3759 	if (cfg->fc_dst_len > 128) {
3760 		NL_SET_ERR_MSG(extack, "Invalid prefix length");
3761 		goto errout;
3762 	}
3763 
3764 #ifdef CONFIG_IPV6_SUBTREES
3765 	if (cfg->fc_src_len > 128) {
3766 		NL_SET_ERR_MSG(extack, "Invalid source address length");
3767 		goto errout;
3768 	}
3769 
3770 	if (cfg->fc_nh_id && cfg->fc_src_len) {
3771 		NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
3772 		goto errout;
3773 	}
3774 #else
3775 	if (cfg->fc_src_len) {
3776 		NL_SET_ERR_MSG(extack,
3777 			       "Specifying source address requires IPV6_SUBTREES to be enabled");
3778 		goto errout;
3779 	}
3780 #endif
3781 	return 0;
3782 errout:
3783 	return -EINVAL;
3784 }
3785 
3786 static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
3787 					       gfp_t gfp_flags,
3788 					       struct netlink_ext_ack *extack)
3789 {
3790 	struct net *net = cfg->fc_nlinfo.nl_net;
3791 	struct fib6_table *table;
3792 	struct fib6_info *rt;
3793 	int err;
3794 
3795 	if (cfg->fc_nlinfo.nlh &&
3796 	    !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
3797 		table = fib6_get_table(net, cfg->fc_table);
3798 		if (!table) {
3799 			pr_warn("NLM_F_CREATE should be specified when creating new route\n");
3800 			table = fib6_new_table(net, cfg->fc_table);
3801 		}
3802 	} else {
3803 		table = fib6_new_table(net, cfg->fc_table);
3804 	}
3805 	if (!table) {
3806 		err = -ENOBUFS;
3807 		goto err;
3808 	}
3809 
3810 	rt = fib6_info_alloc(gfp_flags, !cfg->fc_nh_id);
3811 	if (!rt) {
3812 		err = -ENOMEM;
3813 		goto err;
3814 	}
3815 
3816 	rt->fib6_metrics = ip_fib_metrics_init(cfg->fc_mx, cfg->fc_mx_len,
3817 					       extack);
3818 	if (IS_ERR(rt->fib6_metrics)) {
3819 		err = PTR_ERR(rt->fib6_metrics);
3820 		goto free;
3821 	}
3822 
3823 	if (cfg->fc_flags & RTF_ADDRCONF)
3824 		rt->dst_nocount = true;
3825 
3826 	if (cfg->fc_flags & RTF_EXPIRES)
3827 		fib6_set_expires(rt, jiffies +
3828 				 clock_t_to_jiffies(cfg->fc_expires));
3829 
3830 	if (cfg->fc_protocol == RTPROT_UNSPEC)
3831 		cfg->fc_protocol = RTPROT_BOOT;
3832 
3833 	rt->fib6_protocol = cfg->fc_protocol;
3834 	rt->fib6_table = table;
3835 	rt->fib6_metric = cfg->fc_metric;
3836 	rt->fib6_type = cfg->fc_type ? : RTN_UNICAST;
3837 	rt->fib6_flags = cfg->fc_flags & ~RTF_GATEWAY;
3838 
3839 	ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
3840 	rt->fib6_dst.plen = cfg->fc_dst_len;
3841 
3842 #ifdef CONFIG_IPV6_SUBTREES
3843 	ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len);
3844 	rt->fib6_src.plen = cfg->fc_src_len;
3845 #endif
3846 	return rt;
3847 free:
3848 	kfree(rt);
3849 err:
3850 	return ERR_PTR(err);
3851 }
3852 
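/* Attach the nexthop to a freshly created fib6_info: either take a
 * reference on the external nexthop named by fc_nh_id, or initialize
 * the embedded fib6_nh; also validate the preferred source address.
 */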
3853 static int ip6_route_info_create_nh(struct fib6_info *rt,
3854 				    struct fib6_config *cfg,
3855 				    gfp_t gfp_flags,
3856 				    struct netlink_ext_ack *extack)
3857 {
3858 	struct net *net = cfg->fc_nlinfo.nl_net;
3859 	struct fib6_nh *fib6_nh;
3860 	int err;
3861 
3862 	if (cfg->fc_nh_id) {
3863 		struct nexthop *nh;
3864 
3865 		rcu_read_lock();
3866 
3867 		nh = nexthop_find_by_id(net, cfg->fc_nh_id);
3868 		if (!nh) {
3869 			err = -EINVAL;
3870 			NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
3871 			goto out_free;
3872 		}
3873 
3874 		err = fib6_check_nexthop(nh, cfg, extack);
3875 		if (err)
3876 			goto out_free;
3877 
3878 		if (!nexthop_get(nh)) {
3879 			NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
3880 			err = -ENOENT;
3881 			goto out_free;
3882 		}
3883 
3884 		rt->nh = nh;
3885 		fib6_nh = nexthop_fib6_nh(rt->nh);
3886 
3887 		rcu_read_unlock();
3888 	} else {
3889 		int addr_type;
3890 
3891 		err = fib6_nh_init(net, rt->fib6_nh, cfg, gfp_flags, extack);
3892 		if (err)
3893 			goto out_release;
3894 
3895 		fib6_nh = rt->fib6_nh;
3896 
3897 		/* We cannot add true routes via loopback here, as they would
3898 		 * result in kernel looping; promote them to reject routes.
3899 		 */
3900 		addr_type = ipv6_addr_type(&cfg->fc_dst);
3901 		if (fib6_is_reject(cfg->fc_flags, rt->fib6_nh->fib_nh_dev,
3902 				   addr_type))
3903 			rt->fib6_flags = RTF_REJECT | RTF_NONEXTHOP;
3904 	}
3905 
3906 	if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
3907 		struct net_device *dev = fib6_nh->fib_nh_dev;
3908 
3909 		if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
3910 			NL_SET_ERR_MSG(extack, "Invalid source address");
3911 			err = -EINVAL;
3912 			goto out_release;
3913 		}
3914 		rt->fib6_prefsrc.addr = cfg->fc_prefsrc;
3915 		rt->fib6_prefsrc.plen = 128;
3916 	}
3917 
3918 	return 0;
3919 out_release:
3920 	fib6_info_release(rt);
3921 	return err;
3922 out_free:
3923 	rcu_read_unlock();
3924 	ip_fib_metrics_put(rt->fib6_metrics);
3925 	kfree(rt);
3926 	return err;
3927 }
3928 
3929 int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
3930 		  struct netlink_ext_ack *extack)
3931 {
3932 	struct fib6_info *rt;
3933 	int err;
3934 
3935 	err = fib6_config_validate(cfg, extack);
3936 	if (err)
3937 		return err;
3938 
3939 	rt = ip6_route_info_create(cfg, gfp_flags, extack);
3940 	if (IS_ERR(rt))
3941 		return PTR_ERR(rt);
3942 
3943 	err = ip6_route_info_create_nh(rt, cfg, gfp_flags, extack);
3944 	if (err)
3945 		return err;
3946 
3947 	err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
3948 	fib6_info_release(rt);
3949 
3950 	return err;
3951 }
3952 
3953 static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info)
3954 {
3955 	struct net *net = info->nl_net;
3956 	struct fib6_table *table;
3957 	int err;
3958 
3959 	if (rt == net->ipv6.fib6_null_entry) {
3960 		err = -ENOENT;
3961 		goto out;
3962 	}
3963 
3964 	table = rt->fib6_table;
3965 	spin_lock_bh(&table->tb6_lock);
3966 	err = fib6_del(rt, info);
3967 	spin_unlock_bh(&table->tb6_lock);
3968 
3969 out:
3970 	fib6_info_release(rt);
3971 	return err;
3972 }
3973 
3974 int ip6_del_rt(struct net *net, struct fib6_info *rt, bool skip_notify)
3975 {
3976 	struct nl_info info = {
3977 		.nl_net = net,
3978 		.skip_notify = skip_notify
3979 	};
3980 
3981 	return __ip6_del_rt(rt, &info);
3982 }
3983 
3984 static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg)
3985 {
3986 	struct nl_info *info = &cfg->fc_nlinfo;
3987 	struct net *net = info->nl_net;
3988 	struct sk_buff *skb = NULL;
3989 	struct fib6_table *table;
3990 	int err = -ENOENT;
3991 
3992 	if (rt == net->ipv6.fib6_null_entry)
3993 		goto out_put;
3994 	table = rt->fib6_table;
3995 	spin_lock_bh(&table->tb6_lock);
3996 
3997 	if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) {
3998 		struct fib6_info *sibling, *next_sibling;
3999 		struct fib6_node *fn;
4000 
4001 		/* prefer to send a single notification with all hops */
4002 		skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
4003 		if (skb) {
4004 			u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
4005 
4006 			if (rt6_fill_node(net, skb, rt, NULL,
4007 					  NULL, NULL, 0, RTM_DELROUTE,
4008 					  info->portid, seq, 0) < 0) {
4009 				kfree_skb(skb);
4010 				skb = NULL;
4011 			} else
4012 				info->skip_notify = 1;
4013 		}
4014 
4015 		/* 'rt' points to the first sibling route. If it is not the
4016 		 * leaf, then we do not need to send a notification. Otherwise,
4017 		 * we need to check if the last sibling has a next route or not
4018 		 * and emit a replace or delete notification, respectively.
4019 		 */
4020 		info->skip_notify_kernel = 1;
4021 		fn = rcu_dereference_protected(rt->fib6_node,
4022 					    lockdep_is_held(&table->tb6_lock));
4023 		if (rcu_access_pointer(fn->leaf) == rt) {
4024 			struct fib6_info *last_sibling, *replace_rt;
4025 
4026 			last_sibling = list_last_entry(&rt->fib6_siblings,
4027 						       struct fib6_info,
4028 						       fib6_siblings);
4029 			replace_rt = rcu_dereference_protected(
4030 					    last_sibling->fib6_next,
4031 					    lockdep_is_held(&table->tb6_lock));
4032 			if (replace_rt)
4033 				call_fib6_entry_notifiers_replace(net,
4034 								  replace_rt);
4035 			else
4036 				call_fib6_multipath_entry_notifiers(net,
4037 						       FIB_EVENT_ENTRY_DEL,
4038 						       rt, rt->fib6_nsiblings,
4039 						       NULL);
4040 		}
4041 		list_for_each_entry_safe(sibling, next_sibling,
4042 					 &rt->fib6_siblings,
4043 					 fib6_siblings) {
4044 			err = fib6_del(sibling, info);
4045 			if (err)
4046 				goto out_unlock;
4047 		}
4048 	}
4049 
4050 	err = fib6_del(rt, info);
4051 out_unlock:
4052 	spin_unlock_bh(&table->tb6_lock);
4053 out_put:
4054 	fib6_info_release(rt);
4055 
4056 	if (skb) {
4057 		rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
4058 			    info->nlh, gfp_any());
4059 	}
4060 	return err;
4061 }
4062 
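/* Remove one cached (exception) route, honouring the optional device
 * and gateway filters from the config.
 */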
4063 static int __ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
4064 {
4065 	int rc = -ESRCH;
4066 
4067 	if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex)
4068 		goto out;
4069 
4070 	if (cfg->fc_flags & RTF_GATEWAY &&
4071 	    !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
4072 		goto out;
4073 
4074 	rc = rt6_remove_exception_rt(rt);
4075 out:
4076 	return rc;
4077 }
4078 
4079 static int ip6_del_cached_rt(struct fib6_config *cfg, struct fib6_info *rt,
4080 			     struct fib6_nh *nh)
4081 {
4082 	struct fib6_result res = {
4083 		.f6i = rt,
4084 		.nh = nh,
4085 	};
4086 	struct rt6_info *rt_cache;
4087 
4088 	rt_cache = rt6_find_cached_rt(&res, &cfg->fc_dst, &cfg->fc_src);
4089 	if (rt_cache)
4090 		return __ip6_del_cached_rt(rt_cache, cfg);
4091 
4092 	return 0;
4093 }
4094 
4095 struct fib6_nh_del_cached_rt_arg {
4096 	struct fib6_config *cfg;
4097 	struct fib6_info *f6i;
4098 };
4099 
4100 static int fib6_nh_del_cached_rt(struct fib6_nh *nh, void *_arg)
4101 {
4102 	struct fib6_nh_del_cached_rt_arg *arg = _arg;
4103 	int rc;
4104 
4105 	rc = ip6_del_cached_rt(arg->cfg, arg->f6i, nh);
4106 	return rc != -ESRCH ? rc : 0;
4107 }
4108 
4109 static int ip6_del_cached_rt_nh(struct fib6_config *cfg, struct fib6_info *f6i)
4110 {
4111 	struct fib6_nh_del_cached_rt_arg arg = {
4112 		.cfg = cfg,
4113 		.f6i = f6i
4114 	};
4115 
4116 	return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_del_cached_rt, &arg);
4117 }
4118 
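/* Delete routes matching the config: locate the fib6 node for the
 * prefix, then filter on nexthop id, metric, protocol, device and
 * gateway. With RTF_CACHE only matching exception routes are removed.
 */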
4119 static int ip6_route_del(struct fib6_config *cfg,
4120 			 struct netlink_ext_ack *extack)
4121 {
4122 	struct fib6_table *table;
4123 	struct fib6_info *rt;
4124 	struct fib6_node *fn;
4125 	int err = -ESRCH;
4126 
4127 	table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
4128 	if (!table) {
4129 		NL_SET_ERR_MSG(extack, "FIB table does not exist");
4130 		return err;
4131 	}
4132 
4133 	rcu_read_lock();
4134 
4135 	fn = fib6_locate(&table->tb6_root,
4136 			 &cfg->fc_dst, cfg->fc_dst_len,
4137 			 &cfg->fc_src, cfg->fc_src_len,
4138 			 !(cfg->fc_flags & RTF_CACHE));
4139 
4140 	if (fn) {
4141 		for_each_fib6_node_rt_rcu(fn) {
4142 			struct fib6_nh *nh;
4143 
4144 			if (rt->nh && cfg->fc_nh_id &&
4145 			    rt->nh->id != cfg->fc_nh_id)
4146 				continue;
4147 
4148 			if (cfg->fc_flags & RTF_CACHE) {
4149 				int rc = 0;
4150 
4151 				if (rt->nh) {
4152 					rc = ip6_del_cached_rt_nh(cfg, rt);
4153 				} else if (cfg->fc_nh_id) {
4154 					continue;
4155 				} else {
4156 					nh = rt->fib6_nh;
4157 					rc = ip6_del_cached_rt(cfg, rt, nh);
4158 				}
4159 				if (rc != -ESRCH) {
4160 					rcu_read_unlock();
4161 					return rc;
4162 				}
4163 				continue;
4164 			}
4165 
4166 			if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
4167 				continue;
4168 			if (cfg->fc_protocol &&
4169 			    cfg->fc_protocol != rt->fib6_protocol)
4170 				continue;
4171 
4172 			if (rt->nh) {
4173 				if (!fib6_info_hold_safe(rt))
4174 					continue;
4175 
4176 				err =  __ip6_del_rt(rt, &cfg->fc_nlinfo);
4177 				break;
4178 			}
4179 			if (cfg->fc_nh_id)
4180 				continue;
4181 
4182 			nh = rt->fib6_nh;
4183 			if (cfg->fc_ifindex &&
4184 			    (!nh->fib_nh_dev ||
4185 			     nh->fib_nh_dev->ifindex != cfg->fc_ifindex))
4186 				continue;
4187 			if (cfg->fc_flags & RTF_GATEWAY &&
4188 			    !ipv6_addr_equal(&cfg->fc_gateway, &nh->fib_nh_gw6))
4189 				continue;
4190 			if (!fib6_info_hold_safe(rt))
4191 				continue;
4192 
4193 			/* if a gateway was specified, only delete that one hop */
4194 			if (cfg->fc_flags & RTF_GATEWAY)
4195 				err = __ip6_del_rt(rt, &cfg->fc_nlinfo);
4196 			else
4197 				err = __ip6_del_rt_siblings(rt, cfg);
4198 			break;
4199 		}
4200 	}
4201 	rcu_read_unlock();
4202 
4203 	return err;
4204 }
4205 
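/* Handle an ICMPv6 redirect: validate the message, update the
 * neighbour cache and install a cached (RTF_CACHE) exception route
 * through the new nexthop.
 */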
4206 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
4207 {
4208 	struct netevent_redirect netevent;
4209 	struct rt6_info *rt, *nrt = NULL;
4210 	struct fib6_result res = {};
4211 	struct ndisc_options ndopts;
4212 	struct inet6_dev *in6_dev;
4213 	struct neighbour *neigh;
4214 	struct rd_msg *msg;
4215 	int optlen, on_link;
4216 	u8 *lladdr;
4217 
4218 	optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
4219 	optlen -= sizeof(*msg);
4220 
4221 	if (optlen < 0) {
4222 		net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
4223 		return;
4224 	}
4225 
4226 	msg = (struct rd_msg *)icmp6_hdr(skb);
4227 
4228 	if (ipv6_addr_is_multicast(&msg->dest)) {
4229 		net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
4230 		return;
4231 	}
4232 
4233 	on_link = 0;
4234 	if (ipv6_addr_equal(&msg->dest, &msg->target)) {
4235 		on_link = 1;
4236 	} else if (ipv6_addr_type(&msg->target) !=
4237 		   (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
4238 		net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
4239 		return;
4240 	}
4241 
4242 	in6_dev = __in6_dev_get(skb->dev);
4243 	if (!in6_dev)
4244 		return;
4245 	if (READ_ONCE(in6_dev->cnf.forwarding) ||
4246 	    !READ_ONCE(in6_dev->cnf.accept_redirects))
4247 		return;
4248 
4249 	/* RFC2461 8.1:
4250 	 *	The IP source address of the Redirect MUST be the same as the current
4251 	 *	first-hop router for the specified ICMP Destination Address.
4252 	 */
4253 
4254 	if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
4255 		net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
4256 		return;
4257 	}
4258 
4259 	lladdr = NULL;
4260 	if (ndopts.nd_opts_tgt_lladdr) {
4261 		lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
4262 					     skb->dev);
4263 		if (!lladdr) {
4264 			net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
4265 			return;
4266 		}
4267 	}
4268 
4269 	rt = dst_rt6_info(dst);
4270 	if (rt->rt6i_flags & RTF_REJECT) {
4271 		net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
4272 		return;
4273 	}
4274 
4275 	/* Redirect received -> path was valid.
4276 	 * Redirects are sent only in response to data packets,
4277 	 * so this nexthop is apparently reachable. --ANK
4278 	 */
4279 	dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
4280 
4281 	neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
4282 	if (!neigh)
4283 		return;
4284 
4285 	/*
4286 	 *	We have finally decided to accept it.
4287 	 */
4288 
4289 	ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
4290 		     NEIGH_UPDATE_F_WEAK_OVERRIDE|
4291 		     NEIGH_UPDATE_F_OVERRIDE|
4292 		     (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
4293 				     NEIGH_UPDATE_F_ISROUTER)),
4294 		     NDISC_REDIRECT, &ndopts);
4295 
4296 	rcu_read_lock();
4297 	res.f6i = rcu_dereference(rt->from);
4298 	if (!res.f6i)
4299 		goto out;
4300 
4301 	if (res.f6i->nh) {
4302 		struct fib6_nh_match_arg arg = {
4303 			.dev = dst_dev_rcu(dst),
4304 			.gw = &rt->rt6i_gateway,
4305 		};
4306 
4307 		nexthop_for_each_fib6_nh(res.f6i->nh,
4308 					 fib6_nh_find_match, &arg);
4309 
4310 		/* The fib6_info uses a nexthop that has no fib6_nh
4311 		 * using dst->dev; this should be impossible.
4312 		 */
4313 		if (!arg.match)
4314 			goto out;
4315 		res.nh = arg.match;
4316 	} else {
4317 		res.nh = res.f6i->fib6_nh;
4318 	}
4319 
4320 	res.fib6_flags = res.f6i->fib6_flags;
4321 	res.fib6_type = res.f6i->fib6_type;
4322 	nrt = ip6_rt_cache_alloc(&res, &msg->dest, NULL);
4323 	if (!nrt)
4324 		goto out;
4325 
4326 	nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
4327 	if (on_link)
4328 		nrt->rt6i_flags &= ~RTF_GATEWAY;
4329 
4330 	nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
4331 
4332 	/* rt6_insert_exception() will take care of duplicated exceptions */
4333 	if (rt6_insert_exception(nrt, &res)) {
4334 		dst_release_immediate(&nrt->dst);
4335 		goto out;
4336 	}
4337 
4338 	netevent.old = &rt->dst;
4339 	netevent.new = &nrt->dst;
4340 	netevent.daddr = &msg->dest;
4341 	netevent.neigh = neigh;
4342 	call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
4343 
4344 out:
4345 	rcu_read_unlock();
4346 	neigh_release(neigh);
4347 }
4348 
4349 #ifdef CONFIG_IPV6_ROUTE_INFO
4350 static struct fib6_info *rt6_get_route_info(struct net *net,
4351 					   const struct in6_addr *prefix, int prefixlen,
4352 					   const struct in6_addr *gwaddr,
4353 					   struct net_device *dev)
4354 {
4355 	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
4356 	int ifindex = dev->ifindex;
4357 	struct fib6_node *fn;
4358 	struct fib6_info *rt = NULL;
4359 	struct fib6_table *table;
4360 
4361 	table = fib6_get_table(net, tb_id);
4362 	if (!table)
4363 		return NULL;
4364 
4365 	rcu_read_lock();
4366 	fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
4367 	if (!fn)
4368 		goto out;
4369 
4370 	for_each_fib6_node_rt_rcu(fn) {
4371 		/* these routes do not use nexthops */
4372 		if (rt->nh)
4373 			continue;
4374 		if (rt->fib6_nh->fib_nh_dev->ifindex != ifindex)
4375 			continue;
4376 		if (!(rt->fib6_flags & RTF_ROUTEINFO) ||
4377 		    !rt->fib6_nh->fib_nh_gw_family)
4378 			continue;
4379 		if (!ipv6_addr_equal(&rt->fib6_nh->fib_nh_gw6, gwaddr))
4380 			continue;
4381 		if (!fib6_info_hold_safe(rt))
4382 			continue;
4383 		break;
4384 	}
4385 out:
4386 	rcu_read_unlock();
4387 	return rt;
4388 }
4389 
4390 static struct fib6_info *rt6_add_route_info(struct net *net,
4391 					   const struct in6_addr *prefix, int prefixlen,
4392 					   const struct in6_addr *gwaddr,
4393 					   struct net_device *dev,
4394 					   unsigned int pref)
4395 {
4396 	struct fib6_config cfg = {
4397 		.fc_metric	= IP6_RT_PRIO_USER,
4398 		.fc_ifindex	= dev->ifindex,
4399 		.fc_dst_len	= prefixlen,
4400 		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
4401 				  RTF_UP | RTF_PREF(pref),
4402 		.fc_protocol = RTPROT_RA,
4403 		.fc_type = RTN_UNICAST,
4404 		.fc_nlinfo.portid = 0,
4405 		.fc_nlinfo.nlh = NULL,
4406 		.fc_nlinfo.nl_net = net,
4407 	};
4408 
4409 	cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
4410 	cfg.fc_dst = *prefix;
4411 	cfg.fc_gateway = *gwaddr;
4412 
4413 	/* We should treat it as a default route if prefix length is 0. */
4414 	if (!prefixlen)
4415 		cfg.fc_flags |= RTF_DEFAULT;
4416 
4417 	ip6_route_add(&cfg, GFP_ATOMIC, NULL);
4418 
4419 	return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
4420 }
4421 #endif
4422 
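/* Find the RA-learned default route via @addr on @dev and take a
 * reference on it, if one exists.
 */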
4423 struct fib6_info *rt6_get_dflt_router(struct net *net,
4424 				     const struct in6_addr *addr,
4425 				     struct net_device *dev)
4426 {
4427 	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
4428 	struct fib6_info *rt;
4429 	struct fib6_table *table;
4430 
4431 	table = fib6_get_table(net, tb_id);
4432 	if (!table)
4433 		return NULL;
4434 
4435 	rcu_read_lock();
4436 	for_each_fib6_node_rt_rcu(&table->tb6_root) {
4437 		struct fib6_nh *nh;
4438 
4439 		/* RA routes do not use nexthops */
4440 		if (rt->nh)
4441 			continue;
4442 
4443 		nh = rt->fib6_nh;
4444 		if (dev == nh->fib_nh_dev &&
4445 		    ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
4446 		    ipv6_addr_equal(&nh->fib_nh_gw6, addr))
4447 			break;
4448 	}
4449 	if (rt && !fib6_info_hold_safe(rt))
4450 		rt = NULL;
4451 	rcu_read_unlock();
4452 	return rt;
4453 }
4454 
4455 struct fib6_info *rt6_add_dflt_router(struct net *net,
4456 				     const struct in6_addr *gwaddr,
4457 				     struct net_device *dev,
4458 				     unsigned int pref,
4459 				     u32 defrtr_usr_metric,
4460 				     int lifetime)
4461 {
4462 	struct fib6_config cfg = {
4463 		.fc_table	= l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
4464 		.fc_metric	= defrtr_usr_metric,
4465 		.fc_ifindex	= dev->ifindex,
4466 		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
4467 				  RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
4468 		.fc_protocol = RTPROT_RA,
4469 		.fc_type = RTN_UNICAST,
4470 		.fc_nlinfo.portid = 0,
4471 		.fc_nlinfo.nlh = NULL,
4472 		.fc_nlinfo.nl_net = net,
4473 		.fc_expires = jiffies_to_clock_t(lifetime * HZ),
4474 	};
4475 
4476 	cfg.fc_gateway = *gwaddr;
4477 
4478 	if (!ip6_route_add(&cfg, GFP_ATOMIC, NULL)) {
4479 		struct fib6_table *table;
4480 
4481 		table = fib6_get_table(dev_net(dev), cfg.fc_table);
4482 		if (table)
4483 			table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
4484 	}
4485 
4486 	return rt6_get_dflt_router(net, gwaddr, dev);
4487 }
4488 
4489 static void __rt6_purge_dflt_routers(struct net *net,
4490 				     struct fib6_table *table)
4491 {
4492 	struct fib6_info *rt;
4493 
4494 restart:
4495 	rcu_read_lock();
4496 	for_each_fib6_node_rt_rcu(&table->tb6_root) {
4497 		struct net_device *dev = fib6_info_nh_dev(rt);
4498 		struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
4499 
4500 		if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
4501 		    (!idev || idev->cnf.accept_ra != 2) &&
4502 		    fib6_info_hold_safe(rt)) {
4503 			rcu_read_unlock();
4504 			ip6_del_rt(net, rt, false);
4505 			goto restart;
4506 		}
4507 	}
4508 	rcu_read_unlock();
4509 
4510 	table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
4511 }
4512 
4513 void rt6_purge_dflt_routers(struct net *net)
4514 {
4515 	struct fib6_table *table;
4516 	struct hlist_head *head;
4517 	unsigned int h;
4518 
4519 	rcu_read_lock();
4520 
4521 	for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
4522 		head = &net->ipv6.fib_table_hash[h];
4523 		hlist_for_each_entry_rcu(table, head, tb6_hlist) {
4524 			if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
4525 				__rt6_purge_dflt_routers(net, table);
4526 		}
4527 	}
4528 
4529 	rcu_read_unlock();
4530 }
4531 
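/* Translate the legacy ioctl in6_rtmsg into a fib6_config. */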
4532 static void rtmsg_to_fib6_config(struct net *net,
4533 				 struct in6_rtmsg *rtmsg,
4534 				 struct fib6_config *cfg)
4535 {
4536 	*cfg = (struct fib6_config){
4537 		.fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
4538 			 : RT6_TABLE_MAIN,
4539 		.fc_ifindex = rtmsg->rtmsg_ifindex,
4540 		.fc_metric = rtmsg->rtmsg_metric,
4541 		.fc_expires = rtmsg->rtmsg_info,
4542 		.fc_dst_len = rtmsg->rtmsg_dst_len,
4543 		.fc_src_len = rtmsg->rtmsg_src_len,
4544 		.fc_flags = rtmsg->rtmsg_flags,
4545 		.fc_type = rtmsg->rtmsg_type,
4546 
4547 		.fc_nlinfo.nl_net = net,
4548 
4549 		.fc_dst = rtmsg->rtmsg_dst,
4550 		.fc_src = rtmsg->rtmsg_src,
4551 		.fc_gateway = rtmsg->rtmsg_gateway,
4552 	};
4553 }
4554 
4555 int ipv6_route_ioctl(struct net *net, unsigned int cmd, struct in6_rtmsg *rtmsg)
4556 {
4557 	struct fib6_config cfg;
4558 	int err;
4559 
4560 	if (cmd != SIOCADDRT && cmd != SIOCDELRT)
4561 		return -EINVAL;
4562 	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
4563 		return -EPERM;
4564 
4565 	rtmsg_to_fib6_config(net, rtmsg, &cfg);
4566 
4567 	switch (cmd) {
4568 	case SIOCADDRT:
4569 		/* Only apply the default fc_metric when adding a route */
4570 		if (cfg.fc_metric == 0)
4571 			cfg.fc_metric = IP6_RT_PRIO_USER;
4572 		err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
4573 		break;
4574 	case SIOCDELRT:
4575 		err = ip6_route_del(&cfg, NULL);
4576 		break;
4577 	}
4578 
4579 	return err;
4580 }
4581 
4582 /*
4583  *	Drop the packet on the floor
4584  */
4585 
4586 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
4587 {
4588 	struct dst_entry *dst = skb_dst(skb);
4589 	struct net_device *dev = dst_dev(dst);
4590 	struct net *net = dev_net(dev);
4591 	struct inet6_dev *idev;
4592 	SKB_DR(reason);
4593 	int type;
4594 
4595 	if (netif_is_l3_master(skb->dev) ||
4596 	    dev == net->loopback_dev)
4597 		idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
4598 	else
4599 		idev = ip6_dst_idev(dst);
4600 
4601 	switch (ipstats_mib_noroutes) {
4602 	case IPSTATS_MIB_INNOROUTES:
4603 		type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
4604 		if (type == IPV6_ADDR_ANY) {
4605 			SKB_DR_SET(reason, IP_INADDRERRORS);
4606 			IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
4607 			break;
4608 		}
4609 		SKB_DR_SET(reason, IP_INNOROUTES);
4610 		fallthrough;
4611 	case IPSTATS_MIB_OUTNOROUTES:
4612 		SKB_DR_OR(reason, IP_OUTNOROUTES);
4613 		IP6_INC_STATS(net, idev, ipstats_mib_noroutes);
4614 		break;
4615 	}
4616 
4617 	/* Start over by dropping the dst for the l3mdev case */
4618 	if (netif_is_l3_master(skb->dev))
4619 		skb_dst_drop(skb);
4620 
4621 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
4622 	kfree_skb_reason(skb, reason);
4623 	return 0;
4624 }
4625 
4626 static int ip6_pkt_discard(struct sk_buff *skb)
4627 {
4628 	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
4629 }
4630 
4631 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
4632 {
4633 	skb->dev = skb_dst_dev(skb);
4634 	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
4635 }
4636 
4637 static int ip6_pkt_prohibit(struct sk_buff *skb)
4638 {
4639 	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
4640 }
4641 
4642 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
4643 {
4644 	skb->dev = skb_dst_dev(skb);
4645 	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
4646 }
4647 
4648 /*
4649  *	Allocate a dst for a local (unicast / anycast) address.
4650  */
4651 
4652 struct fib6_info *addrconf_f6i_alloc(struct net *net,
4653 				     struct inet6_dev *idev,
4654 				     const struct in6_addr *addr,
4655 				     bool anycast, gfp_t gfp_flags,
4656 				     struct netlink_ext_ack *extack)
4657 {
4658 	struct fib6_config cfg = {
4659 		.fc_table = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL,
4660 		.fc_ifindex = idev->dev->ifindex,
4661 		.fc_flags = RTF_UP | RTF_NONEXTHOP,
4662 		.fc_dst = *addr,
4663 		.fc_dst_len = 128,
4664 		.fc_protocol = RTPROT_KERNEL,
4665 		.fc_nlinfo.nl_net = net,
4666 		.fc_ignore_dev_down = true,
4667 	};
4668 	struct fib6_info *f6i;
4669 	int err;
4670 
4671 	if (anycast) {
4672 		cfg.fc_type = RTN_ANYCAST;
4673 		cfg.fc_flags |= RTF_ANYCAST;
4674 	} else {
4675 		cfg.fc_type = RTN_LOCAL;
4676 		cfg.fc_flags |= RTF_LOCAL;
4677 	}
4678 
4679 	f6i = ip6_route_info_create(&cfg, gfp_flags, extack);
4680 	if (IS_ERR(f6i))
4681 		return f6i;
4682 
4683 	err = ip6_route_info_create_nh(f6i, &cfg, gfp_flags, extack);
4684 	if (err)
4685 		return ERR_PTR(err);
4686 
4687 	f6i->dst_nocount = true;
4688 
4689 	if (!anycast &&
4690 	    (READ_ONCE(net->ipv6.devconf_all->disable_policy) ||
4691 	     READ_ONCE(idev->cnf.disable_policy)))
4692 		f6i->dst_nopolicy = true;
4693 
4694 	return f6i;
4695 }
4696 
4697 /* remove a deleted IP address from prefsrc entries */
4698 struct arg_dev_net_ip {
4699 	struct net *net;
4700 	struct in6_addr *addr;
4701 };
4702 
4703 static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
4704 {
4705 	struct net *net = ((struct arg_dev_net_ip *)arg)->net;
4706 	struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
4707 
4708 	if (!rt->nh &&
4709 	    rt != net->ipv6.fib6_null_entry &&
4710 	    ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr) &&
4711 	    !ipv6_chk_addr(net, addr, rt->fib6_nh->fib_nh_dev, 0)) {
4712 		spin_lock_bh(&rt6_exception_lock);
4713 		/* remove prefsrc entry */
4714 		rt->fib6_prefsrc.plen = 0;
4715 		spin_unlock_bh(&rt6_exception_lock);
4716 	}
4717 	return 0;
4718 }
4719 
4720 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
4721 {
4722 	struct net *net = dev_net(ifp->idev->dev);
4723 	struct arg_dev_net_ip adni = {
4724 		.net = net,
4725 		.addr = &ifp->addr,
4726 	};
4727 	fib6_clean_all(net, fib6_remove_prefsrc, &adni);
4728 }
4729 
4730 #define RTF_RA_ROUTER		(RTF_ADDRCONF | RTF_DEFAULT)
4731 
4732 /* Remove routers and update dst entries when a gateway turns into a host. */
4733 static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
4734 {
4735 	struct in6_addr *gateway = (struct in6_addr *)arg;
4736 	struct fib6_nh *nh;
4737 
4738 	/* RA routes do not use nexthops */
4739 	if (rt->nh)
4740 		return 0;
4741 
4742 	nh = rt->fib6_nh;
4743 	if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
4744 	    nh->fib_nh_gw_family && ipv6_addr_equal(gateway, &nh->fib_nh_gw6))
4745 		return -1;
4746 
4747 	/* Further clean up cached routes in the exception table.
4748 	 * This is needed because a cached route may have a different
4749 	 * gateway than its 'parent' in the case of an IP redirect.
4750 	 */
4751 	fib6_nh_exceptions_clean_tohost(nh, gateway);
4752 
4753 	return 0;
4754 }
4755 
4756 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
4757 {
4758 	fib6_clean_all(net, fib6_clean_tohost, gateway);
4759 }
4760 
4761 struct arg_netdev_event {
4762 	const struct net_device *dev;
4763 	union {
4764 		unsigned char nh_flags;
4765 		unsigned long event;
4766 	};
4767 };
4768 
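/* Return the first route in rt's fib6 node that can be an ECMP
 * sibling of rt: same metric and qualifying for ECMP.
 */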
4769 static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
4770 {
4771 	struct fib6_info *iter;
4772 	struct fib6_node *fn;
4773 
4774 	fn = rcu_dereference_protected(rt->fib6_node,
4775 			lockdep_is_held(&rt->fib6_table->tb6_lock));
4776 	iter = rcu_dereference_protected(fn->leaf,
4777 			lockdep_is_held(&rt->fib6_table->tb6_lock));
4778 	while (iter) {
4779 		if (iter->fib6_metric == rt->fib6_metric &&
4780 		    rt6_qualify_for_ecmp(iter))
4781 			return iter;
4782 		iter = rcu_dereference_protected(iter->fib6_next,
4783 				lockdep_is_held(&rt->fib6_table->tb6_lock));
4784 	}
4785 
4786 	return NULL;
4787 }
4788 
4789 /* only called for fib entries with builtin fib6_nh */
4790 static bool rt6_is_dead(const struct fib6_info *rt)
4791 {
4792 	if (rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD ||
4793 	    (rt->fib6_nh->fib_nh_flags & RTNH_F_LINKDOWN &&
4794 	     ip6_ignore_linkdown(rt->fib6_nh->fib_nh_dev)))
4795 		return true;
4796 
4797 	return false;
4798 }
4799 
4800 static int rt6_multipath_total_weight(const struct fib6_info *rt)
4801 {
4802 	struct fib6_info *iter;
4803 	int total = 0;
4804 
4805 	if (!rt6_is_dead(rt))
4806 		total += rt->fib6_nh->fib_nh_weight;
4807 
4808 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
4809 		if (!rt6_is_dead(iter))
4810 			total += iter->fib6_nh->fib_nh_weight;
4811 	}
4812 
4813 	return total;
4814 }
4815 
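/* Set the nexthop's upper bound for multipath selection: the running
 * weight sum scaled into the 31-bit hash space, or -1 so a dead
 * nexthop is never chosen.
 */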
4816 static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total)
4817 {
4818 	int upper_bound = -1;
4819 
4820 	if (!rt6_is_dead(rt)) {
4821 		*weight += rt->fib6_nh->fib_nh_weight;
4822 		upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
4823 						    total) - 1;
4824 	}
4825 	atomic_set(&rt->fib6_nh->fib_nh_upper_bound, upper_bound);
4826 }
4827 
4828 static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
4829 {
4830 	struct fib6_info *iter;
4831 	int weight = 0;
4832 
4833 	rt6_upper_bound_set(rt, &weight, total);
4834 
4835 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4836 		rt6_upper_bound_set(iter, &weight, total);
4837 }
4838 
4839 void rt6_multipath_rebalance(struct fib6_info *rt)
4840 {
4841 	struct fib6_info *first;
4842 	int total;
4843 
4844 	/* In case the entire multipath route was marked for flushing,
4845 	 * then there is no need to rebalance upon the removal of every
4846 	 * sibling route.
4847 	 */
4848 	if (!rt->fib6_nsiblings || rt->should_flush)
4849 		return;
4850 
4851 	/* During lookup routes are evaluated in order, so we need to
4852 	 * make sure upper bounds are assigned from the first sibling
4853 	 * onwards.
4854 	 */
4855 	first = rt6_multipath_first_sibling(rt);
4856 	if (WARN_ON_ONCE(!first))
4857 		return;
4858 
4859 	total = rt6_multipath_total_weight(first);
4860 	rt6_multipath_upper_bound_set(first, total);
4861 }
4862 
4863 static int fib6_ifup(struct fib6_info *rt, void *p_arg)
4864 {
4865 	const struct arg_netdev_event *arg = p_arg;
4866 	struct net *net = dev_net(arg->dev);
4867 
4868 	if (rt != net->ipv6.fib6_null_entry && !rt->nh &&
4869 	    rt->fib6_nh->fib_nh_dev == arg->dev) {
4870 		rt->fib6_nh->fib_nh_flags &= ~arg->nh_flags;
4871 		fib6_update_sernum_upto_root(net, rt);
4872 		rt6_multipath_rebalance(rt);
4873 	}
4874 
4875 	return 0;
4876 }
4877 
4878 void rt6_sync_up(struct net_device *dev, unsigned char nh_flags)
4879 {
4880 	struct arg_netdev_event arg = {
4881 		.dev = dev,
4882 		{
4883 			.nh_flags = nh_flags,
4884 		},
4885 	};
4886 
4887 	if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
4888 		arg.nh_flags |= RTNH_F_LINKDOWN;
4889 
4890 	fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
4891 }
4892 
4893 /* only called for fib entries with inline fib6_nh */
4894 static bool rt6_multipath_uses_dev(const struct fib6_info *rt,
4895 				   const struct net_device *dev)
4896 {
4897 	struct fib6_info *iter;
4898 
4899 	if (rt->fib6_nh->fib_nh_dev == dev)
4900 		return true;
4901 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4902 		if (iter->fib6_nh->fib_nh_dev == dev)
4903 			return true;
4904 
4905 	return false;
4906 }
4907 
4908 static void rt6_multipath_flush(struct fib6_info *rt)
4909 {
4910 	struct fib6_info *iter;
4911 
4912 	rt->should_flush = 1;
4913 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4914 		iter->should_flush = 1;
4915 }
4916 
4917 static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt,
4918 					     const struct net_device *down_dev)
4919 {
4920 	struct fib6_info *iter;
4921 	unsigned int dead = 0;
4922 
4923 	if (rt->fib6_nh->fib_nh_dev == down_dev ||
4924 	    rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4925 		dead++;
4926 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4927 		if (iter->fib6_nh->fib_nh_dev == down_dev ||
4928 		    iter->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4929 			dead++;
4930 
4931 	return dead;
4932 }
4933 
4934 static void rt6_multipath_nh_flags_set(struct fib6_info *rt,
4935 				       const struct net_device *dev,
4936 				       unsigned char nh_flags)
4937 {
4938 	struct fib6_info *iter;
4939 
4940 	if (rt->fib6_nh->fib_nh_dev == dev)
4941 		rt->fib6_nh->fib_nh_flags |= nh_flags;
4942 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4943 		if (iter->fib6_nh->fib_nh_dev == dev)
4944 			iter->fib6_nh->fib_nh_flags |= nh_flags;
4945 }
4946 
4947 /* called with write lock held for the table containing rt */
4948 static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
4949 {
4950 	const struct arg_netdev_event *arg = p_arg;
4951 	const struct net_device *dev = arg->dev;
4952 	struct net *net = dev_net(dev);
4953 
4954 	if (rt == net->ipv6.fib6_null_entry || rt->nh)
4955 		return 0;
4956 
4957 	switch (arg->event) {
4958 	case NETDEV_UNREGISTER:
4959 		return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4960 	case NETDEV_DOWN:
4961 		if (rt->should_flush)
4962 			return -1;
4963 		if (!rt->fib6_nsiblings)
4964 			return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4965 		if (rt6_multipath_uses_dev(rt, dev)) {
4966 			unsigned int count;
4967 
4968 			count = rt6_multipath_dead_count(rt, dev);
4969 			if (rt->fib6_nsiblings + 1 == count) {
4970 				rt6_multipath_flush(rt);
4971 				return -1;
4972 			}
4973 			rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
4974 						   RTNH_F_LINKDOWN);
4975 			fib6_update_sernum(net, rt);
4976 			rt6_multipath_rebalance(rt);
4977 		}
4978 		return -2;
4979 	case NETDEV_CHANGE:
4980 		if (rt->fib6_nh->fib_nh_dev != dev ||
4981 		    rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
4982 			break;
4983 		rt->fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
4984 		rt6_multipath_rebalance(rt);
4985 		break;
4986 	}
4987 
4988 	return 0;
4989 }
4990 
4991 void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
4992 {
4993 	struct arg_netdev_event arg = {
4994 		.dev = dev,
4995 		{
4996 			.event = event,
4997 		},
4998 	};
4999 	struct net *net = dev_net(dev);
5000 
5001 	if (net->ipv6.sysctl.skip_notify_on_dev_down)
5002 		fib6_clean_all_skip_notify(net, fib6_ifdown, &arg);
5003 	else
5004 		fib6_clean_all(net, fib6_ifdown, &arg);
5005 }
5006 
5007 void rt6_disable_ip(struct net_device *dev, unsigned long event)
5008 {
5009 	rt6_sync_down_dev(dev, event);
5010 	rt6_uncached_list_flush_dev(dev);
5011 	neigh_ifdown(&nd_tbl, dev);
5012 }
5013 
5014 struct rt6_mtu_change_arg {
5015 	struct net_device *dev;
5016 	unsigned int mtu;
5017 	struct fib6_info *f6i;
5018 };
5019 
5020 static int fib6_nh_mtu_change(struct fib6_nh *nh, void *_arg)
5021 {
5022 	struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *)_arg;
5023 	struct fib6_info *f6i = arg->f6i;
5024 
5025 	/* For an administrative MTU increase, there is no way to discover
5026 	 * an IPv6 PMTU increase, so the PMTU must be updated here.
5027 	 * Since RFC 1981 does not cover administrative MTU increases,
5028 	 * updating the PMTU on increase is a MUST (e.g. jumbo frames).
5029 	 */
5030 	if (nh->fib_nh_dev == arg->dev) {
5031 		struct inet6_dev *idev = __in6_dev_get(arg->dev);
5032 		u32 mtu = f6i->fib6_pmtu;
5033 
5034 		if (mtu >= arg->mtu ||
5035 		    (mtu < arg->mtu && mtu == idev->cnf.mtu6))
5036 			fib6_metric_set(f6i, RTAX_MTU, arg->mtu);
5037 
5038 		spin_lock_bh(&rt6_exception_lock);
5039 		rt6_exceptions_update_pmtu(idev, nh, arg->mtu);
5040 		spin_unlock_bh(&rt6_exception_lock);
5041 	}
5042 
5043 	return 0;
5044 }
5045 
5046 static int rt6_mtu_change_route(struct fib6_info *f6i, void *p_arg)
5047 {
5048 	struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
5049 	struct inet6_dev *idev;
5050 
5051 	/* In IPv6, PMTU discovery is not optional, so the
5052 	 * RTAX_MTU lock cannot disable it.
5053 	 * We still use this lock to block changes
5054 	 * caused by addrconf/ndisc.
5055 	 */
5056 
5057 	idev = __in6_dev_get(arg->dev);
5058 	if (!idev)
5059 		return 0;
5060 
5061 	if (fib6_metric_locked(f6i, RTAX_MTU))
5062 		return 0;
5063 
5064 	arg->f6i = f6i;
5065 	if (f6i->nh) {
5066 		/* fib6_nh_mtu_change only returns 0, so this is safe */
5067 		return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_mtu_change,
5068 						arg);
5069 	}
5070 
5071 	return fib6_nh_mtu_change(f6i->fib6_nh, arg);
5072 }
5073 
5074 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
5075 {
5076 	struct rt6_mtu_change_arg arg = {
5077 		.dev = dev,
5078 		.mtu = mtu,
5079 	};
5080 
5081 	fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
5082 }
5083 
5084 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
5085 	[RTA_UNSPEC]		= { .strict_start_type = RTA_DPORT + 1 },
5086 	[RTA_GATEWAY]           = { .len = sizeof(struct in6_addr) },
5087 	[RTA_PREFSRC]		= { .len = sizeof(struct in6_addr) },
5088 	[RTA_OIF]               = { .type = NLA_U32 },
5089 	[RTA_IIF]		= { .type = NLA_U32 },
5090 	[RTA_PRIORITY]          = { .type = NLA_U32 },
5091 	[RTA_METRICS]           = { .type = NLA_NESTED },
5092 	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
5093 	[RTA_PREF]              = { .type = NLA_U8 },
5094 	[RTA_ENCAP_TYPE]	= { .type = NLA_U16 },
5095 	[RTA_ENCAP]		= { .type = NLA_NESTED },
5096 	[RTA_EXPIRES]		= { .type = NLA_U32 },
5097 	[RTA_UID]		= { .type = NLA_U32 },
5098 	[RTA_MARK]		= { .type = NLA_U32 },
5099 	[RTA_TABLE]		= { .type = NLA_U32 },
5100 	[RTA_IP_PROTO]		= { .type = NLA_U8 },
5101 	[RTA_SPORT]		= { .type = NLA_U16 },
5102 	[RTA_DPORT]		= { .type = NLA_U16 },
5103 	[RTA_NH_ID]		= { .type = NLA_U32 },
5104 	[RTA_FLOWLABEL]		= { .type = NLA_BE32 },
5105 };
5106 
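/* Validate the RTA_MULTIPATH payload: each nexthop must be well
 * formed, and for new routes every hop needs a gateway and must not
 * be combined with a nexthop id.
 */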
5107 static int rtm_to_fib6_multipath_config(struct fib6_config *cfg,
5108 					struct netlink_ext_ack *extack,
5109 					bool newroute)
5110 {
5111 	struct rtnexthop *rtnh;
5112 	int remaining;
5113 
5114 	remaining = cfg->fc_mp_len;
5115 	rtnh = (struct rtnexthop *)cfg->fc_mp;
5116 
5117 	if (!rtnh_ok(rtnh, remaining)) {
5118 		NL_SET_ERR_MSG(extack, "Invalid nexthop configuration - no valid nexthops");
5119 		return -EINVAL;
5120 	}
5121 
5122 	do {
5123 		bool has_gateway = cfg->fc_flags & RTF_GATEWAY;
5124 		int attrlen = rtnh_attrlen(rtnh);
5125 
5126 		if (attrlen > 0) {
5127 			struct nlattr *nla, *attrs;
5128 
5129 			attrs = rtnh_attrs(rtnh);
5130 			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5131 			if (nla) {
5132 				if (nla_len(nla) < sizeof(cfg->fc_gateway)) {
5133 					NL_SET_ERR_MSG(extack,
5134 						       "Invalid IPv6 address in RTA_GATEWAY");
5135 					return -EINVAL;
5136 				}
5137 
5138 				has_gateway = true;
5139 			}
5140 		}
5141 
5142 		if (newroute && (cfg->fc_nh_id || !has_gateway)) {
5143 			NL_SET_ERR_MSG(extack,
5144 				       "Device only routes can not be added for IPv6 using the multipath API.");
5145 			return -EINVAL;
5146 		}
5147 
5148 		rtnh = rtnh_next(rtnh, &remaining);
5149 	} while (rtnh_ok(rtnh, remaining));
5150 
5151 	return lwtunnel_valid_encap_type_attr(cfg->fc_mp, cfg->fc_mp_len, extack);
5152 }
5153 
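/* Parse an RTM_NEWROUTE/RTM_DELROUTE message into a fib6_config. */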
5154 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
5155 			      struct fib6_config *cfg,
5156 			      struct netlink_ext_ack *extack)
5157 {
5158 	bool newroute = nlh->nlmsg_type == RTM_NEWROUTE;
5159 	struct nlattr *tb[RTA_MAX+1];
5160 	struct rtmsg *rtm;
5161 	unsigned int pref;
5162 	int err;
5163 
5164 	err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
5165 				     rtm_ipv6_policy, extack);
5166 	if (err < 0)
5167 		goto errout;
5168 
5169 	err = -EINVAL;
5170 	rtm = nlmsg_data(nlh);
5171 
5172 	if (rtm->rtm_tos) {
5173 		NL_SET_ERR_MSG(extack,
5174 			       "Invalid dsfield (tos): option not available for IPv6");
5175 		goto errout;
5176 	}
5177 
5178 	if (tb[RTA_FLOWLABEL]) {
5179 		NL_SET_ERR_MSG_ATTR(extack, tb[RTA_FLOWLABEL],
5180 				    "Flow label cannot be specified for this operation");
5181 		goto errout;
5182 	}
5183 
5184 	*cfg = (struct fib6_config){
5185 		.fc_table = rtm->rtm_table,
5186 		.fc_dst_len = rtm->rtm_dst_len,
5187 		.fc_src_len = rtm->rtm_src_len,
5188 		.fc_flags = RTF_UP,
5189 		.fc_protocol = rtm->rtm_protocol,
5190 		.fc_type = rtm->rtm_type,
5191 
5192 		.fc_nlinfo.portid = NETLINK_CB(skb).portid,
5193 		.fc_nlinfo.nlh = nlh,
5194 		.fc_nlinfo.nl_net = sock_net(skb->sk),
5195 	};
5196 
5197 	if (rtm->rtm_type == RTN_UNREACHABLE ||
5198 	    rtm->rtm_type == RTN_BLACKHOLE ||
5199 	    rtm->rtm_type == RTN_PROHIBIT ||
5200 	    rtm->rtm_type == RTN_THROW)
5201 		cfg->fc_flags |= RTF_REJECT;
5202 
5203 	if (rtm->rtm_type == RTN_LOCAL)
5204 		cfg->fc_flags |= RTF_LOCAL;
5205 
5206 	if (rtm->rtm_flags & RTM_F_CLONED)
5207 		cfg->fc_flags |= RTF_CACHE;
5208 
5209 	cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);
5210 
5211 	if (tb[RTA_NH_ID]) {
5212 		if (tb[RTA_GATEWAY]   || tb[RTA_OIF] ||
5213 		    tb[RTA_MULTIPATH] || tb[RTA_ENCAP]) {
5214 			NL_SET_ERR_MSG(extack,
5215 				       "Nexthop specification and nexthop id are mutually exclusive");
5216 			goto errout;
5217 		}
5218 		cfg->fc_nh_id = nla_get_u32(tb[RTA_NH_ID]);
5219 	}
5220 
5221 	if (tb[RTA_GATEWAY]) {
5222 		cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
5223 		cfg->fc_flags |= RTF_GATEWAY;
5224 	}
5225 	if (tb[RTA_VIA]) {
5226 		NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
5227 		goto errout;
5228 	}
5229 
5230 	if (tb[RTA_DST]) {
5231 		int plen = (rtm->rtm_dst_len + 7) >> 3;
5232 
5233 		if (nla_len(tb[RTA_DST]) < plen)
5234 			goto errout;
5235 
5236 		nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
5237 	}
5238 
5239 	if (tb[RTA_SRC]) {
5240 		int plen = (rtm->rtm_src_len + 7) >> 3;
5241 
5242 		if (nla_len(tb[RTA_SRC]) < plen)
5243 			goto errout;
5244 
5245 		nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
5246 	}
5247 
5248 	if (tb[RTA_PREFSRC])
5249 		cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
5250 
5251 	if (tb[RTA_OIF])
5252 		cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
5253 
5254 	if (tb[RTA_PRIORITY])
5255 		cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
5256 
5257 	if (tb[RTA_METRICS]) {
5258 		cfg->fc_mx = nla_data(tb[RTA_METRICS]);
5259 		cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
5260 	}
5261 
5262 	if (tb[RTA_TABLE])
5263 		cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
5264 
5265 	if (tb[RTA_MULTIPATH]) {
5266 		cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
5267 		cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
5268 
5269 		err = rtm_to_fib6_multipath_config(cfg, extack, newroute);
5270 		if (err < 0)
5271 			goto errout;
5272 	}
5273 
5274 	if (tb[RTA_PREF]) {
5275 		pref = nla_get_u8(tb[RTA_PREF]);
5276 		if (pref != ICMPV6_ROUTER_PREF_LOW &&
5277 		    pref != ICMPV6_ROUTER_PREF_HIGH)
5278 			pref = ICMPV6_ROUTER_PREF_MEDIUM;
5279 		cfg->fc_flags |= RTF_PREF(pref);
5280 	}
5281 
5282 	if (tb[RTA_ENCAP])
5283 		cfg->fc_encap = tb[RTA_ENCAP];
5284 
5285 	if (tb[RTA_ENCAP_TYPE]) {
5286 		cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
5287 
5288 		err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
5289 		if (err < 0)
5290 			goto errout;
5291 	}
5292 
5293 	if (tb[RTA_EXPIRES]) {
5294 		unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
5295 
5296 		if (addrconf_finite_timeout(timeout)) {
5297 			cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
5298 			cfg->fc_flags |= RTF_EXPIRES;
5299 		}
5300 	}
5301 
5302 	err = 0;
5303 errout:
5304 	return err;
5305 }
5306 
5307 struct rt6_nh {
5308 	struct fib6_info *fib6_info;
5309 	struct fib6_config r_cfg;
5310 	struct list_head list;
5311 };
5312 
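/* Queue a fib6_info on the temporary multipath list, rejecting
 * duplicate nexthops.
 */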
5313 static int ip6_route_info_append(struct list_head *rt6_nh_list,
5314 				 struct fib6_info *rt,
5315 				 struct fib6_config *r_cfg)
5316 {
5317 	struct rt6_nh *nh;
5318 
5319 	list_for_each_entry(nh, rt6_nh_list, list) {
5320 		/* check if fib6_info already exists */
5321 		if (rt6_duplicate_nexthop(nh->fib6_info, rt))
5322 			return -EEXIST;
5323 	}
5324 
5325 	nh = kzalloc(sizeof(*nh), GFP_KERNEL);
5326 	if (!nh)
5327 		return -ENOMEM;
5328 
5329 	nh->fib6_info = rt;
5330 	memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
5331 	list_add_tail(&nh->list, rt6_nh_list);
5332 
5333 	return 0;
5334 }
5335 
5336 static void ip6_route_mpath_notify(struct fib6_info *rt,
5337 				   struct fib6_info *rt_last,
5338 				   struct nl_info *info,
5339 				   __u16 nlflags)
5340 {
5341 	/* if this is an APPEND route, then rt points to the first route
5342 	 * inserted and rt_last points to the last route inserted. Userspace
5343 	 * wants a consistent dump of the route, which starts at the first
5344 	 * nexthop. Since sibling routes are always added at the end of
5345 	 * the list, find the first sibling of the last route appended.
5346 	 */
5347 	rcu_read_lock();
5348 
5349 	if ((nlflags & NLM_F_APPEND) && rt_last &&
5350 	    READ_ONCE(rt_last->fib6_nsiblings)) {
5351 		rt = list_first_or_null_rcu(&rt_last->fib6_siblings,
5352 					    struct fib6_info,
5353 					    fib6_siblings);
5354 	}
5355 
5356 	if (rt)
5357 		inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
5358 
5359 	rcu_read_unlock();
5360 }
5361 
5362 static bool ip6_route_mpath_should_notify(const struct fib6_info *rt)
5363 {
5364 	bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
5365 	bool should_notify = false;
5366 	struct fib6_info *leaf;
5367 	struct fib6_node *fn;
5368 
5369 	rcu_read_lock();
5370 	fn = rcu_dereference(rt->fib6_node);
5371 	if (!fn)
5372 		goto out;
5373 
5374 	leaf = rcu_dereference(fn->leaf);
5375 	if (!leaf)
5376 		goto out;
5377 
5378 	if (rt == leaf ||
5379 	    (rt_can_ecmp && rt->fib6_metric == leaf->fib6_metric &&
5380 	     rt6_qualify_for_ecmp(leaf)))
5381 		should_notify = true;
5382 out:
5383 	rcu_read_unlock();
5384 
5385 	return should_notify;
5386 }
5387 
5388 static int ip6_route_multipath_add(struct fib6_config *cfg,
5389 				   struct netlink_ext_ack *extack)
5390 {
5391 	struct fib6_info *rt_notif = NULL, *rt_last = NULL;
5392 	struct nl_info *info = &cfg->fc_nlinfo;
5393 	struct rt6_nh *nh, *nh_safe;
5394 	struct fib6_config r_cfg;
5395 	struct rtnexthop *rtnh;
5396 	LIST_HEAD(rt6_nh_list);
5397 	struct rt6_nh *err_nh;
5398 	struct fib6_info *rt;
5399 	__u16 nlflags;
5400 	int remaining;
5401 	int attrlen;
5402 	int replace;
5403 	int nhn = 0;
5404 	int err;
5405 
5406 	err = fib6_config_validate(cfg, extack);
5407 	if (err)
5408 		return err;
5409 
5410 	replace = (cfg->fc_nlinfo.nlh &&
5411 		   (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
5412 
5413 	nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
5414 	if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
5415 		nlflags |= NLM_F_APPEND;
5416 
5417 	remaining = cfg->fc_mp_len;
5418 	rtnh = (struct rtnexthop *)cfg->fc_mp;
5419 
5420 	/* Parse a Multipath Entry and build a list (rt6_nh_list) of
5421 	 * fib6_info structs per nexthop
5422 	 */
5423 	while (rtnh_ok(rtnh, remaining)) {
5424 		memcpy(&r_cfg, cfg, sizeof(*cfg));
5425 		if (rtnh->rtnh_ifindex)
5426 			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
5427 
5428 		attrlen = rtnh_attrlen(rtnh);
5429 		if (attrlen > 0) {
5430 			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
5431 
5432 			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5433 			if (nla) {
5434 				r_cfg.fc_gateway = nla_get_in6_addr(nla);
5435 				r_cfg.fc_flags |= RTF_GATEWAY;
5436 			}
5437 
5438 			r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
5439 			nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
5440 			if (nla)
5441 				r_cfg.fc_encap_type = nla_get_u16(nla);
5442 		}
5443 
5444 		r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
5445 		rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack);
5446 		if (IS_ERR(rt)) {
5447 			err = PTR_ERR(rt);
5448 			rt = NULL;
5449 			goto cleanup;
5450 		}
5451 
5452 		err = ip6_route_info_create_nh(rt, &r_cfg, GFP_KERNEL, extack);
5453 		if (err) {
5454 			rt = NULL;
5455 			goto cleanup;
5456 		}
5457 
5458 		rt->fib6_nh->fib_nh_weight = rtnh->rtnh_hops + 1;
5459 
5460 		err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
5461 		if (err) {
5462 			fib6_info_release(rt);
5463 			goto cleanup;
5464 		}
5465 
5466 		rtnh = rtnh_next(rtnh, &remaining);
5467 	}
5468 
5469 	/* For add and replace, send one notification with all nexthops.
5470 	 * Skip the notification in fib6_add_rt2node and send one with
5471 	 * the full route when done.
5472 	 */
5473 	info->skip_notify = 1;
5474 
5475 	/* For add and replace, send one notification with all nexthops. For
5476 	 * append, send one notification with all appended nexthops.
5477 	 */
5478 	info->skip_notify_kernel = 1;
5479 
5480 	err_nh = NULL;
5481 	list_for_each_entry(nh, &rt6_nh_list, list) {
5482 		err = __ip6_ins_rt(nh->fib6_info, info, extack);
5483 
5484 		if (err) {
5485 			if (replace && nhn)
5486 				NL_SET_ERR_MSG_MOD(extack,
5487 						   "multipath route replace failed (check consistency of installed routes)");
5488 			err_nh = nh;
5489 			goto add_errout;
5490 		}
5491 		/* save reference to last route successfully inserted */
5492 		rt_last = nh->fib6_info;
5493 
5494 		/* save reference to first route for notification */
5495 		if (!rt_notif)
5496 			rt_notif = nh->fib6_info;
5497 
5498 		/* Because each route is added like a single route, we remove
5499 		 * these flags after the first nexthop: if there is a collision,
5500 		 * we have already failed to add the first nexthop, since
5501 		 * fib6_add_rt2node() has rejected it; when replacing, the old
5502 		 * nexthops have been replaced by the first new one, and the
5503 		 * rest should be appended to it.
5504 		 */
5505 		if (cfg->fc_nlinfo.nlh) {
5506 			cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
5507 							     NLM_F_REPLACE);
5508 			cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
5509 		}
5510 		nhn++;
5511 	}
5512 
5513 	/* An in-kernel notification should only be sent in case the new
5514 	 * multipath route is added as the first route in the node, or if
5515 	 * it was appended to it. We pass 'rt_notif' since it is the first
5516 	 * sibling and might allow us to skip some checks in the replace case.
5517 	 */
5518 	if (ip6_route_mpath_should_notify(rt_notif)) {
5519 		enum fib_event_type fib_event;
5520 
5521 		if (rt_notif->fib6_nsiblings != nhn - 1)
5522 			fib_event = FIB_EVENT_ENTRY_APPEND;
5523 		else
5524 			fib_event = FIB_EVENT_ENTRY_REPLACE;
5525 
5526 		err = call_fib6_multipath_entry_notifiers(info->nl_net,
5527 							  fib_event, rt_notif,
5528 							  nhn - 1, extack);
5529 		if (err) {
5530 			/* Delete all the siblings that were just added */
5531 			err_nh = NULL;
5532 			goto add_errout;
5533 		}
5534 	}
5535 
5536 	/* success ... tell user about new route */
5537 	ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
5538 	goto cleanup;
5539 
5540 add_errout:
5541 	/* send notification for routes that were added so that
5542 	 * the delete notifications sent by ip6_route_del are
5543 	 * coherent
5544 	 */
5545 	if (rt_notif)
5546 		ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
5547 
5548 	/* Delete routes that were already added */
5549 	list_for_each_entry(nh, &rt6_nh_list, list) {
5550 		if (err_nh == nh)
5551 			break;
5552 		ip6_route_del(&nh->r_cfg, extack);
5553 	}
5554 
5555 cleanup:
5556 	list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, list) {
5557 		fib6_info_release(nh->fib6_info);
5558 		list_del(&nh->list);
5559 		kfree(nh);
5560 	}
5561 
5562 	return err;
5563 }
5564 
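/* Delete each nexthop in the RTA_MULTIPATH payload as an individual
 * route, returning the last error seen.
 */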
5565 static int ip6_route_multipath_del(struct fib6_config *cfg,
5566 				   struct netlink_ext_ack *extack)
5567 {
5568 	struct fib6_config r_cfg;
5569 	struct rtnexthop *rtnh;
5570 	int last_err = 0;
5571 	int remaining;
5572 	int attrlen;
5573 	int err;
5574 
5575 	remaining = cfg->fc_mp_len;
5576 	rtnh = (struct rtnexthop *)cfg->fc_mp;
5577 
5578 	/* Parse a Multipath Entry */
5579 	while (rtnh_ok(rtnh, remaining)) {
5580 		memcpy(&r_cfg, cfg, sizeof(*cfg));
5581 		if (rtnh->rtnh_ifindex)
5582 			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
5583 
5584 		attrlen = rtnh_attrlen(rtnh);
5585 		if (attrlen > 0) {
5586 			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
5587 
5588 			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5589 			if (nla) {
5590 				r_cfg.fc_gateway = nla_get_in6_addr(nla);
5591 				r_cfg.fc_flags |= RTF_GATEWAY;
5592 			}
5593 		}
5594 
5595 		err = ip6_route_del(&r_cfg, extack);
5596 		if (err)
5597 			last_err = err;
5598 
5599 		rtnh = rtnh_next(rtnh, &remaining);
5600 	}
5601 
5602 	return last_err;
5603 }
5604 
5605 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
5606 			      struct netlink_ext_ack *extack)
5607 {
5608 	struct fib6_config cfg;
5609 	int err;
5610 
5611 	err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5612 	if (err < 0)
5613 		return err;
5614 
5615 	if (cfg.fc_nh_id) {
5616 		rcu_read_lock();
5617 		err = !nexthop_find_by_id(sock_net(skb->sk), cfg.fc_nh_id);
5618 		rcu_read_unlock();
5619 
5620 		if (err) {
5621 			NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
5622 			return -EINVAL;
5623 		}
5624 	}
5625 
5626 	if (cfg.fc_mp) {
5627 		return ip6_route_multipath_del(&cfg, extack);
5628 	} else {
5629 		cfg.fc_delete_all_nh = 1;
5630 		return ip6_route_del(&cfg, extack);
5631 	}
5632 }
5633 
5634 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
5635 			      struct netlink_ext_ack *extack)
5636 {
5637 	struct fib6_config cfg;
5638 	int err;
5639 
5640 	err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5641 	if (err < 0)
5642 		return err;
5643 
5644 	if (cfg.fc_metric == 0)
5645 		cfg.fc_metric = IP6_RT_PRIO_USER;
5646 
5647 	if (cfg.fc_mp)
5648 		return ip6_route_multipath_add(&cfg, extack);
5649 	else
5650 		return ip6_route_add(&cfg, GFP_KERNEL, extack);
5651 }
5652 
5653 /* add the overhead of this fib6_nh to nexthop_len */
5654 static int rt6_nh_nlmsg_size(struct fib6_nh *nh, void *arg)
5655 {
5656 	int *nexthop_len = arg;
5657 
5658 	*nexthop_len += nla_total_size(0)	 /* RTA_MULTIPATH */
5659 		     + NLA_ALIGN(sizeof(struct rtnexthop))
5660 		     + nla_total_size(16); /* RTA_GATEWAY */
5661 
5662 	if (nh->fib_nh_lws) {
5663 		/* RTA_ENCAP_TYPE */
5664 		*nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
5665 		/* RTA_ENCAP */
5666 		*nexthop_len += nla_total_size(2);
5667 	}
5668 
5669 	return 0;
5670 }
5671 
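/* Worst-case netlink message size needed to dump this route,
 * including all sibling nexthops.
 */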
5672 static size_t rt6_nlmsg_size(struct fib6_info *f6i)
5673 {
5674 	struct fib6_info *sibling;
5675 	struct fib6_nh *nh;
5676 	int nexthop_len;
5677 
5678 	if (f6i->nh) {
5679 		nexthop_len = nla_total_size(4); /* RTA_NH_ID */
5680 		nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size,
5681 					 &nexthop_len);
5682 		goto common;
5683 	}
5684 
5685 	rcu_read_lock();
5686 retry:
5687 	nh = f6i->fib6_nh;
5688 	nexthop_len = 0;
5689 	if (READ_ONCE(f6i->fib6_nsiblings)) {
5690 		rt6_nh_nlmsg_size(nh, &nexthop_len);
5691 
5692 		list_for_each_entry_rcu(sibling, &f6i->fib6_siblings,
5693 					fib6_siblings) {
5694 			rt6_nh_nlmsg_size(sibling->fib6_nh, &nexthop_len);
5695 			if (!READ_ONCE(f6i->fib6_nsiblings))
5696 				goto retry;
5697 		}
5698 	}
5699 	rcu_read_unlock();
5700 	nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
5701 common:
5702 	return NLMSG_ALIGN(sizeof(struct rtmsg))
5703 	       + nla_total_size(16) /* RTA_SRC */
5704 	       + nla_total_size(16) /* RTA_DST */
5705 	       + nla_total_size(16) /* RTA_GATEWAY */
5706 	       + nla_total_size(16) /* RTA_PREFSRC */
5707 	       + nla_total_size(4) /* RTA_TABLE */
5708 	       + nla_total_size(4) /* RTA_IIF */
5709 	       + nla_total_size(4) /* RTA_OIF */
5710 	       + nla_total_size(4) /* RTA_PRIORITY */
5711 	       + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
5712 	       + nla_total_size(sizeof(struct rta_cacheinfo))
5713 	       + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
5714 	       + nla_total_size(1) /* RTA_PREF */
5715 	       + nexthop_len;
5716 }
5717 
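/* Fill the nexthop portion of a route dump for a route using an
 * external nexthop object: RTA_MULTIPATH for a group, inline nexthop
 * attributes otherwise.
 */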
5718 static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh,
5719 				 unsigned char *flags)
5720 {
5721 	if (nexthop_is_multipath(nh)) {
5722 		struct nlattr *mp;
5723 
5724 		mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
5725 		if (!mp)
5726 			goto nla_put_failure;
5727 
5728 		if (nexthop_mpath_fill_node(skb, nh, AF_INET6))
5729 			goto nla_put_failure;
5730 
5731 		nla_nest_end(skb, mp);
5732 	} else {
5733 		struct fib6_nh *fib6_nh;
5734 
5735 		fib6_nh = nexthop_fib6_nh(nh);
5736 		if (fib_nexthop_info(skb, &fib6_nh->nh_common, AF_INET6,
5737 				     flags, false) < 0)
5738 			goto nla_put_failure;
5739 	}
5740 
5741 	return 0;
5742 
5743 nla_put_failure:
5744 	return -EMSGSIZE;
5745 }
5746 
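/* Fill one RTM_NEWROUTE/RTM_DELROUTE message for the FIB entry @rt or,
 * when @dst is set, for the cached entry attached to it. Returns
 * -EMSGSIZE if the attributes do not fit into @skb.
 */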
5747 static int rt6_fill_node(struct net *net, struct sk_buff *skb,
5748 			 struct fib6_info *rt, struct dst_entry *dst,
5749 			 struct in6_addr *dest, struct in6_addr *src,
5750 			 int iif, int type, u32 portid, u32 seq,
5751 			 unsigned int flags)
5752 {
5753 	struct rt6_info *rt6 = dst_rt6_info(dst);
5754 	struct rt6key *rt6_dst, *rt6_src;
5755 	u32 *pmetrics, table, rt6_flags;
5756 	unsigned char nh_flags = 0;
5757 	struct nlmsghdr *nlh;
5758 	struct rtmsg *rtm;
5759 	long expires = 0;
5760 
5761 	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
5762 	if (!nlh)
5763 		return -EMSGSIZE;
5764 
5765 	if (rt6) {
5766 		rt6_dst = &rt6->rt6i_dst;
5767 		rt6_src = &rt6->rt6i_src;
5768 		rt6_flags = rt6->rt6i_flags;
5769 	} else {
5770 		rt6_dst = &rt->fib6_dst;
5771 		rt6_src = &rt->fib6_src;
5772 		rt6_flags = rt->fib6_flags;
5773 	}
5774 
5775 	rtm = nlmsg_data(nlh);
5776 	rtm->rtm_family = AF_INET6;
5777 	rtm->rtm_dst_len = rt6_dst->plen;
5778 	rtm->rtm_src_len = rt6_src->plen;
5779 	rtm->rtm_tos = 0;
5780 	if (rt->fib6_table)
5781 		table = rt->fib6_table->tb6_id;
5782 	else
5783 		table = RT6_TABLE_UNSPEC;
5784 	rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
5785 	if (nla_put_u32(skb, RTA_TABLE, table))
5786 		goto nla_put_failure;
5787 
5788 	rtm->rtm_type = rt->fib6_type;
5789 	rtm->rtm_flags = 0;
5790 	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
5791 	rtm->rtm_protocol = rt->fib6_protocol;
5792 
5793 	if (rt6_flags & RTF_CACHE)
5794 		rtm->rtm_flags |= RTM_F_CLONED;
5795 
5796 	if (dest) {
5797 		if (nla_put_in6_addr(skb, RTA_DST, dest))
5798 			goto nla_put_failure;
5799 		rtm->rtm_dst_len = 128;
	} else if (rtm->rtm_dst_len &&
		   nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
		goto nla_put_failure;
5803 #ifdef CONFIG_IPV6_SUBTREES
5804 	if (src) {
5805 		if (nla_put_in6_addr(skb, RTA_SRC, src))
5806 			goto nla_put_failure;
5807 		rtm->rtm_src_len = 128;
5808 	} else if (rtm->rtm_src_len &&
5809 		   nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
5810 		goto nla_put_failure;
5811 #endif
5812 	if (iif) {
5813 #ifdef CONFIG_IPV6_MROUTE
5814 		if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
5815 			int err = ip6mr_get_route(net, skb, rtm, portid);
5816 
5817 			if (err == 0)
5818 				return 0;
5819 			if (err < 0)
5820 				goto nla_put_failure;
5821 		} else
5822 #endif
5823 			if (nla_put_u32(skb, RTA_IIF, iif))
5824 				goto nla_put_failure;
5825 	} else if (dest) {
		struct in6_addr saddr_buf;

		if (ip6_route_get_saddr(net, rt, dest, 0, 0, &saddr_buf) == 0 &&
5828 		    nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
5829 			goto nla_put_failure;
5830 	}
5831 
5832 	if (rt->fib6_prefsrc.plen) {
		struct in6_addr saddr_buf;

		saddr_buf = rt->fib6_prefsrc.addr;
5835 		if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
5836 			goto nla_put_failure;
5837 	}
5838 
5839 	pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics;
5840 	if (rtnetlink_put_metrics(skb, pmetrics) < 0)
5841 		goto nla_put_failure;
5842 
5843 	if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric))
5844 		goto nla_put_failure;
5845 
	/* A cached entry (rt6) carries its own gateway, oif and encap.
	 * For multipath FIB routes, walk the siblings list instead and
	 * add each one as a nexthop within RTA_MULTIPATH.
	 */
5849 	if (rt6) {
5850 		struct net_device *dev;
5851 
5852 		if (rt6_flags & RTF_GATEWAY &&
5853 		    nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
5854 			goto nla_put_failure;
5855 
5856 		dev = dst_dev(dst);
5857 		if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
5858 			goto nla_put_failure;
5859 
5860 		if (lwtunnel_fill_encap(skb, dst->lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
5861 			goto nla_put_failure;
5862 	} else if (READ_ONCE(rt->fib6_nsiblings)) {
5863 		struct fib6_info *sibling;
5864 		struct nlattr *mp;
5865 
5866 		mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
5867 		if (!mp)
5868 			goto nla_put_failure;
5869 
5870 		if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
5871 				    rt->fib6_nh->fib_nh_weight, AF_INET6,
5872 				    0) < 0)
5873 			goto nla_put_failure;
5874 
5875 		rcu_read_lock();
5876 
5877 		list_for_each_entry_rcu(sibling, &rt->fib6_siblings,
5878 					fib6_siblings) {
5879 			if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
5880 					    sibling->fib6_nh->fib_nh_weight,
5881 					    AF_INET6, 0) < 0) {
5882 				rcu_read_unlock();
5883 
5884 				goto nla_put_failure;
5885 			}
5886 		}
5887 
5888 		rcu_read_unlock();
5889 
5890 		nla_nest_end(skb, mp);
5891 	} else if (rt->nh) {
5892 		if (nla_put_u32(skb, RTA_NH_ID, rt->nh->id))
5893 			goto nla_put_failure;
5894 
5895 		if (nexthop_is_blackhole(rt->nh))
5896 			rtm->rtm_type = RTN_BLACKHOLE;
5897 
5898 		if (READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode) &&
5899 		    rt6_fill_node_nexthop(skb, rt->nh, &nh_flags) < 0)
5900 			goto nla_put_failure;
5901 
5902 		rtm->rtm_flags |= nh_flags;
5903 	} else {
5904 		if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common, AF_INET6,
5905 				     &nh_flags, false) < 0)
5906 			goto nla_put_failure;
5907 
5908 		rtm->rtm_flags |= nh_flags;
5909 	}
5910 
5911 	if (rt6_flags & RTF_EXPIRES) {
5912 		expires = dst ? READ_ONCE(dst->expires) : rt->expires;
5913 		expires -= jiffies;
5914 	}
5915 
5916 	if (!dst) {
5917 		if (READ_ONCE(rt->offload))
5918 			rtm->rtm_flags |= RTM_F_OFFLOAD;
5919 		if (READ_ONCE(rt->trap))
5920 			rtm->rtm_flags |= RTM_F_TRAP;
5921 		if (READ_ONCE(rt->offload_failed))
5922 			rtm->rtm_flags |= RTM_F_OFFLOAD_FAILED;
5923 	}
5924 
5925 	if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
5926 		goto nla_put_failure;
5927 
5928 	if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
5933 	return 0;
5934 
5935 nla_put_failure:
5936 	nlmsg_cancel(skb, nlh);
5937 	return -EMSGSIZE;
5938 }
5939 
5940 static int fib6_info_nh_uses_dev(struct fib6_nh *nh, void *arg)
5941 {
5942 	const struct net_device *dev = arg;
5943 
5944 	if (nh->fib_nh_dev == dev)
5945 		return 1;
5946 
5947 	return 0;
5948 }
5949 
5950 static bool fib6_info_uses_dev(const struct fib6_info *f6i,
5951 			       const struct net_device *dev)
5952 {
5953 	if (f6i->nh) {
5954 		struct net_device *_dev = (struct net_device *)dev;
5955 
5956 		return !!nexthop_for_each_fib6_nh(f6i->nh,
5957 						  fib6_info_nh_uses_dev,
5958 						  _dev);
5959 	}
5960 
5961 	if (f6i->fib6_nh->fib_nh_dev == dev)
5962 		return true;
5963 
5964 	if (READ_ONCE(f6i->fib6_nsiblings)) {
5965 		const struct fib6_info *sibling;
5966 
5967 		rcu_read_lock();
5968 		list_for_each_entry_rcu(sibling, &f6i->fib6_siblings,
5969 					fib6_siblings) {
5970 			if (sibling->fib6_nh->fib_nh_dev == dev) {
5971 				rcu_read_unlock();
5972 				return true;
5973 			}
5974 			if (!READ_ONCE(f6i->fib6_nsiblings))
5975 				break;
5976 		}
5977 		rcu_read_unlock();
5978 	}
5979 	return false;
5980 }
5981 
5982 struct fib6_nh_exception_dump_walker {
5983 	struct rt6_rtnl_dump_arg *dump;
5984 	struct fib6_info *rt;
5985 	unsigned int flags;
5986 	unsigned int skip;
5987 	unsigned int count;
5988 };
5989 
5990 static int rt6_nh_dump_exceptions(struct fib6_nh *nh, void *arg)
5991 {
5992 	struct fib6_nh_exception_dump_walker *w = arg;
5993 	struct rt6_rtnl_dump_arg *dump = w->dump;
5994 	struct rt6_exception_bucket *bucket;
5995 	struct rt6_exception *rt6_ex;
5996 	int i, err;
5997 
5998 	bucket = fib6_nh_get_excptn_bucket(nh, NULL);
5999 	if (!bucket)
6000 		return 0;
6001 
6002 	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
6003 		hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
6004 			if (w->skip) {
6005 				w->skip--;
6006 				continue;
6007 			}
6008 
6009 			/* Expiration of entries doesn't bump sernum, insertion
6010 			 * does. Removal is triggered by insertion, so we can
6011 			 * rely on the fact that if entries change between two
6012 			 * partial dumps, this node is scanned again completely,
6013 			 * see rt6_insert_exception() and fib6_dump_table().
6014 			 *
6015 			 * Count expired entries we go through as handled
6016 			 * entries that we'll skip next time, in case of partial
6017 			 * node dump. Otherwise, if entries expire meanwhile,
6018 			 * we'll skip the wrong amount.
6019 			 */
6020 			if (rt6_check_expired(rt6_ex->rt6i)) {
6021 				w->count++;
6022 				continue;
6023 			}
6024 
6025 			err = rt6_fill_node(dump->net, dump->skb, w->rt,
6026 					    &rt6_ex->rt6i->dst, NULL, NULL, 0,
6027 					    RTM_NEWROUTE,
6028 					    NETLINK_CB(dump->cb->skb).portid,
6029 					    dump->cb->nlh->nlmsg_seq, w->flags);
6030 			if (err)
6031 				return err;
6032 
6033 			w->count++;
6034 		}
6035 		bucket++;
6036 	}
6037 
6038 	return 0;
6039 }
6040 
/* Return -1 when done with the node, or the number of handled routes on a
 * partial dump.
 */
6042 int rt6_dump_route(struct fib6_info *rt, void *p_arg, unsigned int skip)
6043 {
6044 	struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
6045 	struct fib_dump_filter *filter = &arg->filter;
6046 	unsigned int flags = NLM_F_MULTI;
6047 	struct net *net = arg->net;
6048 	int count = 0;
6049 
6050 	if (rt == net->ipv6.fib6_null_entry)
6051 		return -1;
6052 
6053 	if ((filter->flags & RTM_F_PREFIX) &&
6054 	    !(rt->fib6_flags & RTF_PREFIX_RT)) {
6055 		/* success since this is not a prefix route */
6056 		return -1;
6057 	}
6058 	if (filter->filter_set &&
6059 	    ((filter->rt_type  && rt->fib6_type != filter->rt_type) ||
6060 	     (filter->dev      && !fib6_info_uses_dev(rt, filter->dev)) ||
6061 	     (filter->protocol && rt->fib6_protocol != filter->protocol))) {
6062 		return -1;
6063 	}
6064 
6065 	if (filter->filter_set ||
6066 	    !filter->dump_routes || !filter->dump_exceptions) {
6067 		flags |= NLM_F_DUMP_FILTERED;
6068 	}
6069 
6070 	if (filter->dump_routes) {
6071 		if (skip) {
6072 			skip--;
6073 		} else {
6074 			if (rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL,
6075 					  0, RTM_NEWROUTE,
6076 					  NETLINK_CB(arg->cb->skb).portid,
6077 					  arg->cb->nlh->nlmsg_seq, flags)) {
6078 				return 0;
6079 			}
6080 			count++;
6081 		}
6082 	}
6083 
6084 	if (filter->dump_exceptions) {
6085 		struct fib6_nh_exception_dump_walker w = { .dump = arg,
6086 							   .rt = rt,
6087 							   .flags = flags,
6088 							   .skip = skip,
6089 							   .count = 0 };
6090 		int err;
6091 
6092 		rcu_read_lock();
6093 		if (rt->nh) {
6094 			err = nexthop_for_each_fib6_nh(rt->nh,
6095 						       rt6_nh_dump_exceptions,
6096 						       &w);
6097 		} else {
6098 			err = rt6_nh_dump_exceptions(rt->fib6_nh, &w);
6099 		}
6100 		rcu_read_unlock();
6101 
6102 		if (err)
6103 			return count + w.count;
6104 	}
6105 
6106 	return -1;
6107 }
6108 
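/* Validate an RTM_GETROUTE request. Sockets that opted into strict
 * checking get full header and attribute validation; others fall back
 * to the old best-effort parse for backwards compatibility.
 */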
6109 static int inet6_rtm_valid_getroute_req(struct sk_buff *skb,
6110 					const struct nlmsghdr *nlh,
6111 					struct nlattr **tb,
6112 					struct netlink_ext_ack *extack)
6113 {
6114 	struct rtmsg *rtm;
6115 	int i, err;
6116 
6117 	rtm = nlmsg_payload(nlh, sizeof(*rtm));
6118 	if (!rtm) {
6119 		NL_SET_ERR_MSG_MOD(extack,
6120 				   "Invalid header for get route request");
6121 		return -EINVAL;
6122 	}
6123 
6124 	if (!netlink_strict_get_check(skb))
6125 		return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
6126 					      rtm_ipv6_policy, extack);
6127 
6128 	if ((rtm->rtm_src_len && rtm->rtm_src_len != 128) ||
6129 	    (rtm->rtm_dst_len && rtm->rtm_dst_len != 128) ||
6130 	    rtm->rtm_table || rtm->rtm_protocol || rtm->rtm_scope ||
6131 	    rtm->rtm_type) {
6132 		NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request");
6133 		return -EINVAL;
6134 	}
6135 	if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) {
6136 		NL_SET_ERR_MSG_MOD(extack,
6137 				   "Invalid flags for get route request");
6138 		return -EINVAL;
6139 	}
6140 
6141 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
6142 					    rtm_ipv6_policy, extack);
6143 	if (err)
6144 		return err;
6145 
6146 	if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
6147 	    (tb[RTA_DST] && !rtm->rtm_dst_len)) {
6148 		NL_SET_ERR_MSG_MOD(extack, "rtm_src_len and rtm_dst_len must be 128 for IPv6");
6149 		return -EINVAL;
6150 	}
6151 
6152 	if (tb[RTA_FLOWLABEL] &&
6153 	    (nla_get_be32(tb[RTA_FLOWLABEL]) & ~IPV6_FLOWLABEL_MASK)) {
6154 		NL_SET_ERR_MSG_ATTR(extack, tb[RTA_FLOWLABEL],
6155 				    "Invalid flow label");
6156 		return -EINVAL;
6157 	}
6158 
6159 	for (i = 0; i <= RTA_MAX; i++) {
6160 		if (!tb[i])
6161 			continue;
6162 
6163 		switch (i) {
6164 		case RTA_SRC:
6165 		case RTA_DST:
6166 		case RTA_IIF:
6167 		case RTA_OIF:
6168 		case RTA_MARK:
6169 		case RTA_UID:
6170 		case RTA_SPORT:
6171 		case RTA_DPORT:
6172 		case RTA_IP_PROTO:
6173 		case RTA_FLOWLABEL:
6174 			break;
6175 		default:
6176 			NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request");
6177 			return -EINVAL;
6178 		}
6179 	}
6180 
6181 	return 0;
6182 }
6183 
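/* RTM_GETROUTE handler (e.g. what "ip -6 route get 2001:db8::1"
 * exercises): build a flow from the request attributes, resolve it as
 * an input (RTA_IIF) or output route, and unicast the result back to
 * the requester. With RTM_F_FIB_MATCH, the matching FIB entry is
 * returned instead of the resolved dst.
 */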
6184 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
6185 			      struct netlink_ext_ack *extack)
6186 {
6187 	struct net *net = sock_net(in_skb->sk);
6188 	struct nlattr *tb[RTA_MAX+1];
6189 	int err, iif = 0, oif = 0;
6190 	struct fib6_info *from;
6191 	struct dst_entry *dst;
6192 	struct rt6_info *rt;
6193 	struct sk_buff *skb;
6194 	struct rtmsg *rtm;
6195 	struct flowi6 fl6 = {};
6196 	__be32 flowlabel;
6197 	bool fibmatch;
6198 
6199 	err = inet6_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
6200 	if (err < 0)
6201 		goto errout;
6202 
6203 	err = -EINVAL;
6204 	rtm = nlmsg_data(nlh);
6205 	fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
6206 
6207 	if (tb[RTA_SRC]) {
6208 		if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
6209 			goto errout;
6210 
6211 		fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
6212 	}
6213 
6214 	if (tb[RTA_DST]) {
6215 		if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
6216 			goto errout;
6217 
6218 		fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
6219 	}
6220 
6221 	if (tb[RTA_IIF])
6222 		iif = nla_get_u32(tb[RTA_IIF]);
6223 
6224 	if (tb[RTA_OIF])
6225 		oif = nla_get_u32(tb[RTA_OIF]);
6226 
6227 	if (tb[RTA_MARK])
6228 		fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
6229 
6230 	if (tb[RTA_UID])
6231 		fl6.flowi6_uid = make_kuid(current_user_ns(),
6232 					   nla_get_u32(tb[RTA_UID]));
6233 	else
6234 		fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
6235 
6236 	if (tb[RTA_SPORT])
6237 		fl6.fl6_sport = nla_get_be16(tb[RTA_SPORT]);
6238 
6239 	if (tb[RTA_DPORT])
6240 		fl6.fl6_dport = nla_get_be16(tb[RTA_DPORT]);
6241 
6242 	if (tb[RTA_IP_PROTO]) {
6243 		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
6244 						  &fl6.flowi6_proto, AF_INET6,
6245 						  extack);
6246 		if (err)
6247 			goto errout;
6248 	}
6249 
6250 	flowlabel = nla_get_be32_default(tb[RTA_FLOWLABEL], 0);
6251 	fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, flowlabel);
6252 
6253 	if (iif) {
6254 		struct net_device *dev;
6255 		int flags = 0;
6256 
6257 		rcu_read_lock();
6258 
6259 		dev = dev_get_by_index_rcu(net, iif);
6260 		if (!dev) {
6261 			rcu_read_unlock();
6262 			err = -ENODEV;
6263 			goto errout;
6264 		}
6265 
6266 		fl6.flowi6_iif = iif;
6267 
6268 		if (!ipv6_addr_any(&fl6.saddr))
6269 			flags |= RT6_LOOKUP_F_HAS_SADDR;
6270 
6271 		dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags);
6272 
6273 		rcu_read_unlock();
6274 	} else {
6275 		fl6.flowi6_oif = oif;
6276 
6277 		dst = ip6_route_output(net, NULL, &fl6);
	}

	rt = dst_rt6_info(dst);
6282 	if (rt->dst.error) {
6283 		err = rt->dst.error;
6284 		ip6_rt_put(rt);
6285 		goto errout;
6286 	}
6287 
6288 	if (rt == net->ipv6.ip6_null_entry) {
6289 		err = rt->dst.error;
6290 		ip6_rt_put(rt);
6291 		goto errout;
6292 	}
6293 
6294 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
6295 	if (!skb) {
6296 		ip6_rt_put(rt);
6297 		err = -ENOBUFS;
6298 		goto errout;
6299 	}
6300 
6301 	skb_dst_set(skb, &rt->dst);
6302 
6303 	rcu_read_lock();
6304 	from = rcu_dereference(rt->from);
6305 	if (from) {
6306 		if (fibmatch)
6307 			err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
6308 					    iif, RTM_NEWROUTE,
6309 					    NETLINK_CB(in_skb).portid,
6310 					    nlh->nlmsg_seq, 0);
6311 		else
6312 			err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
6313 					    &fl6.saddr, iif, RTM_NEWROUTE,
6314 					    NETLINK_CB(in_skb).portid,
6315 					    nlh->nlmsg_seq, 0);
6316 	} else {
6317 		err = -ENETUNREACH;
6318 	}
6319 	rcu_read_unlock();
6320 
6321 	if (err < 0) {
6322 		kfree_skb(skb);
6323 		goto errout;
6324 	}
6325 
6326 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
6327 errout:
6328 	return err;
6329 }
6330 
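/* Notify RTNLGRP_IPV6_ROUTE listeners about a route change. This can
 * run in atomic context; if the route grows between sizing and filling
 * the skb (-EMSGSIZE), retry with a larger allocation.
 */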
6331 void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
6332 		     unsigned int nlm_flags)
6333 {
6334 	struct net *net = info->nl_net;
6335 	struct sk_buff *skb;
6336 	size_t sz;
6337 	u32 seq;
6338 	int err;
6339 
6340 	err = -ENOBUFS;
6341 	seq = info->nlh ? info->nlh->nlmsg_seq : 0;
6342 
6343 	rcu_read_lock();
6344 	sz = rt6_nlmsg_size(rt);
6345 retry:
6346 	skb = nlmsg_new(sz, GFP_ATOMIC);
6347 	if (!skb)
6348 		goto errout;
6349 
6350 	err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
6351 			    event, info->portid, seq, nlm_flags);
6352 	if (err < 0) {
6353 		kfree_skb(skb);
6354 		/* -EMSGSIZE implies needed space grew under us. */
6355 		if (err == -EMSGSIZE) {
6356 			sz = max(rt6_nlmsg_size(rt), sz << 1);
6357 			goto retry;
6358 		}
6359 		goto errout;
6360 	}
6361 
6362 	rcu_read_unlock();
6363 
6364 	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
6365 		    info->nlh, GFP_ATOMIC);
6366 	return;
6367 errout:
6368 	rcu_read_unlock();
6369 	rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
6370 }
6371 
6372 void fib6_rt_update(struct net *net, struct fib6_info *rt,
6373 		    struct nl_info *info)
6374 {
6375 	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
6376 	struct sk_buff *skb;
6377 	int err = -ENOBUFS;
6378 
6379 	skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
6380 	if (!skb)
6381 		goto errout;
6382 
6383 	err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
6384 			    RTM_NEWROUTE, info->portid, seq, NLM_F_REPLACE);
6385 	if (err < 0) {
6386 		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
6387 		WARN_ON(err == -EMSGSIZE);
6388 		kfree_skb(skb);
6389 		goto errout;
6390 	}
6391 	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
6392 		    info->nlh, gfp_any());
6393 	return;
6394 errout:
6395 	rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
6396 }
6397 
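/* Update the hardware offload state of @f6i and notify userspace based
 * on the fib_notify_on_flag_change sysctl: 0 - no notifications,
 * 1 - notify on any flag change, 2 - notify only when offload_failed
 * changes.
 */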
6398 void fib6_info_hw_flags_set(struct net *net, struct fib6_info *f6i,
6399 			    bool offload, bool trap, bool offload_failed)
6400 {
6401 	struct sk_buff *skb;
6402 	int err;
6403 
6404 	if (READ_ONCE(f6i->offload) == offload &&
6405 	    READ_ONCE(f6i->trap) == trap &&
6406 	    READ_ONCE(f6i->offload_failed) == offload_failed)
6407 		return;
6408 
6409 	WRITE_ONCE(f6i->offload, offload);
6410 	WRITE_ONCE(f6i->trap, trap);
6411 
6412 	/* 2 means send notifications only if offload_failed was changed. */
6413 	if (net->ipv6.sysctl.fib_notify_on_flag_change == 2 &&
6414 	    READ_ONCE(f6i->offload_failed) == offload_failed)
6415 		return;
6416 
6417 	WRITE_ONCE(f6i->offload_failed, offload_failed);
6418 
6419 	if (!rcu_access_pointer(f6i->fib6_node))
6420 		/* The route was removed from the tree, do not send
6421 		 * notification.
6422 		 */
6423 		return;
6424 
6425 	if (!net->ipv6.sysctl.fib_notify_on_flag_change)
6426 		return;
6427 
6428 	skb = nlmsg_new(rt6_nlmsg_size(f6i), GFP_KERNEL);
6429 	if (!skb) {
6430 		err = -ENOBUFS;
6431 		goto errout;
6432 	}
6433 
6434 	err = rt6_fill_node(net, skb, f6i, NULL, NULL, NULL, 0, RTM_NEWROUTE, 0,
6435 			    0, 0);
6436 	if (err < 0) {
6437 		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
6438 		WARN_ON(err == -EMSGSIZE);
6439 		kfree_skb(skb);
6440 		goto errout;
6441 	}
6442 
6443 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_ROUTE, NULL, GFP_KERNEL);
6444 	return;
6445 
6446 errout:
6447 	rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
6448 }
6449 EXPORT_SYMBOL(fib6_info_hw_flags_set);
6450 
6451 static int ip6_route_dev_notify(struct notifier_block *this,
6452 				unsigned long event, void *ptr)
6453 {
6454 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6455 	struct net *net = dev_net(dev);
6456 
6457 	if (!(dev->flags & IFF_LOOPBACK))
6458 		return NOTIFY_OK;
6459 
6460 	if (event == NETDEV_REGISTER) {
6461 		net->ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = dev;
6462 		net->ipv6.ip6_null_entry->dst.dev = dev;
6463 		net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
6464 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6465 		net->ipv6.ip6_prohibit_entry->dst.dev = dev;
6466 		net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
6467 		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
6468 		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
6469 #endif
	} else if (event == NETDEV_UNREGISTER &&
		   dev->reg_state != NETREG_UNREGISTERED) {
		/* NETDEV_UNREGISTER can be fired multiple times by
		 * netdev_wait_allrefs(). Make sure we only call this once.
		 */
6475 		in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
6476 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6477 		in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
6478 		in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
6479 #endif
6480 	}
6481 
6482 	return NOTIFY_OK;
6483 }
6484 
6485 /*
6486  *	/proc
6487  */
6488 
6489 #ifdef CONFIG_PROC_FS
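/* /proc/net/rt6_stats: seven hex fields - fib nodes, route nodes,
 * allocated routes, route entries, cached routes, dst cache entries
 * and discarded routes.
 */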
6490 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
6491 {
	struct net *net = (struct net *)seq->private;

	seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
6494 		   net->ipv6.rt6_stats->fib_nodes,
6495 		   net->ipv6.rt6_stats->fib_route_nodes,
6496 		   atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
6497 		   net->ipv6.rt6_stats->fib_rt_entries,
6498 		   net->ipv6.rt6_stats->fib_rt_cache,
6499 		   dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
6500 		   net->ipv6.rt6_stats->fib_discarded_routes);
6501 
6502 	return 0;
6503 }
6504 #endif	/* CONFIG_PROC_FS */
6505 
6506 #ifdef CONFIG_SYSCTL
6507 
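/* Write-only handler behind net.ipv6.route.flush: any value written
 * triggers fib6_run_gc(), with a positive value forcing a GC run with
 * that timeout.
 */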
static int ipv6_sysctl_rtcache_flush(const struct ctl_table *ctl, int write,
				     void *buffer, size_t *lenp, loff_t *ppos)
6510 {
6511 	struct net *net;
6512 	int delay;
	int ret;

	if (!write)
6515 		return -EINVAL;
6516 
6517 	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
6518 	if (ret)
6519 		return ret;
6520 
6521 	net = (struct net *)ctl->extra1;
6522 	delay = net->ipv6.sysctl.flush_delay;
6523 	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
6524 	return 0;
6525 }
6526 
6527 static struct ctl_table ipv6_route_table_template[] = {
6528 	{
6529 		.procname	=	"max_size",
6530 		.data		=	&init_net.ipv6.sysctl.ip6_rt_max_size,
6531 		.maxlen		=	sizeof(int),
6532 		.mode		=	0644,
6533 		.proc_handler	=	proc_dointvec,
6534 	},
6535 	{
6536 		.procname	=	"gc_thresh",
6537 		.data		=	&ip6_dst_ops_template.gc_thresh,
6538 		.maxlen		=	sizeof(int),
6539 		.mode		=	0644,
6540 		.proc_handler	=	proc_dointvec,
6541 	},
6542 	{
6543 		.procname	=	"flush",
6544 		.data		=	&init_net.ipv6.sysctl.flush_delay,
6545 		.maxlen		=	sizeof(int),
6546 		.mode		=	0200,
6547 		.proc_handler	=	ipv6_sysctl_rtcache_flush
6548 	},
6549 	{
6550 		.procname	=	"gc_min_interval",
6551 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
6552 		.maxlen		=	sizeof(int),
6553 		.mode		=	0644,
6554 		.proc_handler	=	proc_dointvec_jiffies,
6555 	},
6556 	{
6557 		.procname	=	"gc_timeout",
6558 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_timeout,
6559 		.maxlen		=	sizeof(int),
6560 		.mode		=	0644,
6561 		.proc_handler	=	proc_dointvec_jiffies,
6562 	},
6563 	{
6564 		.procname	=	"gc_interval",
6565 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_interval,
6566 		.maxlen		=	sizeof(int),
6567 		.mode		=	0644,
6568 		.proc_handler	=	proc_dointvec_jiffies,
6569 	},
6570 	{
6571 		.procname	=	"gc_elasticity",
6572 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
6573 		.maxlen		=	sizeof(int),
6574 		.mode		=	0644,
6575 		.proc_handler	=	proc_dointvec,
6576 	},
6577 	{
6578 		.procname	=	"mtu_expires",
6579 		.data		=	&init_net.ipv6.sysctl.ip6_rt_mtu_expires,
6580 		.maxlen		=	sizeof(int),
6581 		.mode		=	0644,
6582 		.proc_handler	=	proc_dointvec_jiffies,
6583 	},
6584 	{
6585 		.procname	=	"min_adv_mss",
6586 		.data		=	&init_net.ipv6.sysctl.ip6_rt_min_advmss,
6587 		.maxlen		=	sizeof(int),
6588 		.mode		=	0644,
6589 		.proc_handler	=	proc_dointvec,
6590 	},
6591 	{
6592 		.procname	=	"gc_min_interval_ms",
6593 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
6594 		.maxlen		=	sizeof(int),
6595 		.mode		=	0644,
6596 		.proc_handler	=	proc_dointvec_ms_jiffies,
6597 	},
6598 	{
6599 		.procname	=	"skip_notify_on_dev_down",
6600 		.data		=	&init_net.ipv6.sysctl.skip_notify_on_dev_down,
6601 		.maxlen		=	sizeof(u8),
6602 		.mode		=	0644,
6603 		.proc_handler	=	proc_dou8vec_minmax,
6604 		.extra1		=	SYSCTL_ZERO,
6605 		.extra2		=	SYSCTL_ONE,
6606 	},
6607 };
6608 
6609 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
6610 {
6611 	struct ctl_table *table;
6612 
6613 	table = kmemdup(ipv6_route_table_template,
6614 			sizeof(ipv6_route_table_template),
6615 			GFP_KERNEL);
6616 
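	/* Entries are patched per netns by array index; keep these
	 * assignments in sync with the order of
	 * ipv6_route_table_template above.
	 */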
6617 	if (table) {
6618 		table[0].data = &net->ipv6.sysctl.ip6_rt_max_size;
6619 		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
6620 		table[2].data = &net->ipv6.sysctl.flush_delay;
6621 		table[2].extra1 = net;
6622 		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
6623 		table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
6624 		table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
6625 		table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
6626 		table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
6627 		table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
6628 		table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
6629 		table[10].data = &net->ipv6.sysctl.skip_notify_on_dev_down;
6630 	}
6631 
6632 	return table;
6633 }
6634 
6635 size_t ipv6_route_sysctl_table_size(struct net *net)
6636 {
6637 	/* Don't export sysctls to unprivileged users */
6638 	if (net->user_ns != &init_user_ns)
6639 		return 1;
6640 
6641 	return ARRAY_SIZE(ipv6_route_table_template);
6642 }
6643 #endif
6644 
6645 static int __net_init ip6_route_net_init(struct net *net)
6646 {
6647 	int ret = -ENOMEM;
6648 
6649 	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
6650 	       sizeof(net->ipv6.ip6_dst_ops));
6651 
6652 	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
6653 		goto out_ip6_dst_ops;
6654 
6655 	net->ipv6.fib6_null_entry = fib6_info_alloc(GFP_KERNEL, true);
6656 	if (!net->ipv6.fib6_null_entry)
6657 		goto out_ip6_dst_entries;
6658 	memcpy(net->ipv6.fib6_null_entry, &fib6_null_entry_template,
6659 	       sizeof(*net->ipv6.fib6_null_entry));
6660 
6661 	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
6662 					   sizeof(*net->ipv6.ip6_null_entry),
6663 					   GFP_KERNEL);
6664 	if (!net->ipv6.ip6_null_entry)
6665 		goto out_fib6_null_entry;
6666 	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6667 	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
6668 			 ip6_template_metrics, true);
6669 	INIT_LIST_HEAD(&net->ipv6.ip6_null_entry->dst.rt_uncached);
6670 
6671 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6672 	net->ipv6.fib6_has_custom_rules = false;
6673 	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
6674 					       sizeof(*net->ipv6.ip6_prohibit_entry),
6675 					       GFP_KERNEL);
6676 	if (!net->ipv6.ip6_prohibit_entry)
6677 		goto out_ip6_null_entry;
6678 	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6679 	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
6680 			 ip6_template_metrics, true);
6681 	INIT_LIST_HEAD(&net->ipv6.ip6_prohibit_entry->dst.rt_uncached);
6682 
6683 	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
6684 					       sizeof(*net->ipv6.ip6_blk_hole_entry),
6685 					       GFP_KERNEL);
6686 	if (!net->ipv6.ip6_blk_hole_entry)
6687 		goto out_ip6_prohibit_entry;
6688 	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6689 	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
6690 			 ip6_template_metrics, true);
6691 	INIT_LIST_HEAD(&net->ipv6.ip6_blk_hole_entry->dst.rt_uncached);
6692 #ifdef CONFIG_IPV6_SUBTREES
6693 	net->ipv6.fib6_routes_require_src = 0;
6694 #endif
6695 #endif
6696 
6697 	net->ipv6.sysctl.flush_delay = 0;
6698 	net->ipv6.sysctl.ip6_rt_max_size = INT_MAX;
6699 	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
6700 	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
6701 	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
6702 	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
6703 	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
6704 	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
6705 	net->ipv6.sysctl.skip_notify_on_dev_down = 0;
6706 
6707 	atomic_set(&net->ipv6.ip6_rt_gc_expire, 30*HZ);
6708 
6709 	ret = 0;
6710 out:
6711 	return ret;
6712 
6713 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6714 out_ip6_prohibit_entry:
6715 	kfree(net->ipv6.ip6_prohibit_entry);
6716 out_ip6_null_entry:
6717 	kfree(net->ipv6.ip6_null_entry);
6718 #endif
6719 out_fib6_null_entry:
6720 	kfree(net->ipv6.fib6_null_entry);
6721 out_ip6_dst_entries:
6722 	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
6723 out_ip6_dst_ops:
6724 	goto out;
6725 }
6726 
6727 static void __net_exit ip6_route_net_exit(struct net *net)
6728 {
6729 	kfree(net->ipv6.fib6_null_entry);
6730 	kfree(net->ipv6.ip6_null_entry);
6731 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6732 	kfree(net->ipv6.ip6_prohibit_entry);
6733 	kfree(net->ipv6.ip6_blk_hole_entry);
6734 #endif
6735 	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
6736 }
6737 
6738 static int __net_init ip6_route_net_init_late(struct net *net)
6739 {
6740 #ifdef CONFIG_PROC_FS
6741 	if (!proc_create_net("ipv6_route", 0, net->proc_net,
6742 			     &ipv6_route_seq_ops,
6743 			     sizeof(struct ipv6_route_iter)))
6744 		return -ENOMEM;
6745 
6746 	if (!proc_create_net_single("rt6_stats", 0444, net->proc_net,
6747 				    rt6_stats_seq_show, NULL)) {
6748 		remove_proc_entry("ipv6_route", net->proc_net);
6749 		return -ENOMEM;
6750 	}
6751 #endif
6752 	return 0;
6753 }
6754 
6755 static void __net_exit ip6_route_net_exit_late(struct net *net)
6756 {
6757 #ifdef CONFIG_PROC_FS
6758 	remove_proc_entry("ipv6_route", net->proc_net);
6759 	remove_proc_entry("rt6_stats", net->proc_net);
6760 #endif
6761 }
6762 
6763 static struct pernet_operations ip6_route_net_ops = {
6764 	.init = ip6_route_net_init,
6765 	.exit = ip6_route_net_exit,
6766 };
6767 
6768 static int __net_init ipv6_inetpeer_init(struct net *net)
6769 {
6770 	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
6771 
6772 	if (!bp)
6773 		return -ENOMEM;
6774 	inet_peer_base_init(bp);
6775 	net->ipv6.peers = bp;
6776 	return 0;
6777 }
6778 
6779 static void __net_exit ipv6_inetpeer_exit(struct net *net)
6780 {
6781 	struct inet_peer_base *bp = net->ipv6.peers;
6782 
6783 	net->ipv6.peers = NULL;
6784 	inetpeer_invalidate_tree(bp);
6785 	kfree(bp);
6786 }
6787 
6788 static struct pernet_operations ipv6_inetpeer_ops = {
6789 	.init	=	ipv6_inetpeer_init,
6790 	.exit	=	ipv6_inetpeer_exit,
6791 };
6792 
6793 static struct pernet_operations ip6_route_net_late_ops = {
6794 	.init = ip6_route_net_init_late,
6795 	.exit = ip6_route_net_exit_late,
6796 };
6797 
6798 static struct notifier_block ip6_route_dev_notifier = {
6799 	.notifier_call = ip6_route_dev_notify,
6800 	.priority = ADDRCONF_NOTIFY_PRIORITY - 10,
6801 };
6802 
6803 void __init ip6_route_init_special_entries(void)
6804 {
	/* Registration of the loopback device happens before this code
	 * runs, so the loopback reference in rt6_info is not taken
	 * automatically; take it manually for init_net.
	 */
6808 	init_net.ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = init_net.loopback_dev;
6809 	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
6810 	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
6812 	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
6813 	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6814 	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
6815 	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#endif
6817 }
6818 
6819 #if IS_BUILTIN(CONFIG_IPV6)
6820 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
6821 DEFINE_BPF_ITER_FUNC(ipv6_route, struct bpf_iter_meta *meta, struct fib6_info *rt)
6822 
6823 BTF_ID_LIST_SINGLE(btf_fib6_info_id, struct, fib6_info)
6824 
6825 static const struct bpf_iter_seq_info ipv6_route_seq_info = {
6826 	.seq_ops		= &ipv6_route_seq_ops,
6827 	.init_seq_private	= bpf_iter_init_seq_net,
6828 	.fini_seq_private	= bpf_iter_fini_seq_net,
6829 	.seq_priv_size		= sizeof(struct ipv6_route_iter),
6830 };
6831 
6832 static struct bpf_iter_reg ipv6_route_reg_info = {
6833 	.target			= "ipv6_route",
6834 	.ctx_arg_info_size	= 1,
6835 	.ctx_arg_info		= {
6836 		{ offsetof(struct bpf_iter__ipv6_route, rt),
6837 		  PTR_TO_BTF_ID_OR_NULL },
6838 	},
6839 	.seq_info		= &ipv6_route_seq_info,
6840 };
6841 
6842 static int __init bpf_iter_register(void)
6843 {
6844 	ipv6_route_reg_info.ctx_arg_info[0].btf_id = *btf_fib6_info_id;
6845 	return bpf_iter_reg_target(&ipv6_route_reg_info);
6846 }
6847 
6848 static void bpf_iter_unregister(void)
6849 {
6850 	bpf_iter_unreg_target(&ipv6_route_reg_info);
6851 }
6852 #endif
6853 #endif
6854 
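/* These doit handlers run without holding RTNL (RTNL_FLAG_DOIT_UNLOCKED);
 * they rely on RCU and fib6 table locks instead.
 */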
6855 static const struct rtnl_msg_handler ip6_route_rtnl_msg_handlers[] __initconst_or_module = {
6856 	{.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_NEWROUTE,
6857 	 .doit = inet6_rtm_newroute, .flags = RTNL_FLAG_DOIT_UNLOCKED},
6858 	{.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_DELROUTE,
6859 	 .doit = inet6_rtm_delroute, .flags = RTNL_FLAG_DOIT_UNLOCKED},
6860 	{.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETROUTE,
6861 	 .doit = inet6_rtm_getroute, .flags = RTNL_FLAG_DOIT_UNLOCKED},
6862 };
6863 
6864 int __init ip6_route_init(void)
6865 {
6866 	int ret;
6867 	int cpu;
6868 
6869 	ret = -ENOMEM;
6870 	ip6_dst_ops_template.kmem_cachep =
6871 		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
6872 				  SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
6873 	if (!ip6_dst_ops_template.kmem_cachep)
6874 		goto out;
6875 
6876 	ret = dst_entries_init(&ip6_dst_blackhole_ops);
6877 	if (ret)
6878 		goto out_kmem_cache;
6879 
6880 	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
6881 	if (ret)
6882 		goto out_dst_entries;
6883 
6884 	ret = register_pernet_subsys(&ip6_route_net_ops);
6885 	if (ret)
6886 		goto out_register_inetpeer;
6887 
6888 	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
6889 
6890 	ret = fib6_init();
6891 	if (ret)
6892 		goto out_register_subsys;
6893 
6894 	ret = xfrm6_init();
6895 	if (ret)
6896 		goto out_fib6_init;
6897 
6898 	ret = fib6_rules_init();
6899 	if (ret)
6900 		goto xfrm6_init;
6901 
6902 	ret = register_pernet_subsys(&ip6_route_net_late_ops);
6903 	if (ret)
6904 		goto fib6_rules_init;
6905 
6906 	ret = rtnl_register_many(ip6_route_rtnl_msg_handlers);
6907 	if (ret < 0)
6908 		goto out_register_late_subsys;
6909 
6910 	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
6911 	if (ret)
6912 		goto out_register_late_subsys;
6913 
6914 #if IS_BUILTIN(CONFIG_IPV6)
6915 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
6916 	ret = bpf_iter_register();
6917 	if (ret)
6918 		goto out_register_late_subsys;
6919 #endif
6920 #endif
6921 
6922 	for_each_possible_cpu(cpu) {
6923 		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
6924 
6925 		INIT_LIST_HEAD(&ul->head);
6926 		spin_lock_init(&ul->lock);
6927 	}
6928 
6929 out:
6930 	return ret;
6931 
6932 out_register_late_subsys:
6933 	rtnl_unregister_all(PF_INET6);
6934 	unregister_pernet_subsys(&ip6_route_net_late_ops);
6935 fib6_rules_init:
6936 	fib6_rules_cleanup();
6937 xfrm6_init:
6938 	xfrm6_fini();
6939 out_fib6_init:
6940 	fib6_gc_cleanup();
6941 out_register_subsys:
6942 	unregister_pernet_subsys(&ip6_route_net_ops);
6943 out_register_inetpeer:
6944 	unregister_pernet_subsys(&ipv6_inetpeer_ops);
6945 out_dst_entries:
6946 	dst_entries_destroy(&ip6_dst_blackhole_ops);
6947 out_kmem_cache:
6948 	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
6949 	goto out;
6950 }
6951 
6952 void ip6_route_cleanup(void)
6953 {
6954 #if IS_BUILTIN(CONFIG_IPV6)
6955 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
6956 	bpf_iter_unregister();
6957 #endif
6958 #endif
6959 	unregister_netdevice_notifier(&ip6_route_dev_notifier);
6960 	unregister_pernet_subsys(&ip6_route_net_late_ops);
6961 	fib6_rules_cleanup();
6962 	xfrm6_fini();
6963 	fib6_gc_cleanup();
6964 	unregister_pernet_subsys(&ipv6_inetpeer_ops);
6965 	unregister_pernet_subsys(&ip6_route_net_ops);
6966 	dst_entries_destroy(&ip6_dst_blackhole_ops);
6967 	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
6968 }
6969