xref: /linux/net/ipv6/route.c (revision 5ad509c1fdad4bf0993b72d1b3d462f036d8a0d8)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	Linux INET6 implementation
4  *	FIB front-end.
5  *
6  *	Authors:
7  *	Pedro Roque		<roque@di.fc.ul.pt>
8  */
9 
10 /*	Changes:
11  *
12  *	YOSHIFUJI Hideaki @USAGI
13  *		reworked default router selection.
14  *		- respect outgoing interface
15  *		- select from (probably) reachable routers (i.e.
16  *		routers in REACHABLE, STALE, DELAY or PROBE states).
17  *		- always select the same router if it is (probably)
18  *		reachable.  otherwise, round-robin the list.
19  *	Ville Nuorvala
20  *		Fixed routing subtrees.
21  */
22 
23 #define pr_fmt(fmt) "IPv6: " fmt
24 
25 #include <linux/capability.h>
26 #include <linux/errno.h>
27 #include <linux/export.h>
28 #include <linux/types.h>
29 #include <linux/times.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/route.h>
34 #include <linux/netdevice.h>
35 #include <linux/in6.h>
36 #include <linux/mroute6.h>
37 #include <linux/init.h>
38 #include <linux/if_arp.h>
39 #include <linux/proc_fs.h>
40 #include <linux/seq_file.h>
41 #include <linux/nsproxy.h>
42 #include <linux/slab.h>
43 #include <linux/jhash.h>
44 #include <linux/siphash.h>
45 #include <net/net_namespace.h>
46 #include <net/snmp.h>
47 #include <net/ipv6.h>
48 #include <net/ip6_fib.h>
49 #include <net/ip6_route.h>
50 #include <net/ndisc.h>
51 #include <net/addrconf.h>
52 #include <net/tcp.h>
53 #include <linux/rtnetlink.h>
54 #include <net/dst.h>
55 #include <net/dst_metadata.h>
56 #include <net/xfrm.h>
57 #include <net/netevent.h>
58 #include <net/netlink.h>
59 #include <net/rtnh.h>
60 #include <net/lwtunnel.h>
61 #include <net/ip_tunnels.h>
62 #include <net/l3mdev.h>
63 #include <net/ip.h>
64 #include <linux/uaccess.h>
65 #include <linux/btf_ids.h>
66 
67 #ifdef CONFIG_SYSCTL
68 #include <linux/sysctl.h>
69 #endif
70 
71 static int ip6_rt_type_to_error(u8 fib6_type);
72 
73 #define CREATE_TRACE_POINTS
74 #include <trace/events/fib6.h>
75 EXPORT_TRACEPOINT_SYMBOL_GPL(fib6_table_lookup);
76 #undef CREATE_TRACE_POINTS
77 
78 enum rt6_nud_state {
79 	RT6_NUD_FAIL_HARD = -3,
80 	RT6_NUD_FAIL_PROBE = -2,
81 	RT6_NUD_FAIL_DO_RR = -1,
82 	RT6_NUD_SUCCEED = 1
83 };
84 
85 INDIRECT_CALLABLE_SCOPE
86 struct dst_entry	*ip6_dst_check(struct dst_entry *dst, u32 cookie);
87 static unsigned int	 ip6_default_advmss(const struct dst_entry *dst);
88 INDIRECT_CALLABLE_SCOPE
89 unsigned int		ip6_mtu(const struct dst_entry *dst);
90 static void		ip6_negative_advice(struct sock *sk,
91 					    struct dst_entry *dst);
92 static void		ip6_dst_destroy(struct dst_entry *);
93 static void		ip6_dst_ifdown(struct dst_entry *,
94 				       struct net_device *dev);
95 static void		 ip6_dst_gc(struct dst_ops *ops);
96 
97 static int		ip6_pkt_discard(struct sk_buff *skb);
98 static int		ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
99 static int		ip6_pkt_prohibit(struct sk_buff *skb);
100 static int		ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
101 static void		ip6_link_failure(struct sk_buff *skb);
102 static void		ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
103 					   struct sk_buff *skb, u32 mtu,
104 					   bool confirm_neigh);
105 static void		rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
106 					struct sk_buff *skb);
107 static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
108 			   int strict);
109 static size_t rt6_nlmsg_size(struct fib6_info *f6i);
110 static int rt6_fill_node(struct net *net, struct sk_buff *skb,
111 			 struct fib6_info *rt, struct dst_entry *dst,
112 			 struct in6_addr *dest, struct in6_addr *src,
113 			 int iif, int type, u32 portid, u32 seq,
114 			 unsigned int flags);
115 static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
116 					   const struct in6_addr *daddr,
117 					   const struct in6_addr *saddr);
118 
119 #ifdef CONFIG_IPV6_ROUTE_INFO
120 static struct fib6_info *rt6_add_route_info(struct net *net,
121 					   const struct in6_addr *prefix, int prefixlen,
122 					   const struct in6_addr *gwaddr,
123 					   struct net_device *dev,
124 					   unsigned int pref);
125 static struct fib6_info *rt6_get_route_info(struct net *net,
126 					   const struct in6_addr *prefix, int prefixlen,
127 					   const struct in6_addr *gwaddr,
128 					   struct net_device *dev);
129 #endif
130 
131 struct uncached_list {
132 	spinlock_t		lock;
133 	struct list_head	head;
134 };
135 
136 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
137 
138 void rt6_uncached_list_add(struct rt6_info *rt)
139 {
140 	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
141 
142 	rt->dst.rt_uncached_list = ul;
143 
144 	spin_lock_bh(&ul->lock);
145 	list_add_tail(&rt->dst.rt_uncached, &ul->head);
146 	spin_unlock_bh(&ul->lock);
147 }
148 
149 void rt6_uncached_list_del(struct rt6_info *rt)
150 {
151 	struct uncached_list *ul = rt->dst.rt_uncached_list;
152 
153 	if (ul) {
154 		spin_lock_bh(&ul->lock);
155 		list_del_init(&rt->dst.rt_uncached);
156 		spin_unlock_bh(&ul->lock);
157 	}
158 }
159 
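/* Detach uncached routes from a disappearing device: repoint their idev
 * and dst.dev at the blackhole device and drop them from the per-cpu
 * uncached lists.
 */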
160 static void rt6_uncached_list_flush_dev(struct net_device *dev)
161 {
162 	int cpu;
163 
164 	for_each_possible_cpu(cpu) {
165 		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
166 		struct rt6_info *rt, *safe;
167 
168 		if (list_empty(&ul->head))
169 			continue;
170 
171 		spin_lock_bh(&ul->lock);
172 		list_for_each_entry_safe(rt, safe, &ul->head, dst.rt_uncached) {
173 			struct inet6_dev *rt_idev = rt->rt6i_idev;
174 			struct net_device *rt_dev = rt->dst.dev;
175 			bool handled = false;
176 
177 			if (rt_idev && rt_idev->dev == dev) {
178 				rt->rt6i_idev = in6_dev_get(blackhole_netdev);
179 				in6_dev_put(rt_idev);
180 				handled = true;
181 			}
182 
183 			if (rt_dev == dev) {
184 				rt->dst.dev = blackhole_netdev;
185 				netdev_ref_replace(rt_dev, blackhole_netdev,
186 						   &rt->dst.dev_tracker,
187 						   GFP_ATOMIC);
188 				handled = true;
189 			}
190 			if (handled)
191 				list_del_init(&rt->dst.rt_uncached);
192 		}
193 		spin_unlock_bh(&ul->lock);
194 	}
195 }
196 
197 static inline const void *choose_neigh_daddr(const struct in6_addr *p,
198 					     struct sk_buff *skb,
199 					     const void *daddr)
200 {
201 	if (!ipv6_addr_any(p))
202 		return (const void *) p;
203 	else if (skb)
204 		return &ipv6_hdr(skb)->daddr;
205 	return daddr;
206 }
207 
208 struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
209 				   struct net_device *dev,
210 				   struct sk_buff *skb,
211 				   const void *daddr)
212 {
213 	struct neighbour *n;
214 
215 	daddr = choose_neigh_daddr(gw, skb, daddr);
216 	n = __ipv6_neigh_lookup(dev, daddr);
217 	if (n)
218 		return n;
219 
220 	n = neigh_create(&nd_tbl, daddr, dev);
221 	return IS_ERR(n) ? NULL : n;
222 }
223 
224 static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
225 					      struct sk_buff *skb,
226 					      const void *daddr)
227 {
228 	const struct rt6_info *rt = dst_rt6_info(dst);
229 
230 	return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any),
231 				dst_dev(dst), skb, daddr);
232 }
233 
234 static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
235 {
236 	const struct rt6_info *rt = dst_rt6_info(dst);
237 	struct net_device *dev = dst_dev(dst);
238 
239 	daddr = choose_neigh_daddr(rt6_nexthop(rt, &in6addr_any), NULL, daddr);
240 	if (!daddr)
241 		return;
242 	if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
243 		return;
244 	if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
245 		return;
246 	__ipv6_confirm_neigh(dev, daddr);
247 }
248 
249 static struct dst_ops ip6_dst_ops_template = {
250 	.family			=	AF_INET6,
251 	.gc			=	ip6_dst_gc,
252 	.gc_thresh		=	1024,
253 	.check			=	ip6_dst_check,
254 	.default_advmss		=	ip6_default_advmss,
255 	.mtu			=	ip6_mtu,
256 	.cow_metrics		=	dst_cow_metrics_generic,
257 	.destroy		=	ip6_dst_destroy,
258 	.ifdown			=	ip6_dst_ifdown,
259 	.negative_advice	=	ip6_negative_advice,
260 	.link_failure		=	ip6_link_failure,
261 	.update_pmtu		=	ip6_rt_update_pmtu,
262 	.redirect		=	rt6_do_redirect,
263 	.local_out		=	__ip6_local_out,
264 	.neigh_lookup		=	ip6_dst_neigh_lookup,
265 	.confirm_neigh		=	ip6_confirm_neigh,
266 };
267 
268 static struct dst_ops ip6_dst_blackhole_ops = {
269 	.family			= AF_INET6,
270 	.default_advmss		= ip6_default_advmss,
271 	.neigh_lookup		= ip6_dst_neigh_lookup,
272 	.check			= ip6_dst_check,
273 	.destroy		= ip6_dst_destroy,
274 	.cow_metrics		= dst_cow_metrics_generic,
275 	.update_pmtu		= dst_blackhole_update_pmtu,
276 	.redirect		= dst_blackhole_redirect,
277 	.mtu			= dst_blackhole_mtu,
278 };
279 
280 static const u32 ip6_template_metrics[RTAX_MAX] = {
281 	[RTAX_HOPLIMIT - 1] = 0,
282 };
283 
284 static const struct fib6_info fib6_null_entry_template = {
285 	.fib6_flags	= (RTF_REJECT | RTF_NONEXTHOP),
286 	.fib6_protocol  = RTPROT_KERNEL,
287 	.fib6_metric	= ~(u32)0,
288 	.fib6_ref	= REFCOUNT_INIT(1),
289 	.fib6_type	= RTN_UNREACHABLE,
290 	.fib6_metrics	= (struct dst_metrics *)&dst_default_metrics,
291 };
292 
293 static const struct rt6_info ip6_null_entry_template = {
294 	.dst = {
295 		.__rcuref	= RCUREF_INIT(1),
296 		.__use		= 1,
297 		.obsolete	= DST_OBSOLETE_FORCE_CHK,
298 		.error		= -ENETUNREACH,
299 		.input		= ip6_pkt_discard,
300 		.output		= ip6_pkt_discard_out,
301 	},
302 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
303 };
304 
305 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
306 
307 static const struct rt6_info ip6_prohibit_entry_template = {
308 	.dst = {
309 		.__rcuref	= RCUREF_INIT(1),
310 		.__use		= 1,
311 		.obsolete	= DST_OBSOLETE_FORCE_CHK,
312 		.error		= -EACCES,
313 		.input		= ip6_pkt_prohibit,
314 		.output		= ip6_pkt_prohibit_out,
315 	},
316 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
317 };
318 
319 static const struct rt6_info ip6_blk_hole_entry_template = {
320 	.dst = {
321 		.__rcuref	= RCUREF_INIT(1),
322 		.__use		= 1,
323 		.obsolete	= DST_OBSOLETE_FORCE_CHK,
324 		.error		= -EINVAL,
325 		.input		= dst_discard,
326 		.output		= dst_discard_out,
327 	},
328 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
329 };
330 
331 #endif
332 
333 static void rt6_info_init(struct rt6_info *rt)
334 {
335 	memset_after(rt, 0, dst);
336 }
337 
338 /* allocate dst with ip6_dst_ops */
339 struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
340 			       int flags)
341 {
342 	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
343 					DST_OBSOLETE_FORCE_CHK, flags);
344 
345 	if (rt) {
346 		rt6_info_init(rt);
347 		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
348 	}
349 
350 	return rt;
351 }
352 EXPORT_SYMBOL(ip6_dst_alloc);
353 
354 static void ip6_dst_destroy(struct dst_entry *dst)
355 {
356 	struct rt6_info *rt = dst_rt6_info(dst);
357 	struct fib6_info *from;
358 	struct inet6_dev *idev;
359 
360 	ip_dst_metrics_put(dst);
361 	rt6_uncached_list_del(rt);
362 
363 	idev = rt->rt6i_idev;
364 	if (idev) {
365 		rt->rt6i_idev = NULL;
366 		in6_dev_put(idev);
367 	}
368 
369 	from = unrcu_pointer(xchg(&rt->from, NULL));
370 	fib6_info_release(from);
371 }
372 
373 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
374 {
375 	struct rt6_info *rt = dst_rt6_info(dst);
376 	struct inet6_dev *idev = rt->rt6i_idev;
377 	struct fib6_info *from;
378 
379 	if (idev && idev->dev != blackhole_netdev) {
380 		struct inet6_dev *blackhole_idev = in6_dev_get(blackhole_netdev);
381 
382 		if (blackhole_idev) {
383 			rt->rt6i_idev = blackhole_idev;
384 			in6_dev_put(idev);
385 		}
386 	}
387 	from = unrcu_pointer(xchg(&rt->from, NULL));
388 	fib6_info_release(from);
389 }
390 
391 static bool __rt6_check_expired(const struct rt6_info *rt)
392 {
393 	if (rt->rt6i_flags & RTF_EXPIRES)
394 		return time_after(jiffies, READ_ONCE(rt->dst.expires));
395 	return false;
396 }
397 
398 static bool rt6_check_expired(const struct rt6_info *rt)
399 {
400 	struct fib6_info *from;
401 
402 	from = rcu_dereference(rt->from);
403 
404 	if (rt->rt6i_flags & RTF_EXPIRES) {
405 		if (time_after(jiffies, READ_ONCE(rt->dst.expires)))
406 			return true;
407 	} else if (from) {
408 		return READ_ONCE(rt->dst.obsolete) != DST_OBSOLETE_FORCE_CHK ||
409 			fib6_check_expired(from);
410 	}
411 	return false;
412 }
413 
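/* Return the first route in @rt's fib6 node leaf list that shares @rt's
 * metric and qualifies for ECMP, i.e. the head of the sibling group @rt
 * belongs to; NULL if there is none.
 */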
414 static struct fib6_info *
415 rt6_multipath_first_sibling_rcu(const struct fib6_info *rt)
416 {
417 	struct fib6_info *iter;
418 	struct fib6_node *fn;
419 
420 	fn = rcu_dereference(rt->fib6_node);
421 	if (!fn)
422 		goto out;
423 	iter = rcu_dereference(fn->leaf);
424 	if (!iter)
425 		goto out;
426 
427 	while (iter) {
428 		if (iter->fib6_metric == rt->fib6_metric &&
429 		    rt6_qualify_for_ecmp(iter))
430 			return iter;
431 		iter = rcu_dereference(iter->fib6_next);
432 	}
433 
434 out:
435 	return NULL;
436 }
437 
438 void fib6_select_path(const struct net *net, struct fib6_result *res,
439 		      struct flowi6 *fl6, int oif, bool have_oif_match,
440 		      const struct sk_buff *skb, int strict)
441 {
442 	struct fib6_info *first, *match = res->f6i;
443 	struct fib6_info *sibling;
444 	int hash;
445 
446 	if (!match->nh && (!match->fib6_nsiblings || have_oif_match))
447 		goto out;
448 
449 	if (match->nh && have_oif_match && res->nh)
450 		return;
451 
452 	if (skb)
453 		IP6CB(skb)->flags |= IP6SKB_MULTIPATH;
454 
455 	/* We might have already computed the hash for ICMPv6 errors. In such
456 	 * case it will always be non-zero. Otherwise now is the time to do it.
457 	 */
458 	if (!fl6->mp_hash &&
459 	    (!match->nh || nexthop_is_multipath(match->nh)))
460 		fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);
461 
462 	if (unlikely(match->nh)) {
463 		nexthop_path_fib6_result(res, fl6->mp_hash);
464 		return;
465 	}
466 
467 	first = rt6_multipath_first_sibling_rcu(match);
468 	if (!first)
469 		goto out;
470 
471 	hash = fl6->mp_hash;
472 	if (hash <= atomic_read(&first->fib6_nh->fib_nh_upper_bound)) {
473 		if (rt6_score_route(first->fib6_nh, first->fib6_flags, oif,
474 				    strict) >= 0)
475 			match = first;
476 		goto out;
477 	}
478 
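	/* Walk the siblings and select the first one whose hash upper bound
	 * covers the flow hash, provided its nexthop scores as usable;
	 * otherwise keep the current match.
	 */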
479 	list_for_each_entry_rcu(sibling, &first->fib6_siblings,
480 				fib6_siblings) {
481 		const struct fib6_nh *nh = sibling->fib6_nh;
482 		int nh_upper_bound;
483 
484 		nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound);
485 		if (hash > nh_upper_bound)
486 			continue;
487 		if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0)
488 			break;
489 		match = sibling;
490 		break;
491 	}
492 
493 out:
494 	res->f6i = match;
495 	res->nh = match->fib6_nh;
496 }
497 
498 /*
499  *	Route lookup. rcu_read_lock() should be held.
500  */
501 
502 static bool __rt6_device_match(struct net *net, const struct fib6_nh *nh,
503 			       const struct in6_addr *saddr, int oif, int flags)
504 {
505 	const struct net_device *dev;
506 
507 	if (nh->fib_nh_flags & RTNH_F_DEAD)
508 		return false;
509 
510 	dev = nh->fib_nh_dev;
511 	if (oif) {
512 		if (dev->ifindex == oif)
513 			return true;
514 	} else {
515 		if (ipv6_chk_addr(net, saddr, dev,
516 				  flags & RT6_LOOKUP_F_IFACE))
517 			return true;
518 	}
519 
520 	return false;
521 }
522 
523 struct fib6_nh_dm_arg {
524 	struct net		*net;
525 	const struct in6_addr	*saddr;
526 	int			oif;
527 	int			flags;
528 	struct fib6_nh		*nh;
529 };
530 
531 static int __rt6_nh_dev_match(struct fib6_nh *nh, void *_arg)
532 {
533 	struct fib6_nh_dm_arg *arg = _arg;
534 
535 	arg->nh = nh;
536 	return __rt6_device_match(arg->net, nh, arg->saddr, arg->oif,
537 				  arg->flags);
538 }
539 
540 /* returns fib6_nh from nexthop or NULL */
541 static struct fib6_nh *rt6_nh_dev_match(struct net *net, struct nexthop *nh,
542 					struct fib6_result *res,
543 					const struct in6_addr *saddr,
544 					int oif, int flags)
545 {
546 	struct fib6_nh_dm_arg arg = {
547 		.net   = net,
548 		.saddr = saddr,
549 		.oif   = oif,
550 		.flags = flags,
551 	};
552 
553 	if (nexthop_is_blackhole(nh))
554 		return NULL;
555 
556 	if (nexthop_for_each_fib6_nh(nh, __rt6_nh_dev_match, &arg))
557 		return arg.nh;
558 
559 	return NULL;
560 }
561 
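/* Pick the fib6_info/nexthop whose device matches the requested output
 * interface, or which owns the source address when no interface was
 * given; fall back to the null entry when strict matching fails or the
 * nexthop is dead.
 */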
562 static void rt6_device_match(struct net *net, struct fib6_result *res,
563 			     const struct in6_addr *saddr, int oif, int flags)
564 {
565 	struct fib6_info *f6i = res->f6i;
566 	struct fib6_info *spf6i;
567 	struct fib6_nh *nh;
568 
569 	if (!oif && ipv6_addr_any(saddr)) {
570 		if (unlikely(f6i->nh)) {
571 			nh = nexthop_fib6_nh(f6i->nh);
572 			if (nexthop_is_blackhole(f6i->nh))
573 				goto out_blackhole;
574 		} else {
575 			nh = f6i->fib6_nh;
576 		}
577 		if (!(nh->fib_nh_flags & RTNH_F_DEAD))
578 			goto out;
579 	}
580 
581 	for (spf6i = f6i; spf6i; spf6i = rcu_dereference(spf6i->fib6_next)) {
582 		bool matched = false;
583 
584 		if (unlikely(spf6i->nh)) {
585 			nh = rt6_nh_dev_match(net, spf6i->nh, res, saddr,
586 					      oif, flags);
587 			if (nh)
588 				matched = true;
589 		} else {
590 			nh = spf6i->fib6_nh;
591 			if (__rt6_device_match(net, nh, saddr, oif, flags))
592 				matched = true;
593 		}
594 		if (matched) {
595 			res->f6i = spf6i;
596 			goto out;
597 		}
598 	}
599 
600 	if (oif && flags & RT6_LOOKUP_F_IFACE) {
601 		res->f6i = net->ipv6.fib6_null_entry;
602 		nh = res->f6i->fib6_nh;
603 		goto out;
604 	}
605 
606 	if (unlikely(f6i->nh)) {
607 		nh = nexthop_fib6_nh(f6i->nh);
608 		if (nexthop_is_blackhole(f6i->nh))
609 			goto out_blackhole;
610 	} else {
611 		nh = f6i->fib6_nh;
612 	}
613 
614 	if (nh->fib_nh_flags & RTNH_F_DEAD) {
615 		res->f6i = net->ipv6.fib6_null_entry;
616 		nh = res->f6i->fib6_nh;
617 	}
618 out:
619 	res->nh = nh;
620 	res->fib6_type = res->f6i->fib6_type;
621 	res->fib6_flags = res->f6i->fib6_flags;
622 	return;
623 
624 out_blackhole:
625 	res->fib6_flags |= RTF_REJECT;
626 	res->fib6_type = RTN_BLACKHOLE;
627 	res->nh = nh;
628 }
629 
630 #ifdef CONFIG_IPV6_ROUTER_PREF
631 struct __rt6_probe_work {
632 	struct work_struct work;
633 	struct in6_addr target;
634 	struct net_device *dev;
635 	netdevice_tracker dev_tracker;
636 };
637 
638 static void rt6_probe_deferred(struct work_struct *w)
639 {
640 	struct in6_addr mcaddr;
641 	struct __rt6_probe_work *work =
642 		container_of(w, struct __rt6_probe_work, work);
643 
644 	addrconf_addr_solict_mult(&work->target, &mcaddr);
645 	ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
646 	netdev_put(work->dev, &work->dev_tracker);
647 	kfree(work);
648 }
649 
650 static void rt6_probe(struct fib6_nh *fib6_nh)
651 {
652 	struct __rt6_probe_work *work = NULL;
653 	const struct in6_addr *nh_gw;
654 	unsigned long last_probe;
655 	struct neighbour *neigh;
656 	struct net_device *dev;
657 	struct inet6_dev *idev;
658 
659 	/*
660 	 * Okay, this does not seem to be appropriate
661 	 * for now; however, we need to check whether the router
662 	 * is really reachable, aka Router Reachability Probing.
663 	 *
664 	 * Router Reachability Probe MUST be rate-limited
665 	 * to no more than one per minute.
666 	 */
667 	if (!fib6_nh->fib_nh_gw_family)
668 		return;
669 
670 	nh_gw = &fib6_nh->fib_nh_gw6;
671 	dev = fib6_nh->fib_nh_dev;
672 	rcu_read_lock();
673 	last_probe = READ_ONCE(fib6_nh->last_probe);
674 	idev = __in6_dev_get(dev);
675 	if (!idev)
676 		goto out;
677 	neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
678 	if (neigh) {
679 		if (READ_ONCE(neigh->nud_state) & NUD_VALID)
680 			goto out;
681 
682 		write_lock_bh(&neigh->lock);
683 		if (!(neigh->nud_state & NUD_VALID) &&
684 		    time_after(jiffies,
685 			       neigh->updated +
686 			       READ_ONCE(idev->cnf.rtr_probe_interval))) {
687 			work = kmalloc_obj(*work, GFP_ATOMIC);
688 			if (work)
689 				__neigh_set_probe_once(neigh);
690 		}
691 		write_unlock_bh(&neigh->lock);
692 	} else if (time_after(jiffies, last_probe +
693 				       READ_ONCE(idev->cnf.rtr_probe_interval))) {
694 		work = kmalloc_obj(*work, GFP_ATOMIC);
695 	}
696 
697 	if (!work || cmpxchg(&fib6_nh->last_probe,
698 			     last_probe, jiffies) != last_probe) {
699 		kfree(work);
700 	} else {
701 		INIT_WORK(&work->work, rt6_probe_deferred);
702 		work->target = *nh_gw;
703 		netdev_hold(dev, &work->dev_tracker, GFP_ATOMIC);
704 		work->dev = dev;
705 		schedule_work(&work->work);
706 	}
707 
708 out:
709 	rcu_read_unlock();
710 }
711 #else
712 static inline void rt6_probe(struct fib6_nh *fib6_nh)
713 {
714 }
715 #endif
716 
717 /*
718  * Default Router Selection (RFC 2461 6.3.6)
719  */
720 static enum rt6_nud_state rt6_check_neigh(const struct fib6_nh *fib6_nh)
721 {
722 	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
723 	struct neighbour *neigh;
724 
725 	rcu_read_lock();
726 	neigh = __ipv6_neigh_lookup_noref(fib6_nh->fib_nh_dev,
727 					  &fib6_nh->fib_nh_gw6);
728 	if (neigh) {
729 		u8 nud_state = READ_ONCE(neigh->nud_state);
730 
731 		if (nud_state & NUD_VALID)
732 			ret = RT6_NUD_SUCCEED;
733 #ifdef CONFIG_IPV6_ROUTER_PREF
734 		else if (!(nud_state & NUD_FAILED))
735 			ret = RT6_NUD_SUCCEED;
736 		else
737 			ret = RT6_NUD_FAIL_PROBE;
738 #endif
739 	} else {
740 		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
741 		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
742 	}
743 	rcu_read_unlock();
744 
745 	return ret;
746 }
747 
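/* Score a nexthop for default router selection: bit 1 is set when the
 * output interface matches (or none was requested), bits 2-3 carry the
 * decoded router preference, and a negative value reports a NUD failure.
 */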
748 static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
749 			   int strict)
750 {
751 	int m = 0;
752 
753 	if (!oif || nh->fib_nh_dev->ifindex == oif)
754 		m = 2;
755 
756 	if (!m && (strict & RT6_LOOKUP_F_IFACE))
757 		return RT6_NUD_FAIL_HARD;
758 #ifdef CONFIG_IPV6_ROUTER_PREF
759 	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(fib6_flags)) << 2;
760 #endif
761 	if ((strict & RT6_LOOKUP_F_REACHABLE) &&
762 	    !(fib6_flags & RTF_NONEXTHOP) && nh->fib_nh_gw_family) {
763 		int n = rt6_check_neigh(nh);
764 		if (n < 0)
765 			return n;
766 	}
767 	return m;
768 }
769 
770 static bool find_match(struct fib6_nh *nh, u32 fib6_flags,
771 		       int oif, int strict, int *mpri, bool *do_rr)
772 {
773 	bool match_do_rr = false;
774 	bool rc = false;
775 	int m;
776 
777 	if (nh->fib_nh_flags & RTNH_F_DEAD)
778 		goto out;
779 
780 	if (ip6_ignore_linkdown(nh->fib_nh_dev) &&
781 	    nh->fib_nh_flags & RTNH_F_LINKDOWN &&
782 	    !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
783 		goto out;
784 
785 	m = rt6_score_route(nh, fib6_flags, oif, strict);
786 	if (m == RT6_NUD_FAIL_DO_RR) {
787 		match_do_rr = true;
788 		m = 0; /* lowest valid score */
789 	} else if (m == RT6_NUD_FAIL_HARD) {
790 		goto out;
791 	}
792 
793 	if (strict & RT6_LOOKUP_F_REACHABLE)
794 		rt6_probe(nh);
795 
796 	/* note that m can be RT6_NUD_FAIL_PROBE at this point */
797 	if (m > *mpri) {
798 		*do_rr = match_do_rr;
799 		*mpri = m;
800 		rc = true;
801 	}
802 out:
803 	return rc;
804 }
805 
806 struct fib6_nh_frl_arg {
807 	u32		flags;
808 	int		oif;
809 	int		strict;
810 	int		*mpri;
811 	bool		*do_rr;
812 	struct fib6_nh	*nh;
813 };
814 
815 static int rt6_nh_find_match(struct fib6_nh *nh, void *_arg)
816 {
817 	struct fib6_nh_frl_arg *arg = _arg;
818 
819 	arg->nh = nh;
820 	return find_match(nh, arg->flags, arg->oif, arg->strict,
821 			  arg->mpri, arg->do_rr);
822 }
823 
824 static void __find_rr_leaf(struct fib6_info *f6i_start,
825 			   struct fib6_info *nomatch, u32 metric,
826 			   struct fib6_result *res, struct fib6_info **cont,
827 			   int oif, int strict, bool *do_rr, int *mpri)
828 {
829 	struct fib6_info *f6i;
830 
831 	for (f6i = f6i_start;
832 	     f6i && f6i != nomatch;
833 	     f6i = rcu_dereference(f6i->fib6_next)) {
834 		bool matched = false;
835 		struct fib6_nh *nh;
836 
837 		if (cont && f6i->fib6_metric != metric) {
838 			*cont = f6i;
839 			return;
840 		}
841 
842 		if (fib6_check_expired(f6i))
843 			continue;
844 
845 		if (unlikely(f6i->nh)) {
846 			struct fib6_nh_frl_arg arg = {
847 				.flags  = f6i->fib6_flags,
848 				.oif    = oif,
849 				.strict = strict,
850 				.mpri   = mpri,
851 				.do_rr  = do_rr
852 			};
853 
854 			if (nexthop_is_blackhole(f6i->nh)) {
855 				res->fib6_flags = RTF_REJECT;
856 				res->fib6_type = RTN_BLACKHOLE;
857 				res->f6i = f6i;
858 				res->nh = nexthop_fib6_nh(f6i->nh);
859 				return;
860 			}
861 			if (nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_find_match,
862 						     &arg)) {
863 				matched = true;
864 				nh = arg.nh;
865 			}
866 		} else {
867 			nh = f6i->fib6_nh;
868 			if (find_match(nh, f6i->fib6_flags, oif, strict,
869 				       mpri, do_rr))
870 				matched = true;
871 		}
872 		if (matched) {
873 			res->f6i = f6i;
874 			res->nh = nh;
875 			res->fib6_flags = f6i->fib6_flags;
876 			res->fib6_type = f6i->fib6_type;
877 		}
878 	}
879 }
880 
881 static void find_rr_leaf(struct fib6_node *fn, struct fib6_info *leaf,
882 			 struct fib6_info *rr_head, int oif, int strict,
883 			 bool *do_rr, struct fib6_result *res)
884 {
885 	u32 metric = rr_head->fib6_metric;
886 	struct fib6_info *cont = NULL;
887 	int mpri = -1;
888 
889 	__find_rr_leaf(rr_head, NULL, metric, res, &cont,
890 		       oif, strict, do_rr, &mpri);
891 
892 	__find_rr_leaf(leaf, rr_head, metric, res, &cont,
893 		       oif, strict, do_rr, &mpri);
894 
895 	if (res->f6i || !cont)
896 		return;
897 
898 	__find_rr_leaf(cont, NULL, metric, res, NULL,
899 		       oif, strict, do_rr, &mpri);
900 }
901 
902 static void rt6_select(struct net *net, struct fib6_node *fn, int oif,
903 		       struct fib6_result *res, int strict)
904 {
905 	struct fib6_info *leaf = rcu_dereference(fn->leaf);
906 	struct fib6_info *rt0;
907 	bool do_rr = false;
908 	int key_plen;
909 
910 	/* make sure this function or its helpers set f6i */
911 	res->f6i = NULL;
912 
913 	if (!leaf || leaf == net->ipv6.fib6_null_entry)
914 		goto out;
915 
916 	rt0 = rcu_dereference(fn->rr_ptr);
917 	if (!rt0)
918 		rt0 = leaf;
919 
920 	/* Double check to make sure fn is not an intermediate node
921 	 * and fn->leaf does not point to its child's leaf
922 	 * (This might happen if all routes under fn are deleted from
923 	 * the tree and fib6_repair_tree() is called on the node.)
924 	 */
925 	key_plen = rt0->fib6_dst.plen;
926 #ifdef CONFIG_IPV6_SUBTREES
927 	if (rt0->fib6_src.plen)
928 		key_plen = rt0->fib6_src.plen;
929 #endif
930 	if (fn->fn_bit != key_plen)
931 		goto out;
932 
933 	find_rr_leaf(fn, leaf, rt0, oif, strict, &do_rr, res);
934 	if (do_rr) {
935 		struct fib6_info *next = rcu_dereference(rt0->fib6_next);
936 
937 		/* no entries matched; do round-robin */
938 		if (!next || next->fib6_metric != rt0->fib6_metric)
939 			next = leaf;
940 
941 		if (next != rt0) {
942 			spin_lock_bh(&leaf->fib6_table->tb6_lock);
943 			/* make sure next is not being deleted from the tree */
944 			if (next->fib6_node)
945 				rcu_assign_pointer(fn->rr_ptr, next);
946 			spin_unlock_bh(&leaf->fib6_table->tb6_lock);
947 		}
948 	}
949 
950 out:
951 	if (!res->f6i) {
952 		res->f6i = net->ipv6.fib6_null_entry;
953 		res->nh = res->f6i->fib6_nh;
954 		res->fib6_flags = res->f6i->fib6_flags;
955 		res->fib6_type = res->f6i->fib6_type;
956 	}
957 }
958 
959 static bool rt6_is_gw_or_nonexthop(const struct fib6_result *res)
960 {
961 	return (res->f6i->fib6_flags & RTF_NONEXTHOP) ||
962 	       res->nh->fib_nh_gw_family;
963 }
964 
965 #ifdef CONFIG_IPV6_ROUTE_INFO
966 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
967 		  const struct in6_addr *gwaddr)
968 {
969 	struct net *net = dev_net(dev);
970 	struct route_info *rinfo = (struct route_info *) opt;
971 	struct in6_addr prefix_buf, *prefix;
972 	struct fib6_table *table;
973 	unsigned int pref;
974 	unsigned long lifetime;
975 	struct fib6_info *rt;
976 
977 	if (len < sizeof(struct route_info)) {
978 		return -EINVAL;
979 	}
980 
981 	/* Sanity check for prefix_len and length */
982 	if (rinfo->length > 3) {
983 		return -EINVAL;
984 	} else if (rinfo->prefix_len > 128) {
985 		return -EINVAL;
986 	} else if (rinfo->prefix_len > 64) {
987 		if (rinfo->length < 2) {
988 			return -EINVAL;
989 		}
990 	} else if (rinfo->prefix_len > 0) {
991 		if (rinfo->length < 1) {
992 			return -EINVAL;
993 		}
994 	}
995 
996 	pref = rinfo->route_pref;
997 	if (pref == ICMPV6_ROUTER_PREF_INVALID)
998 		return -EINVAL;
999 
1000 	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
1001 
1002 	if (rinfo->length == 3)
1003 		prefix = (struct in6_addr *)rinfo->prefix;
1004 	else {
1005 		/* this function is safe */
1006 		ipv6_addr_prefix(&prefix_buf,
1007 				 (struct in6_addr *)rinfo->prefix,
1008 				 rinfo->prefix_len);
1009 		prefix = &prefix_buf;
1010 	}
1011 
1012 	if (rinfo->prefix_len == 0)
1013 		rt = rt6_get_dflt_router(net, gwaddr, dev);
1014 	else
1015 		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
1016 					gwaddr, dev);
1017 
1018 	if (rt && !lifetime) {
1019 		ip6_del_rt(net, rt, false);
1020 		rt = NULL;
1021 	}
1022 
1023 	if (!rt && lifetime)
1024 		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
1025 					dev, pref);
1026 	else if (rt)
1027 		rt->fib6_flags = RTF_ROUTEINFO |
1028 				 (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
1029 
1030 	if (rt) {
1031 		table = rt->fib6_table;
1032 		spin_lock_bh(&table->tb6_lock);
1033 
1034 		if (!addrconf_finite_timeout(lifetime)) {
1035 			fib6_clean_expires(rt);
1036 			fib6_may_remove_gc_list(net, rt);
1037 		} else {
1038 			fib6_set_expires(rt, jiffies + HZ * lifetime);
1039 			fib6_add_gc_list(rt);
1040 		}
1041 
1042 		spin_unlock_bh(&table->tb6_lock);
1043 
1044 		fib6_info_release(rt);
1045 	}
1046 	return 0;
1047 }
1048 #endif
1049 
1050 /*
1051  *	Misc support functions
1052  */
1053 
1054 /* called with rcu_read_lock held */
1055 static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
1056 {
1057 	struct net_device *dev = res->nh->fib_nh_dev;
1058 
1059 	if (res->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) {
1060 		/* for copies of local routes, dst->dev needs to be the
1061 		 * device itself if it is an L3 master device, the master
1062 		 * device if the device is enslaved, and loopback by default
1063 		 */
1064 		if (netif_is_l3_slave(dev) &&
1065 		    !rt6_need_strict(&res->f6i->fib6_dst.addr))
1066 			dev = l3mdev_master_dev_rcu(dev) ? :
1067 			      dev_net(dev)->loopback_dev;
1068 		else if (!netif_is_l3_master(dev))
1069 			dev = dev_net(dev)->loopback_dev;
1070 		/* the remaining case is netif_is_l3_master(dev) being true,
1071 		 * in which case we want dev itself to be returned
1072 		 */
1073 	}
1074 
1075 	return dev;
1076 }
1077 
1078 static const int fib6_prop[RTN_MAX + 1] = {
1079 	[RTN_UNSPEC]	= 0,
1080 	[RTN_UNICAST]	= 0,
1081 	[RTN_LOCAL]	= 0,
1082 	[RTN_BROADCAST]	= 0,
1083 	[RTN_ANYCAST]	= 0,
1084 	[RTN_MULTICAST]	= 0,
1085 	[RTN_BLACKHOLE]	= -EINVAL,
1086 	[RTN_UNREACHABLE] = -EHOSTUNREACH,
1087 	[RTN_PROHIBIT]	= -EACCES,
1088 	[RTN_THROW]	= -EAGAIN,
1089 	[RTN_NAT]	= -EINVAL,
1090 	[RTN_XRESOLVE]	= -EINVAL,
1091 };
1092 
1093 static int ip6_rt_type_to_error(u8 fib6_type)
1094 {
1095 	return fib6_prop[fib6_type];
1096 }
1097 
1098 static unsigned short fib6_info_dst_flags(struct fib6_info *rt)
1099 {
1100 	unsigned short flags = 0;
1101 
1102 	if (rt->dst_nocount)
1103 		flags |= DST_NOCOUNT;
1104 	if (rt->dst_nopolicy)
1105 		flags |= DST_NOPOLICY;
1106 
1107 	return flags;
1108 }
1109 
1110 static void ip6_rt_init_dst_reject(struct rt6_info *rt, u8 fib6_type)
1111 {
1112 	rt->dst.error = ip6_rt_type_to_error(fib6_type);
1113 
1114 	switch (fib6_type) {
1115 	case RTN_BLACKHOLE:
1116 		rt->dst.output = dst_discard_out;
1117 		rt->dst.input = dst_discard;
1118 		break;
1119 	case RTN_PROHIBIT:
1120 		rt->dst.output = ip6_pkt_prohibit_out;
1121 		rt->dst.input = ip6_pkt_prohibit;
1122 		break;
1123 	case RTN_THROW:
1124 	case RTN_UNREACHABLE:
1125 	default:
1126 		rt->dst.output = ip6_pkt_discard_out;
1127 		rt->dst.input = ip6_pkt_discard;
1128 		break;
1129 	}
1130 }
1131 
1132 static void ip6_rt_init_dst(struct rt6_info *rt, const struct fib6_result *res)
1133 {
1134 	struct fib6_info *f6i = res->f6i;
1135 
1136 	if (res->fib6_flags & RTF_REJECT) {
1137 		ip6_rt_init_dst_reject(rt, res->fib6_type);
1138 		return;
1139 	}
1140 
1141 	rt->dst.error = 0;
1142 	rt->dst.output = ip6_output;
1143 
1144 	if (res->fib6_type == RTN_LOCAL || res->fib6_type == RTN_ANYCAST) {
1145 		rt->dst.input = ip6_input;
1146 	} else if (ipv6_addr_type(&f6i->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
1147 		rt->dst.input = ip6_mc_input;
1148 		rt->dst.output = ip6_mr_output;
1149 	} else {
1150 		rt->dst.input = ip6_forward;
1151 	}
1152 
1153 	if (res->nh->fib_nh_lws) {
1154 		rt->dst.lwtstate = lwtstate_get(res->nh->fib_nh_lws);
1155 		lwtunnel_set_redirect(&rt->dst);
1156 	}
1157 
1158 	rt->dst.lastuse = jiffies;
1159 }
1160 
1161 /* Caller must already hold reference to @from */
1162 static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
1163 {
1164 	rt->rt6i_flags &= ~RTF_EXPIRES;
1165 	rcu_assign_pointer(rt->from, from);
1166 	ip_dst_init_metrics(&rt->dst, from->fib6_metrics);
1167 }
1168 
1169 /* Caller must already hold reference to f6i in result */
1170 static void ip6_rt_copy_init(struct rt6_info *rt, const struct fib6_result *res)
1171 {
1172 	const struct fib6_nh *nh = res->nh;
1173 	const struct net_device *dev = nh->fib_nh_dev;
1174 	struct fib6_info *f6i = res->f6i;
1175 
1176 	ip6_rt_init_dst(rt, res);
1177 
1178 	rt->rt6i_dst = f6i->fib6_dst;
1179 	rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL;
1180 	rt->rt6i_flags = res->fib6_flags;
1181 	if (nh->fib_nh_gw_family) {
1182 		rt->rt6i_gateway = nh->fib_nh_gw6;
1183 		rt->rt6i_flags |= RTF_GATEWAY;
1184 	}
1185 	rt6_set_from(rt, f6i);
1186 #ifdef CONFIG_IPV6_SUBTREES
1187 	rt->rt6i_src = f6i->fib6_src;
1188 #endif
1189 }
1190 
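/* Walk back up the fib tree from @fn, descending into a source-address
 * subtree where one exists, until a node carrying route info is found;
 * return NULL once the top-level root is reached.
 */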
1191 static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
1192 					struct in6_addr *saddr)
1193 {
1194 	struct fib6_node *pn, *sn;
1195 	while (1) {
1196 		if (fn->fn_flags & RTN_TL_ROOT)
1197 			return NULL;
1198 		pn = rcu_dereference(fn->parent);
1199 		sn = FIB6_SUBTREE(pn);
1200 		if (sn && sn != fn)
1201 			fn = fib6_node_lookup(sn, NULL, saddr);
1202 		else
1203 			fn = pn;
1204 		if (fn->fn_flags & RTN_RTINFO)
1205 			return fn;
1206 	}
1207 }
1208 
1209 static bool ip6_hold_safe(struct net *net, struct rt6_info **prt)
1210 {
1211 	struct rt6_info *rt = *prt;
1212 
1213 	if (dst_hold_safe(&rt->dst))
1214 		return true;
1215 	if (net) {
1216 		rt = net->ipv6.ip6_null_entry;
1217 		dst_hold(&rt->dst);
1218 	} else {
1219 		rt = NULL;
1220 	}
1221 	*prt = rt;
1222 	return false;
1223 }
1224 
1225 /* called with rcu_read_lock held */
1226 static struct rt6_info *ip6_create_rt_rcu(const struct fib6_result *res)
1227 {
1228 	struct net_device *dev = res->nh->fib_nh_dev;
1229 	struct fib6_info *f6i = res->f6i;
1230 	unsigned short flags;
1231 	struct rt6_info *nrt;
1232 
1233 	if (!fib6_info_hold_safe(f6i))
1234 		goto fallback;
1235 
1236 	flags = fib6_info_dst_flags(f6i);
1237 	nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
1238 	if (!nrt) {
1239 		fib6_info_release(f6i);
1240 		goto fallback;
1241 	}
1242 
1243 	ip6_rt_copy_init(nrt, res);
1244 	return nrt;
1245 
1246 fallback:
1247 	nrt = dev_net(dev)->ipv6.ip6_null_entry;
1248 	dst_hold(&nrt->dst);
1249 	return nrt;
1250 }
1251 
1252 INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_lookup(struct net *net,
1253 					     struct fib6_table *table,
1254 					     struct flowi6 *fl6,
1255 					     const struct sk_buff *skb,
1256 					     int flags)
1257 {
1258 	struct fib6_result res = {};
1259 	struct fib6_node *fn;
1260 	struct rt6_info *rt;
1261 
1262 	rcu_read_lock();
1263 	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1264 restart:
1265 	res.f6i = rcu_dereference(fn->leaf);
1266 	if (!res.f6i)
1267 		res.f6i = net->ipv6.fib6_null_entry;
1268 	else
1269 		rt6_device_match(net, &res, &fl6->saddr, fl6->flowi6_oif,
1270 				 flags);
1271 
1272 	if (res.f6i == net->ipv6.fib6_null_entry) {
1273 		fn = fib6_backtrack(fn, &fl6->saddr);
1274 		if (fn)
1275 			goto restart;
1276 
1277 		rt = net->ipv6.ip6_null_entry;
1278 		dst_hold(&rt->dst);
1279 		goto out;
1280 	} else if (res.fib6_flags & RTF_REJECT) {
1281 		goto do_create;
1282 	}
1283 
1284 	fib6_select_path(net, &res, fl6, fl6->flowi6_oif,
1285 			 fl6->flowi6_oif != 0, skb, flags);
1286 
1287 	/* Search through exception table */
1288 	rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
1289 	if (rt) {
1290 		if (ip6_hold_safe(net, &rt))
1291 			dst_use_noref(&rt->dst, jiffies);
1292 	} else {
1293 do_create:
1294 		rt = ip6_create_rt_rcu(&res);
1295 	}
1296 
1297 out:
1298 	trace_fib6_table_lookup(net, &res, table, fl6);
1299 
1300 	rcu_read_unlock();
1301 
1302 	return rt;
1303 }
1304 
1305 struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
1306 				   const struct sk_buff *skb, int flags)
1307 {
1308 	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup);
1309 }
1310 EXPORT_SYMBOL_GPL(ip6_route_lookup);
1311 
1312 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
1313 			    const struct in6_addr *saddr, int oif,
1314 			    const struct sk_buff *skb, int strict)
1315 {
1316 	struct flowi6 fl6 = {
1317 		.flowi6_oif = oif,
1318 		.daddr = *daddr,
1319 	};
1320 	struct dst_entry *dst;
1321 	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
1322 
1323 	if (saddr) {
1324 		memcpy(&fl6.saddr, saddr, sizeof(*saddr));
1325 		flags |= RT6_LOOKUP_F_HAS_SADDR;
1326 	}
1327 
1328 	dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
1329 	if (dst->error == 0)
1330 		return dst_rt6_info(dst);
1331 
1332 	dst_release(dst);
1333 
1334 	return NULL;
1335 }
1336 EXPORT_SYMBOL(rt6_lookup);
1337 
1338 /* ip6_ins_rt is called with table->tb6_lock NOT held.
1339  * It takes the new route entry; if the addition fails for any reason,
1340  * the route is released.
1341  * The caller must hold dst before calling it.
1342  */
1343 
1344 static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info,
1345 			struct netlink_ext_ack *extack)
1346 {
1347 	int err;
1348 	struct fib6_table *table;
1349 
1350 	table = rt->fib6_table;
1351 	spin_lock_bh(&table->tb6_lock);
1352 	err = fib6_add(&table->tb6_root, rt, info, extack);
1353 	spin_unlock_bh(&table->tb6_lock);
1354 
1355 	return err;
1356 }
1357 
1358 int ip6_ins_rt(struct net *net, struct fib6_info *rt)
1359 {
1360 	struct nl_info info = {	.nl_net = net, };
1361 
1362 	return __ip6_ins_rt(rt, &info, NULL);
1363 }
1364 
1365 static struct rt6_info *ip6_rt_cache_alloc(const struct fib6_result *res,
1366 					   const struct in6_addr *daddr,
1367 					   const struct in6_addr *saddr)
1368 {
1369 	struct fib6_info *f6i = res->f6i;
1370 	struct net_device *dev;
1371 	struct rt6_info *rt;
1372 
1373 	/*
1374 	 *	Clone the route.
1375 	 */
1376 
1377 	if (!fib6_info_hold_safe(f6i))
1378 		return NULL;
1379 
1380 	dev = ip6_rt_get_dev_rcu(res);
1381 	rt = ip6_dst_alloc(dev_net(dev), dev, 0);
1382 	if (!rt) {
1383 		fib6_info_release(f6i);
1384 		return NULL;
1385 	}
1386 
1387 	ip6_rt_copy_init(rt, res);
1388 	rt->rt6i_flags |= RTF_CACHE;
1389 	rt->rt6i_dst.addr = *daddr;
1390 	rt->rt6i_dst.plen = 128;
1391 
1392 	if (!rt6_is_gw_or_nonexthop(res)) {
1393 		if (f6i->fib6_dst.plen != 128 &&
1394 		    ipv6_addr_equal(&f6i->fib6_dst.addr, daddr))
1395 			rt->rt6i_flags |= RTF_ANYCAST;
1396 #ifdef CONFIG_IPV6_SUBTREES
1397 		if (rt->rt6i_src.plen && saddr) {
1398 			rt->rt6i_src.addr = *saddr;
1399 			rt->rt6i_src.plen = 128;
1400 		}
1401 #endif
1402 	}
1403 
1404 	return rt;
1405 }
1406 
1407 static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
1408 {
1409 	struct fib6_info *f6i = res->f6i;
1410 	unsigned short flags = fib6_info_dst_flags(f6i);
1411 	struct net_device *dev;
1412 	struct rt6_info *pcpu_rt;
1413 
1414 	if (!fib6_info_hold_safe(f6i))
1415 		return NULL;
1416 
1417 	rcu_read_lock();
1418 	dev = ip6_rt_get_dev_rcu(res);
1419 	pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags | DST_NOCOUNT);
1420 	rcu_read_unlock();
1421 	if (!pcpu_rt) {
1422 		fib6_info_release(f6i);
1423 		return NULL;
1424 	}
1425 	ip6_rt_copy_init(pcpu_rt, res);
1426 	pcpu_rt->rt6i_flags |= RTF_PCPU;
1427 
1428 	if (f6i->nh)
1429 		pcpu_rt->sernum = rt_genid_ipv6(dev_net(dev));
1430 
1431 	return pcpu_rt;
1432 }
1433 
1434 static bool rt6_is_valid(const struct rt6_info *rt6)
1435 {
1436 	return rt6->sernum == rt_genid_ipv6(dev_net(rt6->dst.dev));
1437 }
1438 
1439 /* It should be called with rcu_read_lock() acquired */
1440 static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
1441 {
1442 	struct rt6_info *pcpu_rt;
1443 
1444 	pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu);
1445 
1446 	if (pcpu_rt && pcpu_rt->sernum && !rt6_is_valid(pcpu_rt)) {
1447 		struct rt6_info *prev, **p;
1448 
1449 		p = this_cpu_ptr(res->nh->rt6i_pcpu);
1450 		/* Paired with READ_ONCE() in __fib6_drop_pcpu_from() */
1451 		prev = xchg(p, NULL);
1452 		if (prev) {
1453 			dst_dev_put(&prev->dst);
1454 			dst_release(&prev->dst);
1455 		}
1456 
1457 		pcpu_rt = NULL;
1458 	}
1459 
1460 	return pcpu_rt;
1461 }
1462 
1463 static struct rt6_info *rt6_make_pcpu_route(struct net *net,
1464 					    const struct fib6_result *res)
1465 {
1466 	struct rt6_info *pcpu_rt, *prev, **p;
1467 
1468 	pcpu_rt = ip6_rt_pcpu_alloc(res);
1469 	if (!pcpu_rt)
1470 		return NULL;
1471 
1472 	p = this_cpu_ptr(res->nh->rt6i_pcpu);
1473 	prev = cmpxchg(p, NULL, pcpu_rt);
1474 	if (unlikely(prev)) {
1475 		/*
1476 		 * Another task on this CPU already installed a pcpu_rt.
1477 		 * This can happen on PREEMPT_RT where preemption is possible.
1478 		 * Free our allocation and return the existing one.
1479 		 */
1480 		WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RT));
1481 
1482 		dst_dev_put(&pcpu_rt->dst);
1483 		dst_release(&pcpu_rt->dst);
1484 		return prev;
1485 	}
1486 
1487 	if (res->f6i->fib6_destroying) {
1488 		struct fib6_info *from;
1489 
1490 		from = unrcu_pointer(xchg(&pcpu_rt->from, NULL));
1491 		fib6_info_release(from);
1492 	}
1493 
1494 	return pcpu_rt;
1495 }
1496 
1497 /* exception hash table implementation
1498  */
1499 static DEFINE_SPINLOCK(rt6_exception_lock);
1500 
1501 /* Remove rt6_ex from hash table and free the memory
1502  * Caller must hold rt6_exception_lock
1503  */
1504 static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
1505 				 struct rt6_exception *rt6_ex)
1506 {
1507 	struct net *net;
1508 
1509 	if (!bucket || !rt6_ex)
1510 		return;
1511 
1512 	net = dev_net(rt6_ex->rt6i->dst.dev);
1513 	net->ipv6.rt6_stats->fib_rt_cache--;
1514 
1515 	/* completely purge the exception to allow releasing the held resources:
1516 	 * some [sk] cache may keep the dst around for an unlimited time
1517 	 */
1518 	dst_dev_put(&rt6_ex->rt6i->dst);
1519 
1520 	hlist_del_rcu(&rt6_ex->hlist);
1521 	dst_release(&rt6_ex->rt6i->dst);
1522 	kfree_rcu(rt6_ex, rcu);
1523 	WARN_ON_ONCE(!bucket->depth);
1524 	bucket->depth--;
1525 }
1526 
1527 /* Remove oldest rt6_ex in bucket and free the memory
1528  * Caller must hold rt6_exception_lock
1529  */
1530 static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
1531 {
1532 	struct rt6_exception *rt6_ex, *oldest = NULL;
1533 
1534 	if (!bucket)
1535 		return;
1536 
1537 	hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
1538 		if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
1539 			oldest = rt6_ex;
1540 	}
1541 	rt6_remove_exception(bucket, oldest);
1542 }
1543 
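/* Hash (dst, src) with a boot-time random siphash key and fold the result
 * into an exception bucket index.
 */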
1544 static u32 rt6_exception_hash(const struct in6_addr *dst,
1545 			      const struct in6_addr *src)
1546 {
1547 	static siphash_aligned_key_t rt6_exception_key;
1548 	struct {
1549 		struct in6_addr dst;
1550 		struct in6_addr src;
1551 	} __aligned(SIPHASH_ALIGNMENT) combined = {
1552 		.dst = *dst,
1553 	};
1554 	u64 val;
1555 
1556 	net_get_random_once(&rt6_exception_key, sizeof(rt6_exception_key));
1557 
1558 #ifdef CONFIG_IPV6_SUBTREES
1559 	if (src)
1560 		combined.src = *src;
1561 #endif
1562 	val = siphash(&combined, sizeof(combined), &rt6_exception_key);
1563 
1564 	return hash_64(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
1565 }
1566 
1567 /* Helper function to find the cached rt in the hash table
1568  * and update bucket pointer to point to the bucket for this
1569  * (daddr, saddr) pair
1570  * Caller must hold rt6_exception_lock
1571  */
1572 static struct rt6_exception *
1573 __rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
1574 			      const struct in6_addr *daddr,
1575 			      const struct in6_addr *saddr)
1576 {
1577 	struct rt6_exception *rt6_ex;
1578 	u32 hval;
1579 
1580 	if (!(*bucket) || !daddr)
1581 		return NULL;
1582 
1583 	hval = rt6_exception_hash(daddr, saddr);
1584 	*bucket += hval;
1585 
1586 	hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
1587 		struct rt6_info *rt6 = rt6_ex->rt6i;
1588 		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);
1589 
1590 #ifdef CONFIG_IPV6_SUBTREES
1591 		if (matched && saddr)
1592 			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
1593 #endif
1594 		if (matched)
1595 			return rt6_ex;
1596 	}
1597 	return NULL;
1598 }
1599 
1600 /* Helper function to find the cached rt in the hash table
1601  * and update bucket pointer to point to the bucket for this
1602  * (daddr, saddr) pair
1603  * Caller must hold rcu_read_lock()
1604  */
1605 static struct rt6_exception *
1606 __rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
1607 			 const struct in6_addr *daddr,
1608 			 const struct in6_addr *saddr)
1609 {
1610 	struct rt6_exception *rt6_ex;
1611 	u32 hval;
1612 
1613 	WARN_ON_ONCE(!rcu_read_lock_held());
1614 
1615 	if (!(*bucket) || !daddr)
1616 		return NULL;
1617 
1618 	hval = rt6_exception_hash(daddr, saddr);
1619 	*bucket += hval;
1620 
1621 	hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
1622 		struct rt6_info *rt6 = rt6_ex->rt6i;
1623 		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);
1624 
1625 #ifdef CONFIG_IPV6_SUBTREES
1626 		if (matched && saddr)
1627 			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
1628 #endif
1629 		if (matched)
1630 			return rt6_ex;
1631 	}
1632 	return NULL;
1633 }
1634 
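/* Effective MTU for the lookup result: the route's PMTU if set, otherwise
 * the egress device's IPv6 MTU, capped at IP6_MAX_MTU and reduced by any
 * lwtunnel encapsulation headroom.
 */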
1635 static unsigned int fib6_mtu(const struct fib6_result *res)
1636 {
1637 	const struct fib6_nh *nh = res->nh;
1638 	unsigned int mtu;
1639 
1640 	if (res->f6i->fib6_pmtu) {
1641 		mtu = res->f6i->fib6_pmtu;
1642 	} else {
1643 		struct net_device *dev = nh->fib_nh_dev;
1644 		struct inet6_dev *idev;
1645 
1646 		rcu_read_lock();
1647 		idev = __in6_dev_get(dev);
1648 		if (!idev) {
1649 			rcu_read_unlock();
1650 			return 0;
1651 		}
1652 		mtu = READ_ONCE(idev->cnf.mtu6);
1653 		rcu_read_unlock();
1654 	}
1655 
1656 	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
1657 
1658 	return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
1659 }
1660 
1661 #define FIB6_EXCEPTION_BUCKET_FLUSHED  0x1UL
1662 
1663 /* used when the flushed bit is not relevant and only access to the bucket
1664  * is needed (i.e., all bucket users except rt6_insert_exception);
1665  *
1666  * called under rcu lock; sometimes called with rt6_exception_lock held
1667  */
1668 static
1669 struct rt6_exception_bucket *fib6_nh_get_excptn_bucket(const struct fib6_nh *nh,
1670 						       spinlock_t *lock)
1671 {
1672 	struct rt6_exception_bucket *bucket;
1673 
1674 	if (lock)
1675 		bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1676 						   lockdep_is_held(lock));
1677 	else
1678 		bucket = rcu_dereference(nh->rt6i_exception_bucket);
1679 
1680 	/* remove bucket flushed bit if set */
1681 	if (bucket) {
1682 		unsigned long p = (unsigned long)bucket;
1683 
1684 		p &= ~FIB6_EXCEPTION_BUCKET_FLUSHED;
1685 		bucket = (struct rt6_exception_bucket *)p;
1686 	}
1687 
1688 	return bucket;
1689 }
1690 
1691 static bool fib6_nh_excptn_bucket_flushed(struct rt6_exception_bucket *bucket)
1692 {
1693 	unsigned long p = (unsigned long)bucket;
1694 
1695 	return !!(p & FIB6_EXCEPTION_BUCKET_FLUSHED);
1696 }
1697 
1698 /* called with rt6_exception_lock held */
1699 static void fib6_nh_excptn_bucket_set_flushed(struct fib6_nh *nh,
1700 					      spinlock_t *lock)
1701 {
1702 	struct rt6_exception_bucket *bucket;
1703 	unsigned long p;
1704 
1705 	bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1706 					   lockdep_is_held(lock));
1707 
1708 	p = (unsigned long)bucket;
1709 	p |= FIB6_EXCEPTION_BUCKET_FLUSHED;
1710 	bucket = (struct rt6_exception_bucket *)p;
1711 	rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
1712 }
1713 
1714 static int rt6_insert_exception(struct rt6_info *nrt,
1715 				const struct fib6_result *res)
1716 {
1717 	struct net *net = dev_net(nrt->dst.dev);
1718 	struct rt6_exception_bucket *bucket;
1719 	struct fib6_info *f6i = res->f6i;
1720 	struct in6_addr *src_key = NULL;
1721 	struct rt6_exception *rt6_ex;
1722 	struct fib6_nh *nh = res->nh;
1723 	int max_depth;
1724 	int err = 0;
1725 
1726 	spin_lock_bh(&rt6_exception_lock);
1727 
1728 	bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1729 					  lockdep_is_held(&rt6_exception_lock));
1730 	if (!bucket) {
1731 		bucket = kzalloc_objs(*bucket, FIB6_EXCEPTION_BUCKET_SIZE,
1732 				      GFP_ATOMIC);
1733 		if (!bucket) {
1734 			err = -ENOMEM;
1735 			goto out;
1736 		}
1737 		rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
1738 	} else if (fib6_nh_excptn_bucket_flushed(bucket)) {
1739 		err = -EINVAL;
1740 		goto out;
1741 	}
1742 
1743 #ifdef CONFIG_IPV6_SUBTREES
1744 	/* fib6_src.plen != 0 indicates f6i is in subtree
1745 	 * and exception table is indexed by a hash of
1746 	 * both fib6_dst and fib6_src.
1747 	 * Otherwise, the exception table is indexed by
1748 	 * a hash of only fib6_dst.
1749 	 */
1750 	if (f6i->fib6_src.plen)
1751 		src_key = &nrt->rt6i_src.addr;
1752 #endif
1753 	/* rt6_mtu_change() might lower mtu on f6i.
1754 	 * Only insert this exception route if its mtu
1755 	 * is less than f6i's mtu value.
1756 	 */
1757 	if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(res)) {
1758 		err = -EINVAL;
1759 		goto out;
1760 	}
1761 
1762 	rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
1763 					       src_key);
1764 	if (rt6_ex)
1765 		rt6_remove_exception(bucket, rt6_ex);
1766 
1767 	rt6_ex = kzalloc_obj(*rt6_ex, GFP_ATOMIC);
1768 	if (!rt6_ex) {
1769 		err = -ENOMEM;
1770 		goto out;
1771 	}
1772 	rt6_ex->rt6i = nrt;
1773 	rt6_ex->stamp = jiffies;
1774 	hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
1775 	bucket->depth++;
1776 	net->ipv6.rt6_stats->fib_rt_cache++;
1777 
1778 	/* Randomize max depth to avoid some side-channel attacks. */
1779 	max_depth = FIB6_MAX_DEPTH + get_random_u32_below(FIB6_MAX_DEPTH);
1780 	while (bucket->depth > max_depth)
1781 		rt6_exception_remove_oldest(bucket);
1782 
1783 out:
1784 	spin_unlock_bh(&rt6_exception_lock);
1785 
1786 	/* Update fn->fn_sernum to invalidate all cached dst */
1787 	if (!err) {
1788 		spin_lock_bh(&f6i->fib6_table->tb6_lock);
1789 		fib6_update_sernum(net, f6i);
1790 		fib6_add_gc_list(f6i);
1791 		spin_unlock_bh(&f6i->fib6_table->tb6_lock);
1792 		fib6_force_start_gc(net);
1793 	}
1794 
1795 	return err;
1796 }
1797 
1798 static void fib6_nh_flush_exceptions(struct fib6_nh *nh, struct fib6_info *from)
1799 {
1800 	struct rt6_exception_bucket *bucket;
1801 	struct rt6_exception *rt6_ex;
1802 	struct hlist_node *tmp;
1803 	int i;
1804 
1805 	spin_lock_bh(&rt6_exception_lock);
1806 
1807 	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1808 	if (!bucket)
1809 		goto out;
1810 
1811 	/* Prevent rt6_insert_exception() from recreating the bucket list */
1812 	if (!from)
1813 		fib6_nh_excptn_bucket_set_flushed(nh, &rt6_exception_lock);
1814 
1815 	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1816 		hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist) {
1817 			if (!from ||
1818 			    rcu_access_pointer(rt6_ex->rt6i->from) == from)
1819 				rt6_remove_exception(bucket, rt6_ex);
1820 		}
1821 		WARN_ON_ONCE(!from && bucket->depth);
1822 		bucket++;
1823 	}
1824 out:
1825 	spin_unlock_bh(&rt6_exception_lock);
1826 }
1827 
1828 static int rt6_nh_flush_exceptions(struct fib6_nh *nh, void *arg)
1829 {
1830 	struct fib6_info *f6i = arg;
1831 
1832 	fib6_nh_flush_exceptions(nh, f6i);
1833 
1834 	return 0;
1835 }
1836 
1837 void rt6_flush_exceptions(struct fib6_info *f6i)
1838 {
1839 	if (f6i->nh) {
1840 		rcu_read_lock();
1841 		nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_flush_exceptions, f6i);
1842 		rcu_read_unlock();
1843 	} else {
1844 		fib6_nh_flush_exceptions(f6i->fib6_nh, f6i);
1845 	}
1846 }
1847 
1848 /* Find the cached rt in the exception hash table of the passed-in result
1849  * Caller has to hold rcu_read_lock()
1850  */
1851 static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
1852 					   const struct in6_addr *daddr,
1853 					   const struct in6_addr *saddr)
1854 {
1855 	const struct in6_addr *src_key = NULL;
1856 	struct rt6_exception_bucket *bucket;
1857 	struct rt6_exception *rt6_ex;
1858 	struct rt6_info *ret = NULL;
1859 
1860 #ifdef CONFIG_IPV6_SUBTREES
1861 	/* fib6_src.plen != 0 indicates f6i is in subtree
1862 	 * and exception table is indexed by a hash of
1863 	 * both fib6_dst and fib6_src.
1864 	 * However, the src addr used to create the hash
1865 	 * might not be exactly the passed in saddr which
1866 	 * is a /128 addr from the flow.
1867 	 * So we need to use f6i->fib6_src to redo lookup
1868 	 * if the passed in saddr does not find anything.
1869 	 * (See the logic in ip6_rt_cache_alloc() on how
1870 	 * rt->rt6i_src is updated.)
1871 	 */
1872 	if (res->f6i->fib6_src.plen)
1873 		src_key = saddr;
1874 find_ex:
1875 #endif
1876 	bucket = fib6_nh_get_excptn_bucket(res->nh, NULL);
1877 	rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
1878 
1879 	if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
1880 		ret = rt6_ex->rt6i;
1881 
1882 #ifdef CONFIG_IPV6_SUBTREES
1883 	/* Use fib6_src as src_key and redo lookup */
1884 	if (!ret && src_key && src_key != &res->f6i->fib6_src.addr) {
1885 		src_key = &res->f6i->fib6_src.addr;
1886 		goto find_ex;
1887 	}
1888 #endif
1889 
1890 	return ret;
1891 }
1892 
1893 /* Remove the passed in cached rt from the hash table that contains it */
1894 static int fib6_nh_remove_exception(const struct fib6_nh *nh, int plen,
1895 				    const struct rt6_info *rt)
1896 {
1897 	const struct in6_addr *src_key = NULL;
1898 	struct rt6_exception_bucket *bucket;
1899 	struct rt6_exception *rt6_ex;
1900 	int err;
1901 
1902 	if (!rcu_access_pointer(nh->rt6i_exception_bucket))
1903 		return -ENOENT;
1904 
1905 	spin_lock_bh(&rt6_exception_lock);
1906 	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1907 
1908 #ifdef CONFIG_IPV6_SUBTREES
1909 	/* rt6i_src.plen != 0 indicates 'from' is in subtree
1910 	 * and exception table is indexed by a hash of
1911 	 * both rt6i_dst and rt6i_src.
1912 	 * Otherwise, the exception table is indexed by
1913 	 * a hash of only rt6i_dst.
1914 	 */
1915 	if (plen)
1916 		src_key = &rt->rt6i_src.addr;
1917 #endif
1918 	rt6_ex = __rt6_find_exception_spinlock(&bucket,
1919 					       &rt->rt6i_dst.addr,
1920 					       src_key);
1921 	if (rt6_ex) {
1922 		rt6_remove_exception(bucket, rt6_ex);
1923 		err = 0;
1924 	} else {
1925 		err = -ENOENT;
1926 	}
1927 
1928 	spin_unlock_bh(&rt6_exception_lock);
1929 	return err;
1930 }
1931 
1932 struct fib6_nh_excptn_arg {
1933 	struct rt6_info	*rt;
1934 	int		plen;
1935 };
1936 
1937 static int rt6_nh_remove_exception_rt(struct fib6_nh *nh, void *_arg)
1938 {
1939 	struct fib6_nh_excptn_arg *arg = _arg;
1940 	int err;
1941 
1942 	err = fib6_nh_remove_exception(nh, arg->plen, arg->rt);
1943 	if (err == 0)
1944 		return 1;
1945 
1946 	return 0;
1947 }
1948 
1949 static int rt6_remove_exception_rt(struct rt6_info *rt)
1950 {
1951 	struct fib6_info *from;
1952 
1953 	from = rcu_dereference(rt->from);
1954 	if (!from || !(rt->rt6i_flags & RTF_CACHE))
1955 		return -EINVAL;
1956 
1957 	if (from->nh) {
1958 		struct fib6_nh_excptn_arg arg = {
1959 			.rt = rt,
1960 			.plen = from->fib6_src.plen
1961 		};
1962 		int rc;
1963 
1964 		/* rc = 1 means an entry was found */
1965 		rc = nexthop_for_each_fib6_nh(from->nh,
1966 					      rt6_nh_remove_exception_rt,
1967 					      &arg);
1968 		return rc ? 0 : -ENOENT;
1969 	}
1970 
1971 	return fib6_nh_remove_exception(from->fib6_nh,
1972 					from->fib6_src.plen, rt);
1973 }
1974 
1975 /* Find rt6_ex which contains the passed in rt cache and
1976  * refresh its stamp
1977  */
1978 static void fib6_nh_update_exception(const struct fib6_nh *nh, int plen,
1979 				     const struct rt6_info *rt)
1980 {
1981 	const struct in6_addr *src_key = NULL;
1982 	struct rt6_exception_bucket *bucket;
1983 	struct rt6_exception *rt6_ex;
1984 
1985 	bucket = fib6_nh_get_excptn_bucket(nh, NULL);
1986 #ifdef CONFIG_IPV6_SUBTREES
1987 	/* rt6i_src.plen != 0 indicates that 'from' is in a subtree
1988 	 * and the exception table is indexed by a hash of
1989 	 * both rt6i_dst and rt6i_src.
1990 	 * Otherwise, the exception table is indexed by
1991 	 * a hash of rt6i_dst alone.
1992 	 */
1993 	if (plen)
1994 		src_key = &rt->rt6i_src.addr;
1995 #endif
1996 	rt6_ex = __rt6_find_exception_rcu(&bucket, &rt->rt6i_dst.addr, src_key);
1997 	if (rt6_ex)
1998 		rt6_ex->stamp = jiffies;
1999 }
2000 
2001 struct fib6_nh_match_arg {
2002 	const struct net_device *dev;
2003 	const struct in6_addr	*gw;
2004 	struct fib6_nh		*match;
2005 };
2006 
2007 /* determine if fib6_nh has the given device and gateway */
2008 static int fib6_nh_find_match(struct fib6_nh *nh, void *_arg)
2009 {
2010 	struct fib6_nh_match_arg *arg = _arg;
2011 
2012 	if (arg->dev != nh->fib_nh_dev ||
2013 	    (arg->gw && !nh->fib_nh_gw_family) ||
2014 	    (!arg->gw && nh->fib_nh_gw_family) ||
2015 	    (arg->gw && !ipv6_addr_equal(arg->gw, &nh->fib_nh_gw6)))
2016 		return 0;
2017 
2018 	arg->match = nh;
2019 
2020 	/* found a match, break the loop */
2021 	return 1;
2022 }
2023 
2024 static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
2025 {
2026 	struct fib6_info *from;
2027 	struct fib6_nh *fib6_nh;
2028 
2029 	rcu_read_lock();
2030 
2031 	from = rcu_dereference(rt->from);
2032 	if (!from || !(rt->rt6i_flags & RTF_CACHE))
2033 		goto unlock;
2034 
2035 	if (from->nh) {
2036 		struct fib6_nh_match_arg arg = {
2037 			.dev = rt->dst.dev,
2038 			.gw = &rt->rt6i_gateway,
2039 		};
2040 
2041 		nexthop_for_each_fib6_nh(from->nh, fib6_nh_find_match, &arg);
2042 
2043 		if (!arg.match)
2044 			goto unlock;
2045 		fib6_nh = arg.match;
2046 	} else {
2047 		fib6_nh = from->fib6_nh;
2048 	}
2049 	fib6_nh_update_exception(fib6_nh, from->fib6_src.plen, rt);
2050 unlock:
2051 	rcu_read_unlock();
2052 }
2053 
2054 static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
2055 					 struct rt6_info *rt, int mtu)
2056 {
2057 	u32 dmtu = dst6_mtu(&rt->dst);
2058 
2059 	/* If the new MTU is lower than the route PMTU, this new MTU will be the
2060 	 * lowest MTU in the path: always allow updating the route PMTU to
2061 	 * reflect PMTU decreases.
2062 	 *
2063 	 * If the new MTU is higher, and the route PMTU is equal to the local
2064 	 * MTU, this means the old MTU is the lowest in the path, so allow
2065 	 * updating it: if other nodes now have lower MTUs, PMTU discovery will
2066 	 * handle this.
2067 	 */
2068 
2069 	if (dmtu >= mtu)
2070 		return true;
2071 
2072 	if (dmtu == idev->cnf.mtu6)
2073 		return true;
2074 
2075 	return false;
2076 }
2077 
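/* Walk all exception routes cached on @nh and update their MTU metric to
 * @mtu where rt6_mtu_change_route_allowed() permits it.  Called with
 * rt6_exception_lock held (see the locked fib6_nh_get_excptn_bucket() call).
 */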
2078 static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
2079 				       const struct fib6_nh *nh, int mtu)
2080 {
2081 	struct rt6_exception_bucket *bucket;
2082 	struct rt6_exception *rt6_ex;
2083 	int i;
2084 
2085 	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
2086 	if (!bucket)
2087 		return;
2088 
2089 	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
2090 		hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
2091 			struct rt6_info *entry = rt6_ex->rt6i;
2092 
2093 			/* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
2094 			 * route), the metrics of its rt->from have already
2095 			 * been updated.
2096 			 */
2097 			if (dst_metric_raw(&entry->dst, RTAX_MTU) &&
2098 			    rt6_mtu_change_route_allowed(idev, entry, mtu))
2099 				dst_metric_set(&entry->dst, RTAX_MTU, mtu);
2100 		}
2101 		bucket++;
2102 	}
2103 }
2104 
2105 #define RTF_CACHE_GATEWAY	(RTF_GATEWAY | RTF_CACHE)
2106 
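/* Remove all cached gateway routes (RTF_CACHE | RTF_GATEWAY) hanging off
 * @nh whose gateway equals @gateway.
 */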
2107 static void fib6_nh_exceptions_clean_tohost(const struct fib6_nh *nh,
2108 					    const struct in6_addr *gateway)
2109 {
2110 	struct rt6_exception_bucket *bucket;
2111 	struct rt6_exception *rt6_ex;
2112 	struct hlist_node *tmp;
2113 	int i;
2114 
2115 	if (!rcu_access_pointer(nh->rt6i_exception_bucket))
2116 		return;
2117 
2118 	spin_lock_bh(&rt6_exception_lock);
2119 	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
2120 	if (bucket) {
2121 		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
2122 			hlist_for_each_entry_safe(rt6_ex, tmp,
2123 						  &bucket->chain, hlist) {
2124 				struct rt6_info *entry = rt6_ex->rt6i;
2125 
2126 				if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
2127 				    RTF_CACHE_GATEWAY &&
2128 				    ipv6_addr_equal(gateway,
2129 						    &entry->rt6i_gateway)) {
2130 					rt6_remove_exception(bucket, rt6_ex);
2131 				}
2132 			}
2133 			bucket++;
2134 		}
2135 	}
2136 
2137 	spin_unlock_bh(&rt6_exception_lock);
2138 }
2139 
2140 static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
2141 				      struct rt6_exception *rt6_ex,
2142 				      struct fib6_gc_args *gc_args,
2143 				      unsigned long now)
2144 {
2145 	struct rt6_info *rt = rt6_ex->rt6i;
2146 
2147 	/* We are pruning and obsoleting aged-out and non-gateway exceptions
2148 	 * even if other users still hold references to them, so that on the
2149 	 * next dst_check() such references can be dropped.
2150 	 * RTF_EXPIRES exceptions - e.g. pmtu-generated ones - are pruned when
2151 	 * expired, independently of their aging, as per RFC 8201 section 4.
2152 	 */
2153 	if (!(rt->rt6i_flags & RTF_EXPIRES)) {
2154 		if (time_after_eq(now, READ_ONCE(rt->dst.lastuse) +
2155 				       gc_args->timeout)) {
2156 			pr_debug("aging clone %p\n", rt);
2157 			rt6_remove_exception(bucket, rt6_ex);
2158 			return;
2159 		}
2160 	} else if (time_after(jiffies, READ_ONCE(rt->dst.expires))) {
2161 		pr_debug("purging expired route %p\n", rt);
2162 		rt6_remove_exception(bucket, rt6_ex);
2163 		return;
2164 	}
2165 
2166 	if (rt->rt6i_flags & RTF_GATEWAY) {
2167 		struct neighbour *neigh;
2168 
2169 		neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
2170 
2171 		if (!(neigh && (neigh->flags & NTF_ROUTER))) {
2172 			pr_debug("purging route %p via non-router but gateway\n",
2173 				 rt);
2174 			rt6_remove_exception(bucket, rt6_ex);
2175 			return;
2176 		}
2177 	}
2178 
2179 	gc_args->more++;
2180 }
2181 
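/* Walk the exception buckets of @nh and let rt6_age_examine_exception()
 * prune entries that have expired or aged out; entries that survive are
 * counted in gc_args->more.
 */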
2182 static void fib6_nh_age_exceptions(const struct fib6_nh *nh,
2183 				   struct fib6_gc_args *gc_args,
2184 				   unsigned long now)
2185 {
2186 	struct rt6_exception_bucket *bucket;
2187 	struct rt6_exception *rt6_ex;
2188 	struct hlist_node *tmp;
2189 	int i;
2190 
2191 	if (!rcu_access_pointer(nh->rt6i_exception_bucket))
2192 		return;
2193 
2194 	rcu_read_lock_bh();
2195 	spin_lock(&rt6_exception_lock);
2196 	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
2197 	if (bucket) {
2198 		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
2199 			hlist_for_each_entry_safe(rt6_ex, tmp,
2200 						  &bucket->chain, hlist) {
2201 				rt6_age_examine_exception(bucket, rt6_ex,
2202 							  gc_args, now);
2203 			}
2204 			bucket++;
2205 		}
2206 	}
2207 	spin_unlock(&rt6_exception_lock);
2208 	rcu_read_unlock_bh();
2209 }
2210 
2211 struct fib6_nh_age_excptn_arg {
2212 	struct fib6_gc_args	*gc_args;
2213 	unsigned long		now;
2214 };
2215 
2216 static int rt6_nh_age_exceptions(struct fib6_nh *nh, void *_arg)
2217 {
2218 	struct fib6_nh_age_excptn_arg *arg = _arg;
2219 
2220 	fib6_nh_age_exceptions(nh, arg->gc_args, arg->now);
2221 	return 0;
2222 }
2223 
2224 void rt6_age_exceptions(struct fib6_info *f6i,
2225 			struct fib6_gc_args *gc_args,
2226 			unsigned long now)
2227 {
2228 	if (f6i->nh) {
2229 		struct fib6_nh_age_excptn_arg arg = {
2230 			.gc_args = gc_args,
2231 			.now = now
2232 		};
2233 
2234 		nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_age_exceptions,
2235 					 &arg);
2236 	} else {
2237 		fib6_nh_age_exceptions(f6i->fib6_nh, gc_args, now);
2238 	}
2239 }
2240 
2241 /* must be called with rcu lock held */
2242 int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif,
2243 		      struct flowi6 *fl6, struct fib6_result *res, int strict)
2244 {
2245 	struct fib6_node *fn, *saved_fn;
2246 
2247 	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
2248 	saved_fn = fn;
2249 
2250 redo_rt6_select:
2251 	rt6_select(net, fn, oif, res, strict);
2252 	if (res->f6i == net->ipv6.fib6_null_entry) {
2253 		fn = fib6_backtrack(fn, &fl6->saddr);
2254 		if (fn)
2255 			goto redo_rt6_select;
2256 		else if (strict & RT6_LOOKUP_F_REACHABLE) {
2257 			/* also consider unreachable route */
2258 			strict &= ~RT6_LOOKUP_F_REACHABLE;
2259 			fn = saved_fn;
2260 			goto redo_rt6_select;
2261 		}
2262 	}
2263 
2264 	trace_fib6_table_lookup(net, res, table, fl6);
2265 
2266 	return 0;
2267 }
2268 
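/* Core policy-routing lookup.  Find the matching fib6_info, select a path,
 * then return (in order of preference) a cached exception route, a freshly
 * allocated uncached RTF_CACHE clone (FLOWI_FLAG_KNOWN_NH without a gateway),
 * or a per-cpu copy of the route.  A reference is taken on the result unless
 * RT6_LOOKUP_F_DST_NOREF is set; the uncached clone always carries one.
 */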
2269 struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
2270 			       int oif, struct flowi6 *fl6,
2271 			       const struct sk_buff *skb, int flags)
2272 {
2273 	struct fib6_result res = {};
2274 	struct rt6_info *rt = NULL;
2275 	int strict = 0;
2276 
2277 	WARN_ON_ONCE((flags & RT6_LOOKUP_F_DST_NOREF) &&
2278 		     !rcu_read_lock_held());
2279 
2280 	strict |= flags & RT6_LOOKUP_F_IFACE;
2281 	strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
2282 	if (READ_ONCE(net->ipv6.devconf_all->forwarding) == 0)
2283 		strict |= RT6_LOOKUP_F_REACHABLE;
2284 
2285 	rcu_read_lock();
2286 
2287 	fib6_table_lookup(net, table, oif, fl6, &res, strict);
2288 	if (res.f6i == net->ipv6.fib6_null_entry)
2289 		goto out;
2290 
2291 	fib6_select_path(net, &res, fl6, oif, false, skb, strict);
2292 
2293 	/* Search through the exception table */
2294 	rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
2295 	if (rt) {
2296 		goto out;
2297 	} else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
2298 			    !res.nh->fib_nh_gw_family)) {
2299 		/* Create an RTF_CACHE clone which will not be
2300 		 * owned by the fib6 tree.  It is for the special case where
2301 		 * the daddr in the skb during the neighbor look-up is different
2302 		 * from the fl6->daddr used to look up the route here.
2303 		 */
2304 		rt = ip6_rt_cache_alloc(&res, &fl6->daddr, NULL);
2305 
2306 		if (rt) {
2307 			/* One refcnt is taken during ip6_rt_cache_alloc().
2308 			 * As rt6_uncached_list_add() does not consume a refcnt,
2309 			 * this refcnt is always returned to the caller even
2310 			 * if the caller sets the RT6_LOOKUP_F_DST_NOREF flag.
2311 			 */
2312 			rt6_uncached_list_add(rt);
2313 			rcu_read_unlock();
2314 
2315 			return rt;
2316 		}
2317 	} else {
2318 		/* Get a percpu copy */
2319 		local_bh_disable();
2320 		rt = rt6_get_pcpu_route(&res);
2321 
2322 		if (!rt)
2323 			rt = rt6_make_pcpu_route(net, &res);
2324 
2325 		local_bh_enable();
2326 	}
2327 out:
2328 	if (!rt)
2329 		rt = net->ipv6.ip6_null_entry;
2330 	if (!(flags & RT6_LOOKUP_F_DST_NOREF))
2331 		ip6_hold_safe(net, &rt);
2332 	rcu_read_unlock();
2333 
2334 	return rt;
2335 }
2336 EXPORT_SYMBOL_GPL(ip6_pol_route);
2337 
2338 INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_input(struct net *net,
2339 					    struct fib6_table *table,
2340 					    struct flowi6 *fl6,
2341 					    const struct sk_buff *skb,
2342 					    int flags)
2343 {
2344 	return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags);
2345 }
2346 
2347 struct dst_entry *ip6_route_input_lookup(struct net *net,
2348 					 struct net_device *dev,
2349 					 struct flowi6 *fl6,
2350 					 const struct sk_buff *skb,
2351 					 int flags)
2352 {
2353 	if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
2354 		flags |= RT6_LOOKUP_F_IFACE;
2355 
2356 	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input);
2357 }
2358 EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
2359 
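/* Extract the L3 keys used for multipath hashing.  For ICMPv6 error
 * messages, hash on the addresses of the embedded (offending) packet so
 * that errors follow the same path as the flow they refer to; otherwise
 * use the pre-dissected keys when available, or the outer header.
 */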
2360 static void ip6_multipath_l3_keys(const struct sk_buff *skb,
2361 				  struct flow_keys *keys,
2362 				  struct flow_keys *flkeys)
2363 {
2364 	const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
2365 	const struct ipv6hdr *key_iph = outer_iph;
2366 	struct flow_keys *_flkeys = flkeys;
2367 	const struct ipv6hdr *inner_iph;
2368 	const struct icmp6hdr *icmph;
2369 	struct ipv6hdr _inner_iph;
2370 	struct icmp6hdr _icmph;
2371 
2372 	if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
2373 		goto out;
2374 
2375 	icmph = skb_header_pointer(skb, skb_transport_offset(skb),
2376 				   sizeof(_icmph), &_icmph);
2377 	if (!icmph)
2378 		goto out;
2379 
2380 	if (!icmpv6_is_err(icmph->icmp6_type))
2381 		goto out;
2382 
2383 	inner_iph = skb_header_pointer(skb,
2384 				       skb_transport_offset(skb) + sizeof(*icmph),
2385 				       sizeof(_inner_iph), &_inner_iph);
2386 	if (!inner_iph)
2387 		goto out;
2388 
2389 	key_iph = inner_iph;
2390 	_flkeys = NULL;
2391 out:
2392 	if (_flkeys) {
2393 		keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src;
2394 		keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst;
2395 		keys->tags.flow_label = _flkeys->tags.flow_label;
2396 		keys->basic.ip_proto = _flkeys->basic.ip_proto;
2397 	} else {
2398 		keys->addrs.v6addrs.src = key_iph->saddr;
2399 		keys->addrs.v6addrs.dst = key_iph->daddr;
2400 		keys->tags.flow_label = ip6_flowlabel(key_iph);
2401 		keys->basic.ip_proto = key_iph->nexthdr;
2402 	}
2403 }
2404 
2405 static u32 rt6_multipath_custom_hash_outer(const struct net *net,
2406 					   const struct sk_buff *skb,
2407 					   bool *p_has_inner)
2408 {
2409 	u32 hash_fields = ip6_multipath_hash_fields(net);
2410 	struct flow_keys keys, hash_keys;
2411 
2412 	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
2413 		return 0;
2414 
2415 	memset(&hash_keys, 0, sizeof(hash_keys));
2416 	skb_flow_dissect_flow_keys(skb, &keys, FLOW_DISSECTOR_F_STOP_AT_ENCAP);
2417 
2418 	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2419 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
2420 		hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
2421 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
2422 		hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
2423 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
2424 		hash_keys.basic.ip_proto = keys.basic.ip_proto;
2425 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
2426 		hash_keys.tags.flow_label = keys.tags.flow_label;
2427 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
2428 		hash_keys.ports.src = keys.ports.src;
2429 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
2430 		hash_keys.ports.dst = keys.ports.dst;
2431 
2432 	*p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION);
2433 	return fib_multipath_hash_from_keys(net, &hash_keys);
2434 }
2435 
2436 static u32 rt6_multipath_custom_hash_inner(const struct net *net,
2437 					   const struct sk_buff *skb,
2438 					   bool has_inner)
2439 {
2440 	u32 hash_fields = ip6_multipath_hash_fields(net);
2441 	struct flow_keys keys, hash_keys;
2442 
2443 	/* We assume the packet carries an encapsulation, but if none was
2444 	 * encountered during dissection of the outer flow, then there is no
2445 	 * point in calling the flow dissector again.
2446 	 */
2447 	if (!has_inner)
2448 		return 0;
2449 
2450 	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK))
2451 		return 0;
2452 
2453 	memset(&hash_keys, 0, sizeof(hash_keys));
2454 	skb_flow_dissect_flow_keys(skb, &keys, 0);
2455 
2456 	if (!(keys.control.flags & FLOW_DIS_ENCAPSULATION))
2457 		return 0;
2458 
2459 	if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2460 		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2461 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
2462 			hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
2463 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
2464 			hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
2465 	} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2466 		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2467 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
2468 			hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
2469 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
2470 			hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
2471 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
2472 			hash_keys.tags.flow_label = keys.tags.flow_label;
2473 	}
2474 
2475 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
2476 		hash_keys.basic.ip_proto = keys.basic.ip_proto;
2477 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
2478 		hash_keys.ports.src = keys.ports.src;
2479 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
2480 		hash_keys.ports.dst = keys.ports.dst;
2481 
2482 	return fib_multipath_hash_from_keys(net, &hash_keys);
2483 }
2484 
2485 static u32 rt6_multipath_custom_hash_skb(const struct net *net,
2486 					 const struct sk_buff *skb)
2487 {
2488 	u32 mhash, mhash_inner;
2489 	bool has_inner = true;
2490 
2491 	mhash = rt6_multipath_custom_hash_outer(net, skb, &has_inner);
2492 	mhash_inner = rt6_multipath_custom_hash_inner(net, skb, has_inner);
2493 
2494 	return jhash_2words(mhash, mhash_inner, 0);
2495 }
2496 
2497 static u32 rt6_multipath_custom_hash_fl6(const struct net *net,
2498 					 const struct flowi6 *fl6)
2499 {
2500 	u32 hash_fields = ip6_multipath_hash_fields(net);
2501 	struct flow_keys hash_keys;
2502 
2503 	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
2504 		return 0;
2505 
2506 	memset(&hash_keys, 0, sizeof(hash_keys));
2507 	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2508 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
2509 		hash_keys.addrs.v6addrs.src = fl6->saddr;
2510 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
2511 		hash_keys.addrs.v6addrs.dst = fl6->daddr;
2512 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
2513 		hash_keys.basic.ip_proto = fl6->flowi6_proto;
2514 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
2515 		hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2516 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT) {
2517 		if (fl6->flowi6_flags & FLOWI_FLAG_ANY_SPORT)
2518 			hash_keys.ports.src = (__force __be16)get_random_u16();
2519 		else
2520 			hash_keys.ports.src = fl6->fl6_sport;
2521 	}
2522 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
2523 		hash_keys.ports.dst = fl6->fl6_dport;
2524 
2525 	return fib_multipath_hash_from_keys(net, &hash_keys);
2526 }
2527 
2528 /* if skb is set it will be used and fl6 can be NULL */
2529 u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
2530 		       const struct sk_buff *skb, struct flow_keys *flkeys)
2531 {
2532 	struct flow_keys hash_keys;
2533 	u32 mhash = 0;
2534 
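	/* The hash policy (net.ipv6.fib_multipath_hash_policy) selects the
	 * key set:
	 *   0 - L3 (addresses, flow label, protocol)
	 *   1 - L4 (addresses and ports)
	 *   2 - inner L3 for encapsulated packets, else outer L3
	 *   3 - custom field set (fib_multipath_hash_fields)
	 */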
2535 	switch (ip6_multipath_hash_policy(net)) {
2536 	case 0:
2537 		memset(&hash_keys, 0, sizeof(hash_keys));
2538 		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2539 		if (skb) {
2540 			ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
2541 		} else {
2542 			hash_keys.addrs.v6addrs.src = fl6->saddr;
2543 			hash_keys.addrs.v6addrs.dst = fl6->daddr;
2544 			hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2545 			hash_keys.basic.ip_proto = fl6->flowi6_proto;
2546 		}
2547 		mhash = fib_multipath_hash_from_keys(net, &hash_keys);
2548 		break;
2549 	case 1:
2550 		if (skb) {
2551 			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
2552 			struct flow_keys keys;
2553 
2554 			/* short-circuit if an L4 hash is already present */
2555 			if (skb->l4_hash)
2556 				return skb_get_hash_raw(skb) >> 1;
2557 
2558 			memset(&hash_keys, 0, sizeof(hash_keys));
2559 
2560 			if (!flkeys) {
2561 				skb_flow_dissect_flow_keys(skb, &keys, flag);
2562 				flkeys = &keys;
2563 			}
2564 			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2565 			hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
2566 			hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
2567 			hash_keys.ports.src = flkeys->ports.src;
2568 			hash_keys.ports.dst = flkeys->ports.dst;
2569 			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2570 		} else {
2571 			memset(&hash_keys, 0, sizeof(hash_keys));
2572 			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2573 			hash_keys.addrs.v6addrs.src = fl6->saddr;
2574 			hash_keys.addrs.v6addrs.dst = fl6->daddr;
2575 			if (fl6->flowi6_flags & FLOWI_FLAG_ANY_SPORT)
2576 				hash_keys.ports.src = (__force __be16)get_random_u16();
2577 			else
2578 				hash_keys.ports.src = fl6->fl6_sport;
2579 			hash_keys.ports.dst = fl6->fl6_dport;
2580 			hash_keys.basic.ip_proto = fl6->flowi6_proto;
2581 		}
2582 		mhash = fib_multipath_hash_from_keys(net, &hash_keys);
2583 		break;
2584 	case 2:
2585 		memset(&hash_keys, 0, sizeof(hash_keys));
2586 		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2587 		if (skb) {
2588 			struct flow_keys keys;
2589 
2590 			if (!flkeys) {
2591 				skb_flow_dissect_flow_keys(skb, &keys, 0);
2592 				flkeys = &keys;
2593 			}
2594 
2595 			/* Inner can be v4 or v6 */
2596 			if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2597 				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2598 				hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
2599 				hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
2600 			} else if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2601 				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2602 				hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
2603 				hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
2604 				hash_keys.tags.flow_label = flkeys->tags.flow_label;
2605 				hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2606 			} else {
2607 				/* Same as case 0 */
2608 				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2609 				ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
2610 			}
2611 		} else {
2612 			/* Same as case 0 */
2613 			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2614 			hash_keys.addrs.v6addrs.src = fl6->saddr;
2615 			hash_keys.addrs.v6addrs.dst = fl6->daddr;
2616 			hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2617 			hash_keys.basic.ip_proto = fl6->flowi6_proto;
2618 		}
2619 		mhash = fib_multipath_hash_from_keys(net, &hash_keys);
2620 		break;
2621 	case 3:
2622 		if (skb)
2623 			mhash = rt6_multipath_custom_hash_skb(net, skb);
2624 		else
2625 			mhash = rt6_multipath_custom_hash_fl6(net, fl6);
2626 		break;
2627 	}
2628 
2629 	return mhash >> 1;
2630 }
2631 
2632 /* Called with rcu held */
2633 void ip6_route_input(struct sk_buff *skb)
2634 {
2635 	const struct ipv6hdr *iph = ipv6_hdr(skb);
2636 	struct net *net = dev_net(skb->dev);
2637 	int flags = RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_DST_NOREF;
2638 	struct ip_tunnel_info *tun_info;
2639 	struct flowi6 fl6 = {
2640 		.flowi6_iif = skb->dev->ifindex,
2641 		.daddr = iph->daddr,
2642 		.saddr = iph->saddr,
2643 		.flowlabel = ip6_flowinfo(iph),
2644 		.flowi6_mark = skb->mark,
2645 		.flowi6_proto = iph->nexthdr,
2646 	};
2647 	struct flow_keys *flkeys = NULL, _flkeys;
2648 
2649 	tun_info = skb_tunnel_info(skb);
2650 	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
2651 		fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
2652 
2653 	if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys))
2654 		flkeys = &_flkeys;
2655 
2656 	if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
2657 		fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);
2658 	skb_dst_drop(skb);
2659 	skb_dst_set_noref(skb, ip6_route_input_lookup(net, skb->dev,
2660 						      &fl6, skb, flags));
2661 }
2662 EXPORT_SYMBOL_GPL(ip6_route_input);
2663 
2664 INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_output(struct net *net,
2665 					     struct fib6_table *table,
2666 					     struct flowi6 *fl6,
2667 					     const struct sk_buff *skb,
2668 					     int flags)
2669 {
2670 	return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
2671 }
2672 
2673 static struct dst_entry *ip6_route_output_flags_noref(struct net *net,
2674 						      const struct sock *sk,
2675 						      struct flowi6 *fl6,
2676 						      int flags)
2677 {
2678 	bool any_src;
2679 
2680 	if (ipv6_addr_type(&fl6->daddr) &
2681 	    (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) {
2682 		struct dst_entry *dst;
2683 
2684 		/* This function does not take refcnt on the dst */
2685 		dst = l3mdev_link_scope_lookup(net, fl6);
2686 		if (dst)
2687 			return dst;
2688 	}
2689 
2690 	fl6->flowi6_iif = LOOPBACK_IFINDEX;
2691 
2692 	flags |= RT6_LOOKUP_F_DST_NOREF;
2693 	any_src = ipv6_addr_any(&fl6->saddr);
2694 	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
2695 	    (fl6->flowi6_oif && any_src))
2696 		flags |= RT6_LOOKUP_F_IFACE;
2697 
2698 	if (!any_src)
2699 		flags |= RT6_LOOKUP_F_HAS_SADDR;
2700 	else if (sk)
2701 		flags |= rt6_srcprefs2flags(READ_ONCE(inet6_sk(sk)->srcprefs));
2702 
2703 	return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output);
2704 }
2705 
2706 struct dst_entry *ip6_route_output_flags(struct net *net,
2707 					 const struct sock *sk,
2708 					 struct flowi6 *fl6,
2709 					 int flags)
2710 {
2711 	struct dst_entry *dst;
2712 	struct rt6_info *rt6;
2713 
2714 	rcu_read_lock();
2715 	dst = ip6_route_output_flags_noref(net, sk, fl6, flags);
2716 	rt6 = dst_rt6_info(dst);
2717 	/* For dst cached in uncached_list, refcnt is already taken. */
2718 	if (list_empty(&rt6->dst.rt_uncached) && !dst_hold_safe(dst)) {
2719 		dst = &net->ipv6.ip6_null_entry->dst;
2720 		dst_hold(dst);
2721 	}
2722 	rcu_read_unlock();
2723 
2724 	return dst;
2725 }
2726 EXPORT_SYMBOL_GPL(ip6_route_output_flags);
2727 
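/* Replace @dst_orig with a blackhole dst on the loopback device that
 * discards all traffic while keeping the metrics, gateway and addresses
 * of the original.  The reference on @dst_orig is released.
 */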
2728 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2729 {
2730 	struct rt6_info *rt, *ort = dst_rt6_info(dst_orig);
2731 	struct net_device *loopback_dev = net->loopback_dev;
2732 	struct dst_entry *new = NULL;
2733 
2734 	rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev,
2735 		       DST_OBSOLETE_DEAD, 0);
2736 	if (rt) {
2737 		rt6_info_init(rt);
2738 		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
2739 
2740 		new = &rt->dst;
2741 		new->__use = 1;
2742 		new->input = dst_discard;
2743 		new->output = dst_discard_out;
2744 
2745 		dst_copy_metrics(new, &ort->dst);
2746 
2747 		rt->rt6i_idev = in6_dev_get(loopback_dev);
2748 		rt->rt6i_gateway = ort->rt6i_gateway;
2749 		rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
2750 
2751 		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
2752 #ifdef CONFIG_IPV6_SUBTREES
2753 		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
2754 #endif
2755 	}
2756 
2757 	dst_release(dst_orig);
2758 	return new ? new : ERR_PTR(-ENOMEM);
2759 }
2760 
2761 /*
2762  *	Destination cache support functions
2763  */
2764 
2765 static bool fib6_check(struct fib6_info *f6i, u32 cookie)
2766 {
2767 	u32 rt_cookie = 0;
2768 
2769 	if (!fib6_get_cookie_safe(f6i, &rt_cookie) || rt_cookie != cookie)
2770 		return false;
2771 
2772 	if (fib6_check_expired(f6i))
2773 		return false;
2774 
2775 	return true;
2776 }
2777 
2778 static struct dst_entry *rt6_check(struct rt6_info *rt,
2779 				   struct fib6_info *from,
2780 				   u32 cookie)
2781 {
2782 	u32 rt_cookie = 0;
2783 
2784 	if (!from || !fib6_get_cookie_safe(from, &rt_cookie) ||
2785 	    rt_cookie != cookie)
2786 		return NULL;
2787 
2788 	if (rt6_check_expired(rt))
2789 		return NULL;
2790 
2791 	return &rt->dst;
2792 }
2793 
2794 static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt,
2795 					    struct fib6_info *from,
2796 					    u32 cookie)
2797 {
2798 	if (!__rt6_check_expired(rt) &&
2799 	    READ_ONCE(rt->dst.obsolete) == DST_OBSOLETE_FORCE_CHK &&
2800 	    fib6_check(from, cookie))
2801 		return &rt->dst;
2802 	return NULL;
2803 }
2804 
2805 INDIRECT_CALLABLE_SCOPE struct dst_entry *ip6_dst_check(struct dst_entry *dst,
2806 							u32 cookie)
2807 {
2808 	struct dst_entry *dst_ret;
2809 	struct fib6_info *from;
2810 	struct rt6_info *rt;
2811 
2812 	rt = dst_rt6_info(dst);
2813 
2814 	if (rt->sernum)
2815 		return rt6_is_valid(rt) ? dst : NULL;
2816 
2817 	rcu_read_lock();
2818 
2819 	/* All IPv6 dsts are created with ->obsolete set to
2820 	 * DST_OBSOLETE_FORCE_CHK, which always forces validation calls
2821 	 * down into this function.
2822 	 */
2823 
2824 	from = rcu_dereference(rt->from);
2825 
2826 	if (from && (rt->rt6i_flags & RTF_PCPU ||
2827 	    unlikely(!list_empty(&rt->dst.rt_uncached))))
2828 		dst_ret = rt6_dst_from_check(rt, from, cookie);
2829 	else
2830 		dst_ret = rt6_check(rt, from, cookie);
2831 
2832 	rcu_read_unlock();
2833 
2834 	return dst_ret;
2835 }
2836 EXPORT_INDIRECT_CALLABLE(ip6_dst_check);
2837 
2838 static void ip6_negative_advice(struct sock *sk,
2839 				struct dst_entry *dst)
2840 {
2841 	struct rt6_info *rt = dst_rt6_info(dst);
2842 
2843 	if (rt->rt6i_flags & RTF_CACHE) {
2844 		rcu_read_lock();
2845 		if (rt6_check_expired(rt)) {
2846 			/* rt/dst can not be destroyed yet,
2847 			 * because of rcu_read_lock()
2848 			 */
2849 			sk_dst_reset(sk);
2850 			rt6_remove_exception_rt(rt);
2851 		}
2852 		rcu_read_unlock();
2853 		return;
2854 	}
2855 	sk_dst_reset(sk);
2856 }
2857 
2858 static void ip6_link_failure(struct sk_buff *skb)
2859 {
2860 	struct rt6_info *rt;
2861 
2862 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
2863 
2864 	rt = dst_rt6_info(skb_dst(skb));
2865 	if (rt) {
2866 		rcu_read_lock();
2867 		if (rt->rt6i_flags & RTF_CACHE) {
2868 			rt6_remove_exception_rt(rt);
2869 		} else {
2870 			struct fib6_info *from;
2871 			struct fib6_node *fn;
2872 
2873 			from = rcu_dereference(rt->from);
2874 			if (from) {
2875 				fn = rcu_dereference(from->fib6_node);
2876 				if (fn && (rt->rt6i_flags & RTF_DEFAULT))
2877 					WRITE_ONCE(fn->fn_sernum, -1);
2878 			}
2879 		}
2880 		rcu_read_unlock();
2881 	}
2882 }
2883 
2884 static void rt6_update_expires(struct rt6_info *rt0, int timeout)
2885 {
2886 	if (!(rt0->rt6i_flags & RTF_EXPIRES)) {
2887 		struct fib6_info *from;
2888 
2889 		rcu_read_lock();
2890 		from = rcu_dereference(rt0->from);
2891 		if (from)
2892 			WRITE_ONCE(rt0->dst.expires, from->expires);
2893 		rcu_read_unlock();
2894 	}
2895 
2896 	dst_set_expires(&rt0->dst, timeout);
2897 	rt0->rt6i_flags |= RTF_EXPIRES;
2898 }
2899 
2900 static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
2901 {
2902 	struct net *net = dev_net(rt->dst.dev);
2903 
2904 	dst_metric_set(&rt->dst, RTAX_MTU, mtu);
2905 	rt->rt6i_flags |= RTF_MODIFIED;
2906 	rt6_update_expires(rt, READ_ONCE(net->ipv6.sysctl.ip6_rt_mtu_expires));
2907 }
2908 
2909 static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
2910 {
2911 	return !(rt->rt6i_flags & RTF_CACHE) &&
2912 		(rt->rt6i_flags & RTF_PCPU || rcu_access_pointer(rt->from));
2913 }
2914 
2915 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
2916 				 const struct ipv6hdr *iph, u32 mtu,
2917 				 bool confirm_neigh)
2918 {
2919 	const struct in6_addr *daddr, *saddr;
2920 	struct rt6_info *rt6 = dst_rt6_info(dst);
2921 
2922 	/* Note: do *NOT* check dst_metric_locked(dst, RTAX_MTU).
2923 	 * IPv6 pmtu discovery isn't optional, so 'mtu lock' cannot disable it.
2924 	 * [see also comment in rt6_mtu_change_route()]
2925 	 */
2926 
2927 	if (iph) {
2928 		daddr = &iph->daddr;
2929 		saddr = &iph->saddr;
2930 	} else if (sk) {
2931 		daddr = &sk->sk_v6_daddr;
2932 		saddr = &inet6_sk(sk)->saddr;
2933 	} else {
2934 		daddr = NULL;
2935 		saddr = NULL;
2936 	}
2937 
2938 	if (confirm_neigh)
2939 		dst_confirm_neigh(dst, daddr);
2940 
2941 	if (mtu < IPV6_MIN_MTU)
2942 		return;
2943 	if (mtu >= dst6_mtu(dst))
2944 		return;
2945 
2946 	if (!rt6_cache_allowed_for_pmtu(rt6)) {
2947 		rt6_do_update_pmtu(rt6, mtu);
2948 		/* update rt6_ex->stamp for cache */
2949 		if (rt6->rt6i_flags & RTF_CACHE)
2950 			rt6_update_exception_stamp_rt(rt6);
2951 	} else if (daddr) {
2952 		struct fib6_result res = {};
2953 		struct rt6_info *nrt6;
2954 
2955 		rcu_read_lock();
2956 		res.f6i = rcu_dereference(rt6->from);
2957 		if (!res.f6i)
2958 			goto out_unlock;
2959 
2960 		res.fib6_flags = res.f6i->fib6_flags;
2961 		res.fib6_type = res.f6i->fib6_type;
2962 
2963 		if (res.f6i->nh) {
2964 			struct fib6_nh_match_arg arg = {
2965 				.dev = dst_dev_rcu(dst),
2966 				.gw = &rt6->rt6i_gateway,
2967 			};
2968 
2969 			nexthop_for_each_fib6_nh(res.f6i->nh,
2970 						 fib6_nh_find_match, &arg);
2971 
2972 			/* The fib6_info uses a nexthop that has no fib6_nh
2973 			 * matching dst->dev + gw. This should be impossible.
2974 			 */
2975 			if (!arg.match)
2976 				goto out_unlock;
2977 
2978 			res.nh = arg.match;
2979 		} else {
2980 			res.nh = res.f6i->fib6_nh;
2981 		}
2982 
2983 		nrt6 = ip6_rt_cache_alloc(&res, daddr, saddr);
2984 		if (nrt6) {
2985 			rt6_do_update_pmtu(nrt6, mtu);
2986 			if (rt6_insert_exception(nrt6, &res))
2987 				dst_release_immediate(&nrt6->dst);
2988 		}
2989 out_unlock:
2990 		rcu_read_unlock();
2991 	}
2992 }
2993 
2994 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
2995 			       struct sk_buff *skb, u32 mtu,
2996 			       bool confirm_neigh)
2997 {
2998 	__ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu,
2999 			     confirm_neigh);
3000 }
3001 
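/* Update the PMTU towards the destination given by the IPv6 header at
 * skb->data.  @mtu is passed in network byte order.
 */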
3002 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
3003 		     int oif, u32 mark, kuid_t uid)
3004 {
3005 	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
3006 	struct dst_entry *dst;
3007 	struct flowi6 fl6 = {
3008 		.flowi6_oif = oif,
3009 		.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark),
3010 		.daddr = iph->daddr,
3011 		.saddr = iph->saddr,
3012 		.flowlabel = ip6_flowinfo(iph),
3013 		.flowi6_uid = uid,
3014 	};
3015 
3016 	dst = ip6_route_output(net, NULL, &fl6);
3017 	if (!dst->error)
3018 		__ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu), true);
3019 	dst_release(dst);
3020 }
3021 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
3022 
3023 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
3024 {
3025 	int oif = sk->sk_bound_dev_if;
3026 	struct dst_entry *dst;
3027 
3028 	if (!oif && skb->dev)
3029 		oif = l3mdev_master_ifindex(skb->dev);
3030 
3031 	ip6_update_pmtu(skb, sock_net(sk), mtu, oif, READ_ONCE(sk->sk_mark),
3032 			sk_uid(sk));
3033 
3034 	dst = __sk_dst_get(sk);
3035 	if (!dst || !READ_ONCE(dst->obsolete) ||
3036 	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
3037 		return;
3038 
3039 	bh_lock_sock(sk);
3040 	if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
3041 		ip6_datagram_dst_update(sk, false);
3042 	bh_unlock_sock(sk);
3043 }
3044 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
3045 
3046 void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
3047 			   const struct flowi6 *fl6)
3048 {
3049 #ifdef CONFIG_IPV6_SUBTREES
3050 	struct ipv6_pinfo *np = inet6_sk(sk);
3051 #endif
3052 
3053 	ip6_dst_store(sk, dst,
3054 		      ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr),
3055 #ifdef CONFIG_IPV6_SUBTREES
3056 		      ipv6_addr_equal(&fl6->saddr, &np->saddr) ?
3057 		      true :
3058 #endif
3059 		      false);
3060 }
3061 
3062 static bool ip6_redirect_nh_match(const struct fib6_result *res,
3063 				  struct flowi6 *fl6,
3064 				  const struct in6_addr *gw,
3065 				  struct rt6_info **ret)
3066 {
3067 	const struct fib6_nh *nh = res->nh;
3068 
3069 	if (nh->fib_nh_flags & RTNH_F_DEAD || !nh->fib_nh_gw_family ||
3070 	    fl6->flowi6_oif != nh->fib_nh_dev->ifindex)
3071 		return false;
3072 
3073 	/* The rt_cache's gateway might be different from its 'parent'
3074 	 * in the case of an IP redirect.
3075 	 * So we keep searching in the exception table if the gateway
3076 	 * is different.
3077 	 */
3078 	if (!ipv6_addr_equal(gw, &nh->fib_nh_gw6)) {
3079 		struct rt6_info *rt_cache;
3080 
3081 		rt_cache = rt6_find_cached_rt(res, &fl6->daddr, &fl6->saddr);
3082 		if (rt_cache &&
3083 		    ipv6_addr_equal(gw, &rt_cache->rt6i_gateway)) {
3084 			*ret = rt_cache;
3085 			return true;
3086 		}
3087 		return false;
3088 	}
3089 	return true;
3090 }
3091 
3092 struct fib6_nh_rd_arg {
3093 	struct fib6_result	*res;
3094 	struct flowi6		*fl6;
3095 	const struct in6_addr	*gw;
3096 	struct rt6_info		**ret;
3097 };
3098 
3099 static int fib6_nh_redirect_match(struct fib6_nh *nh, void *_arg)
3100 {
3101 	struct fib6_nh_rd_arg *arg = _arg;
3102 
3103 	arg->res->nh = nh;
3104 	return ip6_redirect_nh_match(arg->res, arg->fl6, arg->gw, arg->ret);
3105 }
3106 
3107 /* Handle redirects */
3108 struct ip6rd_flowi {
3109 	struct flowi6 fl6;
3110 	struct in6_addr gateway;
3111 };
3112 
3113 INDIRECT_CALLABLE_SCOPE struct rt6_info *__ip6_route_redirect(struct net *net,
3114 					     struct fib6_table *table,
3115 					     struct flowi6 *fl6,
3116 					     const struct sk_buff *skb,
3117 					     int flags)
3118 {
3119 	struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
3120 	struct rt6_info *ret = NULL;
3121 	struct fib6_result res = {};
3122 	struct fib6_nh_rd_arg arg = {
3123 		.res = &res,
3124 		.fl6 = fl6,
3125 		.gw  = &rdfl->gateway,
3126 		.ret = &ret
3127 	};
3128 	struct fib6_info *rt;
3129 	struct fib6_node *fn;
3130 
3131 	/* Get the "current" route for this destination and
3132 	 * check if the redirect has come from the appropriate router.
3133 	 *
3134 	 * RFC 4861 specifies that redirects should only be
3135 	 * accepted if they come from the nexthop to the target.
3136 	 * Due to the way the routes are chosen, this notion
3137 	 * is a bit fuzzy and one might need to check all possible
3138 	 * routes.
3139 	 */
3140 
3141 	rcu_read_lock();
3142 	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
3143 restart:
3144 	for_each_fib6_node_rt_rcu(fn) {
3145 		res.f6i = rt;
3146 		if (fib6_check_expired(rt))
3147 			continue;
3148 		if (rt->fib6_flags & RTF_REJECT)
3149 			break;
3150 		if (unlikely(rt->nh)) {
3151 			if (nexthop_is_blackhole(rt->nh))
3152 				continue;
3153 			/* on match, res->nh is filled in and potentially ret */
3154 			if (nexthop_for_each_fib6_nh(rt->nh,
3155 						     fib6_nh_redirect_match,
3156 						     &arg))
3157 				goto out;
3158 		} else {
3159 			res.nh = rt->fib6_nh;
3160 			if (ip6_redirect_nh_match(&res, fl6, &rdfl->gateway,
3161 						  &ret))
3162 				goto out;
3163 		}
3164 	}
3165 
3166 	if (!rt)
3167 		rt = net->ipv6.fib6_null_entry;
3168 	else if (rt->fib6_flags & RTF_REJECT) {
3169 		ret = net->ipv6.ip6_null_entry;
3170 		goto out;
3171 	}
3172 
3173 	if (rt == net->ipv6.fib6_null_entry) {
3174 		fn = fib6_backtrack(fn, &fl6->saddr);
3175 		if (fn)
3176 			goto restart;
3177 	}
3178 
3179 	res.f6i = rt;
3180 	res.nh = rt->fib6_nh;
3181 out:
3182 	if (ret) {
3183 		ip6_hold_safe(net, &ret);
3184 	} else {
3185 		res.fib6_flags = res.f6i->fib6_flags;
3186 		res.fib6_type = res.f6i->fib6_type;
3187 		ret = ip6_create_rt_rcu(&res);
3188 	}
3189 
3190 	rcu_read_unlock();
3191 
3192 	trace_fib6_table_lookup(net, &res, table, fl6);
3193 	return ret;
3194 };
3195 
3196 static struct dst_entry *ip6_route_redirect(struct net *net,
3197 					    const struct flowi6 *fl6,
3198 					    const struct sk_buff *skb,
3199 					    const struct in6_addr *gateway)
3200 {
3201 	int flags = RT6_LOOKUP_F_HAS_SADDR;
3202 	struct ip6rd_flowi rdfl;
3203 
3204 	rdfl.fl6 = *fl6;
3205 	rdfl.gateway = *gateway;
3206 
3207 	return fib6_rule_lookup(net, &rdfl.fl6, skb,
3208 				flags, __ip6_route_redirect);
3209 }
3210 
3211 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
3212 		  kuid_t uid)
3213 {
3214 	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
3215 	struct dst_entry *dst;
3216 	struct flowi6 fl6 = {
3217 		.flowi6_iif = LOOPBACK_IFINDEX,
3218 		.flowi6_oif = oif,
3219 		.flowi6_mark = mark,
3220 		.daddr = iph->daddr,
3221 		.saddr = iph->saddr,
3222 		.flowlabel = ip6_flowinfo(iph),
3223 		.flowi6_uid = uid,
3224 	};
3225 
3226 	dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
3227 	rt6_do_redirect(dst, NULL, skb);
3228 	dst_release(dst);
3229 }
3230 EXPORT_SYMBOL_GPL(ip6_redirect);
3231 
3232 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif)
3233 {
3234 	const struct ipv6hdr *iph = ipv6_hdr(skb);
3235 	const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
3236 	struct dst_entry *dst;
3237 	struct flowi6 fl6 = {
3238 		.flowi6_iif = LOOPBACK_IFINDEX,
3239 		.flowi6_oif = oif,
3240 		.daddr = msg->dest,
3241 		.saddr = iph->daddr,
3242 		.flowi6_uid = sock_net_uid(net, NULL),
3243 	};
3244 
3245 	dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
3246 	rt6_do_redirect(dst, NULL, skb);
3247 	dst_release(dst);
3248 }
3249 
3250 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
3251 {
3252 	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if,
3253 		     READ_ONCE(sk->sk_mark), sk_uid(sk));
3254 }
3255 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
3256 
3257 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
3258 {
3259 	unsigned int mtu = dst6_mtu(dst);
3260 	struct net *net;
3261 
3262 	mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
3263 
3264 	rcu_read_lock();
3265 
3266 	net = dst_dev_net_rcu(dst);
3267 	mtu = max_t(unsigned int, mtu,
3268 		    READ_ONCE(net->ipv6.sysctl.ip6_rt_min_advmss));
3269 
3270 	rcu_read_unlock();
3271 
3272 	/*
3273 	 * The maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and the
3274 	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
3275 	 * IPV6_MAXPLEN is also valid and means: "any MSS,
3276 	 * rely only on pmtu discovery".
3277 	 */
3278 	if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
3279 		mtu = IPV6_MAXPLEN;
3280 	return mtu;
3281 }
3282 
3283 INDIRECT_CALLABLE_SCOPE unsigned int ip6_mtu(const struct dst_entry *dst)
3284 {
3285 	return ip6_dst_mtu_maybe_forward(dst, false);
3286 }
3287 EXPORT_INDIRECT_CALLABLE(ip6_mtu);
3288 
3289 /* MTU selection:
3290  * 1. mtu on route is locked - use it
3291  * 2. mtu from nexthop exception
3292  * 3. mtu from egress device
3293  *
3294  * based on ip6_dst_mtu_forward and exception logic of
3295  * rt6_find_cached_rt; called with rcu_read_lock
3296  */
3297 u32 ip6_mtu_from_fib6(const struct fib6_result *res,
3298 		      const struct in6_addr *daddr,
3299 		      const struct in6_addr *saddr)
3300 {
3301 	const struct fib6_nh *nh = res->nh;
3302 	struct fib6_info *f6i = res->f6i;
3303 	struct inet6_dev *idev;
3304 	struct rt6_info *rt;
3305 	u32 mtu = 0;
3306 
3307 	if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {
3308 		mtu = f6i->fib6_pmtu;
3309 		if (mtu)
3310 			goto out;
3311 	}
3312 
3313 	rt = rt6_find_cached_rt(res, daddr, saddr);
3314 	if (unlikely(rt)) {
3315 		mtu = dst_metric_raw(&rt->dst, RTAX_MTU);
3316 	} else {
3317 		struct net_device *dev = nh->fib_nh_dev;
3318 
3319 		mtu = IPV6_MIN_MTU;
3320 		idev = __in6_dev_get(dev);
3321 		if (idev)
3322 			mtu = max_t(u32, mtu, READ_ONCE(idev->cnf.mtu6));
3323 	}
3324 
3325 	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
3326 out:
3327 	return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
3328 }
3329 
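/* Allocate a dst for an outgoing ICMPv6 packet.  The route is never
 * inserted into the FIB; it lives on the uncached list (so device
 * teardown can release it) and is passed through xfrm_lookup().
 */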
3330 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
3331 				  struct flowi6 *fl6)
3332 {
3333 	struct dst_entry *dst;
3334 	struct rt6_info *rt;
3335 	struct inet6_dev *idev = in6_dev_get(dev);
3336 	struct net *net = dev_net(dev);
3337 
3338 	if (unlikely(!idev))
3339 		return ERR_PTR(-ENODEV);
3340 
3341 	rt = ip6_dst_alloc(net, dev, 0);
3342 	if (unlikely(!rt)) {
3343 		in6_dev_put(idev);
3344 		dst = ERR_PTR(-ENOMEM);
3345 		goto out;
3346 	}
3347 
3348 	rt->dst.input = ip6_input;
3349 	rt->dst.output  = ip6_output;
3350 	rt->rt6i_gateway  = fl6->daddr;
3351 	rt->rt6i_dst.addr = fl6->daddr;
3352 	rt->rt6i_dst.plen = 128;
3353 	rt->rt6i_idev     = idev;
3354 	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
3355 
3356 	/* Add this dst into uncached_list so that rt6_disable_ip() can
3357 	 * properly release the net_device.
3358 	 */
3359 	rt6_uncached_list_add(rt);
3360 
3361 	dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
3362 
3363 out:
3364 	return dst;
3365 }
3366 
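/* dst garbage collection callback.  Rate-limited by ip6_rt_gc_min_interval;
 * each run bumps ip6_rt_gc_expire before calling fib6_run_gc(), resets it to
 * half of ip6_rt_gc_timeout once the entry count drops below gc_thresh, and
 * finally decays it according to ip6_rt_gc_elasticity.
 */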
3367 static void ip6_dst_gc(struct dst_ops *ops)
3368 {
3369 	struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
3370 	int rt_min_interval = READ_ONCE(net->ipv6.sysctl.ip6_rt_gc_min_interval);
3371 	int rt_elasticity = READ_ONCE(net->ipv6.sysctl.ip6_rt_gc_elasticity);
3372 	int rt_gc_timeout = READ_ONCE(net->ipv6.sysctl.ip6_rt_gc_timeout);
3373 	unsigned long rt_last_gc = READ_ONCE(net->ipv6.ip6_rt_last_gc);
3374 	unsigned int val;
3375 	int entries;
3376 
3377 	if (time_after(rt_last_gc + rt_min_interval, jiffies))
3378 		goto out;
3379 
3380 	fib6_run_gc(atomic_inc_return(&net->ipv6.ip6_rt_gc_expire), net, true);
3381 	entries = dst_entries_get_slow(ops);
3382 	if (entries < ops->gc_thresh)
3383 		atomic_set(&net->ipv6.ip6_rt_gc_expire, rt_gc_timeout >> 1);
3384 out:
3385 	val = atomic_read(&net->ipv6.ip6_rt_gc_expire);
3386 	atomic_set(&net->ipv6.ip6_rt_gc_expire, val - (val >> rt_elasticity));
3387 }
3388 
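/* Look up @gw_addr in table @tbid using the device and preferred source
 * address from @cfg, and select a path for the result.  Used to validate
 * that a configured gateway is actually reachable.
 */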
3389 static int ip6_nh_lookup_table(struct net *net, struct fib6_config *cfg,
3390 			       const struct in6_addr *gw_addr, u32 tbid,
3391 			       int flags, struct fib6_result *res)
3392 {
3393 	struct flowi6 fl6 = {
3394 		.flowi6_oif = cfg->fc_ifindex,
3395 		.daddr = *gw_addr,
3396 		.saddr = cfg->fc_prefsrc,
3397 	};
3398 	struct fib6_table *table;
3399 	int err;
3400 
3401 	table = fib6_get_table(net, tbid);
3402 	if (!table)
3403 		return -EINVAL;
3404 
3405 	if (!ipv6_addr_any(&cfg->fc_prefsrc))
3406 		flags |= RT6_LOOKUP_F_HAS_SADDR;
3407 
3408 	flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;
3409 
3410 	err = fib6_table_lookup(net, table, cfg->fc_ifindex, &fl6, res, flags);
3411 	if (!err && res->f6i != net->ipv6.fib6_null_entry)
3412 		fib6_select_path(net, res, &fl6, cfg->fc_ifindex,
3413 				 cfg->fc_ifindex != 0, NULL, flags);
3414 
3415 	return err;
3416 }
3417 
3418 static int ip6_route_check_nh_onlink(struct net *net,
3419 				     struct fib6_config *cfg,
3420 				     const struct net_device *dev,
3421 				     struct netlink_ext_ack *extack)
3422 {
3423 	u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
3424 	const struct in6_addr *gw_addr = &cfg->fc_gateway;
3425 	struct fib6_result res = {};
3426 	int err;
3427 
3428 	err = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0, &res);
3429 	if (!err && !(res.fib6_flags & RTF_REJECT) &&
3430 	    res.fib6_type != RTN_UNICAST) {
3431 		NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway");
3432 		err = -EINVAL;
3433 	}
3434 
3435 	return err;
3436 }
3437 
3438 static int ip6_route_check_nh(struct net *net,
3439 			      struct fib6_config *cfg,
3440 			      struct net_device **_dev,
3441 			      netdevice_tracker *dev_tracker,
3442 			      struct inet6_dev **idev)
3443 {
3444 	const struct in6_addr *gw_addr = &cfg->fc_gateway;
3445 	struct net_device *dev = _dev ? *_dev : NULL;
3446 	int flags = RT6_LOOKUP_F_IFACE;
3447 	struct fib6_result res = {};
3448 	int err = -EHOSTUNREACH;
3449 
3450 	if (cfg->fc_table) {
3451 		err = ip6_nh_lookup_table(net, cfg, gw_addr,
3452 					  cfg->fc_table, flags, &res);
3453 		/* The route to gw_addr must not itself go via a gateway or be
3454 		 * a reject route. If a device is given, it must match the result.
3455 		 */
3456 		if (err || res.fib6_flags & RTF_REJECT ||
3457 		    res.nh->fib_nh_gw_family ||
3458 		    (dev && dev != res.nh->fib_nh_dev))
3459 			err = -EHOSTUNREACH;
3460 	}
3461 
3462 	if (err < 0) {
3463 		struct flowi6 fl6 = {
3464 			.flowi6_oif = cfg->fc_ifindex,
3465 			.daddr = *gw_addr,
3466 		};
3467 
3468 		err = fib6_lookup(net, cfg->fc_ifindex, &fl6, &res, flags);
3469 		if (err || res.fib6_flags & RTF_REJECT ||
3470 		    res.nh->fib_nh_gw_family)
3471 			err = -EHOSTUNREACH;
3472 
3473 		if (err)
3474 			return err;
3475 
3476 		fib6_select_path(net, &res, &fl6, cfg->fc_ifindex,
3477 				 cfg->fc_ifindex != 0, NULL, flags);
3478 	}
3479 
3480 	err = 0;
3481 	if (dev) {
3482 		if (dev != res.nh->fib_nh_dev)
3483 			err = -EHOSTUNREACH;
3484 	} else {
3485 		*_dev = dev = res.nh->fib_nh_dev;
3486 		netdev_hold(dev, dev_tracker, GFP_ATOMIC);
3487 		*idev = in6_dev_get(dev);
3488 	}
3489 
3490 	return err;
3491 }
3492 
3493 static int ip6_validate_gw(struct net *net, struct fib6_config *cfg,
3494 			   struct net_device **_dev,
3495 			   netdevice_tracker *dev_tracker,
3496 			   struct inet6_dev **idev,
3497 			   struct netlink_ext_ack *extack)
3498 {
3499 	const struct in6_addr *gw_addr = &cfg->fc_gateway;
3500 	int gwa_type = ipv6_addr_type(gw_addr);
3501 	bool skip_dev = gwa_type & IPV6_ADDR_LINKLOCAL ? false : true;
3502 	const struct net_device *dev = *_dev;
3503 	bool need_addr_check = !dev;
3504 	int err = -EINVAL;
3505 
3506 	/* If gw_addr is local we will fail to detect this in case the
3507 	 * address is still TENTATIVE (DAD in progress). rt6_lookup()
3508 	 * will return the already-added prefix route via the interface
3509 	 * that the prefix route was assigned to, which might be non-loopback.
3510 	 */
3511 	if (dev &&
3512 	    ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
3513 		NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
3514 		goto out;
3515 	}
3516 
3517 	if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) {
3518 		/* IPv6 strictly inhibits using non-link-local
3519 		 * addresses as nexthop addresses.
3520 		 * Otherwise, the router will not be able to send redirects.
3521 		 * It is very good, but in some (rare!) circumstances
3522 		 * (SIT, PtP, NBMA NOARP links) it is handy to allow
3523 		 * some exceptions. --ANK
3524 		 * We allow IPv4-mapped nexthops to support RFC 4798-type
3525 		 * addressing.
3526 		 */
3527 		if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) {
3528 			NL_SET_ERR_MSG(extack, "Invalid gateway address");
3529 			goto out;
3530 		}
3531 
3532 		rcu_read_lock();
3533 
3534 		if (cfg->fc_flags & RTNH_F_ONLINK)
3535 			err = ip6_route_check_nh_onlink(net, cfg, dev, extack);
3536 		else
3537 			err = ip6_route_check_nh(net, cfg, _dev, dev_tracker,
3538 						 idev);
3539 
3540 		rcu_read_unlock();
3541 
3542 		if (err)
3543 			goto out;
3544 	}
3545 
3546 	/* reload in case device was changed */
3547 	dev = *_dev;
3548 
3549 	err = -EINVAL;
3550 	if (!dev) {
3551 		NL_SET_ERR_MSG(extack, "Egress device not specified");
3552 		goto out;
3553 	} else if (dev->flags & IFF_LOOPBACK) {
3554 		NL_SET_ERR_MSG(extack,
3555 			       "Egress device can not be loopback device for this route");
3556 		goto out;
3557 	}
3558 
3559 	/* if we did not check gw_addr above, do so now that the
3560 	 * egress device has been resolved.
3561 	 */
3562 	if (need_addr_check &&
3563 	    ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
3564 		NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
3565 		goto out;
3566 	}
3567 
3568 	err = 0;
3569 out:
3570 	return err;
3571 }
3572 
3573 static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type)
3574 {
3575 	if ((flags & RTF_REJECT) ||
3576 	    (dev && (dev->flags & IFF_LOOPBACK) &&
3577 	     !(addr_type & IPV6_ADDR_LOOPBACK) &&
3578 	     !(flags & (RTF_ANYCAST | RTF_LOCAL))))
3579 		return true;
3580 
3581 	return false;
3582 }
3583 
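/* Initialise a fib6_nh from @cfg: resolve and hold the egress device and
 * its inet6_dev, validate the gateway if one is given, set up any lwtunnel
 * encap state and allocate the per-cpu route cache.  On error every
 * acquired reference is dropped again.
 */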
3584 int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
3585 		 struct fib6_config *cfg, gfp_t gfp_flags,
3586 		 struct netlink_ext_ack *extack)
3587 {
3588 	netdevice_tracker *dev_tracker = &fib6_nh->fib_nh_dev_tracker;
3589 	struct net_device *dev = NULL;
3590 	struct inet6_dev *idev = NULL;
3591 	int err;
3592 
3593 	if (!ipv6_mod_enabled()) {
3594 		NL_SET_ERR_MSG(extack, "IPv6 support not enabled in kernel");
3595 		return -EAFNOSUPPORT;
3596 	}
3597 
3598 	fib6_nh->fib_nh_family = AF_INET6;
3599 #ifdef CONFIG_IPV6_ROUTER_PREF
3600 	fib6_nh->last_probe = jiffies;
3601 #endif
3602 	if (cfg->fc_is_fdb) {
3603 		fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
3604 		fib6_nh->fib_nh_gw_family = AF_INET6;
3605 		return 0;
3606 	}
3607 
3608 	err = -ENODEV;
3609 	if (cfg->fc_ifindex) {
3610 		dev = netdev_get_by_index(net, cfg->fc_ifindex,
3611 					  dev_tracker, gfp_flags);
3612 		if (!dev)
3613 			goto out;
3614 		idev = in6_dev_get(dev);
3615 		if (!idev)
3616 			goto out;
3617 	}
3618 
3619 	if (cfg->fc_flags & RTNH_F_ONLINK) {
3620 		if (!dev) {
3621 			NL_SET_ERR_MSG(extack,
3622 				       "Nexthop device required for onlink");
3623 			goto out;
3624 		}
3625 
3626 		if (!(dev->flags & IFF_UP)) {
3627 			NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3628 			err = -ENETDOWN;
3629 			goto out;
3630 		}
3631 
3632 		fib6_nh->fib_nh_flags |= RTNH_F_ONLINK;
3633 	}
3634 
3635 	fib6_nh->fib_nh_weight = 1;
3636 
3637 	/* Reset the nexthop device to the loopback device in case of reject
3638 	 * routes.
3639 	 */
3640 	if (cfg->fc_flags & RTF_REJECT) {
3641 		/* hold loopback dev/idev if we haven't done so. */
3642 		if (dev != net->loopback_dev) {
3643 			if (dev) {
3644 				netdev_put(dev, dev_tracker);
3645 				in6_dev_put(idev);
3646 			}
3647 			dev = net->loopback_dev;
3648 			netdev_hold(dev, dev_tracker, gfp_flags);
3649 			idev = in6_dev_get(dev);
3650 			if (!idev) {
3651 				err = -ENODEV;
3652 				goto out;
3653 			}
3654 		}
3655 		goto pcpu_alloc;
3656 	}
3657 
3658 	if (cfg->fc_flags & RTF_GATEWAY) {
3659 		err = ip6_validate_gw(net, cfg, &dev, dev_tracker,
3660 				      &idev, extack);
3661 		if (err)
3662 			goto out;
3663 
3664 		fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
3665 		fib6_nh->fib_nh_gw_family = AF_INET6;
3666 	}
3667 
3668 	err = -ENODEV;
3669 	if (!dev)
3670 		goto out;
3671 
3672 	if (!idev || idev->cnf.disable_ipv6) {
3673 		NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
3674 		err = -EACCES;
3675 		goto out;
3676 	}
3677 
3678 	if (!(dev->flags & IFF_UP) && !cfg->fc_ignore_dev_down) {
3679 		NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3680 		err = -ENETDOWN;
3681 		goto out;
3682 	}
3683 
3684 	if (!(cfg->fc_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
3685 	    !netif_carrier_ok(dev))
3686 		fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
3687 
3688 	err = fib_nh_common_init(net, &fib6_nh->nh_common, cfg->fc_encap,
3689 				 cfg->fc_encap_type, cfg, gfp_flags, extack);
3690 	if (err)
3691 		goto out;
3692 
3693 pcpu_alloc:
3694 	fib6_nh->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, gfp_flags);
3695 	if (!fib6_nh->rt6i_pcpu) {
3696 		err = -ENOMEM;
3697 		goto out;
3698 	}
3699 
3700 	fib6_nh->fib_nh_dev = dev;
3701 	fib6_nh->fib_nh_oif = dev->ifindex;
3702 	err = 0;
3703 out:
3704 	if (idev)
3705 		in6_dev_put(idev);
3706 
3707 	if (err) {
3708 		fib_nh_common_release(&fib6_nh->nh_common);
3709 		fib6_nh->nh_common.nhc_pcpu_rth_output = NULL;
3710 		fib6_nh->fib_nh_lws = NULL;
3711 		netdev_put(dev, dev_tracker);
3712 	}
3713 
3714 	return err;
3715 }
3716 
3717 void fib6_nh_release(struct fib6_nh *fib6_nh)
3718 {
3719 	struct rt6_exception_bucket *bucket;
3720 
3721 	rcu_read_lock();
3722 
3723 	fib6_nh_flush_exceptions(fib6_nh, NULL);
3724 	bucket = fib6_nh_get_excptn_bucket(fib6_nh, NULL);
3725 	if (bucket) {
3726 		rcu_assign_pointer(fib6_nh->rt6i_exception_bucket, NULL);
3727 		kfree(bucket);
3728 	}
3729 
3730 	rcu_read_unlock();
3731 
3732 	fib6_nh_release_dsts(fib6_nh);
3733 	free_percpu(fib6_nh->rt6i_pcpu);
3734 
3735 	fib_nh_common_release(&fib6_nh->nh_common);
3736 }
3737 
3738 void fib6_nh_release_dsts(struct fib6_nh *fib6_nh)
3739 {
3740 	int cpu;
3741 
3742 	if (!fib6_nh->rt6i_pcpu)
3743 		return;
3744 
3745 	for_each_possible_cpu(cpu) {
3746 		struct rt6_info *pcpu_rt, **ppcpu_rt;
3747 
3748 		ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
3749 		pcpu_rt = xchg(ppcpu_rt, NULL);
3750 		if (pcpu_rt) {
3751 			dst_dev_put(&pcpu_rt->dst);
3752 			dst_release(&pcpu_rt->dst);
3753 		}
3754 	}
3755 }
3756 
3757 static int fib6_config_validate(struct fib6_config *cfg,
3758 				struct netlink_ext_ack *extack)
3759 {
3760 	/* RTF_PCPU is an internal flag; can not be set by userspace */
3761 	if (cfg->fc_flags & RTF_PCPU) {
3762 		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
3763 		goto errout;
3764 	}
3765 
3766 	/* RTF_CACHE is an internal flag; can not be set by userspace */
3767 	if (cfg->fc_flags & RTF_CACHE) {
3768 		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
3769 		goto errout;
3770 	}
3771 
3772 	if (cfg->fc_type > RTN_MAX) {
3773 		NL_SET_ERR_MSG(extack, "Invalid route type");
3774 		goto errout;
3775 	}
3776 
3777 	if (cfg->fc_dst_len > 128) {
3778 		NL_SET_ERR_MSG(extack, "Invalid prefix length");
3779 		goto errout;
3780 	}
3781 
3782 #ifdef CONFIG_IPV6_SUBTREES
3783 	if (cfg->fc_src_len > 128) {
3784 		NL_SET_ERR_MSG(extack, "Invalid source address length");
3785 		goto errout;
3786 	}
3787 
3788 	if (cfg->fc_nh_id && cfg->fc_src_len) {
3789 		NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
3790 		goto errout;
3791 	}
3792 #else
3793 	if (cfg->fc_src_len) {
3794 		NL_SET_ERR_MSG(extack,
3795 			       "Specifying source address requires IPV6_SUBTREES to be enabled");
3796 		goto errout;
3797 	}
3798 #endif
3799 	return 0;
3800 errout:
3801 	return -EINVAL;
3802 }
3803 
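/* Allocate a fib6_info and fill in the table, metrics, flags and the
 * destination/source prefixes from @cfg.  The nexthop itself is set up
 * separately by ip6_route_info_create_nh().
 */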
3804 static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
3805 					       gfp_t gfp_flags,
3806 					       struct netlink_ext_ack *extack)
3807 {
3808 	struct net *net = cfg->fc_nlinfo.nl_net;
3809 	struct fib6_table *table;
3810 	struct fib6_info *rt;
3811 	int err;
3812 
3813 	if (cfg->fc_nlinfo.nlh &&
3814 	    !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
3815 		table = fib6_get_table(net, cfg->fc_table);
3816 		if (!table) {
3817 			pr_warn("NLM_F_CREATE should be specified when creating new route\n");
3818 			table = fib6_new_table(net, cfg->fc_table);
3819 		}
3820 	} else {
3821 		table = fib6_new_table(net, cfg->fc_table);
3822 	}
3823 	if (!table) {
3824 		err = -ENOBUFS;
3825 		goto err;
3826 	}
3827 
3828 	rt = fib6_info_alloc(gfp_flags, !cfg->fc_nh_id);
3829 	if (!rt) {
3830 		err = -ENOMEM;
3831 		goto err;
3832 	}
3833 
3834 	rt->fib6_metrics = ip_fib_metrics_init(cfg->fc_mx, cfg->fc_mx_len,
3835 					       extack);
3836 	if (IS_ERR(rt->fib6_metrics)) {
3837 		err = PTR_ERR(rt->fib6_metrics);
3838 		goto free;
3839 	}
3840 
3841 	if (cfg->fc_flags & RTF_ADDRCONF)
3842 		rt->dst_nocount = true;
3843 
3844 	if (cfg->fc_flags & RTF_EXPIRES)
3845 		fib6_set_expires(rt, jiffies +
3846 				 clock_t_to_jiffies(cfg->fc_expires));
3847 
3848 	if (cfg->fc_protocol == RTPROT_UNSPEC)
3849 		cfg->fc_protocol = RTPROT_BOOT;
3850 
3851 	rt->fib6_protocol = cfg->fc_protocol;
3852 	rt->fib6_table = table;
3853 	rt->fib6_metric = cfg->fc_metric;
3854 	rt->fib6_type = cfg->fc_type ? : RTN_UNICAST;
3855 	rt->fib6_flags = cfg->fc_flags & ~RTF_GATEWAY;
3856 
3857 	ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
3858 	rt->fib6_dst.plen = cfg->fc_dst_len;
3859 
3860 #ifdef CONFIG_IPV6_SUBTREES
3861 	ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len);
3862 	rt->fib6_src.plen = cfg->fc_src_len;
3863 #endif
3864 	return rt;
3865 free:
3866 	kfree(rt);
3867 err:
3868 	return ERR_PTR(err);
3869 }
3870 
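/* Bind the nexthop of a freshly created fib6_info: either take a
 * reference on an external nexthop object (RTA_NH_ID) or initialise the
 * embedded fib6_nh, then validate the preferred source address. On
 * failure the fib6_info is freed or released here, so the caller must
 * not touch it again.
 */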
3871 static int ip6_route_info_create_nh(struct fib6_info *rt,
3872 				    struct fib6_config *cfg,
3873 				    gfp_t gfp_flags,
3874 				    struct netlink_ext_ack *extack)
3875 {
3876 	struct net *net = cfg->fc_nlinfo.nl_net;
3877 	struct fib6_nh *fib6_nh;
3878 	int err;
3879 
3880 	if (cfg->fc_nh_id) {
3881 		struct nexthop *nh;
3882 
3883 		rcu_read_lock();
3884 
3885 		nh = nexthop_find_by_id(net, cfg->fc_nh_id);
3886 		if (!nh) {
3887 			err = -EINVAL;
3888 			NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
3889 			goto out_free;
3890 		}
3891 
3892 		err = fib6_check_nexthop(nh, cfg, extack);
3893 		if (err)
3894 			goto out_free;
3895 
3896 		if (!nexthop_get(nh)) {
3897 			NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
3898 			err = -ENOENT;
3899 			goto out_free;
3900 		}
3901 
3902 		rt->nh = nh;
3903 		fib6_nh = nexthop_fib6_nh(rt->nh);
3904 
3905 		rcu_read_unlock();
3906 	} else {
3907 		int addr_type;
3908 
3909 		err = fib6_nh_init(net, rt->fib6_nh, cfg, gfp_flags, extack);
3910 		if (err)
3911 			goto out_release;
3912 
3913 		fib6_nh = rt->fib6_nh;
3914 
3915 		/* We cannot add true routes via loopback here, as they would
3916 		 * result in the kernel looping; promote them to reject routes.
3917 		 */
3918 		addr_type = ipv6_addr_type(&cfg->fc_dst);
3919 		if (fib6_is_reject(cfg->fc_flags, rt->fib6_nh->fib_nh_dev,
3920 				   addr_type))
3921 			rt->fib6_flags = RTF_REJECT | RTF_NONEXTHOP;
3922 	}
3923 
3924 	if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
3925 		struct net_device *dev = fib6_nh->fib_nh_dev;
3926 
3927 		if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
3928 			NL_SET_ERR_MSG(extack, "Invalid source address");
3929 			err = -EINVAL;
3930 			goto out_release;
3931 		}
3932 		rt->fib6_prefsrc.addr = cfg->fc_prefsrc;
3933 		rt->fib6_prefsrc.plen = 128;
3934 	}
3935 
3936 	return 0;
3937 out_release:
3938 	fib6_info_release(rt);
3939 	return err;
3940 out_free:
3941 	rcu_read_unlock();
3942 	ip_fib_metrics_put(rt->fib6_metrics);
3943 	kfree(rt);
3944 	return err;
3945 }
3946 
3947 int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
3948 		  struct netlink_ext_ack *extack)
3949 {
3950 	struct fib6_info *rt;
3951 	int err;
3952 
3953 	err = fib6_config_validate(cfg, extack);
3954 	if (err)
3955 		return err;
3956 
3957 	rt = ip6_route_info_create(cfg, gfp_flags, extack);
3958 	if (IS_ERR(rt))
3959 		return PTR_ERR(rt);
3960 
3961 	err = ip6_route_info_create_nh(rt, cfg, gfp_flags, extack);
3962 	if (err)
3963 		return err;
3964 
3965 	err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
3966 	fib6_info_release(rt);
3967 
3968 	return err;
3969 }
3970 
3971 static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info)
3972 {
3973 	struct net *net = info->nl_net;
3974 	struct fib6_table *table;
3975 	int err;
3976 
3977 	if (rt == net->ipv6.fib6_null_entry) {
3978 		err = -ENOENT;
3979 		goto out;
3980 	}
3981 
3982 	table = rt->fib6_table;
3983 	spin_lock_bh(&table->tb6_lock);
3984 	err = fib6_del(rt, info);
3985 	spin_unlock_bh(&table->tb6_lock);
3986 
3987 out:
3988 	fib6_info_release(rt);
3989 	return err;
3990 }
3991 
3992 int ip6_del_rt(struct net *net, struct fib6_info *rt, bool skip_notify)
3993 {
3994 	struct nl_info info = {
3995 		.nl_net = net,
3996 		.skip_notify = skip_notify
3997 	};
3998 
3999 	return __ip6_del_rt(rt, &info);
4000 }
4001 
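/* Delete a route and, when fc_delete_all_nh is set, all of its ECMP
 * siblings. A single RTM_DELROUTE notification covering every nexthop
 * is sent instead of one message per sibling.
 */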
4002 static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg)
4003 {
4004 	struct nl_info *info = &cfg->fc_nlinfo;
4005 	struct net *net = info->nl_net;
4006 	struct sk_buff *skb = NULL;
4007 	struct fib6_table *table;
4008 	int err = -ENOENT;
4009 
4010 	if (rt == net->ipv6.fib6_null_entry)
4011 		goto out_put;
4012 	table = rt->fib6_table;
4013 	spin_lock_bh(&table->tb6_lock);
4014 
4015 	if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) {
4016 		struct fib6_info *sibling, *next_sibling;
4017 		struct fib6_node *fn;
4018 
4019 		/* prefer to send a single notification with all hops */
4020 		skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
4021 		if (skb) {
4022 			u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
4023 
4024 			if (rt6_fill_node(net, skb, rt, NULL,
4025 					  NULL, NULL, 0, RTM_DELROUTE,
4026 					  info->portid, seq, 0) < 0) {
4027 				kfree_skb(skb);
4028 				skb = NULL;
4029 			} else
4030 				info->skip_notify = 1;
4031 		}
4032 
4033 		/* 'rt' points to the first sibling route. If it is not the
4034 		 * leaf, then we do not need to send a notification. Otherwise,
4035 		 * we need to check if the last sibling has a next route or not
4036 		 * and emit a replace or delete notification, respectively.
4037 		 */
4038 		info->skip_notify_kernel = 1;
4039 		fn = rcu_dereference_protected(rt->fib6_node,
4040 					    lockdep_is_held(&table->tb6_lock));
4041 		if (rcu_access_pointer(fn->leaf) == rt) {
4042 			struct fib6_info *last_sibling, *replace_rt;
4043 
4044 			last_sibling = list_last_entry(&rt->fib6_siblings,
4045 						       struct fib6_info,
4046 						       fib6_siblings);
4047 			replace_rt = rcu_dereference_protected(
4048 					    last_sibling->fib6_next,
4049 					    lockdep_is_held(&table->tb6_lock));
4050 			if (replace_rt)
4051 				call_fib6_entry_notifiers_replace(net,
4052 								  replace_rt);
4053 			else
4054 				call_fib6_multipath_entry_notifiers(net,
4055 						       FIB_EVENT_ENTRY_DEL,
4056 						       rt, rt->fib6_nsiblings,
4057 						       NULL);
4058 		}
4059 		list_for_each_entry_safe(sibling, next_sibling,
4060 					 &rt->fib6_siblings,
4061 					 fib6_siblings) {
4062 			err = fib6_del(sibling, info);
4063 			if (err)
4064 				goto out_unlock;
4065 		}
4066 	}
4067 
4068 	err = fib6_del(rt, info);
4069 out_unlock:
4070 	spin_unlock_bh(&table->tb6_lock);
4071 out_put:
4072 	fib6_info_release(rt);
4073 
4074 	if (skb) {
4075 		rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
4076 			    info->nlh, gfp_any());
4077 	}
4078 	return err;
4079 }
4080 
4081 static int __ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
4082 {
4083 	int rc = -ESRCH;
4084 
4085 	if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex)
4086 		goto out;
4087 
4088 	if (cfg->fc_flags & RTF_GATEWAY &&
4089 	    !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
4090 		goto out;
4091 
4092 	rc = rt6_remove_exception_rt(rt);
4093 out:
4094 	return rc;
4095 }
4096 
4097 static int ip6_del_cached_rt(struct fib6_config *cfg, struct fib6_info *rt,
4098 			     struct fib6_nh *nh)
4099 {
4100 	struct fib6_result res = {
4101 		.f6i = rt,
4102 		.nh = nh,
4103 	};
4104 	struct rt6_info *rt_cache;
4105 
4106 	rt_cache = rt6_find_cached_rt(&res, &cfg->fc_dst, &cfg->fc_src);
4107 	if (rt_cache)
4108 		return __ip6_del_cached_rt(rt_cache, cfg);
4109 
4110 	return 0;
4111 }
4112 
4113 struct fib6_nh_del_cached_rt_arg {
4114 	struct fib6_config *cfg;
4115 	struct fib6_info *f6i;
4116 };
4117 
4118 static int fib6_nh_del_cached_rt(struct fib6_nh *nh, void *_arg)
4119 {
4120 	struct fib6_nh_del_cached_rt_arg *arg = _arg;
4121 	int rc;
4122 
4123 	rc = ip6_del_cached_rt(arg->cfg, arg->f6i, nh);
4124 	return rc != -ESRCH ? rc : 0;
4125 }
4126 
4127 static int ip6_del_cached_rt_nh(struct fib6_config *cfg, struct fib6_info *f6i)
4128 {
4129 	struct fib6_nh_del_cached_rt_arg arg = {
4130 		.cfg = cfg,
4131 		.f6i = f6i
4132 	};
4133 
4134 	return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_del_cached_rt, &arg);
4135 }
4136 
4137 static int ip6_route_del(struct fib6_config *cfg,
4138 			 struct netlink_ext_ack *extack)
4139 {
4140 	struct fib6_table *table;
4141 	struct fib6_info *rt;
4142 	struct fib6_node *fn;
4143 	int err = -ESRCH;
4144 
4145 	table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
4146 	if (!table) {
4147 		NL_SET_ERR_MSG(extack, "FIB table does not exist");
4148 		return err;
4149 	}
4150 
4151 	rcu_read_lock();
4152 
4153 	fn = fib6_locate(&table->tb6_root,
4154 			 &cfg->fc_dst, cfg->fc_dst_len,
4155 			 &cfg->fc_src, cfg->fc_src_len,
4156 			 !(cfg->fc_flags & RTF_CACHE));
4157 
4158 	if (fn) {
4159 		for_each_fib6_node_rt_rcu(fn) {
4160 			struct fib6_nh *nh;
4161 
4162 			if (rt->nh && cfg->fc_nh_id &&
4163 			    rt->nh->id != cfg->fc_nh_id)
4164 				continue;
4165 
4166 			if (cfg->fc_flags & RTF_CACHE) {
4167 				int rc = 0;
4168 
4169 				if (rt->nh) {
4170 					rc = ip6_del_cached_rt_nh(cfg, rt);
4171 				} else if (cfg->fc_nh_id) {
4172 					continue;
4173 				} else {
4174 					nh = rt->fib6_nh;
4175 					rc = ip6_del_cached_rt(cfg, rt, nh);
4176 				}
4177 				if (rc != -ESRCH) {
4178 					rcu_read_unlock();
4179 					return rc;
4180 				}
4181 				continue;
4182 			}
4183 
4184 			if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
4185 				continue;
4186 			if (cfg->fc_protocol &&
4187 			    cfg->fc_protocol != rt->fib6_protocol)
4188 				continue;
4189 
4190 			if (rt->nh) {
4191 				if (!fib6_info_hold_safe(rt))
4192 					continue;
4193 
4194 				err =  __ip6_del_rt(rt, &cfg->fc_nlinfo);
4195 				break;
4196 			}
4197 			if (cfg->fc_nh_id)
4198 				continue;
4199 
4200 			nh = rt->fib6_nh;
4201 			if (cfg->fc_ifindex &&
4202 			    (!nh->fib_nh_dev ||
4203 			     nh->fib_nh_dev->ifindex != cfg->fc_ifindex))
4204 				continue;
4205 			if (cfg->fc_flags & RTF_GATEWAY &&
4206 			    !ipv6_addr_equal(&cfg->fc_gateway, &nh->fib_nh_gw6))
4207 				continue;
4208 			if (!fib6_info_hold_safe(rt))
4209 				continue;
4210 
4211 			/* if a gateway was specified, only delete the one hop */
4212 			if (cfg->fc_flags & RTF_GATEWAY)
4213 				err = __ip6_del_rt(rt, &cfg->fc_nlinfo);
4214 			else
4215 				err = __ip6_del_rt_siblings(rt, cfg);
4216 			break;
4217 		}
4218 	}
4219 	rcu_read_unlock();
4220 
4221 	return err;
4222 }
4223 
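/* Process an ICMPv6 redirect for the destination carried in @skb:
 * validate the message and its ND options, update the neighbour entry
 * for the new first hop and install a cached (RTF_CACHE) exception
 * route pointing at it.
 */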
4224 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
4225 {
4226 	struct netevent_redirect netevent;
4227 	struct rt6_info *rt, *nrt = NULL;
4228 	struct fib6_result res = {};
4229 	struct ndisc_options ndopts;
4230 	struct inet6_dev *in6_dev;
4231 	struct neighbour *neigh;
4232 	struct rd_msg *msg;
4233 	int optlen, on_link;
4234 	u8 *lladdr;
4235 
4236 	optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
4237 	optlen -= sizeof(*msg);
4238 
4239 	if (optlen < 0) {
4240 		net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
4241 		return;
4242 	}
4243 
4244 	msg = (struct rd_msg *)icmp6_hdr(skb);
4245 
4246 	if (ipv6_addr_is_multicast(&msg->dest)) {
4247 		net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
4248 		return;
4249 	}
4250 
4251 	on_link = 0;
4252 	if (ipv6_addr_equal(&msg->dest, &msg->target)) {
4253 		on_link = 1;
4254 	} else if (ipv6_addr_type(&msg->target) !=
4255 		   (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
4256 		net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
4257 		return;
4258 	}
4259 
4260 	in6_dev = __in6_dev_get(skb->dev);
4261 	if (!in6_dev)
4262 		return;
4263 	if (READ_ONCE(in6_dev->cnf.forwarding) ||
4264 	    !READ_ONCE(in6_dev->cnf.accept_redirects))
4265 		return;
4266 
4267 	/* RFC2461 8.1:
4268 	 *	The IP source address of the Redirect MUST be the same as the current
4269 	 *	first-hop router for the specified ICMP Destination Address.
4270 	 */
4271 
4272 	if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
4273 		net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
4274 		return;
4275 	}
4276 
4277 	lladdr = NULL;
4278 	if (ndopts.nd_opts_tgt_lladdr) {
4279 		lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
4280 					     skb->dev);
4281 		if (!lladdr) {
4282 			net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
4283 			return;
4284 		}
4285 	}
4286 
4287 	rt = dst_rt6_info(dst);
4288 	if (rt->rt6i_flags & RTF_REJECT) {
4289 		net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
4290 		return;
4291 	}
4292 
4293 	/* Redirect received -> path was valid.
4294 	 * Look, redirects are sent only in response to data packets,
4295 	 * so that this nexthop apparently is reachable. --ANK
4296 	 */
4297 	dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
4298 
4299 	neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
4300 	if (!neigh)
4301 		return;
4302 
4303 	/*
4304 	 *	We have finally decided to accept it.
4305 	 */
4306 
4307 	ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
4308 		     NEIGH_UPDATE_F_WEAK_OVERRIDE|
4309 		     NEIGH_UPDATE_F_OVERRIDE|
4310 		     (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
4311 				     NEIGH_UPDATE_F_ISROUTER)),
4312 		     NDISC_REDIRECT, &ndopts);
4313 
4314 	rcu_read_lock();
4315 	res.f6i = rcu_dereference(rt->from);
4316 	if (!res.f6i)
4317 		goto out;
4318 
4319 	if (res.f6i->nh) {
4320 		struct fib6_nh_match_arg arg = {
4321 			.dev = dst_dev_rcu(dst),
4322 			.gw = &rt->rt6i_gateway,
4323 		};
4324 
4325 		nexthop_for_each_fib6_nh(res.f6i->nh,
4326 					 fib6_nh_find_match, &arg);
4327 
4328 		/* fib6_info uses a nexthop that does not have a fib6_nh
4329 		 * using dst->dev. This should be impossible.
4330 		 */
4331 		if (!arg.match)
4332 			goto out;
4333 		res.nh = arg.match;
4334 	} else {
4335 		res.nh = res.f6i->fib6_nh;
4336 	}
4337 
4338 	res.fib6_flags = res.f6i->fib6_flags;
4339 	res.fib6_type = res.f6i->fib6_type;
4340 	nrt = ip6_rt_cache_alloc(&res, &msg->dest, NULL);
4341 	if (!nrt)
4342 		goto out;
4343 
4344 	nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
4345 	if (on_link)
4346 		nrt->rt6i_flags &= ~RTF_GATEWAY;
4347 
4348 	nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
4349 
4350 	/* rt6_insert_exception() will take care of duplicated exceptions */
4351 	if (rt6_insert_exception(nrt, &res)) {
4352 		dst_release_immediate(&nrt->dst);
4353 		goto out;
4354 	}
4355 
4356 	netevent.old = &rt->dst;
4357 	netevent.new = &nrt->dst;
4358 	netevent.daddr = &msg->dest;
4359 	netevent.neigh = neigh;
4360 	call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
4361 
4362 out:
4363 	rcu_read_unlock();
4364 	neigh_release(neigh);
4365 }
4366 
4367 #ifdef CONFIG_IPV6_ROUTE_INFO
4368 static struct fib6_info *rt6_get_route_info(struct net *net,
4369 					   const struct in6_addr *prefix, int prefixlen,
4370 					   const struct in6_addr *gwaddr,
4371 					   struct net_device *dev)
4372 {
4373 	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
4374 	int ifindex = dev->ifindex;
4375 	struct fib6_node *fn;
4376 	struct fib6_info *rt = NULL;
4377 	struct fib6_table *table;
4378 
4379 	table = fib6_get_table(net, tb_id);
4380 	if (!table)
4381 		return NULL;
4382 
4383 	rcu_read_lock();
4384 	fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
4385 	if (!fn)
4386 		goto out;
4387 
4388 	for_each_fib6_node_rt_rcu(fn) {
4389 		/* these routes do not use nexthops */
4390 		if (rt->nh)
4391 			continue;
4392 		if (rt->fib6_nh->fib_nh_dev->ifindex != ifindex)
4393 			continue;
4394 		if (!(rt->fib6_flags & RTF_ROUTEINFO) ||
4395 		    !rt->fib6_nh->fib_nh_gw_family)
4396 			continue;
4397 		if (!ipv6_addr_equal(&rt->fib6_nh->fib_nh_gw6, gwaddr))
4398 			continue;
4399 		if (!fib6_info_hold_safe(rt))
4400 			continue;
4401 		break;
4402 	}
4403 out:
4404 	rcu_read_unlock();
4405 	return rt;
4406 }
4407 
4408 static struct fib6_info *rt6_add_route_info(struct net *net,
4409 					   const struct in6_addr *prefix, int prefixlen,
4410 					   const struct in6_addr *gwaddr,
4411 					   struct net_device *dev,
4412 					   unsigned int pref)
4413 {
4414 	struct fib6_config cfg = {
4415 		.fc_metric	= IP6_RT_PRIO_USER,
4416 		.fc_ifindex	= dev->ifindex,
4417 		.fc_dst_len	= prefixlen,
4418 		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
4419 				  RTF_UP | RTF_PREF(pref),
4420 		.fc_protocol = RTPROT_RA,
4421 		.fc_type = RTN_UNICAST,
4422 		.fc_nlinfo.portid = 0,
4423 		.fc_nlinfo.nlh = NULL,
4424 		.fc_nlinfo.nl_net = net,
4425 	};
4426 
4427 	cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
4428 	cfg.fc_dst = *prefix;
4429 	cfg.fc_gateway = *gwaddr;
4430 
4431 	/* We should treat it as a default route if prefix length is 0. */
4432 	if (!prefixlen)
4433 		cfg.fc_flags |= RTF_DEFAULT;
4434 
4435 	ip6_route_add(&cfg, GFP_ATOMIC, NULL);
4436 
4437 	return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
4438 }
4439 #endif
4440 
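/* Look up the RA-installed default route via @addr on @dev. Returns the
 * entry with a reference held, or NULL if no such default router exists.
 */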
4441 struct fib6_info *rt6_get_dflt_router(struct net *net,
4442 				     const struct in6_addr *addr,
4443 				     struct net_device *dev)
4444 {
4445 	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
4446 	struct fib6_info *rt;
4447 	struct fib6_table *table;
4448 
4449 	table = fib6_get_table(net, tb_id);
4450 	if (!table)
4451 		return NULL;
4452 
4453 	rcu_read_lock();
4454 	for_each_fib6_node_rt_rcu(&table->tb6_root) {
4455 		struct fib6_nh *nh;
4456 
4457 		/* RA routes do not use nexthops */
4458 		if (rt->nh)
4459 			continue;
4460 
4461 		nh = rt->fib6_nh;
4462 		if (dev == nh->fib_nh_dev &&
4463 		    ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
4464 		    ipv6_addr_equal(&nh->fib_nh_gw6, addr))
4465 			break;
4466 	}
4467 	if (rt && !fib6_info_hold_safe(rt))
4468 		rt = NULL;
4469 	rcu_read_unlock();
4470 	return rt;
4471 }
4472 
4473 struct fib6_info *rt6_add_dflt_router(struct net *net,
4474 				     const struct in6_addr *gwaddr,
4475 				     struct net_device *dev,
4476 				     unsigned int pref,
4477 				     u32 defrtr_usr_metric,
4478 				     int lifetime)
4479 {
4480 	struct fib6_config cfg = {
4481 		.fc_table	= l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
4482 		.fc_metric	= defrtr_usr_metric,
4483 		.fc_ifindex	= dev->ifindex,
4484 		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
4485 				  RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
4486 		.fc_protocol = RTPROT_RA,
4487 		.fc_type = RTN_UNICAST,
4488 		.fc_nlinfo.portid = 0,
4489 		.fc_nlinfo.nlh = NULL,
4490 		.fc_nlinfo.nl_net = net,
4491 		.fc_expires = jiffies_to_clock_t(lifetime * HZ),
4492 	};
4493 
4494 	cfg.fc_gateway = *gwaddr;
4495 
4496 	if (!ip6_route_add(&cfg, GFP_ATOMIC, NULL)) {
4497 		struct fib6_table *table;
4498 
4499 		table = fib6_get_table(dev_net(dev), cfg.fc_table);
4500 		if (table)
4501 			table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
4502 	}
4503 
4504 	return rt6_get_dflt_router(net, gwaddr, dev);
4505 }
4506 
4507 static void __rt6_purge_dflt_routers(struct net *net,
4508 				     struct fib6_table *table)
4509 {
4510 	struct fib6_info *rt;
4511 
4512 restart:
4513 	rcu_read_lock();
4514 	for_each_fib6_node_rt_rcu(&table->tb6_root) {
4515 		struct net_device *dev = fib6_info_nh_dev(rt);
4516 		struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
4517 
4518 		if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
4519 		    (!idev || idev->cnf.accept_ra != 2) &&
4520 		    fib6_info_hold_safe(rt)) {
4521 			rcu_read_unlock();
4522 			ip6_del_rt(net, rt, false);
4523 			goto restart;
4524 		}
4525 	}
4526 	rcu_read_unlock();
4527 
4528 	table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
4529 }
4530 
4531 void rt6_purge_dflt_routers(struct net *net)
4532 {
4533 	struct fib6_table *table;
4534 	struct hlist_head *head;
4535 	unsigned int h;
4536 
4537 	rcu_read_lock();
4538 
4539 	for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
4540 		head = &net->ipv6.fib_table_hash[h];
4541 		hlist_for_each_entry_rcu(table, head, tb6_hlist) {
4542 			if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
4543 				__rt6_purge_dflt_routers(net, table);
4544 		}
4545 	}
4546 
4547 	rcu_read_unlock();
4548 }
4549 
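/* Translate the legacy in6_rtmsg used by the SIOCADDRT/SIOCDELRT ioctls
 * into a fib6_config understood by the FIB code.
 */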
4550 static void rtmsg_to_fib6_config(struct net *net,
4551 				 struct in6_rtmsg *rtmsg,
4552 				 struct fib6_config *cfg)
4553 {
4554 	*cfg = (struct fib6_config){
4555 		.fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
4556 			 : RT6_TABLE_MAIN,
4557 		.fc_ifindex = rtmsg->rtmsg_ifindex,
4558 		.fc_metric = rtmsg->rtmsg_metric,
4559 		.fc_expires = rtmsg->rtmsg_info,
4560 		.fc_dst_len = rtmsg->rtmsg_dst_len,
4561 		.fc_src_len = rtmsg->rtmsg_src_len,
4562 		.fc_flags = rtmsg->rtmsg_flags,
4563 		.fc_type = rtmsg->rtmsg_type,
4564 
4565 		.fc_nlinfo.nl_net = net,
4566 
4567 		.fc_dst = rtmsg->rtmsg_dst,
4568 		.fc_src = rtmsg->rtmsg_src,
4569 		.fc_gateway = rtmsg->rtmsg_gateway,
4570 	};
4571 }
4572 
4573 int ipv6_route_ioctl(struct net *net, unsigned int cmd, struct in6_rtmsg *rtmsg)
4574 {
4575 	struct fib6_config cfg;
4576 	int err;
4577 
4578 	if (cmd != SIOCADDRT && cmd != SIOCDELRT)
4579 		return -EINVAL;
4580 	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
4581 		return -EPERM;
4582 
4583 	rtmsg_to_fib6_config(net, rtmsg, &cfg);
4584 
4585 	switch (cmd) {
4586 	case SIOCADDRT:
4587 		/* Only do the default setting of fc_metric in route adding */
4588 		if (cfg.fc_metric == 0)
4589 			cfg.fc_metric = IP6_RT_PRIO_USER;
4590 		err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
4591 		break;
4592 	case SIOCDELRT:
4593 		err = ip6_route_del(&cfg, NULL);
4594 		break;
4595 	}
4596 
4597 	return err;
4598 }
4599 
4600 /*
4601  *	Drop the packet on the floor
4602  */
4603 
4604 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
4605 {
4606 	struct dst_entry *dst = skb_dst(skb);
4607 	struct net_device *dev = dst_dev(dst);
4608 	struct net *net = dev_net(dev);
4609 	struct inet6_dev *idev;
4610 	SKB_DR(reason);
4611 	int type;
4612 
4613 	if (netif_is_l3_master(skb->dev) ||
4614 	    dev == net->loopback_dev)
4615 		idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
4616 	else
4617 		idev = ip6_dst_idev(dst);
4618 
4619 	switch (ipstats_mib_noroutes) {
4620 	case IPSTATS_MIB_INNOROUTES:
4621 		type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
4622 		if (type == IPV6_ADDR_ANY) {
4623 			SKB_DR_SET(reason, IP_INADDRERRORS);
4624 			IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
4625 			break;
4626 		}
4627 		SKB_DR_SET(reason, IP_INNOROUTES);
4628 		fallthrough;
4629 	case IPSTATS_MIB_OUTNOROUTES:
4630 		SKB_DR_OR(reason, IP_OUTNOROUTES);
4631 		IP6_INC_STATS(net, idev, ipstats_mib_noroutes);
4632 		break;
4633 	}
4634 
4635 	/* Start over by dropping the dst for l3mdev case */
4636 	if (netif_is_l3_master(skb->dev))
4637 		skb_dst_drop(skb);
4638 
4639 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
4640 	kfree_skb_reason(skb, reason);
4641 	return 0;
4642 }
4643 
4644 static int ip6_pkt_discard(struct sk_buff *skb)
4645 {
4646 	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
4647 }
4648 
4649 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
4650 {
4651 	skb->dev = skb_dst_dev(skb);
4652 	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
4653 }
4654 
4655 static int ip6_pkt_prohibit(struct sk_buff *skb)
4656 {
4657 	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
4658 }
4659 
4660 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
4661 {
4662 	skb->dev = skb_dst_dev(skb);
4663 	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
4664 }
4665 
4666 /*
4667  *	Allocate a dst for local (unicast / anycast) address.
4668  */
4669 
4670 struct fib6_info *addrconf_f6i_alloc(struct net *net,
4671 				     struct inet6_dev *idev,
4672 				     const struct in6_addr *addr,
4673 				     bool anycast, gfp_t gfp_flags,
4674 				     struct netlink_ext_ack *extack)
4675 {
4676 	struct fib6_config cfg = {
4677 		.fc_table = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL,
4678 		.fc_ifindex = idev->dev->ifindex,
4679 		.fc_flags = RTF_UP | RTF_NONEXTHOP,
4680 		.fc_dst = *addr,
4681 		.fc_dst_len = 128,
4682 		.fc_protocol = RTPROT_KERNEL,
4683 		.fc_nlinfo.nl_net = net,
4684 		.fc_ignore_dev_down = true,
4685 	};
4686 	struct fib6_info *f6i;
4687 	int err;
4688 
4689 	if (anycast) {
4690 		cfg.fc_type = RTN_ANYCAST;
4691 		cfg.fc_flags |= RTF_ANYCAST;
4692 	} else {
4693 		cfg.fc_type = RTN_LOCAL;
4694 		cfg.fc_flags |= RTF_LOCAL;
4695 	}
4696 
4697 	f6i = ip6_route_info_create(&cfg, gfp_flags, extack);
4698 	if (IS_ERR(f6i))
4699 		return f6i;
4700 
4701 	err = ip6_route_info_create_nh(f6i, &cfg, gfp_flags, extack);
4702 	if (err)
4703 		return ERR_PTR(err);
4704 
4705 	f6i->dst_nocount = true;
4706 
4707 	if (!anycast &&
4708 	    (READ_ONCE(net->ipv6.devconf_all->disable_policy) ||
4709 	     READ_ONCE(idev->cnf.disable_policy)))
4710 		f6i->dst_nopolicy = true;
4711 
4712 	return f6i;
4713 }
4714 
4715 /* remove deleted ip from prefsrc entries */
4716 struct arg_dev_net_ip {
4717 	struct net *net;
4718 	struct in6_addr *addr;
4719 };
4720 
4721 static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
4722 {
4723 	struct net *net = ((struct arg_dev_net_ip *)arg)->net;
4724 	struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
4725 
4726 	if (!rt->nh &&
4727 	    rt != net->ipv6.fib6_null_entry &&
4728 	    ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr) &&
4729 	    !ipv6_chk_addr(net, addr, rt->fib6_nh->fib_nh_dev, 0)) {
4730 		spin_lock_bh(&rt6_exception_lock);
4731 		/* remove prefsrc entry */
4732 		rt->fib6_prefsrc.plen = 0;
4733 		spin_unlock_bh(&rt6_exception_lock);
4734 	}
4735 	return 0;
4736 }
4737 
4738 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
4739 {
4740 	struct net *net = dev_net(ifp->idev->dev);
4741 	struct arg_dev_net_ip adni = {
4742 		.net = net,
4743 		.addr = &ifp->addr,
4744 	};
4745 	fib6_clean_all(net, fib6_remove_prefsrc, &adni);
4746 }
4747 
4748 #define RTF_RA_ROUTER		(RTF_ADDRCONF | RTF_DEFAULT)
4749 
4750 /* Remove routers and update dst entries when a gateway turns into a host. */
4751 static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
4752 {
4753 	struct in6_addr *gateway = (struct in6_addr *)arg;
4754 	struct fib6_nh *nh;
4755 
4756 	/* RA routes do not use nexthops */
4757 	if (rt->nh)
4758 		return 0;
4759 
4760 	nh = rt->fib6_nh;
4761 	if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
4762 	    nh->fib_nh_gw_family && ipv6_addr_equal(gateway, &nh->fib_nh_gw6))
4763 		return -1;
4764 
4765 	/* Further clean up cached routes in the exception table.
4766 	 * This is needed because a cached route may have a different
4767 	 * gateway than its 'parent' in the case of an IP redirect.
4768 	 */
4769 	fib6_nh_exceptions_clean_tohost(nh, gateway);
4770 
4771 	return 0;
4772 }
4773 
4774 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
4775 {
4776 	fib6_clean_all(net, fib6_clean_tohost, gateway);
4777 }
4778 
4779 struct arg_netdev_event {
4780 	const struct net_device *dev;
4781 	union {
4782 		unsigned char nh_flags;
4783 		unsigned long event;
4784 	};
4785 };
4786 
4787 static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
4788 {
4789 	struct fib6_info *iter;
4790 	struct fib6_node *fn;
4791 
4792 	fn = rcu_dereference_protected(rt->fib6_node,
4793 			lockdep_is_held(&rt->fib6_table->tb6_lock));
4794 	iter = rcu_dereference_protected(fn->leaf,
4795 			lockdep_is_held(&rt->fib6_table->tb6_lock));
4796 	while (iter) {
4797 		if (iter->fib6_metric == rt->fib6_metric &&
4798 		    rt6_qualify_for_ecmp(iter))
4799 			return iter;
4800 		iter = rcu_dereference_protected(iter->fib6_next,
4801 				lockdep_is_held(&rt->fib6_table->tb6_lock));
4802 	}
4803 
4804 	return NULL;
4805 }
4806 
4807 /* only called for fib entries with builtin fib6_nh */
4808 static bool rt6_is_dead(const struct fib6_info *rt)
4809 {
4810 	if (rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD ||
4811 	    (rt->fib6_nh->fib_nh_flags & RTNH_F_LINKDOWN &&
4812 	     ip6_ignore_linkdown(rt->fib6_nh->fib_nh_dev)))
4813 		return true;
4814 
4815 	return false;
4816 }
4817 
4818 static int rt6_multipath_total_weight(const struct fib6_info *rt)
4819 {
4820 	struct fib6_info *iter;
4821 	int total = 0;
4822 
4823 	if (!rt6_is_dead(rt))
4824 		total += rt->fib6_nh->fib_nh_weight;
4825 
4826 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
4827 		if (!rt6_is_dead(iter))
4828 			total += iter->fib6_nh->fib_nh_weight;
4829 	}
4830 
4831 	return total;
4832 }
4833 
4834 static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total)
4835 {
4836 	int upper_bound = -1;
4837 
4838 	if (!rt6_is_dead(rt)) {
4839 		*weight += rt->fib6_nh->fib_nh_weight;
4840 		upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
4841 						    total) - 1;
4842 	}
4843 	atomic_set(&rt->fib6_nh->fib_nh_upper_bound, upper_bound);
4844 }
4845 
4846 static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
4847 {
4848 	struct fib6_info *iter;
4849 	int weight = 0;
4850 
4851 	rt6_upper_bound_set(rt, &weight, total);
4852 
4853 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4854 		rt6_upper_bound_set(iter, &weight, total);
4855 }
4856 
4857 void rt6_multipath_rebalance(struct fib6_info *rt)
4858 {
4859 	struct fib6_info *first;
4860 	int total;
4861 
4862 	/* In case the entire multipath route was marked for flushing,
4863 	 * then there is no need to rebalance upon the removal of every
4864 	 * sibling route.
4865 	 */
4866 	if (!rt->fib6_nsiblings || rt->should_flush)
4867 		return;
4868 
4869 	/* During lookup routes are evaluated in order, so we need to
4870 	 * make sure upper bounds are assigned from the first sibling
4871 	 * onwards.
4872 	 */
4873 	first = rt6_multipath_first_sibling(rt);
4874 	if (WARN_ON_ONCE(!first))
4875 		return;
4876 
4877 	total = rt6_multipath_total_weight(first);
4878 	rt6_multipath_upper_bound_set(first, total);
4879 }
4880 
4881 static int fib6_ifup(struct fib6_info *rt, void *p_arg)
4882 {
4883 	const struct arg_netdev_event *arg = p_arg;
4884 	struct net *net = dev_net(arg->dev);
4885 
4886 	if (rt != net->ipv6.fib6_null_entry && !rt->nh &&
4887 	    rt->fib6_nh->fib_nh_dev == arg->dev) {
4888 		rt->fib6_nh->fib_nh_flags &= ~arg->nh_flags;
4889 		fib6_update_sernum_upto_root(net, rt);
4890 		rt6_multipath_rebalance(rt);
4891 	}
4892 
4893 	return 0;
4894 }
4895 
4896 void rt6_sync_up(struct net_device *dev, unsigned char nh_flags)
4897 {
4898 	struct arg_netdev_event arg = {
4899 		.dev = dev,
4900 		{
4901 			.nh_flags = nh_flags,
4902 		},
4903 	};
4904 
4905 	if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
4906 		arg.nh_flags |= RTNH_F_LINKDOWN;
4907 
4908 	fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
4909 }
4910 
4911 /* only called for fib entries with inline fib6_nh */
4912 static bool rt6_multipath_uses_dev(const struct fib6_info *rt,
4913 				   const struct net_device *dev)
4914 {
4915 	struct fib6_info *iter;
4916 
4917 	if (rt->fib6_nh->fib_nh_dev == dev)
4918 		return true;
4919 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4920 		if (iter->fib6_nh->fib_nh_dev == dev)
4921 			return true;
4922 
4923 	return false;
4924 }
4925 
4926 static void rt6_multipath_flush(struct fib6_info *rt)
4927 {
4928 	struct fib6_info *iter;
4929 
4930 	rt->should_flush = 1;
4931 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4932 		iter->should_flush = 1;
4933 }
4934 
4935 static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt,
4936 					     const struct net_device *down_dev)
4937 {
4938 	struct fib6_info *iter;
4939 	unsigned int dead = 0;
4940 
4941 	if (rt->fib6_nh->fib_nh_dev == down_dev ||
4942 	    rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4943 		dead++;
4944 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4945 		if (iter->fib6_nh->fib_nh_dev == down_dev ||
4946 		    iter->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4947 			dead++;
4948 
4949 	return dead;
4950 }
4951 
4952 static void rt6_multipath_nh_flags_set(struct fib6_info *rt,
4953 				       const struct net_device *dev,
4954 				       unsigned char nh_flags)
4955 {
4956 	struct fib6_info *iter;
4957 
4958 	if (rt->fib6_nh->fib_nh_dev == dev)
4959 		rt->fib6_nh->fib_nh_flags |= nh_flags;
4960 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4961 		if (iter->fib6_nh->fib_nh_dev == dev)
4962 			iter->fib6_nh->fib_nh_flags |= nh_flags;
4963 }
4964 
4965 /* called with write lock held for table with rt */
4966 static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
4967 {
4968 	const struct arg_netdev_event *arg = p_arg;
4969 	const struct net_device *dev = arg->dev;
4970 	struct net *net = dev_net(dev);
4971 
4972 	if (rt == net->ipv6.fib6_null_entry || rt->nh)
4973 		return 0;
4974 
4975 	switch (arg->event) {
4976 	case NETDEV_UNREGISTER:
4977 		return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4978 	case NETDEV_DOWN:
4979 		if (rt->should_flush)
4980 			return -1;
4981 		if (!rt->fib6_nsiblings)
4982 			return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4983 		if (rt6_multipath_uses_dev(rt, dev)) {
4984 			unsigned int count;
4985 
4986 			count = rt6_multipath_dead_count(rt, dev);
4987 			if (rt->fib6_nsiblings + 1 == count) {
4988 				rt6_multipath_flush(rt);
4989 				return -1;
4990 			}
4991 			rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
4992 						   RTNH_F_LINKDOWN);
4993 			fib6_update_sernum(net, rt);
4994 			rt6_multipath_rebalance(rt);
4995 		}
4996 		return -2;
4997 	case NETDEV_CHANGE:
4998 		if (rt->fib6_nh->fib_nh_dev != dev ||
4999 		    rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
5000 			break;
5001 		rt->fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
5002 		fib6_update_sernum(net, rt);
5003 		rt6_multipath_rebalance(rt);
5004 		break;
5005 	}
5006 
5007 	return 0;
5008 }
5009 
5010 void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
5011 {
5012 	struct arg_netdev_event arg = {
5013 		.dev = dev,
5014 		{
5015 			.event = event,
5016 		},
5017 	};
5018 	struct net *net = dev_net(dev);
5019 
5020 	if (READ_ONCE(net->ipv6.sysctl.skip_notify_on_dev_down))
5021 		fib6_clean_all_skip_notify(net, fib6_ifdown, &arg);
5022 	else
5023 		fib6_clean_all(net, fib6_ifdown, &arg);
5024 }
5025 
5026 void rt6_disable_ip(struct net_device *dev, unsigned long event)
5027 {
5028 	rt6_sync_down_dev(dev, event);
5029 	rt6_uncached_list_flush_dev(dev);
5030 	neigh_ifdown(&nd_tbl, dev);
5031 }
5032 
5033 struct rt6_mtu_change_arg {
5034 	struct net_device *dev;
5035 	unsigned int mtu;
5036 	struct fib6_info *f6i;
5037 };
5038 
5039 static int fib6_nh_mtu_change(struct fib6_nh *nh, void *_arg)
5040 {
5041 	struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *)_arg;
5042 	struct fib6_info *f6i = arg->f6i;
5043 
5044 	/* For an administrative MTU increase there is no way to discover
5045 	 * an IPv6 PMTU increase, so the PMTU must be updated here.
5046 	 * Since RFC 1981 does not cover administrative MTU increases,
5047 	 * updating the PMTU here is a MUST (e.g. for jumbo frames).
5048 	 */
5049 	if (nh->fib_nh_dev == arg->dev) {
5050 		struct inet6_dev *idev = __in6_dev_get(arg->dev);
5051 		u32 mtu = f6i->fib6_pmtu;
5052 
5053 		if (mtu >= arg->mtu ||
5054 		    (mtu < arg->mtu && mtu == idev->cnf.mtu6))
5055 			fib6_metric_set(f6i, RTAX_MTU, arg->mtu);
5056 
5057 		spin_lock_bh(&rt6_exception_lock);
5058 		rt6_exceptions_update_pmtu(idev, nh, arg->mtu);
5059 		spin_unlock_bh(&rt6_exception_lock);
5060 	}
5061 
5062 	return 0;
5063 }
5064 
5065 static int rt6_mtu_change_route(struct fib6_info *f6i, void *p_arg)
5066 {
5067 	struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
5068 	struct inet6_dev *idev;
5069 
5070 	/* In IPv6, PMTU discovery is not optional, so an RTAX_MTU
5071 	 * lock cannot disable it.
5072 	 * We still use this lock to block changes
5073 	 * caused by addrconf/ndisc.
5074 	 */
5075 
5076 	idev = __in6_dev_get(arg->dev);
5077 	if (!idev)
5078 		return 0;
5079 
5080 	if (fib6_metric_locked(f6i, RTAX_MTU))
5081 		return 0;
5082 
5083 	arg->f6i = f6i;
5084 	if (f6i->nh) {
5085 		/* fib6_nh_mtu_change only returns 0, so this is safe */
5086 		return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_mtu_change,
5087 						arg);
5088 	}
5089 
5090 	return fib6_nh_mtu_change(f6i->fib6_nh, arg);
5091 }
5092 
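/* Called when the MTU of @dev changes: walk the FIB and let
 * rt6_mtu_change_route() update the RTAX_MTU metric and any cached
 * exception routes of nexthops that use this device.
 */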
5093 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
5094 {
5095 	struct rt6_mtu_change_arg arg = {
5096 		.dev = dev,
5097 		.mtu = mtu,
5098 	};
5099 
5100 	fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
5101 }
5102 
5103 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
5104 	[RTA_UNSPEC]		= { .strict_start_type = RTA_DPORT + 1 },
5105 	[RTA_GATEWAY]           = { .len = sizeof(struct in6_addr) },
5106 	[RTA_PREFSRC]		= { .len = sizeof(struct in6_addr) },
5107 	[RTA_OIF]               = { .type = NLA_U32 },
5108 	[RTA_IIF]		= { .type = NLA_U32 },
5109 	[RTA_PRIORITY]          = { .type = NLA_U32 },
5110 	[RTA_METRICS]           = { .type = NLA_NESTED },
5111 	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
5112 	[RTA_PREF]              = { .type = NLA_U8 },
5113 	[RTA_ENCAP_TYPE]	= { .type = NLA_U16 },
5114 	[RTA_ENCAP]		= { .type = NLA_NESTED },
5115 	[RTA_EXPIRES]		= { .type = NLA_U32 },
5116 	[RTA_UID]		= { .type = NLA_U32 },
5117 	[RTA_MARK]		= { .type = NLA_U32 },
5118 	[RTA_TABLE]		= { .type = NLA_U32 },
5119 	[RTA_IP_PROTO]		= { .type = NLA_U8 },
5120 	[RTA_SPORT]		= { .type = NLA_U16 },
5121 	[RTA_DPORT]		= { .type = NLA_U16 },
5122 	[RTA_NH_ID]		= { .type = NLA_U32 },
5123 	[RTA_FLOWLABEL]		= { .type = NLA_BE32 },
5124 };
5125 
5126 static int rtm_to_fib6_multipath_config(struct fib6_config *cfg,
5127 					struct netlink_ext_ack *extack,
5128 					bool newroute)
5129 {
5130 	struct rtnexthop *rtnh;
5131 	int remaining;
5132 
5133 	remaining = cfg->fc_mp_len;
5134 	rtnh = (struct rtnexthop *)cfg->fc_mp;
5135 
5136 	if (!rtnh_ok(rtnh, remaining)) {
5137 		NL_SET_ERR_MSG(extack, "Invalid nexthop configuration - no valid nexthops");
5138 		return -EINVAL;
5139 	}
5140 
5141 	do {
5142 		bool has_gateway = cfg->fc_flags & RTF_GATEWAY;
5143 		int attrlen = rtnh_attrlen(rtnh);
5144 
5145 		if (attrlen > 0) {
5146 			struct nlattr *nla, *attrs;
5147 
5148 			attrs = rtnh_attrs(rtnh);
5149 			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5150 			if (nla) {
5151 				if (nla_len(nla) < sizeof(cfg->fc_gateway)) {
5152 					NL_SET_ERR_MSG(extack,
5153 						       "Invalid IPv6 address in RTA_GATEWAY");
5154 					return -EINVAL;
5155 				}
5156 
5157 				has_gateway = true;
5158 			}
5159 		}
5160 
5161 		if (newroute && (cfg->fc_nh_id || !has_gateway)) {
5162 			NL_SET_ERR_MSG(extack,
5163 				       "Device only routes can not be added for IPv6 using the multipath API.");
5164 			return -EINVAL;
5165 		}
5166 
5167 		rtnh = rtnh_next(rtnh, &remaining);
5168 	} while (rtnh_ok(rtnh, remaining));
5169 
5170 	return lwtunnel_valid_encap_type_attr(cfg->fc_mp, cfg->fc_mp_len, extack);
5171 }
5172 
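/* Parse an RTM_NEWROUTE/RTM_DELROUTE message into a fib6_config,
 * accepting only the attributes that are meaningful for IPv6 and
 * rejecting the ones that are not (dsfield, flow label, RTA_VIA).
 */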
5173 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
5174 			      struct fib6_config *cfg,
5175 			      struct netlink_ext_ack *extack)
5176 {
5177 	bool newroute = nlh->nlmsg_type == RTM_NEWROUTE;
5178 	struct nlattr *tb[RTA_MAX+1];
5179 	struct rtmsg *rtm;
5180 	unsigned int pref;
5181 	int err;
5182 
5183 	err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
5184 				     rtm_ipv6_policy, extack);
5185 	if (err < 0)
5186 		goto errout;
5187 
5188 	err = -EINVAL;
5189 	rtm = nlmsg_data(nlh);
5190 
5191 	if (rtm->rtm_tos) {
5192 		NL_SET_ERR_MSG(extack,
5193 			       "Invalid dsfield (tos): option not available for IPv6");
5194 		goto errout;
5195 	}
5196 
5197 	if (tb[RTA_FLOWLABEL]) {
5198 		NL_SET_ERR_MSG_ATTR(extack, tb[RTA_FLOWLABEL],
5199 				    "Flow label cannot be specified for this operation");
5200 		goto errout;
5201 	}
5202 
5203 	*cfg = (struct fib6_config){
5204 		.fc_table = rtm->rtm_table,
5205 		.fc_dst_len = rtm->rtm_dst_len,
5206 		.fc_src_len = rtm->rtm_src_len,
5207 		.fc_flags = RTF_UP,
5208 		.fc_protocol = rtm->rtm_protocol,
5209 		.fc_type = rtm->rtm_type,
5210 
5211 		.fc_nlinfo.portid = NETLINK_CB(skb).portid,
5212 		.fc_nlinfo.nlh = nlh,
5213 		.fc_nlinfo.nl_net = sock_net(skb->sk),
5214 	};
5215 
5216 	if (rtm->rtm_type == RTN_UNREACHABLE ||
5217 	    rtm->rtm_type == RTN_BLACKHOLE ||
5218 	    rtm->rtm_type == RTN_PROHIBIT ||
5219 	    rtm->rtm_type == RTN_THROW)
5220 		cfg->fc_flags |= RTF_REJECT;
5221 
5222 	if (rtm->rtm_type == RTN_LOCAL)
5223 		cfg->fc_flags |= RTF_LOCAL;
5224 
5225 	if (rtm->rtm_flags & RTM_F_CLONED)
5226 		cfg->fc_flags |= RTF_CACHE;
5227 
5228 	cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);
5229 
5230 	if (tb[RTA_NH_ID]) {
5231 		if (tb[RTA_GATEWAY]   || tb[RTA_OIF] ||
5232 		    tb[RTA_MULTIPATH] || tb[RTA_ENCAP]) {
5233 			NL_SET_ERR_MSG(extack,
5234 				       "Nexthop specification and nexthop id are mutually exclusive");
5235 			goto errout;
5236 		}
5237 		cfg->fc_nh_id = nla_get_u32(tb[RTA_NH_ID]);
5238 	}
5239 
5240 	if (tb[RTA_GATEWAY]) {
5241 		cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
5242 		cfg->fc_flags |= RTF_GATEWAY;
5243 	}
5244 	if (tb[RTA_VIA]) {
5245 		NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
5246 		goto errout;
5247 	}
5248 
5249 	if (tb[RTA_DST]) {
5250 		int plen = (rtm->rtm_dst_len + 7) >> 3;
5251 
5252 		if (nla_len(tb[RTA_DST]) < plen)
5253 			goto errout;
5254 
5255 		nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
5256 	}
5257 
5258 	if (tb[RTA_SRC]) {
5259 		int plen = (rtm->rtm_src_len + 7) >> 3;
5260 
5261 		if (nla_len(tb[RTA_SRC]) < plen)
5262 			goto errout;
5263 
5264 		nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
5265 	}
5266 
5267 	if (tb[RTA_PREFSRC])
5268 		cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
5269 
5270 	if (tb[RTA_OIF])
5271 		cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
5272 
5273 	if (tb[RTA_PRIORITY])
5274 		cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
5275 
5276 	if (tb[RTA_METRICS]) {
5277 		cfg->fc_mx = nla_data(tb[RTA_METRICS]);
5278 		cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
5279 	}
5280 
5281 	if (tb[RTA_TABLE])
5282 		cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
5283 
5284 	if (tb[RTA_MULTIPATH]) {
5285 		cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
5286 		cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
5287 
5288 		err = rtm_to_fib6_multipath_config(cfg, extack, newroute);
5289 		if (err < 0)
5290 			goto errout;
5291 	}
5292 
5293 	if (tb[RTA_PREF]) {
5294 		pref = nla_get_u8(tb[RTA_PREF]);
5295 		if (pref != ICMPV6_ROUTER_PREF_LOW &&
5296 		    pref != ICMPV6_ROUTER_PREF_HIGH)
5297 			pref = ICMPV6_ROUTER_PREF_MEDIUM;
5298 		cfg->fc_flags |= RTF_PREF(pref);
5299 	}
5300 
5301 	if (tb[RTA_ENCAP])
5302 		cfg->fc_encap = tb[RTA_ENCAP];
5303 
5304 	if (tb[RTA_ENCAP_TYPE]) {
5305 		cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
5306 
5307 		err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
5308 		if (err < 0)
5309 			goto errout;
5310 	}
5311 
5312 	if (tb[RTA_EXPIRES]) {
5313 		unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
5314 
5315 		if (addrconf_finite_timeout(timeout)) {
5316 			cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
5317 			cfg->fc_flags |= RTF_EXPIRES;
5318 		}
5319 	}
5320 
5321 	err = 0;
5322 errout:
5323 	return err;
5324 }
5325 
5326 struct rt6_nh {
5327 	struct fib6_info *fib6_info;
5328 	struct fib6_config r_cfg;
5329 	struct list_head list;
5330 };
5331 
5332 static int ip6_route_info_append(struct list_head *rt6_nh_list,
5333 				 struct fib6_info *rt,
5334 				 struct fib6_config *r_cfg)
5335 {
5336 	struct rt6_nh *nh;
5337 
5338 	list_for_each_entry(nh, rt6_nh_list, list) {
5339 		/* check if fib6_info already exists */
5340 		if (rt6_duplicate_nexthop(nh->fib6_info, rt))
5341 			return -EEXIST;
5342 	}
5343 
5344 	nh = kzalloc_obj(*nh);
5345 	if (!nh)
5346 		return -ENOMEM;
5347 
5348 	nh->fib6_info = rt;
5349 	memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
5350 	list_add_tail(&nh->list, rt6_nh_list);
5351 
5352 	return 0;
5353 }
5354 
5355 static void ip6_route_mpath_notify(struct fib6_info *rt,
5356 				   struct fib6_info *rt_last,
5357 				   struct nl_info *info,
5358 				   __u16 nlflags)
5359 {
5360 	/* If this is an APPEND route, then rt points to the first route
5361 	 * inserted and rt_last points to the last route inserted. Userspace
5362 	 * wants a consistent dump of the route which starts at the first
5363 	 * nexthop. Since sibling routes are always added at the end of
5364 	 * the list, find the first sibling of the last route appended.
5365 	 */
5366 	rcu_read_lock();
5367 
5368 	if ((nlflags & NLM_F_APPEND) && rt_last &&
5369 	    READ_ONCE(rt_last->fib6_nsiblings)) {
5370 		rt = list_first_or_null_rcu(&rt_last->fib6_siblings,
5371 					    struct fib6_info,
5372 					    fib6_siblings);
5373 	}
5374 
5375 	if (rt)
5376 		inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
5377 
5378 	rcu_read_unlock();
5379 }
5380 
5381 static bool ip6_route_mpath_should_notify(const struct fib6_info *rt)
5382 {
5383 	bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
5384 	bool should_notify = false;
5385 	struct fib6_info *leaf;
5386 	struct fib6_node *fn;
5387 
5388 	rcu_read_lock();
5389 	fn = rcu_dereference(rt->fib6_node);
5390 	if (!fn)
5391 		goto out;
5392 
5393 	leaf = rcu_dereference(fn->leaf);
5394 	if (!leaf)
5395 		goto out;
5396 
5397 	if (rt == leaf ||
5398 	    (rt_can_ecmp && rt->fib6_metric == leaf->fib6_metric &&
5399 	     rt6_qualify_for_ecmp(leaf)))
5400 		should_notify = true;
5401 out:
5402 	rcu_read_unlock();
5403 
5404 	return should_notify;
5405 }
5406 
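/* Add an ECMP route: build one fib6_info per nexthop listed in
 * RTA_MULTIPATH, insert them one by one and, on success, send a single
 * notification covering all inserted nexthops. If any insertion fails,
 * the routes added so far are deleted again.
 */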
5407 static int ip6_route_multipath_add(struct fib6_config *cfg,
5408 				   struct netlink_ext_ack *extack)
5409 {
5410 	struct fib6_info *rt_notif = NULL, *rt_last = NULL;
5411 	struct nl_info *info = &cfg->fc_nlinfo;
5412 	struct rt6_nh *nh, *nh_safe;
5413 	struct fib6_config r_cfg;
5414 	struct rtnexthop *rtnh;
5415 	LIST_HEAD(rt6_nh_list);
5416 	struct rt6_nh *err_nh;
5417 	struct fib6_info *rt;
5418 	__u16 nlflags;
5419 	int remaining;
5420 	int attrlen;
5421 	int replace;
5422 	int nhn = 0;
5423 	int err;
5424 
5425 	err = fib6_config_validate(cfg, extack);
5426 	if (err)
5427 		return err;
5428 
5429 	replace = (cfg->fc_nlinfo.nlh &&
5430 		   (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
5431 
5432 	nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
5433 	if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
5434 		nlflags |= NLM_F_APPEND;
5435 
5436 	remaining = cfg->fc_mp_len;
5437 	rtnh = (struct rtnexthop *)cfg->fc_mp;
5438 
5439 	/* Parse a Multipath Entry and build a list (rt6_nh_list) of
5440 	 * fib6_info structs per nexthop
5441 	 */
5442 	while (rtnh_ok(rtnh, remaining)) {
5443 		memcpy(&r_cfg, cfg, sizeof(*cfg));
5444 		if (rtnh->rtnh_ifindex)
5445 			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
5446 
5447 		attrlen = rtnh_attrlen(rtnh);
5448 		if (attrlen > 0) {
5449 			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
5450 
5451 			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5452 			if (nla) {
5453 				r_cfg.fc_gateway = nla_get_in6_addr(nla);
5454 				r_cfg.fc_flags |= RTF_GATEWAY;
5455 			}
5456 
5457 			r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
5458 			nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
5459 			if (nla)
5460 				r_cfg.fc_encap_type = nla_get_u16(nla);
5461 		}
5462 
5463 		r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
5464 		rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack);
5465 		if (IS_ERR(rt)) {
5466 			err = PTR_ERR(rt);
5467 			rt = NULL;
5468 			goto cleanup;
5469 		}
5470 
5471 		err = ip6_route_info_create_nh(rt, &r_cfg, GFP_KERNEL, extack);
5472 		if (err) {
5473 			rt = NULL;
5474 			goto cleanup;
5475 		}
5476 
5477 		rt->fib6_nh->fib_nh_weight = rtnh->rtnh_hops + 1;
5478 
5479 		err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
5480 		if (err) {
5481 			fib6_info_release(rt);
5482 			goto cleanup;
5483 		}
5484 
5485 		rtnh = rtnh_next(rtnh, &remaining);
5486 	}
5487 
5488 	/* for add and replace send one notification with all nexthops.
5489 	 * Skip the notification in fib6_add_rt2node and send one with
5490 	 * the full route when done
5491 	 */
5492 	info->skip_notify = 1;
5493 
5494 	/* For add and replace, send one notification with all nexthops. For
5495 	 * append, send one notification with all appended nexthops.
5496 	 */
5497 	info->skip_notify_kernel = 1;
5498 
5499 	err_nh = NULL;
5500 	list_for_each_entry(nh, &rt6_nh_list, list) {
5501 		err = __ip6_ins_rt(nh->fib6_info, info, extack);
5502 
5503 		if (err) {
5504 			if (replace && nhn)
5505 				NL_SET_ERR_MSG_MOD(extack,
5506 						   "multipath route replace failed (check consistency of installed routes)");
5507 			err_nh = nh;
5508 			goto add_errout;
5509 		}
5510 		/* save reference to last route successfully inserted */
5511 		rt_last = nh->fib6_info;
5512 
5513 		/* save reference to first route for notification */
5514 		if (!rt_notif)
5515 			rt_notif = nh->fib6_info;
5516 
5517 		/* Because each route is added as if it were a single route,
5518 		 * we remove these flags after the first nexthop: if there is a
5519 		 * collision, we have already failed to add the first nexthop
5520 		 * because fib6_add_rt2node() rejected it; when replacing, the
5521 		 * old nexthops have been replaced by the first new one, and
5522 		 * the remaining ones should be appended to it.
5523 		 */
5524 		if (cfg->fc_nlinfo.nlh) {
5525 			cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
5526 							     NLM_F_REPLACE);
5527 			cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
5528 		}
5529 		nhn++;
5530 	}
5531 
5532 	/* An in-kernel notification should only be sent in case the new
5533 	 * multipath route is added as the first route in the node, or if
5534 	 * it was appended to it. We pass 'rt_notif' since it is the first
5535 	 * sibling and might allow us to skip some checks in the replace case.
5536 	 */
5537 	if (ip6_route_mpath_should_notify(rt_notif)) {
5538 		enum fib_event_type fib_event;
5539 
5540 		if (rt_notif->fib6_nsiblings != nhn - 1)
5541 			fib_event = FIB_EVENT_ENTRY_APPEND;
5542 		else
5543 			fib_event = FIB_EVENT_ENTRY_REPLACE;
5544 
5545 		err = call_fib6_multipath_entry_notifiers(info->nl_net,
5546 							  fib_event, rt_notif,
5547 							  nhn - 1, extack);
5548 		if (err) {
5549 			/* Delete all the siblings that were just added */
5550 			err_nh = NULL;
5551 			goto add_errout;
5552 		}
5553 	}
5554 
5555 	/* success ... tell user about new route */
5556 	ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
5557 	goto cleanup;
5558 
5559 add_errout:
5560 	/* send notification for routes that were added so that
5561 	 * the delete notifications sent by ip6_route_del are
5562 	 * coherent
5563 	 */
5564 	if (rt_notif)
5565 		ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
5566 
5567 	/* Delete routes that were already added */
5568 	list_for_each_entry(nh, &rt6_nh_list, list) {
5569 		if (err_nh == nh)
5570 			break;
5571 		ip6_route_del(&nh->r_cfg, extack);
5572 	}
5573 
5574 cleanup:
5575 	list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, list) {
5576 		fib6_info_release(nh->fib6_info);
5577 		list_del(&nh->list);
5578 		kfree(nh);
5579 	}
5580 
5581 	return err;
5582 }
5583 
5584 static int ip6_route_multipath_del(struct fib6_config *cfg,
5585 				   struct netlink_ext_ack *extack)
5586 {
5587 	struct fib6_config r_cfg;
5588 	struct rtnexthop *rtnh;
5589 	int last_err = 0;
5590 	int remaining;
5591 	int attrlen;
5592 	int err;
5593 
5594 	remaining = cfg->fc_mp_len;
5595 	rtnh = (struct rtnexthop *)cfg->fc_mp;
5596 
5597 	/* Parse a Multipath Entry */
5598 	while (rtnh_ok(rtnh, remaining)) {
5599 		memcpy(&r_cfg, cfg, sizeof(*cfg));
5600 		if (rtnh->rtnh_ifindex)
5601 			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
5602 
5603 		attrlen = rtnh_attrlen(rtnh);
5604 		if (attrlen > 0) {
5605 			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
5606 
5607 			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5608 			if (nla) {
5609 				r_cfg.fc_gateway = nla_get_in6_addr(nla);
5610 				r_cfg.fc_flags |= RTF_GATEWAY;
5611 			}
5612 		}
5613 
5614 		err = ip6_route_del(&r_cfg, extack);
5615 		if (err)
5616 			last_err = err;
5617 
5618 		rtnh = rtnh_next(rtnh, &remaining);
5619 	}
5620 
5621 	return last_err;
5622 }
5623 
5624 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
5625 			      struct netlink_ext_ack *extack)
5626 {
5627 	struct fib6_config cfg;
5628 	int err;
5629 
5630 	err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5631 	if (err < 0)
5632 		return err;
5633 
5634 	if (cfg.fc_nh_id) {
5635 		rcu_read_lock();
5636 		err = !nexthop_find_by_id(sock_net(skb->sk), cfg.fc_nh_id);
5637 		rcu_read_unlock();
5638 
5639 		if (err) {
5640 			NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
5641 			return -EINVAL;
5642 		}
5643 	}
5644 
5645 	if (cfg.fc_mp) {
5646 		return ip6_route_multipath_del(&cfg, extack);
5647 	} else {
5648 		cfg.fc_delete_all_nh = 1;
5649 		return ip6_route_del(&cfg, extack);
5650 	}
5651 }
5652 
5653 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
5654 			      struct netlink_ext_ack *extack)
5655 {
5656 	struct fib6_config cfg;
5657 	int err;
5658 
5659 	err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5660 	if (err < 0)
5661 		return err;
5662 
5663 	if (cfg.fc_metric == 0)
5664 		cfg.fc_metric = IP6_RT_PRIO_USER;
5665 
5666 	if (cfg.fc_mp)
5667 		return ip6_route_multipath_add(&cfg, extack);
5668 	else
5669 		return ip6_route_add(&cfg, GFP_KERNEL, extack);
5670 }
5671 
5672 /* add the overhead of this fib6_nh to nexthop_len */
5673 static int rt6_nh_nlmsg_size(struct fib6_nh *nh, void *arg)
5674 {
5675 	int *nexthop_len = arg;
5676 
5677 	*nexthop_len += nla_total_size(0)	 /* RTA_MULTIPATH */
5678 		     + NLA_ALIGN(sizeof(struct rtnexthop))
5679 		     + nla_total_size(16); /* RTA_GATEWAY */
5680 
5681 	if (nh->fib_nh_lws) {
5682 		/* RTA_ENCAP */
5683 		*nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
5684 		/* RTA_ENCAP_TYPE */
5685 		*nexthop_len += nla_total_size(2);
5686 	}
5687 
5688 	return 0;
5689 }
5690 
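/* Upper-bound estimate of the netlink message size needed to dump @f6i,
 * accounting for every sibling nexthop (or every fib6_nh of an external
 * nexthop object) so that callers can allocate a large enough skb.
 */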
5691 static size_t rt6_nlmsg_size(struct fib6_info *f6i)
5692 {
5693 	struct fib6_info *sibling;
5694 	struct fib6_nh *nh;
5695 	int nexthop_len;
5696 
5697 	if (f6i->nh) {
5698 		nexthop_len = nla_total_size(4); /* RTA_NH_ID */
5699 		nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size,
5700 					 &nexthop_len);
5701 		goto common;
5702 	}
5703 
5704 	rcu_read_lock();
5705 retry:
5706 	nh = f6i->fib6_nh;
5707 	nexthop_len = 0;
5708 	if (READ_ONCE(f6i->fib6_nsiblings)) {
5709 		rt6_nh_nlmsg_size(nh, &nexthop_len);
5710 
5711 		list_for_each_entry_rcu(sibling, &f6i->fib6_siblings,
5712 					fib6_siblings) {
5713 			rt6_nh_nlmsg_size(sibling->fib6_nh, &nexthop_len);
5714 			if (!READ_ONCE(f6i->fib6_nsiblings))
5715 				goto retry;
5716 		}
5717 	}
5718 	rcu_read_unlock();
5719 	nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
5720 common:
5721 	return NLMSG_ALIGN(sizeof(struct rtmsg))
5722 	       + nla_total_size(16) /* RTA_SRC */
5723 	       + nla_total_size(16) /* RTA_DST */
5724 	       + nla_total_size(16) /* RTA_GATEWAY */
5725 	       + nla_total_size(16) /* RTA_PREFSRC */
5726 	       + nla_total_size(4) /* RTA_TABLE */
5727 	       + nla_total_size(4) /* RTA_IIF */
5728 	       + nla_total_size(4) /* RTA_OIF */
5729 	       + nla_total_size(4) /* RTA_PRIORITY */
5730 	       + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
5731 	       + nla_total_size(sizeof(struct rta_cacheinfo))
5732 	       + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
5733 	       + nla_total_size(1) /* RTA_PREF */
5734 	       + nexthop_len;
5735 }
5736 
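/* Dump the nexthop object of a route: a nexthop group becomes an
 * RTA_MULTIPATH nest, a single nexthop is emitted as flat attributes.
 * Returns -EMSGSIZE if the skb runs out of room.
 */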
5737 static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh,
5738 				 unsigned char *flags)
5739 {
5740 	if (nexthop_is_multipath(nh)) {
5741 		struct nlattr *mp;
5742 
5743 		mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
5744 		if (!mp)
5745 			goto nla_put_failure;
5746 
5747 		if (nexthop_mpath_fill_node(skb, nh, AF_INET6))
5748 			goto nla_put_failure;
5749 
5750 		nla_nest_end(skb, mp);
5751 	} else {
5752 		struct fib6_nh *fib6_nh;
5753 
5754 		fib6_nh = nexthop_fib6_nh(nh);
5755 		if (fib_nexthop_info(skb, &fib6_nh->nh_common, AF_INET6,
5756 				     flags, false) < 0)
5757 			goto nla_put_failure;
5758 	}
5759 
5760 	return 0;
5761 
5762 nla_put_failure:
5763 	return -EMSGSIZE;
5764 }
5765 
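/* Fill one RTM_NEWROUTE/RTM_DELROUTE message.  With dst == NULL the FIB
 * entry (fib6_info) itself is described; with a dst the cached rt6_info
 * (e.g. a PMTU/redirect exception or lookup result) is described instead.
 */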
5766 static int rt6_fill_node(struct net *net, struct sk_buff *skb,
5767 			 struct fib6_info *rt, struct dst_entry *dst,
5768 			 struct in6_addr *dest, struct in6_addr *src,
5769 			 int iif, int type, u32 portid, u32 seq,
5770 			 unsigned int flags)
5771 {
5772 	struct rt6_info *rt6 = dst_rt6_info(dst);
5773 	struct rt6key *rt6_dst, *rt6_src;
5774 	u32 *pmetrics, table, rt6_flags;
5775 	unsigned char nh_flags = 0;
5776 	struct nlmsghdr *nlh;
5777 	struct rtmsg *rtm;
5778 	long expires = 0;
5779 
5780 	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
5781 	if (!nlh)
5782 		return -EMSGSIZE;
5783 
5784 	if (rt6) {
5785 		rt6_dst = &rt6->rt6i_dst;
5786 		rt6_src = &rt6->rt6i_src;
5787 		rt6_flags = rt6->rt6i_flags;
5788 	} else {
5789 		rt6_dst = &rt->fib6_dst;
5790 		rt6_src = &rt->fib6_src;
5791 		rt6_flags = rt->fib6_flags;
5792 	}
5793 
5794 	rtm = nlmsg_data(nlh);
5795 	rtm->rtm_family = AF_INET6;
5796 	rtm->rtm_dst_len = rt6_dst->plen;
5797 	rtm->rtm_src_len = rt6_src->plen;
5798 	rtm->rtm_tos = 0;
5799 	if (rt->fib6_table)
5800 		table = rt->fib6_table->tb6_id;
5801 	else
5802 		table = RT6_TABLE_UNSPEC;
5803 	rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
5804 	if (nla_put_u32(skb, RTA_TABLE, table))
5805 		goto nla_put_failure;
5806 
5807 	rtm->rtm_type = rt->fib6_type;
5808 	rtm->rtm_flags = 0;
5809 	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
5810 	rtm->rtm_protocol = rt->fib6_protocol;
5811 
5812 	if (rt6_flags & RTF_CACHE)
5813 		rtm->rtm_flags |= RTM_F_CLONED;
5814 
5815 	if (dest) {
5816 		if (nla_put_in6_addr(skb, RTA_DST, dest))
5817 			goto nla_put_failure;
5818 		rtm->rtm_dst_len = 128;
5819 	} else if (rtm->rtm_dst_len)
5820 		if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
5821 			goto nla_put_failure;
5822 #ifdef CONFIG_IPV6_SUBTREES
5823 	if (src) {
5824 		if (nla_put_in6_addr(skb, RTA_SRC, src))
5825 			goto nla_put_failure;
5826 		rtm->rtm_src_len = 128;
5827 	} else if (rtm->rtm_src_len &&
5828 		   nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
5829 		goto nla_put_failure;
5830 #endif
5831 	if (iif) {
5832 #ifdef CONFIG_IPV6_MROUTE
5833 		if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
5834 			int err = ip6mr_get_route(net, skb, rtm, portid);
5835 
5836 			if (err == 0)
5837 				return 0;
5838 			if (err < 0)
5839 				goto nla_put_failure;
5840 		} else
5841 #endif
5842 			if (nla_put_u32(skb, RTA_IIF, iif))
5843 				goto nla_put_failure;
5844 	} else if (dest) {
5845 		struct in6_addr saddr_buf;
5846 		if (ip6_route_get_saddr(net, rt, dest, 0, 0, &saddr_buf) == 0 &&
5847 		    nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
5848 			goto nla_put_failure;
5849 	}
5850 
5851 	if (rt->fib6_prefsrc.plen) {
5852 		struct in6_addr saddr_buf;
5853 		saddr_buf = rt->fib6_prefsrc.addr;
5854 		if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
5855 			goto nla_put_failure;
5856 	}
5857 
5858 	pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics;
5859 	if (rtnetlink_put_metrics(skb, pmetrics) < 0)
5860 		goto nla_put_failure;
5861 
5862 	if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric))
5863 		goto nla_put_failure;
5864 
5865 	/* For multipath routes, walk the siblings list and add
5866 	 * each as a nexthop within RTA_MULTIPATH.
5867 	 */
5868 	if (rt6) {
5869 		struct net_device *dev;
5870 
5871 		if (rt6_flags & RTF_GATEWAY &&
5872 		    nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
5873 			goto nla_put_failure;
5874 
5875 		dev = dst_dev(dst);
5876 		if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
5877 			goto nla_put_failure;
5878 
5879 		if (lwtunnel_fill_encap(skb, dst->lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
5880 			goto nla_put_failure;
5881 	} else if (READ_ONCE(rt->fib6_nsiblings)) {
5882 		struct fib6_info *sibling;
5883 		struct nlattr *mp;
5884 
5885 		mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
5886 		if (!mp)
5887 			goto nla_put_failure;
5888 
5889 		if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
5890 				    rt->fib6_nh->fib_nh_weight, AF_INET6,
5891 				    0) < 0)
5892 			goto nla_put_failure;
5893 
5894 		rcu_read_lock();
5895 
5896 		list_for_each_entry_rcu(sibling, &rt->fib6_siblings,
5897 					fib6_siblings) {
5898 			if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
5899 					    sibling->fib6_nh->fib_nh_weight,
5900 					    AF_INET6, 0) < 0) {
5901 				rcu_read_unlock();
5902 
5903 				goto nla_put_failure;
5904 			}
5905 		}
5906 
5907 		rcu_read_unlock();
5908 
5909 		nla_nest_end(skb, mp);
5910 	} else if (rt->nh) {
5911 		if (nla_put_u32(skb, RTA_NH_ID, rt->nh->id))
5912 			goto nla_put_failure;
5913 
5914 		if (nexthop_is_blackhole(rt->nh))
5915 			rtm->rtm_type = RTN_BLACKHOLE;
5916 
5917 		if (READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode) &&
5918 		    rt6_fill_node_nexthop(skb, rt->nh, &nh_flags) < 0)
5919 			goto nla_put_failure;
5920 
5921 		rtm->rtm_flags |= nh_flags;
5922 	} else {
5923 		if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common, AF_INET6,
5924 				     &nh_flags, false) < 0)
5925 			goto nla_put_failure;
5926 
5927 		rtm->rtm_flags |= nh_flags;
5928 	}
5929 
5930 	if (rt6_flags & RTF_EXPIRES) {
5931 		expires = dst ? READ_ONCE(dst->expires) : rt->expires;
5932 		expires -= jiffies;
5933 	}
5934 
5935 	if (!dst) {
5936 		if (READ_ONCE(rt->offload))
5937 			rtm->rtm_flags |= RTM_F_OFFLOAD;
5938 		if (READ_ONCE(rt->trap))
5939 			rtm->rtm_flags |= RTM_F_TRAP;
5940 		if (READ_ONCE(rt->offload_failed))
5941 			rtm->rtm_flags |= RTM_F_OFFLOAD_FAILED;
5942 	}
5943 
5944 	if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
5945 		goto nla_put_failure;
5946 
5947 	if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
5948 		goto nla_put_failure;
5949 
5950 
5951 	nlmsg_end(skb, nlh);
5952 	return 0;
5953 
5954 nla_put_failure:
5955 	nlmsg_cancel(skb, nlh);
5956 	return -EMSGSIZE;
5957 }
5958 
5959 static int fib6_info_nh_uses_dev(struct fib6_nh *nh, void *arg)
5960 {
5961 	const struct net_device *dev = arg;
5962 
5963 	if (nh->fib_nh_dev == dev)
5964 		return 1;
5965 
5966 	return 0;
5967 }
5968 
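/* True if any nexthop of the route (nexthop object, primary fib6_nh or an
 * ECMP sibling) egresses through the given device; used for RTA_OIF dump
 * filtering.
 */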
5969 static bool fib6_info_uses_dev(const struct fib6_info *f6i,
5970 			       const struct net_device *dev)
5971 {
5972 	if (f6i->nh) {
5973 		struct net_device *_dev = (struct net_device *)dev;
5974 
5975 		return !!nexthop_for_each_fib6_nh(f6i->nh,
5976 						  fib6_info_nh_uses_dev,
5977 						  _dev);
5978 	}
5979 
5980 	if (f6i->fib6_nh->fib_nh_dev == dev)
5981 		return true;
5982 
5983 	if (READ_ONCE(f6i->fib6_nsiblings)) {
5984 		const struct fib6_info *sibling;
5985 
5986 		rcu_read_lock();
5987 		list_for_each_entry_rcu(sibling, &f6i->fib6_siblings,
5988 					fib6_siblings) {
5989 			if (sibling->fib6_nh->fib_nh_dev == dev) {
5990 				rcu_read_unlock();
5991 				return true;
5992 			}
5993 			if (!READ_ONCE(f6i->fib6_nsiblings))
5994 				break;
5995 		}
5996 		rcu_read_unlock();
5997 	}
5998 	return false;
5999 }
6000 
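/* State carried while dumping the per-nexthop exception (cached route)
 * buckets: @skip and @count let a partial netlink dump resume where the
 * previous one stopped.
 */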
6001 struct fib6_nh_exception_dump_walker {
6002 	struct rt6_rtnl_dump_arg *dump;
6003 	struct fib6_info *rt;
6004 	unsigned int flags;
6005 	unsigned int skip;
6006 	unsigned int count;
6007 };
6008 
6009 static int rt6_nh_dump_exceptions(struct fib6_nh *nh, void *arg)
6010 {
6011 	struct fib6_nh_exception_dump_walker *w = arg;
6012 	struct rt6_rtnl_dump_arg *dump = w->dump;
6013 	struct rt6_exception_bucket *bucket;
6014 	struct rt6_exception *rt6_ex;
6015 	int i, err;
6016 
6017 	bucket = fib6_nh_get_excptn_bucket(nh, NULL);
6018 	if (!bucket)
6019 		return 0;
6020 
6021 	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
6022 		hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
6023 			if (w->skip) {
6024 				w->skip--;
6025 				continue;
6026 			}
6027 
6028 			/* Expiration of entries doesn't bump sernum, insertion
6029 			 * does. Removal is triggered by insertion, so we can
6030 			 * rely on the fact that if entries change between two
6031 			 * partial dumps, this node is scanned again completely,
6032 			 * see rt6_insert_exception() and fib6_dump_table().
6033 			 *
6034 			 * Count expired entries we go through as handled
6035 			 * entries that we'll skip next time, in case of partial
6036 			 * node dump. Otherwise, if entries expire meanwhile,
6037 			 * we'll skip the wrong amount.
6038 			 */
6039 			if (rt6_check_expired(rt6_ex->rt6i)) {
6040 				w->count++;
6041 				continue;
6042 			}
6043 
6044 			err = rt6_fill_node(dump->net, dump->skb, w->rt,
6045 					    &rt6_ex->rt6i->dst, NULL, NULL, 0,
6046 					    RTM_NEWROUTE,
6047 					    NETLINK_CB(dump->cb->skb).portid,
6048 					    dump->cb->nlh->nlmsg_seq, w->flags);
6049 			if (err)
6050 				return err;
6051 
6052 			w->count++;
6053 		}
6054 		bucket++;
6055 	}
6056 
6057 	return 0;
6058 }
6059 
6060 /* Return -1 when done with the node, or the number of handled routes on a partial dump */
6061 int rt6_dump_route(struct fib6_info *rt, void *p_arg, unsigned int skip)
6062 {
6063 	struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
6064 	struct fib_dump_filter *filter = &arg->filter;
6065 	unsigned int flags = NLM_F_MULTI;
6066 	struct net *net = arg->net;
6067 	int count = 0;
6068 
6069 	if (rt == net->ipv6.fib6_null_entry)
6070 		return -1;
6071 
6072 	if ((filter->flags & RTM_F_PREFIX) &&
6073 	    !(rt->fib6_flags & RTF_PREFIX_RT)) {
6074 		/* success since this is not a prefix route */
6075 		return -1;
6076 	}
6077 	if (filter->filter_set &&
6078 	    ((filter->rt_type  && rt->fib6_type != filter->rt_type) ||
6079 	     (filter->dev      && !fib6_info_uses_dev(rt, filter->dev)) ||
6080 	     (filter->protocol && rt->fib6_protocol != filter->protocol))) {
6081 		return -1;
6082 	}
6083 
6084 	if (filter->filter_set ||
6085 	    !filter->dump_routes || !filter->dump_exceptions) {
6086 		flags |= NLM_F_DUMP_FILTERED;
6087 	}
6088 
6089 	if (filter->dump_routes) {
6090 		if (skip) {
6091 			skip--;
6092 		} else {
6093 			if (rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL,
6094 					  0, RTM_NEWROUTE,
6095 					  NETLINK_CB(arg->cb->skb).portid,
6096 					  arg->cb->nlh->nlmsg_seq, flags)) {
6097 				return 0;
6098 			}
6099 			count++;
6100 		}
6101 	}
6102 
6103 	if (filter->dump_exceptions) {
6104 		struct fib6_nh_exception_dump_walker w = { .dump = arg,
6105 							   .rt = rt,
6106 							   .flags = flags,
6107 							   .skip = skip,
6108 							   .count = 0 };
6109 		int err;
6110 
6111 		rcu_read_lock();
6112 		if (rt->nh) {
6113 			err = nexthop_for_each_fib6_nh(rt->nh,
6114 						       rt6_nh_dump_exceptions,
6115 						       &w);
6116 		} else {
6117 			err = rt6_nh_dump_exceptions(rt->fib6_nh, &w);
6118 		}
6119 		rcu_read_unlock();
6120 
6121 		if (err)
6122 			return count + w.count;
6123 	}
6124 
6125 	return -1;
6126 }
6127 
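/* Validate an RTM_GETROUTE request.  Sockets that did not opt in to strict
 * checking get the lenient legacy parsing; strict requests are rejected if
 * they carry unexpected header fields, flags or attributes.
 */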
6128 static int inet6_rtm_valid_getroute_req(struct sk_buff *skb,
6129 					const struct nlmsghdr *nlh,
6130 					struct nlattr **tb,
6131 					struct netlink_ext_ack *extack)
6132 {
6133 	struct rtmsg *rtm;
6134 	int i, err;
6135 
6136 	rtm = nlmsg_payload(nlh, sizeof(*rtm));
6137 	if (!rtm) {
6138 		NL_SET_ERR_MSG_MOD(extack,
6139 				   "Invalid header for get route request");
6140 		return -EINVAL;
6141 	}
6142 
6143 	if (!netlink_strict_get_check(skb))
6144 		return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
6145 					      rtm_ipv6_policy, extack);
6146 
6147 	if ((rtm->rtm_src_len && rtm->rtm_src_len != 128) ||
6148 	    (rtm->rtm_dst_len && rtm->rtm_dst_len != 128) ||
6149 	    rtm->rtm_table || rtm->rtm_protocol || rtm->rtm_scope ||
6150 	    rtm->rtm_type) {
6151 		NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request");
6152 		return -EINVAL;
6153 	}
6154 	if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) {
6155 		NL_SET_ERR_MSG_MOD(extack,
6156 				   "Invalid flags for get route request");
6157 		return -EINVAL;
6158 	}
6159 
6160 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
6161 					    rtm_ipv6_policy, extack);
6162 	if (err)
6163 		return err;
6164 
6165 	if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
6166 	    (tb[RTA_DST] && !rtm->rtm_dst_len)) {
6167 		NL_SET_ERR_MSG_MOD(extack, "rtm_src_len and rtm_dst_len must be 128 for IPv6");
6168 		return -EINVAL;
6169 	}
6170 
6171 	if (tb[RTA_FLOWLABEL] &&
6172 	    (nla_get_be32(tb[RTA_FLOWLABEL]) & ~IPV6_FLOWLABEL_MASK)) {
6173 		NL_SET_ERR_MSG_ATTR(extack, tb[RTA_FLOWLABEL],
6174 				    "Invalid flow label");
6175 		return -EINVAL;
6176 	}
6177 
6178 	for (i = 0; i <= RTA_MAX; i++) {
6179 		if (!tb[i])
6180 			continue;
6181 
6182 		switch (i) {
6183 		case RTA_SRC:
6184 		case RTA_DST:
6185 		case RTA_IIF:
6186 		case RTA_OIF:
6187 		case RTA_MARK:
6188 		case RTA_UID:
6189 		case RTA_SPORT:
6190 		case RTA_DPORT:
6191 		case RTA_IP_PROTO:
6192 		case RTA_FLOWLABEL:
6193 			break;
6194 		default:
6195 			NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request");
6196 			return -EINVAL;
6197 		}
6198 	}
6199 
6200 	return 0;
6201 }
6202 
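/* RTM_GETROUTE handler: perform an input-path lookup when RTA_IIF is
 * given, otherwise an output lookup on RTA_OIF, and answer with either the
 * matching FIB entry (RTM_F_FIB_MATCH) or the resolved dst.  This is what
 * services e.g. "ip -6 route get 2001:db8::1".
 */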
6203 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
6204 			      struct netlink_ext_ack *extack)
6205 {
6206 	struct net *net = sock_net(in_skb->sk);
6207 	struct nlattr *tb[RTA_MAX+1];
6208 	int err, iif = 0, oif = 0;
6209 	struct fib6_info *from;
6210 	struct dst_entry *dst;
6211 	struct rt6_info *rt;
6212 	struct sk_buff *skb;
6213 	struct rtmsg *rtm;
6214 	struct flowi6 fl6 = {};
6215 	__be32 flowlabel;
6216 	bool fibmatch;
6217 
6218 	err = inet6_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
6219 	if (err < 0)
6220 		goto errout;
6221 
6222 	err = -EINVAL;
6223 	rtm = nlmsg_data(nlh);
6224 	fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
6225 
6226 	if (tb[RTA_SRC]) {
6227 		if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
6228 			goto errout;
6229 
6230 		fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
6231 	}
6232 
6233 	if (tb[RTA_DST]) {
6234 		if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
6235 			goto errout;
6236 
6237 		fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
6238 	}
6239 
6240 	if (tb[RTA_IIF])
6241 		iif = nla_get_u32(tb[RTA_IIF]);
6242 
6243 	if (tb[RTA_OIF])
6244 		oif = nla_get_u32(tb[RTA_OIF]);
6245 
6246 	if (tb[RTA_MARK])
6247 		fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
6248 
6249 	if (tb[RTA_UID])
6250 		fl6.flowi6_uid = make_kuid(current_user_ns(),
6251 					   nla_get_u32(tb[RTA_UID]));
6252 	else
6253 		fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
6254 
6255 	if (tb[RTA_SPORT])
6256 		fl6.fl6_sport = nla_get_be16(tb[RTA_SPORT]);
6257 
6258 	if (tb[RTA_DPORT])
6259 		fl6.fl6_dport = nla_get_be16(tb[RTA_DPORT]);
6260 
6261 	if (tb[RTA_IP_PROTO]) {
6262 		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
6263 						  &fl6.flowi6_proto, AF_INET6,
6264 						  extack);
6265 		if (err)
6266 			goto errout;
6267 	}
6268 
6269 	flowlabel = nla_get_be32_default(tb[RTA_FLOWLABEL], 0);
6270 	fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, flowlabel);
6271 
6272 	if (iif) {
6273 		struct net_device *dev;
6274 		int flags = 0;
6275 
6276 		rcu_read_lock();
6277 
6278 		dev = dev_get_by_index_rcu(net, iif);
6279 		if (!dev) {
6280 			rcu_read_unlock();
6281 			err = -ENODEV;
6282 			goto errout;
6283 		}
6284 
6285 		fl6.flowi6_iif = iif;
6286 
6287 		if (!ipv6_addr_any(&fl6.saddr))
6288 			flags |= RT6_LOOKUP_F_HAS_SADDR;
6289 
6290 		dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags);
6291 
6292 		rcu_read_unlock();
6293 	} else {
6294 		fl6.flowi6_oif = oif;
6295 
6296 		dst = ip6_route_output(net, NULL, &fl6);
6297 	}
6298 
6299 
6300 	rt = dst_rt6_info(dst);
6301 	if (rt->dst.error) {
6302 		err = rt->dst.error;
6303 		ip6_rt_put(rt);
6304 		goto errout;
6305 	}
6306 
6307 	if (rt == net->ipv6.ip6_null_entry) {
6308 		err = rt->dst.error;
6309 		ip6_rt_put(rt);
6310 		goto errout;
6311 	}
6312 
6313 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
6314 	if (!skb) {
6315 		ip6_rt_put(rt);
6316 		err = -ENOBUFS;
6317 		goto errout;
6318 	}
6319 
6320 	skb_dst_set(skb, &rt->dst);
6321 
6322 	rcu_read_lock();
6323 	from = rcu_dereference(rt->from);
6324 	if (from) {
6325 		if (fibmatch)
6326 			err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
6327 					    iif, RTM_NEWROUTE,
6328 					    NETLINK_CB(in_skb).portid,
6329 					    nlh->nlmsg_seq, 0);
6330 		else
6331 			err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
6332 					    &fl6.saddr, iif, RTM_NEWROUTE,
6333 					    NETLINK_CB(in_skb).portid,
6334 					    nlh->nlmsg_seq, 0);
6335 	} else {
6336 		err = -ENETUNREACH;
6337 	}
6338 	rcu_read_unlock();
6339 
6340 	if (err < 0) {
6341 		kfree_skb(skb);
6342 		goto errout;
6343 	}
6344 
6345 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
6346 errout:
6347 	return err;
6348 }
6349 
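/* Multicast a route change to RTNLGRP_IPV6_ROUTE listeners.  The skb is
 * sized up front; if the route grows between sizing and filling (e.g. new
 * siblings appear) and filling returns -EMSGSIZE, retry with a larger
 * buffer instead of dropping the notification.
 */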
6350 void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
6351 		     unsigned int nlm_flags)
6352 {
6353 	struct net *net = info->nl_net;
6354 	struct sk_buff *skb;
6355 	size_t sz;
6356 	u32 seq;
6357 	int err;
6358 
6359 	err = -ENOBUFS;
6360 	seq = info->nlh ? info->nlh->nlmsg_seq : 0;
6361 
6362 	rcu_read_lock();
6363 	sz = rt6_nlmsg_size(rt);
6364 retry:
6365 	skb = nlmsg_new(sz, GFP_ATOMIC);
6366 	if (!skb)
6367 		goto errout;
6368 
6369 	err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
6370 			    event, info->portid, seq, nlm_flags);
6371 	if (err < 0) {
6372 		kfree_skb(skb);
6373 		/* -EMSGSIZE implies needed space grew under us. */
6374 		if (err == -EMSGSIZE) {
6375 			sz = max(rt6_nlmsg_size(rt), sz << 1);
6376 			goto retry;
6377 		}
6378 		goto errout;
6379 	}
6380 
6381 	rcu_read_unlock();
6382 
6383 	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
6384 		    info->nlh, GFP_ATOMIC);
6385 	return;
6386 errout:
6387 	rcu_read_unlock();
6388 	rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
6389 }
6390 
6391 void fib6_rt_update(struct net *net, struct fib6_info *rt,
6392 		    struct nl_info *info)
6393 {
6394 	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
6395 	struct sk_buff *skb;
6396 	int err = -ENOBUFS;
6397 
6398 	skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
6399 	if (!skb)
6400 		goto errout;
6401 
6402 	err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
6403 			    RTM_NEWROUTE, info->portid, seq, NLM_F_REPLACE);
6404 	if (err < 0) {
6405 		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
6406 		WARN_ON(err == -EMSGSIZE);
6407 		kfree_skb(skb);
6408 		goto errout;
6409 	}
6410 	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
6411 		    info->nlh, gfp_any());
6412 	return;
6413 errout:
6414 	rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
6415 }
6416 
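/* Called by drivers to update the hardware offload state of a route.
 * Whether userspace is notified follows the fib_notify_on_flag_change
 * sysctl: 0 - never, 1 - on any flag change, 2 - only when offload_failed
 * changes.
 */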
6417 void fib6_info_hw_flags_set(struct net *net, struct fib6_info *f6i,
6418 			    bool offload, bool trap, bool offload_failed)
6419 {
6420 	u8 fib_notify_on_flag_change;
6421 	struct sk_buff *skb;
6422 	int err;
6423 
6424 	if (READ_ONCE(f6i->offload) == offload &&
6425 	    READ_ONCE(f6i->trap) == trap &&
6426 	    READ_ONCE(f6i->offload_failed) == offload_failed)
6427 		return;
6428 
6429 	WRITE_ONCE(f6i->offload, offload);
6430 	WRITE_ONCE(f6i->trap, trap);
6431 
6432 	fib_notify_on_flag_change = READ_ONCE(net->ipv6.sysctl.fib_notify_on_flag_change);
6433 	/* 2 means send notifications only if offload_failed was changed. */
6434 	if (fib_notify_on_flag_change == 2 &&
6435 	    READ_ONCE(f6i->offload_failed) == offload_failed)
6436 		return;
6437 
6438 	WRITE_ONCE(f6i->offload_failed, offload_failed);
6439 
6440 	if (!rcu_access_pointer(f6i->fib6_node))
6441 		/* The route was removed from the tree, do not send
6442 		 * notification.
6443 		 */
6444 		return;
6445 
6446 	if (!fib_notify_on_flag_change)
6447 		return;
6448 
6449 	skb = nlmsg_new(rt6_nlmsg_size(f6i), GFP_KERNEL);
6450 	if (!skb) {
6451 		err = -ENOBUFS;
6452 		goto errout;
6453 	}
6454 
6455 	err = rt6_fill_node(net, skb, f6i, NULL, NULL, NULL, 0, RTM_NEWROUTE, 0,
6456 			    0, 0);
6457 	if (err < 0) {
6458 		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
6459 		WARN_ON(err == -EMSGSIZE);
6460 		kfree_skb(skb);
6461 		goto errout;
6462 	}
6463 
6464 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_ROUTE, NULL, GFP_KERNEL);
6465 	return;
6466 
6467 errout:
6468 	rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
6469 }
6470 EXPORT_SYMBOL(fib6_info_hw_flags_set);
6471 
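/* Bind the per-netns special routes (null, and with multiple tables also
 * prohibit/blackhole) to the loopback device when it registers, and drop
 * their inet6_dev references again on unregister.
 */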
6472 static int ip6_route_dev_notify(struct notifier_block *this,
6473 				unsigned long event, void *ptr)
6474 {
6475 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6476 	struct net *net = dev_net(dev);
6477 
6478 	if (!(dev->flags & IFF_LOOPBACK))
6479 		return NOTIFY_OK;
6480 
6481 	if (event == NETDEV_REGISTER) {
6482 		net->ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = dev;
6483 		net->ipv6.ip6_null_entry->dst.dev = dev;
6484 		net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
6485 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6486 		net->ipv6.ip6_prohibit_entry->dst.dev = dev;
6487 		net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
6488 		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
6489 		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
6490 #endif
6491 	 } else if (event == NETDEV_UNREGISTER &&
6492 		    dev->reg_state != NETREG_UNREGISTERED) {
6493 		/* NETDEV_UNREGISTER can be fired multiple times by
6494 		 * netdev_wait_allrefs(). Make sure we only call this once.
6495 		 */
6496 		in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
6497 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6498 		in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
6499 		in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
6500 #endif
6501 	}
6502 
6503 	return NOTIFY_OK;
6504 }
6505 
6506 /*
6507  *	/proc
6508  */
6509 
6510 #ifdef CONFIG_PROC_FS
6511 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
6512 {
6513 	struct net *net = (struct net *)seq->private;
6514 	seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
6515 		   net->ipv6.rt6_stats->fib_nodes,
6516 		   net->ipv6.rt6_stats->fib_route_nodes,
6517 		   atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
6518 		   net->ipv6.rt6_stats->fib_rt_entries,
6519 		   net->ipv6.rt6_stats->fib_rt_cache,
6520 		   dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
6521 		   net->ipv6.rt6_stats->fib_discarded_routes);
6522 
6523 	return 0;
6524 }
6525 #endif	/* CONFIG_PROC_FS */
6526 
6527 #ifdef CONFIG_SYSCTL
6528 
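/* Write-only handler behind /proc/sys/net/ipv6/route/flush: writing a
 * value triggers a fib6_run_gc() pass over cached routes, e.g.
 * "echo 1 > /proc/sys/net/ipv6/route/flush".
 */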
6529 static int ipv6_sysctl_rtcache_flush(const struct ctl_table *ctl, int write,
6530 			      void *buffer, size_t *lenp, loff_t *ppos)
6531 {
6532 	struct net *net;
6533 	int delay;
6534 	int ret;
6535 	if (!write)
6536 		return -EINVAL;
6537 
6538 	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
6539 	if (ret)
6540 		return ret;
6541 
6542 	net = (struct net *)ctl->extra1;
6543 	delay = READ_ONCE(net->ipv6.sysctl.flush_delay);
6544 	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
6545 	return 0;
6546 }
6547 
6548 static struct ctl_table ipv6_route_table_template[] = {
6549 	{
6550 		.procname	=	"max_size",
6551 		.data		=	&init_net.ipv6.sysctl.ip6_rt_max_size,
6552 		.maxlen		=	sizeof(int),
6553 		.mode		=	0644,
6554 		.proc_handler	=	proc_dointvec,
6555 	},
6556 	{
6557 		.procname	=	"gc_thresh",
6558 		.data		=	&ip6_dst_ops_template.gc_thresh,
6559 		.maxlen		=	sizeof(int),
6560 		.mode		=	0644,
6561 		.proc_handler	=	proc_dointvec,
6562 	},
6563 	{
6564 		.procname	=	"flush",
6565 		.data		=	&init_net.ipv6.sysctl.flush_delay,
6566 		.maxlen		=	sizeof(int),
6567 		.mode		=	0200,
6568 		.proc_handler	=	ipv6_sysctl_rtcache_flush
6569 	},
6570 	{
6571 		.procname	=	"gc_min_interval",
6572 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
6573 		.maxlen		=	sizeof(int),
6574 		.mode		=	0644,
6575 		.proc_handler	=	proc_dointvec_jiffies,
6576 	},
6577 	{
6578 		.procname	=	"gc_timeout",
6579 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_timeout,
6580 		.maxlen		=	sizeof(int),
6581 		.mode		=	0644,
6582 		.proc_handler	=	proc_dointvec_jiffies,
6583 	},
6584 	{
6585 		.procname	=	"gc_interval",
6586 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_interval,
6587 		.maxlen		=	sizeof(int),
6588 		.mode		=	0644,
6589 		.proc_handler	=	proc_dointvec_jiffies,
6590 	},
6591 	{
6592 		.procname	=	"gc_elasticity",
6593 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
6594 		.maxlen		=	sizeof(int),
6595 		.mode		=	0644,
6596 		.proc_handler	=	proc_dointvec,
6597 	},
6598 	{
6599 		.procname	=	"mtu_expires",
6600 		.data		=	&init_net.ipv6.sysctl.ip6_rt_mtu_expires,
6601 		.maxlen		=	sizeof(int),
6602 		.mode		=	0644,
6603 		.proc_handler	=	proc_dointvec_jiffies,
6604 	},
6605 	{
6606 		.procname	=	"min_adv_mss",
6607 		.data		=	&init_net.ipv6.sysctl.ip6_rt_min_advmss,
6608 		.maxlen		=	sizeof(int),
6609 		.mode		=	0644,
6610 		.proc_handler	=	proc_dointvec,
6611 	},
6612 	{
6613 		.procname	=	"gc_min_interval_ms",
6614 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
6615 		.maxlen		=	sizeof(int),
6616 		.mode		=	0644,
6617 		.proc_handler	=	proc_dointvec_ms_jiffies,
6618 	},
6619 	{
6620 		.procname	=	"skip_notify_on_dev_down",
6621 		.data		=	&init_net.ipv6.sysctl.skip_notify_on_dev_down,
6622 		.maxlen		=	sizeof(u8),
6623 		.mode		=	0644,
6624 		.proc_handler	=	proc_dou8vec_minmax,
6625 		.extra1		=	SYSCTL_ZERO,
6626 		.extra2		=	SYSCTL_ONE,
6627 	},
6628 };
6629 
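/* Duplicate the sysctl template for a new netns and repoint each .data at
 * the per-netns storage.  The indices below must stay in sync with the
 * template order; note that gc_min_interval_ms shares its storage with
 * gc_min_interval.
 */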
6630 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
6631 {
6632 	struct ctl_table *table;
6633 
6634 	table = kmemdup(ipv6_route_table_template,
6635 			sizeof(ipv6_route_table_template),
6636 			GFP_KERNEL);
6637 
6638 	if (table) {
6639 		table[0].data = &net->ipv6.sysctl.ip6_rt_max_size;
6640 		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
6641 		table[2].data = &net->ipv6.sysctl.flush_delay;
6642 		table[2].extra1 = net;
6643 		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
6644 		table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
6645 		table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
6646 		table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
6647 		table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
6648 		table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
6649 		table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
6650 		table[10].data = &net->ipv6.sysctl.skip_notify_on_dev_down;
6651 	}
6652 
6653 	return table;
6654 }
6655 
6656 size_t ipv6_route_sysctl_table_size(struct net *net)
6657 {
6658 	/* Don't export sysctls to unprivileged users */
6659 	if (net->user_ns != &init_user_ns)
6660 		return 1;
6661 
6662 	return ARRAY_SIZE(ipv6_route_table_template);
6663 }
6664 #endif
6665 
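/* Per-netns setup: allocate the dst accounting, the always-present null
 * entry and (with CONFIG_IPV6_MULTIPLE_TABLES) the prohibit and blackhole
 * template routes, then seed the default route sysctl values.
 */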
6666 static int __net_init ip6_route_net_init(struct net *net)
6667 {
6668 	int ret = -ENOMEM;
6669 
6670 	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
6671 	       sizeof(net->ipv6.ip6_dst_ops));
6672 
6673 	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
6674 		goto out_ip6_dst_ops;
6675 
6676 	net->ipv6.fib6_null_entry = fib6_info_alloc(GFP_KERNEL, true);
6677 	if (!net->ipv6.fib6_null_entry)
6678 		goto out_ip6_dst_entries;
6679 	memcpy(net->ipv6.fib6_null_entry, &fib6_null_entry_template,
6680 	       sizeof(*net->ipv6.fib6_null_entry));
6681 
6682 	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
6683 					   sizeof(*net->ipv6.ip6_null_entry),
6684 					   GFP_KERNEL);
6685 	if (!net->ipv6.ip6_null_entry)
6686 		goto out_fib6_null_entry;
6687 	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6688 	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
6689 			 ip6_template_metrics, true);
6690 	INIT_LIST_HEAD(&net->ipv6.ip6_null_entry->dst.rt_uncached);
6691 
6692 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6693 	net->ipv6.fib6_has_custom_rules = false;
6694 	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
6695 					       sizeof(*net->ipv6.ip6_prohibit_entry),
6696 					       GFP_KERNEL);
6697 	if (!net->ipv6.ip6_prohibit_entry)
6698 		goto out_ip6_null_entry;
6699 	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6700 	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
6701 			 ip6_template_metrics, true);
6702 	INIT_LIST_HEAD(&net->ipv6.ip6_prohibit_entry->dst.rt_uncached);
6703 
6704 	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
6705 					       sizeof(*net->ipv6.ip6_blk_hole_entry),
6706 					       GFP_KERNEL);
6707 	if (!net->ipv6.ip6_blk_hole_entry)
6708 		goto out_ip6_prohibit_entry;
6709 	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6710 	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
6711 			 ip6_template_metrics, true);
6712 	INIT_LIST_HEAD(&net->ipv6.ip6_blk_hole_entry->dst.rt_uncached);
6713 #ifdef CONFIG_IPV6_SUBTREES
6714 	net->ipv6.fib6_routes_require_src = 0;
6715 #endif
6716 #endif
6717 
6718 	net->ipv6.sysctl.flush_delay = 0;
6719 	net->ipv6.sysctl.ip6_rt_max_size = INT_MAX;
6720 	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
6721 	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
6722 	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
6723 	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
6724 	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
6725 	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
6726 	net->ipv6.sysctl.skip_notify_on_dev_down = 0;
6727 
6728 	atomic_set(&net->ipv6.ip6_rt_gc_expire, 30*HZ);
6729 
6730 	ret = 0;
6731 out:
6732 	return ret;
6733 
6734 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6735 out_ip6_prohibit_entry:
6736 	kfree(net->ipv6.ip6_prohibit_entry);
6737 out_ip6_null_entry:
6738 	kfree(net->ipv6.ip6_null_entry);
6739 #endif
6740 out_fib6_null_entry:
6741 	kfree(net->ipv6.fib6_null_entry);
6742 out_ip6_dst_entries:
6743 	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
6744 out_ip6_dst_ops:
6745 	goto out;
6746 }
6747 
6748 static void __net_exit ip6_route_net_exit(struct net *net)
6749 {
6750 	kfree(net->ipv6.fib6_null_entry);
6751 	kfree(net->ipv6.ip6_null_entry);
6752 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6753 	kfree(net->ipv6.ip6_prohibit_entry);
6754 	kfree(net->ipv6.ip6_blk_hole_entry);
6755 #endif
6756 	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
6757 }
6758 
6759 static int __net_init ip6_route_net_init_late(struct net *net)
6760 {
6761 #ifdef CONFIG_PROC_FS
6762 	if (!proc_create_net("ipv6_route", 0, net->proc_net,
6763 			     &ipv6_route_seq_ops,
6764 			     sizeof(struct ipv6_route_iter)))
6765 		return -ENOMEM;
6766 
6767 	if (!proc_create_net_single("rt6_stats", 0444, net->proc_net,
6768 				    rt6_stats_seq_show, NULL)) {
6769 		remove_proc_entry("ipv6_route", net->proc_net);
6770 		return -ENOMEM;
6771 	}
6772 #endif
6773 	return 0;
6774 }
6775 
6776 static void __net_exit ip6_route_net_exit_late(struct net *net)
6777 {
6778 #ifdef CONFIG_PROC_FS
6779 	remove_proc_entry("ipv6_route", net->proc_net);
6780 	remove_proc_entry("rt6_stats", net->proc_net);
6781 #endif
6782 }
6783 
6784 static struct pernet_operations ip6_route_net_ops = {
6785 	.init = ip6_route_net_init,
6786 	.exit = ip6_route_net_exit,
6787 };
6788 
6789 static int __net_init ipv6_inetpeer_init(struct net *net)
6790 {
6791 	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
6792 
6793 	if (!bp)
6794 		return -ENOMEM;
6795 	inet_peer_base_init(bp);
6796 	net->ipv6.peers = bp;
6797 	return 0;
6798 }
6799 
6800 static void __net_exit ipv6_inetpeer_exit(struct net *net)
6801 {
6802 	struct inet_peer_base *bp = net->ipv6.peers;
6803 
6804 	net->ipv6.peers = NULL;
6805 	inetpeer_invalidate_tree(bp);
6806 	kfree(bp);
6807 }
6808 
6809 static struct pernet_operations ipv6_inetpeer_ops = {
6810 	.init	=	ipv6_inetpeer_init,
6811 	.exit	=	ipv6_inetpeer_exit,
6812 };
6813 
6814 static struct pernet_operations ip6_route_net_late_ops = {
6815 	.init = ip6_route_net_init_late,
6816 	.exit = ip6_route_net_exit_late,
6817 };
6818 
6819 static struct notifier_block ip6_route_dev_notifier = {
6820 	.notifier_call = ip6_route_dev_notify,
6821 	.priority = ADDRCONF_NOTIFY_PRIORITY - 10,
6822 };
6823 
6824 void __init ip6_route_init_special_entries(void)
6825 {
6826 	/* The loopback device is registered before this code runs, so the
6827 	 * loopback reference in rt6_info is not taken automatically; take it
6828 	 * manually for init_net. */
6829 	init_net.ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = init_net.loopback_dev;
6830 	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
6831 	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6832   #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6833 	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
6834 	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6835 	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
6836 	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6837   #endif
6838 }
6839 
6840 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
6841 DEFINE_BPF_ITER_FUNC(ipv6_route, struct bpf_iter_meta *meta, struct fib6_info *rt)
6842 
6843 BTF_ID_LIST_SINGLE(btf_fib6_info_id, struct, fib6_info)
6844 
6845 static const struct bpf_iter_seq_info ipv6_route_seq_info = {
6846 	.seq_ops		= &ipv6_route_seq_ops,
6847 	.init_seq_private	= bpf_iter_init_seq_net,
6848 	.fini_seq_private	= bpf_iter_fini_seq_net,
6849 	.seq_priv_size		= sizeof(struct ipv6_route_iter),
6850 };
6851 
6852 static struct bpf_iter_reg ipv6_route_reg_info = {
6853 	.target			= "ipv6_route",
6854 	.ctx_arg_info_size	= 1,
6855 	.ctx_arg_info		= {
6856 		{ offsetof(struct bpf_iter__ipv6_route, rt),
6857 		  PTR_TO_BTF_ID_OR_NULL },
6858 	},
6859 	.seq_info		= &ipv6_route_seq_info,
6860 };
6861 
6862 static int __init bpf_iter_register(void)
6863 {
6864 	ipv6_route_reg_info.ctx_arg_info[0].btf_id = *btf_fib6_info_id;
6865 	return bpf_iter_reg_target(&ipv6_route_reg_info);
6866 }
6867 
6868 static void bpf_iter_unregister(void)
6869 {
6870 	bpf_iter_unreg_target(&ipv6_route_reg_info);
6871 }
6872 #endif
6873 
6874 static const struct rtnl_msg_handler ip6_route_rtnl_msg_handlers[] __initconst_or_module = {
6875 	{.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_NEWROUTE,
6876 	 .doit = inet6_rtm_newroute, .flags = RTNL_FLAG_DOIT_UNLOCKED},
6877 	{.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_DELROUTE,
6878 	 .doit = inet6_rtm_delroute, .flags = RTNL_FLAG_DOIT_UNLOCKED},
6879 	{.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETROUTE,
6880 	 .doit = inet6_rtm_getroute, .flags = RTNL_FLAG_DOIT_UNLOCKED},
6881 };
6882 
6883 int __init ip6_route_init(void)
6884 {
6885 	int ret;
6886 	int cpu;
6887 
6888 	ret = -ENOMEM;
6889 	ip6_dst_ops_template.kmem_cachep =
6890 		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
6891 				  SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
6892 	if (!ip6_dst_ops_template.kmem_cachep)
6893 		goto out;
6894 
6895 	ret = dst_entries_init(&ip6_dst_blackhole_ops);
6896 	if (ret)
6897 		goto out_kmem_cache;
6898 
6899 	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
6900 	if (ret)
6901 		goto out_dst_entries;
6902 
6903 	ret = register_pernet_subsys(&ip6_route_net_ops);
6904 	if (ret)
6905 		goto out_register_inetpeer;
6906 
6907 	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
6908 
6909 	ret = fib6_init();
6910 	if (ret)
6911 		goto out_register_subsys;
6912 
6913 	ret = xfrm6_init();
6914 	if (ret)
6915 		goto out_fib6_init;
6916 
6917 	ret = fib6_rules_init();
6918 	if (ret)
6919 		goto xfrm6_init;
6920 
6921 	ret = register_pernet_subsys(&ip6_route_net_late_ops);
6922 	if (ret)
6923 		goto fib6_rules_init;
6924 
6925 	ret = rtnl_register_many(ip6_route_rtnl_msg_handlers);
6926 	if (ret < 0)
6927 		goto out_register_late_subsys;
6928 
6929 	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
6930 	if (ret)
6931 		goto out_register_late_subsys;
6932 
6933 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
6934 	ret = bpf_iter_register();
6935 	if (ret)
6936 		goto out_register_late_subsys;
6937 #endif
6938 
6939 	for_each_possible_cpu(cpu) {
6940 		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
6941 
6942 		INIT_LIST_HEAD(&ul->head);
6943 		spin_lock_init(&ul->lock);
6944 	}
6945 
6946 out:
6947 	return ret;
6948 
6949 out_register_late_subsys:
6950 	rtnl_unregister_all(PF_INET6);
6951 	unregister_pernet_subsys(&ip6_route_net_late_ops);
6952 fib6_rules_init:
6953 	fib6_rules_cleanup();
6954 xfrm6_init:
6955 	xfrm6_fini();
6956 out_fib6_init:
6957 	fib6_gc_cleanup();
6958 out_register_subsys:
6959 	unregister_pernet_subsys(&ip6_route_net_ops);
6960 out_register_inetpeer:
6961 	unregister_pernet_subsys(&ipv6_inetpeer_ops);
6962 out_dst_entries:
6963 	dst_entries_destroy(&ip6_dst_blackhole_ops);
6964 out_kmem_cache:
6965 	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
6966 	goto out;
6967 }
6968 
6969 void ip6_route_cleanup(void)
6970 {
6971 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
6972 	bpf_iter_unregister();
6973 #endif
6974 	unregister_netdevice_notifier(&ip6_route_dev_notifier);
6975 	unregister_pernet_subsys(&ip6_route_net_late_ops);
6976 	fib6_rules_cleanup();
6977 	xfrm6_fini();
6978 	fib6_gc_cleanup();
6979 	unregister_pernet_subsys(&ipv6_inetpeer_ops);
6980 	unregister_pernet_subsys(&ip6_route_net_ops);
6981 	dst_entries_destroy(&ip6_dst_blackhole_ops);
6982 	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
6983 }
6984