xref: /linux/net/ipv6/addrconf.c (revision dec1c62e91ba268ab2a6e339d4d7a59287d5eba1)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	IPv6 Address [auto]configuration
4  *	Linux INET6 implementation
5  *
6  *	Authors:
7  *	Pedro Roque		<roque@di.fc.ul.pt>
8  *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
9  */
10 
11 /*
12  *	Changes:
13  *
14  *	Janos Farkas			:	delete timer on ifdown
15  *	<chexum@bankinf.banki.hu>
16  *	Andi Kleen			:	kill double kfree on module
17  *						unload.
18  *	Maciej W. Rozycki		:	FDDI support
19  *	sekiya@USAGI			:	Don't send too many RS
20  *						packets.
21  *	yoshfuji@USAGI			:       Fixed interval between DAD
22  *						packets.
23  *	YOSHIFUJI Hideaki @USAGI	:	improved accuracy of
24  *						address validation timer.
25  *	YOSHIFUJI Hideaki @USAGI	:	Privacy Extensions (RFC3041)
26  *						support.
27  *	Yuji SEKIYA @USAGI		:	Don't assign the same IPv6
28  *						address twice on the same interface.
29  *	YOSHIFUJI Hideaki @USAGI	:	ARCnet support
30  *	YOSHIFUJI Hideaki @USAGI	:	convert /proc/net/if_inet6 to
31  *						seq_file.
32  *	YOSHIFUJI Hideaki @USAGI	:	improved source address
33  *						selection; consider scope,
34  *						status etc.
35  */
36 
37 #define pr_fmt(fmt) "IPv6: " fmt
38 
39 #include <linux/errno.h>
40 #include <linux/types.h>
41 #include <linux/kernel.h>
42 #include <linux/sched/signal.h>
43 #include <linux/socket.h>
44 #include <linux/sockios.h>
45 #include <linux/net.h>
46 #include <linux/inet.h>
47 #include <linux/in6.h>
48 #include <linux/netdevice.h>
49 #include <linux/if_addr.h>
50 #include <linux/if_arp.h>
51 #include <linux/if_arcnet.h>
52 #include <linux/if_infiniband.h>
53 #include <linux/route.h>
54 #include <linux/inetdevice.h>
55 #include <linux/init.h>
56 #include <linux/slab.h>
57 #ifdef CONFIG_SYSCTL
58 #include <linux/sysctl.h>
59 #endif
60 #include <linux/capability.h>
61 #include <linux/delay.h>
62 #include <linux/notifier.h>
63 #include <linux/string.h>
64 #include <linux/hash.h>
65 
66 #include <net/net_namespace.h>
67 #include <net/sock.h>
68 #include <net/snmp.h>
69 
70 #include <net/6lowpan.h>
71 #include <net/firewire.h>
72 #include <net/ipv6.h>
73 #include <net/protocol.h>
74 #include <net/ndisc.h>
75 #include <net/ip6_route.h>
76 #include <net/addrconf.h>
77 #include <net/tcp.h>
78 #include <net/ip.h>
79 #include <net/netlink.h>
80 #include <net/pkt_sched.h>
81 #include <net/l3mdev.h>
82 #include <linux/if_tunnel.h>
83 #include <linux/rtnetlink.h>
84 #include <linux/netconf.h>
85 #include <linux/random.h>
86 #include <linux/uaccess.h>
87 #include <asm/unaligned.h>
88 
89 #include <linux/proc_fs.h>
90 #include <linux/seq_file.h>
91 #include <linux/export.h>
92 #include <linux/ioam6.h>
93 
94 #define	INFINITY_LIFE_TIME	0xFFFFFFFF
95 
96 #define IPV6_MAX_STRLEN \
97 	sizeof("ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255")
98 
99 static inline u32 cstamp_delta(unsigned long cstamp)
100 {
101 	return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
102 }
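
/*
 * Illustrative example (not part of the original file): cstamp_delta()
 * converts a jiffies timestamp into hundredths of a second since boot.
 * With HZ == 1000, a timestamp recorded 5000 jiffies after INITIAL_JIFFIES
 * maps to 5000 * 100 / 1000 == 500, i.e. 5.00 seconds.
 */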
103 
104 static inline s32 rfc3315_s14_backoff_init(s32 irt)
105 {
106 	/* multiply 'initial retransmission time' by 0.9 .. 1.1 */
107 	u64 tmp = (900000 + prandom_u32() % 200001) * (u64)irt;
108 	do_div(tmp, 1000000);
109 	return (s32)tmp;
110 }
111 
112 static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
113 {
114 	/* multiply 'retransmission timeout' by 1.9 .. 2.1 */
115 	u64 tmp = (1900000 + prandom_u32() % 200001) * (u64)rt;
116 	do_div(tmp, 1000000);
117 	if ((s32)tmp > mrt) {
118 		/* multiply 'maximum retransmission time' by 0.9 .. 1.1 */
119 		tmp = (900000 + prandom_u32() % 200001) * (u64)mrt;
120 		do_div(tmp, 1000000);
121 	}
122 	return (s32)tmp;
123 }
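
/*
 * Illustrative sketch (not part of the original file) of how the two
 * RFC 3315 section 14 helpers above combine into a retransmission
 * schedule; the concrete values are assumptions for the example only:
 *
 *	s32 rt = rfc3315_s14_backoff_init(4 * HZ);
 *
 *	while (more_solicitations_needed())
 *		rt = rfc3315_s14_backoff_update(rt, 3600 * HZ);
 *
 * The initial value is drawn from [3.6 * HZ, 4.4 * HZ]; each update then
 * multiplies the previous timeout by 1.9 .. 2.1 and, once the result
 * exceeds the maximum (3600 * HZ here), re-draws it as that maximum
 * scaled by 0.9 .. 1.1, giving a jittered exponential backoff.
 * more_solicitations_needed() is a hypothetical placeholder.
 */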
124 
125 #ifdef CONFIG_SYSCTL
126 static int addrconf_sysctl_register(struct inet6_dev *idev);
127 static void addrconf_sysctl_unregister(struct inet6_dev *idev);
128 #else
129 static inline int addrconf_sysctl_register(struct inet6_dev *idev)
130 {
131 	return 0;
132 }
133 
134 static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
135 {
136 }
137 #endif
138 
139 static void ipv6_gen_rnd_iid(struct in6_addr *addr);
140 
141 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
142 static int ipv6_count_addresses(const struct inet6_dev *idev);
143 static int ipv6_generate_stable_address(struct in6_addr *addr,
144 					u8 dad_count,
145 					const struct inet6_dev *idev);
146 
147 #define IN6_ADDR_HSIZE_SHIFT	8
148 #define IN6_ADDR_HSIZE		(1 << IN6_ADDR_HSIZE_SHIFT)
149 
150 static void addrconf_verify(struct net *net);
151 static void addrconf_verify_rtnl(struct net *net);
152 
153 static struct workqueue_struct *addrconf_wq;
154 
155 static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
156 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
157 
158 static void addrconf_type_change(struct net_device *dev,
159 				 unsigned long event);
160 static int addrconf_ifdown(struct net_device *dev, bool unregister);
161 
162 static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
163 						  int plen,
164 						  const struct net_device *dev,
165 						  u32 flags, u32 noflags,
166 						  bool no_gw);
167 
168 static void addrconf_dad_start(struct inet6_ifaddr *ifp);
169 static void addrconf_dad_work(struct work_struct *w);
170 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
171 				   bool send_na);
172 static void addrconf_dad_run(struct inet6_dev *idev, bool restart);
173 static void addrconf_rs_timer(struct timer_list *t);
174 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
175 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
176 
177 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
178 				struct prefix_info *pinfo);
179 
180 static struct ipv6_devconf ipv6_devconf __read_mostly = {
181 	.forwarding		= 0,
182 	.hop_limit		= IPV6_DEFAULT_HOPLIMIT,
183 	.mtu6			= IPV6_MIN_MTU,
184 	.accept_ra		= 1,
185 	.accept_redirects	= 1,
186 	.autoconf		= 1,
187 	.force_mld_version	= 0,
188 	.mldv1_unsolicited_report_interval = 10 * HZ,
189 	.mldv2_unsolicited_report_interval = HZ,
190 	.dad_transmits		= 1,
191 	.rtr_solicits		= MAX_RTR_SOLICITATIONS,
192 	.rtr_solicit_interval	= RTR_SOLICITATION_INTERVAL,
193 	.rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
194 	.rtr_solicit_delay	= MAX_RTR_SOLICITATION_DELAY,
195 	.use_tempaddr		= 0,
196 	.temp_valid_lft		= TEMP_VALID_LIFETIME,
197 	.temp_prefered_lft	= TEMP_PREFERRED_LIFETIME,
198 	.regen_max_retry	= REGEN_MAX_RETRY,
199 	.max_desync_factor	= MAX_DESYNC_FACTOR,
200 	.max_addresses		= IPV6_MAX_ADDRESSES,
201 	.accept_ra_defrtr	= 1,
202 	.ra_defrtr_metric	= IP6_RT_PRIO_USER,
203 	.accept_ra_from_local	= 0,
204 	.accept_ra_min_hop_limit= 1,
205 	.accept_ra_pinfo	= 1,
206 #ifdef CONFIG_IPV6_ROUTER_PREF
207 	.accept_ra_rtr_pref	= 1,
208 	.rtr_probe_interval	= 60 * HZ,
209 #ifdef CONFIG_IPV6_ROUTE_INFO
210 	.accept_ra_rt_info_min_plen = 0,
211 	.accept_ra_rt_info_max_plen = 0,
212 #endif
213 #endif
214 	.proxy_ndp		= 0,
215 	.accept_source_route	= 0,	/* we do not accept RH0 by default. */
216 	.disable_ipv6		= 0,
217 	.accept_dad		= 0,
218 	.suppress_frag_ndisc	= 1,
219 	.accept_ra_mtu		= 1,
220 	.stable_secret		= {
221 		.initialized = false,
222 	},
223 	.use_oif_addrs_only	= 0,
224 	.ignore_routes_with_linkdown = 0,
225 	.keep_addr_on_down	= 0,
226 	.seg6_enabled		= 0,
227 #ifdef CONFIG_IPV6_SEG6_HMAC
228 	.seg6_require_hmac	= 0,
229 #endif
230 	.enhanced_dad           = 1,
231 	.addr_gen_mode		= IN6_ADDR_GEN_MODE_EUI64,
232 	.disable_policy		= 0,
233 	.rpl_seg_enabled	= 0,
234 	.ioam6_enabled		= 0,
235 	.ioam6_id               = IOAM6_DEFAULT_IF_ID,
236 	.ioam6_id_wide		= IOAM6_DEFAULT_IF_ID_WIDE,
237 	.ndisc_evict_nocarrier	= 1,
238 };
239 
240 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
241 	.forwarding		= 0,
242 	.hop_limit		= IPV6_DEFAULT_HOPLIMIT,
243 	.mtu6			= IPV6_MIN_MTU,
244 	.accept_ra		= 1,
245 	.accept_redirects	= 1,
246 	.autoconf		= 1,
247 	.force_mld_version	= 0,
248 	.mldv1_unsolicited_report_interval = 10 * HZ,
249 	.mldv2_unsolicited_report_interval = HZ,
250 	.dad_transmits		= 1,
251 	.rtr_solicits		= MAX_RTR_SOLICITATIONS,
252 	.rtr_solicit_interval	= RTR_SOLICITATION_INTERVAL,
253 	.rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
254 	.rtr_solicit_delay	= MAX_RTR_SOLICITATION_DELAY,
255 	.use_tempaddr		= 0,
256 	.temp_valid_lft		= TEMP_VALID_LIFETIME,
257 	.temp_prefered_lft	= TEMP_PREFERRED_LIFETIME,
258 	.regen_max_retry	= REGEN_MAX_RETRY,
259 	.max_desync_factor	= MAX_DESYNC_FACTOR,
260 	.max_addresses		= IPV6_MAX_ADDRESSES,
261 	.accept_ra_defrtr	= 1,
262 	.ra_defrtr_metric	= IP6_RT_PRIO_USER,
263 	.accept_ra_from_local	= 0,
264 	.accept_ra_min_hop_limit= 1,
265 	.accept_ra_pinfo	= 1,
266 #ifdef CONFIG_IPV6_ROUTER_PREF
267 	.accept_ra_rtr_pref	= 1,
268 	.rtr_probe_interval	= 60 * HZ,
269 #ifdef CONFIG_IPV6_ROUTE_INFO
270 	.accept_ra_rt_info_min_plen = 0,
271 	.accept_ra_rt_info_max_plen = 0,
272 #endif
273 #endif
274 	.proxy_ndp		= 0,
275 	.accept_source_route	= 0,	/* we do not accept RH0 by default. */
276 	.disable_ipv6		= 0,
277 	.accept_dad		= 1,
278 	.suppress_frag_ndisc	= 1,
279 	.accept_ra_mtu		= 1,
280 	.stable_secret		= {
281 		.initialized = false,
282 	},
283 	.use_oif_addrs_only	= 0,
284 	.ignore_routes_with_linkdown = 0,
285 	.keep_addr_on_down	= 0,
286 	.seg6_enabled		= 0,
287 #ifdef CONFIG_IPV6_SEG6_HMAC
288 	.seg6_require_hmac	= 0,
289 #endif
290 	.enhanced_dad           = 1,
291 	.addr_gen_mode		= IN6_ADDR_GEN_MODE_EUI64,
292 	.disable_policy		= 0,
293 	.rpl_seg_enabled	= 0,
294 	.ioam6_enabled		= 0,
295 	.ioam6_id               = IOAM6_DEFAULT_IF_ID,
296 	.ioam6_id_wide		= IOAM6_DEFAULT_IF_ID_WIDE,
297 	.ndisc_evict_nocarrier	= 1,
298 };
299 
300 /* Check if link is ready: is it up and is a valid qdisc available */
301 static inline bool addrconf_link_ready(const struct net_device *dev)
302 {
303 	return netif_oper_up(dev) && !qdisc_tx_is_noop(dev);
304 }
305 
306 static void addrconf_del_rs_timer(struct inet6_dev *idev)
307 {
308 	if (del_timer(&idev->rs_timer))
309 		__in6_dev_put(idev);
310 }
311 
312 static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
313 {
314 	if (cancel_delayed_work(&ifp->dad_work))
315 		__in6_ifa_put(ifp);
316 }
317 
318 static void addrconf_mod_rs_timer(struct inet6_dev *idev,
319 				  unsigned long when)
320 {
321 	if (!timer_pending(&idev->rs_timer))
322 		in6_dev_hold(idev);
323 	mod_timer(&idev->rs_timer, jiffies + when);
324 }
325 
326 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
327 				   unsigned long delay)
328 {
329 	in6_ifa_hold(ifp);
330 	if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
331 		in6_ifa_put(ifp);
332 }
333 
334 static int snmp6_alloc_dev(struct inet6_dev *idev)
335 {
336 	int i;
337 
338 	idev->stats.ipv6 = alloc_percpu_gfp(struct ipstats_mib, GFP_KERNEL_ACCOUNT);
339 	if (!idev->stats.ipv6)
340 		goto err_ip;
341 
342 	for_each_possible_cpu(i) {
343 		struct ipstats_mib *addrconf_stats;
344 		addrconf_stats = per_cpu_ptr(idev->stats.ipv6, i);
345 		u64_stats_init(&addrconf_stats->syncp);
346 	}
347 
348 
349 	idev->stats.icmpv6dev = kzalloc(sizeof(struct icmpv6_mib_device),
350 					GFP_KERNEL);
351 	if (!idev->stats.icmpv6dev)
352 		goto err_icmp;
353 	idev->stats.icmpv6msgdev = kzalloc(sizeof(struct icmpv6msg_mib_device),
354 					   GFP_KERNEL_ACCOUNT);
355 	if (!idev->stats.icmpv6msgdev)
356 		goto err_icmpmsg;
357 
358 	return 0;
359 
360 err_icmpmsg:
361 	kfree(idev->stats.icmpv6dev);
362 err_icmp:
363 	free_percpu(idev->stats.ipv6);
364 err_ip:
365 	return -ENOMEM;
366 }
367 
368 static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
369 {
370 	struct inet6_dev *ndev;
371 	int err = -ENOMEM;
372 
373 	ASSERT_RTNL();
374 
375 	if (dev->mtu < IPV6_MIN_MTU && dev != blackhole_netdev)
376 		return ERR_PTR(-EINVAL);
377 
378 	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL_ACCOUNT);
379 	if (!ndev)
380 		return ERR_PTR(err);
381 
382 	rwlock_init(&ndev->lock);
383 	ndev->dev = dev;
384 	INIT_LIST_HEAD(&ndev->addr_list);
385 	timer_setup(&ndev->rs_timer, addrconf_rs_timer, 0);
386 	memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
387 
388 	if (ndev->cnf.stable_secret.initialized)
389 		ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
390 
391 	ndev->cnf.mtu6 = dev->mtu;
392 	ndev->ra_mtu = 0;
393 	ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
394 	if (!ndev->nd_parms) {
395 		kfree(ndev);
396 		return ERR_PTR(err);
397 	}
398 	if (ndev->cnf.forwarding)
399 		dev_disable_lro(dev);
400 	/* We hold a reference to the device */
401 	dev_hold_track(dev, &ndev->dev_tracker, GFP_KERNEL);
402 
403 	if (snmp6_alloc_dev(ndev) < 0) {
404 		netdev_dbg(dev, "%s: cannot allocate memory for statistics\n",
405 			   __func__);
406 		neigh_parms_release(&nd_tbl, ndev->nd_parms);
407 		dev_put_track(dev, &ndev->dev_tracker);
408 		kfree(ndev);
409 		return ERR_PTR(err);
410 	}
411 
412 	if (dev != blackhole_netdev) {
413 		if (snmp6_register_dev(ndev) < 0) {
414 			netdev_dbg(dev, "%s: cannot create /proc/net/dev_snmp6/%s\n",
415 				   __func__, dev->name);
416 			goto err_release;
417 		}
418 	}
419 	/* One reference from device. */
420 	refcount_set(&ndev->refcnt, 1);
421 
422 	if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
423 		ndev->cnf.accept_dad = -1;
424 
425 #if IS_ENABLED(CONFIG_IPV6_SIT)
426 	if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
427 		pr_info("%s: Disabled Multicast RS\n", dev->name);
428 		ndev->cnf.rtr_solicits = 0;
429 	}
430 #endif
431 
432 	INIT_LIST_HEAD(&ndev->tempaddr_list);
433 	ndev->desync_factor = U32_MAX;
434 	if ((dev->flags&IFF_LOOPBACK) ||
435 	    dev->type == ARPHRD_TUNNEL ||
436 	    dev->type == ARPHRD_TUNNEL6 ||
437 	    dev->type == ARPHRD_SIT ||
438 	    dev->type == ARPHRD_NONE) {
439 		ndev->cnf.use_tempaddr = -1;
440 	}
441 
442 	ndev->token = in6addr_any;
443 
444 	if (netif_running(dev) && addrconf_link_ready(dev))
445 		ndev->if_flags |= IF_READY;
446 
447 	ipv6_mc_init_dev(ndev);
448 	ndev->tstamp = jiffies;
449 	if (dev != blackhole_netdev) {
450 		err = addrconf_sysctl_register(ndev);
451 		if (err) {
452 			ipv6_mc_destroy_dev(ndev);
453 			snmp6_unregister_dev(ndev);
454 			goto err_release;
455 		}
456 	}
457 	/* protected by rtnl_lock */
458 	rcu_assign_pointer(dev->ip6_ptr, ndev);
459 
460 	if (dev != blackhole_netdev) {
461 		/* Join interface-local all-node multicast group */
462 		ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allnodes);
463 
464 		/* Join all-node multicast group */
465 		ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
466 
467 		/* Join all-router multicast group if forwarding is set */
468 		if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST))
469 			ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
470 	}
471 	return ndev;
472 
473 err_release:
474 	neigh_parms_release(&nd_tbl, ndev->nd_parms);
475 	ndev->dead = 1;
476 	in6_dev_finish_destroy(ndev);
477 	return ERR_PTR(err);
478 }
479 
480 static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
481 {
482 	struct inet6_dev *idev;
483 
484 	ASSERT_RTNL();
485 
486 	idev = __in6_dev_get(dev);
487 	if (!idev) {
488 		idev = ipv6_add_dev(dev);
489 		if (IS_ERR(idev))
490 			return idev;
491 	}
492 
493 	if (dev->flags&IFF_UP)
494 		ipv6_mc_up(idev);
495 	return idev;
496 }
497 
498 static int inet6_netconf_msgsize_devconf(int type)
499 {
500 	int size =  NLMSG_ALIGN(sizeof(struct netconfmsg))
501 		    + nla_total_size(4);	/* NETCONFA_IFINDEX */
502 	bool all = false;
503 
504 	if (type == NETCONFA_ALL)
505 		all = true;
506 
507 	if (all || type == NETCONFA_FORWARDING)
508 		size += nla_total_size(4);
509 #ifdef CONFIG_IPV6_MROUTE
510 	if (all || type == NETCONFA_MC_FORWARDING)
511 		size += nla_total_size(4);
512 #endif
513 	if (all || type == NETCONFA_PROXY_NEIGH)
514 		size += nla_total_size(4);
515 
516 	if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
517 		size += nla_total_size(4);
518 
519 	return size;
520 }
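
/*
 * Worked example (illustrative): for NETCONFA_ALL with CONFIG_IPV6_MROUTE
 * enabled, the returned size is NLMSG_ALIGN(sizeof(struct netconfmsg)) plus
 * five nla_total_size(4) attributes: IFINDEX, FORWARDING, MC_FORWARDING,
 * PROXY_NEIGH and IGNORE_ROUTES_WITH_LINKDOWN.
 */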
521 
522 static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
523 				      struct ipv6_devconf *devconf, u32 portid,
524 				      u32 seq, int event, unsigned int flags,
525 				      int type)
526 {
527 	struct nlmsghdr  *nlh;
528 	struct netconfmsg *ncm;
529 	bool all = false;
530 
531 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
532 			flags);
533 	if (!nlh)
534 		return -EMSGSIZE;
535 
536 	if (type == NETCONFA_ALL)
537 		all = true;
538 
539 	ncm = nlmsg_data(nlh);
540 	ncm->ncm_family = AF_INET6;
541 
542 	if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
543 		goto nla_put_failure;
544 
545 	if (!devconf)
546 		goto out;
547 
548 	if ((all || type == NETCONFA_FORWARDING) &&
549 	    nla_put_s32(skb, NETCONFA_FORWARDING, devconf->forwarding) < 0)
550 		goto nla_put_failure;
551 #ifdef CONFIG_IPV6_MROUTE
552 	if ((all || type == NETCONFA_MC_FORWARDING) &&
553 	    nla_put_s32(skb, NETCONFA_MC_FORWARDING,
554 			atomic_read(&devconf->mc_forwarding)) < 0)
555 		goto nla_put_failure;
556 #endif
557 	if ((all || type == NETCONFA_PROXY_NEIGH) &&
558 	    nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
559 		goto nla_put_failure;
560 
561 	if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
562 	    nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
563 			devconf->ignore_routes_with_linkdown) < 0)
564 		goto nla_put_failure;
565 
566 out:
567 	nlmsg_end(skb, nlh);
568 	return 0;
569 
570 nla_put_failure:
571 	nlmsg_cancel(skb, nlh);
572 	return -EMSGSIZE;
573 }
574 
575 void inet6_netconf_notify_devconf(struct net *net, int event, int type,
576 				  int ifindex, struct ipv6_devconf *devconf)
577 {
578 	struct sk_buff *skb;
579 	int err = -ENOBUFS;
580 
581 	skb = nlmsg_new(inet6_netconf_msgsize_devconf(type), GFP_KERNEL);
582 	if (!skb)
583 		goto errout;
584 
585 	err = inet6_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
586 					 event, 0, type);
587 	if (err < 0) {
588 		/* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
589 		WARN_ON(err == -EMSGSIZE);
590 		kfree_skb(skb);
591 		goto errout;
592 	}
593 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_KERNEL);
594 	return;
595 errout:
596 	rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err);
597 }
598 
599 static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
600 	[NETCONFA_IFINDEX]	= { .len = sizeof(int) },
601 	[NETCONFA_FORWARDING]	= { .len = sizeof(int) },
602 	[NETCONFA_PROXY_NEIGH]	= { .len = sizeof(int) },
603 	[NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN]	= { .len = sizeof(int) },
604 };
605 
606 static int inet6_netconf_valid_get_req(struct sk_buff *skb,
607 				       const struct nlmsghdr *nlh,
608 				       struct nlattr **tb,
609 				       struct netlink_ext_ack *extack)
610 {
611 	int i, err;
612 
613 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) {
614 		NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf get request");
615 		return -EINVAL;
616 	}
617 
618 	if (!netlink_strict_get_check(skb))
619 		return nlmsg_parse_deprecated(nlh, sizeof(struct netconfmsg),
620 					      tb, NETCONFA_MAX,
621 					      devconf_ipv6_policy, extack);
622 
623 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct netconfmsg),
624 					    tb, NETCONFA_MAX,
625 					    devconf_ipv6_policy, extack);
626 	if (err)
627 		return err;
628 
629 	for (i = 0; i <= NETCONFA_MAX; i++) {
630 		if (!tb[i])
631 			continue;
632 
633 		switch (i) {
634 		case NETCONFA_IFINDEX:
635 			break;
636 		default:
637 			NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in netconf get request");
638 			return -EINVAL;
639 		}
640 	}
641 
642 	return 0;
643 }
644 
645 static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
646 				     struct nlmsghdr *nlh,
647 				     struct netlink_ext_ack *extack)
648 {
649 	struct net *net = sock_net(in_skb->sk);
650 	struct nlattr *tb[NETCONFA_MAX+1];
651 	struct inet6_dev *in6_dev = NULL;
652 	struct net_device *dev = NULL;
653 	struct sk_buff *skb;
654 	struct ipv6_devconf *devconf;
655 	int ifindex;
656 	int err;
657 
658 	err = inet6_netconf_valid_get_req(in_skb, nlh, tb, extack);
659 	if (err < 0)
660 		return err;
661 
662 	if (!tb[NETCONFA_IFINDEX])
663 		return -EINVAL;
664 
665 	err = -EINVAL;
666 	ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
667 	switch (ifindex) {
668 	case NETCONFA_IFINDEX_ALL:
669 		devconf = net->ipv6.devconf_all;
670 		break;
671 	case NETCONFA_IFINDEX_DEFAULT:
672 		devconf = net->ipv6.devconf_dflt;
673 		break;
674 	default:
675 		dev = dev_get_by_index(net, ifindex);
676 		if (!dev)
677 			return -EINVAL;
678 		in6_dev = in6_dev_get(dev);
679 		if (!in6_dev)
680 			goto errout;
681 		devconf = &in6_dev->cnf;
682 		break;
683 	}
684 
685 	err = -ENOBUFS;
686 	skb = nlmsg_new(inet6_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
687 	if (!skb)
688 		goto errout;
689 
690 	err = inet6_netconf_fill_devconf(skb, ifindex, devconf,
691 					 NETLINK_CB(in_skb).portid,
692 					 nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
693 					 NETCONFA_ALL);
694 	if (err < 0) {
695 		/* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
696 		WARN_ON(err == -EMSGSIZE);
697 		kfree_skb(skb);
698 		goto errout;
699 	}
700 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
701 errout:
702 	if (in6_dev)
703 		in6_dev_put(in6_dev);
704 	dev_put(dev);
705 	return err;
706 }
707 
708 static int inet6_netconf_dump_devconf(struct sk_buff *skb,
709 				      struct netlink_callback *cb)
710 {
711 	const struct nlmsghdr *nlh = cb->nlh;
712 	struct net *net = sock_net(skb->sk);
713 	int h, s_h;
714 	int idx, s_idx;
715 	struct net_device *dev;
716 	struct inet6_dev *idev;
717 	struct hlist_head *head;
718 
719 	if (cb->strict_check) {
720 		struct netlink_ext_ack *extack = cb->extack;
721 		struct netconfmsg *ncm;
722 
723 		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) {
724 			NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf dump request");
725 			return -EINVAL;
726 		}
727 
728 		if (nlmsg_attrlen(nlh, sizeof(*ncm))) {
729 			NL_SET_ERR_MSG_MOD(extack, "Invalid data after header in netconf dump request");
730 			return -EINVAL;
731 		}
732 	}
733 
734 	s_h = cb->args[0];
735 	s_idx = idx = cb->args[1];
736 
737 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
738 		idx = 0;
739 		head = &net->dev_index_head[h];
740 		rcu_read_lock();
741 		cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
742 			  net->dev_base_seq;
743 		hlist_for_each_entry_rcu(dev, head, index_hlist) {
744 			if (idx < s_idx)
745 				goto cont;
746 			idev = __in6_dev_get(dev);
747 			if (!idev)
748 				goto cont;
749 
750 			if (inet6_netconf_fill_devconf(skb, dev->ifindex,
751 						       &idev->cnf,
752 						       NETLINK_CB(cb->skb).portid,
753 						       nlh->nlmsg_seq,
754 						       RTM_NEWNETCONF,
755 						       NLM_F_MULTI,
756 						       NETCONFA_ALL) < 0) {
757 				rcu_read_unlock();
758 				goto done;
759 			}
760 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
761 cont:
762 			idx++;
763 		}
764 		rcu_read_unlock();
765 	}
766 	if (h == NETDEV_HASHENTRIES) {
767 		if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
768 					       net->ipv6.devconf_all,
769 					       NETLINK_CB(cb->skb).portid,
770 					       nlh->nlmsg_seq,
771 					       RTM_NEWNETCONF, NLM_F_MULTI,
772 					       NETCONFA_ALL) < 0)
773 			goto done;
774 		else
775 			h++;
776 	}
777 	if (h == NETDEV_HASHENTRIES + 1) {
778 		if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
779 					       net->ipv6.devconf_dflt,
780 					       NETLINK_CB(cb->skb).portid,
781 					       nlh->nlmsg_seq,
782 					       RTM_NEWNETCONF, NLM_F_MULTI,
783 					       NETCONFA_ALL) < 0)
784 			goto done;
785 		else
786 			h++;
787 	}
788 done:
789 	cb->args[0] = h;
790 	cb->args[1] = idx;
791 
792 	return skb->len;
793 }
794 
795 #ifdef CONFIG_SYSCTL
796 static void dev_forward_change(struct inet6_dev *idev)
797 {
798 	struct net_device *dev;
799 	struct inet6_ifaddr *ifa;
800 	LIST_HEAD(tmp_addr_list);
801 
802 	if (!idev)
803 		return;
804 	dev = idev->dev;
805 	if (idev->cnf.forwarding)
806 		dev_disable_lro(dev);
807 	if (dev->flags & IFF_MULTICAST) {
808 		if (idev->cnf.forwarding) {
809 			ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
810 			ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allrouters);
811 			ipv6_dev_mc_inc(dev, &in6addr_sitelocal_allrouters);
812 		} else {
813 			ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters);
814 			ipv6_dev_mc_dec(dev, &in6addr_interfacelocal_allrouters);
815 			ipv6_dev_mc_dec(dev, &in6addr_sitelocal_allrouters);
816 		}
817 	}
818 
819 	read_lock_bh(&idev->lock);
820 	list_for_each_entry(ifa, &idev->addr_list, if_list) {
821 		if (ifa->flags&IFA_F_TENTATIVE)
822 			continue;
823 		list_add_tail(&ifa->if_list_aux, &tmp_addr_list);
824 	}
825 	read_unlock_bh(&idev->lock);
826 
827 	while (!list_empty(&tmp_addr_list)) {
828 		ifa = list_first_entry(&tmp_addr_list,
829 				       struct inet6_ifaddr, if_list_aux);
830 		list_del(&ifa->if_list_aux);
831 		if (idev->cnf.forwarding)
832 			addrconf_join_anycast(ifa);
833 		else
834 			addrconf_leave_anycast(ifa);
835 	}
836 
837 	inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
838 				     NETCONFA_FORWARDING,
839 				     dev->ifindex, &idev->cnf);
840 }
841 
842 
843 static void addrconf_forward_change(struct net *net, __s32 newf)
844 {
845 	struct net_device *dev;
846 	struct inet6_dev *idev;
847 
848 	for_each_netdev(net, dev) {
849 		idev = __in6_dev_get(dev);
850 		if (idev) {
851 			int changed = (!idev->cnf.forwarding) ^ (!newf);
852 			idev->cnf.forwarding = newf;
853 			if (changed)
854 				dev_forward_change(idev);
855 		}
856 	}
857 }
858 
859 static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
860 {
861 	struct net *net;
862 	int old;
863 
864 	if (!rtnl_trylock())
865 		return restart_syscall();
866 
867 	net = (struct net *)table->extra2;
868 	old = *p;
869 	*p = newf;
870 
871 	if (p == &net->ipv6.devconf_dflt->forwarding) {
872 		if ((!newf) ^ (!old))
873 			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
874 						     NETCONFA_FORWARDING,
875 						     NETCONFA_IFINDEX_DEFAULT,
876 						     net->ipv6.devconf_dflt);
877 		rtnl_unlock();
878 		return 0;
879 	}
880 
881 	if (p == &net->ipv6.devconf_all->forwarding) {
882 		int old_dflt = net->ipv6.devconf_dflt->forwarding;
883 
884 		net->ipv6.devconf_dflt->forwarding = newf;
885 		if ((!newf) ^ (!old_dflt))
886 			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
887 						     NETCONFA_FORWARDING,
888 						     NETCONFA_IFINDEX_DEFAULT,
889 						     net->ipv6.devconf_dflt);
890 
891 		addrconf_forward_change(net, newf);
892 		if ((!newf) ^ (!old))
893 			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
894 						     NETCONFA_FORWARDING,
895 						     NETCONFA_IFINDEX_ALL,
896 						     net->ipv6.devconf_all);
897 	} else if ((!newf) ^ (!old))
898 		dev_forward_change((struct inet6_dev *)table->extra1);
899 	rtnl_unlock();
900 
901 	if (newf)
902 		rt6_purge_dflt_routers(net);
903 	return 1;
904 }
905 
906 static void addrconf_linkdown_change(struct net *net, __s32 newf)
907 {
908 	struct net_device *dev;
909 	struct inet6_dev *idev;
910 
911 	for_each_netdev(net, dev) {
912 		idev = __in6_dev_get(dev);
913 		if (idev) {
914 			int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf);
915 
916 			idev->cnf.ignore_routes_with_linkdown = newf;
917 			if (changed)
918 				inet6_netconf_notify_devconf(dev_net(dev),
919 							     RTM_NEWNETCONF,
920 							     NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
921 							     dev->ifindex,
922 							     &idev->cnf);
923 		}
924 	}
925 }
926 
927 static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
928 {
929 	struct net *net;
930 	int old;
931 
932 	if (!rtnl_trylock())
933 		return restart_syscall();
934 
935 	net = (struct net *)table->extra2;
936 	old = *p;
937 	*p = newf;
938 
939 	if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) {
940 		if ((!newf) ^ (!old))
941 			inet6_netconf_notify_devconf(net,
942 						     RTM_NEWNETCONF,
943 						     NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
944 						     NETCONFA_IFINDEX_DEFAULT,
945 						     net->ipv6.devconf_dflt);
946 		rtnl_unlock();
947 		return 0;
948 	}
949 
950 	if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) {
951 		net->ipv6.devconf_dflt->ignore_routes_with_linkdown = newf;
952 		addrconf_linkdown_change(net, newf);
953 		if ((!newf) ^ (!old))
954 			inet6_netconf_notify_devconf(net,
955 						     RTM_NEWNETCONF,
956 						     NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
957 						     NETCONFA_IFINDEX_ALL,
958 						     net->ipv6.devconf_all);
959 	}
960 	rtnl_unlock();
961 
962 	return 1;
963 }
964 
965 #endif
966 
967 /* Nobody refers to this ifaddr, destroy it */
968 void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
969 {
970 	WARN_ON(!hlist_unhashed(&ifp->addr_lst));
971 
972 #ifdef NET_REFCNT_DEBUG
973 	pr_debug("%s\n", __func__);
974 #endif
975 
976 	in6_dev_put(ifp->idev);
977 
978 	if (cancel_delayed_work(&ifp->dad_work))
979 		pr_notice("delayed DAD work was pending while freeing ifa=%p\n",
980 			  ifp);
981 
982 	if (ifp->state != INET6_IFADDR_STATE_DEAD) {
983 		pr_warn("Freeing alive inet6 address %p\n", ifp);
984 		return;
985 	}
986 
987 	kfree_rcu(ifp, rcu);
988 }
989 
990 static void
991 ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
992 {
993 	struct list_head *p;
994 	int ifp_scope = ipv6_addr_src_scope(&ifp->addr);
995 
996 	/*
997 	 * Each device address list is sorted in order of scope -
998 	 * global before linklocal.
999 	 */
1000 	list_for_each(p, &idev->addr_list) {
1001 		struct inet6_ifaddr *ifa
1002 			= list_entry(p, struct inet6_ifaddr, if_list);
1003 		if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr))
1004 			break;
1005 	}
1006 
1007 	list_add_tail_rcu(&ifp->if_list, p);
1008 }
1009 
1010 static u32 inet6_addr_hash(const struct net *net, const struct in6_addr *addr)
1011 {
1012 	u32 val = ipv6_addr_hash(addr) ^ net_hash_mix(net);
1013 
1014 	return hash_32(val, IN6_ADDR_HSIZE_SHIFT);
1015 }
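
/*
 * Illustrative note (not from the original source): the hash selects one of
 * IN6_ADDR_HSIZE (1 << 8 == 256) buckets in the per-namespace inet6_addr_lst
 * table; mixing in net_hash_mix(net) keeps the same address from always
 * landing in the same chain across network namespaces.
 */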
1016 
1017 static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
1018 			       struct net_device *dev, unsigned int hash)
1019 {
1020 	struct inet6_ifaddr *ifp;
1021 
1022 	hlist_for_each_entry(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
1023 		if (ipv6_addr_equal(&ifp->addr, addr)) {
1024 			if (!dev || ifp->idev->dev == dev)
1025 				return true;
1026 		}
1027 	}
1028 	return false;
1029 }
1030 
1031 static int ipv6_add_addr_hash(struct net_device *dev, struct inet6_ifaddr *ifa)
1032 {
1033 	struct net *net = dev_net(dev);
1034 	unsigned int hash = inet6_addr_hash(net, &ifa->addr);
1035 	int err = 0;
1036 
1037 	spin_lock(&net->ipv6.addrconf_hash_lock);
1038 
1039 	/* Ignore adding duplicate addresses on an interface */
1040 	if (ipv6_chk_same_addr(net, &ifa->addr, dev, hash)) {
1041 		netdev_dbg(dev, "ipv6_add_addr: already assigned\n");
1042 		err = -EEXIST;
1043 	} else {
1044 		hlist_add_head_rcu(&ifa->addr_lst, &net->ipv6.inet6_addr_lst[hash]);
1045 	}
1046 
1047 	spin_unlock(&net->ipv6.addrconf_hash_lock);
1048 
1049 	return err;
1050 }
1051 
1052 /* On success it returns ifp with increased reference count */
1053 
1054 static struct inet6_ifaddr *
1055 ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg,
1056 	      bool can_block, struct netlink_ext_ack *extack)
1057 {
1058 	gfp_t gfp_flags = can_block ? GFP_KERNEL : GFP_ATOMIC;
1059 	int addr_type = ipv6_addr_type(cfg->pfx);
1060 	struct net *net = dev_net(idev->dev);
1061 	struct inet6_ifaddr *ifa = NULL;
1062 	struct fib6_info *f6i = NULL;
1063 	int err = 0;
1064 
1065 	if (addr_type == IPV6_ADDR_ANY ||
1066 	    (addr_type & IPV6_ADDR_MULTICAST &&
1067 	     !(cfg->ifa_flags & IFA_F_MCAUTOJOIN)) ||
1068 	    (!(idev->dev->flags & IFF_LOOPBACK) &&
1069 	     !netif_is_l3_master(idev->dev) &&
1070 	     addr_type & IPV6_ADDR_LOOPBACK))
1071 		return ERR_PTR(-EADDRNOTAVAIL);
1072 
1073 	if (idev->dead) {
1074 		err = -ENODEV;			/*XXX*/
1075 		goto out;
1076 	}
1077 
1078 	if (idev->cnf.disable_ipv6) {
1079 		err = -EACCES;
1080 		goto out;
1081 	}
1082 
1083 	/* validator notifier needs to be blocking;
1084 	 * do not call in atomic context
1085 	 */
1086 	if (can_block) {
1087 		struct in6_validator_info i6vi = {
1088 			.i6vi_addr = *cfg->pfx,
1089 			.i6vi_dev = idev,
1090 			.extack = extack,
1091 		};
1092 
1093 		err = inet6addr_validator_notifier_call_chain(NETDEV_UP, &i6vi);
1094 		err = notifier_to_errno(err);
1095 		if (err < 0)
1096 			goto out;
1097 	}
1098 
1099 	ifa = kzalloc(sizeof(*ifa), gfp_flags | __GFP_ACCOUNT);
1100 	if (!ifa) {
1101 		err = -ENOBUFS;
1102 		goto out;
1103 	}
1104 
1105 	f6i = addrconf_f6i_alloc(net, idev, cfg->pfx, false, gfp_flags);
1106 	if (IS_ERR(f6i)) {
1107 		err = PTR_ERR(f6i);
1108 		f6i = NULL;
1109 		goto out;
1110 	}
1111 
1112 	if (net->ipv6.devconf_all->disable_policy ||
1113 	    idev->cnf.disable_policy)
1114 		f6i->dst_nopolicy = true;
1115 
1116 	neigh_parms_data_state_setall(idev->nd_parms);
1117 
1118 	ifa->addr = *cfg->pfx;
1119 	if (cfg->peer_pfx)
1120 		ifa->peer_addr = *cfg->peer_pfx;
1121 
1122 	spin_lock_init(&ifa->lock);
1123 	INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work);
1124 	INIT_HLIST_NODE(&ifa->addr_lst);
1125 	ifa->scope = cfg->scope;
1126 	ifa->prefix_len = cfg->plen;
1127 	ifa->rt_priority = cfg->rt_priority;
1128 	ifa->flags = cfg->ifa_flags;
1129 	ifa->ifa_proto = cfg->ifa_proto;
1130 	/* No need to add the TENTATIVE flag for addresses with NODAD */
1131 	if (!(cfg->ifa_flags & IFA_F_NODAD))
1132 		ifa->flags |= IFA_F_TENTATIVE;
1133 	ifa->valid_lft = cfg->valid_lft;
1134 	ifa->prefered_lft = cfg->preferred_lft;
1135 	ifa->cstamp = ifa->tstamp = jiffies;
1136 	ifa->tokenized = false;
1137 
1138 	ifa->rt = f6i;
1139 
1140 	ifa->idev = idev;
1141 	in6_dev_hold(idev);
1142 
1143 	/* For caller */
1144 	refcount_set(&ifa->refcnt, 1);
1145 
1146 	rcu_read_lock_bh();
1147 
1148 	err = ipv6_add_addr_hash(idev->dev, ifa);
1149 	if (err < 0) {
1150 		rcu_read_unlock_bh();
1151 		goto out;
1152 	}
1153 
1154 	write_lock(&idev->lock);
1155 
1156 	/* Add to inet6_dev unicast addr list. */
1157 	ipv6_link_dev_addr(idev, ifa);
1158 
1159 	if (ifa->flags&IFA_F_TEMPORARY) {
1160 		list_add(&ifa->tmp_list, &idev->tempaddr_list);
1161 		in6_ifa_hold(ifa);
1162 	}
1163 
1164 	in6_ifa_hold(ifa);
1165 	write_unlock(&idev->lock);
1166 
1167 	rcu_read_unlock_bh();
1168 
1169 	inet6addr_notifier_call_chain(NETDEV_UP, ifa);
1170 out:
1171 	if (unlikely(err < 0)) {
1172 		fib6_info_release(f6i);
1173 
1174 		if (ifa) {
1175 			if (ifa->idev)
1176 				in6_dev_put(ifa->idev);
1177 			kfree(ifa);
1178 		}
1179 		ifa = ERR_PTR(err);
1180 	}
1181 
1182 	return ifa;
1183 }
1184 
1185 enum cleanup_prefix_rt_t {
1186 	CLEANUP_PREFIX_RT_NOP,    /* no cleanup action for prefix route */
1187 	CLEANUP_PREFIX_RT_DEL,    /* delete the prefix route */
1188 	CLEANUP_PREFIX_RT_EXPIRE, /* update the lifetime of the prefix route */
1189 };
1190 
1191 /*
1192  * Check whether the prefix for ifp would still need a prefix route
1193  * after deleting ifp. The function returns one of the CLEANUP_PREFIX_RT_*
1194  * constants.
1195  *
1196  * 1) we don't purge prefix if address was not permanent.
1197  *    prefix is managed by its own lifetime.
1198  * 2) we also don't purge, if the address was IFA_F_NOPREFIXROUTE.
1199  * 3) if there are no addresses, delete prefix.
1200  * 4) if there are still other permanent address(es),
1201  *    corresponding prefix is still permanent.
1202  * 5) if there are still other addresses with IFA_F_NOPREFIXROUTE,
1203  *    don't purge the prefix, assume user space is managing it.
1204  * 6) otherwise, update prefix lifetime to the
1205  *    longest valid lifetime among the corresponding
1206  *    addresses on the device.
1207  *    Note: subsequent RA will update lifetime.
1208  **/
1209 static enum cleanup_prefix_rt_t
1210 check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
1211 {
1212 	struct inet6_ifaddr *ifa;
1213 	struct inet6_dev *idev = ifp->idev;
1214 	unsigned long lifetime;
1215 	enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_DEL;
1216 
1217 	*expires = jiffies;
1218 
1219 	list_for_each_entry(ifa, &idev->addr_list, if_list) {
1220 		if (ifa == ifp)
1221 			continue;
1222 		if (ifa->prefix_len != ifp->prefix_len ||
1223 		    !ipv6_prefix_equal(&ifa->addr, &ifp->addr,
1224 				       ifp->prefix_len))
1225 			continue;
1226 		if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
1227 			return CLEANUP_PREFIX_RT_NOP;
1228 
1229 		action = CLEANUP_PREFIX_RT_EXPIRE;
1230 
1231 		spin_lock(&ifa->lock);
1232 
1233 		lifetime = addrconf_timeout_fixup(ifa->valid_lft, HZ);
1234 		/*
1235 		 * Note: Because this address is
1236 		 * not permanent, lifetime <
1237 		 * LONG_MAX / HZ here.
1238 		 */
1239 		if (time_before(*expires, ifa->tstamp + lifetime * HZ))
1240 			*expires = ifa->tstamp + lifetime * HZ;
1241 		spin_unlock(&ifa->lock);
1242 	}
1243 
1244 	return action;
1245 }
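
/*
 * Illustrative example (the addresses are assumptions, not from the source):
 * suppose the permanent address 2001:db8::1/64 is deleted while a
 * non-permanent 2001:db8::2/64 with about 600 seconds of valid lifetime
 * left remains on the device.  The loop above finds no sibling that is
 * PERMANENT or NOPREFIXROUTE, so it returns CLEANUP_PREFIX_RT_EXPIRE and
 * pushes *expires out to roughly ifa->tstamp + 600 * HZ; if no sibling
 * shared the prefix at all, it would return CLEANUP_PREFIX_RT_DEL.
 */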
1246 
1247 static void
1248 cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires,
1249 		     bool del_rt, bool del_peer)
1250 {
1251 	struct fib6_info *f6i;
1252 
1253 	f6i = addrconf_get_prefix_route(del_peer ? &ifp->peer_addr : &ifp->addr,
1254 					ifp->prefix_len,
1255 					ifp->idev->dev, 0, RTF_DEFAULT, true);
1256 	if (f6i) {
1257 		if (del_rt)
1258 			ip6_del_rt(dev_net(ifp->idev->dev), f6i, false);
1259 		else {
1260 			if (!(f6i->fib6_flags & RTF_EXPIRES))
1261 				fib6_set_expires(f6i, expires);
1262 			fib6_info_release(f6i);
1263 		}
1264 	}
1265 }
1266 
1267 
1268 /* This function takes a referenced ifp and releases that reference before returning */
1269 
1270 static void ipv6_del_addr(struct inet6_ifaddr *ifp)
1271 {
1272 	enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP;
1273 	struct net *net = dev_net(ifp->idev->dev);
1274 	unsigned long expires;
1275 	int state;
1276 
1277 	ASSERT_RTNL();
1278 
1279 	spin_lock_bh(&ifp->lock);
1280 	state = ifp->state;
1281 	ifp->state = INET6_IFADDR_STATE_DEAD;
1282 	spin_unlock_bh(&ifp->lock);
1283 
1284 	if (state == INET6_IFADDR_STATE_DEAD)
1285 		goto out;
1286 
1287 	spin_lock_bh(&net->ipv6.addrconf_hash_lock);
1288 	hlist_del_init_rcu(&ifp->addr_lst);
1289 	spin_unlock_bh(&net->ipv6.addrconf_hash_lock);
1290 
1291 	write_lock_bh(&ifp->idev->lock);
1292 
1293 	if (ifp->flags&IFA_F_TEMPORARY) {
1294 		list_del(&ifp->tmp_list);
1295 		if (ifp->ifpub) {
1296 			in6_ifa_put(ifp->ifpub);
1297 			ifp->ifpub = NULL;
1298 		}
1299 		__in6_ifa_put(ifp);
1300 	}
1301 
1302 	if (ifp->flags & IFA_F_PERMANENT && !(ifp->flags & IFA_F_NOPREFIXROUTE))
1303 		action = check_cleanup_prefix_route(ifp, &expires);
1304 
1305 	list_del_rcu(&ifp->if_list);
1306 	__in6_ifa_put(ifp);
1307 
1308 	write_unlock_bh(&ifp->idev->lock);
1309 
1310 	addrconf_del_dad_work(ifp);
1311 
1312 	ipv6_ifa_notify(RTM_DELADDR, ifp);
1313 
1314 	inet6addr_notifier_call_chain(NETDEV_DOWN, ifp);
1315 
1316 	if (action != CLEANUP_PREFIX_RT_NOP) {
1317 		cleanup_prefix_route(ifp, expires,
1318 			action == CLEANUP_PREFIX_RT_DEL, false);
1319 	}
1320 
1321 	/* clean up prefsrc entries */
1322 	rt6_remove_prefsrc(ifp);
1323 out:
1324 	in6_ifa_put(ifp);
1325 }
1326 
1327 static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, bool block)
1328 {
1329 	struct inet6_dev *idev = ifp->idev;
1330 	unsigned long tmp_tstamp, age;
1331 	unsigned long regen_advance;
1332 	unsigned long now = jiffies;
1333 	s32 cnf_temp_preferred_lft;
1334 	struct inet6_ifaddr *ift;
1335 	struct ifa6_config cfg;
1336 	long max_desync_factor;
1337 	struct in6_addr addr;
1338 	int ret = 0;
1339 
1340 	write_lock_bh(&idev->lock);
1341 
1342 retry:
1343 	in6_dev_hold(idev);
1344 	if (idev->cnf.use_tempaddr <= 0) {
1345 		write_unlock_bh(&idev->lock);
1346 		pr_info("%s: use_tempaddr is disabled\n", __func__);
1347 		in6_dev_put(idev);
1348 		ret = -1;
1349 		goto out;
1350 	}
1351 	spin_lock_bh(&ifp->lock);
1352 	if (ifp->regen_count++ >= idev->cnf.regen_max_retry) {
1353 		idev->cnf.use_tempaddr = -1;	/*XXX*/
1354 		spin_unlock_bh(&ifp->lock);
1355 		write_unlock_bh(&idev->lock);
1356 		pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
1357 			__func__);
1358 		in6_dev_put(idev);
1359 		ret = -1;
1360 		goto out;
1361 	}
1362 	in6_ifa_hold(ifp);
1363 	memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
1364 	ipv6_gen_rnd_iid(&addr);
1365 
1366 	age = (now - ifp->tstamp) / HZ;
1367 
1368 	regen_advance = idev->cnf.regen_max_retry *
1369 			idev->cnf.dad_transmits *
1370 			max(NEIGH_VAR(idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
1371 
1372 	/* recalculate max_desync_factor each time and update
1373 	 * idev->desync_factor if it's larger
1374 	 */
1375 	cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
1376 	max_desync_factor = min_t(__u32,
1377 				  idev->cnf.max_desync_factor,
1378 				  cnf_temp_preferred_lft - regen_advance);
1379 
1380 	if (unlikely(idev->desync_factor > max_desync_factor)) {
1381 		if (max_desync_factor > 0) {
1382 			get_random_bytes(&idev->desync_factor,
1383 					 sizeof(idev->desync_factor));
1384 			idev->desync_factor %= max_desync_factor;
1385 		} else {
1386 			idev->desync_factor = 0;
1387 		}
1388 	}
1389 
1390 	memset(&cfg, 0, sizeof(cfg));
1391 	cfg.valid_lft = min_t(__u32, ifp->valid_lft,
1392 			      idev->cnf.temp_valid_lft + age);
1393 	cfg.preferred_lft = cnf_temp_preferred_lft + age - idev->desync_factor;
1394 	cfg.preferred_lft = min_t(__u32, ifp->prefered_lft, cfg.preferred_lft);
1395 
1396 	cfg.plen = ifp->prefix_len;
1397 	tmp_tstamp = ifp->tstamp;
1398 	spin_unlock_bh(&ifp->lock);
1399 
1400 	write_unlock_bh(&idev->lock);
1401 
1402 	/* A temporary address is created only if this calculated Preferred
1403 	 * Lifetime is greater than REGEN_ADVANCE time units.  In particular,
1404 	 * an implementation must not create a temporary address with a zero
1405 	 * Preferred Lifetime.
1406 	 * Use age calculation as in addrconf_verify to avoid unnecessary
1407 	 * temporary addresses being generated.
1408 	 */
1409 	age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
1410 	if (cfg.preferred_lft <= regen_advance + age) {
1411 		in6_ifa_put(ifp);
1412 		in6_dev_put(idev);
1413 		ret = -1;
1414 		goto out;
1415 	}
1416 
1417 	cfg.ifa_flags = IFA_F_TEMPORARY;
1418 	/* set in addrconf_prefix_rcv() */
1419 	if (ifp->flags & IFA_F_OPTIMISTIC)
1420 		cfg.ifa_flags |= IFA_F_OPTIMISTIC;
1421 
1422 	cfg.pfx = &addr;
1423 	cfg.scope = ipv6_addr_scope(cfg.pfx);
1424 
1425 	ift = ipv6_add_addr(idev, &cfg, block, NULL);
1426 	if (IS_ERR(ift)) {
1427 		in6_ifa_put(ifp);
1428 		in6_dev_put(idev);
1429 		pr_info("%s: retry temporary address regeneration\n", __func__);
1430 		write_lock_bh(&idev->lock);
1431 		goto retry;
1432 	}
1433 
1434 	spin_lock_bh(&ift->lock);
1435 	ift->ifpub = ifp;
1436 	ift->cstamp = now;
1437 	ift->tstamp = tmp_tstamp;
1438 	spin_unlock_bh(&ift->lock);
1439 
1440 	addrconf_dad_start(ift);
1441 	in6_ifa_put(ift);
1442 	in6_dev_put(idev);
1443 out:
1444 	return ret;
1445 }
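
/*
 * Worked example (illustrative; the concrete numbers are assumptions):
 * with the default temp_valid_lft of 7 days and temp_prefered_lft of
 * 1 day, regenerating for a public address that is one hour old gives
 *
 *	cfg.valid_lft     = min(ifp->valid_lft, 7 days + 1 hour)
 *	cfg.preferred_lft = min(ifp->prefered_lft,
 *				1 day + 1 hour - idev->desync_factor)
 *
 * and the temporary address is only created when that preferred lifetime
 * still exceeds regen_advance plus the fuzz-adjusted age, so a nearly
 * expired public address does not spawn a useless temporary one.
 */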
1446 
1447 /*
1448  *	Choose an appropriate source address (RFC3484)
1449  */
1450 enum {
1451 	IPV6_SADDR_RULE_INIT = 0,
1452 	IPV6_SADDR_RULE_LOCAL,
1453 	IPV6_SADDR_RULE_SCOPE,
1454 	IPV6_SADDR_RULE_PREFERRED,
1455 #ifdef CONFIG_IPV6_MIP6
1456 	IPV6_SADDR_RULE_HOA,
1457 #endif
1458 	IPV6_SADDR_RULE_OIF,
1459 	IPV6_SADDR_RULE_LABEL,
1460 	IPV6_SADDR_RULE_PRIVACY,
1461 	IPV6_SADDR_RULE_ORCHID,
1462 	IPV6_SADDR_RULE_PREFIX,
1463 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1464 	IPV6_SADDR_RULE_NOT_OPTIMISTIC,
1465 #endif
1466 	IPV6_SADDR_RULE_MAX
1467 };
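
/*
 * Illustrative note (not from the original source): the rules above are
 * evaluated in the order listed, and the first rule whose result differs
 * between two candidates decides.  For example, with use_tempaddr >= 2 on
 * the interface, a temporary and a public address that tie on every
 * earlier rule are separated by IPV6_SADDR_RULE_PRIVACY, which then
 * prefers the temporary one.
 */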
1468 
1469 struct ipv6_saddr_score {
1470 	int			rule;
1471 	int			addr_type;
1472 	struct inet6_ifaddr	*ifa;
1473 	DECLARE_BITMAP(scorebits, IPV6_SADDR_RULE_MAX);
1474 	int			scopedist;
1475 	int			matchlen;
1476 };
1477 
1478 struct ipv6_saddr_dst {
1479 	const struct in6_addr *addr;
1480 	int ifindex;
1481 	int scope;
1482 	int label;
1483 	unsigned int prefs;
1484 };
1485 
1486 static inline int ipv6_saddr_preferred(int type)
1487 {
1488 	if (type & (IPV6_ADDR_MAPPED|IPV6_ADDR_COMPATv4|IPV6_ADDR_LOOPBACK))
1489 		return 1;
1490 	return 0;
1491 }
1492 
1493 static bool ipv6_use_optimistic_addr(struct net *net,
1494 				     struct inet6_dev *idev)
1495 {
1496 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1497 	if (!idev)
1498 		return false;
1499 	if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
1500 		return false;
1501 	if (!net->ipv6.devconf_all->use_optimistic && !idev->cnf.use_optimistic)
1502 		return false;
1503 
1504 	return true;
1505 #else
1506 	return false;
1507 #endif
1508 }
1509 
1510 static bool ipv6_allow_optimistic_dad(struct net *net,
1511 				      struct inet6_dev *idev)
1512 {
1513 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1514 	if (!idev)
1515 		return false;
1516 	if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
1517 		return false;
1518 
1519 	return true;
1520 #else
1521 	return false;
1522 #endif
1523 }
1524 
1525 static int ipv6_get_saddr_eval(struct net *net,
1526 			       struct ipv6_saddr_score *score,
1527 			       struct ipv6_saddr_dst *dst,
1528 			       int i)
1529 {
1530 	int ret;
1531 
1532 	if (i <= score->rule) {
1533 		switch (i) {
1534 		case IPV6_SADDR_RULE_SCOPE:
1535 			ret = score->scopedist;
1536 			break;
1537 		case IPV6_SADDR_RULE_PREFIX:
1538 			ret = score->matchlen;
1539 			break;
1540 		default:
1541 			ret = !!test_bit(i, score->scorebits);
1542 		}
1543 		goto out;
1544 	}
1545 
1546 	switch (i) {
1547 	case IPV6_SADDR_RULE_INIT:
1548 		/* Rule 0: remember if hiscore is not ready yet */
1549 		ret = !!score->ifa;
1550 		break;
1551 	case IPV6_SADDR_RULE_LOCAL:
1552 		/* Rule 1: Prefer same address */
1553 		ret = ipv6_addr_equal(&score->ifa->addr, dst->addr);
1554 		break;
1555 	case IPV6_SADDR_RULE_SCOPE:
1556 		/* Rule 2: Prefer appropriate scope
1557 		 *
1558 		 *      ret
1559 		 *       ^
1560 		 *    -1 |  d 15
1561 		 *    ---+--+-+---> scope
1562 		 *       |
1563 		 *       |             d is scope of the destination.
1564 		 *  B-d  |  \
1565 		 *       |   \      <- smaller scope is better
1566 		 *  B-15 |    \        if scope is enough for destination.
1567 		 *       |             ret = B - scope (-1 <= d <= scope <= 15).
1568 		 * d-C-1 | /
1569 		 *       |/         <- greater is better
1570 		 *   -C  /             if scope is not enough for destination.
1571 		 *      /|             ret = scope - C (-1 <= scope < d <= 15).
1572 		 *
1573 		 * d - C - 1 < B -15 (for all -1 <= d <= 15).
1574 		 * C > d + 14 - B >= 15 + 14 - B = 29 - B.
1575 		 * Assume B = 0 and we get C > 29.
1576 		 */
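		/*
		 * Worked example (illustrative, not from the original source):
		 * for a global destination (d = 14), a global candidate source
		 * has scope 14 >= d, so ret = -14; a link-local candidate has
		 * scope 2 < d, so ret = 2 - 128 = -126.  The larger value wins:
		 * any source whose scope covers the destination beats one that
		 * does not, and among covering sources the smallest scope is
		 * preferred.
		 */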
1577 		ret = __ipv6_addr_src_scope(score->addr_type);
1578 		if (ret >= dst->scope)
1579 			ret = -ret;
1580 		else
1581 			ret -= 128;	/* 30 is enough */
1582 		score->scopedist = ret;
1583 		break;
1584 	case IPV6_SADDR_RULE_PREFERRED:
1585 	    {
1586 		/* Rule 3: Avoid deprecated and optimistic addresses */
1587 		u8 avoid = IFA_F_DEPRECATED;
1588 
1589 		if (!ipv6_use_optimistic_addr(net, score->ifa->idev))
1590 			avoid |= IFA_F_OPTIMISTIC;
1591 		ret = ipv6_saddr_preferred(score->addr_type) ||
1592 		      !(score->ifa->flags & avoid);
1593 		break;
1594 	    }
1595 #ifdef CONFIG_IPV6_MIP6
1596 	case IPV6_SADDR_RULE_HOA:
1597 	    {
1598 		/* Rule 4: Prefer home address */
1599 		int prefhome = !(dst->prefs & IPV6_PREFER_SRC_COA);
1600 		ret = !(score->ifa->flags & IFA_F_HOMEADDRESS) ^ prefhome;
1601 		break;
1602 	    }
1603 #endif
1604 	case IPV6_SADDR_RULE_OIF:
1605 		/* Rule 5: Prefer outgoing interface */
1606 		ret = (!dst->ifindex ||
1607 		       dst->ifindex == score->ifa->idev->dev->ifindex);
1608 		break;
1609 	case IPV6_SADDR_RULE_LABEL:
1610 		/* Rule 6: Prefer matching label */
1611 		ret = ipv6_addr_label(net,
1612 				      &score->ifa->addr, score->addr_type,
1613 				      score->ifa->idev->dev->ifindex) == dst->label;
1614 		break;
1615 	case IPV6_SADDR_RULE_PRIVACY:
1616 	    {
1617 		/* Rule 7: Prefer public address
1618 		 * Note: prefer temporary address if use_tempaddr >= 2
1619 		 */
1620 		int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ?
1621 				!!(dst->prefs & IPV6_PREFER_SRC_TMP) :
1622 				score->ifa->idev->cnf.use_tempaddr >= 2;
1623 		ret = (!(score->ifa->flags & IFA_F_TEMPORARY)) ^ preftmp;
1624 		break;
1625 	    }
1626 	case IPV6_SADDR_RULE_ORCHID:
1627 		/* Rule 8-: Prefer ORCHID vs ORCHID or
1628 		 *	    non-ORCHID vs non-ORCHID
1629 		 */
1630 		ret = !(ipv6_addr_orchid(&score->ifa->addr) ^
1631 			ipv6_addr_orchid(dst->addr));
1632 		break;
1633 	case IPV6_SADDR_RULE_PREFIX:
1634 		/* Rule 8: Use longest matching prefix */
1635 		ret = ipv6_addr_diff(&score->ifa->addr, dst->addr);
1636 		if (ret > score->ifa->prefix_len)
1637 			ret = score->ifa->prefix_len;
1638 		score->matchlen = ret;
1639 		break;
1640 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1641 	case IPV6_SADDR_RULE_NOT_OPTIMISTIC:
1642 		/* Optimistic addresses still have lower precedence than other
1643 		 * preferred addresses.
1644 		 */
1645 		ret = !(score->ifa->flags & IFA_F_OPTIMISTIC);
1646 		break;
1647 #endif
1648 	default:
1649 		ret = 0;
1650 	}
1651 
1652 	if (ret)
1653 		__set_bit(i, score->scorebits);
1654 	score->rule = i;
1655 out:
1656 	return ret;
1657 }
1658 
1659 static int __ipv6_dev_get_saddr(struct net *net,
1660 				struct ipv6_saddr_dst *dst,
1661 				struct inet6_dev *idev,
1662 				struct ipv6_saddr_score *scores,
1663 				int hiscore_idx)
1664 {
1665 	struct ipv6_saddr_score *score = &scores[1 - hiscore_idx], *hiscore = &scores[hiscore_idx];
1666 
1667 	list_for_each_entry_rcu(score->ifa, &idev->addr_list, if_list) {
1668 		int i;
1669 
1670 		/*
1671 		 * - Tentative Address (RFC2462 section 5.4)
1672 		 *  - A tentative address is not considered
1673 		 *    "assigned to an interface" in the traditional
1674 		 *    sense, unless it is also flagged as optimistic.
1675 		 * - Candidate Source Address (section 4)
1676 		 *  - In any case, anycast addresses, multicast
1677 		 *    addresses, and the unspecified address MUST
1678 		 *    NOT be included in a candidate set.
1679 		 */
1680 		if ((score->ifa->flags & IFA_F_TENTATIVE) &&
1681 		    (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
1682 			continue;
1683 
1684 		score->addr_type = __ipv6_addr_type(&score->ifa->addr);
1685 
1686 		if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
1687 			     score->addr_type & IPV6_ADDR_MULTICAST)) {
1688 			net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
1689 					    idev->dev->name);
1690 			continue;
1691 		}
1692 
1693 		score->rule = -1;
1694 		bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
1695 
1696 		for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
1697 			int minihiscore, miniscore;
1698 
1699 			minihiscore = ipv6_get_saddr_eval(net, hiscore, dst, i);
1700 			miniscore = ipv6_get_saddr_eval(net, score, dst, i);
1701 
1702 			if (minihiscore > miniscore) {
1703 				if (i == IPV6_SADDR_RULE_SCOPE &&
1704 				    score->scopedist > 0) {
1705 					/*
1706 					 * special case:
1707 					 * each remaining entry
1708 					 * has too small (not enough)
1709 					 * scope, because ifa entries
1710 					 * are sorted by their scope
1711 					 * values.
1712 					 */
1713 					goto out;
1714 				}
1715 				break;
1716 			} else if (minihiscore < miniscore) {
1717 				swap(hiscore, score);
1718 				hiscore_idx = 1 - hiscore_idx;
1719 
1720 				/* restore our iterator */
1721 				score->ifa = hiscore->ifa;
1722 
1723 				break;
1724 			}
1725 		}
1726 	}
1727 out:
1728 	return hiscore_idx;
1729 }
1730 
1731 static int ipv6_get_saddr_master(struct net *net,
1732 				 const struct net_device *dst_dev,
1733 				 const struct net_device *master,
1734 				 struct ipv6_saddr_dst *dst,
1735 				 struct ipv6_saddr_score *scores,
1736 				 int hiscore_idx)
1737 {
1738 	struct inet6_dev *idev;
1739 
1740 	idev = __in6_dev_get(dst_dev);
1741 	if (idev)
1742 		hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1743 						   scores, hiscore_idx);
1744 
1745 	idev = __in6_dev_get(master);
1746 	if (idev)
1747 		hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1748 						   scores, hiscore_idx);
1749 
1750 	return hiscore_idx;
1751 }
1752 
1753 int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
1754 		       const struct in6_addr *daddr, unsigned int prefs,
1755 		       struct in6_addr *saddr)
1756 {
1757 	struct ipv6_saddr_score scores[2], *hiscore;
1758 	struct ipv6_saddr_dst dst;
1759 	struct inet6_dev *idev;
1760 	struct net_device *dev;
1761 	int dst_type;
1762 	bool use_oif_addr = false;
1763 	int hiscore_idx = 0;
1764 	int ret = 0;
1765 
1766 	dst_type = __ipv6_addr_type(daddr);
1767 	dst.addr = daddr;
1768 	dst.ifindex = dst_dev ? dst_dev->ifindex : 0;
1769 	dst.scope = __ipv6_addr_src_scope(dst_type);
1770 	dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex);
1771 	dst.prefs = prefs;
1772 
1773 	scores[hiscore_idx].rule = -1;
1774 	scores[hiscore_idx].ifa = NULL;
1775 
1776 	rcu_read_lock();
1777 
1778 	/* Candidate Source Address (section 4)
1779 	 *  - For multicast and link-local destination addresses,
1780 	 *    the set of candidate source addresses MUST only
1781 	 *    include addresses assigned to interfaces
1782 	 *    belonging to the same link as the outgoing
1783 	 *    interface.
1784 	 * (- For site-local destination addresses, the
1785 	 *    set of candidate source addresses MUST only
1786 	 *    include addresses assigned to interfaces
1787 	 *    belonging to the same site as the outgoing
1788 	 *    interface.)
1789 	 *  - "It is RECOMMENDED that the candidate source addresses
1790 	 *    be the set of unicast addresses assigned to the
1791 	 *    interface that will be used to send to the destination
1792 	 *    (the 'outgoing' interface)." (RFC 6724)
1793 	 */
1794 	if (dst_dev) {
1795 		idev = __in6_dev_get(dst_dev);
1796 		if ((dst_type & IPV6_ADDR_MULTICAST) ||
1797 		    dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
1798 		    (idev && idev->cnf.use_oif_addrs_only)) {
1799 			use_oif_addr = true;
1800 		}
1801 	}
1802 
1803 	if (use_oif_addr) {
1804 		if (idev)
1805 			hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1806 	} else {
1807 		const struct net_device *master;
1808 		int master_idx = 0;
1809 
1810 		/* if dst_dev exists and is enslaved to an L3 device, then
1811 		 * prefer addresses from dst_dev and then the master over
1812 		 * any other enslaved devices in the L3 domain.
1813 		 */
1814 		master = l3mdev_master_dev_rcu(dst_dev);
1815 		if (master) {
1816 			master_idx = master->ifindex;
1817 
1818 			hiscore_idx = ipv6_get_saddr_master(net, dst_dev,
1819 							    master, &dst,
1820 							    scores, hiscore_idx);
1821 
1822 			if (scores[hiscore_idx].ifa)
1823 				goto out;
1824 		}
1825 
1826 		for_each_netdev_rcu(net, dev) {
1827 			/* only consider addresses on devices in the
1828 			 * same L3 domain
1829 			 */
1830 			if (l3mdev_master_ifindex_rcu(dev) != master_idx)
1831 				continue;
1832 			idev = __in6_dev_get(dev);
1833 			if (!idev)
1834 				continue;
1835 			hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1836 		}
1837 	}
1838 
1839 out:
1840 	hiscore = &scores[hiscore_idx];
1841 	if (!hiscore->ifa)
1842 		ret = -EADDRNOTAVAIL;
1843 	else
1844 		*saddr = hiscore->ifa->addr;
1845 
1846 	rcu_read_unlock();
1847 	return ret;
1848 }
1849 EXPORT_SYMBOL(ipv6_dev_get_saddr);
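
/*
 * Illustrative note (not from the original source): the prefs argument of
 * ipv6_dev_get_saddr() feeds the privacy rule above; passing
 * IPV6_PREFER_SRC_TMP makes a temporary address win even when
 * use_tempaddr < 2, while IPV6_PREFER_SRC_PUBLIC forces the opposite
 * choice.  On success the selected address is copied into *saddr and 0 is
 * returned, otherwise -EADDRNOTAVAIL.
 */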
1850 
1851 static int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
1852 			      u32 banned_flags)
1853 {
1854 	struct inet6_ifaddr *ifp;
1855 	int err = -EADDRNOTAVAIL;
1856 
1857 	list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
1858 		if (ifp->scope > IFA_LINK)
1859 			break;
1860 		if (ifp->scope == IFA_LINK &&
1861 		    !(ifp->flags & banned_flags)) {
1862 			*addr = ifp->addr;
1863 			err = 0;
1864 			break;
1865 		}
1866 	}
1867 	return err;
1868 }
1869 
1870 int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
1871 		    u32 banned_flags)
1872 {
1873 	struct inet6_dev *idev;
1874 	int err = -EADDRNOTAVAIL;
1875 
1876 	rcu_read_lock();
1877 	idev = __in6_dev_get(dev);
1878 	if (idev) {
1879 		read_lock_bh(&idev->lock);
1880 		err = __ipv6_get_lladdr(idev, addr, banned_flags);
1881 		read_unlock_bh(&idev->lock);
1882 	}
1883 	rcu_read_unlock();
1884 	return err;
1885 }
1886 
1887 static int ipv6_count_addresses(const struct inet6_dev *idev)
1888 {
1889 	const struct inet6_ifaddr *ifp;
1890 	int cnt = 0;
1891 
1892 	rcu_read_lock();
1893 	list_for_each_entry_rcu(ifp, &idev->addr_list, if_list)
1894 		cnt++;
1895 	rcu_read_unlock();
1896 	return cnt;
1897 }
1898 
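/* Convenience wrapper around ipv6_chk_addr_and_flags() with
 * IFA_F_TENTATIVE as the banned flags, i.e. tentative addresses are not
 * reported as present.  When @dev is NULL the device check is skipped.
 */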
1899 int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
1900 		  const struct net_device *dev, int strict)
1901 {
1902 	return ipv6_chk_addr_and_flags(net, addr, dev, !dev,
1903 				       strict, IFA_F_TENTATIVE);
1904 }
1905 EXPORT_SYMBOL(ipv6_chk_addr);
1906 
1907 /* device argument is used to find the L3 domain of interest. If
1908  * skip_dev_check is set, then the ifp device is not checked against
1909  * the passed-in dev argument. So the two cases for address checks are:
1910  *   1. does the address exist in the L3 domain that dev is part of
1911  *      (skip_dev_check = true), or
1912  *
1913  *   2. does the address exist on the specific device
1914  *      (skip_dev_check = false)
1915  */
1916 static struct net_device *
1917 __ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
1918 			  const struct net_device *dev, bool skip_dev_check,
1919 			  int strict, u32 banned_flags)
1920 {
1921 	unsigned int hash = inet6_addr_hash(net, addr);
1922 	struct net_device *l3mdev, *ndev;
1923 	struct inet6_ifaddr *ifp;
1924 	u32 ifp_flags;
1925 
1926 	rcu_read_lock();
1927 
1928 	l3mdev = l3mdev_master_dev_rcu(dev);
1929 	if (skip_dev_check)
1930 		dev = NULL;
1931 
1932 	hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
1933 		ndev = ifp->idev->dev;
1934 
1935 		if (l3mdev_master_dev_rcu(ndev) != l3mdev)
1936 			continue;
1937 
1938 		/* Decouple optimistic from tentative for evaluation here.
1939 		 * Ban optimistic addresses explicitly, when required.
1940 		 */
1941 		ifp_flags = (ifp->flags&IFA_F_OPTIMISTIC)
1942 			    ? (ifp->flags&~IFA_F_TENTATIVE)
1943 			    : ifp->flags;
1944 		if (ipv6_addr_equal(&ifp->addr, addr) &&
1945 		    !(ifp_flags&banned_flags) &&
1946 		    (!dev || ndev == dev ||
1947 		     !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) {
1948 			rcu_read_unlock();
1949 			return ndev;
1950 		}
1951 	}
1952 
1953 	rcu_read_unlock();
1954 	return NULL;
1955 }
1956 
1957 int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
1958 			    const struct net_device *dev, bool skip_dev_check,
1959 			    int strict, u32 banned_flags)
1960 {
1961 	return __ipv6_chk_addr_and_flags(net, addr, dev, skip_dev_check,
1962 					 strict, banned_flags) ? 1 : 0;
1963 }
1964 EXPORT_SYMBOL(ipv6_chk_addr_and_flags);
1965 
1966 
1967 /* Compares an address/prefix_len with addresses on device @dev.
1968  * If one is found it returns true.
1969  */
1970 bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
1971 	const unsigned int prefix_len, struct net_device *dev)
1972 {
1973 	const struct inet6_ifaddr *ifa;
1974 	const struct inet6_dev *idev;
1975 	bool ret = false;
1976 
1977 	rcu_read_lock();
1978 	idev = __in6_dev_get(dev);
1979 	if (idev) {
1980 		list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
1981 			ret = ipv6_prefix_equal(addr, &ifa->addr, prefix_len);
1982 			if (ret)
1983 				break;
1984 		}
1985 	}
1986 	rcu_read_unlock();
1987 
1988 	return ret;
1989 }
1990 EXPORT_SYMBOL(ipv6_chk_custom_prefix);
1991 
1992 int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
1993 {
1994 	const struct inet6_ifaddr *ifa;
1995 	const struct inet6_dev *idev;
1996 	int	onlink;
1997 
1998 	onlink = 0;
1999 	rcu_read_lock();
2000 	idev = __in6_dev_get(dev);
2001 	if (idev) {
2002 		list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
2003 			onlink = ipv6_prefix_equal(addr, &ifa->addr,
2004 						   ifa->prefix_len);
2005 			if (onlink)
2006 				break;
2007 		}
2008 	}
2009 	rcu_read_unlock();
2010 	return onlink;
2011 }
2012 EXPORT_SYMBOL(ipv6_chk_prefix);
2013 
2014 /**
2015  * ipv6_dev_find - find the first device with a given source address.
2016  * @net: the net namespace
2017  * @addr: the source address
2018  * @dev: used to find the L3 domain of interest
2019  *
2020  * The caller should be protected by RCU, or RTNL.
2021  */
2022 struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr,
2023 				 struct net_device *dev)
2024 {
2025 	return __ipv6_chk_addr_and_flags(net, addr, dev, !dev, 1,
2026 					 IFA_F_TENTATIVE);
2027 }
2028 EXPORT_SYMBOL(ipv6_dev_find);
2029 
2030 struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
2031 				     struct net_device *dev, int strict)
2032 {
2033 	unsigned int hash = inet6_addr_hash(net, addr);
2034 	struct inet6_ifaddr *ifp, *result = NULL;
2035 
2036 	rcu_read_lock();
2037 	hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
2038 		if (ipv6_addr_equal(&ifp->addr, addr)) {
2039 			if (!dev || ifp->idev->dev == dev ||
2040 			    !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
2041 				result = ifp;
2042 				in6_ifa_hold(ifp);
2043 				break;
2044 			}
2045 		}
2046 	}
2047 	rcu_read_unlock();
2048 
2049 	return result;
2050 }
2051 
2052 /* Gets referenced address, destroys ifaddr */
2053 
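/* Stop DAD processing for @ifp: a temporary address is deleted after a
 * replacement has been generated from its public address; a permanent
 * address (or any address when DAD did not fail) is kept and marked
 * tentative again; everything else is deleted.
 */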
2054 static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
2055 {
2056 	if (dad_failed)
2057 		ifp->flags |= IFA_F_DADFAILED;
2058 
2059 	if (ifp->flags&IFA_F_TEMPORARY) {
2060 		struct inet6_ifaddr *ifpub;
2061 		spin_lock_bh(&ifp->lock);
2062 		ifpub = ifp->ifpub;
2063 		if (ifpub) {
2064 			in6_ifa_hold(ifpub);
2065 			spin_unlock_bh(&ifp->lock);
2066 			ipv6_create_tempaddr(ifpub, true);
2067 			in6_ifa_put(ifpub);
2068 		} else {
2069 			spin_unlock_bh(&ifp->lock);
2070 		}
2071 		ipv6_del_addr(ifp);
2072 	} else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) {
2073 		spin_lock_bh(&ifp->lock);
2074 		addrconf_del_dad_work(ifp);
2075 		ifp->flags |= IFA_F_TENTATIVE;
2076 		if (dad_failed)
2077 			ifp->flags &= ~IFA_F_OPTIMISTIC;
2078 		spin_unlock_bh(&ifp->lock);
2079 		if (dad_failed)
2080 			ipv6_ifa_notify(0, ifp);
2081 		in6_ifa_put(ifp);
2082 	} else {
2083 		ipv6_del_addr(ifp);
2084 	}
2085 }
2086 
2087 static int addrconf_dad_end(struct inet6_ifaddr *ifp)
2088 {
2089 	int err = -ENOENT;
2090 
2091 	spin_lock_bh(&ifp->lock);
2092 	if (ifp->state == INET6_IFADDR_STATE_DAD) {
2093 		ifp->state = INET6_IFADDR_STATE_POSTDAD;
2094 		err = 0;
2095 	}
2096 	spin_unlock_bh(&ifp->lock);
2097 
2098 	return err;
2099 }
2100 
2101 void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp)
2102 {
2103 	struct inet6_dev *idev = ifp->idev;
2104 	struct net *net = dev_net(idev->dev);
2105 
2106 	if (addrconf_dad_end(ifp)) {
2107 		in6_ifa_put(ifp);
2108 		return;
2109 	}
2110 
2111 	net_info_ratelimited("%s: IPv6 duplicate address %pI6c used by %pM detected!\n",
2112 			     ifp->idev->dev->name, &ifp->addr, eth_hdr(skb)->h_source);
2113 
2114 	spin_lock_bh(&ifp->lock);
2115 
2116 	if (ifp->flags & IFA_F_STABLE_PRIVACY) {
2117 		struct in6_addr new_addr;
2118 		struct inet6_ifaddr *ifp2;
2119 		int retries = ifp->stable_privacy_retry + 1;
2120 		struct ifa6_config cfg = {
2121 			.pfx = &new_addr,
2122 			.plen = ifp->prefix_len,
2123 			.ifa_flags = ifp->flags,
2124 			.valid_lft = ifp->valid_lft,
2125 			.preferred_lft = ifp->prefered_lft,
2126 			.scope = ifp->scope,
2127 		};
2128 
2129 		if (retries > net->ipv6.sysctl.idgen_retries) {
2130 			net_info_ratelimited("%s: privacy stable address generation failed because of DAD conflicts!\n",
2131 					     ifp->idev->dev->name);
2132 			goto errdad;
2133 		}
2134 
2135 		new_addr = ifp->addr;
2136 		if (ipv6_generate_stable_address(&new_addr, retries,
2137 						 idev))
2138 			goto errdad;
2139 
2140 		spin_unlock_bh(&ifp->lock);
2141 
2142 		if (idev->cnf.max_addresses &&
2143 		    ipv6_count_addresses(idev) >=
2144 		    idev->cnf.max_addresses)
2145 			goto lock_errdad;
2146 
2147 		net_info_ratelimited("%s: generating new stable privacy address because of DAD conflict\n",
2148 				     ifp->idev->dev->name);
2149 
2150 		ifp2 = ipv6_add_addr(idev, &cfg, false, NULL);
2151 		if (IS_ERR(ifp2))
2152 			goto lock_errdad;
2153 
2154 		spin_lock_bh(&ifp2->lock);
2155 		ifp2->stable_privacy_retry = retries;
2156 		ifp2->state = INET6_IFADDR_STATE_PREDAD;
2157 		spin_unlock_bh(&ifp2->lock);
2158 
2159 		addrconf_mod_dad_work(ifp2, net->ipv6.sysctl.idgen_delay);
2160 		in6_ifa_put(ifp2);
2161 lock_errdad:
2162 		spin_lock_bh(&ifp->lock);
2163 	}
2164 
2165 errdad:
2166 	/* transition from _POSTDAD to _ERRDAD */
2167 	ifp->state = INET6_IFADDR_STATE_ERRDAD;
2168 	spin_unlock_bh(&ifp->lock);
2169 
2170 	addrconf_mod_dad_work(ifp, 0);
2171 	in6_ifa_put(ifp);
2172 }
2173 
2174 /* Join the solicited-node multicast group for this address.
2175  * Caller must hold RTNL. */
2176 void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
2177 {
2178 	struct in6_addr maddr;
2179 
2180 	if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
2181 		return;
2182 
2183 	addrconf_addr_solict_mult(addr, &maddr);
2184 	ipv6_dev_mc_inc(dev, &maddr);
2185 }
2186 
2187 /* caller must hold RTNL */
2188 void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
2189 {
2190 	struct in6_addr maddr;
2191 
2192 	if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
2193 		return;
2194 
2195 	addrconf_addr_solict_mult(addr, &maddr);
2196 	__ipv6_dev_mc_dec(idev, &maddr);
2197 }
2198 
2199 /* caller must hold RTNL */
2200 static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
2201 {
2202 	struct in6_addr addr;
2203 
2204 	if (ifp->prefix_len >= 127) /* RFC 6164 */
2205 		return;
2206 	ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2207 	if (ipv6_addr_any(&addr))
2208 		return;
2209 	__ipv6_dev_ac_inc(ifp->idev, &addr);
2210 }
2211 
2212 /* caller must hold RTNL */
2213 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
2214 {
2215 	struct in6_addr addr;
2216 
2217 	if (ifp->prefix_len >= 127) /* RFC 6164 */
2218 		return;
2219 	ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2220 	if (ipv6_addr_any(&addr))
2221 		return;
2222 	__ipv6_dev_ac_dec(ifp->idev, &addr);
2223 }
2224 
2225 static int addrconf_ifid_6lowpan(u8 *eui, struct net_device *dev)
2226 {
2227 	switch (dev->addr_len) {
2228 	case ETH_ALEN:
2229 		memcpy(eui, dev->dev_addr, 3);
2230 		eui[3] = 0xFF;
2231 		eui[4] = 0xFE;
2232 		memcpy(eui + 5, dev->dev_addr + 3, 3);
2233 		break;
2234 	case EUI64_ADDR_LEN:
2235 		memcpy(eui, dev->dev_addr, EUI64_ADDR_LEN);
2236 		eui[0] ^= 2;
2237 		break;
2238 	default:
2239 		return -1;
2240 	}
2241 
2242 	return 0;
2243 }
2244 
2245 static int addrconf_ifid_ieee1394(u8 *eui, struct net_device *dev)
2246 {
2247 	const union fwnet_hwaddr *ha;
2248 
2249 	if (dev->addr_len != FWNET_ALEN)
2250 		return -1;
2251 
2252 	ha = (const union fwnet_hwaddr *)dev->dev_addr;
2253 
2254 	memcpy(eui, &ha->uc.uniq_id, sizeof(ha->uc.uniq_id));
2255 	eui[0] ^= 2;
2256 	return 0;
2257 }
2258 
2259 static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev)
2260 {
2261 	/* XXX: inherit EUI-64 from other interface -- yoshfuji */
2262 	if (dev->addr_len != ARCNET_ALEN)
2263 		return -1;
2264 	memset(eui, 0, 7);
2265 	eui[7] = *(u8 *)dev->dev_addr;
2266 	return 0;
2267 }
2268 
2269 static int addrconf_ifid_infiniband(u8 *eui, struct net_device *dev)
2270 {
2271 	if (dev->addr_len != INFINIBAND_ALEN)
2272 		return -1;
2273 	memcpy(eui, dev->dev_addr + 12, 8);
2274 	eui[0] |= 2;
2275 	return 0;
2276 }
2277 
2278 static int __ipv6_isatap_ifid(u8 *eui, __be32 addr)
2279 {
2280 	if (addr == 0)
2281 		return -1;
2282 	eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) ||
2283 		  ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) ||
2284 		  ipv4_is_private_172(addr) || ipv4_is_test_192(addr) ||
2285 		  ipv4_is_anycast_6to4(addr) || ipv4_is_private_192(addr) ||
2286 		  ipv4_is_test_198(addr) || ipv4_is_multicast(addr) ||
2287 		  ipv4_is_lbcast(addr)) ? 0x00 : 0x02;
2288 	eui[1] = 0;
2289 	eui[2] = 0x5E;
2290 	eui[3] = 0xFE;
2291 	memcpy(eui + 4, &addr, 4);
2292 	return 0;
2293 }
2294 
2295 static int addrconf_ifid_sit(u8 *eui, struct net_device *dev)
2296 {
2297 	if (dev->priv_flags & IFF_ISATAP)
2298 		return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2299 	return -1;
2300 }
2301 
2302 static int addrconf_ifid_gre(u8 *eui, struct net_device *dev)
2303 {
2304 	return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2305 }
2306 
2307 static int addrconf_ifid_ip6tnl(u8 *eui, struct net_device *dev)
2308 {
2309 	memcpy(eui, dev->perm_addr, 3);
2310 	memcpy(eui + 5, dev->perm_addr + 3, 3);
2311 	eui[3] = 0xFF;
2312 	eui[4] = 0xFE;
2313 	eui[0] ^= 2;
2314 	return 0;
2315 }
2316 
2317 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
2318 {
2319 	switch (dev->type) {
2320 	case ARPHRD_ETHER:
2321 	case ARPHRD_FDDI:
2322 		return addrconf_ifid_eui48(eui, dev);
2323 	case ARPHRD_ARCNET:
2324 		return addrconf_ifid_arcnet(eui, dev);
2325 	case ARPHRD_INFINIBAND:
2326 		return addrconf_ifid_infiniband(eui, dev);
2327 	case ARPHRD_SIT:
2328 		return addrconf_ifid_sit(eui, dev);
2329 	case ARPHRD_IPGRE:
2330 	case ARPHRD_TUNNEL:
2331 		return addrconf_ifid_gre(eui, dev);
2332 	case ARPHRD_6LOWPAN:
2333 		return addrconf_ifid_6lowpan(eui, dev);
2334 	case ARPHRD_IEEE1394:
2335 		return addrconf_ifid_ieee1394(eui, dev);
2336 	case ARPHRD_TUNNEL6:
2337 	case ARPHRD_IP6GRE:
2338 	case ARPHRD_RAWIP:
2339 		return addrconf_ifid_ip6tnl(eui, dev);
2340 	}
2341 	return -1;
2342 }
2343 
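/* Fall back to copying the interface identifier (the low 64 bits) of an
 * existing non-tentative link-local address on @idev.
 */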
2344 static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
2345 {
2346 	int err = -1;
2347 	struct inet6_ifaddr *ifp;
2348 
2349 	read_lock_bh(&idev->lock);
2350 	list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
2351 		if (ifp->scope > IFA_LINK)
2352 			break;
2353 		if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) {
2354 			memcpy(eui, ifp->addr.s6_addr+8, 8);
2355 			err = 0;
2356 			break;
2357 		}
2358 	}
2359 	read_unlock_bh(&idev->lock);
2360 	return err;
2361 }
2362 
2363 /* Generation of a randomized Interface Identifier
2364  * draft-ietf-6man-rfc4941bis, Section 3.3.1
2365  */
2366 
2367 static void ipv6_gen_rnd_iid(struct in6_addr *addr)
2368 {
2369 regen:
2370 	get_random_bytes(&addr->s6_addr[8], 8);
2371 
2372 	/* <draft-ietf-6man-rfc4941bis-08.txt>, Section 3.3.1:
2373 	 * check if generated address is not inappropriate:
2374 	 *
2375 	 * - Reserved IPv6 Interface Identifiers
2376 	 * - XXX: already assigned to an address on the device
2377 	 */
2378 
2379 	/* Subnet-router anycast: 0000:0000:0000:0000 */
2380 	if (!(addr->s6_addr32[2] | addr->s6_addr32[3]))
2381 		goto regen;
2382 
2383 	/* IANA Ethernet block: 0200:5EFF:FE00:0000-0200:5EFF:FE00:5212
2384 	 * Proxy Mobile IPv6:   0200:5EFF:FE00:5213
2385 	 * IANA Ethernet block: 0200:5EFF:FE00:5214-0200:5EFF:FEFF:FFFF
2386 	 */
2387 	if (ntohl(addr->s6_addr32[2]) == 0x02005eff &&
2388 	    (ntohl(addr->s6_addr32[3]) & 0xff000000) == 0xfe000000)
2389 		goto regen;
2390 
2391 	/* Reserved subnet anycast addresses */
2392 	if (ntohl(addr->s6_addr32[2]) == 0xfdffffff &&
2393 	    ntohl(addr->s6_addr32[3]) >= 0xffffff80)
2394 		goto regen;
2395 }
2396 
2397 /*
2398  *	Add prefix route.
2399  */
2400 
2401 static void
2402 addrconf_prefix_route(struct in6_addr *pfx, int plen, u32 metric,
2403 		      struct net_device *dev, unsigned long expires,
2404 		      u32 flags, gfp_t gfp_flags)
2405 {
2406 	struct fib6_config cfg = {
2407 		.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX,
2408 		.fc_metric = metric ? : IP6_RT_PRIO_ADDRCONF,
2409 		.fc_ifindex = dev->ifindex,
2410 		.fc_expires = expires,
2411 		.fc_dst_len = plen,
2412 		.fc_flags = RTF_UP | flags,
2413 		.fc_nlinfo.nl_net = dev_net(dev),
2414 		.fc_protocol = RTPROT_KERNEL,
2415 		.fc_type = RTN_UNICAST,
2416 	};
2417 
2418 	cfg.fc_dst = *pfx;
2419 
2420 	/* Prevent useless cloning on PtP SIT.
2421 	   This is done here on the assumption that the whole
2422 	   class of non-broadcast devices does not need cloning.
2423 	 */
2424 #if IS_ENABLED(CONFIG_IPV6_SIT)
2425 	if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT))
2426 		cfg.fc_flags |= RTF_NONEXTHOP;
2427 #endif
2428 
2429 	ip6_route_add(&cfg, gfp_flags, NULL);
2430 }
2431 
2432 
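/* Find the prefix route for @pfx/@plen that was installed on @dev,
 * looking in the l3mdev-aware table (RT6_TABLE_PREFIX by default).
 * Only routes using the builtin fib6_nh are considered; @flags must be
 * set and @noflags must be clear on the matching route.  A reference is
 * taken on the returned fib6_info.
 */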
2433 static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
2434 						  int plen,
2435 						  const struct net_device *dev,
2436 						  u32 flags, u32 noflags,
2437 						  bool no_gw)
2438 {
2439 	struct fib6_node *fn;
2440 	struct fib6_info *rt = NULL;
2441 	struct fib6_table *table;
2442 	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX;
2443 
2444 	table = fib6_get_table(dev_net(dev), tb_id);
2445 	if (!table)
2446 		return NULL;
2447 
2448 	rcu_read_lock();
2449 	fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0, true);
2450 	if (!fn)
2451 		goto out;
2452 
2453 	for_each_fib6_node_rt_rcu(fn) {
2454 		/* prefix routes only use builtin fib6_nh */
2455 		if (rt->nh)
2456 			continue;
2457 
2458 		if (rt->fib6_nh->fib_nh_dev->ifindex != dev->ifindex)
2459 			continue;
2460 		if (no_gw && rt->fib6_nh->fib_nh_gw_family)
2461 			continue;
2462 		if ((rt->fib6_flags & flags) != flags)
2463 			continue;
2464 		if ((rt->fib6_flags & noflags) != 0)
2465 			continue;
2466 		if (!fib6_info_hold_safe(rt))
2467 			continue;
2468 		break;
2469 	}
2470 out:
2471 	rcu_read_unlock();
2472 	return rt;
2473 }
2474 
2475 
2476 /* Create "default" multicast route to the interface */
2477 
2478 static void addrconf_add_mroute(struct net_device *dev)
2479 {
2480 	struct fib6_config cfg = {
2481 		.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_LOCAL,
2482 		.fc_metric = IP6_RT_PRIO_ADDRCONF,
2483 		.fc_ifindex = dev->ifindex,
2484 		.fc_dst_len = 8,
2485 		.fc_flags = RTF_UP,
2486 		.fc_type = RTN_MULTICAST,
2487 		.fc_nlinfo.nl_net = dev_net(dev),
2488 		.fc_protocol = RTPROT_KERNEL,
2489 	};
2490 
2491 	ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
2492 
2493 	ip6_route_add(&cfg, GFP_KERNEL, NULL);
2494 }
2495 
2496 static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
2497 {
2498 	struct inet6_dev *idev;
2499 
2500 	ASSERT_RTNL();
2501 
2502 	idev = ipv6_find_idev(dev);
2503 	if (IS_ERR(idev))
2504 		return idev;
2505 
2506 	if (idev->cnf.disable_ipv6)
2507 		return ERR_PTR(-EACCES);
2508 
2509 	/* Add default multicast route */
2510 	if (!(dev->flags & IFF_LOOPBACK) && !netif_is_l3_master(dev))
2511 		addrconf_add_mroute(dev);
2512 
2513 	return idev;
2514 }
2515 
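/* Refresh the lifetimes of all temporary addresses derived from public
 * address @ifp, clamping them so that no temporary address stays valid
 * or preferred longer than allowed by RFC 4941 (temp_valid_lft and
 * temp_prefered_lft minus the desync factor).  When @create is set, or
 * no temporary address exists yet, generate a new one if use_tempaddr
 * is enabled.
 */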
2516 static void manage_tempaddrs(struct inet6_dev *idev,
2517 			     struct inet6_ifaddr *ifp,
2518 			     __u32 valid_lft, __u32 prefered_lft,
2519 			     bool create, unsigned long now)
2520 {
2521 	u32 flags;
2522 	struct inet6_ifaddr *ift;
2523 
2524 	read_lock_bh(&idev->lock);
2525 	/* update all temporary addresses in the list */
2526 	list_for_each_entry(ift, &idev->tempaddr_list, tmp_list) {
2527 		int age, max_valid, max_prefered;
2528 
2529 		if (ifp != ift->ifpub)
2530 			continue;
2531 
2532 		/* RFC 4941 section 3.3:
2533 		 * If a received option will extend the lifetime of a public
2534 		 * address, the lifetimes of temporary addresses should
2535 		 * be extended, subject to the overall constraint that no
2536 		 * temporary addresses should ever remain "valid" or "preferred"
2537 		 * for a time longer than (TEMP_VALID_LIFETIME) or
2538 		 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), respectively.
2539 		 */
2540 		age = (now - ift->cstamp) / HZ;
2541 		max_valid = idev->cnf.temp_valid_lft - age;
2542 		if (max_valid < 0)
2543 			max_valid = 0;
2544 
2545 		max_prefered = idev->cnf.temp_prefered_lft -
2546 			       idev->desync_factor - age;
2547 		if (max_prefered < 0)
2548 			max_prefered = 0;
2549 
2550 		if (valid_lft > max_valid)
2551 			valid_lft = max_valid;
2552 
2553 		if (prefered_lft > max_prefered)
2554 			prefered_lft = max_prefered;
2555 
2556 		spin_lock(&ift->lock);
2557 		flags = ift->flags;
2558 		ift->valid_lft = valid_lft;
2559 		ift->prefered_lft = prefered_lft;
2560 		ift->tstamp = now;
2561 		if (prefered_lft > 0)
2562 			ift->flags &= ~IFA_F_DEPRECATED;
2563 
2564 		spin_unlock(&ift->lock);
2565 		if (!(flags&IFA_F_TENTATIVE))
2566 			ipv6_ifa_notify(0, ift);
2567 	}
2568 
2569 	if ((create || list_empty(&idev->tempaddr_list)) &&
2570 	    idev->cnf.use_tempaddr > 0) {
2571 		/* When a new public address is created as described
2572 		 * in [ADDRCONF], also create a new temporary address,
2573 		 * and likewise when temporary addresses are enabled but
2574 		 * none currently exists.
2575 		 */
2576 		read_unlock_bh(&idev->lock);
2577 		ipv6_create_tempaddr(ifp, false);
2578 	} else {
2579 		read_unlock_bh(&idev->lock);
2580 	}
2581 }
2582 
2583 static bool is_addr_mode_generate_stable(struct inet6_dev *idev)
2584 {
2585 	return idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY ||
2586 	       idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM;
2587 }
2588 
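/* Autoconfiguration part of a received Prefix Information option: if no
 * matching address exists and the valid lifetime is non-zero, create
 * one (subject to max_addresses) and start DAD; otherwise refresh the
 * lifetimes of the existing address per RFC 4862 section 5.5.3e and
 * update the associated temporary addresses.
 */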
2589 int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
2590 				 const struct prefix_info *pinfo,
2591 				 struct inet6_dev *in6_dev,
2592 				 const struct in6_addr *addr, int addr_type,
2593 				 u32 addr_flags, bool sllao, bool tokenized,
2594 				 __u32 valid_lft, u32 prefered_lft)
2595 {
2596 	struct inet6_ifaddr *ifp = ipv6_get_ifaddr(net, addr, dev, 1);
2597 	int create = 0, update_lft = 0;
2598 
2599 	if (!ifp && valid_lft) {
2600 		int max_addresses = in6_dev->cnf.max_addresses;
2601 		struct ifa6_config cfg = {
2602 			.pfx = addr,
2603 			.plen = pinfo->prefix_len,
2604 			.ifa_flags = addr_flags,
2605 			.valid_lft = valid_lft,
2606 			.preferred_lft = prefered_lft,
2607 			.scope = addr_type & IPV6_ADDR_SCOPE_MASK,
2608 			.ifa_proto = IFAPROT_KERNEL_RA
2609 		};
2610 
2611 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
2612 		if ((net->ipv6.devconf_all->optimistic_dad ||
2613 		     in6_dev->cnf.optimistic_dad) &&
2614 		    !net->ipv6.devconf_all->forwarding && sllao)
2615 			cfg.ifa_flags |= IFA_F_OPTIMISTIC;
2616 #endif
2617 
2618 		/* Do not allow creating too many autoconfigured
2619 		 * addresses; this would be too easy a way to crash the kernel.
2620 		 */
2621 		if (!max_addresses ||
2622 		    ipv6_count_addresses(in6_dev) < max_addresses)
2623 			ifp = ipv6_add_addr(in6_dev, &cfg, false, NULL);
2624 
2625 		if (IS_ERR_OR_NULL(ifp))
2626 			return -1;
2627 
2628 		create = 1;
2629 		spin_lock_bh(&ifp->lock);
2630 		ifp->flags |= IFA_F_MANAGETEMPADDR;
2631 		ifp->cstamp = jiffies;
2632 		ifp->tokenized = tokenized;
2633 		spin_unlock_bh(&ifp->lock);
2634 		addrconf_dad_start(ifp);
2635 	}
2636 
2637 	if (ifp) {
2638 		u32 flags;
2639 		unsigned long now;
2640 		u32 stored_lft;
2641 
2642 		/* update lifetime (RFC2462 5.5.3 e) */
2643 		spin_lock_bh(&ifp->lock);
2644 		now = jiffies;
2645 		if (ifp->valid_lft > (now - ifp->tstamp) / HZ)
2646 			stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
2647 		else
2648 			stored_lft = 0;
2649 		if (!create && stored_lft) {
2650 			const u32 minimum_lft = min_t(u32,
2651 				stored_lft, MIN_VALID_LIFETIME);
2652 			valid_lft = max(valid_lft, minimum_lft);
2653 
2654 			/* RFC4862 Section 5.5.3e:
2655 			 * "Note that the preferred lifetime of the
2656 			 *  corresponding address is always reset to
2657 			 *  the Preferred Lifetime in the received
2658 			 *  Prefix Information option, regardless of
2659 			 *  whether the valid lifetime is also reset or
2660 			 *  ignored."
2661 			 *
2662 			 * So we should always update prefered_lft here.
2663 			 */
2664 			update_lft = 1;
2665 		}
2666 
2667 		if (update_lft) {
2668 			ifp->valid_lft = valid_lft;
2669 			ifp->prefered_lft = prefered_lft;
2670 			ifp->tstamp = now;
2671 			flags = ifp->flags;
2672 			ifp->flags &= ~IFA_F_DEPRECATED;
2673 			spin_unlock_bh(&ifp->lock);
2674 
2675 			if (!(flags&IFA_F_TENTATIVE))
2676 				ipv6_ifa_notify(0, ifp);
2677 		} else
2678 			spin_unlock_bh(&ifp->lock);
2679 
2680 		manage_tempaddrs(in6_dev, ifp, valid_lft, prefered_lft,
2681 				 create, now);
2682 
2683 		in6_ifa_put(ifp);
2684 		addrconf_verify(net);
2685 	}
2686 
2687 	return 0;
2688 }
2689 EXPORT_SYMBOL_GPL(addrconf_prefix_rcv_add_addr);
2690 
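/* Process a Prefix Information option from a Router Advertisement:
 * validate it, add/update/delete the corresponding prefix route when
 * the on-link flag is set, and derive an address (token, stable-privacy
 * or EUI-64 based) when the autonomous flag is set and autoconf is
 * enabled on the interface.
 */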
2691 void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
2692 {
2693 	struct prefix_info *pinfo;
2694 	__u32 valid_lft;
2695 	__u32 prefered_lft;
2696 	int addr_type, err;
2697 	u32 addr_flags = 0;
2698 	struct inet6_dev *in6_dev;
2699 	struct net *net = dev_net(dev);
2700 
2701 	pinfo = (struct prefix_info *) opt;
2702 
2703 	if (len < sizeof(struct prefix_info)) {
2704 		netdev_dbg(dev, "addrconf: prefix option too short\n");
2705 		return;
2706 	}
2707 
2708 	/*
2709 	 *	Validation checks ([ADDRCONF], page 19)
2710 	 */
2711 
2712 	addr_type = ipv6_addr_type(&pinfo->prefix);
2713 
2714 	if (addr_type & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL))
2715 		return;
2716 
2717 	valid_lft = ntohl(pinfo->valid);
2718 	prefered_lft = ntohl(pinfo->prefered);
2719 
2720 	if (prefered_lft > valid_lft) {
2721 		net_warn_ratelimited("addrconf: prefix option has invalid lifetime\n");
2722 		return;
2723 	}
2724 
2725 	in6_dev = in6_dev_get(dev);
2726 
2727 	if (!in6_dev) {
2728 		net_dbg_ratelimited("addrconf: device %s not configured\n",
2729 				    dev->name);
2730 		return;
2731 	}
2732 
2733 	/*
2734 	 *	Two things going on here:
2735 	 *	1) Add routes for on-link prefixes
2736 	 *	2) Configure prefixes with the auto flag set
2737 	 */
2738 
2739 	if (pinfo->onlink) {
2740 		struct fib6_info *rt;
2741 		unsigned long rt_expires;
2742 
2743 		/* Avoid arithmetic overflow. Really, we could
2744 		 * save rt_expires in seconds, likely valid_lft,
2745 		 * but that would require division in fib gc, which is
2746 		 * not good.
2747 		 */
2748 		if (HZ > USER_HZ)
2749 			rt_expires = addrconf_timeout_fixup(valid_lft, HZ);
2750 		else
2751 			rt_expires = addrconf_timeout_fixup(valid_lft, USER_HZ);
2752 
2753 		if (addrconf_finite_timeout(rt_expires))
2754 			rt_expires *= HZ;
2755 
2756 		rt = addrconf_get_prefix_route(&pinfo->prefix,
2757 					       pinfo->prefix_len,
2758 					       dev,
2759 					       RTF_ADDRCONF | RTF_PREFIX_RT,
2760 					       RTF_DEFAULT, true);
2761 
2762 		if (rt) {
2763 			/* Autoconf prefix route */
2764 			if (valid_lft == 0) {
2765 				ip6_del_rt(net, rt, false);
2766 				rt = NULL;
2767 			} else if (addrconf_finite_timeout(rt_expires)) {
2768 				/* not infinity */
2769 				fib6_set_expires(rt, jiffies + rt_expires);
2770 			} else {
2771 				fib6_clean_expires(rt);
2772 			}
2773 		} else if (valid_lft) {
2774 			clock_t expires = 0;
2775 			int flags = RTF_ADDRCONF | RTF_PREFIX_RT;
2776 			if (addrconf_finite_timeout(rt_expires)) {
2777 				/* not infinity */
2778 				flags |= RTF_EXPIRES;
2779 				expires = jiffies_to_clock_t(rt_expires);
2780 			}
2781 			addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len,
2782 					      0, dev, expires, flags,
2783 					      GFP_ATOMIC);
2784 		}
2785 		fib6_info_release(rt);
2786 	}
2787 
2788 	/* Try to figure out our local address for this prefix */
2789 
2790 	if (pinfo->autoconf && in6_dev->cnf.autoconf) {
2791 		struct in6_addr addr;
2792 		bool tokenized = false, dev_addr_generated = false;
2793 
2794 		if (pinfo->prefix_len == 64) {
2795 			memcpy(&addr, &pinfo->prefix, 8);
2796 
2797 			if (!ipv6_addr_any(&in6_dev->token)) {
2798 				read_lock_bh(&in6_dev->lock);
2799 				memcpy(addr.s6_addr + 8,
2800 				       in6_dev->token.s6_addr + 8, 8);
2801 				read_unlock_bh(&in6_dev->lock);
2802 				tokenized = true;
2803 			} else if (is_addr_mode_generate_stable(in6_dev) &&
2804 				   !ipv6_generate_stable_address(&addr, 0,
2805 								 in6_dev)) {
2806 				addr_flags |= IFA_F_STABLE_PRIVACY;
2807 				goto ok;
2808 			} else if (ipv6_generate_eui64(addr.s6_addr + 8, dev) &&
2809 				   ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) {
2810 				goto put;
2811 			} else {
2812 				dev_addr_generated = true;
2813 			}
2814 			goto ok;
2815 		}
2816 		net_dbg_ratelimited("IPv6 addrconf: prefix with wrong length %d\n",
2817 				    pinfo->prefix_len);
2818 		goto put;
2819 
2820 ok:
2821 		err = addrconf_prefix_rcv_add_addr(net, dev, pinfo, in6_dev,
2822 						   &addr, addr_type,
2823 						   addr_flags, sllao,
2824 						   tokenized, valid_lft,
2825 						   prefered_lft);
2826 		if (err)
2827 			goto put;
2828 
2829 		/* Ignore the error case here: the preceding prefix add-addr
2830 		 * succeeded and will be notified.
2831 		 */
2832 		ndisc_ops_prefix_rcv_add_addr(net, dev, pinfo, in6_dev, &addr,
2833 					      addr_type, addr_flags, sllao,
2834 					      tokenized, valid_lft,
2835 					      prefered_lft,
2836 					      dev_addr_generated);
2837 	}
2838 	inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo);
2839 put:
2840 	in6_dev_put(in6_dev);
2841 }
2842 
2843 static int addrconf_set_sit_dstaddr(struct net *net, struct net_device *dev,
2844 		struct in6_ifreq *ireq)
2845 {
2846 	struct ip_tunnel_parm p = { };
2847 	int err;
2848 
2849 	if (!(ipv6_addr_type(&ireq->ifr6_addr) & IPV6_ADDR_COMPATv4))
2850 		return -EADDRNOTAVAIL;
2851 
2852 	p.iph.daddr = ireq->ifr6_addr.s6_addr32[3];
2853 	p.iph.version = 4;
2854 	p.iph.ihl = 5;
2855 	p.iph.protocol = IPPROTO_IPV6;
2856 	p.iph.ttl = 64;
2857 
2858 	if (!dev->netdev_ops->ndo_tunnel_ctl)
2859 		return -EOPNOTSUPP;
2860 	err = dev->netdev_ops->ndo_tunnel_ctl(dev, &p, SIOCADDTUNNEL);
2861 	if (err)
2862 		return err;
2863 
2864 	dev = __dev_get_by_name(net, p.name);
2865 	if (!dev)
2866 		return -ENOBUFS;
2867 	return dev_open(dev, NULL);
2868 }
2869 
2870 /*
2871  *	Set destination address.
2872  *	Special case for SIT interfaces where we create a new "virtual"
2873  *	device.
2874  */
2875 int addrconf_set_dstaddr(struct net *net, void __user *arg)
2876 {
2877 	struct net_device *dev;
2878 	struct in6_ifreq ireq;
2879 	int err = -ENODEV;
2880 
2881 	if (!IS_ENABLED(CONFIG_IPV6_SIT))
2882 		return -ENODEV;
2883 	if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2884 		return -EFAULT;
2885 
2886 	rtnl_lock();
2887 	dev = __dev_get_by_index(net, ireq.ifr6_ifindex);
2888 	if (dev && dev->type == ARPHRD_SIT)
2889 		err = addrconf_set_sit_dstaddr(net, dev, &ireq);
2890 	rtnl_unlock();
2891 	return err;
2892 }
2893 
2894 static int ipv6_mc_config(struct sock *sk, bool join,
2895 			  const struct in6_addr *addr, int ifindex)
2896 {
2897 	int ret;
2898 
2899 	ASSERT_RTNL();
2900 
2901 	lock_sock(sk);
2902 	if (join)
2903 		ret = ipv6_sock_mc_join(sk, ifindex, addr);
2904 	else
2905 		ret = ipv6_sock_mc_drop(sk, ifindex, addr);
2906 	release_sock(sk);
2907 
2908 	return ret;
2909 }
2910 
2911 /*
2912  *	Manual configuration of address on an interface
2913  */
2914 static int inet6_addr_add(struct net *net, int ifindex,
2915 			  struct ifa6_config *cfg,
2916 			  struct netlink_ext_ack *extack)
2917 {
2918 	struct inet6_ifaddr *ifp;
2919 	struct inet6_dev *idev;
2920 	struct net_device *dev;
2921 	unsigned long timeout;
2922 	clock_t expires;
2923 	u32 flags;
2924 
2925 	ASSERT_RTNL();
2926 
2927 	if (cfg->plen > 128)
2928 		return -EINVAL;
2929 
2930 	/* check the lifetime */
2931 	if (!cfg->valid_lft || cfg->preferred_lft > cfg->valid_lft)
2932 		return -EINVAL;
2933 
2934 	if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR && cfg->plen != 64)
2935 		return -EINVAL;
2936 
2937 	dev = __dev_get_by_index(net, ifindex);
2938 	if (!dev)
2939 		return -ENODEV;
2940 
2941 	idev = addrconf_add_dev(dev);
2942 	if (IS_ERR(idev))
2943 		return PTR_ERR(idev);
2944 
2945 	if (cfg->ifa_flags & IFA_F_MCAUTOJOIN) {
2946 		int ret = ipv6_mc_config(net->ipv6.mc_autojoin_sk,
2947 					 true, cfg->pfx, ifindex);
2948 
2949 		if (ret < 0)
2950 			return ret;
2951 	}
2952 
2953 	cfg->scope = ipv6_addr_scope(cfg->pfx);
2954 
2955 	timeout = addrconf_timeout_fixup(cfg->valid_lft, HZ);
2956 	if (addrconf_finite_timeout(timeout)) {
2957 		expires = jiffies_to_clock_t(timeout * HZ);
2958 		cfg->valid_lft = timeout;
2959 		flags = RTF_EXPIRES;
2960 	} else {
2961 		expires = 0;
2962 		flags = 0;
2963 		cfg->ifa_flags |= IFA_F_PERMANENT;
2964 	}
2965 
2966 	timeout = addrconf_timeout_fixup(cfg->preferred_lft, HZ);
2967 	if (addrconf_finite_timeout(timeout)) {
2968 		if (timeout == 0)
2969 			cfg->ifa_flags |= IFA_F_DEPRECATED;
2970 		cfg->preferred_lft = timeout;
2971 	}
2972 
2973 	ifp = ipv6_add_addr(idev, cfg, true, extack);
2974 	if (!IS_ERR(ifp)) {
2975 		if (!(cfg->ifa_flags & IFA_F_NOPREFIXROUTE)) {
2976 			addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
2977 					      ifp->rt_priority, dev, expires,
2978 					      flags, GFP_KERNEL);
2979 		}
2980 
2981 		/* Send a netlink notification if DAD is enabled and
2982 		 * optimistic flag is not set
2983 		 */
2984 		if (!(ifp->flags & (IFA_F_OPTIMISTIC | IFA_F_NODAD)))
2985 			ipv6_ifa_notify(0, ifp);
2986 		/*
2987 		 * Note that section 3.1 of RFC 4429 indicates
2988 		 * that the Optimistic flag should not be set for
2989 		 * manually configured addresses
2990 		 */
2991 		addrconf_dad_start(ifp);
2992 		if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR)
2993 			manage_tempaddrs(idev, ifp, cfg->valid_lft,
2994 					 cfg->preferred_lft, true, jiffies);
2995 		in6_ifa_put(ifp);
2996 		addrconf_verify_rtnl(net);
2997 		return 0;
2998 	} else if (cfg->ifa_flags & IFA_F_MCAUTOJOIN) {
2999 		ipv6_mc_config(net->ipv6.mc_autojoin_sk, false,
3000 			       cfg->pfx, ifindex);
3001 	}
3002 
3003 	return PTR_ERR(ifp);
3004 }
3005 
3006 static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
3007 			  const struct in6_addr *pfx, unsigned int plen)
3008 {
3009 	struct inet6_ifaddr *ifp;
3010 	struct inet6_dev *idev;
3011 	struct net_device *dev;
3012 
3013 	if (plen > 128)
3014 		return -EINVAL;
3015 
3016 	dev = __dev_get_by_index(net, ifindex);
3017 	if (!dev)
3018 		return -ENODEV;
3019 
3020 	idev = __in6_dev_get(dev);
3021 	if (!idev)
3022 		return -ENXIO;
3023 
3024 	read_lock_bh(&idev->lock);
3025 	list_for_each_entry(ifp, &idev->addr_list, if_list) {
3026 		if (ifp->prefix_len == plen &&
3027 		    ipv6_addr_equal(pfx, &ifp->addr)) {
3028 			in6_ifa_hold(ifp);
3029 			read_unlock_bh(&idev->lock);
3030 
3031 			if (!(ifp->flags & IFA_F_TEMPORARY) &&
3032 			    (ifa_flags & IFA_F_MANAGETEMPADDR))
3033 				manage_tempaddrs(idev, ifp, 0, 0, false,
3034 						 jiffies);
3035 			ipv6_del_addr(ifp);
3036 			addrconf_verify_rtnl(net);
3037 			if (ipv6_addr_is_multicast(pfx)) {
3038 				ipv6_mc_config(net->ipv6.mc_autojoin_sk,
3039 					       false, pfx, dev->ifindex);
3040 			}
3041 			return 0;
3042 		}
3043 	}
3044 	read_unlock_bh(&idev->lock);
3045 	return -EADDRNOTAVAIL;
3046 }
3047 
3048 
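/* ioctl helper: add a permanent address with infinite lifetimes as
 * described by the user-supplied struct in6_ifreq.  Requires
 * CAP_NET_ADMIN in the owning user namespace.
 */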
3049 int addrconf_add_ifaddr(struct net *net, void __user *arg)
3050 {
3051 	struct ifa6_config cfg = {
3052 		.ifa_flags = IFA_F_PERMANENT,
3053 		.preferred_lft = INFINITY_LIFE_TIME,
3054 		.valid_lft = INFINITY_LIFE_TIME,
3055 	};
3056 	struct in6_ifreq ireq;
3057 	int err;
3058 
3059 	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3060 		return -EPERM;
3061 
3062 	if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
3063 		return -EFAULT;
3064 
3065 	cfg.pfx = &ireq.ifr6_addr;
3066 	cfg.plen = ireq.ifr6_prefixlen;
3067 
3068 	rtnl_lock();
3069 	err = inet6_addr_add(net, ireq.ifr6_ifindex, &cfg, NULL);
3070 	rtnl_unlock();
3071 	return err;
3072 }
3073 
3074 int addrconf_del_ifaddr(struct net *net, void __user *arg)
3075 {
3076 	struct in6_ifreq ireq;
3077 	int err;
3078 
3079 	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3080 		return -EPERM;
3081 
3082 	if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
3083 		return -EFAULT;
3084 
3085 	rtnl_lock();
3086 	err = inet6_addr_del(net, ireq.ifr6_ifindex, 0, &ireq.ifr6_addr,
3087 			     ireq.ifr6_prefixlen);
3088 	rtnl_unlock();
3089 	return err;
3090 }
3091 
3092 static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
3093 		     int plen, int scope, u8 proto)
3094 {
3095 	struct inet6_ifaddr *ifp;
3096 	struct ifa6_config cfg = {
3097 		.pfx = addr,
3098 		.plen = plen,
3099 		.ifa_flags = IFA_F_PERMANENT,
3100 		.valid_lft = INFINITY_LIFE_TIME,
3101 		.preferred_lft = INFINITY_LIFE_TIME,
3102 		.scope = scope,
3103 		.ifa_proto = proto
3104 	};
3105 
3106 	ifp = ipv6_add_addr(idev, &cfg, true, NULL);
3107 	if (!IS_ERR(ifp)) {
3108 		spin_lock_bh(&ifp->lock);
3109 		ifp->flags &= ~IFA_F_TENTATIVE;
3110 		spin_unlock_bh(&ifp->lock);
3111 		rt_genid_bump_ipv6(dev_net(idev->dev));
3112 		ipv6_ifa_notify(RTM_NEWADDR, ifp);
3113 		in6_ifa_put(ifp);
3114 	}
3115 }
3116 
3117 #if IS_ENABLED(CONFIG_IPV6_SIT) || IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE)
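/* Derive IPv6 addresses for a SIT/GRE device from IPv4 addresses:
 * fe80::<v4> with a /64 prefix on point-to-point links, otherwise a
 * v4-compatible ::<v4> address with a /96 prefix route.  When the
 * tunnel's local IPv4 address is unset, fall back to the IPv4 addresses
 * configured on the interfaces in the namespace.
 */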
3118 static void add_v4_addrs(struct inet6_dev *idev)
3119 {
3120 	struct in6_addr addr;
3121 	struct net_device *dev;
3122 	struct net *net = dev_net(idev->dev);
3123 	int scope, plen, offset = 0;
3124 	u32 pflags = 0;
3125 
3126 	ASSERT_RTNL();
3127 
3128 	memset(&addr, 0, sizeof(struct in6_addr));
3129 	/* in case of IP6GRE the dev_addr is an IPv6 address and therefore we use only the last 4 bytes */
3130 	if (idev->dev->addr_len == sizeof(struct in6_addr))
3131 		offset = sizeof(struct in6_addr) - 4;
3132 	memcpy(&addr.s6_addr32[3], idev->dev->dev_addr + offset, 4);
3133 
3134 	if (idev->dev->flags&IFF_POINTOPOINT) {
3135 		if (idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_NONE)
3136 			return;
3137 
3138 		addr.s6_addr32[0] = htonl(0xfe800000);
3139 		scope = IFA_LINK;
3140 		plen = 64;
3141 	} else {
3142 		scope = IPV6_ADDR_COMPATv4;
3143 		plen = 96;
3144 		pflags |= RTF_NONEXTHOP;
3145 	}
3146 
3147 	if (addr.s6_addr32[3]) {
3148 		add_addr(idev, &addr, plen, scope, IFAPROT_UNSPEC);
3149 		addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags,
3150 				      GFP_KERNEL);
3151 		return;
3152 	}
3153 
3154 	for_each_netdev(net, dev) {
3155 		struct in_device *in_dev = __in_dev_get_rtnl(dev);
3156 		if (in_dev && (dev->flags & IFF_UP)) {
3157 			struct in_ifaddr *ifa;
3158 			int flag = scope;
3159 
3160 			in_dev_for_each_ifa_rtnl(ifa, in_dev) {
3161 				addr.s6_addr32[3] = ifa->ifa_local;
3162 
3163 				if (ifa->ifa_scope == RT_SCOPE_LINK)
3164 					continue;
3165 				if (ifa->ifa_scope >= RT_SCOPE_HOST) {
3166 					if (idev->dev->flags&IFF_POINTOPOINT)
3167 						continue;
3168 					flag |= IFA_HOST;
3169 				}
3170 
3171 				add_addr(idev, &addr, plen, flag,
3172 					 IFAPROT_UNSPEC);
3173 				addrconf_prefix_route(&addr, plen, 0, idev->dev,
3174 						      0, pflags, GFP_KERNEL);
3175 			}
3176 		}
3177 	}
3178 }
3179 #endif
3180 
3181 static void init_loopback(struct net_device *dev)
3182 {
3183 	struct inet6_dev  *idev;
3184 
3185 	/* ::1 */
3186 
3187 	ASSERT_RTNL();
3188 
3189 	idev = ipv6_find_idev(dev);
3190 	if (IS_ERR(idev)) {
3191 		pr_debug("%s: add_dev failed\n", __func__);
3192 		return;
3193 	}
3194 
3195 	add_addr(idev, &in6addr_loopback, 128, IFA_HOST, IFAPROT_KERNEL_LO);
3196 }
3197 
3198 void addrconf_add_linklocal(struct inet6_dev *idev,
3199 			    const struct in6_addr *addr, u32 flags)
3200 {
3201 	struct ifa6_config cfg = {
3202 		.pfx = addr,
3203 		.plen = 64,
3204 		.ifa_flags = flags | IFA_F_PERMANENT,
3205 		.valid_lft = INFINITY_LIFE_TIME,
3206 		.preferred_lft = INFINITY_LIFE_TIME,
3207 		.scope = IFA_LINK,
3208 		.ifa_proto = IFAPROT_KERNEL_LL
3209 	};
3210 	struct inet6_ifaddr *ifp;
3211 
3212 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
3213 	if ((dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad ||
3214 	     idev->cnf.optimistic_dad) &&
3215 	    !dev_net(idev->dev)->ipv6.devconf_all->forwarding)
3216 		cfg.ifa_flags |= IFA_F_OPTIMISTIC;
3217 #endif
3218 
3219 	ifp = ipv6_add_addr(idev, &cfg, true, NULL);
3220 	if (!IS_ERR(ifp)) {
3221 		addrconf_prefix_route(&ifp->addr, ifp->prefix_len, 0, idev->dev,
3222 				      0, 0, GFP_ATOMIC);
3223 		addrconf_dad_start(ifp);
3224 		in6_ifa_put(ifp);
3225 	}
3226 }
3227 EXPORT_SYMBOL_GPL(addrconf_add_linklocal);
3228 
3229 static bool ipv6_reserved_interfaceid(struct in6_addr address)
3230 {
3231 	if ((address.s6_addr32[2] | address.s6_addr32[3]) == 0)
3232 		return true;
3233 
3234 	if (address.s6_addr32[2] == htonl(0x02005eff) &&
3235 	    ((address.s6_addr32[3] & htonl(0xfe000000)) == htonl(0xfe000000)))
3236 		return true;
3237 
3238 	if (address.s6_addr32[2] == htonl(0xfdffffff) &&
3239 	    ((address.s6_addr32[3] & htonl(0xffffff80)) == htonl(0xffffff80)))
3240 		return true;
3241 
3242 	return false;
3243 }
3244 
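/* RFC 7217 style stable-privacy interface identifier: SHA-1 over
 * (secret, prefix, hardware address, dad_count), retried with an
 * incremented dad_count while the result is a reserved interface
 * identifier, up to idgen_retries attempts.
 */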
3245 static int ipv6_generate_stable_address(struct in6_addr *address,
3246 					u8 dad_count,
3247 					const struct inet6_dev *idev)
3248 {
3249 	static DEFINE_SPINLOCK(lock);
3250 	static __u32 digest[SHA1_DIGEST_WORDS];
3251 	static __u32 workspace[SHA1_WORKSPACE_WORDS];
3252 
3253 	static union {
3254 		char __data[SHA1_BLOCK_SIZE];
3255 		struct {
3256 			struct in6_addr secret;
3257 			__be32 prefix[2];
3258 			unsigned char hwaddr[MAX_ADDR_LEN];
3259 			u8 dad_count;
3260 		} __packed;
3261 	} data;
3262 
3263 	struct in6_addr secret;
3264 	struct in6_addr temp;
3265 	struct net *net = dev_net(idev->dev);
3266 
3267 	BUILD_BUG_ON(sizeof(data.__data) != sizeof(data));
3268 
3269 	if (idev->cnf.stable_secret.initialized)
3270 		secret = idev->cnf.stable_secret.secret;
3271 	else if (net->ipv6.devconf_dflt->stable_secret.initialized)
3272 		secret = net->ipv6.devconf_dflt->stable_secret.secret;
3273 	else
3274 		return -1;
3275 
3276 retry:
3277 	spin_lock_bh(&lock);
3278 
3279 	sha1_init(digest);
3280 	memset(&data, 0, sizeof(data));
3281 	memset(workspace, 0, sizeof(workspace));
3282 	memcpy(data.hwaddr, idev->dev->perm_addr, idev->dev->addr_len);
3283 	data.prefix[0] = address->s6_addr32[0];
3284 	data.prefix[1] = address->s6_addr32[1];
3285 	data.secret = secret;
3286 	data.dad_count = dad_count;
3287 
3288 	sha1_transform(digest, data.__data, workspace);
3289 
3290 	temp = *address;
3291 	temp.s6_addr32[2] = (__force __be32)digest[0];
3292 	temp.s6_addr32[3] = (__force __be32)digest[1];
3293 
3294 	spin_unlock_bh(&lock);
3295 
3296 	if (ipv6_reserved_interfaceid(temp)) {
3297 		dad_count++;
3298 		if (dad_count > dev_net(idev->dev)->ipv6.sysctl.idgen_retries)
3299 			return -1;
3300 		goto retry;
3301 	}
3302 
3303 	*address = temp;
3304 	return 0;
3305 }
3306 
3307 static void ipv6_gen_mode_random_init(struct inet6_dev *idev)
3308 {
3309 	struct ipv6_stable_secret *s = &idev->cnf.stable_secret;
3310 
3311 	if (s->initialized)
3312 		return;
3313 	s = &idev->cnf.stable_secret;
3314 	get_random_bytes(&s->secret, sizeof(s->secret));
3315 	s->initialized = true;
3316 }
3317 
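/* Generate the link-local (fe80::/64) address for @idev according to
 * its addr_gen_mode.  If stable-privacy or EUI-64 generation fails and
 * @prefix_route is set, only the fe80::/64 prefix route is added.
 */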
3318 static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
3319 {
3320 	struct in6_addr addr;
3321 
3322 	/* no link local addresses on L3 master devices */
3323 	if (netif_is_l3_master(idev->dev))
3324 		return;
3325 
3326 	/* no link local addresses on devices flagged as slaves */
3327 	if (idev->dev->flags & IFF_SLAVE)
3328 		return;
3329 
3330 	ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
3331 
3332 	switch (idev->cnf.addr_gen_mode) {
3333 	case IN6_ADDR_GEN_MODE_RANDOM:
3334 		ipv6_gen_mode_random_init(idev);
3335 		fallthrough;
3336 	case IN6_ADDR_GEN_MODE_STABLE_PRIVACY:
3337 		if (!ipv6_generate_stable_address(&addr, 0, idev))
3338 			addrconf_add_linklocal(idev, &addr,
3339 					       IFA_F_STABLE_PRIVACY);
3340 		else if (prefix_route)
3341 			addrconf_prefix_route(&addr, 64, 0, idev->dev,
3342 					      0, 0, GFP_KERNEL);
3343 		break;
3344 	case IN6_ADDR_GEN_MODE_EUI64:
3345 		/* addrconf_add_linklocal also adds a prefix_route and we
3346 		 * only need to care about prefix routes if ipv6_generate_eui64
3347 		 * couldn't generate one.
3348 		 */
3349 		if (ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) == 0)
3350 			addrconf_add_linklocal(idev, &addr, 0);
3351 		else if (prefix_route)
3352 			addrconf_prefix_route(&addr, 64, 0, idev->dev,
3353 					      0, 0, GFP_KERNEL);
3354 		break;
3355 	case IN6_ADDR_GEN_MODE_NONE:
3356 	default:
3357 		/* will not add any link local address */
3358 		break;
3359 	}
3360 }
3361 
3362 static void addrconf_dev_config(struct net_device *dev)
3363 {
3364 	struct inet6_dev *idev;
3365 
3366 	ASSERT_RTNL();
3367 
3368 	if ((dev->type != ARPHRD_ETHER) &&
3369 	    (dev->type != ARPHRD_FDDI) &&
3370 	    (dev->type != ARPHRD_ARCNET) &&
3371 	    (dev->type != ARPHRD_INFINIBAND) &&
3372 	    (dev->type != ARPHRD_IEEE1394) &&
3373 	    (dev->type != ARPHRD_TUNNEL6) &&
3374 	    (dev->type != ARPHRD_6LOWPAN) &&
3375 	    (dev->type != ARPHRD_TUNNEL) &&
3376 	    (dev->type != ARPHRD_NONE) &&
3377 	    (dev->type != ARPHRD_RAWIP)) {
3378 		/* Alas, we support only Ethernet autoconfiguration. */
3379 		idev = __in6_dev_get(dev);
3380 		if (!IS_ERR_OR_NULL(idev) && dev->flags & IFF_UP &&
3381 		    dev->flags & IFF_MULTICAST)
3382 			ipv6_mc_up(idev);
3383 		return;
3384 	}
3385 
3386 	idev = addrconf_add_dev(dev);
3387 	if (IS_ERR(idev))
3388 		return;
3389 
3390 	/* this device type has no EUI support */
3391 	if (dev->type == ARPHRD_NONE &&
3392 	    idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
3393 		idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM;
3394 
3395 	addrconf_addr_gen(idev, false);
3396 }
3397 
3398 #if IS_ENABLED(CONFIG_IPV6_SIT)
3399 static void addrconf_sit_config(struct net_device *dev)
3400 {
3401 	struct inet6_dev *idev;
3402 
3403 	ASSERT_RTNL();
3404 
3405 	/*
3406 	 * Configure the tunnel with one of our IPv4
3407 	 * addresses... we should configure all of
3408 	 * our v4 addrs in the tunnel
3409 	 */
3410 
3411 	idev = ipv6_find_idev(dev);
3412 	if (IS_ERR(idev)) {
3413 		pr_debug("%s: add_dev failed\n", __func__);
3414 		return;
3415 	}
3416 
3417 	if (dev->priv_flags & IFF_ISATAP) {
3418 		addrconf_addr_gen(idev, false);
3419 		return;
3420 	}
3421 
3422 	add_v4_addrs(idev);
3423 
3424 	if (dev->flags&IFF_POINTOPOINT)
3425 		addrconf_add_mroute(dev);
3426 }
3427 #endif
3428 
3429 #if IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE)
3430 static void addrconf_gre_config(struct net_device *dev)
3431 {
3432 	struct inet6_dev *idev;
3433 
3434 	ASSERT_RTNL();
3435 
3436 	idev = ipv6_find_idev(dev);
3437 	if (IS_ERR(idev)) {
3438 		pr_debug("%s: add_dev failed\n", __func__);
3439 		return;
3440 	}
3441 
3442 	if (dev->type == ARPHRD_ETHER) {
3443 		addrconf_addr_gen(idev, true);
3444 		return;
3445 	}
3446 
3447 	add_v4_addrs(idev);
3448 
3449 	if (dev->flags & IFF_POINTOPOINT)
3450 		addrconf_add_mroute(dev);
3451 }
3452 #endif
3453 
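/* Called when an interface comes back up: re-create the host route for
 * a permanent address if it was removed while the device was down,
 * restore its prefix route unless IFA_F_NOPREFIXROUTE is set, and
 * restart DAD if it was left pending.
 */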
3454 static int fixup_permanent_addr(struct net *net,
3455 				struct inet6_dev *idev,
3456 				struct inet6_ifaddr *ifp)
3457 {
3458 	/* !fib6_node means the host route was removed from the
3459 	 * FIB, for example, if 'lo' device is taken down. In that
3460 	 * case regenerate the host route.
3461 	 */
3462 	if (!ifp->rt || !ifp->rt->fib6_node) {
3463 		struct fib6_info *f6i, *prev;
3464 
3465 		f6i = addrconf_f6i_alloc(net, idev, &ifp->addr, false,
3466 					 GFP_ATOMIC);
3467 		if (IS_ERR(f6i))
3468 			return PTR_ERR(f6i);
3469 
3470 		/* ifp->rt can be accessed outside of rtnl */
3471 		spin_lock(&ifp->lock);
3472 		prev = ifp->rt;
3473 		ifp->rt = f6i;
3474 		spin_unlock(&ifp->lock);
3475 
3476 		fib6_info_release(prev);
3477 	}
3478 
3479 	if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {
3480 		addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
3481 				      ifp->rt_priority, idev->dev, 0, 0,
3482 				      GFP_ATOMIC);
3483 	}
3484 
3485 	if (ifp->state == INET6_IFADDR_STATE_PREDAD)
3486 		addrconf_dad_start(ifp);
3487 
3488 	return 0;
3489 }
3490 
3491 static void addrconf_permanent_addr(struct net *net, struct net_device *dev)
3492 {
3493 	struct inet6_ifaddr *ifp, *tmp;
3494 	struct inet6_dev *idev;
3495 
3496 	idev = __in6_dev_get(dev);
3497 	if (!idev)
3498 		return;
3499 
3500 	write_lock_bh(&idev->lock);
3501 
3502 	list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
3503 		if ((ifp->flags & IFA_F_PERMANENT) &&
3504 		    fixup_permanent_addr(net, idev, ifp) < 0) {
3505 			write_unlock_bh(&idev->lock);
3506 			in6_ifa_hold(ifp);
3507 			ipv6_del_addr(ifp);
3508 			write_lock_bh(&idev->lock);
3509 
3510 			net_info_ratelimited("%s: Failed to add prefix route for address %pI6c; dropping\n",
3511 					     idev->dev->name, &ifp->addr);
3512 		}
3513 	}
3514 
3515 	write_unlock_bh(&idev->lock);
3516 }
3517 
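/* netdevice notifier: reacts to REGISTER/UP/CHANGE/CHANGEMTU/DOWN/
 * UNREGISTER/CHANGENAME/CHANGEUPPER events to create or tear down the
 * inet6_dev state and (re)start stateless address autoconfiguration.
 */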
3518 static int addrconf_notify(struct notifier_block *this, unsigned long event,
3519 			   void *ptr)
3520 {
3521 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3522 	struct netdev_notifier_change_info *change_info;
3523 	struct netdev_notifier_changeupper_info *info;
3524 	struct inet6_dev *idev = __in6_dev_get(dev);
3525 	struct net *net = dev_net(dev);
3526 	int run_pending = 0;
3527 	int err;
3528 
3529 	switch (event) {
3530 	case NETDEV_REGISTER:
3531 		if (!idev && dev->mtu >= IPV6_MIN_MTU) {
3532 			idev = ipv6_add_dev(dev);
3533 			if (IS_ERR(idev))
3534 				return notifier_from_errno(PTR_ERR(idev));
3535 		}
3536 		break;
3537 
3538 	case NETDEV_CHANGEMTU:
3539 		/* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
3540 		if (dev->mtu < IPV6_MIN_MTU) {
3541 			addrconf_ifdown(dev, dev != net->loopback_dev);
3542 			break;
3543 		}
3544 
3545 		if (idev) {
3546 			rt6_mtu_change(dev, dev->mtu);
3547 			idev->cnf.mtu6 = dev->mtu;
3548 			break;
3549 		}
3550 
3551 		/* allocate new idev */
3552 		idev = ipv6_add_dev(dev);
3553 		if (IS_ERR(idev))
3554 			break;
3555 
3556 		/* device is still not ready */
3557 		if (!(idev->if_flags & IF_READY))
3558 			break;
3559 
3560 		run_pending = 1;
3561 		fallthrough;
3562 	case NETDEV_UP:
3563 	case NETDEV_CHANGE:
3564 		if (dev->flags & IFF_SLAVE)
3565 			break;
3566 
3567 		if (idev && idev->cnf.disable_ipv6)
3568 			break;
3569 
3570 		if (event == NETDEV_UP) {
3571 			/* restore routes for permanent addresses */
3572 			addrconf_permanent_addr(net, dev);
3573 
3574 			if (!addrconf_link_ready(dev)) {
3575 				/* device is not ready yet. */
3576 				pr_debug("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
3577 					 dev->name);
3578 				break;
3579 			}
3580 
3581 			if (!idev && dev->mtu >= IPV6_MIN_MTU)
3582 				idev = ipv6_add_dev(dev);
3583 
3584 			if (!IS_ERR_OR_NULL(idev)) {
3585 				idev->if_flags |= IF_READY;
3586 				run_pending = 1;
3587 			}
3588 		} else if (event == NETDEV_CHANGE) {
3589 			if (!addrconf_link_ready(dev)) {
3590 				/* device is still not ready. */
3591 				rt6_sync_down_dev(dev, event);
3592 				break;
3593 			}
3594 
3595 			if (!IS_ERR_OR_NULL(idev)) {
3596 				if (idev->if_flags & IF_READY) {
3597 					/* device is already configured -
3598 					 * but resend MLD reports, we might
3599 					 * have roamed and need to update
3600 					 * multicast snooping switches
3601 					 */
3602 					ipv6_mc_up(idev);
3603 					change_info = ptr;
3604 					if (change_info->flags_changed & IFF_NOARP)
3605 						addrconf_dad_run(idev, true);
3606 					rt6_sync_up(dev, RTNH_F_LINKDOWN);
3607 					break;
3608 				}
3609 				idev->if_flags |= IF_READY;
3610 			}
3611 
3612 			pr_info("ADDRCONF(NETDEV_CHANGE): %s: link becomes ready\n",
3613 				dev->name);
3614 
3615 			run_pending = 1;
3616 		}
3617 
3618 		switch (dev->type) {
3619 #if IS_ENABLED(CONFIG_IPV6_SIT)
3620 		case ARPHRD_SIT:
3621 			addrconf_sit_config(dev);
3622 			break;
3623 #endif
3624 #if IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE)
3625 		case ARPHRD_IP6GRE:
3626 		case ARPHRD_IPGRE:
3627 			addrconf_gre_config(dev);
3628 			break;
3629 #endif
3630 		case ARPHRD_LOOPBACK:
3631 			init_loopback(dev);
3632 			break;
3633 
3634 		default:
3635 			addrconf_dev_config(dev);
3636 			break;
3637 		}
3638 
3639 		if (!IS_ERR_OR_NULL(idev)) {
3640 			if (run_pending)
3641 				addrconf_dad_run(idev, false);
3642 
3643 			/* Device has an address by now */
3644 			rt6_sync_up(dev, RTNH_F_DEAD);
3645 
3646 			/*
3647 			 * If the MTU changed while the interface was down,
3648 			 * the new MTU must be reflected in the idev as well
3649 			 * as in the routes when the interface comes back up.
3650 			 */
3651 			if (idev->cnf.mtu6 != dev->mtu &&
3652 			    dev->mtu >= IPV6_MIN_MTU) {
3653 				rt6_mtu_change(dev, dev->mtu);
3654 				idev->cnf.mtu6 = dev->mtu;
3655 			}
3656 			idev->tstamp = jiffies;
3657 			inet6_ifinfo_notify(RTM_NEWLINK, idev);
3658 
3659 			/*
3660 			 * If the MTU changed during down to a value lower than
3661 			 * IPV6_MIN_MTU, stop IPv6 on this interface.
3662 			 */
3663 			if (dev->mtu < IPV6_MIN_MTU)
3664 				addrconf_ifdown(dev, dev != net->loopback_dev);
3665 		}
3666 		break;
3667 
3668 	case NETDEV_DOWN:
3669 	case NETDEV_UNREGISTER:
3670 		/*
3671 		 *	Remove all addresses from this interface.
3672 		 */
3673 		addrconf_ifdown(dev, event != NETDEV_DOWN);
3674 		break;
3675 
3676 	case NETDEV_CHANGENAME:
3677 		if (idev) {
3678 			snmp6_unregister_dev(idev);
3679 			addrconf_sysctl_unregister(idev);
3680 			err = addrconf_sysctl_register(idev);
3681 			if (err)
3682 				return notifier_from_errno(err);
3683 			err = snmp6_register_dev(idev);
3684 			if (err) {
3685 				addrconf_sysctl_unregister(idev);
3686 				return notifier_from_errno(err);
3687 			}
3688 		}
3689 		break;
3690 
3691 	case NETDEV_PRE_TYPE_CHANGE:
3692 	case NETDEV_POST_TYPE_CHANGE:
3693 		if (idev)
3694 			addrconf_type_change(dev, event);
3695 		break;
3696 
3697 	case NETDEV_CHANGEUPPER:
3698 		info = ptr;
3699 
3700 		/* flush all routes if dev is linked to or unlinked from
3701 		 * an L3 master device (e.g., VRF)
3702 		 */
3703 		if (info->upper_dev && netif_is_l3_master(info->upper_dev))
3704 			addrconf_ifdown(dev, false);
3705 	}
3706 
3707 	return NOTIFY_OK;
3708 }
3709 
3710 /*
3711  *	addrconf module should be notified of a device going up
3712  */
3713 static struct notifier_block ipv6_dev_notf = {
3714 	.notifier_call = addrconf_notify,
3715 	.priority = ADDRCONF_NOTIFY_PRIORITY,
3716 };
3717 
3718 static void addrconf_type_change(struct net_device *dev, unsigned long event)
3719 {
3720 	struct inet6_dev *idev;
3721 	ASSERT_RTNL();
3722 
3723 	idev = __in6_dev_get(dev);
3724 
3725 	if (event == NETDEV_POST_TYPE_CHANGE)
3726 		ipv6_mc_remap(idev);
3727 	else if (event == NETDEV_PRE_TYPE_CHANGE)
3728 		ipv6_mc_unmap(idev);
3729 }
3730 
3731 static bool addr_is_local(const struct in6_addr *addr)
3732 {
3733 	return ipv6_addr_type(addr) &
3734 		(IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
3735 }
3736 
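/* Bring IPv6 down on @dev: @unregister distinguishes NETDEV_UNREGISTER
 * from NETDEV_DOWN.  Addresses are removed from the hash table and the
 * per-device list, except that permanent, non link-local addresses may
 * be kept across a down event when keep_addr_on_down is enabled.
 */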
3737 static int addrconf_ifdown(struct net_device *dev, bool unregister)
3738 {
3739 	unsigned long event = unregister ? NETDEV_UNREGISTER : NETDEV_DOWN;
3740 	struct net *net = dev_net(dev);
3741 	struct inet6_dev *idev;
3742 	struct inet6_ifaddr *ifa;
3743 	LIST_HEAD(tmp_addr_list);
3744 	bool keep_addr = false;
3745 	bool was_ready;
3746 	int state, i;
3747 
3748 	ASSERT_RTNL();
3749 
3750 	rt6_disable_ip(dev, event);
3751 
3752 	idev = __in6_dev_get(dev);
3753 	if (!idev)
3754 		return -ENODEV;
3755 
3756 	/*
3757 	 * Step 1: remove reference to ipv6 device from parent device.
3758 	 *	   Do not dev_put!
3759 	 */
3760 	if (unregister) {
3761 		idev->dead = 1;
3762 
3763 		/* protected by rtnl_lock */
3764 		RCU_INIT_POINTER(dev->ip6_ptr, NULL);
3765 
3766 		/* Step 1.5: remove snmp6 entry */
3767 		snmp6_unregister_dev(idev);
3768 
3769 	}
3770 
3771 	/* combine the user config with event to determine if permanent
3772 	 * addresses are to be removed from address hash table
3773 	 */
3774 	if (!unregister && !idev->cnf.disable_ipv6) {
3775 		/* aggregate the system setting and interface setting */
3776 		int _keep_addr = net->ipv6.devconf_all->keep_addr_on_down;
3777 
3778 		if (!_keep_addr)
3779 			_keep_addr = idev->cnf.keep_addr_on_down;
3780 
3781 		keep_addr = (_keep_addr > 0);
3782 	}
3783 
3784 	/* Step 2: clear hash table */
3785 	for (i = 0; i < IN6_ADDR_HSIZE; i++) {
3786 		struct hlist_head *h = &net->ipv6.inet6_addr_lst[i];
3787 
3788 		spin_lock_bh(&net->ipv6.addrconf_hash_lock);
3789 restart:
3790 		hlist_for_each_entry_rcu(ifa, h, addr_lst) {
3791 			if (ifa->idev == idev) {
3792 				addrconf_del_dad_work(ifa);
3793 				/* combined flag + permanent flag decide if
3794 				 * address is retained on a down event
3795 				 */
3796 				if (!keep_addr ||
3797 				    !(ifa->flags & IFA_F_PERMANENT) ||
3798 				    addr_is_local(&ifa->addr)) {
3799 					hlist_del_init_rcu(&ifa->addr_lst);
3800 					goto restart;
3801 				}
3802 			}
3803 		}
3804 		spin_unlock_bh(&net->ipv6.addrconf_hash_lock);
3805 	}
3806 
3807 	write_lock_bh(&idev->lock);
3808 
3809 	addrconf_del_rs_timer(idev);
3810 
3811 	/* Step 3: clear flags for stateless addrconf, repeated down
3812 	 *         detection
3813 	 */
3814 	was_ready = idev->if_flags & IF_READY;
3815 	if (!unregister)
3816 		idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
3817 
3818 	/* Step 4: clear tempaddr list */
3819 	while (!list_empty(&idev->tempaddr_list)) {
3820 		ifa = list_first_entry(&idev->tempaddr_list,
3821 				       struct inet6_ifaddr, tmp_list);
3822 		list_del(&ifa->tmp_list);
3823 		write_unlock_bh(&idev->lock);
3824 		spin_lock_bh(&ifa->lock);
3825 
3826 		if (ifa->ifpub) {
3827 			in6_ifa_put(ifa->ifpub);
3828 			ifa->ifpub = NULL;
3829 		}
3830 		spin_unlock_bh(&ifa->lock);
3831 		in6_ifa_put(ifa);
3832 		write_lock_bh(&idev->lock);
3833 	}
3834 
3835 	list_for_each_entry(ifa, &idev->addr_list, if_list)
3836 		list_add_tail(&ifa->if_list_aux, &tmp_addr_list);
3837 	write_unlock_bh(&idev->lock);
3838 
3839 	while (!list_empty(&tmp_addr_list)) {
3840 		struct fib6_info *rt = NULL;
3841 		bool keep;
3842 
3843 		ifa = list_first_entry(&tmp_addr_list,
3844 				       struct inet6_ifaddr, if_list_aux);
3845 		list_del(&ifa->if_list_aux);
3846 
3847 		addrconf_del_dad_work(ifa);
3848 
3849 		keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
3850 			!addr_is_local(&ifa->addr);
3851 
3852 		spin_lock_bh(&ifa->lock);
3853 
3854 		if (keep) {
3855 			/* set state to skip the notifier below */
3856 			state = INET6_IFADDR_STATE_DEAD;
3857 			ifa->state = INET6_IFADDR_STATE_PREDAD;
3858 			if (!(ifa->flags & IFA_F_NODAD))
3859 				ifa->flags |= IFA_F_TENTATIVE;
3860 
3861 			rt = ifa->rt;
3862 			ifa->rt = NULL;
3863 		} else {
3864 			state = ifa->state;
3865 			ifa->state = INET6_IFADDR_STATE_DEAD;
3866 		}
3867 
3868 		spin_unlock_bh(&ifa->lock);
3869 
3870 		if (rt)
3871 			ip6_del_rt(net, rt, false);
3872 
3873 		if (state != INET6_IFADDR_STATE_DEAD) {
3874 			__ipv6_ifa_notify(RTM_DELADDR, ifa);
3875 			inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
3876 		} else {
3877 			if (idev->cnf.forwarding)
3878 				addrconf_leave_anycast(ifa);
3879 			addrconf_leave_solict(ifa->idev, &ifa->addr);
3880 		}
3881 
3882 		if (!keep) {
3883 			write_lock_bh(&idev->lock);
3884 			list_del_rcu(&ifa->if_list);
3885 			write_unlock_bh(&idev->lock);
3886 			in6_ifa_put(ifa);
3887 		}
3888 	}
3889 
3890 	/* Step 5: Discard anycast and multicast list */
3891 	if (unregister) {
3892 		ipv6_ac_destroy_dev(idev);
3893 		ipv6_mc_destroy_dev(idev);
3894 	} else if (was_ready) {
3895 		ipv6_mc_down(idev);
3896 	}
3897 
3898 	idev->tstamp = jiffies;
3899 	idev->ra_mtu = 0;
3900 
3901 	/* Last: shoot the device (if unregistering) */
3902 	if (unregister) {
3903 		addrconf_sysctl_unregister(idev);
3904 		neigh_parms_release(&nd_tbl, idev->nd_parms);
3905 		neigh_ifdown(&nd_tbl, dev);
3906 		in6_dev_put(idev);
3907 	}
3908 	return 0;
3909 }
3910 
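/* Router solicitation timer: retransmit an RS (with RFC 3315 style backoff)
 * until a router advertisement is received or the configured number of
 * solicitations has been sent.
 */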
3911 static void addrconf_rs_timer(struct timer_list *t)
3912 {
3913 	struct inet6_dev *idev = from_timer(idev, t, rs_timer);
3914 	struct net_device *dev = idev->dev;
3915 	struct in6_addr lladdr;
3916 
3917 	write_lock(&idev->lock);
3918 	if (idev->dead || !(idev->if_flags & IF_READY))
3919 		goto out;
3920 
3921 	if (!ipv6_accept_ra(idev))
3922 		goto out;
3923 
3924 	/* Announcement received after solicitation was sent */
3925 	if (idev->if_flags & IF_RA_RCVD)
3926 		goto out;
3927 
3928 	if (idev->rs_probes++ < idev->cnf.rtr_solicits || idev->cnf.rtr_solicits < 0) {
3929 		write_unlock(&idev->lock);
3930 		if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
3931 			ndisc_send_rs(dev, &lladdr,
3932 				      &in6addr_linklocal_allrouters);
3933 		else
3934 			goto put;
3935 
3936 		write_lock(&idev->lock);
3937 		idev->rs_interval = rfc3315_s14_backoff_update(
3938 			idev->rs_interval, idev->cnf.rtr_solicit_max_interval);
3939 		/* The wait after the last probe can be shorter */
3940 		addrconf_mod_rs_timer(idev, (idev->rs_probes ==
3941 					     idev->cnf.rtr_solicits) ?
3942 				      idev->cnf.rtr_solicit_delay :
3943 				      idev->rs_interval);
3944 	} else {
3945 		/*
3946 		 * Note: we no longer support the deprecated "all on-link"
3947 		 * assumption.
3948 		 */
3949 		pr_debug("%s: no IPv6 routers present\n", idev->dev->name);
3950 	}
3951 
3952 out:
3953 	write_unlock(&idev->lock);
3954 put:
3955 	in6_dev_put(idev);
3956 }
3957 
3958 /*
3959  *	Duplicate Address Detection
3960  */
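/* Schedule the first DAD probe for an address, with a random delay unless
 * the address is optimistic, and pick a random nonce when enhanced DAD is
 * enabled.
 */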
3961 static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
3962 {
3963 	unsigned long rand_num;
3964 	struct inet6_dev *idev = ifp->idev;
3965 	u64 nonce;
3966 
3967 	if (ifp->flags & IFA_F_OPTIMISTIC)
3968 		rand_num = 0;
3969 	else
3970 		rand_num = prandom_u32() % (idev->cnf.rtr_solicit_delay ? : 1);
3971 
3972 	nonce = 0;
3973 	if (idev->cnf.enhanced_dad ||
3974 	    dev_net(idev->dev)->ipv6.devconf_all->enhanced_dad) {
3975 		do
3976 			get_random_bytes(&nonce, 6);
3977 		while (nonce == 0);
3978 	}
3979 	ifp->dad_nonce = nonce;
3980 	ifp->dad_probes = idev->cnf.dad_transmits;
3981 	addrconf_mod_dad_work(ifp, rand_num);
3982 }
3983 
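/* Start DAD for an address: join the solicited-node multicast group and
 * either kick off probing or complete immediately when DAD is not needed
 * (loopback/NOARP devices, accept_dad disabled, IFA_F_NODAD, ...).
 */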
3984 static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
3985 {
3986 	struct inet6_dev *idev = ifp->idev;
3987 	struct net_device *dev = idev->dev;
3988 	bool bump_id, notify = false;
3989 	struct net *net;
3990 
3991 	addrconf_join_solict(dev, &ifp->addr);
3992 
3993 	read_lock_bh(&idev->lock);
3994 	spin_lock(&ifp->lock);
3995 	if (ifp->state == INET6_IFADDR_STATE_DEAD)
3996 		goto out;
3997 
3998 	net = dev_net(dev);
3999 	if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
4000 	    (net->ipv6.devconf_all->accept_dad < 1 &&
4001 	     idev->cnf.accept_dad < 1) ||
4002 	    !(ifp->flags&IFA_F_TENTATIVE) ||
4003 	    ifp->flags & IFA_F_NODAD) {
4004 		bool send_na = false;
4005 
4006 		if (ifp->flags & IFA_F_TENTATIVE &&
4007 		    !(ifp->flags & IFA_F_OPTIMISTIC))
4008 			send_na = true;
4009 		bump_id = ifp->flags & IFA_F_TENTATIVE;
4010 		ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
4011 		spin_unlock(&ifp->lock);
4012 		read_unlock_bh(&idev->lock);
4013 
4014 		addrconf_dad_completed(ifp, bump_id, send_na);
4015 		return;
4016 	}
4017 
4018 	if (!(idev->if_flags & IF_READY)) {
4019 		spin_unlock(&ifp->lock);
4020 		read_unlock_bh(&idev->lock);
4021 		/*
4022 		 * If the device is not ready:
4023 		 * - keep it tentative if it is a permanent address.
4024 		 * - otherwise, kill it.
4025 		 */
4026 		in6_ifa_hold(ifp);
4027 		addrconf_dad_stop(ifp, 0);
4028 		return;
4029 	}
4030 
4031 	/*
4032 	 * Optimistic nodes can start receiving
4033 	 * frames right away.
4034 	 */
4035 	if (ifp->flags & IFA_F_OPTIMISTIC) {
4036 		ip6_ins_rt(net, ifp->rt);
4037 		if (ipv6_use_optimistic_addr(net, idev)) {
4038 			/* Because optimistic nodes can use this address,
4039 			 * notify listeners. If DAD fails, RTM_DELADDR is sent.
4040 			 */
4041 			notify = true;
4042 		}
4043 	}
4044 
4045 	addrconf_dad_kick(ifp);
4046 out:
4047 	spin_unlock(&ifp->lock);
4048 	read_unlock_bh(&idev->lock);
4049 	if (notify)
4050 		ipv6_ifa_notify(RTM_NEWADDR, ifp);
4051 }
4052 
4053 static void addrconf_dad_start(struct inet6_ifaddr *ifp)
4054 {
4055 	bool begin_dad = false;
4056 
4057 	spin_lock_bh(&ifp->lock);
4058 	if (ifp->state != INET6_IFADDR_STATE_DEAD) {
4059 		ifp->state = INET6_IFADDR_STATE_PREDAD;
4060 		begin_dad = true;
4061 	}
4062 	spin_unlock_bh(&ifp->lock);
4063 
4064 	if (begin_dad)
4065 		addrconf_mod_dad_work(ifp, 0);
4066 }
4067 
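/* Deferred DAD work: begin or abort DAD depending on the address state,
 * and otherwise count down the remaining probes, sending one neighbour
 * solicitation per invocation until DAD completes.
 */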
4068 static void addrconf_dad_work(struct work_struct *w)
4069 {
4070 	struct inet6_ifaddr *ifp = container_of(to_delayed_work(w),
4071 						struct inet6_ifaddr,
4072 						dad_work);
4073 	struct inet6_dev *idev = ifp->idev;
4074 	bool bump_id, disable_ipv6 = false;
4075 	struct in6_addr mcaddr;
4076 
4077 	enum {
4078 		DAD_PROCESS,
4079 		DAD_BEGIN,
4080 		DAD_ABORT,
4081 	} action = DAD_PROCESS;
4082 
4083 	rtnl_lock();
4084 
4085 	spin_lock_bh(&ifp->lock);
4086 	if (ifp->state == INET6_IFADDR_STATE_PREDAD) {
4087 		action = DAD_BEGIN;
4088 		ifp->state = INET6_IFADDR_STATE_DAD;
4089 	} else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) {
4090 		action = DAD_ABORT;
4091 		ifp->state = INET6_IFADDR_STATE_POSTDAD;
4092 
4093 		if ((dev_net(idev->dev)->ipv6.devconf_all->accept_dad > 1 ||
4094 		     idev->cnf.accept_dad > 1) &&
4095 		    !idev->cnf.disable_ipv6 &&
4096 		    !(ifp->flags & IFA_F_STABLE_PRIVACY)) {
4097 			struct in6_addr addr;
4098 
4099 			addr.s6_addr32[0] = htonl(0xfe800000);
4100 			addr.s6_addr32[1] = 0;
4101 
4102 			if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
4103 			    ipv6_addr_equal(&ifp->addr, &addr)) {
4104 				/* DAD failed for link-local based on MAC */
4105 				idev->cnf.disable_ipv6 = 1;
4106 
4107 				pr_info("%s: IPv6 being disabled!\n",
4108 					ifp->idev->dev->name);
4109 				disable_ipv6 = true;
4110 			}
4111 		}
4112 	}
4113 	spin_unlock_bh(&ifp->lock);
4114 
4115 	if (action == DAD_BEGIN) {
4116 		addrconf_dad_begin(ifp);
4117 		goto out;
4118 	} else if (action == DAD_ABORT) {
4119 		in6_ifa_hold(ifp);
4120 		addrconf_dad_stop(ifp, 1);
4121 		if (disable_ipv6)
4122 			addrconf_ifdown(idev->dev, false);
4123 		goto out;
4124 	}
4125 
4126 	if (!ifp->dad_probes && addrconf_dad_end(ifp))
4127 		goto out;
4128 
4129 	write_lock_bh(&idev->lock);
4130 	if (idev->dead || !(idev->if_flags & IF_READY)) {
4131 		write_unlock_bh(&idev->lock);
4132 		goto out;
4133 	}
4134 
4135 	spin_lock(&ifp->lock);
4136 	if (ifp->state == INET6_IFADDR_STATE_DEAD) {
4137 		spin_unlock(&ifp->lock);
4138 		write_unlock_bh(&idev->lock);
4139 		goto out;
4140 	}
4141 
4142 	if (ifp->dad_probes == 0) {
4143 		bool send_na = false;
4144 
4145 		/*
4146 		 * DAD was successful
4147 		 */
4148 
4149 		if (ifp->flags & IFA_F_TENTATIVE &&
4150 		    !(ifp->flags & IFA_F_OPTIMISTIC))
4151 			send_na = true;
4152 		bump_id = ifp->flags & IFA_F_TENTATIVE;
4153 		ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
4154 		spin_unlock(&ifp->lock);
4155 		write_unlock_bh(&idev->lock);
4156 
4157 		addrconf_dad_completed(ifp, bump_id, send_na);
4158 
4159 		goto out;
4160 	}
4161 
4162 	ifp->dad_probes--;
4163 	addrconf_mod_dad_work(ifp,
4164 			      max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME),
4165 				  HZ/100));
4166 	spin_unlock(&ifp->lock);
4167 	write_unlock_bh(&idev->lock);
4168 
4169 	/* send a neighbour solicitation for our addr */
4170 	addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
4171 	ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any,
4172 		      ifp->dad_nonce);
4173 out:
4174 	in6_ifa_put(ifp);
4175 	rtnl_unlock();
4176 }
4177 
4178 /* ifp->idev must be at least read locked */
4179 static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
4180 {
4181 	struct inet6_ifaddr *ifpiter;
4182 	struct inet6_dev *idev = ifp->idev;
4183 
4184 	list_for_each_entry_reverse(ifpiter, &idev->addr_list, if_list) {
4185 		if (ifpiter->scope > IFA_LINK)
4186 			break;
4187 		if (ifp != ifpiter && ifpiter->scope == IFA_LINK &&
4188 		    (ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|
4189 				       IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) ==
4190 		    IFA_F_PERMANENT)
4191 			return false;
4192 	}
4193 	return true;
4194 }
4195 
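/* DAD finished for an address: announce it (RTM_NEWADDR), resend the MLD
 * report with the now-valid link-local source, optionally send an
 * unsolicited NA, and start router solicitations for a lone link-local
 * address.
 */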
4196 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
4197 				   bool send_na)
4198 {
4199 	struct net_device *dev = ifp->idev->dev;
4200 	struct in6_addr lladdr;
4201 	bool send_rs, send_mld;
4202 
4203 	addrconf_del_dad_work(ifp);
4204 
4205 	/*
4206 	 *	Configure the address for reception. Now it is valid.
4207 	 */
4208 
4209 	ipv6_ifa_notify(RTM_NEWADDR, ifp);
4210 
4211 	/* If the added prefix is link-local and we are prepared to process
4212 	   router advertisements, start sending router solicitations.
4213 	 */
4214 
4215 	read_lock_bh(&ifp->idev->lock);
4216 	send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
4217 	send_rs = send_mld &&
4218 		  ipv6_accept_ra(ifp->idev) &&
4219 		  ifp->idev->cnf.rtr_solicits != 0 &&
4220 		  (dev->flags & IFF_LOOPBACK) == 0 &&
4221 		  (dev->type != ARPHRD_TUNNEL);
4222 	read_unlock_bh(&ifp->idev->lock);
4223 
4224 	/* While DAD is in progress the MLD report's source address is in6addr_any.
4225 	 * Resend it with the proper link-local address now.
4226 	 */
4227 	if (send_mld)
4228 		ipv6_mc_dad_complete(ifp->idev);
4229 
4230 	/* send unsolicited NA if enabled */
4231 	if (send_na &&
4232 	    (ifp->idev->cnf.ndisc_notify ||
4233 	     dev_net(dev)->ipv6.devconf_all->ndisc_notify)) {
4234 		ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifp->addr,
4235 			      /*router=*/ !!ifp->idev->cnf.forwarding,
4236 			      /*solicited=*/ false, /*override=*/ true,
4237 			      /*inc_opt=*/ true);
4238 	}
4239 
4240 	if (send_rs) {
4241 		/*
4242 		 *	If a host has already performed a random delay
4243 		 *	[...] as part of DAD [...] there is no need
4244 		 *	to delay again before sending the first RS
4245 		 */
4246 		if (ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
4247 			return;
4248 		ndisc_send_rs(dev, &lladdr, &in6addr_linklocal_allrouters);
4249 
4250 		write_lock_bh(&ifp->idev->lock);
4251 		spin_lock(&ifp->lock);
4252 		ifp->idev->rs_interval = rfc3315_s14_backoff_init(
4253 			ifp->idev->cnf.rtr_solicit_interval);
4254 		ifp->idev->rs_probes = 1;
4255 		ifp->idev->if_flags |= IF_RS_SENT;
4256 		addrconf_mod_rs_timer(ifp->idev, ifp->idev->rs_interval);
4257 		spin_unlock(&ifp->lock);
4258 		write_unlock_bh(&ifp->idev->lock);
4259 	}
4260 
4261 	if (bump_id)
4262 		rt_genid_bump_ipv6(dev_net(dev));
4263 
4264 	/* Make sure that a new temporary address will be created
4265 	 * before this temporary address becomes deprecated.
4266 	 */
4267 	if (ifp->flags & IFA_F_TEMPORARY)
4268 		addrconf_verify_rtnl(dev_net(dev));
4269 }
4270 
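/* Kick DAD for every address on the device that is still tentative and in
 * the DAD state; with restart set, reset each address to PREDAD first and
 * re-kick DAD unconditionally.
 */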
4271 static void addrconf_dad_run(struct inet6_dev *idev, bool restart)
4272 {
4273 	struct inet6_ifaddr *ifp;
4274 
4275 	read_lock_bh(&idev->lock);
4276 	list_for_each_entry(ifp, &idev->addr_list, if_list) {
4277 		spin_lock(&ifp->lock);
4278 		if ((ifp->flags & IFA_F_TENTATIVE &&
4279 		     ifp->state == INET6_IFADDR_STATE_DAD) || restart) {
4280 			if (restart)
4281 				ifp->state = INET6_IFADDR_STATE_PREDAD;
4282 			addrconf_dad_kick(ifp);
4283 		}
4284 		spin_unlock(&ifp->lock);
4285 	}
4286 	read_unlock_bh(&idev->lock);
4287 }
4288 
4289 #ifdef CONFIG_PROC_FS
4290 struct if6_iter_state {
4291 	struct seq_net_private p;
4292 	int bucket;
4293 	int offset;
4294 };
4295 
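/* /proc/net/if_inet6 iterator: find the first address at or after the
 * bucket/offset position recorded in the iterator state.
 */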
4296 static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
4297 {
4298 	struct if6_iter_state *state = seq->private;
4299 	struct net *net = seq_file_net(seq);
4300 	struct inet6_ifaddr *ifa = NULL;
4301 	int p = 0;
4302 
4303 	/* initial bucket if pos is 0 */
4304 	if (pos == 0) {
4305 		state->bucket = 0;
4306 		state->offset = 0;
4307 	}
4308 
4309 	for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
4310 		hlist_for_each_entry_rcu(ifa, &net->ipv6.inet6_addr_lst[state->bucket],
4311 					 addr_lst) {
4312 			/* sync with offset */
4313 			if (p < state->offset) {
4314 				p++;
4315 				continue;
4316 			}
4317 			return ifa;
4318 		}
4319 
4320 		/* prepare for next bucket */
4321 		state->offset = 0;
4322 		p = 0;
4323 	}
4324 	return NULL;
4325 }
4326 
4327 static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
4328 					 struct inet6_ifaddr *ifa)
4329 {
4330 	struct if6_iter_state *state = seq->private;
4331 	struct net *net = seq_file_net(seq);
4332 
4333 	hlist_for_each_entry_continue_rcu(ifa, addr_lst) {
4334 		state->offset++;
4335 		return ifa;
4336 	}
4337 
4338 	state->offset = 0;
4339 	while (++state->bucket < IN6_ADDR_HSIZE) {
4340 		hlist_for_each_entry_rcu(ifa,
4341 				     &net->ipv6.inet6_addr_lst[state->bucket], addr_lst) {
4342 			return ifa;
4343 		}
4344 	}
4345 
4346 	return NULL;
4347 }
4348 
4349 static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
4350 	__acquires(rcu)
4351 {
4352 	rcu_read_lock();
4353 	return if6_get_first(seq, *pos);
4354 }
4355 
4356 static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4357 {
4358 	struct inet6_ifaddr *ifa;
4359 
4360 	ifa = if6_get_next(seq, v);
4361 	++*pos;
4362 	return ifa;
4363 }
4364 
4365 static void if6_seq_stop(struct seq_file *seq, void *v)
4366 	__releases(rcu)
4367 {
4368 	rcu_read_unlock();
4369 }
4370 
4371 static int if6_seq_show(struct seq_file *seq, void *v)
4372 {
4373 	struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
4374 	seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n",
4375 		   &ifp->addr,
4376 		   ifp->idev->dev->ifindex,
4377 		   ifp->prefix_len,
4378 		   ifp->scope,
4379 		   (u8) ifp->flags,
4380 		   ifp->idev->dev->name);
4381 	return 0;
4382 }
4383 
4384 static const struct seq_operations if6_seq_ops = {
4385 	.start	= if6_seq_start,
4386 	.next	= if6_seq_next,
4387 	.show	= if6_seq_show,
4388 	.stop	= if6_seq_stop,
4389 };
4390 
4391 static int __net_init if6_proc_net_init(struct net *net)
4392 {
4393 	if (!proc_create_net("if_inet6", 0444, net->proc_net, &if6_seq_ops,
4394 			sizeof(struct if6_iter_state)))
4395 		return -ENOMEM;
4396 	return 0;
4397 }
4398 
4399 static void __net_exit if6_proc_net_exit(struct net *net)
4400 {
4401 	remove_proc_entry("if_inet6", net->proc_net);
4402 }
4403 
4404 static struct pernet_operations if6_proc_net_ops = {
4405 	.init = if6_proc_net_init,
4406 	.exit = if6_proc_net_exit,
4407 };
4408 
4409 int __init if6_proc_init(void)
4410 {
4411 	return register_pernet_subsys(&if6_proc_net_ops);
4412 }
4413 
4414 void if6_proc_exit(void)
4415 {
4416 	unregister_pernet_subsys(&if6_proc_net_ops);
4417 }
4418 #endif	/* CONFIG_PROC_FS */
4419 
4420 #if IS_ENABLED(CONFIG_IPV6_MIP6)
4421 /* Check if address is a home address configured on any interface. */
4422 int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
4423 {
4424 	unsigned int hash = inet6_addr_hash(net, addr);
4425 	struct inet6_ifaddr *ifp = NULL;
4426 	int ret = 0;
4427 
4428 	rcu_read_lock();
4429 	hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
4430 		if (ipv6_addr_equal(&ifp->addr, addr) &&
4431 		    (ifp->flags & IFA_F_HOMEADDRESS)) {
4432 			ret = 1;
4433 			break;
4434 		}
4435 	}
4436 	rcu_read_unlock();
4437 	return ret;
4438 }
4439 #endif
4440 
4441 /* RFC 6554 specifies a check to avoid loops in segment routing by
4442  * determining whether the segment list contains any of the router's own
4443  * interface addresses.  Paraphrasing the RFC:
4444  *
4445  * To detect loops in the SRH, a router MUST determine if the SRH
4446  * includes multiple addresses assigned to any interface on that router.
4447  * If such addresses appear more than once and are separated by at least
4448  * one address not assigned to that router, the packet is considered to
4449  * contain a loop (in which case this helper returns 1).
4450  */
4451 int ipv6_chk_rpl_srh_loop(struct net *net, const struct in6_addr *segs,
4452 			  unsigned char nsegs)
4453 {
4454 	const struct in6_addr *addr;
4455 	int i, ret = 0, found = 0;
4456 	struct inet6_ifaddr *ifp;
4457 	bool separated = false;
4458 	unsigned int hash;
4459 	bool hash_found;
4460 
4461 	rcu_read_lock();
4462 	for (i = 0; i < nsegs; i++) {
4463 		addr = &segs[i];
4464 		hash = inet6_addr_hash(net, addr);
4465 
4466 		hash_found = false;
4467 		hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
4468 
4469 			if (ipv6_addr_equal(&ifp->addr, addr)) {
4470 				hash_found = true;
4471 				break;
4472 			}
4473 		}
4474 
4475 		if (hash_found) {
4476 			if (found > 1 && separated) {
4477 				ret = 1;
4478 				break;
4479 			}
4480 
4481 			separated = false;
4482 			found++;
4483 		} else {
4484 			separated = true;
4485 		}
4486 	}
4487 	rcu_read_unlock();
4488 
4489 	return ret;
4490 }
4491 
4492 /*
4493  *	Periodic address status verification
4494  */
4495 
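/* Walk the address hash table under RTNL: delete addresses whose valid
 * lifetime has expired, mark addresses past their preferred lifetime as
 * deprecated, regenerate temporary addresses that are about to be
 * deprecated, and reschedule the next verification run.
 */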
4496 static void addrconf_verify_rtnl(struct net *net)
4497 {
4498 	unsigned long now, next, next_sec, next_sched;
4499 	struct inet6_ifaddr *ifp;
4500 	int i;
4501 
4502 	ASSERT_RTNL();
4503 
4504 	rcu_read_lock_bh();
4505 	now = jiffies;
4506 	next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
4507 
4508 	cancel_delayed_work(&net->ipv6.addr_chk_work);
4509 
4510 	for (i = 0; i < IN6_ADDR_HSIZE; i++) {
4511 restart:
4512 		hlist_for_each_entry_rcu_bh(ifp, &net->ipv6.inet6_addr_lst[i], addr_lst) {
4513 			unsigned long age;
4514 
4515 			/* An IFA_F_PERMANENT address can still have a finite preferred
4516 			 * lifetime (preferred_lft set to neither zero nor infinity while
4517 			 * valid_lft is infinity): only skip it if preferred_lft is infinite.
4518 			 */
4519 			if ((ifp->flags & IFA_F_PERMANENT) &&
4520 			    (ifp->prefered_lft == INFINITY_LIFE_TIME))
4521 				continue;
4522 
4523 			spin_lock(&ifp->lock);
4524 			/* We try to batch several events at once. */
4525 			age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
4526 
4527 			if (ifp->valid_lft != INFINITY_LIFE_TIME &&
4528 			    age >= ifp->valid_lft) {
4529 				spin_unlock(&ifp->lock);
4530 				in6_ifa_hold(ifp);
4531 				rcu_read_unlock_bh();
4532 				ipv6_del_addr(ifp);
4533 				rcu_read_lock_bh();
4534 				goto restart;
4535 			} else if (ifp->prefered_lft == INFINITY_LIFE_TIME) {
4536 				spin_unlock(&ifp->lock);
4537 				continue;
4538 			} else if (age >= ifp->prefered_lft) {
4539 				/* jiffies - ifp->tstamp > age >= ifp->prefered_lft */
4540 				int deprecate = 0;
4541 
4542 				if (!(ifp->flags&IFA_F_DEPRECATED)) {
4543 					deprecate = 1;
4544 					ifp->flags |= IFA_F_DEPRECATED;
4545 				}
4546 
4547 				if ((ifp->valid_lft != INFINITY_LIFE_TIME) &&
4548 				    (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)))
4549 					next = ifp->tstamp + ifp->valid_lft * HZ;
4550 
4551 				spin_unlock(&ifp->lock);
4552 
4553 				if (deprecate) {
4554 					in6_ifa_hold(ifp);
4555 
4556 					ipv6_ifa_notify(0, ifp);
4557 					in6_ifa_put(ifp);
4558 					goto restart;
4559 				}
4560 			} else if ((ifp->flags&IFA_F_TEMPORARY) &&
4561 				   !(ifp->flags&IFA_F_TENTATIVE)) {
4562 				unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
4563 					ifp->idev->cnf.dad_transmits *
4564 					max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
4565 
4566 				if (age >= ifp->prefered_lft - regen_advance) {
4567 					struct inet6_ifaddr *ifpub = ifp->ifpub;
4568 					if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4569 						next = ifp->tstamp + ifp->prefered_lft * HZ;
4570 					if (!ifp->regen_count && ifpub) {
4571 						ifp->regen_count++;
4572 						in6_ifa_hold(ifp);
4573 						in6_ifa_hold(ifpub);
4574 						spin_unlock(&ifp->lock);
4575 
4576 						spin_lock(&ifpub->lock);
4577 						ifpub->regen_count = 0;
4578 						spin_unlock(&ifpub->lock);
4579 						rcu_read_unlock_bh();
4580 						ipv6_create_tempaddr(ifpub, true);
4581 						in6_ifa_put(ifpub);
4582 						in6_ifa_put(ifp);
4583 						rcu_read_lock_bh();
4584 						goto restart;
4585 					}
4586 				} else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next))
4587 					next = ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ;
4588 				spin_unlock(&ifp->lock);
4589 			} else {
4590 				/* ifp->prefered_lft <= ifp->valid_lft */
4591 				if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4592 					next = ifp->tstamp + ifp->prefered_lft * HZ;
4593 				spin_unlock(&ifp->lock);
4594 			}
4595 		}
4596 	}
4597 
4598 	next_sec = round_jiffies_up(next);
4599 	next_sched = next;
4600 
4601 	/* If rounded timeout is accurate enough, accept it. */
4602 	if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
4603 		next_sched = next_sec;
4604 
4605 	/* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
4606 	if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
4607 		next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
4608 
4609 	pr_debug("now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
4610 		 now, next, next_sec, next_sched);
4611 	mod_delayed_work(addrconf_wq, &net->ipv6.addr_chk_work, next_sched - now);
4612 	rcu_read_unlock_bh();
4613 }
4614 
4615 static void addrconf_verify_work(struct work_struct *w)
4616 {
4617 	struct net *net = container_of(to_delayed_work(w), struct net,
4618 				       ipv6.addr_chk_work);
4619 
4620 	rtnl_lock();
4621 	addrconf_verify_rtnl(net);
4622 	rtnl_unlock();
4623 }
4624 
4625 static void addrconf_verify(struct net *net)
4626 {
4627 	mod_delayed_work(addrconf_wq, &net->ipv6.addr_chk_work, 0);
4628 }
4629 
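/* Pick the address from the IFA_LOCAL/IFA_ADDRESS attributes; when both are
 * present and differ, IFA_LOCAL is the local address and IFA_ADDRESS is
 * returned via peer_pfx.
 */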
4630 static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local,
4631 				     struct in6_addr **peer_pfx)
4632 {
4633 	struct in6_addr *pfx = NULL;
4634 
4635 	*peer_pfx = NULL;
4636 
4637 	if (addr)
4638 		pfx = nla_data(addr);
4639 
4640 	if (local) {
4641 		if (pfx && nla_memcmp(local, pfx, sizeof(*pfx)))
4642 			*peer_pfx = pfx;
4643 		pfx = nla_data(local);
4644 	}
4645 
4646 	return pfx;
4647 }
4648 
4649 static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = {
4650 	[IFA_ADDRESS]		= { .len = sizeof(struct in6_addr) },
4651 	[IFA_LOCAL]		= { .len = sizeof(struct in6_addr) },
4652 	[IFA_CACHEINFO]		= { .len = sizeof(struct ifa_cacheinfo) },
4653 	[IFA_FLAGS]		= { .len = sizeof(u32) },
4654 	[IFA_RT_PRIORITY]	= { .len = sizeof(u32) },
4655 	[IFA_TARGET_NETNSID]	= { .type = NLA_S32 },
4656 	[IFA_PROTO]		= { .type = NLA_U8 },
4657 };
4658 
4659 static int
4660 inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
4661 		  struct netlink_ext_ack *extack)
4662 {
4663 	struct net *net = sock_net(skb->sk);
4664 	struct ifaddrmsg *ifm;
4665 	struct nlattr *tb[IFA_MAX+1];
4666 	struct in6_addr *pfx, *peer_pfx;
4667 	u32 ifa_flags;
4668 	int err;
4669 
4670 	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
4671 				     ifa_ipv6_policy, extack);
4672 	if (err < 0)
4673 		return err;
4674 
4675 	ifm = nlmsg_data(nlh);
4676 	pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4677 	if (!pfx)
4678 		return -EINVAL;
4679 
4680 	ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
4681 
4682 	/* We ignore other flags so far. */
4683 	ifa_flags &= IFA_F_MANAGETEMPADDR;
4684 
4685 	return inet6_addr_del(net, ifm->ifa_index, ifa_flags, pfx,
4686 			      ifm->ifa_prefixlen);
4687 }
4688 
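/* Update the prefix route that goes with an address: if the metric changed,
 * replace the route; otherwise just adjust (or clear) its expiry.
 */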
4689 static int modify_prefix_route(struct inet6_ifaddr *ifp,
4690 			       unsigned long expires, u32 flags,
4691 			       bool modify_peer)
4692 {
4693 	struct fib6_info *f6i;
4694 	u32 prio;
4695 
4696 	f6i = addrconf_get_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
4697 					ifp->prefix_len,
4698 					ifp->idev->dev, 0, RTF_DEFAULT, true);
4699 	if (!f6i)
4700 		return -ENOENT;
4701 
4702 	prio = ifp->rt_priority ? : IP6_RT_PRIO_ADDRCONF;
4703 	if (f6i->fib6_metric != prio) {
4704 		/* delete old one */
4705 		ip6_del_rt(dev_net(ifp->idev->dev), f6i, false);
4706 
4707 		/* add new one */
4708 		addrconf_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
4709 				      ifp->prefix_len,
4710 				      ifp->rt_priority, ifp->idev->dev,
4711 				      expires, flags, GFP_KERNEL);
4712 	} else {
4713 		if (!expires)
4714 			fib6_clean_expires(f6i);
4715 		else
4716 			fib6_set_expires(f6i, expires);
4717 
4718 		fib6_info_release(f6i);
4719 	}
4720 
4721 	return 0;
4722 }
4723 
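/* Apply an RTM_NEWADDR update to an existing address: refresh lifetimes and
 * flags, update the peer address and prefix route as needed, and let
 * manage_tempaddrs() adjust any temporary addresses tied to it.
 */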
4724 static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp,
4725 			     struct ifa6_config *cfg)
4726 {
4727 	u32 flags;
4728 	clock_t expires;
4729 	unsigned long timeout;
4730 	bool was_managetempaddr;
4731 	bool had_prefixroute;
4732 	bool new_peer = false;
4733 
4734 	ASSERT_RTNL();
4735 
4736 	if (!cfg->valid_lft || cfg->preferred_lft > cfg->valid_lft)
4737 		return -EINVAL;
4738 
4739 	if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR &&
4740 	    (ifp->flags & IFA_F_TEMPORARY || ifp->prefix_len != 64))
4741 		return -EINVAL;
4742 
4743 	if (!(ifp->flags & IFA_F_TENTATIVE) || ifp->flags & IFA_F_DADFAILED)
4744 		cfg->ifa_flags &= ~IFA_F_OPTIMISTIC;
4745 
4746 	timeout = addrconf_timeout_fixup(cfg->valid_lft, HZ);
4747 	if (addrconf_finite_timeout(timeout)) {
4748 		expires = jiffies_to_clock_t(timeout * HZ);
4749 		cfg->valid_lft = timeout;
4750 		flags = RTF_EXPIRES;
4751 	} else {
4752 		expires = 0;
4753 		flags = 0;
4754 		cfg->ifa_flags |= IFA_F_PERMANENT;
4755 	}
4756 
4757 	timeout = addrconf_timeout_fixup(cfg->preferred_lft, HZ);
4758 	if (addrconf_finite_timeout(timeout)) {
4759 		if (timeout == 0)
4760 			cfg->ifa_flags |= IFA_F_DEPRECATED;
4761 		cfg->preferred_lft = timeout;
4762 	}
4763 
4764 	if (cfg->peer_pfx &&
4765 	    memcmp(&ifp->peer_addr, cfg->peer_pfx, sizeof(struct in6_addr))) {
4766 		if (!ipv6_addr_any(&ifp->peer_addr))
4767 			cleanup_prefix_route(ifp, expires, true, true);
4768 		new_peer = true;
4769 	}
4770 
4771 	spin_lock_bh(&ifp->lock);
4772 	was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR;
4773 	had_prefixroute = ifp->flags & IFA_F_PERMANENT &&
4774 			  !(ifp->flags & IFA_F_NOPREFIXROUTE);
4775 	ifp->flags &= ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD |
4776 			IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR |
4777 			IFA_F_NOPREFIXROUTE);
4778 	ifp->flags |= cfg->ifa_flags;
4779 	ifp->tstamp = jiffies;
4780 	ifp->valid_lft = cfg->valid_lft;
4781 	ifp->prefered_lft = cfg->preferred_lft;
4782 	ifp->ifa_proto = cfg->ifa_proto;
4783 
4784 	if (cfg->rt_priority && cfg->rt_priority != ifp->rt_priority)
4785 		ifp->rt_priority = cfg->rt_priority;
4786 
4787 	if (new_peer)
4788 		ifp->peer_addr = *cfg->peer_pfx;
4789 
4790 	spin_unlock_bh(&ifp->lock);
4791 	if (!(ifp->flags&IFA_F_TENTATIVE))
4792 		ipv6_ifa_notify(0, ifp);
4793 
4794 	if (!(cfg->ifa_flags & IFA_F_NOPREFIXROUTE)) {
4795 		int rc = -ENOENT;
4796 
4797 		if (had_prefixroute)
4798 			rc = modify_prefix_route(ifp, expires, flags, false);
4799 
4800 		/* prefix route could have been deleted; if so restore it */
4801 		if (rc == -ENOENT) {
4802 			addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
4803 					      ifp->rt_priority, ifp->idev->dev,
4804 					      expires, flags, GFP_KERNEL);
4805 		}
4806 
4807 		if (had_prefixroute && !ipv6_addr_any(&ifp->peer_addr))
4808 			rc = modify_prefix_route(ifp, expires, flags, true);
4809 
4810 		if (rc == -ENOENT && !ipv6_addr_any(&ifp->peer_addr)) {
4811 			addrconf_prefix_route(&ifp->peer_addr, ifp->prefix_len,
4812 					      ifp->rt_priority, ifp->idev->dev,
4813 					      expires, flags, GFP_KERNEL);
4814 		}
4815 	} else if (had_prefixroute) {
4816 		enum cleanup_prefix_rt_t action;
4817 		unsigned long rt_expires;
4818 
4819 		write_lock_bh(&ifp->idev->lock);
4820 		action = check_cleanup_prefix_route(ifp, &rt_expires);
4821 		write_unlock_bh(&ifp->idev->lock);
4822 
4823 		if (action != CLEANUP_PREFIX_RT_NOP) {
4824 			cleanup_prefix_route(ifp, rt_expires,
4825 				action == CLEANUP_PREFIX_RT_DEL, false);
4826 		}
4827 	}
4828 
4829 	if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) {
4830 		if (was_managetempaddr &&
4831 		    !(ifp->flags & IFA_F_MANAGETEMPADDR)) {
4832 			cfg->valid_lft = 0;
4833 			cfg->preferred_lft = 0;
4834 		}
4835 		manage_tempaddrs(ifp->idev, ifp, cfg->valid_lft,
4836 				 cfg->preferred_lft, !was_managetempaddr,
4837 				 jiffies);
4838 	}
4839 
4840 	addrconf_verify_rtnl(net);
4841 
4842 	return 0;
4843 }
4844 
4845 static int
4846 inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
4847 		  struct netlink_ext_ack *extack)
4848 {
4849 	struct net *net = sock_net(skb->sk);
4850 	struct ifaddrmsg *ifm;
4851 	struct nlattr *tb[IFA_MAX+1];
4852 	struct in6_addr *peer_pfx;
4853 	struct inet6_ifaddr *ifa;
4854 	struct net_device *dev;
4855 	struct inet6_dev *idev;
4856 	struct ifa6_config cfg;
4857 	int err;
4858 
4859 	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
4860 				     ifa_ipv6_policy, extack);
4861 	if (err < 0)
4862 		return err;
4863 
4864 	memset(&cfg, 0, sizeof(cfg));
4865 
4866 	ifm = nlmsg_data(nlh);
4867 	cfg.pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4868 	if (!cfg.pfx)
4869 		return -EINVAL;
4870 
4871 	cfg.peer_pfx = peer_pfx;
4872 	cfg.plen = ifm->ifa_prefixlen;
4873 	if (tb[IFA_RT_PRIORITY])
4874 		cfg.rt_priority = nla_get_u32(tb[IFA_RT_PRIORITY]);
4875 
4876 	if (tb[IFA_PROTO])
4877 		cfg.ifa_proto = nla_get_u8(tb[IFA_PROTO]);
4878 
4879 	cfg.valid_lft = INFINITY_LIFE_TIME;
4880 	cfg.preferred_lft = INFINITY_LIFE_TIME;
4881 
4882 	if (tb[IFA_CACHEINFO]) {
4883 		struct ifa_cacheinfo *ci;
4884 
4885 		ci = nla_data(tb[IFA_CACHEINFO]);
4886 		cfg.valid_lft = ci->ifa_valid;
4887 		cfg.preferred_lft = ci->ifa_prefered;
4888 	}
4889 
4890 	dev = __dev_get_by_index(net, ifm->ifa_index);
4891 	if (!dev)
4892 		return -ENODEV;
4893 
4894 	if (tb[IFA_FLAGS])
4895 		cfg.ifa_flags = nla_get_u32(tb[IFA_FLAGS]);
4896 	else
4897 		cfg.ifa_flags = ifm->ifa_flags;
4898 
4899 	/* We ignore other flags so far. */
4900 	cfg.ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS |
4901 			 IFA_F_MANAGETEMPADDR | IFA_F_NOPREFIXROUTE |
4902 			 IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC;
4903 
4904 	idev = ipv6_find_idev(dev);
4905 	if (IS_ERR(idev))
4906 		return PTR_ERR(idev);
4907 
4908 	if (!ipv6_allow_optimistic_dad(net, idev))
4909 		cfg.ifa_flags &= ~IFA_F_OPTIMISTIC;
4910 
4911 	if (cfg.ifa_flags & IFA_F_NODAD &&
4912 	    cfg.ifa_flags & IFA_F_OPTIMISTIC) {
4913 		NL_SET_ERR_MSG(extack, "IFA_F_NODAD and IFA_F_OPTIMISTIC are mutually exclusive");
4914 		return -EINVAL;
4915 	}
4916 
4917 	ifa = ipv6_get_ifaddr(net, cfg.pfx, dev, 1);
4918 	if (!ifa) {
4919 		/*
4920 		 * It would be best to check for !NLM_F_CREATE here but
4921 		 * userspace already relies on not having to provide this.
4922 		 */
4923 		return inet6_addr_add(net, ifm->ifa_index, &cfg, extack);
4924 	}
4925 
4926 	if (nlh->nlmsg_flags & NLM_F_EXCL ||
4927 	    !(nlh->nlmsg_flags & NLM_F_REPLACE))
4928 		err = -EEXIST;
4929 	else
4930 		err = inet6_addr_modify(net, ifa, &cfg);
4931 
4932 	in6_ifa_put(ifa);
4933 
4934 	return err;
4935 }
4936 
4937 static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u32 flags,
4938 			  u8 scope, int ifindex)
4939 {
4940 	struct ifaddrmsg *ifm;
4941 
4942 	ifm = nlmsg_data(nlh);
4943 	ifm->ifa_family = AF_INET6;
4944 	ifm->ifa_prefixlen = prefixlen;
4945 	ifm->ifa_flags = flags;
4946 	ifm->ifa_scope = scope;
4947 	ifm->ifa_index = ifindex;
4948 }
4949 
4950 static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
4951 			 unsigned long tstamp, u32 preferred, u32 valid)
4952 {
4953 	struct ifa_cacheinfo ci;
4954 
4955 	ci.cstamp = cstamp_delta(cstamp);
4956 	ci.tstamp = cstamp_delta(tstamp);
4957 	ci.ifa_prefered = preferred;
4958 	ci.ifa_valid = valid;
4959 
4960 	return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
4961 }
4962 
4963 static inline int rt_scope(int ifa_scope)
4964 {
4965 	if (ifa_scope & IFA_HOST)
4966 		return RT_SCOPE_HOST;
4967 	else if (ifa_scope & IFA_LINK)
4968 		return RT_SCOPE_LINK;
4969 	else if (ifa_scope & IFA_SITE)
4970 		return RT_SCOPE_SITE;
4971 	else
4972 		return RT_SCOPE_UNIVERSE;
4973 }
4974 
4975 static inline int inet6_ifaddr_msgsize(void)
4976 {
4977 	return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
4978 	       + nla_total_size(16) /* IFA_LOCAL */
4979 	       + nla_total_size(16) /* IFA_ADDRESS */
4980 	       + nla_total_size(sizeof(struct ifa_cacheinfo))
4981 	       + nla_total_size(4)  /* IFA_FLAGS */
4982 	       + nla_total_size(1)  /* IFA_PROTO */
4983 	       + nla_total_size(4)  /* IFA_RT_PRIORITY */;
4984 }
4985 
4986 enum addr_type_t {
4987 	UNICAST_ADDR,
4988 	MULTICAST_ADDR,
4989 	ANYCAST_ADDR,
4990 };
4991 
4992 struct inet6_fill_args {
4993 	u32 portid;
4994 	u32 seq;
4995 	int event;
4996 	unsigned int flags;
4997 	int netnsid;
4998 	int ifindex;
4999 	enum addr_type_t type;
5000 };
5001 
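/* Fill one netlink address message (ifaddrmsg plus attributes) for a
 * unicast address, including the remaining preferred/valid lifetimes and
 * the peer address if one is set.
 */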
5002 static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
5003 			     struct inet6_fill_args *args)
5004 {
5005 	struct nlmsghdr  *nlh;
5006 	u32 preferred, valid;
5007 
5008 	nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
5009 			sizeof(struct ifaddrmsg), args->flags);
5010 	if (!nlh)
5011 		return -EMSGSIZE;
5012 
5013 	put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
5014 		      ifa->idev->dev->ifindex);
5015 
5016 	if (args->netnsid >= 0 &&
5017 	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
5018 		goto error;
5019 
5020 	spin_lock_bh(&ifa->lock);
5021 	if (!((ifa->flags&IFA_F_PERMANENT) &&
5022 	      (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
5023 		preferred = ifa->prefered_lft;
5024 		valid = ifa->valid_lft;
5025 		if (preferred != INFINITY_LIFE_TIME) {
5026 			long tval = (jiffies - ifa->tstamp)/HZ;
5027 			if (preferred > tval)
5028 				preferred -= tval;
5029 			else
5030 				preferred = 0;
5031 			if (valid != INFINITY_LIFE_TIME) {
5032 				if (valid > tval)
5033 					valid -= tval;
5034 				else
5035 					valid = 0;
5036 			}
5037 		}
5038 	} else {
5039 		preferred = INFINITY_LIFE_TIME;
5040 		valid = INFINITY_LIFE_TIME;
5041 	}
5042 	spin_unlock_bh(&ifa->lock);
5043 
5044 	if (!ipv6_addr_any(&ifa->peer_addr)) {
5045 		if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 ||
5046 		    nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->peer_addr) < 0)
5047 			goto error;
5048 	} else
5049 		if (nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->addr) < 0)
5050 			goto error;
5051 
5052 	if (ifa->rt_priority &&
5053 	    nla_put_u32(skb, IFA_RT_PRIORITY, ifa->rt_priority))
5054 		goto error;
5055 
5056 	if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
5057 		goto error;
5058 
5059 	if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0)
5060 		goto error;
5061 
5062 	if (ifa->ifa_proto &&
5063 	    nla_put_u8(skb, IFA_PROTO, ifa->ifa_proto))
5064 		goto error;
5065 
5066 	nlmsg_end(skb, nlh);
5067 	return 0;
5068 
5069 error:
5070 	nlmsg_cancel(skb, nlh);
5071 	return -EMSGSIZE;
5072 }
5073 
5074 static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
5075 			       struct inet6_fill_args *args)
5076 {
5077 	struct nlmsghdr  *nlh;
5078 	u8 scope = RT_SCOPE_UNIVERSE;
5079 	int ifindex = ifmca->idev->dev->ifindex;
5080 
5081 	if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE)
5082 		scope = RT_SCOPE_SITE;
5083 
5084 	nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
5085 			sizeof(struct ifaddrmsg), args->flags);
5086 	if (!nlh)
5087 		return -EMSGSIZE;
5088 
5089 	if (args->netnsid >= 0 &&
5090 	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) {
5091 		nlmsg_cancel(skb, nlh);
5092 		return -EMSGSIZE;
5093 	}
5094 
5095 	put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
5096 	if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 ||
5097 	    put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp,
5098 			  INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
5099 		nlmsg_cancel(skb, nlh);
5100 		return -EMSGSIZE;
5101 	}
5102 
5103 	nlmsg_end(skb, nlh);
5104 	return 0;
5105 }
5106 
5107 static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
5108 			       struct inet6_fill_args *args)
5109 {
5110 	struct net_device *dev = fib6_info_nh_dev(ifaca->aca_rt);
5111 	int ifindex = dev ? dev->ifindex : 1;
5112 	struct nlmsghdr  *nlh;
5113 	u8 scope = RT_SCOPE_UNIVERSE;
5114 
5115 	if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE)
5116 		scope = RT_SCOPE_SITE;
5117 
5118 	nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
5119 			sizeof(struct ifaddrmsg), args->flags);
5120 	if (!nlh)
5121 		return -EMSGSIZE;
5122 
5123 	if (args->netnsid >= 0 &&
5124 	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) {
5125 		nlmsg_cancel(skb, nlh);
5126 		return -EMSGSIZE;
5127 	}
5128 
5129 	put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
5130 	if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 ||
5131 	    put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp,
5132 			  INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
5133 		nlmsg_cancel(skb, nlh);
5134 		return -EMSGSIZE;
5135 	}
5136 
5137 	nlmsg_end(skb, nlh);
5138 	return 0;
5139 }
5140 
5141 /* called with rcu_read_lock() */
5142 static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
5143 			  struct netlink_callback *cb, int s_ip_idx,
5144 			  struct inet6_fill_args *fillargs)
5145 {
5146 	struct ifmcaddr6 *ifmca;
5147 	struct ifacaddr6 *ifaca;
5148 	int ip_idx = 0;
5149 	int err = 1;
5150 
5151 	read_lock_bh(&idev->lock);
5152 	switch (fillargs->type) {
5153 	case UNICAST_ADDR: {
5154 		struct inet6_ifaddr *ifa;
5155 		fillargs->event = RTM_NEWADDR;
5156 
5157 		/* unicast address incl. temp addr */
5158 		list_for_each_entry(ifa, &idev->addr_list, if_list) {
5159 			if (ip_idx < s_ip_idx)
5160 				goto next;
5161 			err = inet6_fill_ifaddr(skb, ifa, fillargs);
5162 			if (err < 0)
5163 				break;
5164 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
5165 next:
5166 			ip_idx++;
5167 		}
5168 		break;
5169 	}
5170 	case MULTICAST_ADDR:
5171 		read_unlock_bh(&idev->lock);
5172 		fillargs->event = RTM_GETMULTICAST;
5173 
5174 		/* multicast address */
5175 		for (ifmca = rcu_dereference(idev->mc_list);
5176 		     ifmca;
5177 		     ifmca = rcu_dereference(ifmca->next), ip_idx++) {
5178 			if (ip_idx < s_ip_idx)
5179 				continue;
5180 			err = inet6_fill_ifmcaddr(skb, ifmca, fillargs);
5181 			if (err < 0)
5182 				break;
5183 		}
5184 		read_lock_bh(&idev->lock);
5185 		break;
5186 	case ANYCAST_ADDR:
5187 		fillargs->event = RTM_GETANYCAST;
5188 		/* anycast address */
5189 		for (ifaca = idev->ac_list; ifaca;
5190 		     ifaca = ifaca->aca_next, ip_idx++) {
5191 			if (ip_idx < s_ip_idx)
5192 				continue;
5193 			err = inet6_fill_ifacaddr(skb, ifaca, fillargs);
5194 			if (err < 0)
5195 				break;
5196 		}
5197 		break;
5198 	default:
5199 		break;
5200 	}
5201 	read_unlock_bh(&idev->lock);
5202 	cb->args[2] = ip_idx;
5203 	return err;
5204 }
5205 
5206 static int inet6_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
5207 				       struct inet6_fill_args *fillargs,
5208 				       struct net **tgt_net, struct sock *sk,
5209 				       struct netlink_callback *cb)
5210 {
5211 	struct netlink_ext_ack *extack = cb->extack;
5212 	struct nlattr *tb[IFA_MAX+1];
5213 	struct ifaddrmsg *ifm;
5214 	int err, i;
5215 
5216 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
5217 		NL_SET_ERR_MSG_MOD(extack, "Invalid header for address dump request");
5218 		return -EINVAL;
5219 	}
5220 
5221 	ifm = nlmsg_data(nlh);
5222 	if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
5223 		NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for address dump request");
5224 		return -EINVAL;
5225 	}
5226 
5227 	fillargs->ifindex = ifm->ifa_index;
5228 	if (fillargs->ifindex) {
5229 		cb->answer_flags |= NLM_F_DUMP_FILTERED;
5230 		fillargs->flags |= NLM_F_DUMP_FILTERED;
5231 	}
5232 
5233 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
5234 					    ifa_ipv6_policy, extack);
5235 	if (err < 0)
5236 		return err;
5237 
5238 	for (i = 0; i <= IFA_MAX; ++i) {
5239 		if (!tb[i])
5240 			continue;
5241 
5242 		if (i == IFA_TARGET_NETNSID) {
5243 			struct net *net;
5244 
5245 			fillargs->netnsid = nla_get_s32(tb[i]);
5246 			net = rtnl_get_net_ns_capable(sk, fillargs->netnsid);
5247 			if (IS_ERR(net)) {
5248 				fillargs->netnsid = -1;
5249 				NL_SET_ERR_MSG_MOD(extack, "Invalid target network namespace id");
5250 				return PTR_ERR(net);
5251 			}
5252 			*tgt_net = net;
5253 		} else {
5254 			NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in dump request");
5255 			return -EINVAL;
5256 		}
5257 	}
5258 
5259 	return 0;
5260 }
5261 
5262 static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
5263 			   enum addr_type_t type)
5264 {
5265 	const struct nlmsghdr *nlh = cb->nlh;
5266 	struct inet6_fill_args fillargs = {
5267 		.portid = NETLINK_CB(cb->skb).portid,
5268 		.seq = cb->nlh->nlmsg_seq,
5269 		.flags = NLM_F_MULTI,
5270 		.netnsid = -1,
5271 		.type = type,
5272 	};
5273 	struct net *tgt_net = sock_net(skb->sk);
5274 	int idx, s_idx, s_ip_idx;
5275 	int h, s_h;
5276 	struct net_device *dev;
5277 	struct inet6_dev *idev;
5278 	struct hlist_head *head;
5279 	int err = 0;
5280 
5281 	s_h = cb->args[0];
5282 	s_idx = idx = cb->args[1];
5283 	s_ip_idx = cb->args[2];
5284 
5285 	if (cb->strict_check) {
5286 		err = inet6_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
5287 						  skb->sk, cb);
5288 		if (err < 0)
5289 			goto put_tgt_net;
5290 
5291 		err = 0;
5292 		if (fillargs.ifindex) {
5293 			dev = __dev_get_by_index(tgt_net, fillargs.ifindex);
5294 			if (!dev) {
5295 				err = -ENODEV;
5296 				goto put_tgt_net;
5297 			}
5298 			idev = __in6_dev_get(dev);
5299 			if (idev) {
5300 				err = in6_dump_addrs(idev, skb, cb, s_ip_idx,
5301 						     &fillargs);
5302 				if (err > 0)
5303 					err = 0;
5304 			}
5305 			goto put_tgt_net;
5306 		}
5307 	}
5308 
5309 	rcu_read_lock();
5310 	cb->seq = atomic_read(&tgt_net->ipv6.dev_addr_genid) ^ tgt_net->dev_base_seq;
5311 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
5312 		idx = 0;
5313 		head = &tgt_net->dev_index_head[h];
5314 		hlist_for_each_entry_rcu(dev, head, index_hlist) {
5315 			if (idx < s_idx)
5316 				goto cont;
5317 			if (h > s_h || idx > s_idx)
5318 				s_ip_idx = 0;
5319 			idev = __in6_dev_get(dev);
5320 			if (!idev)
5321 				goto cont;
5322 
5323 			if (in6_dump_addrs(idev, skb, cb, s_ip_idx,
5324 					   &fillargs) < 0)
5325 				goto done;
5326 cont:
5327 			idx++;
5328 		}
5329 	}
5330 done:
5331 	rcu_read_unlock();
5332 	cb->args[0] = h;
5333 	cb->args[1] = idx;
5334 put_tgt_net:
5335 	if (fillargs.netnsid >= 0)
5336 		put_net(tgt_net);
5337 
5338 	return skb->len ? : err;
5339 }
5340 
5341 static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
5342 {
5343 	enum addr_type_t type = UNICAST_ADDR;
5344 
5345 	return inet6_dump_addr(skb, cb, type);
5346 }
5347 
5348 static int inet6_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb)
5349 {
5350 	enum addr_type_t type = MULTICAST_ADDR;
5351 
5352 	return inet6_dump_addr(skb, cb, type);
5353 }
5354 
5355 
5356 static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
5357 {
5358 	enum addr_type_t type = ANYCAST_ADDR;
5359 
5360 	return inet6_dump_addr(skb, cb, type);
5361 }
5362 
5363 static int inet6_rtm_valid_getaddr_req(struct sk_buff *skb,
5364 				       const struct nlmsghdr *nlh,
5365 				       struct nlattr **tb,
5366 				       struct netlink_ext_ack *extack)
5367 {
5368 	struct ifaddrmsg *ifm;
5369 	int i, err;
5370 
5371 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
5372 		NL_SET_ERR_MSG_MOD(extack, "Invalid header for get address request");
5373 		return -EINVAL;
5374 	}
5375 
5376 	if (!netlink_strict_get_check(skb))
5377 		return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
5378 					      ifa_ipv6_policy, extack);
5379 
5380 	ifm = nlmsg_data(nlh);
5381 	if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
5382 		NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get address request");
5383 		return -EINVAL;
5384 	}
5385 
5386 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
5387 					    ifa_ipv6_policy, extack);
5388 	if (err)
5389 		return err;
5390 
5391 	for (i = 0; i <= IFA_MAX; i++) {
5392 		if (!tb[i])
5393 			continue;
5394 
5395 		switch (i) {
5396 		case IFA_TARGET_NETNSID:
5397 		case IFA_ADDRESS:
5398 		case IFA_LOCAL:
5399 			break;
5400 		default:
5401 			NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get address request");
5402 			return -EINVAL;
5403 		}
5404 	}
5405 
5406 	return 0;
5407 }
5408 
5409 static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
5410 			     struct netlink_ext_ack *extack)
5411 {
5412 	struct net *tgt_net = sock_net(in_skb->sk);
5413 	struct inet6_fill_args fillargs = {
5414 		.portid = NETLINK_CB(in_skb).portid,
5415 		.seq = nlh->nlmsg_seq,
5416 		.event = RTM_NEWADDR,
5417 		.flags = 0,
5418 		.netnsid = -1,
5419 	};
5420 	struct ifaddrmsg *ifm;
5421 	struct nlattr *tb[IFA_MAX+1];
5422 	struct in6_addr *addr = NULL, *peer;
5423 	struct net_device *dev = NULL;
5424 	struct inet6_ifaddr *ifa;
5425 	struct sk_buff *skb;
5426 	int err;
5427 
5428 	err = inet6_rtm_valid_getaddr_req(in_skb, nlh, tb, extack);
5429 	if (err < 0)
5430 		return err;
5431 
5432 	if (tb[IFA_TARGET_NETNSID]) {
5433 		fillargs.netnsid = nla_get_s32(tb[IFA_TARGET_NETNSID]);
5434 
5435 		tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(in_skb).sk,
5436 						  fillargs.netnsid);
5437 		if (IS_ERR(tgt_net))
5438 			return PTR_ERR(tgt_net);
5439 	}
5440 
5441 	addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
5442 	if (!addr)
5443 		return -EINVAL;
5444 
5445 	ifm = nlmsg_data(nlh);
5446 	if (ifm->ifa_index)
5447 		dev = dev_get_by_index(tgt_net, ifm->ifa_index);
5448 
5449 	ifa = ipv6_get_ifaddr(tgt_net, addr, dev, 1);
5450 	if (!ifa) {
5451 		err = -EADDRNOTAVAIL;
5452 		goto errout;
5453 	}
5454 
5455 	skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL);
5456 	if (!skb) {
5457 		err = -ENOBUFS;
5458 		goto errout_ifa;
5459 	}
5460 
5461 	err = inet6_fill_ifaddr(skb, ifa, &fillargs);
5462 	if (err < 0) {
5463 		/* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
5464 		WARN_ON(err == -EMSGSIZE);
5465 		kfree_skb(skb);
5466 		goto errout_ifa;
5467 	}
5468 	err = rtnl_unicast(skb, tgt_net, NETLINK_CB(in_skb).portid);
5469 errout_ifa:
5470 	in6_ifa_put(ifa);
5471 errout:
5472 	dev_put(dev);
5473 	if (fillargs.netnsid >= 0)
5474 		put_net(tgt_net);
5475 
5476 	return err;
5477 }
5478 
5479 static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
5480 {
5481 	struct sk_buff *skb;
5482 	struct net *net = dev_net(ifa->idev->dev);
5483 	struct inet6_fill_args fillargs = {
5484 		.portid = 0,
5485 		.seq = 0,
5486 		.event = event,
5487 		.flags = 0,
5488 		.netnsid = -1,
5489 	};
5490 	int err = -ENOBUFS;
5491 
5492 	skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
5493 	if (!skb)
5494 		goto errout;
5495 
5496 	err = inet6_fill_ifaddr(skb, ifa, &fillargs);
5497 	if (err < 0) {
5498 		/* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
5499 		WARN_ON(err == -EMSGSIZE);
5500 		kfree_skb(skb);
5501 		goto errout;
5502 	}
5503 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
5504 	return;
5505 errout:
5506 	if (err < 0)
5507 		rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
5508 }
5509 
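/* Copy the per-device IPv6 settings into the flat DEVCONF_* array exported
 * via IFLA_INET6_CONF (interval values are converted to milliseconds).
 */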
5510 static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
5511 				__s32 *array, int bytes)
5512 {
5513 	BUG_ON(bytes < (DEVCONF_MAX * 4));
5514 
5515 	memset(array, 0, bytes);
5516 	array[DEVCONF_FORWARDING] = cnf->forwarding;
5517 	array[DEVCONF_HOPLIMIT] = cnf->hop_limit;
5518 	array[DEVCONF_MTU6] = cnf->mtu6;
5519 	array[DEVCONF_ACCEPT_RA] = cnf->accept_ra;
5520 	array[DEVCONF_ACCEPT_REDIRECTS] = cnf->accept_redirects;
5521 	array[DEVCONF_AUTOCONF] = cnf->autoconf;
5522 	array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits;
5523 	array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits;
5524 	array[DEVCONF_RTR_SOLICIT_INTERVAL] =
5525 		jiffies_to_msecs(cnf->rtr_solicit_interval);
5526 	array[DEVCONF_RTR_SOLICIT_MAX_INTERVAL] =
5527 		jiffies_to_msecs(cnf->rtr_solicit_max_interval);
5528 	array[DEVCONF_RTR_SOLICIT_DELAY] =
5529 		jiffies_to_msecs(cnf->rtr_solicit_delay);
5530 	array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
5531 	array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] =
5532 		jiffies_to_msecs(cnf->mldv1_unsolicited_report_interval);
5533 	array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] =
5534 		jiffies_to_msecs(cnf->mldv2_unsolicited_report_interval);
5535 	array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
5536 	array[DEVCONF_TEMP_VALID_LFT] = cnf->temp_valid_lft;
5537 	array[DEVCONF_TEMP_PREFERED_LFT] = cnf->temp_prefered_lft;
5538 	array[DEVCONF_REGEN_MAX_RETRY] = cnf->regen_max_retry;
5539 	array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
5540 	array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
5541 	array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
5542 	array[DEVCONF_RA_DEFRTR_METRIC] = cnf->ra_defrtr_metric;
5543 	array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
5544 	array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
5545 #ifdef CONFIG_IPV6_ROUTER_PREF
5546 	array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
5547 	array[DEVCONF_RTR_PROBE_INTERVAL] =
5548 		jiffies_to_msecs(cnf->rtr_probe_interval);
5549 #ifdef CONFIG_IPV6_ROUTE_INFO
5550 	array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] = cnf->accept_ra_rt_info_min_plen;
5551 	array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
5552 #endif
5553 #endif
5554 	array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
5555 	array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
5556 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
5557 	array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad;
5558 	array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic;
5559 #endif
5560 #ifdef CONFIG_IPV6_MROUTE
5561 	array[DEVCONF_MC_FORWARDING] = atomic_read(&cnf->mc_forwarding);
5562 #endif
5563 	array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6;
5564 	array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
5565 	array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
5566 	array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
5567 	array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
5568 	array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
5569 	array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
5570 	array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] = cnf->ignore_routes_with_linkdown;
5571 	/* we omit DEVCONF_STABLE_SECRET for now */
5572 	array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only;
5573 	array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] = cnf->drop_unicast_in_l2_multicast;
5574 	array[DEVCONF_DROP_UNSOLICITED_NA] = cnf->drop_unsolicited_na;
5575 	array[DEVCONF_KEEP_ADDR_ON_DOWN] = cnf->keep_addr_on_down;
5576 	array[DEVCONF_SEG6_ENABLED] = cnf->seg6_enabled;
5577 #ifdef CONFIG_IPV6_SEG6_HMAC
5578 	array[DEVCONF_SEG6_REQUIRE_HMAC] = cnf->seg6_require_hmac;
5579 #endif
5580 	array[DEVCONF_ENHANCED_DAD] = cnf->enhanced_dad;
5581 	array[DEVCONF_ADDR_GEN_MODE] = cnf->addr_gen_mode;
5582 	array[DEVCONF_DISABLE_POLICY] = cnf->disable_policy;
5583 	array[DEVCONF_NDISC_TCLASS] = cnf->ndisc_tclass;
5584 	array[DEVCONF_RPL_SEG_ENABLED] = cnf->rpl_seg_enabled;
5585 	array[DEVCONF_IOAM6_ENABLED] = cnf->ioam6_enabled;
5586 	array[DEVCONF_IOAM6_ID] = cnf->ioam6_id;
5587 	array[DEVCONF_IOAM6_ID_WIDE] = cnf->ioam6_id_wide;
5588 	array[DEVCONF_NDISC_EVICT_NOCARRIER] = cnf->ndisc_evict_nocarrier;
5589 	array[DEVCONF_ACCEPT_UNTRACKED_NA] = cnf->accept_untracked_na;
5590 }
5591 
5592 static inline size_t inet6_ifla6_size(void)
5593 {
5594 	return nla_total_size(4) /* IFLA_INET6_FLAGS */
5595 	     + nla_total_size(sizeof(struct ifla_cacheinfo))
5596 	     + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
5597 	     + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
5598 	     + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */
5599 	     + nla_total_size(sizeof(struct in6_addr)) /* IFLA_INET6_TOKEN */
5600 	     + nla_total_size(1) /* IFLA_INET6_ADDR_GEN_MODE */
5601 	     + nla_total_size(4) /* IFLA_INET6_RA_MTU */
5602 	     + 0;
5603 }
5604 
5605 static inline size_t inet6_if_nlmsg_size(void)
5606 {
5607 	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
5608 	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
5609 	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
5610 	       + nla_total_size(4) /* IFLA_MTU */
5611 	       + nla_total_size(4) /* IFLA_LINK */
5612 	       + nla_total_size(1) /* IFLA_OPERSTATE */
5613 	       + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
5614 }
5615 
5616 static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
5617 					int bytes)
5618 {
5619 	int i;
5620 	int pad = bytes - sizeof(u64) * ICMP6_MIB_MAX;
5621 	BUG_ON(pad < 0);
5622 
5623 	/* Use put_unaligned() because stats may not be aligned for u64. */
5624 	put_unaligned(ICMP6_MIB_MAX, &stats[0]);
5625 	for (i = 1; i < ICMP6_MIB_MAX; i++)
5626 		put_unaligned(atomic_long_read(&mib[i]), &stats[i]);
5627 
5628 	memset(&stats[ICMP6_MIB_MAX], 0, pad);
5629 }
5630 
5631 static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib,
5632 					int bytes, size_t syncpoff)
5633 {
5634 	int i, c;
5635 	u64 buff[IPSTATS_MIB_MAX];
5636 	int pad = bytes - sizeof(u64) * IPSTATS_MIB_MAX;
5637 
5638 	BUG_ON(pad < 0);
5639 
5640 	memset(buff, 0, sizeof(buff));
5641 	buff[0] = IPSTATS_MIB_MAX;
5642 
5643 	for_each_possible_cpu(c) {
5644 		for (i = 1; i < IPSTATS_MIB_MAX; i++)
5645 			buff[i] += snmp_get_cpu_field64(mib, c, i, syncpoff);
5646 	}
5647 
5648 	memcpy(stats, buff, IPSTATS_MIB_MAX * sizeof(u64));
5649 	memset(&stats[IPSTATS_MIB_MAX], 0, pad);
5650 }
5651 
5652 static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
5653 			     int bytes)
5654 {
5655 	switch (attrtype) {
5656 	case IFLA_INET6_STATS:
5657 		__snmp6_fill_stats64(stats, idev->stats.ipv6, bytes,
5658 				     offsetof(struct ipstats_mib, syncp));
5659 		break;
5660 	case IFLA_INET6_ICMP6STATS:
5661 		__snmp6_fill_statsdev(stats, idev->stats.icmpv6dev->mibs, bytes);
5662 		break;
5663 	}
5664 }
5665 
5666 static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
5667 				  u32 ext_filter_mask)
5668 {
5669 	struct nlattr *nla;
5670 	struct ifla_cacheinfo ci;
5671 
5672 	if (nla_put_u32(skb, IFLA_INET6_FLAGS, idev->if_flags))
5673 		goto nla_put_failure;
5674 	ci.max_reasm_len = IPV6_MAXPLEN;
5675 	ci.tstamp = cstamp_delta(idev->tstamp);
5676 	ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
5677 	ci.retrans_time = jiffies_to_msecs(NEIGH_VAR(idev->nd_parms, RETRANS_TIME));
5678 	if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
5679 		goto nla_put_failure;
5680 	nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
5681 	if (!nla)
5682 		goto nla_put_failure;
5683 	ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
5684 
5685 	/* XXX - MC not implemented */
5686 
5687 	if (ext_filter_mask & RTEXT_FILTER_SKIP_STATS)
5688 		return 0;
5689 
5690 	nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
5691 	if (!nla)
5692 		goto nla_put_failure;
5693 	snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
5694 
5695 	nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
5696 	if (!nla)
5697 		goto nla_put_failure;
5698 	snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
5699 
5700 	nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr));
5701 	if (!nla)
5702 		goto nla_put_failure;
5703 	read_lock_bh(&idev->lock);
5704 	memcpy(nla_data(nla), idev->token.s6_addr, nla_len(nla));
5705 	read_unlock_bh(&idev->lock);
5706 
5707 	if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->cnf.addr_gen_mode))
5708 		goto nla_put_failure;
5709 
5710 	if (idev->ra_mtu &&
5711 	    nla_put_u32(skb, IFLA_INET6_RA_MTU, idev->ra_mtu))
5712 		goto nla_put_failure;
5713 
5714 	return 0;
5715 
5716 nla_put_failure:
5717 	return -EMSGSIZE;
5718 }
5719 
5720 static size_t inet6_get_link_af_size(const struct net_device *dev,
5721 				     u32 ext_filter_mask)
5722 {
5723 	if (!__in6_dev_get(dev))
5724 		return 0;
5725 
5726 	return inet6_ifla6_size();
5727 }
5728 
5729 static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
5730 			      u32 ext_filter_mask)
5731 {
5732 	struct inet6_dev *idev = __in6_dev_get(dev);
5733 
5734 	if (!idev)
5735 		return -ENODATA;
5736 
5737 	if (inet6_fill_ifla6_attrs(skb, idev, ext_filter_mask) < 0)
5738 		return -EMSGSIZE;
5739 
5740 	return 0;
5741 }
5742 
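/*
 * Install a new interface token from IFLA_INET6_TOKEN (typically set
 * with iproute2, e.g. "ip token set ::1:2:3:4 dev eth0"); only the
 * low 64 bits of the supplied address are stored.  If the token is
 * non-zero and the interface is up with a usable link-local address,
 * a Router Solicitation is sent right away so prefixes are re-learnt,
 * and any addresses formed from the old token are expired below.
 */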
5743 static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token,
5744 			     struct netlink_ext_ack *extack)
5745 {
5746 	struct inet6_ifaddr *ifp;
5747 	struct net_device *dev = idev->dev;
5748 	bool clear_token, update_rs = false;
5749 	struct in6_addr ll_addr;
5750 
5751 	ASSERT_RTNL();
5752 
5753 	if (!token)
5754 		return -EINVAL;
5755 
5756 	if (dev->flags & IFF_LOOPBACK) {
5757 		NL_SET_ERR_MSG_MOD(extack, "Device is loopback");
5758 		return -EINVAL;
5759 	}
5760 
5761 	if (dev->flags & IFF_NOARP) {
5762 		NL_SET_ERR_MSG_MOD(extack,
5763 				   "Device does not do neighbour discovery");
5764 		return -EINVAL;
5765 	}
5766 
5767 	if (!ipv6_accept_ra(idev)) {
5768 		NL_SET_ERR_MSG_MOD(extack,
5769 				   "Router advertisement is disabled on device");
5770 		return -EINVAL;
5771 	}
5772 
5773 	if (idev->cnf.rtr_solicits == 0) {
5774 		NL_SET_ERR_MSG(extack,
5775 			       "Router solicitation is disabled on device");
5776 		return -EINVAL;
5777 	}
5778 
5779 	write_lock_bh(&idev->lock);
5780 
5781 	BUILD_BUG_ON(sizeof(token->s6_addr) != 16);
5782 	memcpy(idev->token.s6_addr + 8, token->s6_addr + 8, 8);
5783 
5784 	write_unlock_bh(&idev->lock);
5785 
5786 	clear_token = ipv6_addr_any(token);
5787 	if (clear_token)
5788 		goto update_lft;
5789 
5790 	if (!idev->dead && (idev->if_flags & IF_READY) &&
5791 	    !ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
5792 			     IFA_F_OPTIMISTIC)) {
5793 		/* The interface is ready and has a usable link-local address,
5794 		 * so send the RS now; otherwise normal ifup takes care of it.
5795 		 */
5796 		ndisc_send_rs(dev, &ll_addr, &in6addr_linklocal_allrouters);
5797 		update_rs = true;
5798 	}
5799 
5800 update_lft:
5801 	write_lock_bh(&idev->lock);
5802 
5803 	if (update_rs) {
5804 		idev->if_flags |= IF_RS_SENT;
5805 		idev->rs_interval = rfc3315_s14_backoff_init(
5806 			idev->cnf.rtr_solicit_interval);
5807 		idev->rs_probes = 1;
5808 		addrconf_mod_rs_timer(idev, idev->rs_interval);
5809 	}
5810 
5811 	/* Expire any addresses that were derived from the old token. */
5812 	list_for_each_entry(ifp, &idev->addr_list, if_list) {
5813 		spin_lock(&ifp->lock);
5814 		if (ifp->tokenized) {
5815 			ifp->valid_lft = 0;
5816 			ifp->prefered_lft = 0;
5817 		}
5818 		spin_unlock(&ifp->lock);
5819 	}
5820 
5821 	write_unlock_bh(&idev->lock);
5822 	inet6_ifinfo_notify(RTM_NEWLINK, idev);
5823 	addrconf_verify_rtnl(dev_net(dev));
5824 	return 0;
5825 }
5826 
5827 static const struct nla_policy inet6_af_policy[IFLA_INET6_MAX + 1] = {
5828 	[IFLA_INET6_ADDR_GEN_MODE]	= { .type = NLA_U8 },
5829 	[IFLA_INET6_TOKEN]		= { .len = sizeof(struct in6_addr) },
5830 	[IFLA_INET6_RA_MTU]		= { .type = NLA_REJECT,
5831 					    .reject_message =
5832 						"IFLA_INET6_RA_MTU can not be set" },
5833 };
5834 
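/*
 * Both helpers return 1 on success and -EINVAL on failure; callers
 * only test for a negative value.  Stable-privacy mode additionally
 * requires an initialized stable secret on the device or in the
 * per-netns defaults.
 */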
5835 static int check_addr_gen_mode(int mode)
5836 {
5837 	if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
5838 	    mode != IN6_ADDR_GEN_MODE_NONE &&
5839 	    mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5840 	    mode != IN6_ADDR_GEN_MODE_RANDOM)
5841 		return -EINVAL;
5842 	return 1;
5843 }
5844 
5845 static int check_stable_privacy(struct inet6_dev *idev, struct net *net,
5846 				int mode)
5847 {
5848 	if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5849 	    !idev->cnf.stable_secret.initialized &&
5850 	    !net->ipv6.devconf_dflt->stable_secret.initialized)
5851 		return -EINVAL;
5852 	return 1;
5853 }
5854 
5855 static int inet6_validate_link_af(const struct net_device *dev,
5856 				  const struct nlattr *nla,
5857 				  struct netlink_ext_ack *extack)
5858 {
5859 	struct nlattr *tb[IFLA_INET6_MAX + 1];
5860 	struct inet6_dev *idev = NULL;
5861 	int err;
5862 
5863 	if (dev) {
5864 		idev = __in6_dev_get(dev);
5865 		if (!idev)
5866 			return -EAFNOSUPPORT;
5867 	}
5868 
5869 	err = nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla,
5870 					  inet6_af_policy, extack);
5871 	if (err)
5872 		return err;
5873 
5874 	if (!tb[IFLA_INET6_TOKEN] && !tb[IFLA_INET6_ADDR_GEN_MODE])
5875 		return -EINVAL;
5876 
5877 	if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
5878 		u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
5879 
5880 		if (check_addr_gen_mode(mode) < 0)
5881 			return -EINVAL;
5882 		if (dev && check_stable_privacy(idev, dev_net(dev), mode) < 0)
5883 			return -EINVAL;
5884 	}
5885 
5886 	return 0;
5887 }
5888 
5889 static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla,
5890 			     struct netlink_ext_ack *extack)
5891 {
5892 	struct inet6_dev *idev = __in6_dev_get(dev);
5893 	struct nlattr *tb[IFLA_INET6_MAX + 1];
5894 	int err;
5895 
5896 	if (!idev)
5897 		return -EAFNOSUPPORT;
5898 
5899 	if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
5900 		return -EINVAL;
5901 
5902 	if (tb[IFLA_INET6_TOKEN]) {
5903 		err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]),
5904 					extack);
5905 		if (err)
5906 			return err;
5907 	}
5908 
5909 	if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
5910 		u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
5911 
5912 		idev->cnf.addr_gen_mode = mode;
5913 	}
5914 
5915 	return 0;
5916 }
5917 
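/*
 * Build one AF_INET6 RTM_NEWLINK message: the basic link attributes
 * (name, hardware address, MTU, lower link, operstate) plus the
 * IFLA_INET6_* set nested in IFLA_PROTINFO.  On overflow the partial
 * message is cancelled and -EMSGSIZE returned.
 */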
5918 static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
5919 			     u32 portid, u32 seq, int event, unsigned int flags)
5920 {
5921 	struct net_device *dev = idev->dev;
5922 	struct ifinfomsg *hdr;
5923 	struct nlmsghdr *nlh;
5924 	void *protoinfo;
5925 
5926 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
5927 	if (!nlh)
5928 		return -EMSGSIZE;
5929 
5930 	hdr = nlmsg_data(nlh);
5931 	hdr->ifi_family = AF_INET6;
5932 	hdr->__ifi_pad = 0;
5933 	hdr->ifi_type = dev->type;
5934 	hdr->ifi_index = dev->ifindex;
5935 	hdr->ifi_flags = dev_get_flags(dev);
5936 	hdr->ifi_change = 0;
5937 
5938 	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
5939 	    (dev->addr_len &&
5940 	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
5941 	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
5942 	    (dev->ifindex != dev_get_iflink(dev) &&
5943 	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
5944 	    nla_put_u8(skb, IFLA_OPERSTATE,
5945 		       netif_running(dev) ? dev->operstate : IF_OPER_DOWN))
5946 		goto nla_put_failure;
5947 	protoinfo = nla_nest_start_noflag(skb, IFLA_PROTINFO);
5948 	if (!protoinfo)
5949 		goto nla_put_failure;
5950 
5951 	if (inet6_fill_ifla6_attrs(skb, idev, 0) < 0)
5952 		goto nla_put_failure;
5953 
5954 	nla_nest_end(skb, protoinfo);
5955 	nlmsg_end(skb, nlh);
5956 	return 0;
5957 
5958 nla_put_failure:
5959 	nlmsg_cancel(skb, nlh);
5960 	return -EMSGSIZE;
5961 }
5962 
5963 static int inet6_valid_dump_ifinfo(const struct nlmsghdr *nlh,
5964 				   struct netlink_ext_ack *extack)
5965 {
5966 	struct ifinfomsg *ifm;
5967 
5968 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
5969 		NL_SET_ERR_MSG_MOD(extack, "Invalid header for link dump request");
5970 		return -EINVAL;
5971 	}
5972 
5973 	if (nlmsg_attrlen(nlh, sizeof(*ifm))) {
5974 		NL_SET_ERR_MSG_MOD(extack, "Invalid data after header");
5975 		return -EINVAL;
5976 	}
5977 
5978 	ifm = nlmsg_data(nlh);
5979 	if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
5980 	    ifm->ifi_change || ifm->ifi_index) {
5981 		NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for dump request");
5982 		return -EINVAL;
5983 	}
5984 
5985 	return 0;
5986 }
5987 
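/*
 * Dump handler for RTM_GETLINK on PF_INET6: walk the ifindex hash
 * buckets under RCU and emit one message per device that has an
 * inet6_dev.  cb->args[0] (bucket) and cb->args[1] (index within the
 * bucket) record where to resume on the next call.
 */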
5988 static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
5989 {
5990 	struct net *net = sock_net(skb->sk);
5991 	int h, s_h;
5992 	int idx = 0, s_idx;
5993 	struct net_device *dev;
5994 	struct inet6_dev *idev;
5995 	struct hlist_head *head;
5996 
5997 	/* only requests using strict checking can pass data to
5998 	 * influence the dump
5999 	 */
6000 	if (cb->strict_check) {
6001 		int err = inet6_valid_dump_ifinfo(cb->nlh, cb->extack);
6002 
6003 		if (err < 0)
6004 			return err;
6005 	}
6006 
6007 	s_h = cb->args[0];
6008 	s_idx = cb->args[1];
6009 
6010 	rcu_read_lock();
6011 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
6012 		idx = 0;
6013 		head = &net->dev_index_head[h];
6014 		hlist_for_each_entry_rcu(dev, head, index_hlist) {
6015 			if (idx < s_idx)
6016 				goto cont;
6017 			idev = __in6_dev_get(dev);
6018 			if (!idev)
6019 				goto cont;
6020 			if (inet6_fill_ifinfo(skb, idev,
6021 					      NETLINK_CB(cb->skb).portid,
6022 					      cb->nlh->nlmsg_seq,
6023 					      RTM_NEWLINK, NLM_F_MULTI) < 0)
6024 				goto out;
6025 cont:
6026 			idx++;
6027 		}
6028 	}
6029 out:
6030 	rcu_read_unlock();
6031 	cb->args[1] = idx;
6032 	cb->args[0] = h;
6033 
6034 	return skb->len;
6035 }
6036 
6037 void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
6038 {
6039 	struct sk_buff *skb;
6040 	struct net *net = dev_net(idev->dev);
6041 	int err = -ENOBUFS;
6042 
6043 	skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC);
6044 	if (!skb)
6045 		goto errout;
6046 
6047 	err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0);
6048 	if (err < 0) {
6049 		/* -EMSGSIZE implies BUG in inet6_if_nlmsg_size() */
6050 		WARN_ON(err == -EMSGSIZE);
6051 		kfree_skb(skb);
6052 		goto errout;
6053 	}
6054 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFINFO, NULL, GFP_ATOMIC);
6055 	return;
6056 errout:
6057 	if (err < 0)
6058 		rtnl_set_sk_err(net, RTNLGRP_IPV6_IFINFO, err);
6059 }
6060 
6061 static inline size_t inet6_prefix_nlmsg_size(void)
6062 {
6063 	return NLMSG_ALIGN(sizeof(struct prefixmsg))
6064 	       + nla_total_size(sizeof(struct in6_addr))
6065 	       + nla_total_size(sizeof(struct prefix_cacheinfo));
6066 }
6067 
6068 static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
6069 			     struct prefix_info *pinfo, u32 portid, u32 seq,
6070 			     int event, unsigned int flags)
6071 {
6072 	struct prefixmsg *pmsg;
6073 	struct nlmsghdr *nlh;
6074 	struct prefix_cacheinfo	ci;
6075 
6076 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*pmsg), flags);
6077 	if (!nlh)
6078 		return -EMSGSIZE;
6079 
6080 	pmsg = nlmsg_data(nlh);
6081 	pmsg->prefix_family = AF_INET6;
6082 	pmsg->prefix_pad1 = 0;
6083 	pmsg->prefix_pad2 = 0;
6084 	pmsg->prefix_ifindex = idev->dev->ifindex;
6085 	pmsg->prefix_len = pinfo->prefix_len;
6086 	pmsg->prefix_type = pinfo->type;
6087 	pmsg->prefix_pad3 = 0;
6088 	pmsg->prefix_flags = 0;
6089 	if (pinfo->onlink)
6090 		pmsg->prefix_flags |= IF_PREFIX_ONLINK;
6091 	if (pinfo->autoconf)
6092 		pmsg->prefix_flags |= IF_PREFIX_AUTOCONF;
6093 
6094 	if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
6095 		goto nla_put_failure;
6096 	ci.preferred_time = ntohl(pinfo->prefered);
6097 	ci.valid_time = ntohl(pinfo->valid);
6098 	if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci))
6099 		goto nla_put_failure;
6100 	nlmsg_end(skb, nlh);
6101 	return 0;
6102 
6103 nla_put_failure:
6104 	nlmsg_cancel(skb, nlh);
6105 	return -EMSGSIZE;
6106 }
6107 
6108 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
6109 			 struct prefix_info *pinfo)
6110 {
6111 	struct sk_buff *skb;
6112 	struct net *net = dev_net(idev->dev);
6113 	int err = -ENOBUFS;
6114 
6115 	skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC);
6116 	if (!skb)
6117 		goto errout;
6118 
6119 	err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0);
6120 	if (err < 0) {
6121 		/* -EMSGSIZE implies BUG in inet6_prefix_nlmsg_size() */
6122 		WARN_ON(err == -EMSGSIZE);
6123 		kfree_skb(skb);
6124 		goto errout;
6125 	}
6126 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
6127 	return;
6128 errout:
6129 	if (err < 0)
6130 		rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
6131 }
6132 
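/*
 * Send the RTM_NEWADDR/RTM_DELADDR notification for an address and
 * keep routing state in sync: on add, make sure the host route is in
 * the FIB, join the anycast group when forwarding and add a /128
 * route to any peer address; on delete, tear those down, leave the
 * solicited-node group and bump the IPv6 route generation id.
 */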
6133 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
6134 {
6135 	struct net *net = dev_net(ifp->idev->dev);
6136 
6137 	if (event)
6138 		ASSERT_RTNL();
6139 
6140 	inet6_ifa_notify(event ? : RTM_NEWADDR, ifp);
6141 
6142 	switch (event) {
6143 	case RTM_NEWADDR:
6144 		/*
6145 		 * If the address was optimistic we inserted the route at the
6146 		 * start of our DAD process, so we don't need to do it again.
6147 		 * If the device was taken down in the middle of the DAD
6148 		 * cycle there is a race where we could get here without a
6149 		 * host route, so nothing to insert. That will be fixed when
6150 		 * the device is brought up.
6151 		 */
6152 		if (ifp->rt && !rcu_access_pointer(ifp->rt->fib6_node)) {
6153 			ip6_ins_rt(net, ifp->rt);
6154 		} else if (!ifp->rt && (ifp->idev->dev->flags & IFF_UP)) {
6155 			pr_warn("BUG: Address %pI6c on device %s is missing its host route.\n",
6156 				&ifp->addr, ifp->idev->dev->name);
6157 		}
6158 
6159 		if (ifp->idev->cnf.forwarding)
6160 			addrconf_join_anycast(ifp);
6161 		if (!ipv6_addr_any(&ifp->peer_addr))
6162 			addrconf_prefix_route(&ifp->peer_addr, 128,
6163 					      ifp->rt_priority, ifp->idev->dev,
6164 					      0, 0, GFP_ATOMIC);
6165 		break;
6166 	case RTM_DELADDR:
6167 		if (ifp->idev->cnf.forwarding)
6168 			addrconf_leave_anycast(ifp);
6169 		addrconf_leave_solict(ifp->idev, &ifp->addr);
6170 		if (!ipv6_addr_any(&ifp->peer_addr)) {
6171 			struct fib6_info *rt;
6172 
6173 			rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
6174 						       ifp->idev->dev, 0, 0,
6175 						       false);
6176 			if (rt)
6177 				ip6_del_rt(net, rt, false);
6178 		}
6179 		if (ifp->rt) {
6180 			ip6_del_rt(net, ifp->rt, false);
6181 			ifp->rt = NULL;
6182 		}
6183 		rt_genid_bump_ipv6(net);
6184 		break;
6185 	}
6186 	atomic_inc(&net->ipv6.dev_addr_genid);
6187 }
6188 
6189 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
6190 {
6191 	if (likely(ifp->idev->dead == 0))
6192 		__ipv6_ifa_notify(event, ifp);
6193 }
6194 
6195 #ifdef CONFIG_SYSCTL
6196 
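/*
 * Several of the handlers below follow the same pattern: the
 * ctl_table is copied to the stack and .data redirected to a local
 * value, so the shared ipv6_devconf field is only updated once the
 * fixup helper has taken the RTNL lock.
 */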
6197 static int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
6198 		void *buffer, size_t *lenp, loff_t *ppos)
6199 {
6200 	int *valp = ctl->data;
6201 	int val = *valp;
6202 	loff_t pos = *ppos;
6203 	struct ctl_table lctl;
6204 	int ret;
6205 
6206 	/*
6207 	 * ctl->data points to idev->cnf.forwarding, we should
6208 	 * not modify it until we get the rtnl lock.
6209 	 */
6210 	lctl = *ctl;
6211 	lctl.data = &val;
6212 
6213 	ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6214 
6215 	if (write)
6216 		ret = addrconf_fixup_forwarding(ctl, valp, val);
6217 	if (ret)
6218 		*ppos = pos;
6219 	return ret;
6220 }
6221 
6222 static int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
6223 		void *buffer, size_t *lenp, loff_t *ppos)
6224 {
6225 	struct inet6_dev *idev = ctl->extra1;
6226 	int min_mtu = IPV6_MIN_MTU;
6227 	struct ctl_table lctl;
6228 
6229 	lctl = *ctl;
6230 	lctl.extra1 = &min_mtu;
6231 	lctl.extra2 = idev ? &idev->dev->mtu : NULL;
6232 
6233 	return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
6234 }
6235 
6236 static void dev_disable_change(struct inet6_dev *idev)
6237 {
6238 	struct netdev_notifier_info info;
6239 
6240 	if (!idev || !idev->dev)
6241 		return;
6242 
6243 	netdev_notifier_info_init(&info, idev->dev);
6244 	if (idev->cnf.disable_ipv6)
6245 		addrconf_notify(NULL, NETDEV_DOWN, &info);
6246 	else
6247 		addrconf_notify(NULL, NETDEV_UP, &info);
6248 }
6249 
6250 static void addrconf_disable_change(struct net *net, __s32 newf)
6251 {
6252 	struct net_device *dev;
6253 	struct inet6_dev *idev;
6254 
6255 	for_each_netdev(net, dev) {
6256 		idev = __in6_dev_get(dev);
6257 		if (idev) {
6258 			int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
6259 			idev->cnf.disable_ipv6 = newf;
6260 			if (changed)
6261 				dev_disable_change(idev);
6262 		}
6263 	}
6264 }
6265 
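/*
 * Apply a write to .../disable_ipv6 under RTNL.  Writing the "all"
 * entry also updates the default template and toggles every device
 * whose setting changes; a per-device write only notifies that
 * device, and only if the value changed.  For example (assuming a
 * device named eth0):
 *
 *	# echo 1 > /proc/sys/net/ipv6/conf/eth0/disable_ipv6
 *
 * takes eth0 through the NETDEV_DOWN path in dev_disable_change().
 */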
6266 static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
6267 {
6268 	struct net *net;
6269 	int old;
6270 
6271 	if (!rtnl_trylock())
6272 		return restart_syscall();
6273 
6274 	net = (struct net *)table->extra2;
6275 	old = *p;
6276 	*p = newf;
6277 
6278 	if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
6279 		rtnl_unlock();
6280 		return 0;
6281 	}
6282 
6283 	if (p == &net->ipv6.devconf_all->disable_ipv6) {
6284 		net->ipv6.devconf_dflt->disable_ipv6 = newf;
6285 		addrconf_disable_change(net, newf);
6286 	} else if ((!newf) ^ (!old))
6287 		dev_disable_change((struct inet6_dev *)table->extra1);
6288 
6289 	rtnl_unlock();
6290 	return 0;
6291 }
6292 
6293 static int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
6294 		void *buffer, size_t *lenp, loff_t *ppos)
6295 {
6296 	int *valp = ctl->data;
6297 	int val = *valp;
6298 	loff_t pos = *ppos;
6299 	struct ctl_table lctl;
6300 	int ret;
6301 
6302 	/*
6303 	 * ctl->data points to idev->cnf.disable_ipv6, we should
6304 	 * not modify it until we get the rtnl lock.
6305 	 */
6306 	lctl = *ctl;
6307 	lctl.data = &val;
6308 
6309 	ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6310 
6311 	if (write)
6312 		ret = addrconf_disable_ipv6(ctl, valp, val);
6313 	if (ret)
6314 		*ppos = pos;
6315 	return ret;
6316 }
6317 
6318 static int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
6319 		void *buffer, size_t *lenp, loff_t *ppos)
6320 {
6321 	int *valp = ctl->data;
6322 	int ret;
6323 	int old, new;
6324 
6325 	old = *valp;
6326 	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
6327 	new = *valp;
6328 
6329 	if (write && old != new) {
6330 		struct net *net = ctl->extra2;
6331 
6332 		if (!rtnl_trylock())
6333 			return restart_syscall();
6334 
6335 		if (valp == &net->ipv6.devconf_dflt->proxy_ndp)
6336 			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6337 						     NETCONFA_PROXY_NEIGH,
6338 						     NETCONFA_IFINDEX_DEFAULT,
6339 						     net->ipv6.devconf_dflt);
6340 		else if (valp == &net->ipv6.devconf_all->proxy_ndp)
6341 			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6342 						     NETCONFA_PROXY_NEIGH,
6343 						     NETCONFA_IFINDEX_ALL,
6344 						     net->ipv6.devconf_all);
6345 		else {
6346 			struct inet6_dev *idev = ctl->extra1;
6347 
6348 			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6349 						     NETCONFA_PROXY_NEIGH,
6350 						     idev->dev->ifindex,
6351 						     &idev->cnf);
6352 		}
6353 		rtnl_unlock();
6354 	}
6355 
6356 	return ret;
6357 }
6358 
6359 static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
6360 					 void *buffer, size_t *lenp,
6361 					 loff_t *ppos)
6362 {
6363 	int ret = 0;
6364 	u32 new_val;
6365 	struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1;
6366 	struct net *net = (struct net *)ctl->extra2;
6367 	struct ctl_table tmp = {
6368 		.data = &new_val,
6369 		.maxlen = sizeof(new_val),
6370 		.mode = ctl->mode,
6371 	};
6372 
6373 	if (!rtnl_trylock())
6374 		return restart_syscall();
6375 
6376 	new_val = *((u32 *)ctl->data);
6377 
6378 	ret = proc_douintvec(&tmp, write, buffer, lenp, ppos);
6379 	if (ret != 0)
6380 		goto out;
6381 
6382 	if (write) {
6383 		if (check_addr_gen_mode(new_val) < 0) {
6384 			ret = -EINVAL;
6385 			goto out;
6386 		}
6387 
6388 		if (idev) {
6389 			if (check_stable_privacy(idev, net, new_val) < 0) {
6390 				ret = -EINVAL;
6391 				goto out;
6392 			}
6393 
6394 			if (idev->cnf.addr_gen_mode != new_val) {
6395 				idev->cnf.addr_gen_mode = new_val;
6396 				addrconf_dev_config(idev->dev);
6397 			}
6398 		} else if (&net->ipv6.devconf_all->addr_gen_mode == ctl->data) {
6399 			struct net_device *dev;
6400 
6401 			net->ipv6.devconf_dflt->addr_gen_mode = new_val;
6402 			for_each_netdev(net, dev) {
6403 				idev = __in6_dev_get(dev);
6404 				if (idev &&
6405 				    idev->cnf.addr_gen_mode != new_val) {
6406 					idev->cnf.addr_gen_mode = new_val;
6407 					addrconf_dev_config(idev->dev);
6408 				}
6409 			}
6410 		}
6411 
6412 		*((u32 *)ctl->data) = new_val;
6413 	}
6414 
6415 out:
6416 	rtnl_unlock();
6417 
6418 	return ret;
6419 }
6420 
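/*
 * The stable secret used for stable-privacy (RFC 7217 style) address
 * generation is exchanged with userspace as an IPv6-address-formatted
 * string: reads print it with %pI6, writes are parsed by in6_pton().
 * Writing a secret also switches the affected device(s) to
 * IN6_ADDR_GEN_MODE_STABLE_PRIVACY.  The "all" entry and reads of an
 * uninitialized secret return -EIO.  For example, for the defaults
 * inherited by new interfaces:
 *
 *	# echo 2001:db8:1:2:3:4:5:6 > /proc/sys/net/ipv6/conf/default/stable_secret
 */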
6421 static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
6422 					 void *buffer, size_t *lenp,
6423 					 loff_t *ppos)
6424 {
6425 	int err;
6426 	struct in6_addr addr;
6427 	char str[IPV6_MAX_STRLEN];
6428 	struct ctl_table lctl = *ctl;
6429 	struct net *net = ctl->extra2;
6430 	struct ipv6_stable_secret *secret = ctl->data;
6431 
6432 	if (&net->ipv6.devconf_all->stable_secret == ctl->data)
6433 		return -EIO;
6434 
6435 	lctl.maxlen = IPV6_MAX_STRLEN;
6436 	lctl.data = str;
6437 
6438 	if (!rtnl_trylock())
6439 		return restart_syscall();
6440 
6441 	if (!write && !secret->initialized) {
6442 		err = -EIO;
6443 		goto out;
6444 	}
6445 
6446 	err = snprintf(str, sizeof(str), "%pI6", &secret->secret);
6447 	if (err >= sizeof(str)) {
6448 		err = -EIO;
6449 		goto out;
6450 	}
6451 
6452 	err = proc_dostring(&lctl, write, buffer, lenp, ppos);
6453 	if (err || !write)
6454 		goto out;
6455 
6456 	if (in6_pton(str, -1, addr.in6_u.u6_addr8, -1, NULL) != 1) {
6457 		err = -EIO;
6458 		goto out;
6459 	}
6460 
6461 	secret->initialized = true;
6462 	secret->secret = addr;
6463 
6464 	if (&net->ipv6.devconf_dflt->stable_secret == ctl->data) {
6465 		struct net_device *dev;
6466 
6467 		for_each_netdev(net, dev) {
6468 			struct inet6_dev *idev = __in6_dev_get(dev);
6469 
6470 			if (idev) {
6471 				idev->cnf.addr_gen_mode =
6472 					IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
6473 			}
6474 		}
6475 	} else {
6476 		struct inet6_dev *idev = ctl->extra1;
6477 
6478 		idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
6479 	}
6480 
6481 out:
6482 	rtnl_unlock();
6483 
6484 	return err;
6485 }
6486 
6487 static
6488 int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
6489 						int write, void *buffer,
6490 						size_t *lenp,
6491 						loff_t *ppos)
6492 {
6493 	int *valp = ctl->data;
6494 	int val = *valp;
6495 	loff_t pos = *ppos;
6496 	struct ctl_table lctl;
6497 	int ret;
6498 
6499 	/* ctl->data points to idev->cnf.ignore_routes_when_linkdown;
6500 	 * we should not modify it until we get the rtnl lock.
6501 	 */
6502 	lctl = *ctl;
6503 	lctl.data = &val;
6504 
6505 	ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6506 
6507 	if (write)
6508 		ret = addrconf_fixup_linkdown(ctl, valp, val);
6509 	if (ret)
6510 		*ppos = pos;
6511 	return ret;
6512 }
6513 
6514 static
6515 void addrconf_set_nopolicy(struct rt6_info *rt, int action)
6516 {
6517 	if (rt) {
6518 		if (action)
6519 			rt->dst.flags |= DST_NOPOLICY;
6520 		else
6521 			rt->dst.flags &= ~DST_NOPOLICY;
6522 	}
6523 }
6524 
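/*
 * Propagate a disable_policy change to every address on the device:
 * mark each address's host route (and its per-CPU dst copies) with
 * DST_NOPOLICY so IPsec policy lookups are skipped, or clear the flag
 * again when policy is re-enabled.
 */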
6525 static
6526 void addrconf_disable_policy_idev(struct inet6_dev *idev, int val)
6527 {
6528 	struct inet6_ifaddr *ifa;
6529 
6530 	read_lock_bh(&idev->lock);
6531 	list_for_each_entry(ifa, &idev->addr_list, if_list) {
6532 		spin_lock(&ifa->lock);
6533 		if (ifa->rt) {
6534 			/* host routes only use builtin fib6_nh */
6535 			struct fib6_nh *nh = ifa->rt->fib6_nh;
6536 			int cpu;
6537 
6538 			rcu_read_lock();
6539 			ifa->rt->dst_nopolicy = val ? true : false;
6540 			if (nh->rt6i_pcpu) {
6541 				for_each_possible_cpu(cpu) {
6542 					struct rt6_info **rtp;
6543 
6544 					rtp = per_cpu_ptr(nh->rt6i_pcpu, cpu);
6545 					addrconf_set_nopolicy(*rtp, val);
6546 				}
6547 			}
6548 			rcu_read_unlock();
6549 		}
6550 		spin_unlock(&ifa->lock);
6551 	}
6552 	read_unlock_bh(&idev->lock);
6553 }
6554 
6555 static
6556 int addrconf_disable_policy(struct ctl_table *ctl, int *valp, int val)
6557 {
6558 	struct inet6_dev *idev;
6559 	struct net *net;
6560 
6561 	if (!rtnl_trylock())
6562 		return restart_syscall();
6563 
6564 	*valp = val;
6565 
6566 	net = (struct net *)ctl->extra2;
6567 	if (valp == &net->ipv6.devconf_dflt->disable_policy) {
6568 		rtnl_unlock();
6569 		return 0;
6570 	}
6571 
6572 	if (valp == &net->ipv6.devconf_all->disable_policy)  {
6573 		struct net_device *dev;
6574 
6575 		for_each_netdev(net, dev) {
6576 			idev = __in6_dev_get(dev);
6577 			if (idev)
6578 				addrconf_disable_policy_idev(idev, val);
6579 		}
6580 	} else {
6581 		idev = (struct inet6_dev *)ctl->extra1;
6582 		addrconf_disable_policy_idev(idev, val);
6583 	}
6584 
6585 	rtnl_unlock();
6586 	return 0;
6587 }
6588 
6589 static int addrconf_sysctl_disable_policy(struct ctl_table *ctl, int write,
6590 				   void *buffer, size_t *lenp, loff_t *ppos)
6591 {
6592 	int *valp = ctl->data;
6593 	int val = *valp;
6594 	loff_t pos = *ppos;
6595 	struct ctl_table lctl;
6596 	int ret;
6597 
6598 	lctl = *ctl;
6599 	lctl.data = &val;
6600 	ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6601 
6602 	if (write && (*valp != val))
6603 		ret = addrconf_disable_policy(ctl, valp, val);
6604 
6605 	if (ret)
6606 		*ppos = pos;
6607 
6608 	return ret;
6609 }
6610 
6611 static int minus_one = -1;
6612 static const int two_five_five = 255;
6613 static u32 ioam6_if_id_max = U16_MAX;
6614 
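/*
 * Template sysctl table: __addrconf_sysctl_register() duplicates it
 * for "all", "default" and each device, rebases every .data pointer
 * from &ipv6_devconf onto the instance's ipv6_devconf, and stores the
 * idev and netns in .extra1/.extra2 for entries that do not already
 * use them as min/max bounds.
 */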
6615 static const struct ctl_table addrconf_sysctl[] = {
6616 	{
6617 		.procname	= "forwarding",
6618 		.data		= &ipv6_devconf.forwarding,
6619 		.maxlen		= sizeof(int),
6620 		.mode		= 0644,
6621 		.proc_handler	= addrconf_sysctl_forward,
6622 	},
6623 	{
6624 		.procname	= "hop_limit",
6625 		.data		= &ipv6_devconf.hop_limit,
6626 		.maxlen		= sizeof(int),
6627 		.mode		= 0644,
6628 		.proc_handler	= proc_dointvec_minmax,
6629 		.extra1		= (void *)SYSCTL_ONE,
6630 		.extra2		= (void *)&two_five_five,
6631 	},
6632 	{
6633 		.procname	= "mtu",
6634 		.data		= &ipv6_devconf.mtu6,
6635 		.maxlen		= sizeof(int),
6636 		.mode		= 0644,
6637 		.proc_handler	= addrconf_sysctl_mtu,
6638 	},
6639 	{
6640 		.procname	= "accept_ra",
6641 		.data		= &ipv6_devconf.accept_ra,
6642 		.maxlen		= sizeof(int),
6643 		.mode		= 0644,
6644 		.proc_handler	= proc_dointvec,
6645 	},
6646 	{
6647 		.procname	= "accept_redirects",
6648 		.data		= &ipv6_devconf.accept_redirects,
6649 		.maxlen		= sizeof(int),
6650 		.mode		= 0644,
6651 		.proc_handler	= proc_dointvec,
6652 	},
6653 	{
6654 		.procname	= "autoconf",
6655 		.data		= &ipv6_devconf.autoconf,
6656 		.maxlen		= sizeof(int),
6657 		.mode		= 0644,
6658 		.proc_handler	= proc_dointvec,
6659 	},
6660 	{
6661 		.procname	= "dad_transmits",
6662 		.data		= &ipv6_devconf.dad_transmits,
6663 		.maxlen		= sizeof(int),
6664 		.mode		= 0644,
6665 		.proc_handler	= proc_dointvec,
6666 	},
6667 	{
6668 		.procname	= "router_solicitations",
6669 		.data		= &ipv6_devconf.rtr_solicits,
6670 		.maxlen		= sizeof(int),
6671 		.mode		= 0644,
6672 		.proc_handler	= proc_dointvec_minmax,
6673 		.extra1		= &minus_one,
6674 	},
6675 	{
6676 		.procname	= "router_solicitation_interval",
6677 		.data		= &ipv6_devconf.rtr_solicit_interval,
6678 		.maxlen		= sizeof(int),
6679 		.mode		= 0644,
6680 		.proc_handler	= proc_dointvec_jiffies,
6681 	},
6682 	{
6683 		.procname	= "router_solicitation_max_interval",
6684 		.data		= &ipv6_devconf.rtr_solicit_max_interval,
6685 		.maxlen		= sizeof(int),
6686 		.mode		= 0644,
6687 		.proc_handler	= proc_dointvec_jiffies,
6688 	},
6689 	{
6690 		.procname	= "router_solicitation_delay",
6691 		.data		= &ipv6_devconf.rtr_solicit_delay,
6692 		.maxlen		= sizeof(int),
6693 		.mode		= 0644,
6694 		.proc_handler	= proc_dointvec_jiffies,
6695 	},
6696 	{
6697 		.procname	= "force_mld_version",
6698 		.data		= &ipv6_devconf.force_mld_version,
6699 		.maxlen		= sizeof(int),
6700 		.mode		= 0644,
6701 		.proc_handler	= proc_dointvec,
6702 	},
6703 	{
6704 		.procname	= "mldv1_unsolicited_report_interval",
6705 		.data		=
6706 			&ipv6_devconf.mldv1_unsolicited_report_interval,
6707 		.maxlen		= sizeof(int),
6708 		.mode		= 0644,
6709 		.proc_handler	= proc_dointvec_ms_jiffies,
6710 	},
6711 	{
6712 		.procname	= "mldv2_unsolicited_report_interval",
6713 		.data		=
6714 			&ipv6_devconf.mldv2_unsolicited_report_interval,
6715 		.maxlen		= sizeof(int),
6716 		.mode		= 0644,
6717 		.proc_handler	= proc_dointvec_ms_jiffies,
6718 	},
6719 	{
6720 		.procname	= "use_tempaddr",
6721 		.data		= &ipv6_devconf.use_tempaddr,
6722 		.maxlen		= sizeof(int),
6723 		.mode		= 0644,
6724 		.proc_handler	= proc_dointvec,
6725 	},
6726 	{
6727 		.procname	= "temp_valid_lft",
6728 		.data		= &ipv6_devconf.temp_valid_lft,
6729 		.maxlen		= sizeof(int),
6730 		.mode		= 0644,
6731 		.proc_handler	= proc_dointvec,
6732 	},
6733 	{
6734 		.procname	= "temp_prefered_lft",
6735 		.data		= &ipv6_devconf.temp_prefered_lft,
6736 		.maxlen		= sizeof(int),
6737 		.mode		= 0644,
6738 		.proc_handler	= proc_dointvec,
6739 	},
6740 	{
6741 		.procname	= "regen_max_retry",
6742 		.data		= &ipv6_devconf.regen_max_retry,
6743 		.maxlen		= sizeof(int),
6744 		.mode		= 0644,
6745 		.proc_handler	= proc_dointvec,
6746 	},
6747 	{
6748 		.procname	= "max_desync_factor",
6749 		.data		= &ipv6_devconf.max_desync_factor,
6750 		.maxlen		= sizeof(int),
6751 		.mode		= 0644,
6752 		.proc_handler	= proc_dointvec,
6753 	},
6754 	{
6755 		.procname	= "max_addresses",
6756 		.data		= &ipv6_devconf.max_addresses,
6757 		.maxlen		= sizeof(int),
6758 		.mode		= 0644,
6759 		.proc_handler	= proc_dointvec,
6760 	},
6761 	{
6762 		.procname	= "accept_ra_defrtr",
6763 		.data		= &ipv6_devconf.accept_ra_defrtr,
6764 		.maxlen		= sizeof(int),
6765 		.mode		= 0644,
6766 		.proc_handler	= proc_dointvec,
6767 	},
6768 	{
6769 		.procname	= "ra_defrtr_metric",
6770 		.data		= &ipv6_devconf.ra_defrtr_metric,
6771 		.maxlen		= sizeof(u32),
6772 		.mode		= 0644,
6773 		.proc_handler	= proc_douintvec_minmax,
6774 		.extra1		= (void *)SYSCTL_ONE,
6775 	},
6776 	{
6777 		.procname	= "accept_ra_min_hop_limit",
6778 		.data		= &ipv6_devconf.accept_ra_min_hop_limit,
6779 		.maxlen		= sizeof(int),
6780 		.mode		= 0644,
6781 		.proc_handler	= proc_dointvec,
6782 	},
6783 	{
6784 		.procname	= "accept_ra_pinfo",
6785 		.data		= &ipv6_devconf.accept_ra_pinfo,
6786 		.maxlen		= sizeof(int),
6787 		.mode		= 0644,
6788 		.proc_handler	= proc_dointvec,
6789 	},
6790 #ifdef CONFIG_IPV6_ROUTER_PREF
6791 	{
6792 		.procname	= "accept_ra_rtr_pref",
6793 		.data		= &ipv6_devconf.accept_ra_rtr_pref,
6794 		.maxlen		= sizeof(int),
6795 		.mode		= 0644,
6796 		.proc_handler	= proc_dointvec,
6797 	},
6798 	{
6799 		.procname	= "router_probe_interval",
6800 		.data		= &ipv6_devconf.rtr_probe_interval,
6801 		.maxlen		= sizeof(int),
6802 		.mode		= 0644,
6803 		.proc_handler	= proc_dointvec_jiffies,
6804 	},
6805 #ifdef CONFIG_IPV6_ROUTE_INFO
6806 	{
6807 		.procname	= "accept_ra_rt_info_min_plen",
6808 		.data		= &ipv6_devconf.accept_ra_rt_info_min_plen,
6809 		.maxlen		= sizeof(int),
6810 		.mode		= 0644,
6811 		.proc_handler	= proc_dointvec,
6812 	},
6813 	{
6814 		.procname	= "accept_ra_rt_info_max_plen",
6815 		.data		= &ipv6_devconf.accept_ra_rt_info_max_plen,
6816 		.maxlen		= sizeof(int),
6817 		.mode		= 0644,
6818 		.proc_handler	= proc_dointvec,
6819 	},
6820 #endif
6821 #endif
6822 	{
6823 		.procname	= "proxy_ndp",
6824 		.data		= &ipv6_devconf.proxy_ndp,
6825 		.maxlen		= sizeof(int),
6826 		.mode		= 0644,
6827 		.proc_handler	= addrconf_sysctl_proxy_ndp,
6828 	},
6829 	{
6830 		.procname	= "accept_source_route",
6831 		.data		= &ipv6_devconf.accept_source_route,
6832 		.maxlen		= sizeof(int),
6833 		.mode		= 0644,
6834 		.proc_handler	= proc_dointvec,
6835 	},
6836 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
6837 	{
6838 		.procname	= "optimistic_dad",
6839 		.data		= &ipv6_devconf.optimistic_dad,
6840 		.maxlen		= sizeof(int),
6841 		.mode		= 0644,
6842 		.proc_handler   = proc_dointvec,
6843 	},
6844 	{
6845 		.procname	= "use_optimistic",
6846 		.data		= &ipv6_devconf.use_optimistic,
6847 		.maxlen		= sizeof(int),
6848 		.mode		= 0644,
6849 		.proc_handler	= proc_dointvec,
6850 	},
6851 #endif
6852 #ifdef CONFIG_IPV6_MROUTE
6853 	{
6854 		.procname	= "mc_forwarding",
6855 		.data		= &ipv6_devconf.mc_forwarding,
6856 		.maxlen		= sizeof(int),
6857 		.mode		= 0444,
6858 		.proc_handler	= proc_dointvec,
6859 	},
6860 #endif
6861 	{
6862 		.procname	= "disable_ipv6",
6863 		.data		= &ipv6_devconf.disable_ipv6,
6864 		.maxlen		= sizeof(int),
6865 		.mode		= 0644,
6866 		.proc_handler	= addrconf_sysctl_disable,
6867 	},
6868 	{
6869 		.procname	= "accept_dad",
6870 		.data		= &ipv6_devconf.accept_dad,
6871 		.maxlen		= sizeof(int),
6872 		.mode		= 0644,
6873 		.proc_handler	= proc_dointvec,
6874 	},
6875 	{
6876 		.procname	= "force_tllao",
6877 		.data		= &ipv6_devconf.force_tllao,
6878 		.maxlen		= sizeof(int),
6879 		.mode		= 0644,
6880 		.proc_handler	= proc_dointvec
6881 	},
6882 	{
6883 		.procname	= "ndisc_notify",
6884 		.data		= &ipv6_devconf.ndisc_notify,
6885 		.maxlen		= sizeof(int),
6886 		.mode		= 0644,
6887 		.proc_handler	= proc_dointvec
6888 	},
6889 	{
6890 		.procname	= "suppress_frag_ndisc",
6891 		.data		= &ipv6_devconf.suppress_frag_ndisc,
6892 		.maxlen		= sizeof(int),
6893 		.mode		= 0644,
6894 		.proc_handler	= proc_dointvec
6895 	},
6896 	{
6897 		.procname	= "accept_ra_from_local",
6898 		.data		= &ipv6_devconf.accept_ra_from_local,
6899 		.maxlen		= sizeof(int),
6900 		.mode		= 0644,
6901 		.proc_handler	= proc_dointvec,
6902 	},
6903 	{
6904 		.procname	= "accept_ra_mtu",
6905 		.data		= &ipv6_devconf.accept_ra_mtu,
6906 		.maxlen		= sizeof(int),
6907 		.mode		= 0644,
6908 		.proc_handler	= proc_dointvec,
6909 	},
6910 	{
6911 		.procname	= "stable_secret",
6912 		.data		= &ipv6_devconf.stable_secret,
6913 		.maxlen		= IPV6_MAX_STRLEN,
6914 		.mode		= 0600,
6915 		.proc_handler	= addrconf_sysctl_stable_secret,
6916 	},
6917 	{
6918 		.procname	= "use_oif_addrs_only",
6919 		.data		= &ipv6_devconf.use_oif_addrs_only,
6920 		.maxlen		= sizeof(int),
6921 		.mode		= 0644,
6922 		.proc_handler	= proc_dointvec,
6923 	},
6924 	{
6925 		.procname	= "ignore_routes_with_linkdown",
6926 		.data		= &ipv6_devconf.ignore_routes_with_linkdown,
6927 		.maxlen		= sizeof(int),
6928 		.mode		= 0644,
6929 		.proc_handler	= addrconf_sysctl_ignore_routes_with_linkdown,
6930 	},
6931 	{
6932 		.procname	= "drop_unicast_in_l2_multicast",
6933 		.data		= &ipv6_devconf.drop_unicast_in_l2_multicast,
6934 		.maxlen		= sizeof(int),
6935 		.mode		= 0644,
6936 		.proc_handler	= proc_dointvec,
6937 	},
6938 	{
6939 		.procname	= "drop_unsolicited_na",
6940 		.data		= &ipv6_devconf.drop_unsolicited_na,
6941 		.maxlen		= sizeof(int),
6942 		.mode		= 0644,
6943 		.proc_handler	= proc_dointvec,
6944 	},
6945 	{
6946 		.procname	= "keep_addr_on_down",
6947 		.data		= &ipv6_devconf.keep_addr_on_down,
6948 		.maxlen		= sizeof(int),
6949 		.mode		= 0644,
6950 		.proc_handler	= proc_dointvec,
6951 
6952 	},
6953 	{
6954 		.procname	= "seg6_enabled",
6955 		.data		= &ipv6_devconf.seg6_enabled,
6956 		.maxlen		= sizeof(int),
6957 		.mode		= 0644,
6958 		.proc_handler	= proc_dointvec,
6959 	},
6960 #ifdef CONFIG_IPV6_SEG6_HMAC
6961 	{
6962 		.procname	= "seg6_require_hmac",
6963 		.data		= &ipv6_devconf.seg6_require_hmac,
6964 		.maxlen		= sizeof(int),
6965 		.mode		= 0644,
6966 		.proc_handler	= proc_dointvec,
6967 	},
6968 #endif
6969 	{
6970 		.procname       = "enhanced_dad",
6971 		.data           = &ipv6_devconf.enhanced_dad,
6972 		.maxlen         = sizeof(int),
6973 		.mode           = 0644,
6974 		.proc_handler   = proc_dointvec,
6975 	},
6976 	{
6977 		.procname	= "addr_gen_mode",
6978 		.data		= &ipv6_devconf.addr_gen_mode,
6979 		.maxlen		= sizeof(int),
6980 		.mode		= 0644,
6981 		.proc_handler	= addrconf_sysctl_addr_gen_mode,
6982 	},
6983 	{
6984 		.procname       = "disable_policy",
6985 		.data           = &ipv6_devconf.disable_policy,
6986 		.maxlen         = sizeof(int),
6987 		.mode           = 0644,
6988 		.proc_handler   = addrconf_sysctl_disable_policy,
6989 	},
6990 	{
6991 		.procname	= "ndisc_tclass",
6992 		.data		= &ipv6_devconf.ndisc_tclass,
6993 		.maxlen		= sizeof(int),
6994 		.mode		= 0644,
6995 		.proc_handler	= proc_dointvec_minmax,
6996 		.extra1		= (void *)SYSCTL_ZERO,
6997 		.extra2		= (void *)&two_five_five,
6998 	},
6999 	{
7000 		.procname	= "rpl_seg_enabled",
7001 		.data		= &ipv6_devconf.rpl_seg_enabled,
7002 		.maxlen		= sizeof(int),
7003 		.mode		= 0644,
7004 		.proc_handler	= proc_dointvec,
7005 	},
7006 	{
7007 		.procname	= "ioam6_enabled",
7008 		.data		= &ipv6_devconf.ioam6_enabled,
7009 		.maxlen		= sizeof(u8),
7010 		.mode		= 0644,
7011 		.proc_handler	= proc_dou8vec_minmax,
7012 		.extra1		= (void *)SYSCTL_ZERO,
7013 		.extra2		= (void *)SYSCTL_ONE,
7014 	},
7015 	{
7016 		.procname	= "ioam6_id",
7017 		.data		= &ipv6_devconf.ioam6_id,
7018 		.maxlen		= sizeof(u32),
7019 		.mode		= 0644,
7020 		.proc_handler	= proc_douintvec_minmax,
7021 		.extra1		= (void *)SYSCTL_ZERO,
7022 		.extra2		= (void *)&ioam6_if_id_max,
7023 	},
7024 	{
7025 		.procname	= "ioam6_id_wide",
7026 		.data		= &ipv6_devconf.ioam6_id_wide,
7027 		.maxlen		= sizeof(u32),
7028 		.mode		= 0644,
7029 		.proc_handler	= proc_douintvec,
7030 	},
7031 	{
7032 		.procname	= "ndisc_evict_nocarrier",
7033 		.data		= &ipv6_devconf.ndisc_evict_nocarrier,
7034 		.maxlen		= sizeof(u8),
7035 		.mode		= 0644,
7036 		.proc_handler	= proc_dou8vec_minmax,
7037 		.extra1		= (void *)SYSCTL_ZERO,
7038 		.extra2		= (void *)SYSCTL_ONE,
7039 	},
7040 	{
7041 		.procname	= "accept_untracked_na",
7042 		.data		= &ipv6_devconf.accept_untracked_na,
7043 		.maxlen		= sizeof(int),
7044 		.mode		= 0644,
7045 		.proc_handler	= proc_dointvec_minmax,
7046 		.extra1		= (void *)SYSCTL_ZERO,
7047 		.extra2		= (void *)SYSCTL_ONE,
7048 	},
7049 	{
7050 		/* sentinel */
7051 	}
7052 };
7053 
7054 static int __addrconf_sysctl_register(struct net *net, char *dev_name,
7055 		struct inet6_dev *idev, struct ipv6_devconf *p)
7056 {
7057 	int i, ifindex;
7058 	struct ctl_table *table;
7059 	char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
7060 
7061 	table = kmemdup(addrconf_sysctl, sizeof(addrconf_sysctl), GFP_KERNEL_ACCOUNT);
7062 	if (!table)
7063 		goto out;
7064 
7065 	for (i = 0; table[i].data; i++) {
7066 		table[i].data += (char *)p - (char *)&ipv6_devconf;
7067 		/* If one of these is already set, then it is not safe to
7068 		 * overwrite either of them: this makes proc_dointvec_minmax
7069 		 * usable.
7070 		 */
7071 		if (!table[i].extra1 && !table[i].extra2) {
7072 			table[i].extra1 = idev; /* embedded; no ref */
7073 			table[i].extra2 = net;
7074 		}
7075 	}
7076 
7077 	snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name);
7078 
7079 	p->sysctl_header = register_net_sysctl(net, path, table);
7080 	if (!p->sysctl_header)
7081 		goto free;
7082 
7083 	if (!strcmp(dev_name, "all"))
7084 		ifindex = NETCONFA_IFINDEX_ALL;
7085 	else if (!strcmp(dev_name, "default"))
7086 		ifindex = NETCONFA_IFINDEX_DEFAULT;
7087 	else
7088 		ifindex = idev->dev->ifindex;
7089 	inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL,
7090 				     ifindex, p);
7091 	return 0;
7092 
7093 free:
7094 	kfree(table);
7095 out:
7096 	return -ENOBUFS;
7097 }
7098 
7099 static void __addrconf_sysctl_unregister(struct net *net,
7100 					 struct ipv6_devconf *p, int ifindex)
7101 {
7102 	struct ctl_table *table;
7103 
7104 	if (!p->sysctl_header)
7105 		return;
7106 
7107 	table = p->sysctl_header->ctl_table_arg;
7108 	unregister_net_sysctl_table(p->sysctl_header);
7109 	p->sysctl_header = NULL;
7110 	kfree(table);
7111 
7112 	inet6_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL);
7113 }
7114 
7115 static int addrconf_sysctl_register(struct inet6_dev *idev)
7116 {
7117 	int err;
7118 
7119 	if (!sysctl_dev_name_is_allowed(idev->dev->name))
7120 		return -EINVAL;
7121 
7122 	err = neigh_sysctl_register(idev->dev, idev->nd_parms,
7123 				    &ndisc_ifinfo_sysctl_change);
7124 	if (err)
7125 		return err;
7126 	err = __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
7127 					 idev, &idev->cnf);
7128 	if (err)
7129 		neigh_sysctl_unregister(idev->nd_parms);
7130 
7131 	return err;
7132 }
7133 
7134 static void addrconf_sysctl_unregister(struct inet6_dev *idev)
7135 {
7136 	__addrconf_sysctl_unregister(dev_net(idev->dev), &idev->cnf,
7137 				     idev->dev->ifindex);
7138 	neigh_sysctl_unregister(idev->nd_parms);
7139 }
7140 
7141 
7142 #endif
7143 
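/*
 * Per-netns setup: allocate the address hash table and the "all" and
 * "default" devconf copies.  For non-initial namespaces (with sysctl
 * support built in) the starting values follow
 * sysctl_devconf_inherit_init_net: 1 copies from init_net, 3 from the
 * creating process's netns, 0 and 2 keep the compiled-in defaults.
 * The stable secret is always reset to uninitialized.
 */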
7144 static int __net_init addrconf_init_net(struct net *net)
7145 {
7146 	int err = -ENOMEM;
7147 	struct ipv6_devconf *all, *dflt;
7148 
7149 	spin_lock_init(&net->ipv6.addrconf_hash_lock);
7150 	INIT_DEFERRABLE_WORK(&net->ipv6.addr_chk_work, addrconf_verify_work);
7151 	net->ipv6.inet6_addr_lst = kcalloc(IN6_ADDR_HSIZE,
7152 					   sizeof(struct hlist_head),
7153 					   GFP_KERNEL);
7154 	if (!net->ipv6.inet6_addr_lst)
7155 		goto err_alloc_addr;
7156 
7157 	all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
7158 	if (!all)
7159 		goto err_alloc_all;
7160 
7161 	dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
7162 	if (!dflt)
7163 		goto err_alloc_dflt;
7164 
7165 	if (IS_ENABLED(CONFIG_SYSCTL) &&
7166 	    !net_eq(net, &init_net)) {
7167 		switch (sysctl_devconf_inherit_init_net) {
7168 		case 1:  /* copy from init_net */
7169 			memcpy(all, init_net.ipv6.devconf_all,
7170 			       sizeof(ipv6_devconf));
7171 			memcpy(dflt, init_net.ipv6.devconf_dflt,
7172 			       sizeof(ipv6_devconf_dflt));
7173 			break;
7174 		case 3: /* copy from the current netns */
7175 			memcpy(all, current->nsproxy->net_ns->ipv6.devconf_all,
7176 			       sizeof(ipv6_devconf));
7177 			memcpy(dflt,
7178 			       current->nsproxy->net_ns->ipv6.devconf_dflt,
7179 			       sizeof(ipv6_devconf_dflt));
7180 			break;
7181 		case 0:
7182 		case 2:
7183 			/* use compiled values */
7184 			break;
7185 		}
7186 	}
7187 
7188 	/* these will be inherited by all namespaces */
7189 	dflt->autoconf = ipv6_defaults.autoconf;
7190 	dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
7191 
7192 	dflt->stable_secret.initialized = false;
7193 	all->stable_secret.initialized = false;
7194 
7195 	net->ipv6.devconf_all = all;
7196 	net->ipv6.devconf_dflt = dflt;
7197 
7198 #ifdef CONFIG_SYSCTL
7199 	err = __addrconf_sysctl_register(net, "all", NULL, all);
7200 	if (err < 0)
7201 		goto err_reg_all;
7202 
7203 	err = __addrconf_sysctl_register(net, "default", NULL, dflt);
7204 	if (err < 0)
7205 		goto err_reg_dflt;
7206 #endif
7207 	return 0;
7208 
7209 #ifdef CONFIG_SYSCTL
7210 err_reg_dflt:
7211 	__addrconf_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
7212 err_reg_all:
7213 	kfree(dflt);
7214 #endif
7215 err_alloc_dflt:
7216 	kfree(all);
7217 err_alloc_all:
7218 	kfree(net->ipv6.inet6_addr_lst);
7219 err_alloc_addr:
7220 	return err;
7221 }
7222 
7223 static void __net_exit addrconf_exit_net(struct net *net)
7224 {
7225 	int i;
7226 
7227 #ifdef CONFIG_SYSCTL
7228 	__addrconf_sysctl_unregister(net, net->ipv6.devconf_dflt,
7229 				     NETCONFA_IFINDEX_DEFAULT);
7230 	__addrconf_sysctl_unregister(net, net->ipv6.devconf_all,
7231 				     NETCONFA_IFINDEX_ALL);
7232 #endif
7233 	kfree(net->ipv6.devconf_dflt);
7234 	net->ipv6.devconf_dflt = NULL;
7235 	kfree(net->ipv6.devconf_all);
7236 	net->ipv6.devconf_all = NULL;
7237 
7238 	cancel_delayed_work_sync(&net->ipv6.addr_chk_work);
7239 	/*
7240 	 *	Check hash table, then free it.
7241 	 */
7242 	for (i = 0; i < IN6_ADDR_HSIZE; i++)
7243 		WARN_ON_ONCE(!hlist_empty(&net->ipv6.inet6_addr_lst[i]));
7244 
7245 	kfree(net->ipv6.inet6_addr_lst);
7246 	net->ipv6.inet6_addr_lst = NULL;
7247 }
7248 
7249 static struct pernet_operations addrconf_ops = {
7250 	.init = addrconf_init_net,
7251 	.exit = addrconf_exit_net,
7252 };
7253 
7254 static struct rtnl_af_ops inet6_ops __read_mostly = {
7255 	.family		  = AF_INET6,
7256 	.fill_link_af	  = inet6_fill_link_af,
7257 	.get_link_af_size = inet6_get_link_af_size,
7258 	.validate_link_af = inet6_validate_link_af,
7259 	.set_link_af	  = inet6_set_link_af,
7260 };
7261 
7262 /*
7263  *	Init / cleanup code
7264  */
7265 
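/*
 * Module init: set up the address-label table, the per-netns state,
 * the "ipv6_addrconf" workqueue and an inet6_dev for blackhole_netdev,
 * then register the netdevice notifier, the AF_INET6 rtnl_af_ops and
 * the rtnetlink message handlers.  Errors unwind in reverse order
 * through the labels at the bottom.
 */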
7266 int __init addrconf_init(void)
7267 {
7268 	struct inet6_dev *idev;
7269 	int err;
7270 
7271 	err = ipv6_addr_label_init();
7272 	if (err < 0) {
7273 		pr_crit("%s: cannot initialize default policy table: %d\n",
7274 			__func__, err);
7275 		goto out;
7276 	}
7277 
7278 	err = register_pernet_subsys(&addrconf_ops);
7279 	if (err < 0)
7280 		goto out_addrlabel;
7281 
7282 	addrconf_wq = create_workqueue("ipv6_addrconf");
7283 	if (!addrconf_wq) {
7284 		err = -ENOMEM;
7285 		goto out_nowq;
7286 	}
7287 
7288 	rtnl_lock();
7289 	idev = ipv6_add_dev(blackhole_netdev);
7290 	rtnl_unlock();
7291 	if (IS_ERR(idev)) {
7292 		err = PTR_ERR(idev);
7293 		goto errlo;
7294 	}
7295 
7296 	ip6_route_init_special_entries();
7297 
7298 	register_netdevice_notifier(&ipv6_dev_notf);
7299 
7300 	addrconf_verify(&init_net);
7301 
7302 	rtnl_af_register(&inet6_ops);
7303 
7304 	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETLINK,
7305 				   NULL, inet6_dump_ifinfo, 0);
7306 	if (err < 0)
7307 		goto errout;
7308 
7309 	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWADDR,
7310 				   inet6_rtm_newaddr, NULL, 0);
7311 	if (err < 0)
7312 		goto errout;
7313 	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELADDR,
7314 				   inet6_rtm_deladdr, NULL, 0);
7315 	if (err < 0)
7316 		goto errout;
7317 	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETADDR,
7318 				   inet6_rtm_getaddr, inet6_dump_ifaddr,
7319 				   RTNL_FLAG_DOIT_UNLOCKED);
7320 	if (err < 0)
7321 		goto errout;
7322 	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETMULTICAST,
7323 				   NULL, inet6_dump_ifmcaddr, 0);
7324 	if (err < 0)
7325 		goto errout;
7326 	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETANYCAST,
7327 				   NULL, inet6_dump_ifacaddr, 0);
7328 	if (err < 0)
7329 		goto errout;
7330 	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETNETCONF,
7331 				   inet6_netconf_get_devconf,
7332 				   inet6_netconf_dump_devconf,
7333 				   RTNL_FLAG_DOIT_UNLOCKED);
7334 	if (err < 0)
7335 		goto errout;
7336 	err = ipv6_addr_label_rtnl_register();
7337 	if (err < 0)
7338 		goto errout;
7339 
7340 	return 0;
7341 errout:
7342 	rtnl_unregister_all(PF_INET6);
7343 	rtnl_af_unregister(&inet6_ops);
7344 	unregister_netdevice_notifier(&ipv6_dev_notf);
7345 errlo:
7346 	destroy_workqueue(addrconf_wq);
7347 out_nowq:
7348 	unregister_pernet_subsys(&addrconf_ops);
7349 out_addrlabel:
7350 	ipv6_addr_label_cleanup();
7351 out:
7352 	return err;
7353 }
7354 
7355 void addrconf_cleanup(void)
7356 {
7357 	struct net_device *dev;
7358 
7359 	unregister_netdevice_notifier(&ipv6_dev_notf);
7360 	unregister_pernet_subsys(&addrconf_ops);
7361 	ipv6_addr_label_cleanup();
7362 
7363 	rtnl_af_unregister(&inet6_ops);
7364 
7365 	rtnl_lock();
7366 
7367 	/* clean dev list */
7368 	for_each_netdev(&init_net, dev) {
7369 		if (__in6_dev_get(dev) == NULL)
7370 			continue;
7371 		addrconf_ifdown(dev, true);
7372 	}
7373 	addrconf_ifdown(init_net.loopback_dev, true);
7374 
7375 	rtnl_unlock();
7376 
7377 	destroy_workqueue(addrconf_wq);
7378 }
7379