xref: /linux/net/ipv6/addrconf.c (revision 189f164e573e18d9f8876dbd3ad8fcbe11f93037)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	IPv6 Address [auto]configuration
4  *	Linux INET6 implementation
5  *
6  *	Authors:
7  *	Pedro Roque		<roque@di.fc.ul.pt>
8  *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
9  */
10 
11 /*
12  *	Changes:
13  *
14  *	Janos Farkas			:	delete timer on ifdown
15  *	<chexum@bankinf.banki.hu>
16  *	Andi Kleen			:	kill double kfree on module
17  *						unload.
18  *	Maciej W. Rozycki		:	FDDI support
19  *	sekiya@USAGI			:	Don't send too many RS
20  *						packets.
21  *	yoshfuji@USAGI			:       Fixed interval between DAD
22  *						packets.
23  *	YOSHIFUJI Hideaki @USAGI	:	improved accuracy of
24  *						address validation timer.
25  *	YOSHIFUJI Hideaki @USAGI	:	Privacy Extensions (RFC3041)
26  *						support.
27  *	Yuji SEKIYA @USAGI		:	Don't assign a same IPv6
28  *						address on a same interface.
29  *	YOSHIFUJI Hideaki @USAGI	:	ARCnet support
30  *	YOSHIFUJI Hideaki @USAGI	:	convert /proc/net/if_inet6 to
31  *						seq_file.
32  *	YOSHIFUJI Hideaki @USAGI	:	improved source address
33  *						selection; consider scope,
34  *						status etc.
35  */
36 
37 #define pr_fmt(fmt) "IPv6: " fmt
38 
39 #include <linux/errno.h>
40 #include <linux/types.h>
41 #include <linux/kernel.h>
42 #include <linux/sched/signal.h>
43 #include <linux/socket.h>
44 #include <linux/sockios.h>
45 #include <linux/net.h>
46 #include <linux/inet.h>
47 #include <linux/in6.h>
48 #include <linux/netdevice.h>
49 #include <linux/if_addr.h>
50 #include <linux/if_arp.h>
51 #include <linux/if_arcnet.h>
52 #include <linux/if_infiniband.h>
53 #include <linux/route.h>
54 #include <linux/inetdevice.h>
55 #include <linux/init.h>
56 #include <linux/slab.h>
57 #ifdef CONFIG_SYSCTL
58 #include <linux/sysctl.h>
59 #endif
60 #include <linux/capability.h>
61 #include <linux/delay.h>
62 #include <linux/notifier.h>
63 #include <linux/string.h>
64 #include <linux/hash.h>
65 
66 #include <net/ip_tunnels.h>
67 #include <net/net_namespace.h>
68 #include <net/sock.h>
69 #include <net/snmp.h>
70 
71 #include <net/6lowpan.h>
72 #include <net/firewire.h>
73 #include <net/ipv6.h>
74 #include <net/protocol.h>
75 #include <net/ndisc.h>
76 #include <net/ip6_route.h>
77 #include <net/addrconf.h>
78 #include <net/tcp.h>
79 #include <net/ip.h>
80 #include <net/netlink.h>
81 #include <net/pkt_sched.h>
82 #include <net/l3mdev.h>
83 #include <net/netdev_lock.h>
84 #include <linux/if_tunnel.h>
85 #include <linux/rtnetlink.h>
86 #include <linux/netconf.h>
87 #include <linux/random.h>
88 #include <linux/uaccess.h>
89 #include <linux/unaligned.h>
90 
91 #include <linux/proc_fs.h>
92 #include <linux/seq_file.h>
93 #include <linux/export.h>
94 #include <linux/ioam6.h>
95 
96 #define IPV6_MAX_STRLEN \
97 	sizeof("ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255")
98 
cstamp_delta(unsigned long cstamp)99 static inline u32 cstamp_delta(unsigned long cstamp)
100 {
101 	return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
102 }
103 
/* Initial retransmission time randomized per RFC 3315 section 14:
 * IRT scaled by a uniform factor in [0.9, 1.1].
 */
static inline s32 rfc3315_s14_backoff_init(s32 irt)
{
	u64 val;

	val = (u64)irt * get_random_u32_inclusive(900000, 1100000);
	do_div(val, 1000000);
	return (s32)val;
}
111 
/* Exponential backoff per RFC 3315 section 14: roughly double the
 * retransmission timeout (factor 1.9 .. 2.1), clamped to a randomized
 * maximum (MRT scaled by 0.9 .. 1.1) when it would exceed @mrt.
 */
static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
{
	u64 val;

	val = (u64)rt * get_random_u32_inclusive(1900000, 2100000);
	do_div(val, 1000000);

	if ((s32)val > mrt) {
		val = (u64)mrt * get_random_u32_inclusive(900000, 1100000);
		do_div(val, 1000000);
	}
	return (s32)val;
}
124 
#ifdef CONFIG_SYSCTL
static int addrconf_sysctl_register(struct inet6_dev *idev);
static void addrconf_sysctl_unregister(struct inet6_dev *idev);
#else
/* No-op stubs when sysctl support is compiled out; registration
 * trivially succeeds.
 */
static inline int addrconf_sysctl_register(struct inet6_dev *idev)
{
	return 0;
}

static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
{
}
#endif
138 
139 static void ipv6_gen_rnd_iid(struct in6_addr *addr);
140 
141 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
142 static int ipv6_count_addresses(const struct inet6_dev *idev);
143 static int ipv6_generate_stable_address(struct in6_addr *addr,
144 					u8 dad_count,
145 					const struct inet6_dev *idev);
146 
147 #define IN6_ADDR_HSIZE_SHIFT	8
148 #define IN6_ADDR_HSIZE		(1 << IN6_ADDR_HSIZE_SHIFT)
149 
150 static void addrconf_verify(struct net *net);
151 static void addrconf_verify_rtnl(struct net *net);
152 
153 static struct workqueue_struct *addrconf_wq;
154 
155 static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
156 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
157 
158 static void addrconf_type_change(struct net_device *dev,
159 				 unsigned long event);
160 static int addrconf_ifdown(struct net_device *dev, bool unregister);
161 
162 static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
163 						  int plen,
164 						  const struct net_device *dev,
165 						  u32 flags, u32 noflags,
166 						  bool no_gw);
167 
168 static void addrconf_dad_start(struct inet6_ifaddr *ifp);
169 static void addrconf_dad_work(struct work_struct *w);
170 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
171 				   bool send_na);
172 static void addrconf_dad_run(struct inet6_dev *idev, bool restart);
173 static void addrconf_rs_timer(struct timer_list *t);
174 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
175 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
176 
177 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
178 				struct prefix_info *pinfo);
179 
/* Compile-time defaults for the per-interface IPv6 configuration
 * ("all" template).  Note: differs from ipv6_devconf_dflt below in
 * .accept_dad (0 here vs 1 there).
 */
static struct ipv6_devconf ipv6_devconf __read_mostly = {
	.forwarding		= 0,
	.hop_limit		= IPV6_DEFAULT_HOPLIMIT,
	.mtu6			= IPV6_MIN_MTU,
	.accept_ra		= 1,
	.accept_redirects	= 1,
	.autoconf		= 1,
	.force_mld_version	= 0,
	.mldv1_unsolicited_report_interval = 10 * HZ,
	.mldv2_unsolicited_report_interval = HZ,
	.dad_transmits		= 1,
	.rtr_solicits		= MAX_RTR_SOLICITATIONS,
	.rtr_solicit_interval	= RTR_SOLICITATION_INTERVAL,
	.rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
	.rtr_solicit_delay	= MAX_RTR_SOLICITATION_DELAY,
	.use_tempaddr		= 0,
	.temp_valid_lft		= TEMP_VALID_LIFETIME,
	.temp_prefered_lft	= TEMP_PREFERRED_LIFETIME,
	.regen_min_advance	= REGEN_MIN_ADVANCE,
	.regen_max_retry	= REGEN_MAX_RETRY,
	.max_desync_factor	= MAX_DESYNC_FACTOR,
	.max_addresses		= IPV6_MAX_ADDRESSES,
	.accept_ra_defrtr	= 1,
	.ra_defrtr_metric	= IP6_RT_PRIO_USER,
	.accept_ra_from_local	= 0,
	.accept_ra_min_hop_limit= 1,
	.accept_ra_min_lft	= 0,
	.accept_ra_pinfo	= 1,
#ifdef CONFIG_IPV6_ROUTER_PREF
	.accept_ra_rtr_pref	= 1,
	.rtr_probe_interval	= 60 * HZ,
#ifdef CONFIG_IPV6_ROUTE_INFO
	.accept_ra_rt_info_min_plen = 0,
	.accept_ra_rt_info_max_plen = 0,
#endif
#endif
	.proxy_ndp		= 0,
	.accept_source_route	= 0,	/* we do not accept RH0 by default. */
	.disable_ipv6		= 0,
	.accept_dad		= 0,
	.suppress_frag_ndisc	= 1,
	.accept_ra_mtu		= 1,
	.stable_secret		= {
		.initialized = false,
	},
	.use_oif_addrs_only	= 0,
	.ignore_routes_with_linkdown = 0,
	.keep_addr_on_down	= 0,
	.seg6_enabled		= 0,
#ifdef CONFIG_IPV6_SEG6_HMAC
	.seg6_require_hmac	= 0,
#endif
	.enhanced_dad           = 1,
	.addr_gen_mode		= IN6_ADDR_GEN_MODE_EUI64,
	.disable_policy		= 0,
	.rpl_seg_enabled	= 0,
	.ioam6_enabled		= 0,
	.ioam6_id               = IOAM6_DEFAULT_IF_ID,
	.ioam6_id_wide		= IOAM6_DEFAULT_IF_ID_WIDE,
	.ndisc_evict_nocarrier	= 1,
	.ra_honor_pio_life	= 0,
	.ra_honor_pio_pflag	= 0,
	.force_forwarding	= 0,
};
244 
/* Per-device default configuration; copied into each new inet6_dev
 * (see the memcpy from devconf_dflt in ipv6_add_dev()).
 */
static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
	.forwarding		= 0,
	.hop_limit		= IPV6_DEFAULT_HOPLIMIT,
	.mtu6			= IPV6_MIN_MTU,
	.accept_ra		= 1,
	.accept_redirects	= 1,
	.autoconf		= 1,
	.force_mld_version	= 0,
	.mldv1_unsolicited_report_interval = 10 * HZ,
	.mldv2_unsolicited_report_interval = HZ,
	.dad_transmits		= 1,
	.rtr_solicits		= MAX_RTR_SOLICITATIONS,
	.rtr_solicit_interval	= RTR_SOLICITATION_INTERVAL,
	.rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
	.rtr_solicit_delay	= MAX_RTR_SOLICITATION_DELAY,
	.use_tempaddr		= 0,
	.temp_valid_lft		= TEMP_VALID_LIFETIME,
	.temp_prefered_lft	= TEMP_PREFERRED_LIFETIME,
	.regen_min_advance	= REGEN_MIN_ADVANCE,
	.regen_max_retry	= REGEN_MAX_RETRY,
	.max_desync_factor	= MAX_DESYNC_FACTOR,
	.max_addresses		= IPV6_MAX_ADDRESSES,
	.accept_ra_defrtr	= 1,
	.ra_defrtr_metric	= IP6_RT_PRIO_USER,
	.accept_ra_from_local	= 0,
	.accept_ra_min_hop_limit= 1,
	.accept_ra_min_lft	= 0,
	.accept_ra_pinfo	= 1,
#ifdef CONFIG_IPV6_ROUTER_PREF
	.accept_ra_rtr_pref	= 1,
	.rtr_probe_interval	= 60 * HZ,
#ifdef CONFIG_IPV6_ROUTE_INFO
	.accept_ra_rt_info_min_plen = 0,
	.accept_ra_rt_info_max_plen = 0,
#endif
#endif
	.proxy_ndp		= 0,
	.accept_source_route	= 0,	/* we do not accept RH0 by default. */
	.disable_ipv6		= 0,
	.accept_dad		= 1,
	.suppress_frag_ndisc	= 1,
	.accept_ra_mtu		= 1,
	.stable_secret		= {
		.initialized = false,
	},
	.use_oif_addrs_only	= 0,
	.ignore_routes_with_linkdown = 0,
	.keep_addr_on_down	= 0,
	.seg6_enabled		= 0,
#ifdef CONFIG_IPV6_SEG6_HMAC
	.seg6_require_hmac	= 0,
#endif
	.enhanced_dad           = 1,
	.addr_gen_mode		= IN6_ADDR_GEN_MODE_EUI64,
	.disable_policy		= 0,
	.rpl_seg_enabled	= 0,
	.ioam6_enabled		= 0,
	.ioam6_id               = IOAM6_DEFAULT_IF_ID,
	.ioam6_id_wide		= IOAM6_DEFAULT_IF_ID_WIDE,
	.ndisc_evict_nocarrier	= 1,
	.ra_honor_pio_life	= 0,
	.ra_honor_pio_pflag	= 0,
	.force_forwarding	= 0,
};
309 
310 /* Check if link is ready: is it up and is a valid qdisc available */
addrconf_link_ready(const struct net_device * dev)311 static inline bool addrconf_link_ready(const struct net_device *dev)
312 {
313 	return netif_oper_up(dev) && !qdisc_tx_is_noop(dev);
314 }
315 
/* Stop a pending router-solicitation timer.  timer_delete() returns
 * true only when the timer was still pending, i.e. the idev reference
 * taken when the timer was armed must be dropped here.
 */
static void addrconf_del_rs_timer(struct inet6_dev *idev)
{
	if (timer_delete(&idev->rs_timer))
		__in6_dev_put(idev);
}
321 
/* Cancel pending DAD work; drop the ifa reference the queued work
 * owned if (and only if) it had not run yet.
 */
static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
{
	if (cancel_delayed_work(&ifp->dad_work))
		__in6_ifa_put(ifp);
}
327 
/* (Re)arm the router-solicitation timer to fire @when jiffies from now.
 * mod_timer() returns 0 when the timer was inactive; in that case take
 * a fresh idev reference on behalf of the newly armed timer.
 */
static void addrconf_mod_rs_timer(struct inet6_dev *idev,
				  unsigned long when)
{
	if (!mod_timer(&idev->rs_timer, jiffies + when))
		in6_dev_hold(idev);
}
334 
/* (Re)queue DAD work after @delay.  A reference is taken up front for
 * the queued work; if mod_delayed_work() reports the work was already
 * pending, its earlier reference is still in place, so release the one
 * just taken.
 */
static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
				   unsigned long delay)
{
	in6_ifa_hold(ifp);
	if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
		in6_ifa_put(ifp);
}
342 
snmp6_alloc_dev(struct inet6_dev * idev)343 static int snmp6_alloc_dev(struct inet6_dev *idev)
344 {
345 	int i;
346 
347 	idev->stats.ipv6 = alloc_percpu_gfp(struct ipstats_mib, GFP_KERNEL_ACCOUNT);
348 	if (!idev->stats.ipv6)
349 		goto err_ip;
350 
351 	for_each_possible_cpu(i) {
352 		struct ipstats_mib *addrconf_stats;
353 		addrconf_stats = per_cpu_ptr(idev->stats.ipv6, i);
354 		u64_stats_init(&addrconf_stats->syncp);
355 	}
356 
357 
358 	idev->stats.icmpv6dev = kzalloc_obj(struct icmpv6_mib_device);
359 	if (!idev->stats.icmpv6dev)
360 		goto err_icmp;
361 	idev->stats.icmpv6msgdev = kzalloc_obj(struct icmpv6msg_mib_device,
362 					       GFP_KERNEL_ACCOUNT);
363 	if (!idev->stats.icmpv6msgdev)
364 		goto err_icmpmsg;
365 
366 	return 0;
367 
368 err_icmpmsg:
369 	kfree(idev->stats.icmpv6dev);
370 err_icmp:
371 	free_percpu(idev->stats.ipv6);
372 err_ip:
373 	return -ENOMEM;
374 }
375 
/* Create and initialize the inet6_dev for @dev: copy the netns default
 * configuration, allocate neighbour parms and SNMP stats, register
 * proc/sysctl entries (except for blackhole_netdev) and join the
 * standard multicast groups.  Returns the new inet6_dev or ERR_PTR().
 * Caller must hold RTNL and the netdev ops lock.
 */
static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
{
	struct inet6_dev *ndev;
	int err = -ENOMEM;

	ASSERT_RTNL();
	netdev_ops_assert_locked(dev);

	if (dev->mtu < IPV6_MIN_MTU && dev != blackhole_netdev)
		return ERR_PTR(-EINVAL);

	ndev = kzalloc_obj(*ndev, GFP_KERNEL_ACCOUNT);
	if (!ndev)
		return ERR_PTR(err);

	rwlock_init(&ndev->lock);
	ndev->dev = dev;
	INIT_LIST_HEAD(&ndev->addr_list);
	timer_setup(&ndev->rs_timer, addrconf_rs_timer, 0);
	/* Start from the netns per-device default configuration */
	memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));

	if (ndev->cnf.stable_secret.initialized)
		ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;

	ndev->cnf.mtu6 = dev->mtu;
	ndev->ra_mtu = 0;
	ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
	if (!ndev->nd_parms) {
		kfree(ndev);
		return ERR_PTR(err);
	}
	if (ndev->cnf.forwarding)
		netif_disable_lro(dev);
	/* We refer to the device */
	netdev_hold(dev, &ndev->dev_tracker, GFP_KERNEL);

	if (snmp6_alloc_dev(ndev) < 0) {
		netdev_dbg(dev, "%s: cannot allocate memory for statistics\n",
			   __func__);
		neigh_parms_release(&nd_tbl, ndev->nd_parms);
		netdev_put(dev, &ndev->dev_tracker);
		kfree(ndev);
		return ERR_PTR(err);
	}

	if (dev != blackhole_netdev) {
		if (snmp6_register_dev(ndev) < 0) {
			netdev_dbg(dev, "%s: cannot create /proc/net/dev_snmp6/%s\n",
				   __func__, dev->name);
			goto err_release;
		}
	}
	/* One reference from device. */
	refcount_set(&ndev->refcnt, 1);

	/* No DAD on devices that cannot meaningfully do it */
	if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
		ndev->cnf.accept_dad = -1;

#if IS_ENABLED(CONFIG_IPV6_SIT)
	if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
		pr_info("%s: Disabled Multicast RS\n", dev->name);
		ndev->cnf.rtr_solicits = 0;
	}
#endif

	INIT_LIST_HEAD(&ndev->tempaddr_list);
	ndev->desync_factor = U32_MAX;
	if ((dev->flags&IFF_LOOPBACK) ||
	    dev->type == ARPHRD_TUNNEL ||
	    dev->type == ARPHRD_TUNNEL6 ||
	    dev->type == ARPHRD_SIT ||
	    dev->type == ARPHRD_NONE) {
		/* No temporary (privacy) addresses on loopback or
		 * tunnel-like devices.
		 */
		ndev->cnf.use_tempaddr = -1;
	}

	ndev->token = in6addr_any;

	if (netif_running(dev) && addrconf_link_ready(dev))
		ndev->if_flags |= IF_READY;

	ipv6_mc_init_dev(ndev);
	ndev->tstamp = jiffies;
	if (dev != blackhole_netdev) {
		err = addrconf_sysctl_register(ndev);
		if (err) {
			ipv6_mc_destroy_dev(ndev);
			snmp6_unregister_dev(ndev);
			goto err_release;
		}
	}
	/* protected by rtnl_lock */
	rcu_assign_pointer(dev->ip6_ptr, ndev);

	if (dev != blackhole_netdev) {
		/* Join interface-local all-node multicast group */
		ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allnodes);

		/* Join all-node multicast group */
		ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);

		/* Join all-router multicast group if forwarding is set */
		if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST))
			ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
	}
	return ndev;

err_release:
	neigh_parms_release(&nd_tbl, ndev->nd_parms);
	ndev->dead = 1;
	in6_dev_finish_destroy(ndev);
	return ERR_PTR(err);
}
488 
ipv6_find_idev(struct net_device * dev)489 static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
490 {
491 	struct inet6_dev *idev;
492 
493 	ASSERT_RTNL();
494 
495 	idev = __in6_dev_get(dev);
496 	if (!idev) {
497 		idev = ipv6_add_dev(dev);
498 		if (IS_ERR(idev))
499 			return idev;
500 	}
501 
502 	if (dev->flags&IFF_UP)
503 		ipv6_mc_up(idev);
504 	return idev;
505 }
506 
inet6_netconf_msgsize_devconf(int type)507 static int inet6_netconf_msgsize_devconf(int type)
508 {
509 	int size =  NLMSG_ALIGN(sizeof(struct netconfmsg))
510 		    + nla_total_size(4);	/* NETCONFA_IFINDEX */
511 	bool all = false;
512 
513 	if (type == NETCONFA_ALL)
514 		all = true;
515 
516 	if (all || type == NETCONFA_FORWARDING)
517 		size += nla_total_size(4);
518 #ifdef CONFIG_IPV6_MROUTE
519 	if (all || type == NETCONFA_MC_FORWARDING)
520 		size += nla_total_size(4);
521 #endif
522 	if (all || type == NETCONFA_PROXY_NEIGH)
523 		size += nla_total_size(4);
524 
525 	if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
526 		size += nla_total_size(4);
527 
528 	return size;
529 }
530 
/* Fill one RTM netconf message for @devconf into @skb.  @type selects a
 * single NETCONFA_* attribute, or NETCONFA_ALL to include every
 * supported one.  A NULL @devconf emits only the header and ifindex.
 * Returns 0 on success or -EMSGSIZE when the skb has no room.
 */
static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
				      struct ipv6_devconf *devconf, u32 portid,
				      u32 seq, int event, unsigned int flags,
				      int type)
{
	struct nlmsghdr  *nlh;
	struct netconfmsg *ncm;
	bool all = false;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	if (type == NETCONFA_ALL)
		all = true;

	ncm = nlmsg_data(nlh);
	ncm->ncm_family = AF_INET6;

	if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
		goto nla_put_failure;

	/* Header-only message when no devconf was supplied */
	if (!devconf)
		goto out;

	if ((all || type == NETCONFA_FORWARDING) &&
	    nla_put_s32(skb, NETCONFA_FORWARDING,
			READ_ONCE(devconf->forwarding)) < 0)
		goto nla_put_failure;
#ifdef CONFIG_IPV6_MROUTE
	if ((all || type == NETCONFA_MC_FORWARDING) &&
	    nla_put_s32(skb, NETCONFA_MC_FORWARDING,
			atomic_read(&devconf->mc_forwarding)) < 0)
		goto nla_put_failure;
#endif
	if ((all || type == NETCONFA_PROXY_NEIGH) &&
	    nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
			READ_ONCE(devconf->proxy_ndp)) < 0)
		goto nla_put_failure;

	if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
	    nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
			READ_ONCE(devconf->ignore_routes_with_linkdown)) < 0)
		goto nla_put_failure;

out:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
585 
/* Broadcast a netconf change notification for attribute @type on
 * @ifindex to the RTNLGRP_IPV6_NETCONF multicast group; on failure the
 * error is recorded on the group via rtnl_set_sk_err().
 */
void inet6_netconf_notify_devconf(struct net *net, int event, int type,
				  int ifindex, struct ipv6_devconf *devconf)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(inet6_netconf_msgsize_devconf(type), GFP_KERNEL);
	if (!skb)
		goto errout;

	err = inet6_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
					 event, 0, type);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_KERNEL);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err);
}
609 
/* Netlink attribute policy for netconf requests. */
static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
	[NETCONFA_IFINDEX]	= { .len = sizeof(int) },
	[NETCONFA_FORWARDING]	= { .len = sizeof(int) },
	[NETCONFA_PROXY_NEIGH]	= { .len = sizeof(int) },
	[NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN]	= { .len = sizeof(int) },
};
616 
/* Validate and parse an RTM_GETNETCONF request into @tb.  Under strict
 * checking, only NETCONFA_IFINDEX is accepted; any other attribute is
 * rejected with -EINVAL.
 */
static int inet6_netconf_valid_get_req(struct sk_buff *skb,
				       const struct nlmsghdr *nlh,
				       struct nlattr **tb,
				       struct netlink_ext_ack *extack)
{
	int i, err;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf get request");
		return -EINVAL;
	}

	/* Legacy (non-strict) clients: plain deprecated parse, no
	 * per-attribute filtering.
	 */
	if (!netlink_strict_get_check(skb))
		return nlmsg_parse_deprecated(nlh, sizeof(struct netconfmsg),
					      tb, NETCONFA_MAX,
					      devconf_ipv6_policy, extack);

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct netconfmsg),
					    tb, NETCONFA_MAX,
					    devconf_ipv6_policy, extack);
	if (err)
		return err;

	for (i = 0; i <= NETCONFA_MAX; i++) {
		if (!tb[i])
			continue;

		switch (i) {
		case NETCONFA_IFINDEX:
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in netconf get request");
			return -EINVAL;
		}
	}

	return 0;
}
655 
/* RTM_GETNETCONF handler: reply with the devconf selected by
 * NETCONFA_IFINDEX — the "all"/"default" pseudo-indices or a real
 * device — as a unicast RTM_NEWNETCONF carrying NETCONFA_ALL.
 */
static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
				     struct nlmsghdr *nlh,
				     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[NETCONFA_MAX+1];
	struct inet6_dev *in6_dev = NULL;
	struct net_device *dev = NULL;
	struct sk_buff *skb;
	struct ipv6_devconf *devconf;
	int ifindex;
	int err;

	err = inet6_netconf_valid_get_req(in_skb, nlh, tb, extack);
	if (err < 0)
		return err;

	if (!tb[NETCONFA_IFINDEX])
		return -EINVAL;

	err = -EINVAL;
	ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
	switch (ifindex) {
	case NETCONFA_IFINDEX_ALL:
		devconf = net->ipv6.devconf_all;
		break;
	case NETCONFA_IFINDEX_DEFAULT:
		devconf = net->ipv6.devconf_dflt;
		break;
	default:
		/* Real device: hold both dev and its inet6_dev until
		 * the reply has been built.
		 */
		dev = dev_get_by_index(net, ifindex);
		if (!dev)
			return -EINVAL;
		in6_dev = in6_dev_get(dev);
		if (!in6_dev)
			goto errout;
		devconf = &in6_dev->cnf;
		break;
	}

	err = -ENOBUFS;
	skb = nlmsg_new(inet6_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
	if (!skb)
		goto errout;

	err = inet6_netconf_fill_devconf(skb, ifindex, devconf,
					 NETLINK_CB(in_skb).portid,
					 nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
					 NETCONFA_ALL);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
	if (in6_dev)
		in6_dev_put(in6_dev);
	dev_put(dev);	/* dev_put(NULL) is a no-op */
	return err;
}
718 
719 /* Combine dev_addr_genid and dev_base_seq to detect changes.
720  */
inet6_base_seq(const struct net * net)721 static u32 inet6_base_seq(const struct net *net)
722 {
723 	u32 res = atomic_read(&net->ipv6.dev_addr_genid) +
724 		  READ_ONCE(net->dev_base_seq);
725 
726 	/* Must not return 0 (see nl_dump_check_consistent()).
727 	 * Chose a value far away from 0.
728 	 */
729 	if (!res)
730 		res = 0x80000000;
731 	return res;
732 }
733 
/* RTM_GETNETCONF dump handler: walk every netdev's devconf, then emit
 * the "all" and "default" pseudo-entries.  Resume state (device cursor
 * and which pseudo-entries were already sent) lives in cb->ctx.
 */
static int inet6_netconf_dump_devconf(struct sk_buff *skb,
				      struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct net *net = sock_net(skb->sk);
	struct {
		unsigned long ifindex;		/* device walk cursor */
		unsigned int all_default;	/* 0: none sent, 1: "all" sent, 2: both */
	} *ctx = (void *)cb->ctx;
	struct net_device *dev;
	struct inet6_dev *idev;
	int err = 0;

	if (cb->strict_check) {
		struct netlink_ext_ack *extack = cb->extack;
		struct netconfmsg *ncm;

		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf dump request");
			return -EINVAL;
		}

		/* Dump requests carry no attributes */
		if (nlmsg_attrlen(nlh, sizeof(*ncm))) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid data after header in netconf dump request");
			return -EINVAL;
		}
	}

	rcu_read_lock();
	for_each_netdev_dump(net, dev, ctx->ifindex) {
		idev = __in6_dev_get(dev);
		if (!idev)
			continue;
		err = inet6_netconf_fill_devconf(skb, dev->ifindex,
					         &idev->cnf,
						 NETLINK_CB(cb->skb).portid,
						 nlh->nlmsg_seq,
						 RTM_NEWNETCONF,
						 NLM_F_MULTI,
						 NETCONFA_ALL);
		if (err < 0)
			goto done;
	}
	if (ctx->all_default == 0) {
		err = inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
						 net->ipv6.devconf_all,
						 NETLINK_CB(cb->skb).portid,
						 nlh->nlmsg_seq,
						 RTM_NEWNETCONF, NLM_F_MULTI,
						 NETCONFA_ALL);
		if (err < 0)
			goto done;
		ctx->all_default++;
	}
	if (ctx->all_default == 1) {
		err = inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
						 net->ipv6.devconf_dflt,
						 NETLINK_CB(cb->skb).portid,
						 nlh->nlmsg_seq,
						 RTM_NEWNETCONF, NLM_F_MULTI,
						 NETCONFA_ALL);
		if (err < 0)
			goto done;
		ctx->all_default++;
	}
done:
	rcu_read_unlock();
	return err;
}
803 
804 #ifdef CONFIG_SYSCTL
/* Apply a forwarding flag change on one device: adjust LRO, join/leave
 * the all-routers multicast groups, update anycast membership for every
 * non-tentative address, and send a netconf notification.
 */
static void dev_forward_change(struct inet6_dev *idev)
{
	struct net_device *dev;
	struct inet6_ifaddr *ifa;
	LIST_HEAD(tmp_addr_list);

	if (!idev)
		return;
	dev = idev->dev;
	if (idev->cnf.forwarding)
		dev_disable_lro(dev);
	if (dev->flags & IFF_MULTICAST) {
		if (idev->cnf.forwarding) {
			ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
			ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allrouters);
			ipv6_dev_mc_inc(dev, &in6addr_sitelocal_allrouters);
		} else {
			ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters);
			ipv6_dev_mc_dec(dev, &in6addr_interfacelocal_allrouters);
			ipv6_dev_mc_dec(dev, &in6addr_sitelocal_allrouters);
		}
	}

	/* Collect the addresses under idev->lock, then do the anycast
	 * join/leave after dropping it.
	 */
	read_lock_bh(&idev->lock);
	list_for_each_entry(ifa, &idev->addr_list, if_list) {
		if (ifa->flags&IFA_F_TENTATIVE)
			continue;
		list_add_tail(&ifa->if_list_aux, &tmp_addr_list);
	}
	read_unlock_bh(&idev->lock);

	while (!list_empty(&tmp_addr_list)) {
		ifa = list_first_entry(&tmp_addr_list,
				       struct inet6_ifaddr, if_list_aux);
		list_del(&ifa->if_list_aux);
		if (idev->cnf.forwarding)
			addrconf_join_anycast(ifa);
		else
			addrconf_leave_anycast(ifa);
	}

	inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
				     NETCONFA_FORWARDING,
				     dev->ifindex, &idev->cnf);
}
850 
851 
/* Propagate a new forwarding value to every device in @net, invoking
 * dev_forward_change() only where the effective boolean value changed.
 */
static void addrconf_forward_change(struct net *net, __s32 newf)
{
	struct net_device *dev;
	struct inet6_dev *idev;

	for_each_netdev(net, dev) {
		idev = __in6_dev_get_rtnl_net(dev);
		if (idev) {
			int changed = (!idev->cnf.forwarding) ^ (!newf);
			/* Disabling all.forwarding sets 0 to force_forwarding for all interfaces */
			if (newf == 0)
				WRITE_ONCE(idev->cnf.force_forwarding, 0);

			WRITE_ONCE(idev->cnf.forwarding, newf);
			if (changed)
				dev_forward_change(idev);
		}
	}
}
871 
/* Sysctl write path for "forwarding".  @p identifies which devconf
 * field was written (default / all / per-device) and determines the
 * propagation: writing "all" also updates the default and every device.
 * Returns restart_syscall() when the rtnl lock is contended, 0 when
 * only the default was touched, 1 otherwise (routes were affected).
 */
static int addrconf_fixup_forwarding(const struct ctl_table *table, int *p, int newf)
{
	struct net *net = (struct net *)table->extra2;
	int old;

	if (!rtnl_net_trylock(net))
		return restart_syscall();

	old = *p;
	WRITE_ONCE(*p, newf);

	if (p == &net->ipv6.devconf_dflt->forwarding) {
		if ((!newf) ^ (!old))
			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
						     NETCONFA_FORWARDING,
						     NETCONFA_IFINDEX_DEFAULT,
						     net->ipv6.devconf_dflt);
		rtnl_net_unlock(net);
		return 0;
	}

	if (p == &net->ipv6.devconf_all->forwarding) {
		int old_dflt = net->ipv6.devconf_dflt->forwarding;

		/* Writing "all" also rewrites the default ... */
		WRITE_ONCE(net->ipv6.devconf_dflt->forwarding, newf);
		if ((!newf) ^ (!old_dflt))
			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
						     NETCONFA_FORWARDING,
						     NETCONFA_IFINDEX_DEFAULT,
						     net->ipv6.devconf_dflt);

		/* ... and every device in the netns */
		addrconf_forward_change(net, newf);
		if ((!newf) ^ (!old))
			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
						     NETCONFA_FORWARDING,
						     NETCONFA_IFINDEX_ALL,
						     net->ipv6.devconf_all);
	} else if ((!newf) ^ (!old))
		dev_forward_change((struct inet6_dev *)table->extra1);
	rtnl_net_unlock(net);

	if (newf)
		rt6_purge_dflt_routers(net);
	return 1;
}
917 
/* Propagate a new ignore_routes_with_linkdown value to every device in
 * @net, notifying via netconf only where the effective boolean changed.
 */
static void addrconf_linkdown_change(struct net *net, __s32 newf)
{
	struct net_device *dev;
	struct inet6_dev *idev;

	for_each_netdev(net, dev) {
		idev = __in6_dev_get_rtnl_net(dev);
		if (idev) {
			int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf);

			WRITE_ONCE(idev->cnf.ignore_routes_with_linkdown, newf);
			if (changed)
				inet6_netconf_notify_devconf(dev_net(dev),
							     RTM_NEWNETCONF,
							     NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
							     dev->ifindex,
							     &idev->cnf);
		}
	}
}
938 
/* Sysctl write path for "ignore_routes_with_linkdown"; mirrors
 * addrconf_fixup_forwarding(): writing "all" also rewrites the default
 * and every device.  Returns restart_syscall() on rtnl contention,
 * 0 for a default-only update, 1 otherwise.
 */
static int addrconf_fixup_linkdown(const struct ctl_table *table, int *p, int newf)
{
	struct net *net = (struct net *)table->extra2;
	int old;

	if (!rtnl_net_trylock(net))
		return restart_syscall();

	old = *p;
	WRITE_ONCE(*p, newf);

	if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) {
		if ((!newf) ^ (!old))
			inet6_netconf_notify_devconf(net,
						     RTM_NEWNETCONF,
						     NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
						     NETCONFA_IFINDEX_DEFAULT,
						     net->ipv6.devconf_dflt);
		rtnl_net_unlock(net);
		return 0;
	}

	if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) {
		WRITE_ONCE(net->ipv6.devconf_dflt->ignore_routes_with_linkdown, newf);
		addrconf_linkdown_change(net, newf);
		if ((!newf) ^ (!old))
			inet6_netconf_notify_devconf(net,
						     RTM_NEWNETCONF,
						     NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
						     NETCONFA_IFINDEX_ALL,
						     net->ipv6.devconf_all);
	}

	rtnl_net_unlock(net);

	return 1;
}
976 
977 #endif
978 
/* Nobody refers to this ifaddr, destroy it */
void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
{
	/* Must already be unhashed from the address hash table */
	WARN_ON(!hlist_unhashed(&ifp->addr_lst));

#ifdef NET_REFCNT_DEBUG
	pr_debug("%s\n", __func__);
#endif

	in6_dev_put(ifp->idev);

	/* DAD work should have been cancelled before the last reference
	 * was dropped; finding it still pending indicates a bug.
	 */
	if (cancel_delayed_work(&ifp->dad_work))
		pr_notice("delayed DAD work was pending while freeing ifa=%p\n",
			  ifp);

	/* Leak rather than free an address not yet marked DEAD */
	if (ifp->state != INET6_IFADDR_STATE_DEAD) {
		pr_warn("Freeing alive inet6 address %p\n", ifp);
		return;
	}

	kfree_rcu(ifp, rcu);
}
1001 
/* Insert @ifp into @idev's address list, keeping the scope ordering
 * described below.
 */
static void
ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
{
	struct list_head *p;
	int ifp_scope = ipv6_addr_src_scope(&ifp->addr);

	/*
	 * Each device address list is sorted in order of scope -
	 * global before linklocal.
	 */
	list_for_each(p, &idev->addr_list) {
		struct inet6_ifaddr *ifa
			= list_entry(p, struct inet6_ifaddr, if_list);
		if (ifp_scope > ipv6_addr_src_scope(&ifa->addr))
			break;
	}

	/* Insert before the first entry of strictly lower scope (or at
	 * the tail when none was found).
	 */
	list_add_tail_rcu(&ifp->if_list, p);
}
1021 
inet6_addr_hash(const struct net * net,const struct in6_addr * addr)1022 static u32 inet6_addr_hash(const struct net *net, const struct in6_addr *addr)
1023 {
1024 	u32 val = __ipv6_addr_jhash(addr, net_hash_mix(net));
1025 
1026 	return hash_32(val, IN6_ADDR_HSIZE_SHIFT);
1027 }
1028 
ipv6_chk_same_addr(struct net * net,const struct in6_addr * addr,struct net_device * dev,unsigned int hash)1029 static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
1030 			       struct net_device *dev, unsigned int hash)
1031 {
1032 	struct inet6_ifaddr *ifp;
1033 
1034 	hlist_for_each_entry(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
1035 		if (ipv6_addr_equal(&ifp->addr, addr)) {
1036 			if (!dev || ifp->idev->dev == dev)
1037 				return true;
1038 		}
1039 	}
1040 	return false;
1041 }
1042 
ipv6_add_addr_hash(struct net_device * dev,struct inet6_ifaddr * ifa)1043 static int ipv6_add_addr_hash(struct net_device *dev, struct inet6_ifaddr *ifa)
1044 {
1045 	struct net *net = dev_net(dev);
1046 	unsigned int hash = inet6_addr_hash(net, &ifa->addr);
1047 	int err = 0;
1048 
1049 	spin_lock_bh(&net->ipv6.addrconf_hash_lock);
1050 
1051 	/* Ignore adding duplicate addresses on an interface */
1052 	if (ipv6_chk_same_addr(net, &ifa->addr, dev, hash)) {
1053 		netdev_dbg(dev, "ipv6_add_addr: already assigned\n");
1054 		err = -EEXIST;
1055 	} else {
1056 		hlist_add_head_rcu(&ifa->addr_lst, &net->ipv6.inet6_addr_lst[hash]);
1057 	}
1058 
1059 	spin_unlock_bh(&net->ipv6.addrconf_hash_lock);
1060 
1061 	return err;
1062 }
1063 
/* On success it returns ifp with increased reference count */

/* Allocate and install a new inet6_ifaddr on @idev from @cfg.
 * @can_block selects GFP_KERNEL vs GFP_ATOMIC and gates the (blocking)
 * validator notifier chain.  Returns the new address with an extra
 * reference held for the caller, or an ERR_PTR on failure.
 */
static struct inet6_ifaddr *
ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg,
	      bool can_block, struct netlink_ext_ack *extack)
{
	gfp_t gfp_flags = can_block ? GFP_KERNEL : GFP_ATOMIC;
	int addr_type = ipv6_addr_type(cfg->pfx);
	struct net *net = dev_net(idev->dev);
	struct inet6_ifaddr *ifa = NULL;
	struct fib6_info *f6i = NULL;
	int err = 0;

	/* Reject addresses that can never be assigned: the unspecified
	 * address, multicast without IFA_F_MCAUTOJOIN, and loopback on a
	 * device that is neither loopback nor an L3 master.
	 */
	if (addr_type == IPV6_ADDR_ANY) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid address");
		return ERR_PTR(-EADDRNOTAVAIL);
	} else if (addr_type & IPV6_ADDR_MULTICAST &&
		   !(cfg->ifa_flags & IFA_F_MCAUTOJOIN)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot assign multicast address without \"IFA_F_MCAUTOJOIN\" flag");
		return ERR_PTR(-EADDRNOTAVAIL);
	} else if (!(idev->dev->flags & IFF_LOOPBACK) &&
		   !netif_is_l3_master(idev->dev) &&
		   addr_type & IPV6_ADDR_LOOPBACK) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot assign loopback address on this device");
		return ERR_PTR(-EADDRNOTAVAIL);
	}

	if (idev->dead) {
		NL_SET_ERR_MSG_MOD(extack, "device is going away");
		err = -ENODEV;
		goto out;
	}

	if (idev->cnf.disable_ipv6) {
		NL_SET_ERR_MSG_MOD(extack, "IPv6 is disabled on this device");
		err = -EACCES;
		goto out;
	}

	/* validator notifier needs to be blocking;
	 * do not call in atomic context
	 */
	if (can_block) {
		struct in6_validator_info i6vi = {
			.i6vi_addr = *cfg->pfx,
			.i6vi_dev = idev,
			.extack = extack,
		};

		err = inet6addr_validator_notifier_call_chain(NETDEV_UP, &i6vi);
		err = notifier_to_errno(err);
		if (err < 0)
			goto out;
	}

	ifa = kzalloc_obj(*ifa, gfp_flags | __GFP_ACCOUNT);
	if (!ifa) {
		err = -ENOBUFS;
		goto out;
	}

	/* Host route for the address; released on the error path below. */
	f6i = addrconf_f6i_alloc(net, idev, cfg->pfx, false, gfp_flags, extack);
	if (IS_ERR(f6i)) {
		err = PTR_ERR(f6i);
		f6i = NULL;
		goto out;
	}

	neigh_parms_data_state_setall(idev->nd_parms);

	ifa->addr = *cfg->pfx;
	if (cfg->peer_pfx)
		ifa->peer_addr = *cfg->peer_pfx;

	spin_lock_init(&ifa->lock);
	INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work);
	INIT_HLIST_NODE(&ifa->addr_lst);
	ifa->scope = cfg->scope;
	ifa->prefix_len = cfg->plen;
	ifa->rt_priority = cfg->rt_priority;
	ifa->flags = cfg->ifa_flags;
	ifa->ifa_proto = cfg->ifa_proto;
	/* No need to add the TENTATIVE flag for addresses with NODAD */
	if (!(cfg->ifa_flags & IFA_F_NODAD))
		ifa->flags |= IFA_F_TENTATIVE;
	ifa->valid_lft = cfg->valid_lft;
	ifa->prefered_lft = cfg->preferred_lft;
	ifa->cstamp = ifa->tstamp = jiffies;
	ifa->tokenized = false;

	ifa->rt = f6i;

	/* Reference on idev, dropped by inet6_ifa_finish_destroy(). */
	ifa->idev = idev;
	in6_dev_hold(idev);

	/* For caller */
	refcount_set(&ifa->refcnt, 1);

	rcu_read_lock();

	/* From here on the address is publicly visible in the hash table. */
	err = ipv6_add_addr_hash(idev->dev, ifa);
	if (err < 0) {
		rcu_read_unlock();
		goto out;
	}

	write_lock_bh(&idev->lock);

	/* Add to inet6_dev unicast addr list. */
	ipv6_link_dev_addr(idev, ifa);

	/* Temporary addresses also sit on tempaddr_list; that list holds
	 * its own reference.
	 */
	if (ifa->flags&IFA_F_TEMPORARY) {
		list_add(&ifa->tmp_list, &idev->tempaddr_list);
		in6_ifa_hold(ifa);
	}

	/* Reference held by addr_list membership. */
	in6_ifa_hold(ifa);
	write_unlock_bh(&idev->lock);

	rcu_read_unlock();

	inet6addr_notifier_call_chain(NETDEV_UP, ifa);
out:
	/* Unified error unwind: safe before the address became visible;
	 * afterwards err is always 0 so this never runs.
	 */
	if (unlikely(err < 0)) {
		fib6_info_release(f6i);

		if (ifa) {
			if (ifa->idev)
				in6_dev_put(ifa->idev);
			kfree(ifa);
		}
		ifa = ERR_PTR(err);
	}

	return ifa;
}
1200 
/* What to do with the prefix route when an address is removed;
 * computed by check_cleanup_prefix_route() and acted upon by
 * cleanup_prefix_route().
 */
enum cleanup_prefix_rt_t {
	CLEANUP_PREFIX_RT_NOP,    /* no cleanup action for prefix route */
	CLEANUP_PREFIX_RT_DEL,    /* delete the prefix route */
	CLEANUP_PREFIX_RT_EXPIRE, /* update the lifetime of the prefix route */
};
1206 
1207 /*
1208  * Check, whether the prefix for ifp would still need a prefix route
1209  * after deleting ifp. The function returns one of the CLEANUP_PREFIX_RT_*
1210  * constants.
1211  *
1212  * 1) we don't purge prefix if address was not permanent.
1213  *    prefix is managed by its own lifetime.
1214  * 2) we also don't purge, if the address was IFA_F_NOPREFIXROUTE.
1215  * 3) if there are no addresses, delete prefix.
1216  * 4) if there are still other permanent address(es),
1217  *    corresponding prefix is still permanent.
1218  * 5) if there are still other addresses with IFA_F_NOPREFIXROUTE,
1219  *    don't purge the prefix, assume user space is managing it.
1220  * 6) otherwise, update prefix lifetime to the
1221  *    longest valid lifetime among the corresponding
1222  *    addresses on the device.
1223  *    Note: subsequent RA will update lifetime.
1224  **/
1225 static enum cleanup_prefix_rt_t
check_cleanup_prefix_route(struct inet6_ifaddr * ifp,unsigned long * expires)1226 check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
1227 {
1228 	struct inet6_ifaddr *ifa;
1229 	struct inet6_dev *idev = ifp->idev;
1230 	unsigned long lifetime;
1231 	enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_DEL;
1232 
1233 	*expires = jiffies;
1234 
1235 	list_for_each_entry(ifa, &idev->addr_list, if_list) {
1236 		if (ifa == ifp)
1237 			continue;
1238 		if (ifa->prefix_len != ifp->prefix_len ||
1239 		    !ipv6_prefix_equal(&ifa->addr, &ifp->addr,
1240 				       ifp->prefix_len))
1241 			continue;
1242 		if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
1243 			return CLEANUP_PREFIX_RT_NOP;
1244 
1245 		action = CLEANUP_PREFIX_RT_EXPIRE;
1246 
1247 		spin_lock(&ifa->lock);
1248 
1249 		lifetime = addrconf_timeout_fixup(ifa->valid_lft, HZ);
1250 		/*
1251 		 * Note: Because this address is
1252 		 * not permanent, lifetime <
1253 		 * LONG_MAX / HZ here.
1254 		 */
1255 		if (time_before(*expires, ifa->tstamp + lifetime * HZ))
1256 			*expires = ifa->tstamp + lifetime * HZ;
1257 		spin_unlock(&ifa->lock);
1258 	}
1259 
1260 	return action;
1261 }
1262 
1263 static void
cleanup_prefix_route(struct inet6_ifaddr * ifp,unsigned long expires,bool del_rt,bool del_peer)1264 cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires,
1265 		     bool del_rt, bool del_peer)
1266 {
1267 	struct fib6_table *table;
1268 	struct fib6_info *f6i;
1269 
1270 	f6i = addrconf_get_prefix_route(del_peer ? &ifp->peer_addr : &ifp->addr,
1271 					ifp->prefix_len,
1272 					ifp->idev->dev, 0, RTF_DEFAULT, true);
1273 	if (f6i) {
1274 		if (del_rt)
1275 			ip6_del_rt(dev_net(ifp->idev->dev), f6i, false);
1276 		else {
1277 			if (!(f6i->fib6_flags & RTF_EXPIRES)) {
1278 				table = f6i->fib6_table;
1279 				spin_lock_bh(&table->tb6_lock);
1280 
1281 				fib6_set_expires(f6i, expires);
1282 				fib6_add_gc_list(f6i);
1283 
1284 				spin_unlock_bh(&table->tb6_lock);
1285 			}
1286 			fib6_info_release(f6i);
1287 		}
1288 	}
1289 }
1290 
1291 
/* This function wants to get referenced ifp and releases it before return */

/* Remove @ifp from all data structures (hash table, idev lists),
 * notify listeners and clean up the associated prefix route.
 * Idempotent: a second call on an already-DEAD address only drops
 * the caller's reference.  Must run under RTNL.
 */
static void ipv6_del_addr(struct inet6_ifaddr *ifp)
{
	enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP;
	struct net *net = dev_net(ifp->idev->dev);
	unsigned long expires;
	int state;

	ASSERT_RTNL();

	/* Atomically claim the transition to DEAD; whoever observes a
	 * non-DEAD previous state owns the teardown below.
	 */
	spin_lock_bh(&ifp->lock);
	state = ifp->state;
	ifp->state = INET6_IFADDR_STATE_DEAD;
	spin_unlock_bh(&ifp->lock);

	if (state == INET6_IFADDR_STATE_DEAD)
		goto out;

	/* Unhash first so new lookups can no longer find the address. */
	spin_lock_bh(&net->ipv6.addrconf_hash_lock);
	hlist_del_init_rcu(&ifp->addr_lst);
	spin_unlock_bh(&net->ipv6.addrconf_hash_lock);

	write_lock_bh(&ifp->idev->lock);

	/* Temporary addresses hold extra references via tempaddr_list
	 * and their public-address backpointer (ifpub).
	 */
	if (ifp->flags&IFA_F_TEMPORARY) {
		list_del(&ifp->tmp_list);
		if (ifp->ifpub) {
			in6_ifa_put(ifp->ifpub);
			ifp->ifpub = NULL;
		}
		__in6_ifa_put(ifp);
	}

	/* Decide the prefix-route fate while still holding idev->lock. */
	if (!(ifp->flags & IFA_F_NOPREFIXROUTE))
		action = check_cleanup_prefix_route(ifp, &expires);

	/* Drop the reference held by addr_list membership. */
	list_del_rcu(&ifp->if_list);
	__in6_ifa_put(ifp);

	write_unlock_bh(&ifp->idev->lock);

	addrconf_del_dad_work(ifp);

	ipv6_ifa_notify(RTM_DELADDR, ifp);

	inet6addr_notifier_call_chain(NETDEV_DOWN, ifp);

	if (action != CLEANUP_PREFIX_RT_NOP) {
		cleanup_prefix_route(ifp, expires,
			action == CLEANUP_PREFIX_RT_DEL, false);
	}

	/* clean up prefsrc entries */
	rt6_remove_prefsrc(ifp);
out:
	in6_ifa_put(ifp);
}
1350 
ipv6_get_regen_advance(const struct inet6_dev * idev)1351 static unsigned long ipv6_get_regen_advance(const struct inet6_dev *idev)
1352 {
1353 	return READ_ONCE(idev->cnf.regen_min_advance) +
1354 		READ_ONCE(idev->cnf.regen_max_retry) *
1355 		READ_ONCE(idev->cnf.dad_transmits) *
1356 		max(NEIGH_VAR(idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
1357 }
1358 
/* Create a new RFC 4941 temporary address derived from public address
 * @ifp (same prefix, random interface identifier).  @block controls
 * whether ipv6_add_addr() may sleep.  Returns 0 on success, -1 when
 * temporary addresses are disabled or no usable lifetime remains.
 */
static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, bool block)
{
	struct inet6_dev *idev = ifp->idev;
	unsigned long tmp_tstamp, age;
	unsigned long regen_advance;
	unsigned long now = jiffies;
	u32 if_public_preferred_lft;
	s32 cnf_temp_preferred_lft;
	struct inet6_ifaddr *ift;
	struct ifa6_config cfg;
	long max_desync_factor;
	struct in6_addr addr;
	int ret = 0;

	write_lock_bh(&idev->lock);

retry:
	/* Taken once per attempt; every exit path below drops it. */
	in6_dev_hold(idev);
	if (READ_ONCE(idev->cnf.use_tempaddr) <= 0) {
		write_unlock_bh(&idev->lock);
		pr_info("%s: use_tempaddr is disabled\n", __func__);
		in6_dev_put(idev);
		ret = -1;
		goto out;
	}
	spin_lock_bh(&ifp->lock);
	/* Too many failed regenerations for this public address: give up
	 * and disable temporary addresses on the device entirely.
	 */
	if (ifp->regen_count++ >= READ_ONCE(idev->cnf.regen_max_retry)) {
		WRITE_ONCE(idev->cnf.use_tempaddr, -1);	/*XXX*/
		spin_unlock_bh(&ifp->lock);
		write_unlock_bh(&idev->lock);
		pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
			__func__);
		in6_dev_put(idev);
		ret = -1;
		goto out;
	}
	in6_ifa_hold(ifp);
	/* Keep the 64-bit prefix of the public address, randomize the IID. */
	memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
	ipv6_gen_rnd_iid(&addr);

	age = (now - ifp->tstamp) / HZ;

	regen_advance = ipv6_get_regen_advance(idev);

	/* recalculate max_desync_factor each time and update
	 * idev->desync_factor if it's larger
	 */
	cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
	max_desync_factor = min_t(long,
				  READ_ONCE(idev->cnf.max_desync_factor),
				  cnf_temp_preferred_lft - regen_advance);

	if (unlikely(idev->desync_factor > max_desync_factor)) {
		if (max_desync_factor > 0) {
			get_random_bytes(&idev->desync_factor,
					 sizeof(idev->desync_factor));
			idev->desync_factor %= max_desync_factor;
		} else {
			idev->desync_factor = 0;
		}
	}

	if_public_preferred_lft = ifp->prefered_lft;

	/* Lifetimes per RFC 4941: bounded by config and by the public
	 * address' own lifetimes (age-adjusted).
	 */
	memset(&cfg, 0, sizeof(cfg));
	cfg.valid_lft = min_t(__u32, ifp->valid_lft,
			      READ_ONCE(idev->cnf.temp_valid_lft) + age);
	cfg.preferred_lft = cnf_temp_preferred_lft + age - idev->desync_factor;
	cfg.preferred_lft = min_t(__u32, if_public_preferred_lft, cfg.preferred_lft);
	cfg.preferred_lft = min_t(__u32, cfg.valid_lft, cfg.preferred_lft);

	cfg.plen = ifp->prefix_len;
	tmp_tstamp = ifp->tstamp;
	spin_unlock_bh(&ifp->lock);

	write_unlock_bh(&idev->lock);

	/* From RFC 4941:
	 *
	 *     A temporary address is created only if this calculated Preferred
	 *     Lifetime is greater than REGEN_ADVANCE time units.  In
	 *     particular, an implementation must not create a temporary address
	 *     with a zero Preferred Lifetime.
	 *
	 *     ...
	 *
	 *     When creating a temporary address, the lifetime values MUST be
	 *     derived from the corresponding prefix as follows:
	 *
	 *     ...
	 *
	 *     *  Its Preferred Lifetime is the lower of the Preferred Lifetime
	 *        of the public address or TEMP_PREFERRED_LIFETIME -
	 *        DESYNC_FACTOR.
	 *
	 * To comply with the RFC's requirements, clamp the preferred lifetime
	 * to a minimum of regen_advance, unless that would exceed valid_lft or
	 * ifp->prefered_lft.
	 *
	 * Use age calculation as in addrconf_verify to avoid unnecessary
	 * temporary addresses being generated.
	 */
	age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
	if (cfg.preferred_lft <= regen_advance + age) {
		cfg.preferred_lft = regen_advance + age + 1;
		if (cfg.preferred_lft > cfg.valid_lft ||
		    cfg.preferred_lft > if_public_preferred_lft) {
			in6_ifa_put(ifp);
			in6_dev_put(idev);
			ret = -1;
			goto out;
		}
	}

	cfg.ifa_flags = IFA_F_TEMPORARY;
	/* set in addrconf_prefix_rcv() */
	if (ifp->flags & IFA_F_OPTIMISTIC)
		cfg.ifa_flags |= IFA_F_OPTIMISTIC;

	cfg.pfx = &addr;
	cfg.scope = ipv6_addr_scope(cfg.pfx);

	/* A collision on the random IID just triggers another attempt;
	 * regen_count bounds the number of retries.
	 */
	ift = ipv6_add_addr(idev, &cfg, block, NULL);
	if (IS_ERR(ift)) {
		in6_ifa_put(ifp);
		in6_dev_put(idev);
		pr_info("%s: retry temporary address regeneration\n", __func__);
		write_lock_bh(&idev->lock);
		goto retry;
	}

	/* Link the temporary address back to its public address; the
	 * ifp reference taken above is transferred to ift->ifpub.
	 */
	spin_lock_bh(&ift->lock);
	ift->ifpub = ifp;
	ift->cstamp = now;
	ift->tstamp = tmp_tstamp;
	spin_unlock_bh(&ift->lock);

	addrconf_dad_start(ift);
	in6_ifa_put(ift);
	in6_dev_put(idev);
out:
	return ret;
}
1502 
1503 /*
1504  *	Choose an appropriate source address (RFC3484)
1505  */
1506 enum {
1507 	IPV6_SADDR_RULE_INIT = 0,
1508 	IPV6_SADDR_RULE_LOCAL,
1509 	IPV6_SADDR_RULE_SCOPE,
1510 	IPV6_SADDR_RULE_PREFERRED,
1511 #ifdef CONFIG_IPV6_MIP6
1512 	IPV6_SADDR_RULE_HOA,
1513 #endif
1514 	IPV6_SADDR_RULE_OIF,
1515 	IPV6_SADDR_RULE_LABEL,
1516 	IPV6_SADDR_RULE_PRIVACY,
1517 	IPV6_SADDR_RULE_ORCHID,
1518 	IPV6_SADDR_RULE_PREFIX,
1519 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1520 	IPV6_SADDR_RULE_NOT_OPTIMISTIC,
1521 #endif
1522 	IPV6_SADDR_RULE_MAX
1523 };
1524 
/* Per-candidate scoring state used during source address selection. */
struct ipv6_saddr_score {
	int			rule;		/* highest rule evaluated so far */
	int			addr_type;	/* __ipv6_addr_type() of ifa->addr */
	struct inet6_ifaddr	*ifa;		/* the candidate address */
	DECLARE_BITMAP(scorebits, IPV6_SADDR_RULE_MAX);	/* per-rule pass/fail */
	int			scopedist;	/* rule-2 scope distance score */
	int			matchlen;	/* rule-8 common prefix length */
};
1533 
/* Destination attributes that source address selection scores against. */
struct ipv6_saddr_dst {
	const struct in6_addr *addr;	/* destination address */
	int ifindex;			/* outgoing interface (0 = any) */
	int scope;			/* source scope of the destination */
	int label;			/* address label of the destination */
	unsigned int prefs;		/* IPV6_PREFER_SRC_* preference flags */
};
1541 
ipv6_saddr_preferred(int type)1542 static inline int ipv6_saddr_preferred(int type)
1543 {
1544 	if (type & (IPV6_ADDR_MAPPED|IPV6_ADDR_COMPATv4|IPV6_ADDR_LOOPBACK))
1545 		return 1;
1546 	return 0;
1547 }
1548 
ipv6_use_optimistic_addr(const struct net * net,const struct inet6_dev * idev)1549 static bool ipv6_use_optimistic_addr(const struct net *net,
1550 				     const struct inet6_dev *idev)
1551 {
1552 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1553 	if (!idev)
1554 		return false;
1555 	if (!READ_ONCE(net->ipv6.devconf_all->optimistic_dad) &&
1556 	    !READ_ONCE(idev->cnf.optimistic_dad))
1557 		return false;
1558 	if (!READ_ONCE(net->ipv6.devconf_all->use_optimistic) &&
1559 	    !READ_ONCE(idev->cnf.use_optimistic))
1560 		return false;
1561 
1562 	return true;
1563 #else
1564 	return false;
1565 #endif
1566 }
1567 
ipv6_allow_optimistic_dad(const struct net * net,const struct inet6_dev * idev)1568 static bool ipv6_allow_optimistic_dad(const struct net *net,
1569 				      const struct inet6_dev *idev)
1570 {
1571 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1572 	if (!idev)
1573 		return false;
1574 	if (!READ_ONCE(net->ipv6.devconf_all->optimistic_dad) &&
1575 	    !READ_ONCE(idev->cnf.optimistic_dad))
1576 		return false;
1577 
1578 	return true;
1579 #else
1580 	return false;
1581 #endif
1582 }
1583 
/* Evaluate selection rule @i for candidate @score against destination
 * @dst.  Returns the rule's score (higher is better; most rules are
 * boolean, SCOPE and PREFIX are multi-valued).  Results for already
 * evaluated rules (i <= score->rule) are replayed from cached state so
 * the two candidates in __ipv6_dev_get_saddr() stay comparable.
 */
static int ipv6_get_saddr_eval(struct net *net,
			       struct ipv6_saddr_score *score,
			       struct ipv6_saddr_dst *dst,
			       int i)
{
	int ret;

	/* Replay a previously computed rule from the cached score. */
	if (i <= score->rule) {
		switch (i) {
		case IPV6_SADDR_RULE_SCOPE:
			ret = score->scopedist;
			break;
		case IPV6_SADDR_RULE_PREFIX:
			ret = score->matchlen;
			break;
		default:
			ret = !!test_bit(i, score->scorebits);
		}
		goto out;
	}

	switch (i) {
	case IPV6_SADDR_RULE_INIT:
		/* Rule 0: remember if hiscore is not ready yet */
		ret = !!score->ifa;
		break;
	case IPV6_SADDR_RULE_LOCAL:
		/* Rule 1: Prefer same address */
		ret = ipv6_addr_equal(&score->ifa->addr, dst->addr);
		break;
	case IPV6_SADDR_RULE_SCOPE:
		/* Rule 2: Prefer appropriate scope
		 *
		 *      ret
		 *       ^
		 *    -1 |  d 15
		 *    ---+--+-+---> scope
		 *       |
		 *       |             d is scope of the destination.
		 *  B-d  |  \
		 *       |   \      <- smaller scope is better if
		 *  B-15 |    \        if scope is enough for destination.
		 *       |             ret = B - scope (-1 <= scope >= d <= 15).
		 * d-C-1 | /
		 *       |/         <- greater is better
		 *   -C  /             if scope is not enough for destination.
		 *      /|             ret = scope - C (-1 <= d < scope <= 15).
		 *
		 * d - C - 1 < B -15 (for all -1 <= d <= 15).
		 * C > d + 14 - B >= 15 + 14 - B = 29 - B.
		 * Assume B = 0 and we get C > 29.
		 */
		ret = __ipv6_addr_src_scope(score->addr_type);
		if (ret >= dst->scope)
			ret = -ret;
		else
			ret -= 128;	/* 30 is enough */
		score->scopedist = ret;
		break;
	case IPV6_SADDR_RULE_PREFERRED:
	    {
		/* Rule 3: Avoid deprecated and optimistic addresses */
		u8 avoid = IFA_F_DEPRECATED;

		if (!ipv6_use_optimistic_addr(net, score->ifa->idev))
			avoid |= IFA_F_OPTIMISTIC;
		ret = ipv6_saddr_preferred(score->addr_type) ||
		      !(score->ifa->flags & avoid);
		break;
	    }
#ifdef CONFIG_IPV6_MIP6
	case IPV6_SADDR_RULE_HOA:
	    {
		/* Rule 4: Prefer home address */
		int prefhome = !(dst->prefs & IPV6_PREFER_SRC_COA);
		ret = !(score->ifa->flags & IFA_F_HOMEADDRESS) ^ prefhome;
		break;
	    }
#endif
	case IPV6_SADDR_RULE_OIF:
		/* Rule 5: Prefer outgoing interface */
		ret = (!dst->ifindex ||
		       dst->ifindex == score->ifa->idev->dev->ifindex);
		break;
	case IPV6_SADDR_RULE_LABEL:
		/* Rule 6: Prefer matching label */
		ret = ipv6_addr_label(net,
				      &score->ifa->addr, score->addr_type,
				      score->ifa->idev->dev->ifindex) == dst->label;
		break;
	case IPV6_SADDR_RULE_PRIVACY:
	    {
		/* Rule 7: Prefer public address
		 * Note: prefer temporary address if use_tempaddr >= 2
		 */
		int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ?
				!!(dst->prefs & IPV6_PREFER_SRC_TMP) :
				READ_ONCE(score->ifa->idev->cnf.use_tempaddr) >= 2;
		ret = (!(score->ifa->flags & IFA_F_TEMPORARY)) ^ preftmp;
		break;
	    }
	case IPV6_SADDR_RULE_ORCHID:
		/* Rule 8-: Prefer ORCHID vs ORCHID or
		 *	    non-ORCHID vs non-ORCHID
		 */
		ret = !(ipv6_addr_orchid(&score->ifa->addr) ^
			ipv6_addr_orchid(dst->addr));
		break;
	case IPV6_SADDR_RULE_PREFIX:
		/* Rule 8: Use longest matching prefix */
		ret = ipv6_addr_diff(&score->ifa->addr, dst->addr);
		if (ret > score->ifa->prefix_len)
			ret = score->ifa->prefix_len;
		score->matchlen = ret;
		break;
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	case IPV6_SADDR_RULE_NOT_OPTIMISTIC:
		/* Optimistic addresses still have lower precedence than other
		 * preferred addresses.
		 */
		ret = !(score->ifa->flags & IFA_F_OPTIMISTIC);
		break;
#endif
	default:
		ret = 0;
	}

	/* Cache the boolean outcome and remember how far we evaluated. */
	if (ret)
		__set_bit(i, score->scorebits);
	score->rule = i;
out:
	return ret;
}
1717 
/* Score every usable address on @idev against @dst and keep the best
 * one in scores[hiscore_idx].  Uses the two-slot scores[] array: the
 * current champion ("hiscore") and the candidate under test swap roles
 * instead of being copied.  Returns the (possibly new) champion index.
 * Caller must be in an RCU read section.
 */
static int __ipv6_dev_get_saddr(struct net *net,
				struct ipv6_saddr_dst *dst,
				struct inet6_dev *idev,
				struct ipv6_saddr_score *scores,
				int hiscore_idx)
{
	struct ipv6_saddr_score *score = &scores[1 - hiscore_idx], *hiscore = &scores[hiscore_idx];

	list_for_each_entry_rcu(score->ifa, &idev->addr_list, if_list) {
		int i;

		/*
		 * - Tentative Address (RFC2462 section 5.4)
		 *  - A tentative address is not considered
		 *    "assigned to an interface" in the traditional
		 *    sense, unless it is also flagged as optimistic.
		 * - Candidate Source Address (section 4)
		 *  - In any case, anycast addresses, multicast
		 *    addresses, and the unspecified address MUST
		 *    NOT be included in a candidate set.
		 */
		if ((score->ifa->flags & IFA_F_TENTATIVE) &&
		    (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
			continue;

		score->addr_type = __ipv6_addr_type(&score->ifa->addr);

		if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
			     score->addr_type & IPV6_ADDR_MULTICAST)) {
			net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
					    idev->dev->name);
			continue;
		}

		/* Fresh candidate: no rules evaluated yet. */
		score->rule = -1;
		bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);

		/* Compare rule by rule; first differing rule decides. */
		for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
			int minihiscore, miniscore;

			minihiscore = ipv6_get_saddr_eval(net, hiscore, dst, i);
			miniscore = ipv6_get_saddr_eval(net, score, dst, i);

			if (minihiscore > miniscore) {
				if (i == IPV6_SADDR_RULE_SCOPE &&
				    score->scopedist > 0) {
					/*
					 * special case:
					 * each remaining entry
					 * has too small (not enough)
					 * scope, because ifa entries
					 * are sorted by their scope
					 * values.
					 */
					goto out;
				}
				break;
			} else if (minihiscore < miniscore) {
				/* Candidate wins: swap champion slots. */
				swap(hiscore, score);
				hiscore_idx = 1 - hiscore_idx;

				/* restore our iterator */
				score->ifa = hiscore->ifa;

				break;
			}
		}
	}
out:
	return hiscore_idx;
}
1789 
/* Score candidates on the destination device first, then on its L3
 * master device, so enslaved-device addresses win ties over other
 * devices in the same L3 domain.  Caller holds the RCU read lock.
 */
static int ipv6_get_saddr_master(struct net *net,
				 const struct net_device *dst_dev,
				 const struct net_device *master,
				 struct ipv6_saddr_dst *dst,
				 struct ipv6_saddr_score *scores,
				 int hiscore_idx)
{
	const struct net_device *devs[] = { dst_dev, master };
	struct inet6_dev *idev;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(devs); i++) {
		idev = __in6_dev_get(devs[i]);
		if (idev)
			hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
							   scores, hiscore_idx);
	}

	return hiscore_idx;
}
1811 
/* Select a source address for destination @daddr per RFC 6724, writing
 * the result to @saddr.  @dst_dev narrows the candidate set (see the
 * comments below); @prefs carries IPV6_PREFER_SRC_* flags.  Returns 0
 * on success or -EADDRNOTAVAIL when no candidate exists.
 */
int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
		       const struct in6_addr *daddr, unsigned int prefs,
		       struct in6_addr *saddr)
{
	struct ipv6_saddr_score scores[2], *hiscore;
	struct ipv6_saddr_dst dst;
	struct inet6_dev *idev;
	struct net_device *dev;
	int dst_type;
	bool use_oif_addr = false;
	int hiscore_idx = 0;
	int ret = 0;

	dst_type = __ipv6_addr_type(daddr);
	dst.addr = daddr;
	dst.ifindex = dst_dev ? dst_dev->ifindex : 0;
	dst.scope = __ipv6_addr_src_scope(dst_type);
	dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex);
	dst.prefs = prefs;

	/* Empty champion slot; rule 0 makes any real candidate beat it. */
	scores[hiscore_idx].rule = -1;
	scores[hiscore_idx].ifa = NULL;

	rcu_read_lock();

	/* Candidate Source Address (section 4)
	 *  - multicast and link-local destination address,
	 *    the set of candidate source address MUST only
	 *    include addresses assigned to interfaces
	 *    belonging to the same link as the outgoing
	 *    interface.
	 * (- For site-local destination addresses, the
	 *    set of candidate source addresses MUST only
	 *    include addresses assigned to interfaces
	 *    belonging to the same site as the outgoing
	 *    interface.)
	 *  - "It is RECOMMENDED that the candidate source addresses
	 *    be the set of unicast addresses assigned to the
	 *    interface that will be used to send to the destination
	 *    (the 'outgoing' interface)." (RFC 6724)
	 */
	if (dst_dev) {
		idev = __in6_dev_get(dst_dev);
		if ((dst_type & IPV6_ADDR_MULTICAST) ||
		    dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
		    (idev && READ_ONCE(idev->cnf.use_oif_addrs_only))) {
			use_oif_addr = true;
		}
	}

	if (use_oif_addr) {
		if (idev)
			hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
	} else {
		const struct net_device *master;
		int master_idx = 0;

		/* if dst_dev exists and is enslaved to an L3 device, then
		 * prefer addresses from dst_dev and then the master over
		 * any other enslaved devices in the L3 domain.
		 */
		master = l3mdev_master_dev_rcu(dst_dev);
		if (master) {
			master_idx = master->ifindex;

			hiscore_idx = ipv6_get_saddr_master(net, dst_dev,
							    master, &dst,
							    scores, hiscore_idx);

			/* A champion with sufficient scope on dst_dev or
			 * the master short-circuits the full device scan.
			 */
			if (scores[hiscore_idx].ifa &&
			    scores[hiscore_idx].scopedist >= 0)
				goto out;
		}

		for_each_netdev_rcu(net, dev) {
			/* only consider addresses on devices in the
			 * same L3 domain
			 */
			if (l3mdev_master_ifindex_rcu(dev) != master_idx)
				continue;
			idev = __in6_dev_get(dev);
			if (!idev)
				continue;
			hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
		}
	}

out:
	hiscore = &scores[hiscore_idx];
	if (!hiscore->ifa)
		ret = -EADDRNOTAVAIL;
	else
		*saddr = hiscore->ifa->addr;

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(ipv6_dev_get_saddr);
1910 
/* Copy @idev's first link-local address whose flags avoid
 * @banned_flags into @addr.  Walks the scope-sorted address list from
 * the tail, where link-local entries live, and stops as soon as the
 * scope drops below IFA_LINK.  Caller must hold idev->lock.
 * Returns 0 on success, -EADDRNOTAVAIL if none qualifies.
 */
static int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
			      u32 banned_flags)
{
	struct inet6_ifaddr *ifa;

	list_for_each_entry_reverse(ifa, &idev->addr_list, if_list) {
		if (ifa->scope > IFA_LINK)
			break;
		if (ifa->scope != IFA_LINK || (ifa->flags & banned_flags))
			continue;
		*addr = ifa->addr;
		return 0;
	}

	return -EADDRNOTAVAIL;
}
1929 
/* Locked wrapper around __ipv6_get_lladdr(): fetch a link-local
 * address of @dev into @addr, skipping addresses whose flags match
 * @banned_flags.  Returns 0 or -EADDRNOTAVAIL.
 */
int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
		    u32 banned_flags)
{
	int err = -EADDRNOTAVAIL;
	struct inet6_dev *idev;

	rcu_read_lock();
	idev = __in6_dev_get(dev);
	if (!idev)
		goto unlock;

	read_lock_bh(&idev->lock);
	err = __ipv6_get_lladdr(idev, addr, banned_flags);
	read_unlock_bh(&idev->lock);
unlock:
	rcu_read_unlock();
	return err;
}
1946 
ipv6_count_addresses(const struct inet6_dev * idev)1947 static int ipv6_count_addresses(const struct inet6_dev *idev)
1948 {
1949 	const struct inet6_ifaddr *ifp;
1950 	int cnt = 0;
1951 
1952 	rcu_read_lock();
1953 	list_for_each_entry_rcu(ifp, &idev->addr_list, if_list)
1954 		cnt++;
1955 	rcu_read_unlock();
1956 	return cnt;
1957 }
1958 
/* Check whether @addr is assigned in @net, excluding tentative
 * addresses.  A NULL @dev widens the lookup to the whole L3 domain
 * (skip_dev_check = true); otherwise only @dev is matched.
 * Returns 1 if found, 0 otherwise.
 */
int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
		  const struct net_device *dev, int strict)
{
	return ipv6_chk_addr_and_flags(net, addr, dev, !dev,
				       strict, IFA_F_TENTATIVE);
}
EXPORT_SYMBOL(ipv6_chk_addr);
1966 
/* device argument is used to find the L3 domain of interest. If
 * skip_dev_check is set, then the ifp device is not checked against
 * the passed in dev argument. So the 2 cases for addresses checks are:
 *   1. does the address exist in the L3 domain that dev is part of
 *      (skip_dev_check = true), or
 *
 *   2. does the address exist on the specific device
 *      (skip_dev_check = false)
 */
/* Returns the device the address is assigned to (valid only within the
 * caller's own RCU read section) or NULL if no match.  @banned_flags
 * excludes addresses carrying any of those IFA flags.
 */
static struct net_device *
__ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
			  const struct net_device *dev, bool skip_dev_check,
			  int strict, u32 banned_flags)
{
	unsigned int hash = inet6_addr_hash(net, addr);
	struct net_device *l3mdev, *ndev;
	struct inet6_ifaddr *ifp;
	u32 ifp_flags;

	rcu_read_lock();

	/* Resolve the L3 master while dev is still meaningful, then drop
	 * dev if only the L3-domain membership should be checked.
	 */
	l3mdev = l3mdev_master_dev_rcu(dev);
	if (skip_dev_check)
		dev = NULL;

	hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
		ndev = ifp->idev->dev;

		/* Only consider addresses inside the same L3 domain. */
		if (l3mdev_master_dev_rcu(ndev) != l3mdev)
			continue;

		/* Decouple optimistic from tentative for evaluation here.
		 * Ban optimistic addresses explicitly, when required.
		 */
		ifp_flags = (ifp->flags&IFA_F_OPTIMISTIC)
			    ? (ifp->flags&~IFA_F_TENTATIVE)
			    : ifp->flags;
		/* Match if the device agrees, or if non-strict lookup of a
		 * non-link/host-scoped address permits any device.
		 */
		if (ipv6_addr_equal(&ifp->addr, addr) &&
		    !(ifp_flags&banned_flags) &&
		    (!dev || ndev == dev ||
		     !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) {
			rcu_read_unlock();
			return ndev;
		}
	}

	rcu_read_unlock();
	return NULL;
}
2016 
/* Boolean form of __ipv6_chk_addr_and_flags(): 1 if @addr exists under
 * the given constraints, 0 otherwise.  See the comment above
 * __ipv6_chk_addr_and_flags() for the meaning of @skip_dev_check.
 */
int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
			    const struct net_device *dev, bool skip_dev_check,
			    int strict, u32 banned_flags)
{
	return __ipv6_chk_addr_and_flags(net, addr, dev, skip_dev_check,
					 strict, banned_flags) ? 1 : 0;
}
EXPORT_SYMBOL(ipv6_chk_addr_and_flags);
2025 
2026 
2027 /* Compares an address/prefix_len with addresses on device @dev.
2028  * If one is found it returns true.
2029  */
ipv6_chk_custom_prefix(const struct in6_addr * addr,const unsigned int prefix_len,struct net_device * dev)2030 bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
2031 	const unsigned int prefix_len, struct net_device *dev)
2032 {
2033 	const struct inet6_ifaddr *ifa;
2034 	const struct inet6_dev *idev;
2035 	bool ret = false;
2036 
2037 	rcu_read_lock();
2038 	idev = __in6_dev_get(dev);
2039 	if (idev) {
2040 		list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
2041 			ret = ipv6_prefix_equal(addr, &ifa->addr, prefix_len);
2042 			if (ret)
2043 				break;
2044 		}
2045 	}
2046 	rcu_read_unlock();
2047 
2048 	return ret;
2049 }
2050 EXPORT_SYMBOL(ipv6_chk_custom_prefix);
2051 
ipv6_chk_prefix(const struct in6_addr * addr,struct net_device * dev)2052 int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
2053 {
2054 	const struct inet6_ifaddr *ifa;
2055 	const struct inet6_dev *idev;
2056 	int	onlink;
2057 
2058 	onlink = 0;
2059 	rcu_read_lock();
2060 	idev = __in6_dev_get(dev);
2061 	if (idev) {
2062 		list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
2063 			onlink = ipv6_prefix_equal(addr, &ifa->addr,
2064 						   ifa->prefix_len);
2065 			if (onlink)
2066 				break;
2067 		}
2068 	}
2069 	rcu_read_unlock();
2070 	return onlink;
2071 }
2072 EXPORT_SYMBOL(ipv6_chk_prefix);
2073 
2074 /**
2075  * ipv6_dev_find - find the first device with a given source address.
2076  * @net: the net namespace
2077  * @addr: the source address
2078  * @dev: used to find the L3 domain of interest
2079  *
2080  * The caller should be protected by RCU, or RTNL.
2081  */
ipv6_dev_find(struct net * net,const struct in6_addr * addr,struct net_device * dev)2082 struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr,
2083 				 struct net_device *dev)
2084 {
2085 	return __ipv6_chk_addr_and_flags(net, addr, dev, !dev, 1,
2086 					 IFA_F_TENTATIVE);
2087 }
2088 EXPORT_SYMBOL(ipv6_dev_find);
2089 
ipv6_get_ifaddr(struct net * net,const struct in6_addr * addr,struct net_device * dev,int strict)2090 struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
2091 				     struct net_device *dev, int strict)
2092 {
2093 	unsigned int hash = inet6_addr_hash(net, addr);
2094 	struct inet6_ifaddr *ifp, *result = NULL;
2095 
2096 	rcu_read_lock();
2097 	hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
2098 		if (ipv6_addr_equal(&ifp->addr, addr)) {
2099 			if (!dev || ifp->idev->dev == dev ||
2100 			    !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
2101 				if (in6_ifa_hold_safe(ifp)) {
2102 					result = ifp;
2103 					break;
2104 				}
2105 			}
2106 		}
2107 	}
2108 	rcu_read_unlock();
2109 
2110 	return result;
2111 }
2112 
2113 /* Gets referenced address, destroys ifaddr */
2114 
addrconf_dad_stop(struct inet6_ifaddr * ifp,int dad_failed)2115 static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
2116 {
2117 	if (dad_failed)
2118 		ifp->flags |= IFA_F_DADFAILED;
2119 
2120 	if (ifp->flags&IFA_F_TEMPORARY) {
2121 		struct inet6_ifaddr *ifpub;
2122 		spin_lock_bh(&ifp->lock);
2123 		ifpub = ifp->ifpub;
2124 		if (ifpub) {
2125 			in6_ifa_hold(ifpub);
2126 			spin_unlock_bh(&ifp->lock);
2127 			ipv6_create_tempaddr(ifpub, true);
2128 			in6_ifa_put(ifpub);
2129 		} else {
2130 			spin_unlock_bh(&ifp->lock);
2131 		}
2132 		ipv6_del_addr(ifp);
2133 	} else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) {
2134 		spin_lock_bh(&ifp->lock);
2135 		addrconf_del_dad_work(ifp);
2136 		ifp->flags |= IFA_F_TENTATIVE;
2137 		if (dad_failed)
2138 			ifp->flags &= ~IFA_F_OPTIMISTIC;
2139 		spin_unlock_bh(&ifp->lock);
2140 		if (dad_failed)
2141 			ipv6_ifa_notify(0, ifp);
2142 		in6_ifa_put(ifp);
2143 	} else {
2144 		ipv6_del_addr(ifp);
2145 	}
2146 }
2147 
addrconf_dad_end(struct inet6_ifaddr * ifp)2148 static int addrconf_dad_end(struct inet6_ifaddr *ifp)
2149 {
2150 	int err = -ENOENT;
2151 
2152 	spin_lock_bh(&ifp->lock);
2153 	if (ifp->state == INET6_IFADDR_STATE_DAD) {
2154 		ifp->state = INET6_IFADDR_STATE_POSTDAD;
2155 		err = 0;
2156 	}
2157 	spin_unlock_bh(&ifp->lock);
2158 
2159 	return err;
2160 }
2161 
addrconf_dad_failure(struct sk_buff * skb,struct inet6_ifaddr * ifp)2162 void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp)
2163 {
2164 	struct inet6_dev *idev = ifp->idev;
2165 	struct net *net = dev_net(idev->dev);
2166 	int max_addresses;
2167 
2168 	if (addrconf_dad_end(ifp)) {
2169 		in6_ifa_put(ifp);
2170 		return;
2171 	}
2172 
2173 	net_info_ratelimited("%s: IPv6 duplicate address %pI6c used by %pM detected!\n",
2174 			     ifp->idev->dev->name, &ifp->addr, eth_hdr(skb)->h_source);
2175 
2176 	spin_lock_bh(&ifp->lock);
2177 
2178 	if (ifp->flags & IFA_F_STABLE_PRIVACY) {
2179 		struct in6_addr new_addr;
2180 		struct inet6_ifaddr *ifp2;
2181 		int retries = ifp->stable_privacy_retry + 1;
2182 		struct ifa6_config cfg = {
2183 			.pfx = &new_addr,
2184 			.plen = ifp->prefix_len,
2185 			.ifa_flags = ifp->flags,
2186 			.valid_lft = ifp->valid_lft,
2187 			.preferred_lft = ifp->prefered_lft,
2188 			.scope = ifp->scope,
2189 		};
2190 
2191 		if (retries > net->ipv6.sysctl.idgen_retries) {
2192 			net_info_ratelimited("%s: privacy stable address generation failed because of DAD conflicts!\n",
2193 					     ifp->idev->dev->name);
2194 			goto errdad;
2195 		}
2196 
2197 		new_addr = ifp->addr;
2198 		if (ipv6_generate_stable_address(&new_addr, retries,
2199 						 idev))
2200 			goto errdad;
2201 
2202 		spin_unlock_bh(&ifp->lock);
2203 
2204 		max_addresses = READ_ONCE(idev->cnf.max_addresses);
2205 		if (max_addresses &&
2206 		    ipv6_count_addresses(idev) >= max_addresses)
2207 			goto lock_errdad;
2208 
2209 		net_info_ratelimited("%s: generating new stable privacy address because of DAD conflict\n",
2210 				     ifp->idev->dev->name);
2211 
2212 		ifp2 = ipv6_add_addr(idev, &cfg, false, NULL);
2213 		if (IS_ERR(ifp2))
2214 			goto lock_errdad;
2215 
2216 		spin_lock_bh(&ifp2->lock);
2217 		ifp2->stable_privacy_retry = retries;
2218 		ifp2->state = INET6_IFADDR_STATE_PREDAD;
2219 		spin_unlock_bh(&ifp2->lock);
2220 
2221 		addrconf_mod_dad_work(ifp2, net->ipv6.sysctl.idgen_delay);
2222 		in6_ifa_put(ifp2);
2223 lock_errdad:
2224 		spin_lock_bh(&ifp->lock);
2225 	}
2226 
2227 errdad:
2228 	/* transition from _POSTDAD to _ERRDAD */
2229 	ifp->state = INET6_IFADDR_STATE_ERRDAD;
2230 	spin_unlock_bh(&ifp->lock);
2231 
2232 	addrconf_mod_dad_work(ifp, 0);
2233 	in6_ifa_put(ifp);
2234 }
2235 
2236 /* Join to solicited addr multicast group. */
addrconf_join_solict(struct net_device * dev,const struct in6_addr * addr)2237 void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
2238 {
2239 	struct in6_addr maddr;
2240 
2241 	if (READ_ONCE(dev->flags) & (IFF_LOOPBACK | IFF_NOARP))
2242 		return;
2243 
2244 	addrconf_addr_solict_mult(addr, &maddr);
2245 	ipv6_dev_mc_inc(dev, &maddr);
2246 }
2247 
addrconf_leave_solict(struct inet6_dev * idev,const struct in6_addr * addr)2248 void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
2249 {
2250 	struct in6_addr maddr;
2251 
2252 	if (READ_ONCE(idev->dev->flags) & (IFF_LOOPBACK | IFF_NOARP))
2253 		return;
2254 
2255 	addrconf_addr_solict_mult(addr, &maddr);
2256 	__ipv6_dev_mc_dec(idev, &maddr);
2257 }
2258 
addrconf_join_anycast(struct inet6_ifaddr * ifp)2259 static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
2260 {
2261 	struct in6_addr addr;
2262 
2263 	if (ifp->prefix_len >= 127) /* RFC 6164 */
2264 		return;
2265 	ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2266 	if (ipv6_addr_any(&addr))
2267 		return;
2268 	__ipv6_dev_ac_inc(ifp->idev, &addr);
2269 }
2270 
addrconf_leave_anycast(struct inet6_ifaddr * ifp)2271 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
2272 {
2273 	struct in6_addr addr;
2274 
2275 	if (ifp->prefix_len >= 127) /* RFC 6164 */
2276 		return;
2277 	ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2278 	if (ipv6_addr_any(&addr))
2279 		return;
2280 	__ipv6_dev_ac_dec(ifp->idev, &addr);
2281 }
2282 
addrconf_ifid_6lowpan(u8 * eui,struct net_device * dev)2283 static int addrconf_ifid_6lowpan(u8 *eui, struct net_device *dev)
2284 {
2285 	switch (dev->addr_len) {
2286 	case ETH_ALEN:
2287 		memcpy(eui, dev->dev_addr, 3);
2288 		eui[3] = 0xFF;
2289 		eui[4] = 0xFE;
2290 		memcpy(eui + 5, dev->dev_addr + 3, 3);
2291 		break;
2292 	case EUI64_ADDR_LEN:
2293 		memcpy(eui, dev->dev_addr, EUI64_ADDR_LEN);
2294 		eui[0] ^= 2;
2295 		break;
2296 	default:
2297 		return -1;
2298 	}
2299 
2300 	return 0;
2301 }
2302 
addrconf_ifid_ieee1394(u8 * eui,struct net_device * dev)2303 static int addrconf_ifid_ieee1394(u8 *eui, struct net_device *dev)
2304 {
2305 	const union fwnet_hwaddr *ha;
2306 
2307 	if (dev->addr_len != FWNET_ALEN)
2308 		return -1;
2309 
2310 	ha = (const union fwnet_hwaddr *)dev->dev_addr;
2311 
2312 	memcpy(eui, &ha->uc.uniq_id, sizeof(ha->uc.uniq_id));
2313 	eui[0] ^= 2;
2314 	return 0;
2315 }
2316 
addrconf_ifid_arcnet(u8 * eui,struct net_device * dev)2317 static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev)
2318 {
2319 	/* XXX: inherit EUI-64 from other interface -- yoshfuji */
2320 	if (dev->addr_len != ARCNET_ALEN)
2321 		return -1;
2322 	memset(eui, 0, 7);
2323 	eui[7] = *(u8 *)dev->dev_addr;
2324 	return 0;
2325 }
2326 
addrconf_ifid_infiniband(u8 * eui,struct net_device * dev)2327 static int addrconf_ifid_infiniband(u8 *eui, struct net_device *dev)
2328 {
2329 	if (dev->addr_len != INFINIBAND_ALEN)
2330 		return -1;
2331 	memcpy(eui, dev->dev_addr + 12, 8);
2332 	eui[0] |= 2;
2333 	return 0;
2334 }
2335 
__ipv6_isatap_ifid(u8 * eui,__be32 addr)2336 static int __ipv6_isatap_ifid(u8 *eui, __be32 addr)
2337 {
2338 	if (addr == 0)
2339 		return -1;
2340 	eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) ||
2341 		  ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) ||
2342 		  ipv4_is_private_172(addr) || ipv4_is_test_192(addr) ||
2343 		  ipv4_is_anycast_6to4(addr) || ipv4_is_private_192(addr) ||
2344 		  ipv4_is_test_198(addr) || ipv4_is_multicast(addr) ||
2345 		  ipv4_is_lbcast(addr)) ? 0x00 : 0x02;
2346 	eui[1] = 0;
2347 	eui[2] = 0x5E;
2348 	eui[3] = 0xFE;
2349 	memcpy(eui + 4, &addr, 4);
2350 	return 0;
2351 }
2352 
addrconf_ifid_sit(u8 * eui,struct net_device * dev)2353 static int addrconf_ifid_sit(u8 *eui, struct net_device *dev)
2354 {
2355 	if (dev->priv_flags & IFF_ISATAP)
2356 		return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2357 	return -1;
2358 }
2359 
addrconf_ifid_gre(u8 * eui,struct net_device * dev)2360 static int addrconf_ifid_gre(u8 *eui, struct net_device *dev)
2361 {
2362 	return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2363 }
2364 
addrconf_ifid_ip6tnl(u8 * eui,struct net_device * dev)2365 static int addrconf_ifid_ip6tnl(u8 *eui, struct net_device *dev)
2366 {
2367 	memcpy(eui, dev->perm_addr, 3);
2368 	memcpy(eui + 5, dev->perm_addr + 3, 3);
2369 	eui[3] = 0xFF;
2370 	eui[4] = 0xFE;
2371 	eui[0] ^= 2;
2372 	return 0;
2373 }
2374 
ipv6_generate_eui64(u8 * eui,struct net_device * dev)2375 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
2376 {
2377 	switch (dev->type) {
2378 	case ARPHRD_ETHER:
2379 	case ARPHRD_FDDI:
2380 		return addrconf_ifid_eui48(eui, dev);
2381 	case ARPHRD_ARCNET:
2382 		return addrconf_ifid_arcnet(eui, dev);
2383 	case ARPHRD_INFINIBAND:
2384 		return addrconf_ifid_infiniband(eui, dev);
2385 	case ARPHRD_SIT:
2386 		return addrconf_ifid_sit(eui, dev);
2387 	case ARPHRD_IPGRE:
2388 	case ARPHRD_TUNNEL:
2389 		return addrconf_ifid_gre(eui, dev);
2390 	case ARPHRD_6LOWPAN:
2391 		return addrconf_ifid_6lowpan(eui, dev);
2392 	case ARPHRD_IEEE1394:
2393 		return addrconf_ifid_ieee1394(eui, dev);
2394 	case ARPHRD_TUNNEL6:
2395 	case ARPHRD_IP6GRE:
2396 	case ARPHRD_RAWIP:
2397 		return addrconf_ifid_ip6tnl(eui, dev);
2398 	}
2399 	return -1;
2400 }
2401 
ipv6_inherit_eui64(u8 * eui,struct inet6_dev * idev)2402 static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
2403 {
2404 	int err = -1;
2405 	struct inet6_ifaddr *ifp;
2406 
2407 	read_lock_bh(&idev->lock);
2408 	list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
2409 		if (ifp->scope > IFA_LINK)
2410 			break;
2411 		if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) {
2412 			memcpy(eui, ifp->addr.s6_addr+8, 8);
2413 			err = 0;
2414 			break;
2415 		}
2416 	}
2417 	read_unlock_bh(&idev->lock);
2418 	return err;
2419 }
2420 
2421 /* Generation of a randomized Interface Identifier
2422  * draft-ietf-6man-rfc4941bis, Section 3.3.1
2423  */
2424 
ipv6_gen_rnd_iid(struct in6_addr * addr)2425 static void ipv6_gen_rnd_iid(struct in6_addr *addr)
2426 {
2427 regen:
2428 	get_random_bytes(&addr->s6_addr[8], 8);
2429 
2430 	/* <draft-ietf-6man-rfc4941bis-08.txt>, Section 3.3.1:
2431 	 * check if generated address is not inappropriate:
2432 	 *
2433 	 * - Reserved IPv6 Interface Identifiers
2434 	 * - XXX: already assigned to an address on the device
2435 	 */
2436 
2437 	/* Subnet-router anycast: 0000:0000:0000:0000 */
2438 	if (!(addr->s6_addr32[2] | addr->s6_addr32[3]))
2439 		goto regen;
2440 
2441 	/* IANA Ethernet block: 0200:5EFF:FE00:0000-0200:5EFF:FE00:5212
2442 	 * Proxy Mobile IPv6:   0200:5EFF:FE00:5213
2443 	 * IANA Ethernet block: 0200:5EFF:FE00:5214-0200:5EFF:FEFF:FFFF
2444 	 */
2445 	if (ntohl(addr->s6_addr32[2]) == 0x02005eff &&
2446 	    (ntohl(addr->s6_addr32[3]) & 0Xff000000) == 0xfe000000)
2447 		goto regen;
2448 
2449 	/* Reserved subnet anycast addresses */
2450 	if (ntohl(addr->s6_addr32[2]) == 0xfdffffff &&
2451 	    ntohl(addr->s6_addr32[3]) >= 0Xffffff80)
2452 		goto regen;
2453 }
2454 
2455 /*
2456  *	Add prefix route.
2457  */
2458 
static void
addrconf_prefix_route(struct in6_addr *pfx, int plen, u32 metric,
		      struct net_device *dev, unsigned long expires,
		      u32 flags, gfp_t gfp_flags)
{
	/* kernel-originated unicast route in the device's L3 table (or the
	 * default prefix table), with the addrconf default metric unless
	 * the caller supplies one
	 */
	struct fib6_config cfg = {
		.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX,
		.fc_metric = metric ? : IP6_RT_PRIO_ADDRCONF,
		.fc_ifindex = dev->ifindex,
		.fc_expires = expires,
		.fc_dst_len = plen,
		.fc_flags = RTF_UP | flags,
		.fc_nlinfo.nl_net = dev_net(dev),
		.fc_protocol = RTPROT_KERNEL,
		.fc_type = RTN_UNICAST,
	};

	cfg.fc_dst = *pfx;

	/* Prevent useless cloning on PtP SIT.
	   This thing is done here expecting that the whole
	   class of non-broadcast devices need not cloning.
	 */
#if IS_ENABLED(CONFIG_IPV6_SIT)
	if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT))
		cfg.fc_flags |= RTF_NONEXTHOP;
#endif

	ip6_route_add(&cfg, gfp_flags, NULL);
}
2489 
2490 
addrconf_get_prefix_route(const struct in6_addr * pfx,int plen,const struct net_device * dev,u32 flags,u32 noflags,bool no_gw)2491 static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
2492 						  int plen,
2493 						  const struct net_device *dev,
2494 						  u32 flags, u32 noflags,
2495 						  bool no_gw)
2496 {
2497 	struct fib6_node *fn;
2498 	struct fib6_info *rt = NULL;
2499 	struct fib6_table *table;
2500 	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX;
2501 
2502 	table = fib6_get_table(dev_net(dev), tb_id);
2503 	if (!table)
2504 		return NULL;
2505 
2506 	rcu_read_lock();
2507 	fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0, true);
2508 	if (!fn)
2509 		goto out;
2510 
2511 	for_each_fib6_node_rt_rcu(fn) {
2512 		/* prefix routes only use builtin fib6_nh */
2513 		if (rt->nh)
2514 			continue;
2515 
2516 		if (rt->fib6_nh->fib_nh_dev->ifindex != dev->ifindex)
2517 			continue;
2518 		if (no_gw && rt->fib6_nh->fib_nh_gw_family)
2519 			continue;
2520 		if ((rt->fib6_flags & flags) != flags)
2521 			continue;
2522 		if ((rt->fib6_flags & noflags) != 0)
2523 			continue;
2524 		if (!fib6_info_hold_safe(rt))
2525 			continue;
2526 		break;
2527 	}
2528 out:
2529 	rcu_read_unlock();
2530 	return rt;
2531 }
2532 
2533 
2534 /* Create "default" multicast route to the interface */
2535 
addrconf_add_mroute(struct net_device * dev)2536 static void addrconf_add_mroute(struct net_device *dev)
2537 {
2538 	struct fib6_config cfg = {
2539 		.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_LOCAL,
2540 		.fc_metric = IP6_RT_PRIO_ADDRCONF,
2541 		.fc_ifindex = dev->ifindex,
2542 		.fc_dst_len = 8,
2543 		.fc_flags = RTF_UP,
2544 		.fc_type = RTN_MULTICAST,
2545 		.fc_nlinfo.nl_net = dev_net(dev),
2546 		.fc_protocol = RTPROT_KERNEL,
2547 	};
2548 
2549 	ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
2550 
2551 	ip6_route_add(&cfg, GFP_KERNEL, NULL);
2552 }
2553 
addrconf_add_dev(struct net_device * dev)2554 static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
2555 {
2556 	struct inet6_dev *idev;
2557 
2558 	ASSERT_RTNL();
2559 
2560 	idev = ipv6_find_idev(dev);
2561 	if (IS_ERR(idev))
2562 		return idev;
2563 
2564 	if (idev->cnf.disable_ipv6)
2565 		return ERR_PTR(-EACCES);
2566 
2567 	/* Add default multicast route */
2568 	if (!(dev->flags & IFF_LOOPBACK) && !netif_is_l3_master(dev))
2569 		addrconf_add_mroute(dev);
2570 
2571 	return idev;
2572 }
2573 
delete_tempaddrs(struct inet6_dev * idev,struct inet6_ifaddr * ifp)2574 static void delete_tempaddrs(struct inet6_dev *idev,
2575 			     struct inet6_ifaddr *ifp)
2576 {
2577 	struct inet6_ifaddr *ift, *tmp;
2578 
2579 	write_lock_bh(&idev->lock);
2580 	list_for_each_entry_safe(ift, tmp, &idev->tempaddr_list, tmp_list) {
2581 		if (ift->ifpub != ifp)
2582 			continue;
2583 
2584 		in6_ifa_hold(ift);
2585 		write_unlock_bh(&idev->lock);
2586 		ipv6_del_addr(ift);
2587 		write_lock_bh(&idev->lock);
2588 	}
2589 	write_unlock_bh(&idev->lock);
2590 }
2591 
manage_tempaddrs(struct inet6_dev * idev,struct inet6_ifaddr * ifp,__u32 valid_lft,__u32 prefered_lft,bool create,unsigned long now)2592 static void manage_tempaddrs(struct inet6_dev *idev,
2593 			     struct inet6_ifaddr *ifp,
2594 			     __u32 valid_lft, __u32 prefered_lft,
2595 			     bool create, unsigned long now)
2596 {
2597 	u32 flags;
2598 	struct inet6_ifaddr *ift;
2599 
2600 	read_lock_bh(&idev->lock);
2601 	/* update all temporary addresses in the list */
2602 	list_for_each_entry(ift, &idev->tempaddr_list, tmp_list) {
2603 		int age, max_valid, max_prefered;
2604 
2605 		if (ifp != ift->ifpub)
2606 			continue;
2607 
2608 		/* RFC 4941 section 3.3:
2609 		 * If a received option will extend the lifetime of a public
2610 		 * address, the lifetimes of temporary addresses should
2611 		 * be extended, subject to the overall constraint that no
2612 		 * temporary addresses should ever remain "valid" or "preferred"
2613 		 * for a time longer than (TEMP_VALID_LIFETIME) or
2614 		 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), respectively.
2615 		 */
2616 		age = (now - ift->cstamp) / HZ;
2617 		max_valid = READ_ONCE(idev->cnf.temp_valid_lft) - age;
2618 		if (max_valid < 0)
2619 			max_valid = 0;
2620 
2621 		max_prefered = READ_ONCE(idev->cnf.temp_prefered_lft) -
2622 			       idev->desync_factor - age;
2623 		if (max_prefered < 0)
2624 			max_prefered = 0;
2625 
2626 		if (valid_lft > max_valid)
2627 			valid_lft = max_valid;
2628 
2629 		if (prefered_lft > max_prefered)
2630 			prefered_lft = max_prefered;
2631 
2632 		spin_lock(&ift->lock);
2633 		flags = ift->flags;
2634 		ift->valid_lft = valid_lft;
2635 		ift->prefered_lft = prefered_lft;
2636 		ift->tstamp = now;
2637 		if (prefered_lft > 0)
2638 			ift->flags &= ~IFA_F_DEPRECATED;
2639 
2640 		spin_unlock(&ift->lock);
2641 		if (!(flags&IFA_F_TENTATIVE))
2642 			ipv6_ifa_notify(0, ift);
2643 	}
2644 
2645 	/* Also create a temporary address if it's enabled but no temporary
2646 	 * address currently exists.
2647 	 * However, we get called with valid_lft == 0, prefered_lft == 0, create == false
2648 	 * as part of cleanup (ie. deleting the mngtmpaddr).
2649 	 * We don't want that to result in creating a new temporary ip address.
2650 	 */
2651 	if (list_empty(&idev->tempaddr_list) && (valid_lft || prefered_lft))
2652 		create = true;
2653 
2654 	if (create && READ_ONCE(idev->cnf.use_tempaddr) > 0) {
2655 		/* When a new public address is created as described
2656 		 * in [ADDRCONF], also create a new temporary address.
2657 		 */
2658 		read_unlock_bh(&idev->lock);
2659 		ipv6_create_tempaddr(ifp, false);
2660 	} else {
2661 		read_unlock_bh(&idev->lock);
2662 	}
2663 }
2664 
is_addr_mode_generate_stable(struct inet6_dev * idev)2665 static bool is_addr_mode_generate_stable(struct inet6_dev *idev)
2666 {
2667 	return idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY ||
2668 	       idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM;
2669 }
2670 
addrconf_prefix_rcv_add_addr(struct net * net,struct net_device * dev,const struct prefix_info * pinfo,struct inet6_dev * in6_dev,const struct in6_addr * addr,int addr_type,u32 addr_flags,bool sllao,bool tokenized,__u32 valid_lft,u32 prefered_lft)2671 int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
2672 				 const struct prefix_info *pinfo,
2673 				 struct inet6_dev *in6_dev,
2674 				 const struct in6_addr *addr, int addr_type,
2675 				 u32 addr_flags, bool sllao, bool tokenized,
2676 				 __u32 valid_lft, u32 prefered_lft)
2677 {
2678 	struct inet6_ifaddr *ifp = ipv6_get_ifaddr(net, addr, dev, 1);
2679 	int create = 0, update_lft = 0;
2680 
2681 	if (!ifp && valid_lft) {
2682 		int max_addresses = READ_ONCE(in6_dev->cnf.max_addresses);
2683 		struct ifa6_config cfg = {
2684 			.pfx = addr,
2685 			.plen = pinfo->prefix_len,
2686 			.ifa_flags = addr_flags,
2687 			.valid_lft = valid_lft,
2688 			.preferred_lft = prefered_lft,
2689 			.scope = addr_type & IPV6_ADDR_SCOPE_MASK,
2690 			.ifa_proto = IFAPROT_KERNEL_RA
2691 		};
2692 
2693 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
2694 		if ((READ_ONCE(net->ipv6.devconf_all->optimistic_dad) ||
2695 		     READ_ONCE(in6_dev->cnf.optimistic_dad)) &&
2696 		    !net->ipv6.devconf_all->forwarding && sllao)
2697 			cfg.ifa_flags |= IFA_F_OPTIMISTIC;
2698 #endif
2699 
2700 		/* Do not allow to create too much of autoconfigured
2701 		 * addresses; this would be too easy way to crash kernel.
2702 		 */
2703 		if (!max_addresses ||
2704 		    ipv6_count_addresses(in6_dev) < max_addresses)
2705 			ifp = ipv6_add_addr(in6_dev, &cfg, false, NULL);
2706 
2707 		if (IS_ERR_OR_NULL(ifp))
2708 			return -1;
2709 
2710 		create = 1;
2711 		spin_lock_bh(&ifp->lock);
2712 		ifp->flags |= IFA_F_MANAGETEMPADDR;
2713 		ifp->cstamp = jiffies;
2714 		ifp->tokenized = tokenized;
2715 		spin_unlock_bh(&ifp->lock);
2716 		addrconf_dad_start(ifp);
2717 	}
2718 
2719 	if (ifp) {
2720 		u32 flags;
2721 		unsigned long now;
2722 		u32 stored_lft;
2723 
2724 		/* update lifetime (RFC2462 5.5.3 e) */
2725 		spin_lock_bh(&ifp->lock);
2726 		now = jiffies;
2727 		if (ifp->valid_lft > (now - ifp->tstamp) / HZ)
2728 			stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
2729 		else
2730 			stored_lft = 0;
2731 
2732 		/* RFC4862 Section 5.5.3e:
2733 		 * "Note that the preferred lifetime of the
2734 		 *  corresponding address is always reset to
2735 		 *  the Preferred Lifetime in the received
2736 		 *  Prefix Information option, regardless of
2737 		 *  whether the valid lifetime is also reset or
2738 		 *  ignored."
2739 		 *
2740 		 * So we should always update prefered_lft here.
2741 		 */
2742 		update_lft = !create && stored_lft;
2743 
2744 		if (update_lft && !READ_ONCE(in6_dev->cnf.ra_honor_pio_life)) {
2745 			const u32 minimum_lft = min_t(u32,
2746 				stored_lft, MIN_VALID_LIFETIME);
2747 			valid_lft = max(valid_lft, minimum_lft);
2748 		}
2749 
2750 		if (update_lft) {
2751 			ifp->valid_lft = valid_lft;
2752 			ifp->prefered_lft = prefered_lft;
2753 			WRITE_ONCE(ifp->tstamp, now);
2754 			flags = ifp->flags;
2755 			ifp->flags &= ~IFA_F_DEPRECATED;
2756 			spin_unlock_bh(&ifp->lock);
2757 
2758 			if (!(flags&IFA_F_TENTATIVE))
2759 				ipv6_ifa_notify(0, ifp);
2760 		} else
2761 			spin_unlock_bh(&ifp->lock);
2762 
2763 		manage_tempaddrs(in6_dev, ifp, valid_lft, prefered_lft,
2764 				 create, now);
2765 
2766 		in6_ifa_put(ifp);
2767 		addrconf_verify(net);
2768 	}
2769 
2770 	return 0;
2771 }
2772 EXPORT_SYMBOL_GPL(addrconf_prefix_rcv_add_addr);
2773 
addrconf_prefix_rcv(struct net_device * dev,u8 * opt,int len,bool sllao)2774 void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
2775 {
2776 	struct prefix_info *pinfo;
2777 	struct fib6_table *table;
2778 	__u32 valid_lft;
2779 	__u32 prefered_lft;
2780 	int addr_type, err;
2781 	u32 addr_flags = 0;
2782 	struct inet6_dev *in6_dev;
2783 	struct net *net = dev_net(dev);
2784 	bool ignore_autoconf = false;
2785 
2786 	pinfo = (struct prefix_info *) opt;
2787 
2788 	if (len < sizeof(struct prefix_info)) {
2789 		netdev_dbg(dev, "addrconf: prefix option too short\n");
2790 		return;
2791 	}
2792 
2793 	/*
2794 	 *	Validation checks ([ADDRCONF], page 19)
2795 	 */
2796 
2797 	addr_type = ipv6_addr_type(&pinfo->prefix);
2798 
2799 	if (addr_type & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL))
2800 		return;
2801 
2802 	valid_lft = ntohl(pinfo->valid);
2803 	prefered_lft = ntohl(pinfo->prefered);
2804 
2805 	if (prefered_lft > valid_lft) {
2806 		net_warn_ratelimited("addrconf: prefix option has invalid lifetime\n");
2807 		return;
2808 	}
2809 
2810 	in6_dev = in6_dev_get(dev);
2811 
2812 	if (!in6_dev) {
2813 		net_dbg_ratelimited("addrconf: device %s not configured\n",
2814 				    dev->name);
2815 		return;
2816 	}
2817 
2818 	if (valid_lft != 0 && valid_lft < in6_dev->cnf.accept_ra_min_lft)
2819 		goto put;
2820 
2821 	/*
2822 	 *	Two things going on here:
2823 	 *	1) Add routes for on-link prefixes
2824 	 *	2) Configure prefixes with the auto flag set
2825 	 */
2826 
2827 	if (pinfo->onlink) {
2828 		struct fib6_info *rt;
2829 		unsigned long rt_expires;
2830 
2831 		/* Avoid arithmetic overflow. Really, we could
2832 		 * save rt_expires in seconds, likely valid_lft,
2833 		 * but it would require division in fib gc, that it
2834 		 * not good.
2835 		 */
2836 		if (HZ > USER_HZ)
2837 			rt_expires = addrconf_timeout_fixup(valid_lft, HZ);
2838 		else
2839 			rt_expires = addrconf_timeout_fixup(valid_lft, USER_HZ);
2840 
2841 		if (addrconf_finite_timeout(rt_expires))
2842 			rt_expires *= HZ;
2843 
2844 		rt = addrconf_get_prefix_route(&pinfo->prefix,
2845 					       pinfo->prefix_len,
2846 					       dev,
2847 					       RTF_ADDRCONF | RTF_PREFIX_RT,
2848 					       RTF_DEFAULT, true);
2849 
2850 		if (rt) {
2851 			/* Autoconf prefix route */
2852 			if (valid_lft == 0) {
2853 				ip6_del_rt(net, rt, false);
2854 				rt = NULL;
2855 			} else {
2856 				table = rt->fib6_table;
2857 				spin_lock_bh(&table->tb6_lock);
2858 
2859 				if (addrconf_finite_timeout(rt_expires)) {
2860 					/* not infinity */
2861 					fib6_set_expires(rt, jiffies + rt_expires);
2862 					fib6_add_gc_list(rt);
2863 				} else {
2864 					fib6_clean_expires(rt);
2865 					fib6_remove_gc_list(rt);
2866 				}
2867 
2868 				spin_unlock_bh(&table->tb6_lock);
2869 			}
2870 		} else if (valid_lft) {
2871 			clock_t expires = 0;
2872 			int flags = RTF_ADDRCONF | RTF_PREFIX_RT;
2873 			if (addrconf_finite_timeout(rt_expires)) {
2874 				/* not infinity */
2875 				flags |= RTF_EXPIRES;
2876 				expires = jiffies_to_clock_t(rt_expires);
2877 			}
2878 			addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len,
2879 					      0, dev, expires, flags,
2880 					      GFP_ATOMIC);
2881 		}
2882 		fib6_info_release(rt);
2883 	}
2884 
2885 	/* Try to figure out our local address for this prefix */
2886 
2887 	ignore_autoconf = READ_ONCE(in6_dev->cnf.ra_honor_pio_pflag) && pinfo->preferpd;
2888 	if (pinfo->autoconf && in6_dev->cnf.autoconf && !ignore_autoconf) {
2889 		struct in6_addr addr;
2890 		bool tokenized = false, dev_addr_generated = false;
2891 
2892 		if (pinfo->prefix_len == 64) {
2893 			memcpy(&addr, &pinfo->prefix, 8);
2894 
2895 			if (!ipv6_addr_any(&in6_dev->token)) {
2896 				read_lock_bh(&in6_dev->lock);
2897 				memcpy(addr.s6_addr + 8,
2898 				       in6_dev->token.s6_addr + 8, 8);
2899 				read_unlock_bh(&in6_dev->lock);
2900 				tokenized = true;
2901 			} else if (is_addr_mode_generate_stable(in6_dev) &&
2902 				   !ipv6_generate_stable_address(&addr, 0,
2903 								 in6_dev)) {
2904 				addr_flags |= IFA_F_STABLE_PRIVACY;
2905 				goto ok;
2906 			} else if (ipv6_generate_eui64(addr.s6_addr + 8, dev) &&
2907 				   ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) {
2908 				goto put;
2909 			} else {
2910 				dev_addr_generated = true;
2911 			}
2912 			goto ok;
2913 		}
2914 		net_dbg_ratelimited("IPv6 addrconf: prefix with wrong length %d\n",
2915 				    pinfo->prefix_len);
2916 		goto put;
2917 
2918 ok:
2919 		err = addrconf_prefix_rcv_add_addr(net, dev, pinfo, in6_dev,
2920 						   &addr, addr_type,
2921 						   addr_flags, sllao,
2922 						   tokenized, valid_lft,
2923 						   prefered_lft);
2924 		if (err)
2925 			goto put;
2926 
2927 		/* Ignore error case here because previous prefix add addr was
2928 		 * successful which will be notified.
2929 		 */
2930 		ndisc_ops_prefix_rcv_add_addr(net, dev, pinfo, in6_dev, &addr,
2931 					      addr_type, addr_flags, sllao,
2932 					      tokenized, valid_lft,
2933 					      prefered_lft,
2934 					      dev_addr_generated);
2935 	}
2936 	inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo);
2937 put:
2938 	in6_dev_put(in6_dev);
2939 }
2940 
addrconf_set_sit_dstaddr(struct net * net,struct net_device * dev,struct in6_ifreq * ireq)2941 static int addrconf_set_sit_dstaddr(struct net *net, struct net_device *dev,
2942 		struct in6_ifreq *ireq)
2943 {
2944 	struct ip_tunnel_parm_kern p = { };
2945 	int err;
2946 
2947 	if (!(ipv6_addr_type(&ireq->ifr6_addr) & IPV6_ADDR_COMPATv4))
2948 		return -EADDRNOTAVAIL;
2949 
2950 	p.iph.daddr = ireq->ifr6_addr.s6_addr32[3];
2951 	p.iph.version = 4;
2952 	p.iph.ihl = 5;
2953 	p.iph.protocol = IPPROTO_IPV6;
2954 	p.iph.ttl = 64;
2955 
2956 	if (!dev->netdev_ops->ndo_tunnel_ctl)
2957 		return -EOPNOTSUPP;
2958 	err = dev->netdev_ops->ndo_tunnel_ctl(dev, &p, SIOCADDTUNNEL);
2959 	if (err)
2960 		return err;
2961 
2962 	dev = __dev_get_by_name(net, p.name);
2963 	if (!dev)
2964 		return -ENOBUFS;
2965 	return dev_open(dev, NULL);
2966 }
2967 
2968 /*
2969  *	Set destination address.
2970  *	Special case for SIT interfaces where we create a new "virtual"
2971  *	device.
2972  */
int addrconf_set_dstaddr(struct net *net, void __user *arg)
{
	struct in6_ifreq ireq;
	struct net_device *dev;
	int err;

	if (!IS_ENABLED(CONFIG_IPV6_SIT))
		return -ENODEV;

	if (copy_from_user(&ireq, arg, sizeof(ireq)))
		return -EFAULT;

	rtnl_net_lock(net);
	dev = __dev_get_by_index(net, ireq.ifr6_ifindex);
	if (!dev || dev->type != ARPHRD_SIT)
		err = -ENODEV;
	else
		err = addrconf_set_sit_dstaddr(net, dev, &ireq);
	rtnl_net_unlock(net);

	return err;
}
2991 
/* Join or leave a multicast group on behalf of the given socket. */
static int ipv6_mc_config(struct sock *sk, bool join,
			  const struct in6_addr *addr, int ifindex)
{
	int err;

	ASSERT_RTNL();

	lock_sock(sk);
	err = join ? ipv6_sock_mc_join(sk, ifindex, addr)
		   : ipv6_sock_mc_drop(sk, ifindex, addr);
	release_sock(sk);

	return err;
}
3008 
3009 /*
3010  *	Manual configuration of address on an interface
3011  */
static int inet6_addr_add(struct net *net, struct net_device *dev,
			  struct ifa6_config *cfg, clock_t expires, u32 flags,
			  struct netlink_ext_ack *extack)
{
	struct inet6_ifaddr *ifp;
	struct inet6_dev *idev;

	ASSERT_RTNL_NET(net);

	if (cfg->plen > 128) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid prefix length");
		return -EINVAL;
	}

	/* Temporary-address management assumes the standard /64 prefix. */
	if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR && cfg->plen != 64) {
		NL_SET_ERR_MSG_MOD(extack, "address with \"mngtmpaddr\" flag must have a prefix length of 64");
		return -EINVAL;
	}

	idev = addrconf_add_dev(dev);
	if (IS_ERR(idev)) {
		NL_SET_ERR_MSG_MOD(extack, "IPv6 is disabled on this device");
		return PTR_ERR(idev);
	}

	/* Join the group first; it is rolled back below if the address
	 * cannot be added.
	 */
	if (cfg->ifa_flags & IFA_F_MCAUTOJOIN) {
		int ret = ipv6_mc_config(net->ipv6.mc_autojoin_sk,
					 true, cfg->pfx, dev->ifindex);

		if (ret < 0) {
			NL_SET_ERR_MSG_MOD(extack, "Multicast auto join failed");
			return ret;
		}
	}

	cfg->scope = ipv6_addr_scope(cfg->pfx);

	ifp = ipv6_add_addr(idev, cfg, true, extack);
	if (!IS_ERR(ifp)) {
		if (!(cfg->ifa_flags & IFA_F_NOPREFIXROUTE)) {
			addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
					      ifp->rt_priority, dev, expires,
					      flags, GFP_KERNEL);
		}

		/* Send a netlink notification if DAD is enabled and
		 * optimistic flag is not set
		 */
		if (!(ifp->flags & (IFA_F_OPTIMISTIC | IFA_F_NODAD)))
			ipv6_ifa_notify(0, ifp);
		/*
		 * Note that section 3.1 of RFC 4429 indicates
		 * that the Optimistic flag should not be set for
		 * manually configured addresses
		 */
		addrconf_dad_start(ifp);
		if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR)
			manage_tempaddrs(idev, ifp, cfg->valid_lft,
					 cfg->preferred_lft, true, jiffies);
		in6_ifa_put(ifp);
		addrconf_verify_rtnl(net);
		return 0;
	} else if (cfg->ifa_flags & IFA_F_MCAUTOJOIN) {
		/* Address add failed: undo the auto-join performed above. */
		ipv6_mc_config(net->ipv6.mc_autojoin_sk, false,
			       cfg->pfx, dev->ifindex);
	}

	return PTR_ERR(ifp);
}
3081 
static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
			  const struct in6_addr *pfx, unsigned int plen,
			  struct netlink_ext_ack *extack)
{
	struct inet6_ifaddr *ifp;
	struct inet6_dev *idev;
	struct net_device *dev;

	if (plen > 128) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid prefix length");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, ifindex);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to find the interface");
		return -ENODEV;
	}

	idev = __in6_dev_get_rtnl_net(dev);
	if (!idev) {
		NL_SET_ERR_MSG_MOD(extack, "IPv6 is disabled on this device");
		return -ENXIO;
	}

	read_lock_bh(&idev->lock);
	list_for_each_entry(ifp, &idev->addr_list, if_list) {
		if (ifp->prefix_len == plen &&
		    ipv6_addr_equal(pfx, &ifp->addr)) {
			/* Pin the entry, then drop the lock before calling
			 * into the deletion paths below.
			 */
			in6_ifa_hold(ifp);
			read_unlock_bh(&idev->lock);

			/* Deleting a managing address also removes the
			 * temporary addresses derived from it.
			 */
			if (!(ifp->flags & IFA_F_TEMPORARY) &&
			    (ifp->flags & IFA_F_MANAGETEMPADDR))
				delete_tempaddrs(idev, ifp);

			ipv6_del_addr(ifp);

			addrconf_verify_rtnl(net);
			if (ipv6_addr_is_multicast(pfx)) {
				ipv6_mc_config(net->ipv6.mc_autojoin_sk,
					       false, pfx, dev->ifindex);
			}
			return 0;
		}
	}
	read_unlock_bh(&idev->lock);

	NL_SET_ERR_MSG_MOD(extack, "address not found");
	return -EADDRNOTAVAIL;
}
3133 
3134 
/* ioctl(SIOCSIFADDR) entry point: add an address described by a
 * user-space struct in6_ifreq.
 */
int addrconf_add_ifaddr(struct net *net, void __user *arg)
{
	struct ifa6_config cfg = {
		.ifa_flags = IFA_F_PERMANENT,
		.preferred_lft = INFINITY_LIFE_TIME,
		.valid_lft = INFINITY_LIFE_TIME,
	};
	struct in6_ifreq ireq;
	struct net_device *dev;
	int err = -ENODEV;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	if (copy_from_user(&ireq, arg, sizeof(ireq)))
		return -EFAULT;

	cfg.pfx = &ireq.ifr6_addr;
	cfg.plen = ireq.ifr6_prefixlen;

	rtnl_net_lock(net);
	dev = __dev_get_by_index(net, ireq.ifr6_ifindex);
	if (dev) {
		netdev_lock_ops(dev);
		err = inet6_addr_add(net, dev, &cfg, 0, 0, NULL);
		netdev_unlock_ops(dev);
	}
	rtnl_net_unlock(net);

	return err;
}
3167 
/* ioctl(SIOCDIFADDR) entry point: delete the address described by a
 * user-space struct in6_ifreq.
 */
int addrconf_del_ifaddr(struct net *net, void __user *arg)
{
	struct in6_ifreq ireq;
	int err;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	if (copy_from_user(&ireq, arg, sizeof(ireq)))
		return -EFAULT;

	rtnl_net_lock(net);
	err = inet6_addr_del(net, ireq.ifr6_ifindex, 0, &ireq.ifr6_addr,
			     ireq.ifr6_prefixlen, NULL);
	rtnl_net_unlock(net);

	return err;
}
3185 
/* Add a kernel-originated permanent address with infinite lifetimes. */
static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
		     int plen, int scope, u8 proto)
{
	struct ifa6_config cfg = {
		.pfx = addr,
		.plen = plen,
		.ifa_flags = IFA_F_PERMANENT,
		.valid_lft = INFINITY_LIFE_TIME,
		.preferred_lft = INFINITY_LIFE_TIME,
		.scope = scope,
		.ifa_proto = proto
	};
	struct inet6_ifaddr *ifp;

	ifp = ipv6_add_addr(idev, &cfg, true, NULL);
	if (IS_ERR(ifp))
		return;

	/* Clear the tentative flag and announce the address right away. */
	spin_lock_bh(&ifp->lock);
	ifp->flags &= ~IFA_F_TENTATIVE;
	spin_unlock_bh(&ifp->lock);

	rt_genid_bump_ipv6(dev_net(idev->dev));
	ipv6_ifa_notify(RTM_NEWADDR, ifp);
	in6_ifa_put(ifp);
}
3210 
3211 #if IS_ENABLED(CONFIG_IPV6_SIT) || IS_ENABLED(CONFIG_NET_IPGRE)
/* Derive IPv6 addresses for a v4-mapped tunnel device (SIT/GRE) from
 * the device's own IPv4 address, or, when it has none, from every IPv4
 * address configured on any UP interface in the namespace.
 */
static void add_v4_addrs(struct inet6_dev *idev)
{
	struct in6_addr addr;
	struct net_device *dev;
	struct net *net = dev_net(idev->dev);
	int scope, plen;
	u32 pflags = 0;

	ASSERT_RTNL();

	memset(&addr, 0, sizeof(struct in6_addr));
	/* Seed the low 32 bits with the device's IPv4 address (if any). */
	memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4);

	if (!(idev->dev->flags & IFF_POINTOPOINT) && idev->dev->type == ARPHRD_SIT) {
		/* Non-p2p SIT: use IPv4-compatible ::a.b.c.d addresses. */
		scope = IPV6_ADDR_COMPATv4;
		plen = 96;
		pflags |= RTF_NONEXTHOP;
	} else {
		if (idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_NONE)
			return;

		/* Otherwise build link-local fe80::<v4-addr> addresses. */
		addr.s6_addr32[0] = htonl(0xfe800000);
		scope = IFA_LINK;
		plen = 64;
	}

	if (addr.s6_addr32[3]) {
		/* Device has its own IPv4 address: one address is enough. */
		add_addr(idev, &addr, plen, scope, IFAPROT_UNSPEC);
		addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags,
				      GFP_KERNEL);
		return;
	}

	/* No bound IPv4 address: mirror every v4 address in the namespace. */
	for_each_netdev(net, dev) {
		struct in_device *in_dev = __in_dev_get_rtnl(dev);
		if (in_dev && (dev->flags & IFF_UP)) {
			struct in_ifaddr *ifa;
			int flag = scope;

			in_dev_for_each_ifa_rtnl(ifa, in_dev) {
				addr.s6_addr32[3] = ifa->ifa_local;

				if (ifa->ifa_scope == RT_SCOPE_LINK)
					continue;
				if (ifa->ifa_scope >= RT_SCOPE_HOST) {
					if (idev->dev->flags&IFF_POINTOPOINT)
						continue;
					flag |= IFA_HOST;
				}

				add_addr(idev, &addr, plen, flag,
					 IFAPROT_UNSPEC);
				addrconf_prefix_route(&addr, plen, 0, idev->dev,
						      0, pflags, GFP_KERNEL);
			}
		}
	}
}
3270 #endif
3271 
init_loopback(struct net_device * dev)3272 static void init_loopback(struct net_device *dev)
3273 {
3274 	struct inet6_dev  *idev;
3275 
3276 	/* ::1 */
3277 
3278 	ASSERT_RTNL();
3279 
3280 	idev = ipv6_find_idev(dev);
3281 	if (IS_ERR(idev)) {
3282 		pr_debug("%s: add_dev failed\n", __func__);
3283 		return;
3284 	}
3285 
3286 	add_addr(idev, &in6addr_loopback, 128, IFA_HOST, IFAPROT_KERNEL_LO);
3287 }
3288 
/* Add a link-local (fe80::/64) address to the device, install the
 * matching prefix route, and kick off DAD on it.
 */
void addrconf_add_linklocal(struct inet6_dev *idev,
			    const struct in6_addr *addr, u32 flags)
{
	struct ifa6_config cfg = {
		.pfx = addr,
		.plen = 64,
		.ifa_flags = flags | IFA_F_PERMANENT,
		.valid_lft = INFINITY_LIFE_TIME,
		.preferred_lft = INFINITY_LIFE_TIME,
		.scope = IFA_LINK,
		.ifa_proto = IFAPROT_KERNEL_LL
	};
	struct inet6_ifaddr *ifp;

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/* Optimistic DAD is only used when enabled (globally or per
	 * device) and the node is not forwarding.
	 */
	if ((READ_ONCE(dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad) ||
	     READ_ONCE(idev->cnf.optimistic_dad)) &&
	    !dev_net(idev->dev)->ipv6.devconf_all->forwarding)
		cfg.ifa_flags |= IFA_F_OPTIMISTIC;
#endif

	ifp = ipv6_add_addr(idev, &cfg, true, NULL);
	if (!IS_ERR(ifp)) {
		addrconf_prefix_route(&ifp->addr, ifp->prefix_len, 0, idev->dev,
				      0, 0, GFP_ATOMIC);
		addrconf_dad_start(ifp);
		in6_ifa_put(ifp);
	}
}
EXPORT_SYMBOL_GPL(addrconf_add_linklocal);
3319 
ipv6_reserved_interfaceid(struct in6_addr address)3320 static bool ipv6_reserved_interfaceid(struct in6_addr address)
3321 {
3322 	if ((address.s6_addr32[2] | address.s6_addr32[3]) == 0)
3323 		return true;
3324 
3325 	if (address.s6_addr32[2] == htonl(0x02005eff) &&
3326 	    ((address.s6_addr32[3] & htonl(0xfe000000)) == htonl(0xfe000000)))
3327 		return true;
3328 
3329 	if (address.s6_addr32[2] == htonl(0xfdffffff) &&
3330 	    ((address.s6_addr32[3] & htonl(0xffffff80)) == htonl(0xffffff80)))
3331 		return true;
3332 
3333 	return false;
3334 }
3335 
/* Generate a stable-privacy interface identifier (in the spirit of
 * RFC 7217) for the prefix in @address, writing the result back into
 * @address on success.  Returns 0 on success, -1 when no stable secret
 * is configured or the retry budget is exhausted.
 */
static int ipv6_generate_stable_address(struct in6_addr *address,
					u8 dad_count,
					const struct inet6_dev *idev)
{
	/* The static sha_ctx and data buffer are shared across calls and
	 * guarded by the static lock below.
	 */
	static DEFINE_SPINLOCK(lock);
	static struct sha1_ctx sha_ctx;

	static union {
		u8 __data[SHA1_BLOCK_SIZE];
		struct {
			struct in6_addr secret;
			__be32 prefix[2];
			unsigned char hwaddr[MAX_ADDR_LEN];
			u8 dad_count;
		} __packed;
	} data;

	struct in6_addr secret;
	struct in6_addr temp;
	struct net *net = dev_net(idev->dev);

	BUILD_BUG_ON(sizeof(data.__data) != sizeof(data));

	/* Per-device secret wins over the namespace default. */
	if (idev->cnf.stable_secret.initialized)
		secret = idev->cnf.stable_secret.secret;
	else if (net->ipv6.devconf_dflt->stable_secret.initialized)
		secret = net->ipv6.devconf_dflt->stable_secret.secret;
	else
		return -1;

retry:
	spin_lock_bh(&lock);

	sha1_init(&sha_ctx);

	/* Hash input: secret || prefix || permanent hwaddr || dad_count. */
	memset(&data, 0, sizeof(data));
	memcpy(data.hwaddr, idev->dev->perm_addr, idev->dev->addr_len);
	data.prefix[0] = address->s6_addr32[0];
	data.prefix[1] = address->s6_addr32[1];
	data.secret = secret;
	data.dad_count = dad_count;

	sha1_update(&sha_ctx, data.__data, sizeof(data));

	/*
	 * Note that the SHA-1 finalization is omitted here, and the digest is
	 * pulled directly from the internal SHA-1 state (making it incompatible
	 * with standard SHA-1).  Unusual, but technically okay since the data
	 * length is fixed and is a multiple of the SHA-1 block size.
	 */
	temp = *address;
	temp.s6_addr32[2] = (__force __be32)sha_ctx.state.h[0];
	temp.s6_addr32[3] = (__force __be32)sha_ctx.state.h[1];

	spin_unlock_bh(&lock);

	/* Reserved IDs are rehashed with a bumped counter, up to the
	 * sysctl-configured retry limit.
	 */
	if (ipv6_reserved_interfaceid(temp)) {
		dad_count++;
		if (dad_count > dev_net(idev->dev)->ipv6.sysctl.idgen_retries)
			return -1;
		goto retry;
	}

	*address = temp;
	return 0;
}
3402 
ipv6_gen_mode_random_init(struct inet6_dev * idev)3403 static void ipv6_gen_mode_random_init(struct inet6_dev *idev)
3404 {
3405 	struct ipv6_stable_secret *s = &idev->cnf.stable_secret;
3406 
3407 	if (s->initialized)
3408 		return;
3409 	s = &idev->cnf.stable_secret;
3410 	get_random_bytes(&s->secret, sizeof(s->secret));
3411 	s->initialized = true;
3412 }
3413 
/* Generate the device's link-local address according to its configured
 * addr_gen_mode, optionally falling back to a bare fe80::/64 prefix
 * route when identifier generation fails and @prefix_route is set.
 */
static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
{
	struct in6_addr addr;

	/* no link local addresses on L3 master devices */
	if (netif_is_l3_master(idev->dev))
		return;

	/* no link local addresses on devices flagged as slaves */
	if (idev->dev->priv_flags & IFF_NO_ADDRCONF)
		return;

	ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);

	switch (idev->cnf.addr_gen_mode) {
	case IN6_ADDR_GEN_MODE_RANDOM:
		/* RANDOM seeds a random secret, then proceeds exactly like
		 * STABLE_PRIVACY below.
		 */
		ipv6_gen_mode_random_init(idev);
		fallthrough;
	case IN6_ADDR_GEN_MODE_STABLE_PRIVACY:
		if (!ipv6_generate_stable_address(&addr, 0, idev))
			addrconf_add_linklocal(idev, &addr,
					       IFA_F_STABLE_PRIVACY);
		else if (prefix_route)
			addrconf_prefix_route(&addr, 64, 0, idev->dev,
					      0, 0, GFP_KERNEL);
		break;
	case IN6_ADDR_GEN_MODE_EUI64:
		/* addrconf_add_linklocal also adds a prefix_route and we
		 * only need to care about prefix routes if ipv6_generate_eui64
		 * couldn't generate one.
		 */
		if (ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) == 0)
			addrconf_add_linklocal(idev, &addr, 0);
		else if (prefix_route)
			addrconf_prefix_route(&addr, 64, 0, idev->dev,
					      0, 0, GFP_KERNEL);
		break;
	case IN6_ADDR_GEN_MODE_NONE:
	default:
		/* will not add any link local address */
		break;
	}
}
3457 
addrconf_dev_config(struct net_device * dev)3458 static void addrconf_dev_config(struct net_device *dev)
3459 {
3460 	struct inet6_dev *idev;
3461 
3462 	ASSERT_RTNL();
3463 
3464 	if ((dev->type != ARPHRD_ETHER) &&
3465 	    (dev->type != ARPHRD_FDDI) &&
3466 	    (dev->type != ARPHRD_ARCNET) &&
3467 	    (dev->type != ARPHRD_INFINIBAND) &&
3468 	    (dev->type != ARPHRD_IEEE1394) &&
3469 	    (dev->type != ARPHRD_TUNNEL6) &&
3470 	    (dev->type != ARPHRD_6LOWPAN) &&
3471 	    (dev->type != ARPHRD_IP6GRE) &&
3472 	    (dev->type != ARPHRD_TUNNEL) &&
3473 	    (dev->type != ARPHRD_NONE) &&
3474 	    (dev->type != ARPHRD_RAWIP)) {
3475 		/* Alas, we support only Ethernet autoconfiguration. */
3476 		idev = __in6_dev_get(dev);
3477 		if (!IS_ERR_OR_NULL(idev) && dev->flags & IFF_UP &&
3478 		    dev->flags & IFF_MULTICAST)
3479 			ipv6_mc_up(idev);
3480 		return;
3481 	}
3482 
3483 	idev = addrconf_add_dev(dev);
3484 	if (IS_ERR(idev))
3485 		return;
3486 
3487 	/* this device type has no EUI support */
3488 	if (dev->type == ARPHRD_NONE &&
3489 	    idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
3490 		WRITE_ONCE(idev->cnf.addr_gen_mode,
3491 			   IN6_ADDR_GEN_MODE_RANDOM);
3492 
3493 	addrconf_addr_gen(idev, false);
3494 }
3495 
3496 #if IS_ENABLED(CONFIG_IPV6_SIT)
addrconf_sit_config(struct net_device * dev)3497 static void addrconf_sit_config(struct net_device *dev)
3498 {
3499 	struct inet6_dev *idev;
3500 
3501 	ASSERT_RTNL();
3502 
3503 	/*
3504 	 * Configure the tunnel with one of our IPv4
3505 	 * addresses... we should configure all of
3506 	 * our v4 addrs in the tunnel
3507 	 */
3508 
3509 	idev = ipv6_find_idev(dev);
3510 	if (IS_ERR(idev)) {
3511 		pr_debug("%s: add_dev failed\n", __func__);
3512 		return;
3513 	}
3514 
3515 	if (dev->priv_flags & IFF_ISATAP) {
3516 		addrconf_addr_gen(idev, false);
3517 		return;
3518 	}
3519 
3520 	add_v4_addrs(idev);
3521 
3522 	if (dev->flags&IFF_POINTOPOINT)
3523 		addrconf_add_mroute(dev);
3524 }
3525 #endif
3526 
3527 #if IS_ENABLED(CONFIG_NET_IPGRE)
addrconf_gre_config(struct net_device * dev)3528 static void addrconf_gre_config(struct net_device *dev)
3529 {
3530 	struct inet6_dev *idev;
3531 
3532 	ASSERT_RTNL();
3533 
3534 	idev = addrconf_add_dev(dev);
3535 	if (IS_ERR(idev))
3536 		return;
3537 
3538 	/* Generate the IPv6 link-local address using addrconf_addr_gen(),
3539 	 * unless we have an IPv4 GRE device not bound to an IP address and
3540 	 * which is in EUI64 mode (as __ipv6_isatap_ifid() would fail in this
3541 	 * case). Such devices fall back to add_v4_addrs() instead.
3542 	 */
3543 	if (!(*(__be32 *)dev->dev_addr == 0 &&
3544 	      idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)) {
3545 		addrconf_addr_gen(idev, true);
3546 		return;
3547 	}
3548 
3549 	add_v4_addrs(idev);
3550 }
3551 #endif
3552 
/* Dispatch automatic address configuration by hardware type. */
static void addrconf_init_auto_addrs(struct net_device *dev)
{
	switch (dev->type) {
#if IS_ENABLED(CONFIG_IPV6_SIT)
	case ARPHRD_SIT:
		addrconf_sit_config(dev);
		break;
#endif
#if IS_ENABLED(CONFIG_NET_IPGRE)
	case ARPHRD_IPGRE:
		addrconf_gre_config(dev);
		break;
#endif
	case ARPHRD_LOOPBACK:
		init_loopback(dev);
		break;

	default:
		addrconf_dev_config(dev);
		break;
	}
}
3575 
/* Re-create the host route and prefix route for a permanent address
 * that was kept across an interface down/up cycle, and restart DAD if
 * the address was parked in PREDAD state.  Returns 0 or a negative
 * errno if the host route cannot be allocated.
 */
static int fixup_permanent_addr(struct net *net,
				struct inet6_dev *idev,
				struct inet6_ifaddr *ifp)
{
	/* !fib6_node means the host route was removed from the
	 * FIB, for example, if 'lo' device is taken down. In that
	 * case regenerate the host route.
	 */
	if (!ifp->rt || !ifp->rt->fib6_node) {
		struct fib6_info *f6i, *prev;

		f6i = addrconf_f6i_alloc(net, idev, &ifp->addr, false,
					 GFP_ATOMIC, NULL);
		if (IS_ERR(f6i))
			return PTR_ERR(f6i);

		/* ifp->rt can be accessed outside of rtnl */
		spin_lock(&ifp->lock);
		prev = ifp->rt;
		ifp->rt = f6i;
		spin_unlock(&ifp->lock);

		fib6_info_release(prev);
	}

	if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {
		addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
				      ifp->rt_priority, idev->dev, 0, 0,
				      GFP_ATOMIC);
	}

	if (ifp->state == INET6_IFADDR_STATE_PREDAD)
		addrconf_dad_start(ifp);

	return 0;
}
3612 
/* Restore routes for all permanent addresses on @dev; drop any address
 * whose routes cannot be restored.
 */
static void addrconf_permanent_addr(struct net *net, struct net_device *dev)
{
	struct inet6_ifaddr *ifp, *tmp;
	struct inet6_dev *idev;

	idev = __in6_dev_get(dev);
	if (!idev)
		return;

	write_lock_bh(&idev->lock);

	/* _safe variant: the failure path below drops the lock and may
	 * remove the current entry.
	 */
	list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
		if ((ifp->flags & IFA_F_PERMANENT) &&
		    fixup_permanent_addr(net, idev, ifp) < 0) {
			write_unlock_bh(&idev->lock);
			in6_ifa_hold(ifp);
			ipv6_del_addr(ifp);
			write_lock_bh(&idev->lock);

			net_info_ratelimited("%s: Failed to add prefix route for address %pI6c; dropping\n",
					     idev->dev->name, &ifp->addr);
		}
	}

	write_unlock_bh(&idev->lock);
}
3639 
/* Netdev notifier callback: drives IPv6 device state across the device
 * life cycle — registration, MTU changes, link up/down, renames, type
 * changes and L3-master (e.g. VRF) enslavement.
 */
static int addrconf_notify(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_change_info *change_info;
	struct netdev_notifier_changeupper_info *info;
	struct inet6_dev *idev = __in6_dev_get(dev);
	struct net *net = dev_net(dev);
	int run_pending = 0;
	int err;

	switch (event) {
	case NETDEV_REGISTER:
		/* Create the inet6_dev as soon as the device can carry
		 * IPv6 (MTU >= 1280).
		 */
		if (!idev && dev->mtu >= IPV6_MIN_MTU) {
			idev = ipv6_add_dev(dev);
			if (IS_ERR(idev))
				return notifier_from_errno(PTR_ERR(idev));
		}
		break;

	case NETDEV_CHANGEMTU:
		/* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
		if (dev->mtu < IPV6_MIN_MTU) {
			addrconf_ifdown(dev, dev != net->loopback_dev);
			break;
		}

		if (idev) {
			rt6_mtu_change(dev, dev->mtu);
			WRITE_ONCE(idev->cnf.mtu6, dev->mtu);
			break;
		}

		/* allocate new idev */
		idev = ipv6_add_dev(dev);
		if (IS_ERR(idev))
			break;

		/* device is still not ready */
		if (!(idev->if_flags & IF_READY))
			break;

		/* MTU grew past IPV6_MIN_MTU on a ready device: treat it
		 * like a (re)start and fall into the UP/CHANGE handling.
		 */
		run_pending = 1;
		fallthrough;
	case NETDEV_UP:
	case NETDEV_CHANGE:
		if (idev && idev->cnf.disable_ipv6)
			break;

		if (dev->priv_flags & IFF_NO_ADDRCONF) {
			/* Slaves get no addrconf, but multicast still comes
			 * up on an UP multicast-capable device.
			 */
			if (event == NETDEV_UP && !IS_ERR_OR_NULL(idev) &&
			    dev->flags & IFF_UP && dev->flags & IFF_MULTICAST)
				ipv6_mc_up(idev);
			break;
		}

		if (event == NETDEV_UP) {
			/* restore routes for permanent addresses */
			addrconf_permanent_addr(net, dev);

			if (!addrconf_link_ready(dev)) {
				/* device is not ready yet. */
				pr_debug("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
					 dev->name);
				break;
			}

			if (!idev && dev->mtu >= IPV6_MIN_MTU)
				idev = ipv6_add_dev(dev);

			if (!IS_ERR_OR_NULL(idev)) {
				idev->if_flags |= IF_READY;
				run_pending = 1;
			}
		} else if (event == NETDEV_CHANGE) {
			if (!addrconf_link_ready(dev)) {
				/* device is still not ready. */
				rt6_sync_down_dev(dev, event);
				break;
			}

			if (!IS_ERR_OR_NULL(idev)) {
				if (idev->if_flags & IF_READY) {
					/* device is already configured -
					 * but resend MLD reports, we might
					 * have roamed and need to update
					 * multicast snooping switches
					 */
					ipv6_mc_up(idev);
					change_info = ptr;
					if (change_info->flags_changed & IFF_NOARP)
						addrconf_dad_run(idev, true);
					rt6_sync_up(dev, RTNH_F_LINKDOWN);
					break;
				}
				idev->if_flags |= IF_READY;
			}

			pr_debug("ADDRCONF(NETDEV_CHANGE): %s: link becomes ready\n",
				 dev->name);

			run_pending = 1;
		}

		addrconf_init_auto_addrs(dev);

		if (!IS_ERR_OR_NULL(idev)) {
			if (run_pending)
				addrconf_dad_run(idev, false);

			/* Device has an address by now */
			rt6_sync_up(dev, RTNH_F_DEAD);

			/*
			 * If the MTU changed during the interface down,
			 * when the interface up, the changed MTU must be
			 * reflected in the idev as well as routers.
			 */
			if (idev->cnf.mtu6 != dev->mtu &&
			    dev->mtu >= IPV6_MIN_MTU) {
				rt6_mtu_change(dev, dev->mtu);
				WRITE_ONCE(idev->cnf.mtu6, dev->mtu);
			}
			WRITE_ONCE(idev->tstamp, jiffies);
			inet6_ifinfo_notify(RTM_NEWLINK, idev);

			/*
			 * If the changed mtu during down is lower than
			 * IPV6_MIN_MTU stop IPv6 on this interface.
			 */
			if (dev->mtu < IPV6_MIN_MTU)
				addrconf_ifdown(dev, dev != net->loopback_dev);
		}
		break;

	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		/*
		 *	Remove all addresses from this interface.
		 */
		addrconf_ifdown(dev, event != NETDEV_DOWN);
		break;

	case NETDEV_CHANGENAME:
		/* Sysctl and snmp6 entries are keyed by name: re-register
		 * them under the new name.
		 */
		if (idev) {
			snmp6_unregister_dev(idev);
			addrconf_sysctl_unregister(idev);
			err = addrconf_sysctl_register(idev);
			if (err)
				return notifier_from_errno(err);
			err = snmp6_register_dev(idev);
			if (err) {
				addrconf_sysctl_unregister(idev);
				return notifier_from_errno(err);
			}
		}
		break;

	case NETDEV_PRE_TYPE_CHANGE:
	case NETDEV_POST_TYPE_CHANGE:
		if (idev)
			addrconf_type_change(dev, event);
		break;

	case NETDEV_CHANGEUPPER:
		info = ptr;

		/* flush all routes if dev is linked to or unlinked from
		 * an L3 master device (e.g., VRF)
		 */
		if (info->upper_dev && netif_is_l3_master(info->upper_dev))
			addrconf_ifdown(dev, false);
	}

	return NOTIFY_OK;
}
3816 
3817 /*
3818  *	addrconf module should be notified of a device going up
3819  */
static struct notifier_block ipv6_dev_notf = {
	.notifier_call = addrconf_notify,
	/* Ordering relative to other netdev notifiers; see
	 * ADDRCONF_NOTIFY_PRIORITY for the rationale.
	 */
	.priority = ADDRCONF_NOTIFY_PRIORITY,
};
3824 
addrconf_type_change(struct net_device * dev,unsigned long event)3825 static void addrconf_type_change(struct net_device *dev, unsigned long event)
3826 {
3827 	struct inet6_dev *idev;
3828 	ASSERT_RTNL();
3829 
3830 	idev = __in6_dev_get(dev);
3831 
3832 	if (event == NETDEV_POST_TYPE_CHANGE)
3833 		ipv6_mc_remap(idev);
3834 	else if (event == NETDEV_PRE_TYPE_CHANGE)
3835 		ipv6_mc_unmap(idev);
3836 }
3837 
addr_is_local(const struct in6_addr * addr)3838 static bool addr_is_local(const struct in6_addr *addr)
3839 {
3840 	return ipv6_addr_type(addr) &
3841 		(IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
3842 }
3843 
/* Tear down IPv6 state on @dev for a link-down (@unregister == false)
 * or device unregistration (@unregister == true).  On a plain down,
 * permanent addresses may be retained (keep_addr_on_down); on
 * unregister everything is destroyed.
 */
static int addrconf_ifdown(struct net_device *dev, bool unregister)
{
	unsigned long event = unregister ? NETDEV_UNREGISTER : NETDEV_DOWN;
	struct net *net = dev_net(dev);
	struct inet6_dev *idev;
	struct inet6_ifaddr *ifa;
	LIST_HEAD(tmp_addr_list);
	bool keep_addr = false;
	bool was_ready;
	int state, i;

	ASSERT_RTNL();

	rt6_disable_ip(dev, event);

	idev = __in6_dev_get(dev);
	if (!idev)
		return -ENODEV;

	/*
	 * Step 1: remove reference to ipv6 device from parent device.
	 *	   Do not dev_put!
	 */
	if (unregister) {
		WRITE_ONCE(idev->dead, 1);

		/* protected by rtnl_lock */
		RCU_INIT_POINTER(dev->ip6_ptr, NULL);

		/* Step 1.5: remove snmp6 entry */
		snmp6_unregister_dev(idev);

	}

	/* combine the user config with event to determine if permanent
	 * addresses are to be removed from address hash table
	 */
	if (!unregister && !idev->cnf.disable_ipv6) {
		/* aggregate the system setting and interface setting */
		int _keep_addr = READ_ONCE(net->ipv6.devconf_all->keep_addr_on_down);

		if (!_keep_addr)
			_keep_addr = READ_ONCE(idev->cnf.keep_addr_on_down);

		keep_addr = (_keep_addr > 0);
	}

	/* Step 2: clear hash table */
	for (i = 0; i < IN6_ADDR_HSIZE; i++) {
		struct hlist_head *h = &net->ipv6.inet6_addr_lst[i];

		spin_lock_bh(&net->ipv6.addrconf_hash_lock);
restart:
		/* Restart the bucket scan after each removal since the
		 * iteration position is invalidated by the unlink.
		 */
		hlist_for_each_entry_rcu(ifa, h, addr_lst) {
			if (ifa->idev == idev) {
				addrconf_del_dad_work(ifa);
				/* combined flag + permanent flag decide if
				 * address is retained on a down event
				 */
				if (!keep_addr ||
				    !(ifa->flags & IFA_F_PERMANENT) ||
				    addr_is_local(&ifa->addr)) {
					hlist_del_init_rcu(&ifa->addr_lst);
					goto restart;
				}
			}
		}
		spin_unlock_bh(&net->ipv6.addrconf_hash_lock);
	}

	write_lock_bh(&idev->lock);

	addrconf_del_rs_timer(idev);

	/* Step 2: clear flags for stateless addrconf, repeated down
	 *         detection
	 */
	was_ready = idev->if_flags & IF_READY;
	if (!unregister)
		idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);

	/* Step 3: clear tempaddr list */
	while (!list_empty(&idev->tempaddr_list)) {
		ifa = list_first_entry(&idev->tempaddr_list,
				       struct inet6_ifaddr, tmp_list);
		list_del(&ifa->tmp_list);
		/* Drop idev->lock before taking the per-address lock. */
		write_unlock_bh(&idev->lock);
		spin_lock_bh(&ifa->lock);

		if (ifa->ifpub) {
			in6_ifa_put(ifa->ifpub);
			ifa->ifpub = NULL;
		}
		spin_unlock_bh(&ifa->lock);
		in6_ifa_put(ifa);
		write_lock_bh(&idev->lock);
	}

	/* Step 4: snapshot the address list so it can be walked without
	 * holding idev->lock.
	 */
	list_for_each_entry(ifa, &idev->addr_list, if_list)
		list_add_tail(&ifa->if_list_aux, &tmp_addr_list);
	write_unlock_bh(&idev->lock);

	while (!list_empty(&tmp_addr_list)) {
		struct fib6_info *rt = NULL;
		bool keep;

		ifa = list_first_entry(&tmp_addr_list,
				       struct inet6_ifaddr, if_list_aux);
		list_del(&ifa->if_list_aux);

		addrconf_del_dad_work(ifa);

		keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
			!addr_is_local(&ifa->addr);

		spin_lock_bh(&ifa->lock);

		if (keep) {
			/* set state to skip the notifier below */
			state = INET6_IFADDR_STATE_DEAD;
			ifa->state = INET6_IFADDR_STATE_PREDAD;
			if (!(ifa->flags & IFA_F_NODAD))
				ifa->flags |= IFA_F_TENTATIVE;

			rt = ifa->rt;
			ifa->rt = NULL;
		} else {
			state = ifa->state;
			ifa->state = INET6_IFADDR_STATE_DEAD;
		}

		spin_unlock_bh(&ifa->lock);

		if (rt)
			ip6_del_rt(net, rt, false);

		if (state != INET6_IFADDR_STATE_DEAD) {
			__ipv6_ifa_notify(RTM_DELADDR, ifa);
			inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
		} else {
			if (idev->cnf.forwarding)
				addrconf_leave_anycast(ifa);
			addrconf_leave_solict(ifa->idev, &ifa->addr);
		}

		if (!keep) {
			write_lock_bh(&idev->lock);
			list_del_rcu(&ifa->if_list);
			write_unlock_bh(&idev->lock);
			in6_ifa_put(ifa);
		}
	}

	/* Step 5: Discard anycast and multicast list */
	if (unregister) {
		ipv6_ac_destroy_dev(idev);
		ipv6_mc_destroy_dev(idev);
	} else if (was_ready) {
		ipv6_mc_down(idev);
	}

	WRITE_ONCE(idev->tstamp, jiffies);
	idev->ra_mtu = 0;

	/* Last: Shot the device (if unregistered) */
	if (unregister) {
		addrconf_sysctl_unregister(idev);
		neigh_parms_release(&nd_tbl, idev->nd_parms);
		neigh_ifdown(&nd_tbl, dev);
		in6_dev_put(idev);
	}
	return 0;
}
4017 
/*
 * Router Solicitation retransmit timer.
 *
 * Runs holding a reference on @idev (dropped at the "put" label).  Bails
 * out if the device is dead or not IF_READY, if RAs are not accepted, or
 * if a Router Advertisement already arrived (IF_RA_RCVD).  Otherwise it
 * sends another RS from the link-local address and re-arms itself using
 * the RFC 3315 section 14 backoff.
 */
addrconf_rs_timer(struct timer_list * t)4018 static void addrconf_rs_timer(struct timer_list *t)
4019 {
4020 	struct inet6_dev *idev = timer_container_of(idev, t, rs_timer);
4021 	struct net_device *dev = idev->dev;
4022 	struct in6_addr lladdr;
4023 	int rtr_solicits;
4024 
4025 	write_lock(&idev->lock);
4026 	if (idev->dead || !(idev->if_flags & IF_READY))
4027 		goto out;
4028 
4029 	if (!ipv6_accept_ra(idev))
4030 		goto out;
4031 
4032 	/* Announcement received after solicitation was sent */
4033 	if (idev->if_flags & IF_RA_RCVD)
4034 		goto out;
4035 
4036 	rtr_solicits = READ_ONCE(idev->cnf.rtr_solicits);
4037 
	/* a negative rtr_solicits means: keep soliciting indefinitely */
4038 	if (idev->rs_probes++ < rtr_solicits || rtr_solicits < 0) {
		/* drop idev->lock around the transmit; no RS without a
		 * usable (non-tentative) link-local address */
4039 		write_unlock(&idev->lock);
4040 		if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
4041 			ndisc_send_rs(dev, &lladdr,
4042 				      &in6addr_linklocal_allrouters);
4043 		else
4044 			goto put;
4045 
4046 		write_lock(&idev->lock);
4047 		idev->rs_interval = rfc3315_s14_backoff_update(
4048 				idev->rs_interval,
4049 				READ_ONCE(idev->cnf.rtr_solicit_max_interval));
4050 		/* The wait after the last probe can be shorter */
4051 		addrconf_mod_rs_timer(idev, (idev->rs_probes ==
4052 					     READ_ONCE(idev->cnf.rtr_solicits)) ?
4053 				      READ_ONCE(idev->cnf.rtr_solicit_delay) :
4054 				      idev->rs_interval);
4055 	} else {
4056 		/*
4057 		 * Note: we do not support deprecated "all on-link"
4058 		 * assumption any longer.
4059 		 */
4060 		pr_debug("%s: no IPv6 routers present\n", idev->dev->name);
4061 	}
4062 
4063 out:
4064 	write_unlock(&idev->lock);
4065 put:
	/* release the reference taken when the timer was armed */
4066 	in6_dev_put(idev);
4067 }
4068 
4069 /*
4070  *	Duplicate Address Detection
4071  */
addrconf_dad_kick(struct inet6_ifaddr * ifp)4072 static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
4073 {
4074 	struct inet6_dev *idev = ifp->idev;
4075 	unsigned long rand_num;
4076 	u64 nonce;
4077 
4078 	if (ifp->flags & IFA_F_OPTIMISTIC)
4079 		rand_num = 0;
4080 	else
4081 		rand_num = get_random_u32_below(
4082 				READ_ONCE(idev->cnf.rtr_solicit_delay) ? : 1);
4083 
4084 	nonce = 0;
4085 	if (READ_ONCE(idev->cnf.enhanced_dad) ||
4086 	    READ_ONCE(dev_net(idev->dev)->ipv6.devconf_all->enhanced_dad)) {
4087 		do
4088 			get_random_bytes(&nonce, 6);
4089 		while (nonce == 0);
4090 	}
4091 	ifp->dad_nonce = nonce;
4092 	ifp->dad_probes = READ_ONCE(idev->cnf.dad_transmits);
4093 	addrconf_mod_dad_work(ifp, rand_num);
4094 }
4095 
/*
 * First step of DAD for @ifp: join the solicited-node multicast group
 * and either complete immediately (DAD not applicable), stop (device
 * not ready), or kick off the probe schedule.
 *
 * Locking: takes idev->lock (read) then ifp->lock; both are released on
 * every exit path.
 */
addrconf_dad_begin(struct inet6_ifaddr * ifp)4096 static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
4097 {
4098 	struct inet6_dev *idev = ifp->idev;
4099 	struct net_device *dev = idev->dev;
4100 	bool bump_id, notify = false;
4101 	struct net *net;
4102 
4103 	addrconf_join_solict(dev, &ifp->addr);
4104 
4105 	read_lock_bh(&idev->lock);
4106 	spin_lock(&ifp->lock);
4107 	if (ifp->state == INET6_IFADDR_STATE_DEAD)
4108 		goto out;
4109 
4110 	net = dev_net(dev);
	/* DAD is skipped entirely for NOARP/loopback devices, when
	 * accept_dad disables it on both the device and "all" levels,
	 * when the address is not tentative, or when IFA_F_NODAD is set:
	 * go straight to the completed state.
	 */
4111 	if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
4112 	    (READ_ONCE(net->ipv6.devconf_all->accept_dad) < 1 &&
4113 	     READ_ONCE(idev->cnf.accept_dad) < 1) ||
4114 	    !(ifp->flags&IFA_F_TENTATIVE) ||
4115 	    ifp->flags & IFA_F_NODAD) {
4116 		bool send_na = false;
4117 
4118 		if (ifp->flags & IFA_F_TENTATIVE &&
4119 		    !(ifp->flags & IFA_F_OPTIMISTIC))
4120 			send_na = true;
4121 		bump_id = ifp->flags & IFA_F_TENTATIVE;
4122 		ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
4123 		spin_unlock(&ifp->lock);
4124 		read_unlock_bh(&idev->lock);
4125 
4126 		addrconf_dad_completed(ifp, bump_id, send_na);
4127 		return;
4128 	}
4129 
4130 	if (!(idev->if_flags & IF_READY)) {
4131 		spin_unlock(&ifp->lock);
4132 		read_unlock_bh(&idev->lock);
4133 		/*
4134 		 * If the device is not ready:
4135 		 * - keep it tentative if it is a permanent address.
4136 		 * - otherwise, kill it.
4137 		 */
4138 		in6_ifa_hold(ifp);
4139 		addrconf_dad_stop(ifp, 0);
4140 		return;
4141 	}
4142 
4143 	/*
4144 	 * Optimistic nodes can start receiving
4145 	 * Frames right away
4146 	 */
4147 	if (ifp->flags & IFA_F_OPTIMISTIC) {
4148 		ip6_ins_rt(net, ifp->rt);
4149 		if (ipv6_use_optimistic_addr(net, idev)) {
4150 			/* Because optimistic nodes can use this address,
4151 			 * notify listeners. If DAD fails, RTM_DELADDR is sent.
4152 			 */
4153 			notify = true;
4154 		}
4155 	}
4156 
4157 	addrconf_dad_kick(ifp);
4158 out:
4159 	spin_unlock(&ifp->lock);
4160 	read_unlock_bh(&idev->lock);
	/* notification is deferred until after the locks are released */
4161 	if (notify)
4162 		ipv6_ifa_notify(RTM_NEWADDR, ifp);
4163 }
4164 
addrconf_dad_start(struct inet6_ifaddr * ifp)4165 static void addrconf_dad_start(struct inet6_ifaddr *ifp)
4166 {
4167 	bool begin_dad = false;
4168 
4169 	spin_lock_bh(&ifp->lock);
4170 	if (ifp->state != INET6_IFADDR_STATE_DEAD) {
4171 		ifp->state = INET6_IFADDR_STATE_PREDAD;
4172 		begin_dad = true;
4173 	}
4174 	spin_unlock_bh(&ifp->lock);
4175 
4176 	if (begin_dad)
4177 		addrconf_mod_dad_work(ifp, 0);
4178 }
4179 
/*
 * DAD worker: drives the per-address DAD state machine under the
 * per-netns RTNL.  Depending on ifp->state it either begins DAD
 * (PREDAD), aborts it (ERRDAD — possibly disabling IPv6 on the device
 * if the failed address is the EUI-64 derived link-local, i.e. a MAC
 * address conflict), or processes one probe step: finish DAD when the
 * probe budget is exhausted, otherwise send another NS and re-arm.
 *
 * The reference on @ifp taken when the work was scheduled is dropped at
 * the "out" label.
 */
addrconf_dad_work(struct work_struct * w)4180 static void addrconf_dad_work(struct work_struct *w)
4181 {
4182 	struct inet6_ifaddr *ifp = container_of(to_delayed_work(w),
4183 						struct inet6_ifaddr,
4184 						dad_work);
4185 	struct inet6_dev *idev = ifp->idev;
4186 	bool bump_id, disable_ipv6 = false;
4187 	struct in6_addr mcaddr;
4188 	struct net *net;
4189 
4190 	enum {
4191 		DAD_PROCESS,
4192 		DAD_BEGIN,
4193 		DAD_ABORT,
4194 	} action = DAD_PROCESS;
4195 
4196 	net = dev_net(idev->dev);
4197 
4198 	rtnl_net_lock(net);
4199 
4200 	spin_lock_bh(&ifp->lock);
4201 	if (ifp->state == INET6_IFADDR_STATE_PREDAD) {
4202 		action = DAD_BEGIN;
4203 		ifp->state = INET6_IFADDR_STATE_DAD;
4204 	} else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) {
4205 		action = DAD_ABORT;
4206 		ifp->state = INET6_IFADDR_STATE_POSTDAD;
4207 
		/* accept_dad > 1: a DAD failure on the EUI-64 based
		 * link-local address disables IPv6 on the interface
		 * (the MAC itself is in conflict on the link).
		 */
4208 		if ((READ_ONCE(net->ipv6.devconf_all->accept_dad) > 1 ||
4209 		     READ_ONCE(idev->cnf.accept_dad) > 1) &&
4210 		    !idev->cnf.disable_ipv6 &&
4211 		    !(ifp->flags & IFA_F_STABLE_PRIVACY)) {
4212 			struct in6_addr addr;
4213 
4214 			addr.s6_addr32[0] = htonl(0xfe800000);
4215 			addr.s6_addr32[1] = 0;
4216 
4217 			if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
4218 			    ipv6_addr_equal(&ifp->addr, &addr)) {
4219 				/* DAD failed for link-local based on MAC */
4220 				WRITE_ONCE(idev->cnf.disable_ipv6, 1);
4221 
4222 				pr_info("%s: IPv6 being disabled!\n",
4223 					ifp->idev->dev->name);
4224 				disable_ipv6 = true;
4225 			}
4226 		}
4227 	}
4228 	spin_unlock_bh(&ifp->lock);
4229 
4230 	if (action == DAD_BEGIN) {
4231 		addrconf_dad_begin(ifp);
4232 		goto out;
4233 	} else if (action == DAD_ABORT) {
4234 		in6_ifa_hold(ifp);
4235 		addrconf_dad_stop(ifp, 1);
4236 		if (disable_ipv6)
4237 			addrconf_ifdown(idev->dev, false);
4238 		goto out;
4239 	}
4240 
4241 	if (!ifp->dad_probes && addrconf_dad_end(ifp))
4242 		goto out;
4243 
4244 	write_lock_bh(&idev->lock);
4245 	if (idev->dead || !(idev->if_flags & IF_READY)) {
4246 		write_unlock_bh(&idev->lock);
4247 		goto out;
4248 	}
4249 
4250 	spin_lock(&ifp->lock);
4251 	if (ifp->state == INET6_IFADDR_STATE_DEAD) {
4252 		spin_unlock(&ifp->lock);
4253 		write_unlock_bh(&idev->lock);
4254 		goto out;
4255 	}
4256 
4257 	if (ifp->dad_probes == 0) {
4258 		bool send_na = false;
4259 
4260 		/*
4261 		 * DAD was successful
4262 		 */
4263 
4264 		if (ifp->flags & IFA_F_TENTATIVE &&
4265 		    !(ifp->flags & IFA_F_OPTIMISTIC))
4266 			send_na = true;
4267 		bump_id = ifp->flags & IFA_F_TENTATIVE;
4268 		ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
4269 		spin_unlock(&ifp->lock);
4270 		write_unlock_bh(&idev->lock);
4271 
4272 		addrconf_dad_completed(ifp, bump_id, send_na);
4273 
4274 		goto out;
4275 	}
4276 
	/* more probes to go: re-arm with at least HZ/100 delay */
4277 	ifp->dad_probes--;
4278 	addrconf_mod_dad_work(ifp,
4279 			      max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME),
4280 				  HZ/100));
4281 	spin_unlock(&ifp->lock);
4282 	write_unlock_bh(&idev->lock);
4283 
4284 	/* send a neighbour solicitation for our addr */
4285 	addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
4286 	ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any,
4287 		      ifp->dad_nonce);
4288 out:
4289 	in6_ifa_put(ifp);
4290 	rtnl_net_unlock(net);
4291 }
4292 
4293 /* ifp->idev must be at least read locked */
ipv6_lonely_lladdr(struct inet6_ifaddr * ifp)4294 static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
4295 {
4296 	struct inet6_ifaddr *ifpiter;
4297 	struct inet6_dev *idev = ifp->idev;
4298 
4299 	list_for_each_entry_reverse(ifpiter, &idev->addr_list, if_list) {
4300 		if (ifpiter->scope > IFA_LINK)
4301 			break;
4302 		if (ifp != ifpiter && ifpiter->scope == IFA_LINK &&
4303 		    (ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|
4304 				       IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) ==
4305 		    IFA_F_PERMANENT)
4306 			return false;
4307 	}
4308 	return true;
4309 }
4310 
/*
 * Finish DAD for @ifp: announce the now-valid address (RTM_NEWADDR),
 * resend the MLD report with a proper source if this is the first
 * usable link-local address, optionally send an unsolicited NA, and —
 * when router solicitations apply to this device — send the first RS
 * and arm the RS retransmit timer.
 *
 * @bump_id: bump the IPv6 route generation id (address left TENTATIVE).
 * @send_na: send an unsolicited Neighbor Advertisement if ndisc_notify
 *           is enabled on the device or globally.
 */
addrconf_dad_completed(struct inet6_ifaddr * ifp,bool bump_id,bool send_na)4311 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
4312 				   bool send_na)
4313 {
4314 	struct net_device *dev = ifp->idev->dev;
4315 	struct in6_addr lladdr;
4316 	bool send_rs, send_mld;
4317 
4318 	addrconf_del_dad_work(ifp);
4319 
4320 	/*
4321 	 *	Configure the address for reception. Now it is valid.
4322 	 */
4323 
4324 	ipv6_ifa_notify(RTM_NEWADDR, ifp);
4325 
4326 	/* If added prefix is link local and we are prepared to process
4327 	   router advertisements, start sending router solicitations.
4328 	 */
4329 
4330 	read_lock_bh(&ifp->idev->lock);
4331 	send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
4332 	send_rs = send_mld &&
4333 		  ipv6_accept_ra(ifp->idev) &&
4334 		  READ_ONCE(ifp->idev->cnf.rtr_solicits) != 0 &&
4335 		  (dev->flags & IFF_LOOPBACK) == 0 &&
4336 		  (dev->type != ARPHRD_TUNNEL) &&
4337 		  !netif_is_team_port(dev);
4338 	read_unlock_bh(&ifp->idev->lock);
4339 
4340 	/* While dad is in progress mld report's source address is in6_addrany.
4341 	 * Resend with proper ll now.
4342 	 */
4343 	if (send_mld)
4344 		ipv6_mc_dad_complete(ifp->idev);
4345 
4346 	/* send unsolicited NA if enabled */
4347 	if (send_na &&
4348 	    (READ_ONCE(ifp->idev->cnf.ndisc_notify) ||
4349 	     READ_ONCE(dev_net(dev)->ipv6.devconf_all->ndisc_notify))) {
4350 		ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifp->addr,
4351 			      /*router=*/ !!ifp->idev->cnf.forwarding,
4352 			      /*solicited=*/ false, /*override=*/ true,
4353 			      /*inc_opt=*/ true);
4354 	}
4355 
4356 	if (send_rs) {
4357 		/*
4358 		 *	If a host as already performed a random delay
4359 		 *	[...] as part of DAD [...] there is no need
4360 		 *	to delay again before sending the first RS
4361 		 */
4362 		if (ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
4363 			return;
4364 		ndisc_send_rs(dev, &lladdr, &in6addr_linklocal_allrouters);
4365 
		/* initialize the RS backoff state and arm the RS timer */
4366 		write_lock_bh(&ifp->idev->lock);
4367 		spin_lock(&ifp->lock);
4368 		ifp->idev->rs_interval = rfc3315_s14_backoff_init(
4369 			READ_ONCE(ifp->idev->cnf.rtr_solicit_interval));
4370 		ifp->idev->rs_probes = 1;
4371 		ifp->idev->if_flags |= IF_RS_SENT;
4372 		addrconf_mod_rs_timer(ifp->idev, ifp->idev->rs_interval);
4373 		spin_unlock(&ifp->lock);
4374 		write_unlock_bh(&ifp->idev->lock);
4375 	}
4376 
4377 	if (bump_id)
4378 		rt_genid_bump_ipv6(dev_net(dev));
4379 
4380 	/* Make sure that a new temporary address will be created
4381 	 * before this temporary address becomes deprecated.
4382 	 */
4383 	if (ifp->flags & IFA_F_TEMPORARY)
4384 		addrconf_verify_rtnl(dev_net(dev));
4385 }
4386 
/*
 * (Re)start DAD for addresses on @idev.  Without @restart, only
 * addresses already mid-DAD (TENTATIVE and in state DAD) are kicked;
 * with @restart, every address is pushed back to PREDAD and re-kicked.
 */
static void addrconf_dad_run(struct inet6_dev *idev, bool restart)
{
	struct inet6_ifaddr *ifa;

	read_lock_bh(&idev->lock);
	list_for_each_entry(ifa, &idev->addr_list, if_list) {
		bool mid_dad;

		spin_lock(&ifa->lock);
		mid_dad = (ifa->flags & IFA_F_TENTATIVE) &&
			  ifa->state == INET6_IFADDR_STATE_DAD;
		if (restart || mid_dad) {
			if (restart)
				ifa->state = INET6_IFADDR_STATE_PREDAD;
			addrconf_dad_kick(ifa);
		}
		spin_unlock(&ifa->lock);
	}
	read_unlock_bh(&idev->lock);
}
4404 
4405 #ifdef CONFIG_PROC_FS
/* Cursor state for the /proc/net/if_inet6 seq_file walk. */
4406 struct if6_iter_state {
4407 	struct seq_net_private p;
	/* current bucket in net->ipv6.inet6_addr_lst */
4408 	int bucket;
	/* entries of the current bucket already consumed */
4409 	int offset;
4410 };
4411 
/*
 * Return the address at the current seq_file position, or NULL at the
 * end.  At pos == 0 the bucket/offset cursor is reset; otherwise the
 * walk resumes at state->bucket and skips state->offset entries.
 * Caller must hold rcu_read_lock() (taken in if6_seq_start()).
 */
if6_get_first(struct seq_file * seq,loff_t pos)4412 static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
4413 {
4414 	struct if6_iter_state *state = seq->private;
4415 	struct net *net = seq_file_net(seq);
4416 	struct inet6_ifaddr *ifa = NULL;
4417 	int p = 0;
4418 
4419 	/* initial bucket if pos is 0 */
4420 	if (pos == 0) {
4421 		state->bucket = 0;
4422 		state->offset = 0;
4423 	}
4424 
4425 	for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
4426 		hlist_for_each_entry_rcu(ifa, &net->ipv6.inet6_addr_lst[state->bucket],
4427 					 addr_lst) {
4428 			/* sync with offset */
4429 			if (p < state->offset) {
4430 				p++;
4431 				continue;
4432 			}
4433 			return ifa;
4434 		}
4435 
4436 		/* prepare for next bucket */
4437 		state->offset = 0;
4438 		p = 0;
4439 	}
4440 	return NULL;
4441 }
4442 
/*
 * Advance to the address after @ifa: first try the remainder of the
 * current hash chain (bumping state->offset), then scan later buckets
 * with the offset reset.  Returns NULL when the table is exhausted.
 * Caller must hold rcu_read_lock().
 */
if6_get_next(struct seq_file * seq,struct inet6_ifaddr * ifa)4443 static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
4444 					 struct inet6_ifaddr *ifa)
4445 {
4446 	struct if6_iter_state *state = seq->private;
4447 	struct net *net = seq_file_net(seq);
4448 
4449 	hlist_for_each_entry_continue_rcu(ifa, addr_lst) {
4450 		state->offset++;
4451 		return ifa;
4452 	}
4453 
	/* current chain done: move to the first entry of a later bucket */
4454 	state->offset = 0;
4455 	while (++state->bucket < IN6_ADDR_HSIZE) {
4456 		hlist_for_each_entry_rcu(ifa,
4457 				     &net->ipv6.inet6_addr_lst[state->bucket], addr_lst) {
4458 			return ifa;
4459 		}
4460 	}
4461 
4462 	return NULL;
4463 }
4464 
/* seq_file .start: take the RCU read lock (released in if6_seq_stop()). */
if6_seq_start(struct seq_file * seq,loff_t * pos)4465 static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
4466 	__acquires(rcu)
4467 {
4468 	rcu_read_lock();
4469 	return if6_get_first(seq, *pos);
4470 }
4471 
/* seq_file .next: bump the position and return the following address. */
static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return if6_get_next(seq, v);
}
4480 
/* seq_file .stop: drop the RCU read lock taken in if6_seq_start(). */
if6_seq_stop(struct seq_file * seq,void * v)4481 static void if6_seq_stop(struct seq_file *seq, void *v)
4482 	__releases(rcu)
4483 {
4484 	rcu_read_unlock();
4485 }
4486 
/*
 * Emit one /proc/net/if_inet6 line: address, ifindex, prefix length,
 * scope, low byte of the flags, and the device name (all hex except the
 * address and name).
 */
if6_seq_show(struct seq_file * seq,void * v)4487 static int if6_seq_show(struct seq_file *seq, void *v)
4488 {
4489 	struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
4490 	seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n",
4491 		   &ifp->addr,
4492 		   ifp->idev->dev->ifindex,
4493 		   ifp->prefix_len,
4494 		   ifp->scope,
4495 		   (u8) ifp->flags,
4496 		   ifp->idev->dev->name);
4497 	return 0;
4498 }
4499 
/* seq_file operations backing /proc/net/if_inet6. */
4500 static const struct seq_operations if6_seq_ops = {
4501 	.start	= if6_seq_start,
4502 	.next	= if6_seq_next,
4503 	.show	= if6_seq_show,
4504 	.stop	= if6_seq_stop,
4505 };
4506 
if6_proc_net_init(struct net * net)4507 static int __net_init if6_proc_net_init(struct net *net)
4508 {
4509 	if (!proc_create_net("if_inet6", 0444, net->proc_net, &if6_seq_ops,
4510 			sizeof(struct if6_iter_state)))
4511 		return -ENOMEM;
4512 	return 0;
4513 }
4514 
/* Remove /proc/net/if_inet6 for this network namespace. */
if6_proc_net_exit(struct net * net)4515 static void __net_exit if6_proc_net_exit(struct net *net)
4516 {
4517 	remove_proc_entry("if_inet6", net->proc_net);
4518 }
4519 
/* Per-netns setup/teardown of the if_inet6 proc entry. */
4520 static struct pernet_operations if6_proc_net_ops = {
4521 	.init = if6_proc_net_init,
4522 	.exit = if6_proc_net_exit,
4523 };
4524 
/* Register the per-netns /proc/net/if_inet6 handling at boot. */
if6_proc_init(void)4525 int __init if6_proc_init(void)
4526 {
4527 	return register_pernet_subsys(&if6_proc_net_ops);
4528 }
4529 
/* Unregister the per-netns /proc/net/if_inet6 handling. */
if6_proc_exit(void)4530 void if6_proc_exit(void)
4531 {
4532 	unregister_pernet_subsys(&if6_proc_net_ops);
4533 }
4534 #endif	/* CONFIG_PROC_FS */
4535 
4536 #if IS_ENABLED(CONFIG_IPV6_MIP6)
4537 /* Check if address is a home address configured on any interface. */
ipv6_chk_home_addr(struct net * net,const struct in6_addr * addr)4538 int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
4539 {
4540 	unsigned int hash = inet6_addr_hash(net, addr);
4541 	struct inet6_ifaddr *ifp = NULL;
4542 	int ret = 0;
4543 
4544 	rcu_read_lock();
4545 	hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
4546 		if (ipv6_addr_equal(&ifp->addr, addr) &&
4547 		    (ifp->flags & IFA_F_HOMEADDRESS)) {
4548 			ret = 1;
4549 			break;
4550 		}
4551 	}
4552 	rcu_read_unlock();
4553 	return ret;
4554 }
4555 #endif
4556 
4557 /* RFC6554 has some algorithm to avoid loops in segment routing by
4558  * checking if the segments contains any of a local interface address.
4559  *
4560  * Quote:
4561  *
4562  * To detect loops in the SRH, a router MUST determine if the SRH
4563  * includes multiple addresses assigned to any interface on that router.
4564  * If such addresses appear more than once and are separated by at least
4565  * one address not assigned to that router.
 *
 * (The quoted sentence is truncated; RFC 6554 continues: "..., the
 *  router MUST drop the packet." — see RFC 6554, section 4.1.)
4566  */
/*
 * Returns 1 when a loop per the rule above is detected in @segs
 * (@nsegs entries), 0 otherwise.  "found" counts segments that match a
 * local address; "separated" records that at least one non-local
 * segment was seen since the last local match.
 */
ipv6_chk_rpl_srh_loop(struct net * net,const struct in6_addr * segs,unsigned char nsegs)4567 int ipv6_chk_rpl_srh_loop(struct net *net, const struct in6_addr *segs,
4568 			  unsigned char nsegs)
4569 {
4570 	const struct in6_addr *addr;
4571 	int i, ret = 0, found = 0;
4572 	struct inet6_ifaddr *ifp;
4573 	bool separated = false;
4574 	unsigned int hash;
4575 	bool hash_found;
4576 
4577 	rcu_read_lock();
4578 	for (i = 0; i < nsegs; i++) {
4579 		addr = &segs[i];
4580 		hash = inet6_addr_hash(net, addr);
4581 
		/* is this segment one of our own addresses? */
4582 		hash_found = false;
4583 		hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
4584 
4585 			if (ipv6_addr_equal(&ifp->addr, addr)) {
4586 				hash_found = true;
4587 				break;
4588 			}
4589 		}
4590 
4591 		if (hash_found) {
			/* loop: local address seen again after a gap of
			 * non-local segments, with prior local matches */
4592 			if (found > 1 && separated) {
4593 				ret = 1;
4594 				break;
4595 			}
4596 
4597 			separated = false;
4598 			found++;
4599 		} else {
4600 			separated = true;
4601 		}
4602 	}
4603 	rcu_read_unlock();
4604 
4605 	return ret;
4606 }
4607 
4608 /*
4609  *	Periodic address status verification
4610  */
4611 
/*
 * Walk every configured address and act on expired lifetimes: delete
 * addresses past valid_lft, deprecate those past prefered_lft, and
 * regenerate temporary addresses shortly before they deprecate.  Also
 * computes the earliest next event and re-arms addr_chk_work for it
 * (bounded below by ADDRCONF_TIMER_FUZZ_MAX, rounded when close enough).
 *
 * Runs under RTNL; the RCU-bh read lock is dropped and re-taken around
 * ipv6_create_tempaddr()/ipv6_del_addr(), after which the whole bucket
 * is rescanned via "goto restart" since the chain may have changed.
 */
addrconf_verify_rtnl(struct net * net)4612 static void addrconf_verify_rtnl(struct net *net)
4613 {
4614 	unsigned long now, next, next_sec, next_sched;
4615 	struct inet6_ifaddr *ifp;
4616 	int i;
4617 
4618 	ASSERT_RTNL();
4619 
4620 	rcu_read_lock_bh();
4621 	now = jiffies;
4622 	next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
4623 
4624 	cancel_delayed_work(&net->ipv6.addr_chk_work);
4625 
4626 	for (i = 0; i < IN6_ADDR_HSIZE; i++) {
4627 restart:
4628 		hlist_for_each_entry_rcu_bh(ifp, &net->ipv6.inet6_addr_lst[i], addr_lst) {
4629 			unsigned long age;
4630 
4631 			/* When setting preferred_lft to a value not zero or
4632 			 * infinity, while valid_lft is infinity
4633 			 * IFA_F_PERMANENT has a non-infinity life time.
4634 			 */
4635 			if ((ifp->flags & IFA_F_PERMANENT) &&
4636 			    (ifp->prefered_lft == INFINITY_LIFE_TIME))
4637 				continue;
4638 
4639 			spin_lock(&ifp->lock);
4640 			/* We try to batch several events at once. */
4641 			age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
4642 
4643 			if ((ifp->flags&IFA_F_TEMPORARY) &&
4644 			    !(ifp->flags&IFA_F_TENTATIVE) &&
4645 			    ifp->prefered_lft != INFINITY_LIFE_TIME &&
4646 			    !ifp->regen_count && ifp->ifpub) {
4647 				/* This is a non-regenerated temporary addr. */
4648 
4649 				unsigned long regen_advance = ipv6_get_regen_advance(ifp->idev);
4650 
				/* regenerate regen_advance seconds before
				 * the temporary address deprecates */
4651 				if (age + regen_advance >= ifp->prefered_lft) {
4652 					struct inet6_ifaddr *ifpub = ifp->ifpub;
4653 					if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4654 						next = ifp->tstamp + ifp->prefered_lft * HZ;
4655 
4656 					ifp->regen_count++;
4657 					in6_ifa_hold(ifp);
4658 					in6_ifa_hold(ifpub);
4659 					spin_unlock(&ifp->lock);
4660 
4661 					spin_lock(&ifpub->lock);
4662 					ifpub->regen_count = 0;
4663 					spin_unlock(&ifpub->lock);
					/* drop RCU around the creation;
					 * rescan this bucket afterwards */
4664 					rcu_read_unlock_bh();
4665 					ipv6_create_tempaddr(ifpub, true);
4666 					in6_ifa_put(ifpub);
4667 					in6_ifa_put(ifp);
4668 					rcu_read_lock_bh();
4669 					goto restart;
4670 				} else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next))
4671 					next = ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ;
4672 			}
4673 
4674 			if (ifp->valid_lft != INFINITY_LIFE_TIME &&
4675 			    age >= ifp->valid_lft) {
				/* valid lifetime expired: delete */
4676 				spin_unlock(&ifp->lock);
4677 				in6_ifa_hold(ifp);
4678 				rcu_read_unlock_bh();
4679 				ipv6_del_addr(ifp);
4680 				rcu_read_lock_bh();
4681 				goto restart;
4682 			} else if (ifp->prefered_lft == INFINITY_LIFE_TIME) {
4683 				spin_unlock(&ifp->lock);
4684 				continue;
4685 			} else if (age >= ifp->prefered_lft) {
4686 				/* jiffies - ifp->tstamp > age >= ifp->prefered_lft */
4687 				int deprecate = 0;
4688 
4689 				if (!(ifp->flags&IFA_F_DEPRECATED)) {
4690 					deprecate = 1;
4691 					ifp->flags |= IFA_F_DEPRECATED;
4692 				}
4693 
4694 				if ((ifp->valid_lft != INFINITY_LIFE_TIME) &&
4695 				    (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)))
4696 					next = ifp->tstamp + ifp->valid_lft * HZ;
4697 
4698 				spin_unlock(&ifp->lock);
4699 
4700 				if (deprecate) {
4701 					in6_ifa_hold(ifp);
4702 
4703 					ipv6_ifa_notify(0, ifp);
4704 					in6_ifa_put(ifp);
4705 					goto restart;
4706 				}
4707 			} else {
4708 				/* ifp->prefered_lft <= ifp->valid_lft */
4709 				if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4710 					next = ifp->tstamp + ifp->prefered_lft * HZ;
4711 				spin_unlock(&ifp->lock);
4712 			}
4713 		}
4714 	}
4715 
4716 	next_sec = round_jiffies_up(next);
4717 	next_sched = next;
4718 
4719 	/* If rounded timeout is accurate enough, accept it. */
4720 	if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
4721 		next_sched = next_sec;
4722 
4723 	/* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
4724 	if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
4725 		next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
4726 
4727 	pr_debug("now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
4728 		 now, next, next_sec, next_sched);
4729 	mod_delayed_work(addrconf_wq, &net->ipv6.addr_chk_work, next_sched - now);
4730 	rcu_read_unlock_bh();
4731 }
4732 
/* Workqueue wrapper: run addrconf_verify_rtnl() under the per-netns RTNL. */
addrconf_verify_work(struct work_struct * w)4733 static void addrconf_verify_work(struct work_struct *w)
4734 {
4735 	struct net *net = container_of(to_delayed_work(w), struct net,
4736 				       ipv6.addr_chk_work);
4737 
4738 	rtnl_net_lock(net);
4739 	addrconf_verify_rtnl(net);
4740 	rtnl_net_unlock(net);
4741 }
4742 
/* Schedule the address verification work to run immediately. */
addrconf_verify(struct net * net)4743 static void addrconf_verify(struct net *net)
4744 {
4745 	mod_delayed_work(addrconf_wq, &net->ipv6.addr_chk_work, 0);
4746 }
4747 
extract_addr(struct nlattr * addr,struct nlattr * local,struct in6_addr ** peer_pfx)4748 static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local,
4749 				     struct in6_addr **peer_pfx)
4750 {
4751 	struct in6_addr *pfx = NULL;
4752 
4753 	*peer_pfx = NULL;
4754 
4755 	if (addr)
4756 		pfx = nla_data(addr);
4757 
4758 	if (local) {
4759 		if (pfx && nla_memcmp(local, pfx, sizeof(*pfx)))
4760 			*peer_pfx = pfx;
4761 		pfx = nla_data(local);
4762 	}
4763 
4764 	return pfx;
4765 }
4766 
/* Netlink attribute policy used when parsing IPv6 address requests
 * (see inet6_rtm_newaddr()/inet6_rtm_deladdr()).
 */
4767 static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = {
4768 	[IFA_ADDRESS]		= { .len = sizeof(struct in6_addr) },
4769 	[IFA_LOCAL]		= { .len = sizeof(struct in6_addr) },
4770 	[IFA_CACHEINFO]		= { .len = sizeof(struct ifa_cacheinfo) },
4771 	[IFA_FLAGS]		= { .len = sizeof(u32) },
4772 	[IFA_RT_PRIORITY]	= { .len = sizeof(u32) },
4773 	[IFA_TARGET_NETNSID]	= { .type = NLA_S32 },
4774 	[IFA_PROTO]		= { .type = NLA_U8 },
4775 };
4776 
/*
 * RTM_DELADDR handler: parse the request, extract the address (and
 * optional peer) and delete it under the per-netns RTNL.  Only the
 * IFA_F_MANAGETEMPADDR flag is honoured; all others are ignored.
 */
4777 static int
inet6_rtm_deladdr(struct sk_buff * skb,struct nlmsghdr * nlh,struct netlink_ext_ack * extack)4778 inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
4779 		  struct netlink_ext_ack *extack)
4780 {
4781 	struct net *net = sock_net(skb->sk);
4782 	struct ifaddrmsg *ifm;
4783 	struct nlattr *tb[IFA_MAX+1];
4784 	struct in6_addr *pfx, *peer_pfx;
4785 	u32 ifa_flags;
4786 	int err;
4787 
4788 	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
4789 				     ifa_ipv6_policy, extack);
4790 	if (err < 0)
4791 		return err;
4792 
4793 	ifm = nlmsg_data(nlh);
4794 	pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4795 	if (!pfx)
4796 		return -EINVAL;
4797 
4798 	ifa_flags = nla_get_u32_default(tb[IFA_FLAGS], ifm->ifa_flags);
4799 
4800 	/* We ignore other flags so far. */
4801 	ifa_flags &= IFA_F_MANAGETEMPADDR;
4802 
4803 	rtnl_net_lock(net);
4804 	err = inet6_addr_del(net, ifm->ifa_index, ifa_flags, pfx,
4805 			     ifm->ifa_prefixlen, extack);
4806 	rtnl_net_unlock(net);
4807 
4808 	return err;
4809 }
4810 
/*
 * Update the prefix route belonging to @ifp (or to its peer when
 * @modify_peer).  If the existing route's metric differs from the
 * requested priority the route is deleted and re-added; otherwise only
 * its expiry is adjusted in place under the table lock.  Returns
 * -ENOENT when no prefix route exists.
 */
modify_prefix_route(struct net * net,struct inet6_ifaddr * ifp,unsigned long expires,u32 flags,bool modify_peer)4811 static int modify_prefix_route(struct net *net, struct inet6_ifaddr *ifp,
4812 			       unsigned long expires, u32 flags,
4813 			       bool modify_peer)
4814 {
4815 	struct fib6_table *table;
4816 	struct fib6_info *f6i;
4817 	u32 prio;
4818 
4819 	f6i = addrconf_get_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
4820 					ifp->prefix_len,
4821 					ifp->idev->dev, 0, RTF_DEFAULT, true);
4822 	if (!f6i)
4823 		return -ENOENT;
4824 
4825 	prio = ifp->rt_priority ? : IP6_RT_PRIO_ADDRCONF;
4826 	if (f6i->fib6_metric != prio) {
4827 		/* delete old one */
4828 		ip6_del_rt(dev_net(ifp->idev->dev), f6i, false);
4829 
4830 		/* add new one */
4831 		addrconf_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
4832 				      ifp->prefix_len,
4833 				      ifp->rt_priority, ifp->idev->dev,
4834 				      expires, flags, GFP_KERNEL);
4835 		return 0;
4836 	}
	/* same metric: just update the expiry/GC state in place */
4837 	if (f6i != net->ipv6.fib6_null_entry) {
4838 		table = f6i->fib6_table;
4839 		spin_lock_bh(&table->tb6_lock);
4840 
4841 		if (!(flags & RTF_EXPIRES)) {
4842 			fib6_clean_expires(f6i);
4843 			fib6_remove_gc_list(f6i);
4844 		} else {
4845 			fib6_set_expires(f6i, expires);
4846 			fib6_add_gc_list(f6i);
4847 		}
4848 
4849 		spin_unlock_bh(&table->tb6_lock);
4850 	}
4851 	fib6_info_release(f6i);
4852 
4853 	return 0;
4854 }
4855 
/*
 * Apply an RTM_NEWADDR/NLM_F_REPLACE request to an existing address:
 * rewrite its flags/lifetimes/priority/peer under ifp->lock, then fix
 * up the prefix route(s) and temporary-address management to match, and
 * finally re-run lifetime verification.  Must run under the per-netns
 * RTNL.  Returns 0 or -EINVAL (MANAGETEMPADDR requested on a temporary
 * or non-/64 address).
 */
inet6_addr_modify(struct net * net,struct inet6_ifaddr * ifp,struct ifa6_config * cfg,clock_t expires,u32 flags)4856 static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp,
4857 			     struct ifa6_config *cfg, clock_t expires,
4858 			     u32 flags)
4859 {
4860 	bool was_managetempaddr;
4861 	bool new_peer = false;
4862 	bool had_prefixroute;
4863 
4864 	ASSERT_RTNL_NET(net);
4865 
4866 	if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR &&
4867 	    (ifp->flags & IFA_F_TEMPORARY || ifp->prefix_len != 64))
4868 		return -EINVAL;
4869 
	/* OPTIMISTIC only applies while the address is still tentative */
4870 	if (!(ifp->flags & IFA_F_TENTATIVE) || ifp->flags & IFA_F_DADFAILED)
4871 		cfg->ifa_flags &= ~IFA_F_OPTIMISTIC;
4872 
	/* peer changed: drop the old peer's prefix route first */
4873 	if (cfg->peer_pfx &&
4874 	    memcmp(&ifp->peer_addr, cfg->peer_pfx, sizeof(struct in6_addr))) {
4875 		if (!ipv6_addr_any(&ifp->peer_addr))
4876 			cleanup_prefix_route(ifp, expires, true, true);
4877 		new_peer = true;
4878 	}
4879 
4880 	spin_lock_bh(&ifp->lock);
4881 	was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR;
4882 	had_prefixroute = ifp->flags & IFA_F_PERMANENT &&
4883 			  !(ifp->flags & IFA_F_NOPREFIXROUTE);
4884 	ifp->flags &= ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD |
4885 			IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR |
4886 			IFA_F_NOPREFIXROUTE);
4887 	ifp->flags |= cfg->ifa_flags;
4888 	WRITE_ONCE(ifp->tstamp, jiffies);
4889 	WRITE_ONCE(ifp->valid_lft, cfg->valid_lft);
4890 	WRITE_ONCE(ifp->prefered_lft, cfg->preferred_lft);
4891 	WRITE_ONCE(ifp->ifa_proto, cfg->ifa_proto);
4892 
4893 	if (cfg->rt_priority && cfg->rt_priority != ifp->rt_priority)
4894 		WRITE_ONCE(ifp->rt_priority, cfg->rt_priority);
4895 
4896 	if (new_peer)
4897 		ifp->peer_addr = *cfg->peer_pfx;
4898 
4899 	spin_unlock_bh(&ifp->lock);
4900 	if (!(ifp->flags&IFA_F_TENTATIVE))
4901 		ipv6_ifa_notify(0, ifp);
4902 
4903 	if (!(cfg->ifa_flags & IFA_F_NOPREFIXROUTE)) {
4904 		int rc = -ENOENT;
4905 
4906 		if (had_prefixroute)
4907 			rc = modify_prefix_route(net, ifp, expires, flags, false);
4908 
4909 		/* prefix route could have been deleted; if so restore it */
4910 		if (rc == -ENOENT) {
4911 			addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
4912 					      ifp->rt_priority, ifp->idev->dev,
4913 					      expires, flags, GFP_KERNEL);
4914 		}
4915 
4916 		if (had_prefixroute && !ipv6_addr_any(&ifp->peer_addr))
4917 			rc = modify_prefix_route(net, ifp, expires, flags, true);
4918 
4919 		if (rc == -ENOENT && !ipv6_addr_any(&ifp->peer_addr)) {
4920 			addrconf_prefix_route(&ifp->peer_addr, ifp->prefix_len,
4921 					      ifp->rt_priority, ifp->idev->dev,
4922 					      expires, flags, GFP_KERNEL);
4923 		}
4924 	} else if (had_prefixroute) {
		/* NOPREFIXROUTE newly requested: decide whether the old
		 * prefix route must be deleted or just expired */
4925 		enum cleanup_prefix_rt_t action;
4926 		unsigned long rt_expires;
4927 
4928 		write_lock_bh(&ifp->idev->lock);
4929 		action = check_cleanup_prefix_route(ifp, &rt_expires);
4930 		write_unlock_bh(&ifp->idev->lock);
4931 
4932 		if (action != CLEANUP_PREFIX_RT_NOP) {
4933 			cleanup_prefix_route(ifp, rt_expires,
4934 				action == CLEANUP_PREFIX_RT_DEL, false);
4935 		}
4936 	}
4937 
4938 	if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) {
4939 		if (was_managetempaddr && !(ifp->flags & IFA_F_MANAGETEMPADDR))
4940 			delete_tempaddrs(ifp->idev, ifp);
4941 		else
4942 			manage_tempaddrs(ifp->idev, ifp, cfg->valid_lft,
4943 					 cfg->preferred_lft, !was_managetempaddr,
4944 					 jiffies);
4945 	}
4946 
4947 	addrconf_verify_rtnl(net);
4948 
4949 	return 0;
4950 }
4951 
static int
inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
		  struct netlink_ext_ack *extack)
{
	/* RTM_NEWADDR handler: add a new IPv6 address on an interface,
	 * or modify an existing one when NLM_F_REPLACE is set.
	 */
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[IFA_MAX+1];
	struct in6_addr *peer_pfx;
	struct inet6_ifaddr *ifa;
	struct net_device *dev;
	struct inet6_dev *idev;
	struct ifa6_config cfg;
	struct ifaddrmsg *ifm;
	unsigned long timeout;
	clock_t expires;
	u32 flags;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
				     ifa_ipv6_policy, extack);
	if (err < 0)
		return err;

	memset(&cfg, 0, sizeof(cfg));

	ifm = nlmsg_data(nlh);
	/* The address may come in IFA_ADDRESS or IFA_LOCAL (with an
	 * optional peer); extract_addr() sorts that out for us.
	 */
	cfg.pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
	if (!cfg.pfx)
		return -EINVAL;

	cfg.peer_pfx = peer_pfx;
	cfg.plen = ifm->ifa_prefixlen;
	if (tb[IFA_RT_PRIORITY])
		cfg.rt_priority = nla_get_u32(tb[IFA_RT_PRIORITY]);

	if (tb[IFA_PROTO])
		cfg.ifa_proto = nla_get_u8(tb[IFA_PROTO]);

	/* IFA_FLAGS (u32) supersedes the 8-bit ifa_flags header field. */
	cfg.ifa_flags = nla_get_u32_default(tb[IFA_FLAGS], ifm->ifa_flags);

	/* We ignore other flags so far. */
	cfg.ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS |
			 IFA_F_MANAGETEMPADDR | IFA_F_NOPREFIXROUTE |
			 IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC;

	/* Default to a permanent address with infinite lifetimes;
	 * IFA_CACHEINFO below may override this.
	 */
	cfg.ifa_flags |= IFA_F_PERMANENT;
	cfg.valid_lft = INFINITY_LIFE_TIME;
	cfg.preferred_lft = INFINITY_LIFE_TIME;
	expires = 0;
	flags = 0;

	if (tb[IFA_CACHEINFO]) {
		struct ifa_cacheinfo *ci;

		ci = nla_data(tb[IFA_CACHEINFO]);
		cfg.valid_lft = ci->ifa_valid;
		cfg.preferred_lft = ci->ifa_prefered;

		/* preferred must not exceed valid, and valid must be > 0 */
		if (!cfg.valid_lft || cfg.preferred_lft > cfg.valid_lft) {
			NL_SET_ERR_MSG_MOD(extack, "address lifetime invalid");
			return -EINVAL;
		}

		/* A finite valid lifetime makes the address non-permanent
		 * and attaches an expiry (RTF_EXPIRES) to its routes.
		 */
		timeout = addrconf_timeout_fixup(cfg.valid_lft, HZ);
		if (addrconf_finite_timeout(timeout)) {
			cfg.ifa_flags &= ~IFA_F_PERMANENT;
			cfg.valid_lft = timeout;
			expires = jiffies_to_clock_t(timeout * HZ);
			flags = RTF_EXPIRES;
		}

		/* A finite preferred lifetime of zero means the address
		 * is immediately deprecated.
		 */
		timeout = addrconf_timeout_fixup(cfg.preferred_lft, HZ);
		if (addrconf_finite_timeout(timeout)) {
			if (timeout == 0)
				cfg.ifa_flags |= IFA_F_DEPRECATED;

			cfg.preferred_lft = timeout;
		}
	}

	/* Lock order: per-netns RTNL first, then the device instance lock. */
	rtnl_net_lock(net);

	dev =  __dev_get_by_index(net, ifm->ifa_index);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to find the interface");
		err = -ENODEV;
		goto unlock_rtnl;
	}

	netdev_lock_ops(dev);
	idev = ipv6_find_idev(dev);
	if (IS_ERR(idev)) {
		err = PTR_ERR(idev);
		goto unlock;
	}

	if (!ipv6_allow_optimistic_dad(net, idev))
		cfg.ifa_flags &= ~IFA_F_OPTIMISTIC;

	if (cfg.ifa_flags & IFA_F_NODAD &&
	    cfg.ifa_flags & IFA_F_OPTIMISTIC) {
		NL_SET_ERR_MSG(extack, "IFA_F_NODAD and IFA_F_OPTIMISTIC are mutually exclusive");
		err = -EINVAL;
		goto unlock;
	}

	ifa = ipv6_get_ifaddr(net, cfg.pfx, dev, 1);
	if (!ifa) {
		/*
		 * It would be best to check for !NLM_F_CREATE here but
		 * userspace already relies on not having to provide this.
		 */
		err = inet6_addr_add(net, dev, &cfg, expires, flags, extack);
		goto unlock;
	}

	/* Address exists: NLM_F_EXCL or missing NLM_F_REPLACE is an error,
	 * otherwise update the existing entry in place.
	 */
	if (nlh->nlmsg_flags & NLM_F_EXCL ||
	    !(nlh->nlmsg_flags & NLM_F_REPLACE)) {
		NL_SET_ERR_MSG_MOD(extack, "address already assigned");
		err = -EEXIST;
	} else {
		err = inet6_addr_modify(net, ifa, &cfg, expires, flags);
	}

	/* Drop the reference taken by ipv6_get_ifaddr(). */
	in6_ifa_put(ifa);
unlock:
	netdev_unlock_ops(dev);
unlock_rtnl:
	rtnl_net_unlock(net);

	return err;
}
5083 
/* Fill the fixed ifaddrmsg header of an address netlink message.
 * Note: ifa_flags is only 8 bits wide; callers that need the full
 * flag set must also emit an IFA_FLAGS attribute.
 */
static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u32 flags,
			  u8 scope, int ifindex)
{
	struct ifaddrmsg *ifm = nlmsg_data(nlh);

	ifm->ifa_family = AF_INET6;
	ifm->ifa_prefixlen = prefixlen;
	ifm->ifa_flags = flags;
	ifm->ifa_scope = scope;
	ifm->ifa_index = ifindex;
}
5096 
/* Emit an IFA_CACHEINFO attribute carrying creation/update stamps
 * (converted to clock_t deltas) and the remaining lifetimes.
 * Returns 0 on success or the nla_put() error on lack of room.
 */
static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
			 unsigned long tstamp, u32 preferred, u32 valid)
{
	struct ifa_cacheinfo ci = {
		.cstamp		= cstamp_delta(cstamp),
		.tstamp		= cstamp_delta(tstamp),
		.ifa_prefered	= preferred,
		.ifa_valid	= valid,
	};

	return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
}
5109 
rt_scope(int ifa_scope)5110 static inline int rt_scope(int ifa_scope)
5111 {
5112 	if (ifa_scope & IFA_HOST)
5113 		return RT_SCOPE_HOST;
5114 	else if (ifa_scope & IFA_LINK)
5115 		return RT_SCOPE_LINK;
5116 	else if (ifa_scope & IFA_SITE)
5117 		return RT_SCOPE_SITE;
5118 	else
5119 		return RT_SCOPE_UNIVERSE;
5120 }
5121 
inet6_ifaddr_msgsize(void)5122 static inline int inet6_ifaddr_msgsize(void)
5123 {
5124 	return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
5125 	       + nla_total_size(16) /* IFA_LOCAL */
5126 	       + nla_total_size(16) /* IFA_ADDRESS */
5127 	       + nla_total_size(sizeof(struct ifa_cacheinfo))
5128 	       + nla_total_size(4)  /* IFA_FLAGS */
5129 	       + nla_total_size(1)  /* IFA_PROTO */
5130 	       + nla_total_size(4)  /* IFA_RT_PRIORITY */;
5131 }
5132 
inet6_fill_ifaddr(struct sk_buff * skb,const struct inet6_ifaddr * ifa,struct inet6_fill_args * args)5133 static int inet6_fill_ifaddr(struct sk_buff *skb,
5134 			     const struct inet6_ifaddr *ifa,
5135 			     struct inet6_fill_args *args)
5136 {
5137 	struct nlmsghdr *nlh;
5138 	u32 preferred, valid;
5139 	u32 flags, priority;
5140 	u8 proto;
5141 
5142 	nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
5143 			sizeof(struct ifaddrmsg), args->flags);
5144 	if (!nlh)
5145 		return -EMSGSIZE;
5146 
5147 	flags = READ_ONCE(ifa->flags);
5148 	put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
5149 		      ifa->idev->dev->ifindex);
5150 
5151 	if (args->netnsid >= 0 &&
5152 	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
5153 		goto error;
5154 
5155 	preferred = READ_ONCE(ifa->prefered_lft);
5156 	valid = READ_ONCE(ifa->valid_lft);
5157 
5158 	if (!((flags & IFA_F_PERMANENT) &&
5159 	      (preferred == INFINITY_LIFE_TIME))) {
5160 		if (preferred != INFINITY_LIFE_TIME) {
5161 			long tval = (jiffies - READ_ONCE(ifa->tstamp)) / HZ;
5162 
5163 			if (preferred > tval)
5164 				preferred -= tval;
5165 			else
5166 				preferred = 0;
5167 			if (valid != INFINITY_LIFE_TIME) {
5168 				if (valid > tval)
5169 					valid -= tval;
5170 				else
5171 					valid = 0;
5172 			}
5173 		}
5174 	} else {
5175 		preferred = INFINITY_LIFE_TIME;
5176 		valid = INFINITY_LIFE_TIME;
5177 	}
5178 
5179 	if (!ipv6_addr_any(&ifa->peer_addr)) {
5180 		if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 ||
5181 		    nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->peer_addr) < 0)
5182 			goto error;
5183 	} else {
5184 		if (nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->addr) < 0)
5185 			goto error;
5186 	}
5187 
5188 	priority = READ_ONCE(ifa->rt_priority);
5189 	if (priority && nla_put_u32(skb, IFA_RT_PRIORITY, priority))
5190 		goto error;
5191 
5192 	if (put_cacheinfo(skb, ifa->cstamp, READ_ONCE(ifa->tstamp),
5193 			  preferred, valid) < 0)
5194 		goto error;
5195 
5196 	if (nla_put_u32(skb, IFA_FLAGS, flags) < 0)
5197 		goto error;
5198 
5199 	proto = READ_ONCE(ifa->ifa_proto);
5200 	if (proto && nla_put_u8(skb, IFA_PROTO, proto))
5201 		goto error;
5202 
5203 	nlmsg_end(skb, nlh);
5204 	return 0;
5205 
5206 error:
5207 	nlmsg_cancel(skb, nlh);
5208 	return -EMSGSIZE;
5209 }
5210 
inet6_fill_ifmcaddr(struct sk_buff * skb,const struct ifmcaddr6 * ifmca,struct inet6_fill_args * args)5211 int inet6_fill_ifmcaddr(struct sk_buff *skb,
5212 			const struct ifmcaddr6 *ifmca,
5213 			struct inet6_fill_args *args)
5214 {
5215 	int ifindex = ifmca->idev->dev->ifindex;
5216 	u8 scope = RT_SCOPE_UNIVERSE;
5217 	struct nlmsghdr *nlh;
5218 
5219 	if (!args->force_rt_scope_universe &&
5220 	    ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE)
5221 		scope = RT_SCOPE_SITE;
5222 
5223 	nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
5224 			sizeof(struct ifaddrmsg), args->flags);
5225 	if (!nlh)
5226 		return -EMSGSIZE;
5227 
5228 	if (args->netnsid >= 0 &&
5229 	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) {
5230 		nlmsg_cancel(skb, nlh);
5231 		return -EMSGSIZE;
5232 	}
5233 
5234 	put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
5235 	if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 ||
5236 	    put_cacheinfo(skb, ifmca->mca_cstamp, READ_ONCE(ifmca->mca_tstamp),
5237 			  INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
5238 		nlmsg_cancel(skb, nlh);
5239 		return -EMSGSIZE;
5240 	}
5241 
5242 	nlmsg_end(skb, nlh);
5243 	return 0;
5244 }
5245 
inet6_fill_ifacaddr(struct sk_buff * skb,const struct ifacaddr6 * ifaca,struct inet6_fill_args * args)5246 int inet6_fill_ifacaddr(struct sk_buff *skb,
5247 			const struct ifacaddr6 *ifaca,
5248 			struct inet6_fill_args *args)
5249 {
5250 	struct net_device *dev = fib6_info_nh_dev(ifaca->aca_rt);
5251 	int ifindex = dev ? dev->ifindex : 1;
5252 	u8 scope = RT_SCOPE_UNIVERSE;
5253 	struct nlmsghdr *nlh;
5254 
5255 	if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE)
5256 		scope = RT_SCOPE_SITE;
5257 
5258 	nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
5259 			sizeof(struct ifaddrmsg), args->flags);
5260 	if (!nlh)
5261 		return -EMSGSIZE;
5262 
5263 	if (args->netnsid >= 0 &&
5264 	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) {
5265 		nlmsg_cancel(skb, nlh);
5266 		return -EMSGSIZE;
5267 	}
5268 
5269 	put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
5270 	if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 ||
5271 	    put_cacheinfo(skb, ifaca->aca_cstamp, READ_ONCE(ifaca->aca_tstamp),
5272 			  INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
5273 		nlmsg_cancel(skb, nlh);
5274 		return -EMSGSIZE;
5275 	}
5276 
5277 	nlmsg_end(skb, nlh);
5278 	return 0;
5279 }
5280 
5281 /* called with rcu_read_lock() */
static int in6_dump_addrs(const struct inet6_dev *idev, struct sk_buff *skb,
			  struct netlink_callback *cb, int *s_ip_idx,
			  struct inet6_fill_args *fillargs)
{
	/* Dump unicast, multicast or anycast addresses of one inet6_dev,
	 * resuming at index *s_ip_idx.  On a full skb, *s_ip_idx is set to
	 * the index to resume from on the next dump pass; on a complete
	 * pass it is reset to 0 so the next device starts from scratch.
	 */
	const struct ifmcaddr6 *ifmca;
	const struct ifacaddr6 *ifaca;
	int ip_idx = 0;
	int err = 0;

	switch (fillargs->type) {
	case UNICAST_ADDR: {
		const struct inet6_ifaddr *ifa;
		fillargs->event = RTM_NEWADDR;

		/* unicast address incl. temp addr */
		list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
			if (ip_idx < *s_ip_idx)
				goto next;
			err = inet6_fill_ifaddr(skb, ifa, fillargs);
			if (err < 0)
				break;
			/* flag the dump inconsistent if the list changed */
			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
next:
			ip_idx++;
		}
		break;
	}
	case MULTICAST_ADDR:
		fillargs->event = RTM_GETMULTICAST;

		/* multicast address */
		for (ifmca = rcu_dereference(idev->mc_list);
		     ifmca;
		     ifmca = rcu_dereference(ifmca->next), ip_idx++) {
			if (ip_idx < *s_ip_idx)
				continue;
			err = inet6_fill_ifmcaddr(skb, ifmca, fillargs);
			if (err < 0)
				break;
		}
		break;
	case ANYCAST_ADDR:
		fillargs->event = RTM_GETANYCAST;
		/* anycast address */
		for (ifaca = rcu_dereference(idev->ac_list); ifaca;
		     ifaca = rcu_dereference(ifaca->aca_next), ip_idx++) {
			if (ip_idx < *s_ip_idx)
				continue;
			err = inet6_fill_ifacaddr(skb, ifaca, fillargs);
			if (err < 0)
				break;
		}
		break;
	default:
		break;
	}
	/* err != 0 means the skb filled up mid-device: remember where. */
	*s_ip_idx = err ? ip_idx : 0;
	return err;
}
5341 
static int inet6_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
				       struct inet6_fill_args *fillargs,
				       struct net **tgt_net, struct sock *sk,
				       struct netlink_callback *cb)
{
	/* Strict validation of an RTM_GETADDR dump request.  On success,
	 * fills in fillargs (ifindex filter, netnsid) and, when
	 * IFA_TARGET_NETNSID is present, stores a referenced target netns
	 * in *tgt_net (the caller owns the reference and must put_net()).
	 */
	struct netlink_ext_ack *extack = cb->extack;
	struct nlattr *tb[IFA_MAX+1];
	struct ifaddrmsg *ifm;
	int err, i;

	ifm = nlmsg_payload(nlh, sizeof(*ifm));
	if (!ifm) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid header for address dump request");
		return -EINVAL;
	}

	/* A dump request must leave the filter fields zeroed. */
	if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for address dump request");
		return -EINVAL;
	}

	fillargs->ifindex = ifm->ifa_index;
	if (fillargs->ifindex) {
		/* Single-device dump: tell userspace it was filtered. */
		cb->answer_flags |= NLM_F_DUMP_FILTERED;
		fillargs->flags |= NLM_F_DUMP_FILTERED;
	}

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
					    ifa_ipv6_policy, extack);
	if (err < 0)
		return err;

	/* Only IFA_TARGET_NETNSID is meaningful on a dump request. */
	for (i = 0; i <= IFA_MAX; ++i) {
		if (!tb[i])
			continue;

		if (i == IFA_TARGET_NETNSID) {
			struct net *net;

			fillargs->netnsid = nla_get_s32(tb[i]);
			net = rtnl_get_net_ns_capable(sk, fillargs->netnsid);
			if (IS_ERR(net)) {
				fillargs->netnsid = -1;
				NL_SET_ERR_MSG_MOD(extack, "Invalid target network namespace id");
				return PTR_ERR(net);
			}
			*tgt_net = net;
		} else {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in dump request");
			return -EINVAL;
		}
	}

	return 0;
}
5397 
static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
			   enum addr_type_t type)
{
	/* Common dump loop for unicast/multicast/anycast address dumps.
	 * Runs entirely under rcu_read_lock(); per-device address lists
	 * are traversed with RCU-safe iterators in in6_dump_addrs().
	 */
	struct net *tgt_net = sock_net(skb->sk);
	const struct nlmsghdr *nlh = cb->nlh;
	struct inet6_fill_args fillargs = {
		.portid = NETLINK_CB(cb->skb).portid,
		.seq = cb->nlh->nlmsg_seq,
		.flags = NLM_F_MULTI,
		.netnsid = -1,
		.type = type,
		.force_rt_scope_universe = false,
	};
	/* Dump resume state persisted in cb->ctx across dump calls:
	 * last device walked and the address index within it.
	 */
	struct {
		unsigned long ifindex;
		int ip_idx;
	} *ctx = (void *)cb->ctx;
	struct net_device *dev;
	struct inet6_dev *idev;
	int err = 0;

	rcu_read_lock();
	if (cb->strict_check) {
		/* May retarget tgt_net (referenced iff fillargs.netnsid >= 0). */
		err = inet6_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
						  skb->sk, cb);
		if (err < 0)
			goto done;

		err = 0;
		if (fillargs.ifindex) {
			/* Filtered dump: a single device, then we're done. */
			dev = dev_get_by_index_rcu(tgt_net, fillargs.ifindex);
			if (!dev) {
				err = -ENODEV;
				goto done;
			}
			idev = __in6_dev_get(dev);
			if (idev)
				err = in6_dump_addrs(idev, skb, cb,
						     &ctx->ip_idx,
						     &fillargs);
			goto done;
		}
	}

	/* Full dump: walk every netdev, resuming at ctx->ifindex. */
	cb->seq = inet6_base_seq(tgt_net);
	for_each_netdev_dump(tgt_net, dev, ctx->ifindex) {
		idev = __in6_dev_get(dev);
		if (!idev)
			continue;
		err = in6_dump_addrs(idev, skb, cb, &ctx->ip_idx,
				     &fillargs);
		if (err < 0)
			goto done;
	}
done:
	rcu_read_unlock();
	/* Drop the netns reference taken for IFA_TARGET_NETNSID. */
	if (fillargs.netnsid >= 0)
		put_net(tgt_net);

	return err;
}
5459 
inet6_dump_ifaddr(struct sk_buff * skb,struct netlink_callback * cb)5460 static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
5461 {
5462 	enum addr_type_t type = UNICAST_ADDR;
5463 
5464 	return inet6_dump_addr(skb, cb, type);
5465 }
5466 
inet6_dump_ifmcaddr(struct sk_buff * skb,struct netlink_callback * cb)5467 static int inet6_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb)
5468 {
5469 	enum addr_type_t type = MULTICAST_ADDR;
5470 
5471 	return inet6_dump_addr(skb, cb, type);
5472 }
5473 
5474 
inet6_dump_ifacaddr(struct sk_buff * skb,struct netlink_callback * cb)5475 static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
5476 {
5477 	enum addr_type_t type = ANYCAST_ADDR;
5478 
5479 	return inet6_dump_addr(skb, cb, type);
5480 }
5481 
static int inet6_rtm_valid_getaddr_req(struct sk_buff *skb,
				       const struct nlmsghdr *nlh,
				       struct nlattr **tb,
				       struct netlink_ext_ack *extack)
{
	/* Validate a non-dump RTM_GETADDR request and parse its
	 * attributes into tb[].  Legacy (non-strict) sockets get the
	 * permissive parse for backward compatibility; strict sockets
	 * additionally require zeroed header filter fields and reject
	 * attributes other than the three listed below.
	 */
	struct ifaddrmsg *ifm;
	int i, err;

	ifm = nlmsg_payload(nlh, sizeof(*ifm));
	if (!ifm) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid header for get address request");
		return -EINVAL;
	}

	if (!netlink_strict_get_check(skb))
		return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
					      ifa_ipv6_policy, extack);

	if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get address request");
		return -EINVAL;
	}

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
					    ifa_ipv6_policy, extack);
	if (err)
		return err;

	for (i = 0; i <= IFA_MAX; i++) {
		if (!tb[i])
			continue;

		switch (i) {
		case IFA_TARGET_NETNSID:
		case IFA_ADDRESS:
		case IFA_LOCAL:
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get address request");
			return -EINVAL;
		}
	}

	return 0;
}
5527 
static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	/* Non-dump RTM_GETADDR handler: look up a single address
	 * (optionally restricted to one device and/or another netns)
	 * and unicast the filled RTM_NEWADDR reply to the requester.
	 */
	struct net *tgt_net = sock_net(in_skb->sk);
	struct inet6_fill_args fillargs = {
		.portid = NETLINK_CB(in_skb).portid,
		.seq = nlh->nlmsg_seq,
		.event = RTM_NEWADDR,
		.flags = 0,
		.netnsid = -1,
		.force_rt_scope_universe = false,
	};
	struct ifaddrmsg *ifm;
	struct nlattr *tb[IFA_MAX+1];
	struct in6_addr *addr = NULL, *peer;
	struct net_device *dev = NULL;
	struct inet6_ifaddr *ifa;
	struct sk_buff *skb;
	int err;

	err = inet6_rtm_valid_getaddr_req(in_skb, nlh, tb, extack);
	if (err < 0)
		return err;

	if (tb[IFA_TARGET_NETNSID]) {
		fillargs.netnsid = nla_get_s32(tb[IFA_TARGET_NETNSID]);

		/* Referenced netns; released below iff netnsid >= 0. */
		tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(in_skb).sk,
						  fillargs.netnsid);
		if (IS_ERR(tgt_net))
			return PTR_ERR(tgt_net);
	}

	addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
	if (!addr) {
		err = -EINVAL;
		goto errout;
	}
	ifm = nlmsg_data(nlh);
	/* ifa_index == 0 means "any device". */
	if (ifm->ifa_index)
		dev = dev_get_by_index(tgt_net, ifm->ifa_index);

	/* Takes a reference on the returned ifa. */
	ifa = ipv6_get_ifaddr(tgt_net, addr, dev, 1);
	if (!ifa) {
		err = -EADDRNOTAVAIL;
		goto errout;
	}

	skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL);
	if (!skb) {
		err = -ENOBUFS;
		goto errout_ifa;
	}

	err = inet6_fill_ifaddr(skb, ifa, &fillargs);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout_ifa;
	}
	/* rtnl_unicast() consumes skb regardless of outcome. */
	err = rtnl_unicast(skb, tgt_net, NETLINK_CB(in_skb).portid);
errout_ifa:
	in6_ifa_put(ifa);
errout:
	dev_put(dev);
	if (fillargs.netnsid >= 0)
		put_net(tgt_net);

	return err;
}
5599 
static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
{
	/* Broadcast an address change (RTM_NEWADDR/RTM_DELADDR) to the
	 * RTNLGRP_IPV6_IFADDR multicast group.  May run in atomic
	 * context, hence GFP_ATOMIC.  On failure the error is recorded
	 * on the group so listeners see ENOBUFS.
	 */
	struct sk_buff *skb;
	struct net *net = dev_net(ifa->idev->dev);
	struct inet6_fill_args fillargs = {
		.portid = 0,
		.seq = 0,
		.event = event,
		.flags = 0,
		.netnsid = -1,
		.force_rt_scope_universe = false,
	};
	int err = -ENOBUFS;

	skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = inet6_fill_ifaddr(skb, ifa, &fillargs);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
}
5630 
static void ipv6_store_devconf(const struct ipv6_devconf *cnf,
			       __s32 *array, int bytes)
{
	/* Serialize per-device IPv6 sysctl settings into the flat s32
	 * array exported as IFLA_INET6_CONF, indexed by the DEVCONF_*
	 * enum.  Fields are read with READ_ONCE() as writers update
	 * them locklessly; jiffies-based values are converted to msecs.
	 */
	BUG_ON(bytes < (DEVCONF_MAX * 4));

	memset(array, 0, bytes);
	array[DEVCONF_FORWARDING] = READ_ONCE(cnf->forwarding);
	array[DEVCONF_HOPLIMIT] = READ_ONCE(cnf->hop_limit);
	array[DEVCONF_MTU6] = READ_ONCE(cnf->mtu6);
	array[DEVCONF_ACCEPT_RA] = READ_ONCE(cnf->accept_ra);
	array[DEVCONF_ACCEPT_REDIRECTS] = READ_ONCE(cnf->accept_redirects);
	array[DEVCONF_AUTOCONF] = READ_ONCE(cnf->autoconf);
	array[DEVCONF_DAD_TRANSMITS] = READ_ONCE(cnf->dad_transmits);
	array[DEVCONF_RTR_SOLICITS] = READ_ONCE(cnf->rtr_solicits);
	array[DEVCONF_RTR_SOLICIT_INTERVAL] =
		jiffies_to_msecs(READ_ONCE(cnf->rtr_solicit_interval));
	array[DEVCONF_RTR_SOLICIT_MAX_INTERVAL] =
		jiffies_to_msecs(READ_ONCE(cnf->rtr_solicit_max_interval));
	array[DEVCONF_RTR_SOLICIT_DELAY] =
		jiffies_to_msecs(READ_ONCE(cnf->rtr_solicit_delay));
	array[DEVCONF_FORCE_MLD_VERSION] = READ_ONCE(cnf->force_mld_version);
	array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] =
		jiffies_to_msecs(READ_ONCE(cnf->mldv1_unsolicited_report_interval));
	array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] =
		jiffies_to_msecs(READ_ONCE(cnf->mldv2_unsolicited_report_interval));
	array[DEVCONF_USE_TEMPADDR] = READ_ONCE(cnf->use_tempaddr);
	array[DEVCONF_TEMP_VALID_LFT] = READ_ONCE(cnf->temp_valid_lft);
	array[DEVCONF_TEMP_PREFERED_LFT] = READ_ONCE(cnf->temp_prefered_lft);
	array[DEVCONF_REGEN_MAX_RETRY] = READ_ONCE(cnf->regen_max_retry);
	array[DEVCONF_MAX_DESYNC_FACTOR] = READ_ONCE(cnf->max_desync_factor);
	array[DEVCONF_MAX_ADDRESSES] = READ_ONCE(cnf->max_addresses);
	array[DEVCONF_ACCEPT_RA_DEFRTR] = READ_ONCE(cnf->accept_ra_defrtr);
	array[DEVCONF_RA_DEFRTR_METRIC] = READ_ONCE(cnf->ra_defrtr_metric);
	array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] =
		READ_ONCE(cnf->accept_ra_min_hop_limit);
	array[DEVCONF_ACCEPT_RA_PINFO] = READ_ONCE(cnf->accept_ra_pinfo);
#ifdef CONFIG_IPV6_ROUTER_PREF
	array[DEVCONF_ACCEPT_RA_RTR_PREF] = READ_ONCE(cnf->accept_ra_rtr_pref);
	array[DEVCONF_RTR_PROBE_INTERVAL] =
		jiffies_to_msecs(READ_ONCE(cnf->rtr_probe_interval));
#ifdef CONFIG_IPV6_ROUTE_INFO
	array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] =
		READ_ONCE(cnf->accept_ra_rt_info_min_plen);
	array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] =
		READ_ONCE(cnf->accept_ra_rt_info_max_plen);
#endif
#endif
	array[DEVCONF_PROXY_NDP] = READ_ONCE(cnf->proxy_ndp);
	array[DEVCONF_ACCEPT_SOURCE_ROUTE] =
		READ_ONCE(cnf->accept_source_route);
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	array[DEVCONF_OPTIMISTIC_DAD] = READ_ONCE(cnf->optimistic_dad);
	array[DEVCONF_USE_OPTIMISTIC] = READ_ONCE(cnf->use_optimistic);
#endif
#ifdef CONFIG_IPV6_MROUTE
	array[DEVCONF_MC_FORWARDING] = atomic_read(&cnf->mc_forwarding);
#endif
	array[DEVCONF_DISABLE_IPV6] = READ_ONCE(cnf->disable_ipv6);
	array[DEVCONF_ACCEPT_DAD] = READ_ONCE(cnf->accept_dad);
	array[DEVCONF_FORCE_TLLAO] = READ_ONCE(cnf->force_tllao);
	array[DEVCONF_NDISC_NOTIFY] = READ_ONCE(cnf->ndisc_notify);
	array[DEVCONF_SUPPRESS_FRAG_NDISC] =
		READ_ONCE(cnf->suppress_frag_ndisc);
	array[DEVCONF_ACCEPT_RA_FROM_LOCAL] =
		READ_ONCE(cnf->accept_ra_from_local);
	array[DEVCONF_ACCEPT_RA_MTU] = READ_ONCE(cnf->accept_ra_mtu);
	array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] =
		READ_ONCE(cnf->ignore_routes_with_linkdown);
	/* we omit DEVCONF_STABLE_SECRET for now */
	array[DEVCONF_USE_OIF_ADDRS_ONLY] = READ_ONCE(cnf->use_oif_addrs_only);
	array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] =
		READ_ONCE(cnf->drop_unicast_in_l2_multicast);
	array[DEVCONF_DROP_UNSOLICITED_NA] = READ_ONCE(cnf->drop_unsolicited_na);
	array[DEVCONF_KEEP_ADDR_ON_DOWN] = READ_ONCE(cnf->keep_addr_on_down);
	array[DEVCONF_SEG6_ENABLED] = READ_ONCE(cnf->seg6_enabled);
#ifdef CONFIG_IPV6_SEG6_HMAC
	array[DEVCONF_SEG6_REQUIRE_HMAC] = READ_ONCE(cnf->seg6_require_hmac);
#endif
	array[DEVCONF_ENHANCED_DAD] = READ_ONCE(cnf->enhanced_dad);
	array[DEVCONF_ADDR_GEN_MODE] = READ_ONCE(cnf->addr_gen_mode);
	array[DEVCONF_DISABLE_POLICY] = READ_ONCE(cnf->disable_policy);
	array[DEVCONF_NDISC_TCLASS] = READ_ONCE(cnf->ndisc_tclass);
	array[DEVCONF_RPL_SEG_ENABLED] = READ_ONCE(cnf->rpl_seg_enabled);
	array[DEVCONF_IOAM6_ENABLED] = READ_ONCE(cnf->ioam6_enabled);
	array[DEVCONF_IOAM6_ID] = READ_ONCE(cnf->ioam6_id);
	array[DEVCONF_IOAM6_ID_WIDE] = READ_ONCE(cnf->ioam6_id_wide);
	array[DEVCONF_NDISC_EVICT_NOCARRIER] =
		READ_ONCE(cnf->ndisc_evict_nocarrier);
	array[DEVCONF_ACCEPT_UNTRACKED_NA] =
		READ_ONCE(cnf->accept_untracked_na);
	array[DEVCONF_ACCEPT_RA_MIN_LFT] = READ_ONCE(cnf->accept_ra_min_lft);
	array[DEVCONF_FORCE_FORWARDING] = READ_ONCE(cnf->force_forwarding);
}
5724 
inet6_ifla6_size(void)5725 static inline size_t inet6_ifla6_size(void)
5726 {
5727 	return nla_total_size(4) /* IFLA_INET6_FLAGS */
5728 	     + nla_total_size(sizeof(struct ifla_cacheinfo))
5729 	     + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
5730 	     + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
5731 	     + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */
5732 	     + nla_total_size(sizeof(struct in6_addr)) /* IFLA_INET6_TOKEN */
5733 	     + nla_total_size(1) /* IFLA_INET6_ADDR_GEN_MODE */
5734 	     + nla_total_size(4) /* IFLA_INET6_RA_MTU */
5735 	     + 0;
5736 }
5737 
inet6_if_nlmsg_size(void)5738 static inline size_t inet6_if_nlmsg_size(void)
5739 {
5740 	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
5741 	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
5742 	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
5743 	       + nla_total_size(4) /* IFLA_MTU */
5744 	       + nla_total_size(4) /* IFLA_LINK */
5745 	       + nla_total_size(1) /* IFLA_OPERSTATE */
5746 	       + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
5747 }
5748 
/* Copy the per-device ICMPv6 MIB counters into a u64 stats array.
 * Slot 0 carries the counter count; any space beyond ICMP6_MIB_MAX
 * entries is zero-filled so old userspace sees stable padding.
 */
static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
					int bytes)
{
	int pad = bytes - sizeof(u64) * ICMP6_MIB_MAX;
	int idx;

	BUG_ON(pad < 0);

	/* Use put_unaligned() because stats may not be aligned for u64. */
	put_unaligned(ICMP6_MIB_MAX, &stats[0]);
	for (idx = 1; idx < ICMP6_MIB_MAX; idx++)
		put_unaligned(atomic_long_read(&mib[idx]), &stats[idx]);

	memset(&stats[ICMP6_MIB_MAX], 0, pad);
}
5763 
/* Sum the per-CPU 64-bit IP MIB counters into a u64 stats array.
 * Sums are accumulated in an on-stack buffer first so each output
 * slot is written once; slot 0 carries the counter count and any
 * trailing space up to @bytes is zero-filled.
 */
static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib,
					int bytes, size_t syncpoff)
{
	u64 sums[IPSTATS_MIB_MAX];
	int pad = bytes - sizeof(u64) * IPSTATS_MIB_MAX;
	int cpu, idx;

	BUG_ON(pad < 0);

	memset(sums, 0, sizeof(sums));
	sums[0] = IPSTATS_MIB_MAX;

	for_each_possible_cpu(cpu) {
		for (idx = 1; idx < IPSTATS_MIB_MAX; idx++)
			sums[idx] += snmp_get_cpu_field64(mib, cpu, idx,
							  syncpoff);
	}

	memcpy(stats, sums, IPSTATS_MIB_MAX * sizeof(u64));
	memset(&stats[IPSTATS_MIB_MAX], 0, pad);
}
5784 
/* Dispatch stats serialization by attribute type; unknown types
 * leave @stats untouched (caller pre-reserved the space).
 */
static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
			     int bytes)
{
	if (attrtype == IFLA_INET6_STATS)
		__snmp6_fill_stats64(stats, idev->stats.ipv6, bytes,
				     offsetof(struct ipstats_mib, syncp));
	else if (attrtype == IFLA_INET6_ICMP6STATS)
		__snmp6_fill_statsdev(stats, idev->stats.icmpv6dev->mibs,
				      bytes);
}
5798 
inet6_fill_ifla6_stats_attrs(struct sk_buff * skb,struct inet6_dev * idev)5799 static int inet6_fill_ifla6_stats_attrs(struct sk_buff *skb,
5800 					struct inet6_dev *idev)
5801 {
5802 	struct nlattr *nla;
5803 
5804 	nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
5805 	if (!nla)
5806 		goto nla_put_failure;
5807 	snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
5808 
5809 	nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
5810 	if (!nla)
5811 		goto nla_put_failure;
5812 	snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
5813 
5814 	return 0;
5815 
5816 nla_put_failure:
5817 	return -EMSGSIZE;
5818 }
5819 
static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
				  u32 ext_filter_mask)
{
	/* Fill the per-device IPv6 protocol info attributes (flags,
	 * cacheinfo, devconf table, optional stats, token, address
	 * generation mode, RA MTU).  Returns 0 or -EMSGSIZE.
	 */
	struct ifla_cacheinfo ci;
	struct nlattr *nla;
	u32 ra_mtu;

	if (nla_put_u32(skb, IFLA_INET6_FLAGS, READ_ONCE(idev->if_flags)))
		goto nla_put_failure;
	ci.max_reasm_len = IPV6_MAXPLEN;
	ci.tstamp = cstamp_delta(READ_ONCE(idev->tstamp));
	ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
	ci.retrans_time = jiffies_to_msecs(NEIGH_VAR(idev->nd_parms, RETRANS_TIME));
	if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;
	nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
	if (!nla)
		goto nla_put_failure;
	ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));

	/* XXX - MC not implemented */

	/* Stats are expensive; skip them when the dump requested it. */
	if (!(ext_filter_mask & RTEXT_FILTER_SKIP_STATS)) {
		if (inet6_fill_ifla6_stats_attrs(skb, idev) < 0)
			goto nla_put_failure;
	}

	nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr));
	if (!nla)
		goto nla_put_failure;
	/* The token is updated under idev->lock; copy it consistently. */
	read_lock_bh(&idev->lock);
	memcpy(nla_data(nla), idev->token.s6_addr, nla_len(nla));
	read_unlock_bh(&idev->lock);

	if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE,
		       READ_ONCE(idev->cnf.addr_gen_mode)))
		goto nla_put_failure;

	/* Only report an RA-provided MTU if one was actually received. */
	ra_mtu = READ_ONCE(idev->ra_mtu);
	if (ra_mtu && nla_put_u32(skb, IFLA_INET6_RA_MTU, ra_mtu))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
5867 
inet6_get_link_af_size(const struct net_device * dev,u32 ext_filter_mask)5868 static size_t inet6_get_link_af_size(const struct net_device *dev,
5869 				     u32 ext_filter_mask)
5870 {
5871 	if (!__in6_dev_get(dev))
5872 		return 0;
5873 
5874 	return inet6_ifla6_size();
5875 }
5876 
/* rtnl_af_ops fill hook: emit the AF_INET6 attributes for @dev.
 * -ENODATA when IPv6 is not enabled on the device, -EMSGSIZE when
 * the skb ran out of room, 0 on success.
 */
static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
			      u32 ext_filter_mask)
{
	struct inet6_dev *idev = __in6_dev_get(dev);

	if (!idev)
		return -ENODATA;

	if (inet6_fill_ifla6_attrs(skb, idev, ext_filter_mask) < 0)
		return -EMSGSIZE;

	return 0;
}
5890 
static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token,
			     struct netlink_ext_ack *extack)
{
	/* Set (or clear, with the all-zero token) the device's IPv6
	 * tokenized interface identifier: the lower 64 bits that will be
	 * combined with RA prefixes to form addresses (RFC 7217-style
	 * tokenized addressing).  Existing tokenized addresses are
	 * expired and a fresh router solicitation is sent so new
	 * addresses are formed with the new token.
	 */
	struct inet6_ifaddr *ifp;
	struct net_device *dev = idev->dev;
	bool clear_token, update_rs = false;
	struct in6_addr ll_addr;

	ASSERT_RTNL();

	if (!token)
		return -EINVAL;

	if (dev->flags & IFF_LOOPBACK) {
		NL_SET_ERR_MSG_MOD(extack, "Device is loopback");
		return -EINVAL;
	}

	if (dev->flags & IFF_NOARP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Device does not do neighbour discovery");
		return -EINVAL;
	}

	/* A token is useless unless RAs are accepted and solicited. */
	if (!ipv6_accept_ra(idev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Router advertisement is disabled on device");
		return -EINVAL;
	}

	if (READ_ONCE(idev->cnf.rtr_solicits) == 0) {
		NL_SET_ERR_MSG(extack,
			       "Router solicitation is disabled on device");
		return -EINVAL;
	}

	write_lock_bh(&idev->lock);

	/* Only the interface-identifier half (lower 64 bits) is stored. */
	BUILD_BUG_ON(sizeof(token->s6_addr) != 16);
	memcpy(idev->token.s6_addr + 8, token->s6_addr + 8, 8);

	write_unlock_bh(&idev->lock);

	clear_token = ipv6_addr_any(token);
	if (clear_token)
		goto update_lft;

	/* ipv6_get_lladdr() must run without idev->lock held, hence the
	 * drop-and-retake around this block.
	 */
	if (!idev->dead && (idev->if_flags & IF_READY) &&
	    !ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
			     IFA_F_OPTIMISTIC)) {
		/* If we're not ready, then normal ifup will take care
		 * of this. Otherwise, we need to request our rs here.
		 */
		ndisc_send_rs(dev, &ll_addr, &in6addr_linklocal_allrouters);
		update_rs = true;
	}

update_lft:
	write_lock_bh(&idev->lock);

	if (update_rs) {
		idev->if_flags |= IF_RS_SENT;
		idev->rs_interval = rfc3315_s14_backoff_init(
			READ_ONCE(idev->cnf.rtr_solicit_interval));
		idev->rs_probes = 1;
		addrconf_mod_rs_timer(idev, idev->rs_interval);
	}

	/* Well, that's kinda nasty ... */
	/* Zero the lifetimes of tokenized addresses so the GC replaces
	 * them with addresses built from the new token.
	 */
	list_for_each_entry(ifp, &idev->addr_list, if_list) {
		spin_lock(&ifp->lock);
		if (ifp->tokenized) {
			ifp->valid_lft = 0;
			ifp->prefered_lft = 0;
		}
		spin_unlock(&ifp->lock);
	}

	write_unlock_bh(&idev->lock);
	inet6_ifinfo_notify(RTM_NEWLINK, idev);
	addrconf_verify_rtnl(dev_net(dev));
	return 0;
}
5974 
/* Netlink policy for AF_INET6 attributes inside IFLA_AF_SPEC: only the
 * interface token and the address generation mode are writable;
 * IFLA_INET6_RA_MTU is reported by the kernel and rejected on input.
 */
5975 static const struct nla_policy inet6_af_policy[IFLA_INET6_MAX + 1] = {
5976 	[IFLA_INET6_ADDR_GEN_MODE]	= { .type = NLA_U8 },
5977 	[IFLA_INET6_TOKEN]		= { .len = sizeof(struct in6_addr) },
5978 	[IFLA_INET6_RA_MTU]		= { .type = NLA_REJECT,
5979 					    .reject_message =
5980 						"IFLA_INET6_RA_MTU can not be set" },
5981 };
5982 
check_addr_gen_mode(int mode)5983 static int check_addr_gen_mode(int mode)
5984 {
5985 	if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
5986 	    mode != IN6_ADDR_GEN_MODE_NONE &&
5987 	    mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5988 	    mode != IN6_ADDR_GEN_MODE_RANDOM)
5989 		return -EINVAL;
5990 	return 1;
5991 }
5992 
check_stable_privacy(struct inet6_dev * idev,struct net * net,int mode)5993 static int check_stable_privacy(struct inet6_dev *idev, struct net *net,
5994 				int mode)
5995 {
5996 	if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5997 	    !idev->cnf.stable_secret.initialized &&
5998 	    !net->ipv6.devconf_dflt->stable_secret.initialized)
5999 		return -EINVAL;
6000 	return 1;
6001 }
6002 
inet6_validate_link_af(const struct net_device * dev,const struct nlattr * nla,struct netlink_ext_ack * extack)6003 static int inet6_validate_link_af(const struct net_device *dev,
6004 				  const struct nlattr *nla,
6005 				  struct netlink_ext_ack *extack)
6006 {
6007 	struct nlattr *tb[IFLA_INET6_MAX + 1];
6008 	struct inet6_dev *idev = NULL;
6009 	int err;
6010 
6011 	if (dev) {
6012 		idev = __in6_dev_get(dev);
6013 		if (!idev)
6014 			return -EAFNOSUPPORT;
6015 	}
6016 
6017 	err = nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla,
6018 					  inet6_af_policy, extack);
6019 	if (err)
6020 		return err;
6021 
6022 	if (!tb[IFLA_INET6_TOKEN] && !tb[IFLA_INET6_ADDR_GEN_MODE])
6023 		return -EINVAL;
6024 
6025 	if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
6026 		u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
6027 
6028 		if (check_addr_gen_mode(mode) < 0)
6029 			return -EINVAL;
6030 		if (dev && check_stable_privacy(idev, dev_net(dev), mode) < 0)
6031 			return -EINVAL;
6032 	}
6033 
6034 	return 0;
6035 }
6036 
inet6_set_link_af(struct net_device * dev,const struct nlattr * nla,struct netlink_ext_ack * extack)6037 static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla,
6038 			     struct netlink_ext_ack *extack)
6039 {
6040 	struct inet6_dev *idev = __in6_dev_get(dev);
6041 	struct nlattr *tb[IFLA_INET6_MAX + 1];
6042 	int err;
6043 
6044 	if (!idev)
6045 		return -EAFNOSUPPORT;
6046 
6047 	if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
6048 		return -EINVAL;
6049 
6050 	if (tb[IFLA_INET6_TOKEN]) {
6051 		err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]),
6052 					extack);
6053 		if (err)
6054 			return err;
6055 	}
6056 
6057 	if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
6058 		u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
6059 
6060 		WRITE_ONCE(idev->cnf.addr_gen_mode, mode);
6061 	}
6062 
6063 	return 0;
6064 }
6065 
/* Build one RTM_NEWLINK message for @idev into @skb: ifinfomsg header,
 * basic link attributes, and the AF_INET6 data nested in IFLA_PROTINFO.
 * Returns 0 or -EMSGSIZE (message rolled back via nlmsg_cancel).
 */
static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
			     u32 portid, u32 seq, int event, unsigned int flags)
{
	struct net_device *dev = idev->dev;
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;
	int ifindex, iflink;
	void *protoinfo;
	u8 operstate;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
	if (!nlh)
		return -EMSGSIZE;

	ifindex = READ_ONCE(dev->ifindex);

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_INET6;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = dev->type;
	hdr->ifi_index = ifindex;
	hdr->ifi_flags = netif_get_flags(dev);
	hdr->ifi_change = 0;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name))
		goto nla_put_failure;

	if (dev->addr_len &&
	    nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr))
		goto nla_put_failure;

	if (nla_put_u32(skb, IFLA_MTU, READ_ONCE(dev->mtu)))
		goto nla_put_failure;

	/* IFLA_LINK only when the lower device differs from this one. */
	iflink = dev_get_iflink(dev);
	if (ifindex != iflink && nla_put_u32(skb, IFLA_LINK, iflink))
		goto nla_put_failure;

	operstate = netif_running(dev) ? READ_ONCE(dev->operstate) : IF_OPER_DOWN;
	if (nla_put_u8(skb, IFLA_OPERSTATE, operstate))
		goto nla_put_failure;

	protoinfo = nla_nest_start_noflag(skb, IFLA_PROTINFO);
	if (!protoinfo)
		goto nla_put_failure;

	if (inet6_fill_ifla6_attrs(skb, idev, 0) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, protoinfo);
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
6113 
inet6_valid_dump_ifinfo(const struct nlmsghdr * nlh,struct netlink_ext_ack * extack)6114 static int inet6_valid_dump_ifinfo(const struct nlmsghdr *nlh,
6115 				   struct netlink_ext_ack *extack)
6116 {
6117 	struct ifinfomsg *ifm;
6118 
6119 	ifm = nlmsg_payload(nlh, sizeof(*ifm));
6120 	if (!ifm) {
6121 		NL_SET_ERR_MSG_MOD(extack, "Invalid header for link dump request");
6122 		return -EINVAL;
6123 	}
6124 
6125 	if (nlmsg_attrlen(nlh, sizeof(*ifm))) {
6126 		NL_SET_ERR_MSG_MOD(extack, "Invalid data after header");
6127 		return -EINVAL;
6128 	}
6129 
6130 	if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
6131 	    ifm->ifi_change || ifm->ifi_index) {
6132 		NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for dump request");
6133 		return -EINVAL;
6134 	}
6135 
6136 	return 0;
6137 }
6138 
inet6_dump_ifinfo(struct sk_buff * skb,struct netlink_callback * cb)6139 static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
6140 {
6141 	struct net *net = sock_net(skb->sk);
6142 	struct {
6143 		unsigned long ifindex;
6144 	} *ctx = (void *)cb->ctx;
6145 	struct net_device *dev;
6146 	struct inet6_dev *idev;
6147 	int err;
6148 
6149 	/* only requests using strict checking can pass data to
6150 	 * influence the dump
6151 	 */
6152 	if (cb->strict_check) {
6153 		err = inet6_valid_dump_ifinfo(cb->nlh, cb->extack);
6154 
6155 		if (err < 0)
6156 			return err;
6157 	}
6158 
6159 	err = 0;
6160 	rcu_read_lock();
6161 	for_each_netdev_dump(net, dev, ctx->ifindex) {
6162 		idev = __in6_dev_get(dev);
6163 		if (!idev)
6164 			continue;
6165 		err = inet6_fill_ifinfo(skb, idev,
6166 					NETLINK_CB(cb->skb).portid,
6167 					cb->nlh->nlmsg_seq,
6168 					RTM_NEWLINK, NLM_F_MULTI);
6169 		if (err < 0)
6170 			break;
6171 	}
6172 	rcu_read_unlock();
6173 
6174 	return err;
6175 }
6176 
inet6_ifinfo_notify(int event,struct inet6_dev * idev)6177 void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
6178 {
6179 	struct sk_buff *skb;
6180 	struct net *net = dev_net(idev->dev);
6181 	int err = -ENOBUFS;
6182 
6183 	skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC);
6184 	if (!skb)
6185 		goto errout;
6186 
6187 	err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0);
6188 	if (err < 0) {
6189 		/* -EMSGSIZE implies BUG in inet6_if_nlmsg_size() */
6190 		WARN_ON(err == -EMSGSIZE);
6191 		kfree_skb(skb);
6192 		goto errout;
6193 	}
6194 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFINFO, NULL, GFP_ATOMIC);
6195 	return;
6196 errout:
6197 	rtnl_set_sk_err(net, RTNLGRP_IPV6_IFINFO, err);
6198 }
6199 
inet6_prefix_nlmsg_size(void)6200 static inline size_t inet6_prefix_nlmsg_size(void)
6201 {
6202 	return NLMSG_ALIGN(sizeof(struct prefixmsg))
6203 	       + nla_total_size(sizeof(struct in6_addr))
6204 	       + nla_total_size(sizeof(struct prefix_cacheinfo));
6205 }
6206 
/* Build one prefix message for @pinfo into @skb: prefixmsg header plus
 * PREFIX_ADDRESS and PREFIX_CACHEINFO attributes.  Returns 0 or
 * -EMSGSIZE (message rolled back via nlmsg_cancel).
 */
static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
			     struct prefix_info *pinfo, u32 portid, u32 seq,
			     int event, unsigned int flags)
{
	struct prefix_cacheinfo ci;
	struct prefixmsg *pmsg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*pmsg), flags);
	if (!nlh)
		return -EMSGSIZE;

	/* Compound-literal assignment zeroes the pad fields implicitly. */
	pmsg = nlmsg_data(nlh);
	*pmsg = (struct prefixmsg) {
		.prefix_family	= AF_INET6,
		.prefix_ifindex	= idev->dev->ifindex,
		.prefix_len	= pinfo->prefix_len,
		.prefix_type	= pinfo->type,
		.prefix_flags	= pinfo->flags,
	};

	if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
		goto nla_put_failure;

	/* Lifetimes arrive in network byte order from the RA option. */
	ci.preferred_time = ntohl(pinfo->prefered);
	ci.valid_time = ntohl(pinfo->valid);
	if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
6242 
inet6_prefix_notify(int event,struct inet6_dev * idev,struct prefix_info * pinfo)6243 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
6244 			 struct prefix_info *pinfo)
6245 {
6246 	struct sk_buff *skb;
6247 	struct net *net = dev_net(idev->dev);
6248 	int err = -ENOBUFS;
6249 
6250 	skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC);
6251 	if (!skb)
6252 		goto errout;
6253 
6254 	err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0);
6255 	if (err < 0) {
6256 		/* -EMSGSIZE implies BUG in inet6_prefix_nlmsg_size() */
6257 		WARN_ON(err == -EMSGSIZE);
6258 		kfree_skb(skb);
6259 		goto errout;
6260 	}
6261 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
6262 	return;
6263 errout:
6264 	rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
6265 }
6266 
/* Propagate an address add/delete: send the RTM_NEWADDR/RTM_DELADDR
 * netlink notification, then keep the FIB in sync (host route, anycast
 * membership, peer /128 route) and bump the per-netns generation
 * counters.  @event == 0 means "resend RTM_NEWADDR" without RTNL being
 * asserted (see the `if (event)` check below).
 */
__ipv6_ifa_notify(int event,struct inet6_ifaddr * ifp)6267 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
6268 {
6269 	struct net *net = dev_net(ifp->idev->dev);
6270 
6271 	if (event)
6272 		ASSERT_RTNL();
6273 
6274 	inet6_ifa_notify(event ? : RTM_NEWADDR, ifp);
6275 
6276 	switch (event) {
6277 	case RTM_NEWADDR:
6278 		/*
6279 		 * If the address was optimistic we inserted the route at the
6280 		 * start of our DAD process, so we don't need to do it again.
6281 		 * If the device was taken down in the middle of the DAD
6282 		 * cycle there is a race where we could get here without a
6283 		 * host route, so nothing to insert. That will be fixed when
6284 		 * the device is brought up.
6285 		 */
		/* A NULL fib6_node means the route exists but is not linked
		 * into the tree yet.
		 */
6286 		if (ifp->rt && !rcu_access_pointer(ifp->rt->fib6_node)) {
6287 			ip6_ins_rt(net, ifp->rt);
6288 		} else if (!ifp->rt && (ifp->idev->dev->flags & IFF_UP)) {
6289 			pr_warn("BUG: Address %pI6c on device %s is missing its host route.\n",
6290 				&ifp->addr, ifp->idev->dev->name);
6291 		}
6292 
6293 		if (ifp->idev->cnf.forwarding)
6294 			addrconf_join_anycast(ifp);
		/* Point-to-point peer gets a /128 route of its own. */
6295 		if (!ipv6_addr_any(&ifp->peer_addr))
6296 			addrconf_prefix_route(&ifp->peer_addr, 128,
6297 					      ifp->rt_priority, ifp->idev->dev,
6298 					      0, 0, GFP_ATOMIC);
6299 		break;
6300 	case RTM_DELADDR:
6301 		if (ifp->idev->cnf.forwarding)
6302 			addrconf_leave_anycast(ifp);
6303 		addrconf_leave_solict(ifp->idev, &ifp->addr);
6304 		if (!ipv6_addr_any(&ifp->peer_addr)) {
6305 			struct fib6_info *rt;
6306 
6307 			rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
6308 						       ifp->idev->dev, 0, 0,
6309 						       false);
6310 			if (rt)
6311 				ip6_del_rt(net, rt, false);
6312 		}
6313 		if (ifp->rt) {
6314 			ip6_del_rt(net, ifp->rt, false);
6315 			ifp->rt = NULL;
6316 		}
		/* NOTE(review): genid bump presumably invalidates cached
		 * dsts that referenced the removed address — confirm.
		 */
6317 		rt_genid_bump_ipv6(net);
6318 		break;
6319 	}
6320 	atomic_inc(&net->ipv6.dev_addr_genid);
6321 }
6322 
ipv6_ifa_notify(int event,struct inet6_ifaddr * ifp)6323 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
6324 {
6325 	if (likely(ifp->idev->dead == 0))
6326 		__ipv6_ifa_notify(event, ifp);
6327 }
6328 
6329 #ifdef CONFIG_SYSCTL
6330 
/* sysctl handler for "forwarding": parse into a local copy so the live
 * value (idev->cnf.forwarding) is only modified under RTNL by
 * addrconf_fixup_forwarding().
 */
static int addrconf_sysctl_forward(const struct ctl_table *ctl, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table lctl = *ctl;
	int *valp = ctl->data;
	loff_t pos = *ppos;
	int val = *valp;
	int ret;

	lctl.data = &val;

	ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
	if (write)
		ret = addrconf_fixup_forwarding(ctl, valp, val);

	/* Rewind the file position on failure (incl. restart_syscall). */
	if (ret)
		*ppos = pos;
	return ret;
}
6355 
/* sysctl handler for "mtu": clamp writes to [IPV6_MIN_MTU, device MTU];
 * no upper bound when there is no device (the "all"/"default" entries).
 */
static int addrconf_sysctl_mtu(const struct ctl_table *ctl, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	struct inet6_dev *idev = ctl->extra1;
	struct ctl_table lctl = *ctl;
	int min_mtu = IPV6_MIN_MTU;

	lctl.extra1 = &min_mtu;
	lctl.extra2 = idev ? &idev->dev->mtu : NULL;

	return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
}
6369 
dev_disable_change(struct inet6_dev * idev)6370 static void dev_disable_change(struct inet6_dev *idev)
6371 {
6372 	struct netdev_notifier_info info;
6373 
6374 	if (!idev || !idev->dev)
6375 		return;
6376 
6377 	netdev_notifier_info_init(&info, idev->dev);
6378 	if (idev->cnf.disable_ipv6)
6379 		addrconf_notify(NULL, NETDEV_DOWN, &info);
6380 	else
6381 		addrconf_notify(NULL, NETDEV_UP, &info);
6382 }
6383 
/* Apply a new disable_ipv6 value to every device in @net, replaying
 * up/down on the ones whose effective state actually flips.
 */
static void addrconf_disable_change(struct net *net, __s32 newf)
{
	struct net_device *dev;

	for_each_netdev(net, dev) {
		struct inet6_dev *idev = __in6_dev_get_rtnl_net(dev);
		int changed;

		if (!idev)
			continue;

		changed = (!idev->cnf.disable_ipv6) ^ (!newf);
		WRITE_ONCE(idev->cnf.disable_ipv6, newf);
		if (changed)
			dev_disable_change(idev);
	}
}
6400 
addrconf_disable_ipv6(const struct ctl_table * table,int * p,int newf)6401 static int addrconf_disable_ipv6(const struct ctl_table *table, int *p, int newf)
6402 {
6403 	struct net *net = (struct net *)table->extra2;
6404 	int old;
6405 
6406 	if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
6407 		WRITE_ONCE(*p, newf);
6408 		return 0;
6409 	}
6410 
6411 	if (!rtnl_net_trylock(net))
6412 		return restart_syscall();
6413 
6414 	old = *p;
6415 	WRITE_ONCE(*p, newf);
6416 
6417 	if (p == &net->ipv6.devconf_all->disable_ipv6) {
6418 		WRITE_ONCE(net->ipv6.devconf_dflt->disable_ipv6, newf);
6419 		addrconf_disable_change(net, newf);
6420 	} else if ((!newf) ^ (!old)) {
6421 		dev_disable_change((struct inet6_dev *)table->extra1);
6422 	}
6423 
6424 	rtnl_net_unlock(net);
6425 	return 0;
6426 }
6427 
/* sysctl handler for "disable_ipv6": parse into a local copy so the
 * live value (idev->cnf.disable_ipv6) is only modified under RTNL by
 * addrconf_disable_ipv6().
 */
static int addrconf_sysctl_disable(const struct ctl_table *ctl, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table lctl = *ctl;
	int *valp = ctl->data;
	loff_t pos = *ppos;
	int val = *valp;
	int ret;

	lctl.data = &val;

	ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
	if (write)
		ret = addrconf_disable_ipv6(ctl, valp, val);

	/* Rewind the file position on failure (incl. restart_syscall). */
	if (ret)
		*ppos = pos;
	return ret;
}
6452 
/* sysctl handler for "proxy_ndp": on a value-changing write, send an
 * RTM_NEWNETCONF notification for the matching scope (default, all, or
 * a single device).
 */
static int addrconf_sysctl_proxy_ndp(const struct ctl_table *ctl, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	int *valp = ctl->data;
	int old = *valp;
	int ret;

	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	if (write && *valp != old) {
		struct net *net = ctl->extra2;
		struct ipv6_devconf *devconf;
		int ifindex;

		if (!rtnl_net_trylock(net))
			return restart_syscall();

		if (valp == &net->ipv6.devconf_dflt->proxy_ndp) {
			ifindex = NETCONFA_IFINDEX_DEFAULT;
			devconf = net->ipv6.devconf_dflt;
		} else if (valp == &net->ipv6.devconf_all->proxy_ndp) {
			ifindex = NETCONFA_IFINDEX_ALL;
			devconf = net->ipv6.devconf_all;
		} else {
			struct inet6_dev *idev = ctl->extra1;

			ifindex = idev->dev->ifindex;
			devconf = &idev->cnf;
		}
		inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
					     NETCONFA_PROXY_NEIGH, ifindex,
					     devconf);
		rtnl_net_unlock(net);
	}

	return ret;
}
6493 
/* sysctl handler for "addr_gen_mode".  A valid write updates the target
 * value and regenerates autoconf addresses on the affected device(s);
 * writing the "all" entry cascades to the default and to every device
 * whose mode differs.  The whole operation (including the copy from
 * userspace) runs under the per-net RTNL lock.
 */
addrconf_sysctl_addr_gen_mode(const struct ctl_table * ctl,int write,void * buffer,size_t * lenp,loff_t * ppos)6494 static int addrconf_sysctl_addr_gen_mode(const struct ctl_table *ctl, int write,
6495 					 void *buffer, size_t *lenp,
6496 					 loff_t *ppos)
6497 {
6498 	int ret = 0;
6499 	u32 new_val;
6500 	struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1;
6501 	struct net *net = (struct net *)ctl->extra2;
	/* Parse through a stack-local table so ctl->data is untouched
	 * until the new value has been validated.
	 */
6502 	struct ctl_table tmp = {
6503 		.data = &new_val,
6504 		.maxlen = sizeof(new_val),
6505 		.mode = ctl->mode,
6506 	};
6507 
6508 	if (!rtnl_net_trylock(net))
6509 		return restart_syscall();
6510 
6511 	new_val = *((u32 *)ctl->data);
6512 
6513 	ret = proc_douintvec(&tmp, write, buffer, lenp, ppos);
6514 	if (ret != 0)
6515 		goto out;
6516 
6517 	if (write) {
6518 		if (check_addr_gen_mode(new_val) < 0) {
6519 			ret = -EINVAL;
6520 			goto out;
6521 		}
6522 
6523 		if (idev) {
6524 			if (check_stable_privacy(idev, net, new_val) < 0) {
6525 				ret = -EINVAL;
6526 				goto out;
6527 			}
6528 
			/* Regenerate addresses only on an actual change. */
6529 			if (idev->cnf.addr_gen_mode != new_val) {
6530 				WRITE_ONCE(idev->cnf.addr_gen_mode, new_val);
6531 				netdev_lock_ops(idev->dev);
6532 				addrconf_init_auto_addrs(idev->dev);
6533 				netdev_unlock_ops(idev->dev);
6534 			}
6535 		} else if (&net->ipv6.devconf_all->addr_gen_mode == ctl->data) {
6536 			struct net_device *dev;
6537 
			/* "all": update the default and every device. */
6538 			WRITE_ONCE(net->ipv6.devconf_dflt->addr_gen_mode, new_val);
6539 			for_each_netdev(net, dev) {
6540 				idev = __in6_dev_get_rtnl_net(dev);
6541 				if (idev &&
6542 				    idev->cnf.addr_gen_mode != new_val) {
6543 					WRITE_ONCE(idev->cnf.addr_gen_mode,
6544 						  new_val);
6545 					netdev_lock_ops(idev->dev);
6546 					addrconf_init_auto_addrs(idev->dev);
6547 					netdev_unlock_ops(idev->dev);
6548 				}
6549 			}
6550 		}
6551 
6552 		WRITE_ONCE(*((u32 *)ctl->data), new_val);
6553 	}
6554 
6555 out:
6556 	rtnl_net_unlock(net);
6557 
6558 	return ret;
6559 }
6560 
/* sysctl handler for "stable_secret".  The secret is exchanged with
 * userspace as an IPv6-address-formatted string; a successful write
 * installs it and switches the affected device(s) to
 * IN6_ADDR_GEN_MODE_STABLE_PRIVACY.  The "all" entry is not readable or
 * writable (-EIO); reads of an unset secret also return -EIO.
 */
addrconf_sysctl_stable_secret(const struct ctl_table * ctl,int write,void * buffer,size_t * lenp,loff_t * ppos)6561 static int addrconf_sysctl_stable_secret(const struct ctl_table *ctl, int write,
6562 					 void *buffer, size_t *lenp,
6563 					 loff_t *ppos)
6564 {
6565 	int err;
6566 	struct in6_addr addr;
6567 	char str[IPV6_MAX_STRLEN];
6568 	struct ctl_table lctl = *ctl;
6569 	struct net *net = ctl->extra2;
6570 	struct ipv6_stable_secret *secret = ctl->data;
6571 
6572 	if (&net->ipv6.devconf_all->stable_secret == ctl->data)
6573 		return -EIO;
6574 
	/* Route proc_dostring() through a stack buffer. */
6575 	lctl.maxlen = IPV6_MAX_STRLEN;
6576 	lctl.data = str;
6577 
6578 	if (!rtnl_net_trylock(net))
6579 		return restart_syscall();
6580 
6581 	if (!write && !secret->initialized) {
6582 		err = -EIO;
6583 		goto out;
6584 	}
6585 
	/* Pre-fill with the current secret so reads (and short writes)
	 * see the existing value.
	 */
6586 	err = snprintf(str, sizeof(str), "%pI6", &secret->secret);
6587 	if (err >= sizeof(str)) {
6588 		err = -EIO;
6589 		goto out;
6590 	}
6591 
6592 	err = proc_dostring(&lctl, write, buffer, lenp, ppos);
6593 	if (err || !write)
6594 		goto out;
6595 
6596 	if (in6_pton(str, -1, addr.in6_u.u6_addr8, -1, NULL) != 1) {
6597 		err = -EIO;
6598 		goto out;
6599 	}
6600 
6601 	secret->initialized = true;
6602 	secret->secret = addr;
6603 
	/* Setting the default secret flips every existing device to
	 * stable-privacy mode; a per-device secret only flips that one.
	 */
6604 	if (&net->ipv6.devconf_dflt->stable_secret == ctl->data) {
6605 		struct net_device *dev;
6606 
6607 		for_each_netdev(net, dev) {
6608 			struct inet6_dev *idev = __in6_dev_get_rtnl_net(dev);
6609 
6610 			if (idev) {
6611 				WRITE_ONCE(idev->cnf.addr_gen_mode,
6612 					   IN6_ADDR_GEN_MODE_STABLE_PRIVACY);
6613 			}
6614 		}
6615 	} else {
6616 		struct inet6_dev *idev = ctl->extra1;
6617 
6618 		WRITE_ONCE(idev->cnf.addr_gen_mode,
6619 			   IN6_ADDR_GEN_MODE_STABLE_PRIVACY);
6620 	}
6621 
6622 out:
6623 	rtnl_net_unlock(net);
6624 
6625 	return err;
6626 }
6627 
6628 static
addrconf_sysctl_ignore_routes_with_linkdown(const struct ctl_table * ctl,int write,void * buffer,size_t * lenp,loff_t * ppos)6629 int addrconf_sysctl_ignore_routes_with_linkdown(const struct ctl_table *ctl,
6630 						int write, void *buffer,
6631 						size_t *lenp,
6632 						loff_t *ppos)
6633 {
6634 	int *valp = ctl->data;
6635 	int val = *valp;
6636 	loff_t pos = *ppos;
6637 	struct ctl_table lctl;
6638 	int ret;
6639 
6640 	/* ctl->data points to idev->cnf.ignore_routes_when_linkdown
6641 	 * we should not modify it until we get the rtnl lock.
6642 	 */
6643 	lctl = *ctl;
6644 	lctl.data = &val;
6645 
6646 	ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6647 
6648 	if (write)
6649 		ret = addrconf_fixup_linkdown(ctl, valp, val);
6650 	if (ret)
6651 		*ppos = pos;
6652 	return ret;
6653 }
6654 
6655 static
addrconf_set_nopolicy(struct rt6_info * rt,int action)6656 void addrconf_set_nopolicy(struct rt6_info *rt, int action)
6657 {
6658 	if (rt) {
6659 		if (action)
6660 			rt->dst.flags |= DST_NOPOLICY;
6661 		else
6662 			rt->dst.flags &= ~DST_NOPOLICY;
6663 	}
6664 }
6665 
/* Apply the disable_policy setting to every address on @idev: mark each
 * address's host route (and its per-CPU route copies) with or without
 * DST_NOPOLICY according to @val.
 */
6666 static
addrconf_disable_policy_idev(struct inet6_dev * idev,int val)6667 void addrconf_disable_policy_idev(struct inet6_dev *idev, int val)
6668 {
6669 	struct inet6_ifaddr *ifa;
6670 
6671 	read_lock_bh(&idev->lock);
6672 	list_for_each_entry(ifa, &idev->addr_list, if_list) {
6673 		spin_lock(&ifa->lock);
6674 		if (ifa->rt) {
6675 			/* host routes only use builtin fib6_nh */
6676 			struct fib6_nh *nh = ifa->rt->fib6_nh;
6677 			int cpu;
6678 
			/* RCU protects the per-CPU rt6_info copies while
			 * they are flagged.
			 */
6679 			rcu_read_lock();
6680 			ifa->rt->dst_nopolicy = val ? true : false;
6681 			if (nh->rt6i_pcpu) {
6682 				for_each_possible_cpu(cpu) {
6683 					struct rt6_info **rtp;
6684 
6685 					rtp = per_cpu_ptr(nh->rt6i_pcpu, cpu);
6686 					addrconf_set_nopolicy(*rtp, val);
6687 				}
6688 			}
6689 			rcu_read_unlock();
6690 		}
6691 		spin_unlock(&ifa->lock);
6692 	}
6693 	read_unlock_bh(&idev->lock);
6694 }
6695 
6696 static
addrconf_disable_policy(const struct ctl_table * ctl,int * valp,int val)6697 int addrconf_disable_policy(const struct ctl_table *ctl, int *valp, int val)
6698 {
6699 	struct net *net = (struct net *)ctl->extra2;
6700 	struct inet6_dev *idev;
6701 
6702 	if (valp == &net->ipv6.devconf_dflt->disable_policy) {
6703 		WRITE_ONCE(*valp, val);
6704 		return 0;
6705 	}
6706 
6707 	if (!rtnl_net_trylock(net))
6708 		return restart_syscall();
6709 
6710 	WRITE_ONCE(*valp, val);
6711 
6712 	if (valp == &net->ipv6.devconf_all->disable_policy)  {
6713 		struct net_device *dev;
6714 
6715 		for_each_netdev(net, dev) {
6716 			idev = __in6_dev_get_rtnl_net(dev);
6717 			if (idev)
6718 				addrconf_disable_policy_idev(idev, val);
6719 		}
6720 	} else {
6721 		idev = (struct inet6_dev *)ctl->extra1;
6722 		addrconf_disable_policy_idev(idev, val);
6723 	}
6724 
6725 	rtnl_net_unlock(net);
6726 	return 0;
6727 }
6728 
/* sysctl handler for "disable_policy": stage the write in a local so
 * the live value is only updated under RTNL by
 * addrconf_disable_policy(), and only when the value actually changed.
 */
static int addrconf_sysctl_disable_policy(const struct ctl_table *ctl, int write,
				   void *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table lctl = *ctl;
	int *valp = ctl->data;
	loff_t pos = *ppos;
	int val = *valp;
	int ret;

	lctl.data = &val;
	ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);

	if (write && *valp != val)
		ret = addrconf_disable_policy(ctl, valp, val);

	/* Rewind the file position on failure (incl. restart_syscall). */
	if (ret)
		*ppos = pos;

	return ret;
}
6750 
/* Apply a new force_forwarding value to every device in @net, emitting
 * an RTM_NEWNETCONF notification for each device whose effective state
 * flips.
 */
static void addrconf_force_forward_change(struct net *net, __s32 newf)
{
	struct net_device *dev;

	for_each_netdev(net, dev) {
		struct inet6_dev *idev = __in6_dev_get_rtnl_net(dev);
		int changed;

		if (!idev)
			continue;

		changed = (!idev->cnf.force_forwarding) ^ (!newf);
		WRITE_ONCE(idev->cnf.force_forwarding, newf);
		if (changed)
			inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
						     NETCONFA_FORCE_FORWARDING,
						     dev->ifindex, &idev->cnf);
	}
}
6769 
/* sysctl handler for "force_forwarding": parse a 0/1 value into a local
 * copy, then on an actual change commit it under the per-net RTNL lock,
 * notify the matching scope, and cascade when the "all" entry was
 * written.
 */
static int addrconf_sysctl_force_forwarding(const struct ctl_table *ctl, int write,
					    void *buffer, size_t *lenp, loff_t *ppos)
{
	struct inet6_dev *idev = ctl->extra1;
	struct ctl_table tmp_ctl = *ctl;
	struct net *net = ctl->extra2;
	int *valp = ctl->data;
	int new_val = *valp;
	int old_val = *valp;
	loff_t pos = *ppos;
	int ret;

	/* Stage the write in a local, clamped to the 0..1 range. */
	tmp_ctl.extra1 = SYSCTL_ZERO;
	tmp_ctl.extra2 = SYSCTL_ONE;
	tmp_ctl.data = &new_val;

	ret = proc_douintvec_minmax(&tmp_ctl, write, buffer, lenp, ppos);

	if (write && old_val != new_val) {
		struct ipv6_devconf *devconf;
		int ifindex;

		if (!rtnl_net_trylock(net))
			return restart_syscall();

		WRITE_ONCE(*valp, new_val);

		if (valp == &net->ipv6.devconf_dflt->force_forwarding) {
			ifindex = NETCONFA_IFINDEX_DEFAULT;
			devconf = net->ipv6.devconf_dflt;
		} else if (valp == &net->ipv6.devconf_all->force_forwarding) {
			ifindex = NETCONFA_IFINDEX_ALL;
			devconf = net->ipv6.devconf_all;
		} else {
			ifindex = idev->dev->ifindex;
			devconf = &idev->cnf;
		}

		inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
					     NETCONFA_FORCE_FORWARDING,
					     ifindex, devconf);

		/* "all" cascades to every device (after its own notify,
		 * matching the original notification order).
		 */
		if (valp == &net->ipv6.devconf_all->force_forwarding)
			addrconf_force_forward_change(net, new_val);

		rtnl_net_unlock(net);
	}

	if (ret)
		*ppos = pos;
	return ret;
}
6819 
/* Range bounds referenced by the addrconf sysctl table below. */
6820 static int minus_one = -1;	/* lower bound for "router_solicitations" */
6821 static const int two_five_five = 255;	/* upper bound for "hop_limit" */
6822 static u32 ioam6_if_id_max = U16_MAX;
6823 
6824 static const struct ctl_table addrconf_sysctl[] = {
6825 	{
6826 		.procname	= "forwarding",
6827 		.data		= &ipv6_devconf.forwarding,
6828 		.maxlen		= sizeof(int),
6829 		.mode		= 0644,
6830 		.proc_handler	= addrconf_sysctl_forward,
6831 	},
6832 	{
6833 		.procname	= "hop_limit",
6834 		.data		= &ipv6_devconf.hop_limit,
6835 		.maxlen		= sizeof(int),
6836 		.mode		= 0644,
6837 		.proc_handler	= proc_dointvec_minmax,
6838 		.extra1		= (void *)SYSCTL_ONE,
6839 		.extra2		= (void *)&two_five_five,
6840 	},
6841 	{
6842 		.procname	= "mtu",
6843 		.data		= &ipv6_devconf.mtu6,
6844 		.maxlen		= sizeof(int),
6845 		.mode		= 0644,
6846 		.proc_handler	= addrconf_sysctl_mtu,
6847 	},
6848 	{
6849 		.procname	= "accept_ra",
6850 		.data		= &ipv6_devconf.accept_ra,
6851 		.maxlen		= sizeof(int),
6852 		.mode		= 0644,
6853 		.proc_handler	= proc_dointvec,
6854 	},
6855 	{
6856 		.procname	= "accept_redirects",
6857 		.data		= &ipv6_devconf.accept_redirects,
6858 		.maxlen		= sizeof(int),
6859 		.mode		= 0644,
6860 		.proc_handler	= proc_dointvec,
6861 	},
6862 	{
6863 		.procname	= "autoconf",
6864 		.data		= &ipv6_devconf.autoconf,
6865 		.maxlen		= sizeof(int),
6866 		.mode		= 0644,
6867 		.proc_handler	= proc_dointvec,
6868 	},
6869 	{
6870 		.procname	= "dad_transmits",
6871 		.data		= &ipv6_devconf.dad_transmits,
6872 		.maxlen		= sizeof(int),
6873 		.mode		= 0644,
6874 		.proc_handler	= proc_dointvec,
6875 	},
6876 	{
6877 		.procname	= "router_solicitations",
6878 		.data		= &ipv6_devconf.rtr_solicits,
6879 		.maxlen		= sizeof(int),
6880 		.mode		= 0644,
6881 		.proc_handler	= proc_dointvec_minmax,
6882 		.extra1		= &minus_one,
6883 	},
6884 	{
6885 		.procname	= "router_solicitation_interval",
6886 		.data		= &ipv6_devconf.rtr_solicit_interval,
6887 		.maxlen		= sizeof(int),
6888 		.mode		= 0644,
6889 		.proc_handler	= proc_dointvec_jiffies,
6890 	},
6891 	{
6892 		.procname	= "router_solicitation_max_interval",
6893 		.data		= &ipv6_devconf.rtr_solicit_max_interval,
6894 		.maxlen		= sizeof(int),
6895 		.mode		= 0644,
6896 		.proc_handler	= proc_dointvec_jiffies,
6897 	},
6898 	{
6899 		.procname	= "router_solicitation_delay",
6900 		.data		= &ipv6_devconf.rtr_solicit_delay,
6901 		.maxlen		= sizeof(int),
6902 		.mode		= 0644,
6903 		.proc_handler	= proc_dointvec_jiffies,
6904 	},
6905 	{
6906 		.procname	= "force_mld_version",
6907 		.data		= &ipv6_devconf.force_mld_version,
6908 		.maxlen		= sizeof(int),
6909 		.mode		= 0644,
6910 		.proc_handler	= proc_dointvec,
6911 	},
6912 	{
6913 		.procname	= "mldv1_unsolicited_report_interval",
6914 		.data		=
6915 			&ipv6_devconf.mldv1_unsolicited_report_interval,
6916 		.maxlen		= sizeof(int),
6917 		.mode		= 0644,
6918 		.proc_handler	= proc_dointvec_ms_jiffies,
6919 	},
6920 	{
6921 		.procname	= "mldv2_unsolicited_report_interval",
6922 		.data		=
6923 			&ipv6_devconf.mldv2_unsolicited_report_interval,
6924 		.maxlen		= sizeof(int),
6925 		.mode		= 0644,
6926 		.proc_handler	= proc_dointvec_ms_jiffies,
6927 	},
6928 	{
6929 		.procname	= "use_tempaddr",
6930 		.data		= &ipv6_devconf.use_tempaddr,
6931 		.maxlen		= sizeof(int),
6932 		.mode		= 0644,
6933 		.proc_handler	= proc_dointvec,
6934 	},
6935 	{
6936 		.procname	= "temp_valid_lft",
6937 		.data		= &ipv6_devconf.temp_valid_lft,
6938 		.maxlen		= sizeof(int),
6939 		.mode		= 0644,
6940 		.proc_handler	= proc_dointvec,
6941 	},
6942 	{
6943 		.procname	= "temp_prefered_lft",
6944 		.data		= &ipv6_devconf.temp_prefered_lft,
6945 		.maxlen		= sizeof(int),
6946 		.mode		= 0644,
6947 		.proc_handler	= proc_dointvec,
6948 	},
6949 	{
6950 		.procname       = "regen_min_advance",
6951 		.data           = &ipv6_devconf.regen_min_advance,
6952 		.maxlen         = sizeof(int),
6953 		.mode           = 0644,
6954 		.proc_handler   = proc_dointvec,
6955 	},
6956 	{
6957 		.procname	= "regen_max_retry",
6958 		.data		= &ipv6_devconf.regen_max_retry,
6959 		.maxlen		= sizeof(int),
6960 		.mode		= 0644,
6961 		.proc_handler	= proc_dointvec,
6962 	},
6963 	{
6964 		.procname	= "max_desync_factor",
6965 		.data		= &ipv6_devconf.max_desync_factor,
6966 		.maxlen		= sizeof(int),
6967 		.mode		= 0644,
6968 		.proc_handler	= proc_dointvec,
6969 	},
6970 	{
6971 		.procname	= "max_addresses",
6972 		.data		= &ipv6_devconf.max_addresses,
6973 		.maxlen		= sizeof(int),
6974 		.mode		= 0644,
6975 		.proc_handler	= proc_dointvec,
6976 	},
6977 	{
6978 		.procname	= "accept_ra_defrtr",
6979 		.data		= &ipv6_devconf.accept_ra_defrtr,
6980 		.maxlen		= sizeof(int),
6981 		.mode		= 0644,
6982 		.proc_handler	= proc_dointvec,
6983 	},
6984 	{
6985 		.procname	= "ra_defrtr_metric",
6986 		.data		= &ipv6_devconf.ra_defrtr_metric,
6987 		.maxlen		= sizeof(u32),
6988 		.mode		= 0644,
6989 		.proc_handler	= proc_douintvec_minmax,
6990 		.extra1		= (void *)SYSCTL_ONE,
6991 	},
6992 	{
6993 		.procname	= "accept_ra_min_hop_limit",
6994 		.data		= &ipv6_devconf.accept_ra_min_hop_limit,
6995 		.maxlen		= sizeof(int),
6996 		.mode		= 0644,
6997 		.proc_handler	= proc_dointvec,
6998 	},
6999 	{
7000 		.procname	= "accept_ra_min_lft",
7001 		.data		= &ipv6_devconf.accept_ra_min_lft,
7002 		.maxlen		= sizeof(int),
7003 		.mode		= 0644,
7004 		.proc_handler	= proc_dointvec,
7005 	},
7006 	{
7007 		.procname	= "accept_ra_pinfo",
7008 		.data		= &ipv6_devconf.accept_ra_pinfo,
7009 		.maxlen		= sizeof(int),
7010 		.mode		= 0644,
7011 		.proc_handler	= proc_dointvec,
7012 	},
7013 	{
7014 		.procname	= "ra_honor_pio_life",
7015 		.data		= &ipv6_devconf.ra_honor_pio_life,
7016 		.maxlen		= sizeof(u8),
7017 		.mode		= 0644,
7018 		.proc_handler	= proc_dou8vec_minmax,
7019 		.extra1		= SYSCTL_ZERO,
7020 		.extra2		= SYSCTL_ONE,
7021 	},
7022 	{
7023 		.procname	= "ra_honor_pio_pflag",
7024 		.data		= &ipv6_devconf.ra_honor_pio_pflag,
7025 		.maxlen		= sizeof(u8),
7026 		.mode		= 0644,
7027 		.proc_handler	= proc_dou8vec_minmax,
7028 		.extra1		= SYSCTL_ZERO,
7029 		.extra2		= SYSCTL_ONE,
7030 	},
7031 #ifdef CONFIG_IPV6_ROUTER_PREF
7032 	{
7033 		.procname	= "accept_ra_rtr_pref",
7034 		.data		= &ipv6_devconf.accept_ra_rtr_pref,
7035 		.maxlen		= sizeof(int),
7036 		.mode		= 0644,
7037 		.proc_handler	= proc_dointvec,
7038 	},
7039 	{
7040 		.procname	= "router_probe_interval",
7041 		.data		= &ipv6_devconf.rtr_probe_interval,
7042 		.maxlen		= sizeof(int),
7043 		.mode		= 0644,
7044 		.proc_handler	= proc_dointvec_jiffies,
7045 	},
7046 #ifdef CONFIG_IPV6_ROUTE_INFO
7047 	{
7048 		.procname	= "accept_ra_rt_info_min_plen",
7049 		.data		= &ipv6_devconf.accept_ra_rt_info_min_plen,
7050 		.maxlen		= sizeof(int),
7051 		.mode		= 0644,
7052 		.proc_handler	= proc_dointvec,
7053 	},
7054 	{
7055 		.procname	= "accept_ra_rt_info_max_plen",
7056 		.data		= &ipv6_devconf.accept_ra_rt_info_max_plen,
7057 		.maxlen		= sizeof(int),
7058 		.mode		= 0644,
7059 		.proc_handler	= proc_dointvec,
7060 	},
7061 #endif
7062 #endif
7063 	{
7064 		.procname	= "proxy_ndp",
7065 		.data		= &ipv6_devconf.proxy_ndp,
7066 		.maxlen		= sizeof(int),
7067 		.mode		= 0644,
7068 		.proc_handler	= addrconf_sysctl_proxy_ndp,
7069 	},
7070 	{
7071 		.procname	= "accept_source_route",
7072 		.data		= &ipv6_devconf.accept_source_route,
7073 		.maxlen		= sizeof(int),
7074 		.mode		= 0644,
7075 		.proc_handler	= proc_dointvec,
7076 	},
7077 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
7078 	{
7079 		.procname	= "optimistic_dad",
7080 		.data		= &ipv6_devconf.optimistic_dad,
7081 		.maxlen		= sizeof(int),
7082 		.mode		= 0644,
7083 		.proc_handler   = proc_dointvec,
7084 	},
7085 	{
7086 		.procname	= "use_optimistic",
7087 		.data		= &ipv6_devconf.use_optimistic,
7088 		.maxlen		= sizeof(int),
7089 		.mode		= 0644,
7090 		.proc_handler	= proc_dointvec,
7091 	},
7092 #endif
7093 #ifdef CONFIG_IPV6_MROUTE
7094 	{
7095 		.procname	= "mc_forwarding",
7096 		.data		= &ipv6_devconf.mc_forwarding,
7097 		.maxlen		= sizeof(int),
7098 		.mode		= 0444,
7099 		.proc_handler	= proc_dointvec,
7100 	},
7101 #endif
7102 	{
7103 		.procname	= "disable_ipv6",
7104 		.data		= &ipv6_devconf.disable_ipv6,
7105 		.maxlen		= sizeof(int),
7106 		.mode		= 0644,
7107 		.proc_handler	= addrconf_sysctl_disable,
7108 	},
7109 	{
7110 		.procname	= "accept_dad",
7111 		.data		= &ipv6_devconf.accept_dad,
7112 		.maxlen		= sizeof(int),
7113 		.mode		= 0644,
7114 		.proc_handler	= proc_dointvec,
7115 	},
7116 	{
7117 		.procname	= "force_tllao",
7118 		.data		= &ipv6_devconf.force_tllao,
7119 		.maxlen		= sizeof(int),
7120 		.mode		= 0644,
7121 		.proc_handler	= proc_dointvec
7122 	},
7123 	{
7124 		.procname	= "ndisc_notify",
7125 		.data		= &ipv6_devconf.ndisc_notify,
7126 		.maxlen		= sizeof(int),
7127 		.mode		= 0644,
7128 		.proc_handler	= proc_dointvec
7129 	},
7130 	{
7131 		.procname	= "suppress_frag_ndisc",
7132 		.data		= &ipv6_devconf.suppress_frag_ndisc,
7133 		.maxlen		= sizeof(int),
7134 		.mode		= 0644,
7135 		.proc_handler	= proc_dointvec
7136 	},
7137 	{
7138 		.procname	= "accept_ra_from_local",
7139 		.data		= &ipv6_devconf.accept_ra_from_local,
7140 		.maxlen		= sizeof(int),
7141 		.mode		= 0644,
7142 		.proc_handler	= proc_dointvec,
7143 	},
7144 	{
7145 		.procname	= "accept_ra_mtu",
7146 		.data		= &ipv6_devconf.accept_ra_mtu,
7147 		.maxlen		= sizeof(int),
7148 		.mode		= 0644,
7149 		.proc_handler	= proc_dointvec,
7150 	},
7151 	{
7152 		.procname	= "stable_secret",
7153 		.data		= &ipv6_devconf.stable_secret,
7154 		.maxlen		= IPV6_MAX_STRLEN,
7155 		.mode		= 0600,
7156 		.proc_handler	= addrconf_sysctl_stable_secret,
7157 	},
7158 	{
7159 		.procname	= "use_oif_addrs_only",
7160 		.data		= &ipv6_devconf.use_oif_addrs_only,
7161 		.maxlen		= sizeof(int),
7162 		.mode		= 0644,
7163 		.proc_handler	= proc_dointvec,
7164 	},
7165 	{
7166 		.procname	= "ignore_routes_with_linkdown",
7167 		.data		= &ipv6_devconf.ignore_routes_with_linkdown,
7168 		.maxlen		= sizeof(int),
7169 		.mode		= 0644,
7170 		.proc_handler	= addrconf_sysctl_ignore_routes_with_linkdown,
7171 	},
7172 	{
7173 		.procname	= "drop_unicast_in_l2_multicast",
7174 		.data		= &ipv6_devconf.drop_unicast_in_l2_multicast,
7175 		.maxlen		= sizeof(int),
7176 		.mode		= 0644,
7177 		.proc_handler	= proc_dointvec,
7178 	},
7179 	{
7180 		.procname	= "drop_unsolicited_na",
7181 		.data		= &ipv6_devconf.drop_unsolicited_na,
7182 		.maxlen		= sizeof(int),
7183 		.mode		= 0644,
7184 		.proc_handler	= proc_dointvec,
7185 	},
7186 	{
7187 		.procname	= "keep_addr_on_down",
7188 		.data		= &ipv6_devconf.keep_addr_on_down,
7189 		.maxlen		= sizeof(int),
7190 		.mode		= 0644,
7191 		.proc_handler	= proc_dointvec,
7192 
7193 	},
7194 	{
7195 		.procname	= "seg6_enabled",
7196 		.data		= &ipv6_devconf.seg6_enabled,
7197 		.maxlen		= sizeof(int),
7198 		.mode		= 0644,
7199 		.proc_handler	= proc_dointvec,
7200 	},
7201 #ifdef CONFIG_IPV6_SEG6_HMAC
7202 	{
7203 		.procname	= "seg6_require_hmac",
7204 		.data		= &ipv6_devconf.seg6_require_hmac,
7205 		.maxlen		= sizeof(int),
7206 		.mode		= 0644,
7207 		.proc_handler	= proc_dointvec,
7208 	},
7209 #endif
7210 	{
7211 		.procname       = "enhanced_dad",
7212 		.data           = &ipv6_devconf.enhanced_dad,
7213 		.maxlen         = sizeof(int),
7214 		.mode           = 0644,
7215 		.proc_handler   = proc_dointvec,
7216 	},
7217 	{
7218 		.procname	= "addr_gen_mode",
7219 		.data		= &ipv6_devconf.addr_gen_mode,
7220 		.maxlen		= sizeof(int),
7221 		.mode		= 0644,
7222 		.proc_handler	= addrconf_sysctl_addr_gen_mode,
7223 	},
7224 	{
7225 		.procname       = "disable_policy",
7226 		.data           = &ipv6_devconf.disable_policy,
7227 		.maxlen         = sizeof(int),
7228 		.mode           = 0644,
7229 		.proc_handler   = addrconf_sysctl_disable_policy,
7230 	},
7231 	{
7232 		.procname	= "ndisc_tclass",
7233 		.data		= &ipv6_devconf.ndisc_tclass,
7234 		.maxlen		= sizeof(int),
7235 		.mode		= 0644,
7236 		.proc_handler	= proc_dointvec_minmax,
7237 		.extra1		= (void *)SYSCTL_ZERO,
7238 		.extra2		= (void *)&two_five_five,
7239 	},
7240 	{
7241 		.procname	= "rpl_seg_enabled",
7242 		.data		= &ipv6_devconf.rpl_seg_enabled,
7243 		.maxlen		= sizeof(int),
7244 		.mode		= 0644,
7245 		.proc_handler   = proc_dointvec_minmax,
7246 		.extra1         = SYSCTL_ZERO,
7247 		.extra2         = SYSCTL_ONE,
7248 	},
7249 	{
7250 		.procname	= "ioam6_enabled",
7251 		.data		= &ipv6_devconf.ioam6_enabled,
7252 		.maxlen		= sizeof(u8),
7253 		.mode		= 0644,
7254 		.proc_handler	= proc_dou8vec_minmax,
7255 		.extra1		= (void *)SYSCTL_ZERO,
7256 		.extra2		= (void *)SYSCTL_ONE,
7257 	},
7258 	{
7259 		.procname	= "ioam6_id",
7260 		.data		= &ipv6_devconf.ioam6_id,
7261 		.maxlen		= sizeof(u32),
7262 		.mode		= 0644,
7263 		.proc_handler	= proc_douintvec_minmax,
7264 		.extra1		= (void *)SYSCTL_ZERO,
7265 		.extra2		= (void *)&ioam6_if_id_max,
7266 	},
7267 	{
7268 		.procname	= "ioam6_id_wide",
7269 		.data		= &ipv6_devconf.ioam6_id_wide,
7270 		.maxlen		= sizeof(u32),
7271 		.mode		= 0644,
7272 		.proc_handler	= proc_douintvec,
7273 	},
7274 	{
7275 		.procname	= "ndisc_evict_nocarrier",
7276 		.data		= &ipv6_devconf.ndisc_evict_nocarrier,
7277 		.maxlen		= sizeof(u8),
7278 		.mode		= 0644,
7279 		.proc_handler	= proc_dou8vec_minmax,
7280 		.extra1		= (void *)SYSCTL_ZERO,
7281 		.extra2		= (void *)SYSCTL_ONE,
7282 	},
7283 	{
7284 		.procname	= "accept_untracked_na",
7285 		.data		= &ipv6_devconf.accept_untracked_na,
7286 		.maxlen		= sizeof(int),
7287 		.mode		= 0644,
7288 		.proc_handler	= proc_dointvec_minmax,
7289 		.extra1		= SYSCTL_ZERO,
7290 		.extra2		= SYSCTL_TWO,
7291 	},
7292 	{
7293 		.procname	= "force_forwarding",
7294 		.data		= &ipv6_devconf.force_forwarding,
7295 		.maxlen		= sizeof(int),
7296 		.mode		= 0644,
7297 		.proc_handler	= addrconf_sysctl_force_forwarding,
7298 	},
7299 };
7300 
__addrconf_sysctl_register(struct net * net,char * dev_name,struct inet6_dev * idev,struct ipv6_devconf * p)7301 static int __addrconf_sysctl_register(struct net *net, char *dev_name,
7302 		struct inet6_dev *idev, struct ipv6_devconf *p)
7303 {
7304 	size_t table_size = ARRAY_SIZE(addrconf_sysctl);
7305 	int i, ifindex;
7306 	struct ctl_table *table;
7307 	char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
7308 
7309 	table = kmemdup(addrconf_sysctl, sizeof(addrconf_sysctl), GFP_KERNEL_ACCOUNT);
7310 	if (!table)
7311 		goto out;
7312 
7313 	for (i = 0; i < table_size; i++) {
7314 		table[i].data += (char *)p - (char *)&ipv6_devconf;
7315 		/* If one of these is already set, then it is not safe to
7316 		 * overwrite either of them: this makes proc_dointvec_minmax
7317 		 * usable.
7318 		 */
7319 		if (!table[i].extra1 && !table[i].extra2) {
7320 			table[i].extra1 = idev; /* embedded; no ref */
7321 			table[i].extra2 = net;
7322 		}
7323 	}
7324 
7325 	snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name);
7326 
7327 	p->sysctl_header = register_net_sysctl_sz(net, path, table,
7328 						  table_size);
7329 	if (!p->sysctl_header)
7330 		goto free;
7331 
7332 	if (!strcmp(dev_name, "all"))
7333 		ifindex = NETCONFA_IFINDEX_ALL;
7334 	else if (!strcmp(dev_name, "default"))
7335 		ifindex = NETCONFA_IFINDEX_DEFAULT;
7336 	else
7337 		ifindex = idev->dev->ifindex;
7338 	inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL,
7339 				     ifindex, p);
7340 	return 0;
7341 
7342 free:
7343 	kfree(table);
7344 out:
7345 	return -ENOBUFS;
7346 }
7347 
__addrconf_sysctl_unregister(struct net * net,struct ipv6_devconf * p,int ifindex)7348 static void __addrconf_sysctl_unregister(struct net *net,
7349 					 struct ipv6_devconf *p, int ifindex)
7350 {
7351 	const struct ctl_table *table;
7352 
7353 	if (!p->sysctl_header)
7354 		return;
7355 
7356 	table = p->sysctl_header->ctl_table_arg;
7357 	unregister_net_sysctl_table(p->sysctl_header);
7358 	p->sysctl_header = NULL;
7359 	kfree(table);
7360 
7361 	inet6_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL);
7362 }
7363 
addrconf_sysctl_register(struct inet6_dev * idev)7364 static int addrconf_sysctl_register(struct inet6_dev *idev)
7365 {
7366 	int err;
7367 
7368 	if (!sysctl_dev_name_is_allowed(idev->dev->name))
7369 		return -EINVAL;
7370 
7371 	err = neigh_sysctl_register(idev->dev, idev->nd_parms,
7372 				    &ndisc_ifinfo_sysctl_change);
7373 	if (err)
7374 		return err;
7375 	err = __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
7376 					 idev, &idev->cnf);
7377 	if (err)
7378 		neigh_sysctl_unregister(idev->nd_parms);
7379 
7380 	return err;
7381 }
7382 
/* Inverse of addrconf_sysctl_register(): drop the addrconf sysctl table
 * and then the neighbour-discovery sysctls for @idev.
 */
static void addrconf_sysctl_unregister(struct inet6_dev *idev)
{
	__addrconf_sysctl_unregister(dev_net(idev->dev), &idev->cnf,
				     idev->dev->ifindex);
	neigh_sysctl_unregister(idev->nd_parms);
}
7389 
7390 
7391 #endif
7392 
/* Per-namespace init: allocate the IPv6 address hash table and this
 * netns's private "all"/"default" devconf copies (optionally inherited
 * from init_net or the creating netns), then expose them via sysctl.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated here is unwound via the goto chain below.
 */
static int __net_init addrconf_init_net(struct net *net)
{
	int err = -ENOMEM;
	struct ipv6_devconf *all, *dflt;

	spin_lock_init(&net->ipv6.addrconf_hash_lock);
	INIT_DEFERRABLE_WORK(&net->ipv6.addr_chk_work, addrconf_verify_work);
	net->ipv6.inet6_addr_lst = kzalloc_objs(struct hlist_head,
						IN6_ADDR_HSIZE);
	if (!net->ipv6.inet6_addr_lst)
		goto err_alloc_addr;

	/* Start from the compiled-in template values... */
	all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
	if (!all)
		goto err_alloc_all;

	dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
	if (!dflt)
		goto err_alloc_dflt;

	/* ...then optionally overwrite them with the values of another
	 * namespace, as selected by net_inherit_devconf().
	 */
	if (!net_eq(net, &init_net)) {
		switch (net_inherit_devconf()) {
		case 1:  /* copy from init_net */
			memcpy(all, init_net.ipv6.devconf_all,
			       sizeof(ipv6_devconf));
			memcpy(dflt, init_net.ipv6.devconf_dflt,
			       sizeof(ipv6_devconf_dflt));
			break;
		case 3: /* copy from the current netns */
			memcpy(all, current->nsproxy->net_ns->ipv6.devconf_all,
			       sizeof(ipv6_devconf));
			memcpy(dflt,
			       current->nsproxy->net_ns->ipv6.devconf_dflt,
			       sizeof(ipv6_devconf_dflt));
			break;
		case 0:
		case 2:
			/* use compiled values */
			break;
		}
	}

	/* these will be inherited by all namespaces */
	dflt->autoconf = ipv6_defaults.autoconf;
	dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;

	/* The stable secret is per-namespace; never inherit it. */
	dflt->stable_secret.initialized = false;
	all->stable_secret.initialized = false;

	net->ipv6.devconf_all = all;
	net->ipv6.devconf_dflt = dflt;

#ifdef CONFIG_SYSCTL
	err = __addrconf_sysctl_register(net, "all", NULL, all);
	if (err < 0)
		goto err_reg_all;

	err = __addrconf_sysctl_register(net, "default", NULL, dflt);
	if (err < 0)
		goto err_reg_dflt;
#endif
	return 0;

#ifdef CONFIG_SYSCTL
err_reg_dflt:
	__addrconf_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
err_reg_all:
	kfree(dflt);
	net->ipv6.devconf_dflt = NULL;
#endif
err_alloc_dflt:
	kfree(all);
	net->ipv6.devconf_all = NULL;
err_alloc_all:
	kfree(net->ipv6.inet6_addr_lst);
err_alloc_addr:
	return err;
}
7471 
/* Per-namespace teardown: unregister the sysctl tables, free the
 * devconf copies, and release the (by now empty) address hash table.
 */
static void __net_exit addrconf_exit_net(struct net *net)
{
	int i;

#ifdef CONFIG_SYSCTL
	/* Drop the sysctl trees first so no handler can touch the conf
	 * structures once they are freed below.
	 */
	__addrconf_sysctl_unregister(net, net->ipv6.devconf_dflt,
				     NETCONFA_IFINDEX_DEFAULT);
	__addrconf_sysctl_unregister(net, net->ipv6.devconf_all,
				     NETCONFA_IFINDEX_ALL);
#endif
	kfree(net->ipv6.devconf_dflt);
	net->ipv6.devconf_dflt = NULL;
	kfree(net->ipv6.devconf_all);
	net->ipv6.devconf_all = NULL;

	/* Make sure the deferred address-verification work is idle
	 * before the hash table it walks is freed.
	 */
	cancel_delayed_work_sync(&net->ipv6.addr_chk_work);
	/*
	 *	Check hash table, then free it.
	 */
	for (i = 0; i < IN6_ADDR_HSIZE; i++)
		WARN_ON_ONCE(!hlist_empty(&net->ipv6.inet6_addr_lst[i]));

	kfree(net->ipv6.inet6_addr_lst);
	net->ipv6.inet6_addr_lst = NULL;
}
7497 
/* Per-network-namespace lifecycle hooks for IPv6 address configuration. */
static struct pernet_operations addrconf_ops = {
	.init = addrconf_init_net,
	.exit = addrconf_exit_net,
};
7502 
/* AF_INET6 hooks for filling/validating/applying IFLA_AF_SPEC data in
 * rtnetlink link messages.
 */
static struct rtnl_af_ops inet6_ops __read_mostly = {
	.family		  = AF_INET6,
	.fill_link_af	  = inet6_fill_link_af,
	.get_link_af_size = inet6_get_link_af_size,
	.validate_link_af = inet6_validate_link_af,
	.set_link_af	  = inet6_set_link_af,
};
7510 
/* rtnetlink message handlers registered by addrconf_init(); the flags
 * select per-netns dispatch or lock-free (RTNL-less) doit/dumpit paths.
 */
static const struct rtnl_msg_handler addrconf_rtnl_msg_handlers[] __initconst_or_module = {
	{.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETLINK,
	 .dumpit = inet6_dump_ifinfo, .flags = RTNL_FLAG_DUMP_UNLOCKED},
	{.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_NEWADDR,
	 .doit = inet6_rtm_newaddr, .flags = RTNL_FLAG_DOIT_PERNET},
	{.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_DELADDR,
	 .doit = inet6_rtm_deladdr, .flags = RTNL_FLAG_DOIT_PERNET},
	{.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETADDR,
	 .doit = inet6_rtm_getaddr, .dumpit = inet6_dump_ifaddr,
	 .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED},
	{.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETMULTICAST,
	 .dumpit = inet6_dump_ifmcaddr,
	 .flags = RTNL_FLAG_DUMP_UNLOCKED},
	{.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETANYCAST,
	 .dumpit = inet6_dump_ifacaddr,
	 .flags = RTNL_FLAG_DUMP_UNLOCKED},
	{.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETNETCONF,
	 .doit = inet6_netconf_get_devconf, .dumpit = inet6_netconf_dump_devconf,
	 .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED},
};
7531 
7532 /*
7533  *	Init / cleanup code
7534  */
7535 
/* Subsystem init: set up address labels, the pernet state, the addrconf
 * workqueue, the blackhole device's inet6_dev, and all netlink handlers.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * already set up is unwound in reverse order via the goto chain.
 */
int __init addrconf_init(void)
{
	struct inet6_dev *idev;
	int err;

	err = ipv6_addr_label_init();
	if (err < 0) {
		pr_crit("%s: cannot initialize default policy table: %d\n",
			__func__, err);
		goto out;
	}

	err = register_pernet_subsys(&addrconf_ops);
	if (err < 0)
		goto out_addrlabel;

	/* All works using addrconf_wq need to lock rtnl. */
	addrconf_wq = create_singlethread_workqueue("ipv6_addrconf");
	if (!addrconf_wq) {
		err = -ENOMEM;
		goto out_nowq;
	}

	rtnl_net_lock(&init_net);
	idev = ipv6_add_dev(blackhole_netdev);
	rtnl_net_unlock(&init_net);
	if (IS_ERR(idev)) {
		err = PTR_ERR(idev);
		goto errlo;
	}

	ip6_route_init_special_entries();

	register_netdevice_notifier(&ipv6_dev_notf);

	/* Kick off the periodic address lifetime verification. */
	addrconf_verify(&init_net);

	err = rtnl_af_register(&inet6_ops);
	if (err)
		goto erraf;

	err = rtnl_register_many(addrconf_rtnl_msg_handlers);
	if (err)
		goto errout;

	err = ipv6_addr_label_rtnl_register();
	if (err < 0)
		goto errout;

	return 0;
errout:
	rtnl_unregister_all(PF_INET6);
	rtnl_af_unregister(&inet6_ops);
erraf:
	unregister_netdevice_notifier(&ipv6_dev_notf);
errlo:
	destroy_workqueue(addrconf_wq);
out_nowq:
	unregister_pernet_subsys(&addrconf_ops);
out_addrlabel:
	ipv6_addr_label_cleanup();
out:
	return err;
}
7600 
/* Subsystem teardown: reverse of addrconf_init().  Unregisters the
 * notifiers and netlink hooks, then (under the init_net RTNL lock)
 * brings down every device that still has IPv6 state.
 */
void addrconf_cleanup(void)
{
	struct net_device *dev;

	unregister_netdevice_notifier(&ipv6_dev_notf);
	unregister_pernet_subsys(&addrconf_ops);
	ipv6_addr_label_cleanup();

	rtnl_af_unregister(&inet6_ops);

	rtnl_net_lock(&init_net);

	/* clean dev list */
	for_each_netdev(&init_net, dev) {
		if (!__in6_dev_get_rtnl_net(dev))
			continue;
		addrconf_ifdown(dev, true);
	}
	/* loopback is handled last; other devices may still reference it. */
	addrconf_ifdown(init_net.loopback_dev, true);

	rtnl_net_unlock(&init_net);

	destroy_workqueue(addrconf_wq);
}
7625