xref: /linux/net/ipv4/devinet.c (revision d69eb204c255c35abd9e8cb621484e8074c75eaa)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	NET3	IP device support routines.
4  *
5  *	Derived from the IP parts of dev.c 1.0.19
6  * 		Authors:	Ross Biro
7  *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
8  *				Mark Evans, <evansmp@uhura.aston.ac.uk>
9  *
10  *	Additional Authors:
11  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
12  *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
13  *
14  *	Changes:
15  *		Alexey Kuznetsov:	pa_* fields are replaced with ifaddr
16  *					lists.
17  *		Cyrus Durgin:		updated for kmod
18  *		Matthias Andree:	in devinet_ioctl, compare label and
19  *					address (4.4BSD alias style support),
20  *					fall back to comparing just the label
21  *					if no match found.
22  */
23 
24 
25 #include <linux/uaccess.h>
26 #include <linux/bitops.h>
27 #include <linux/capability.h>
28 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/kernel.h>
31 #include <linux/sched/signal.h>
32 #include <linux/string.h>
33 #include <linux/mm.h>
34 #include <linux/socket.h>
35 #include <linux/sockios.h>
36 #include <linux/in.h>
37 #include <linux/errno.h>
38 #include <linux/interrupt.h>
39 #include <linux/if_addr.h>
40 #include <linux/if_ether.h>
41 #include <linux/inet.h>
42 #include <linux/netdevice.h>
43 #include <linux/etherdevice.h>
44 #include <linux/skbuff.h>
45 #include <linux/init.h>
46 #include <linux/notifier.h>
47 #include <linux/inetdevice.h>
48 #include <linux/igmp.h>
49 #include "igmp_internal.h"
50 #include <linux/slab.h>
51 #include <linux/hash.h>
52 #ifdef CONFIG_SYSCTL
53 #include <linux/sysctl.h>
54 #endif
55 #include <linux/kmod.h>
56 #include <linux/netconf.h>
57 
58 #include <net/arp.h>
59 #include <net/ip.h>
60 #include <net/route.h>
61 #include <net/ip_fib.h>
62 #include <net/rtnetlink.h>
63 #include <net/net_namespace.h>
64 #include <net/addrconf.h>
65 
66 #define IPV6ONLY_FLAGS	\
67 		(IFA_F_NODAD | IFA_F_OPTIMISTIC | IFA_F_DADFAILED | \
68 		 IFA_F_HOMEADDRESS | IFA_F_TENTATIVE | \
69 		 IFA_F_MANAGETEMPADDR | IFA_F_STABLE_PRIVACY)
70 
71 static struct ipv4_devconf ipv4_devconf = {
72 	.data = {
73 		[IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
74 		[IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
75 		[IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
76 		[IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
77 		[IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
78 		[IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] =  1000 /*ms*/,
79 		[IPV4_DEVCONF_ARP_EVICT_NOCARRIER - 1] = 1,
80 	},
81 };
82 
83 static struct ipv4_devconf ipv4_devconf_dflt = {
84 	.data = {
85 		[IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
86 		[IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
87 		[IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
88 		[IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
89 		[IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1,
90 		[IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
91 		[IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] =  1000 /*ms*/,
92 		[IPV4_DEVCONF_ARP_EVICT_NOCARRIER - 1] = 1,
93 	},
94 };
95 
96 #define IPV4_DEVCONF_DFLT(net, attr) \
97 	IPV4_DEVCONF((*net->ipv4.devconf_dflt), attr)
98 
99 static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
100 	[IFA_LOCAL]     	= { .type = NLA_U32 },
101 	[IFA_ADDRESS]   	= { .type = NLA_U32 },
102 	[IFA_BROADCAST] 	= { .type = NLA_U32 },
103 	[IFA_LABEL]     	= { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
104 	[IFA_CACHEINFO]		= { .len = sizeof(struct ifa_cacheinfo) },
105 	[IFA_FLAGS]		= { .type = NLA_U32 },
106 	[IFA_RT_PRIORITY]	= { .type = NLA_U32 },
107 	[IFA_TARGET_NETNSID]	= { .type = NLA_S32 },
108 	[IFA_PROTO]		= { .type = NLA_U8 },
109 };
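
/* Usage sketch: the policy above validates the attributes of RTM_NEWADDR,
 * RTM_DELADDR and address dump requests.  For instance, a request such as
 * the one "ip addr add 192.0.2.1/24 dev eth0" would generate is expected to
 * carry roughly:
 *
 *	struct ifaddrmsg { .ifa_family = AF_INET, .ifa_prefixlen = 24,
 *			   .ifa_index = <ifindex of eth0>, ... }
 *	IFA_LOCAL   = 192.0.2.1
 *	IFA_ADDRESS = 192.0.2.1	(the peer address on point-to-point links)
 *	IFA_BROADCAST, IFA_LABEL, IFA_CACHEINFO, ...	(optional)
 *
 * inet_validate_rtm() and inet_rtm_to_ifa() below turn such a message into
 * an in_ifaddr; the exact attribute set is up to the sender.
 */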
110 
111 #define IN4_ADDR_HSIZE_SHIFT	8
112 #define IN4_ADDR_HSIZE		(1U << IN4_ADDR_HSIZE_SHIFT)
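
/* Every IPv4 address is also linked into a per-netns hash table with
 * IN4_ADDR_HSIZE (256) buckets, keyed by its local address (see
 * inet_hash_insert() below), so that address-ownership lookups such as
 * inet_lookup_ifaddr_rcu() and __ip_dev_find() do not need to walk every
 * device in the namespace.
 */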
113 
114 static u32 inet_addr_hash(const struct net *net, __be32 addr)
115 {
116 	u32 val = __ipv4_addr_hash(addr, net_hash_mix(net));
117 
118 	return hash_32(val, IN4_ADDR_HSIZE_SHIFT);
119 }
120 
121 static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
122 {
123 	u32 hash = inet_addr_hash(net, ifa->ifa_local);
124 
125 	ASSERT_RTNL();
126 	hlist_add_head_rcu(&ifa->addr_lst, &net->ipv4.inet_addr_lst[hash]);
127 }
128 
129 static void inet_hash_remove(struct in_ifaddr *ifa)
130 {
131 	ASSERT_RTNL();
132 	hlist_del_init_rcu(&ifa->addr_lst);
133 }
134 
135 /**
136  * __ip_dev_find - find the first device with a given source address.
137  * @net: the net namespace
138  * @addr: the source address
139  * @devref: if true, take a reference on the found device
140  *
141  * If a caller uses devref=false, it should be protected by RCU, or RTNL
142  */
143 struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
144 {
145 	struct net_device *result = NULL;
146 	struct in_ifaddr *ifa;
147 
148 	rcu_read_lock();
149 	ifa = inet_lookup_ifaddr_rcu(net, addr);
150 	if (!ifa) {
151 		struct flowi4 fl4 = { .daddr = addr };
152 		struct fib_result res = { 0 };
153 		struct fib_table *local;
154 
155 		/* Fallback to FIB local table so that communication
156 		 * over loopback subnets works.
157 		 */
158 		local = fib_get_table(net, RT_TABLE_LOCAL);
159 		if (local &&
160 		    !fib_table_lookup(local, &fl4, &res, FIB_LOOKUP_NOREF) &&
161 		    res.type == RTN_LOCAL)
162 			result = FIB_RES_DEV(res);
163 	} else {
164 		result = ifa->ifa_dev->dev;
165 	}
166 	if (result && devref)
167 		dev_hold(result);
168 	rcu_read_unlock();
169 	return result;
170 }
171 EXPORT_SYMBOL(__ip_dev_find);
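
/* Usage sketch: a caller that only needs the device briefly can pass
 * devref=false and rely on RCU, e.g.
 *
 *	rcu_read_lock();
 *	dev = __ip_dev_find(net, addr, false);
 *	if (dev)
 *		mtu = READ_ONCE(dev->mtu);
 *	rcu_read_unlock();
 *
 * With devref=true the returned device is held and must later be released
 * with dev_put().
 */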
172 
173 /* called under RCU lock */
174 struct in_ifaddr *inet_lookup_ifaddr_rcu(struct net *net, __be32 addr)
175 {
176 	u32 hash = inet_addr_hash(net, addr);
177 	struct in_ifaddr *ifa;
178 
179 	hlist_for_each_entry_rcu(ifa, &net->ipv4.inet_addr_lst[hash], addr_lst)
180 		if (ifa->ifa_local == addr)
181 			return ifa;
182 
183 	return NULL;
184 }
185 
186 static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32);
187 
188 static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
189 static BLOCKING_NOTIFIER_HEAD(inetaddr_validator_chain);
190 static void inet_del_ifa(struct in_device *in_dev,
191 			 struct in_ifaddr __rcu **ifap,
192 			 int destroy);
193 #ifdef CONFIG_SYSCTL
194 static int devinet_sysctl_register(struct in_device *idev);
195 static void devinet_sysctl_unregister(struct in_device *idev);
196 #else
197 static int devinet_sysctl_register(struct in_device *idev)
198 {
199 	return 0;
200 }
201 static void devinet_sysctl_unregister(struct in_device *idev)
202 {
203 }
204 #endif
205 
206 /* Locks all the inet devices. */
207 
208 static struct in_ifaddr *inet_alloc_ifa(struct in_device *in_dev)
209 {
210 	struct in_ifaddr *ifa;
211 
212 	ifa = kzalloc(sizeof(*ifa), GFP_KERNEL_ACCOUNT);
213 	if (!ifa)
214 		return NULL;
215 
216 	in_dev_hold(in_dev);
217 	ifa->ifa_dev = in_dev;
218 
219 	INIT_HLIST_NODE(&ifa->addr_lst);
220 
221 	return ifa;
222 }
223 
224 static void inet_rcu_free_ifa(struct rcu_head *head)
225 {
226 	struct in_ifaddr *ifa = container_of(head, struct in_ifaddr, rcu_head);
227 
228 	in_dev_put(ifa->ifa_dev);
229 	kfree(ifa);
230 }
231 
232 static void inet_free_ifa(struct in_ifaddr *ifa)
233 {
234 	/* Our reference to ifa->ifa_dev must be freed ASAP
235 	 * to release the reference to the netdev the same way.
236 	 * in_dev_put() -> in_dev_finish_destroy() -> netdev_put()
237 	 */
238 	call_rcu_hurry(&ifa->rcu_head, inet_rcu_free_ifa);
239 }
240 
241 static void in_dev_free_rcu(struct rcu_head *head)
242 {
243 	struct in_device *idev = container_of(head, struct in_device, rcu_head);
244 
245 	kfree(rcu_dereference_protected(idev->mc_hash, 1));
246 	kfree(idev);
247 }
248 
249 void in_dev_finish_destroy(struct in_device *idev)
250 {
251 	struct net_device *dev = idev->dev;
252 
253 	WARN_ON(idev->ifa_list);
254 	WARN_ON(idev->mc_list);
255 #ifdef NET_REFCNT_DEBUG
256 	pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL");
257 #endif
258 	netdev_put(dev, &idev->dev_tracker);
259 	if (!idev->dead)
260 		pr_err("Freeing alive in_device %p\n", idev);
261 	else
262 		call_rcu(&idev->rcu_head, in_dev_free_rcu);
263 }
264 EXPORT_SYMBOL(in_dev_finish_destroy);
265 
266 static struct in_device *inetdev_init(struct net_device *dev)
267 {
268 	struct in_device *in_dev;
269 	int err = -ENOMEM;
270 
271 	ASSERT_RTNL();
272 
273 	in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL);
274 	if (!in_dev)
275 		goto out;
276 	memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt,
277 			sizeof(in_dev->cnf));
278 	in_dev->cnf.sysctl = NULL;
279 	in_dev->dev = dev;
280 	in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl);
281 	if (!in_dev->arp_parms)
282 		goto out_kfree;
283 	if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
284 		netif_disable_lro(dev);
285 	/* Reference in_dev->dev */
286 	netdev_hold(dev, &in_dev->dev_tracker, GFP_KERNEL);
287 	/* Account for reference dev->ip_ptr (below) */
288 	refcount_set(&in_dev->refcnt, 1);
289 
290 	if (dev != blackhole_netdev) {
291 		err = devinet_sysctl_register(in_dev);
292 		if (err) {
293 			in_dev->dead = 1;
294 			neigh_parms_release(&arp_tbl, in_dev->arp_parms);
295 			in_dev_put(in_dev);
296 			in_dev = NULL;
297 			goto out;
298 		}
299 		ip_mc_init_dev(in_dev);
300 		if (dev->flags & IFF_UP)
301 			ip_mc_up(in_dev);
302 	}
303 
304 	/* we can receive as soon as ip_ptr is set -- do this last */
305 	rcu_assign_pointer(dev->ip_ptr, in_dev);
306 out:
307 	return in_dev ?: ERR_PTR(err);
308 out_kfree:
309 	kfree(in_dev);
310 	in_dev = NULL;
311 	goto out;
312 }
313 
314 static void inetdev_destroy(struct in_device *in_dev)
315 {
316 	struct net_device *dev;
317 	struct in_ifaddr *ifa;
318 
319 	ASSERT_RTNL();
320 
321 	dev = in_dev->dev;
322 
323 	in_dev->dead = 1;
324 
325 	ip_mc_destroy_dev(in_dev);
326 
327 	while ((ifa = rtnl_dereference(in_dev->ifa_list)) != NULL) {
328 		inet_del_ifa(in_dev, &in_dev->ifa_list, 0);
329 		inet_free_ifa(ifa);
330 	}
331 
332 	RCU_INIT_POINTER(dev->ip_ptr, NULL);
333 
334 	devinet_sysctl_unregister(in_dev);
335 	neigh_parms_release(&arp_tbl, in_dev->arp_parms);
336 	arp_ifdown(dev);
337 
338 	in_dev_put(in_dev);
339 }
340 
341 static int __init inet_blackhole_dev_init(void)
342 {
343 	struct in_device *in_dev;
344 
345 	rtnl_lock();
346 	in_dev = inetdev_init(blackhole_netdev);
347 	rtnl_unlock();
348 
349 	return PTR_ERR_OR_ZERO(in_dev);
350 }
351 late_initcall(inet_blackhole_dev_init);
352 
353 int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
354 {
355 	const struct in_ifaddr *ifa;
356 
357 	rcu_read_lock();
358 	in_dev_for_each_ifa_rcu(ifa, in_dev) {
359 		if (inet_ifa_match(a, ifa)) {
360 			if (!b || inet_ifa_match(b, ifa)) {
361 				rcu_read_unlock();
362 				return 1;
363 			}
364 		}
365 	}
366 	rcu_read_unlock();
367 	return 0;
368 }
369 
370 static void __inet_del_ifa(struct in_device *in_dev,
371 			   struct in_ifaddr __rcu **ifap,
372 			   int destroy, struct nlmsghdr *nlh, u32 portid)
373 {
374 	struct in_ifaddr *promote = NULL;
375 	struct in_ifaddr *ifa, *ifa1;
376 	struct in_ifaddr __rcu **last_prim;
377 	struct in_ifaddr *prev_prom = NULL;
378 	int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);
379 
380 	ASSERT_RTNL();
381 
382 	ifa1 = rtnl_dereference(*ifap);
383 	last_prim = ifap;
384 	if (in_dev->dead)
385 		goto no_promotions;
386 
387 	/* 1. Deleting primary ifaddr forces deletion of all secondaries
388 	 * unless alias promotion is set.
389 	 */
390 
391 	if (!(ifa1->ifa_flags & IFA_F_SECONDARY)) {
392 		struct in_ifaddr __rcu **ifap1 = &ifa1->ifa_next;
393 
394 		while ((ifa = rtnl_dereference(*ifap1)) != NULL) {
395 			if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
396 			    ifa1->ifa_scope <= ifa->ifa_scope)
397 				last_prim = &ifa->ifa_next;
398 
399 			if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
400 			    ifa1->ifa_mask != ifa->ifa_mask ||
401 			    !inet_ifa_match(ifa1->ifa_address, ifa)) {
402 				ifap1 = &ifa->ifa_next;
403 				prev_prom = ifa;
404 				continue;
405 			}
406 
407 			if (!do_promote) {
408 				inet_hash_remove(ifa);
409 				*ifap1 = ifa->ifa_next;
410 
411 				rtmsg_ifa(RTM_DELADDR, ifa, nlh, portid);
412 				blocking_notifier_call_chain(&inetaddr_chain,
413 						NETDEV_DOWN, ifa);
414 				inet_free_ifa(ifa);
415 			} else {
416 				promote = ifa;
417 				break;
418 			}
419 		}
420 	}
421 
422 	/* On promotion all secondaries from the subnet are changing
423 	 * the primary IP; we must remove all their routes silently
424 	 * and later add them back with the new prefsrc. Do this
425 	 * while all addresses are on the device list.
426 	 */
427 	for (ifa = promote; ifa; ifa = rtnl_dereference(ifa->ifa_next)) {
428 		if (ifa1->ifa_mask == ifa->ifa_mask &&
429 		    inet_ifa_match(ifa1->ifa_address, ifa))
430 			fib_del_ifaddr(ifa, ifa1);
431 	}
432 
433 no_promotions:
434 	/* 2. Unlink it */
435 
436 	*ifap = ifa1->ifa_next;
437 	inet_hash_remove(ifa1);
438 
439 	/* 3. Announce address deletion */
440 
441 	/* Send message first, then call notifier.
442 	   At first sight, the FIB update triggered by the notifier
443 	   will refer to an already deleted ifaddr, which could confuse
444 	   netlink listeners. It is not true: look, gated sees
445 	   that the route was deleted and, if it still thinks the ifaddr
446 	   is valid, it will try to restore the deleted routes... Grr.
447 	   So this order is correct.
448 	 */
449 	rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid);
450 	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
451 
452 	if (promote) {
453 		struct in_ifaddr *next_sec;
454 
455 		next_sec = rtnl_dereference(promote->ifa_next);
456 		if (prev_prom) {
457 			struct in_ifaddr *last_sec;
458 
459 			rcu_assign_pointer(prev_prom->ifa_next, next_sec);
460 
461 			last_sec = rtnl_dereference(*last_prim);
462 			rcu_assign_pointer(promote->ifa_next, last_sec);
463 			rcu_assign_pointer(*last_prim, promote);
464 		}
465 
466 		promote->ifa_flags &= ~IFA_F_SECONDARY;
467 		rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid);
468 		blocking_notifier_call_chain(&inetaddr_chain,
469 				NETDEV_UP, promote);
470 		for (ifa = next_sec; ifa;
471 		     ifa = rtnl_dereference(ifa->ifa_next)) {
472 			if (ifa1->ifa_mask != ifa->ifa_mask ||
473 			    !inet_ifa_match(ifa1->ifa_address, ifa))
474 					continue;
475 			fib_add_ifaddr(ifa);
476 		}
477 
478 	}
479 	if (destroy)
480 		inet_free_ifa(ifa1);
481 }
482 
483 static void inet_del_ifa(struct in_device *in_dev,
484 			 struct in_ifaddr __rcu **ifap,
485 			 int destroy)
486 {
487 	__inet_del_ifa(in_dev, ifap, destroy, NULL, 0);
488 }
489 
490 static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
491 			     u32 portid, struct netlink_ext_ack *extack)
492 {
493 	struct in_ifaddr __rcu **last_primary, **ifap;
494 	struct in_device *in_dev = ifa->ifa_dev;
495 	struct net *net = dev_net(in_dev->dev);
496 	struct in_validator_info ivi;
497 	struct in_ifaddr *ifa1;
498 	int ret;
499 
500 	ASSERT_RTNL();
501 
502 	ifa->ifa_flags &= ~IFA_F_SECONDARY;
503 	last_primary = &in_dev->ifa_list;
504 
505 	/* Don't set IPv6 only flags to IPv4 addresses */
506 	ifa->ifa_flags &= ~IPV6ONLY_FLAGS;
507 
508 	ifap = &in_dev->ifa_list;
509 	ifa1 = rtnl_dereference(*ifap);
510 
511 	while (ifa1) {
512 		if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
513 		    ifa->ifa_scope <= ifa1->ifa_scope)
514 			last_primary = &ifa1->ifa_next;
515 		if (ifa1->ifa_mask == ifa->ifa_mask &&
516 		    inet_ifa_match(ifa1->ifa_address, ifa)) {
517 			if (ifa1->ifa_local == ifa->ifa_local) {
518 				inet_free_ifa(ifa);
519 				return -EEXIST;
520 			}
521 			if (ifa1->ifa_scope != ifa->ifa_scope) {
522 				NL_SET_ERR_MSG(extack, "ipv4: Invalid scope value");
523 				inet_free_ifa(ifa);
524 				return -EINVAL;
525 			}
526 			ifa->ifa_flags |= IFA_F_SECONDARY;
527 		}
528 
529 		ifap = &ifa1->ifa_next;
530 		ifa1 = rtnl_dereference(*ifap);
531 	}
532 
533 	/* Allow any devices that wish to register ifaddr validators to weigh
534 	 * in now, before changes are committed.  The rtnl lock is serializing
535 	 * access here, so the state should not change between a validator call
536 	 * and a final notify on commit.  This isn't invoked on promotion under
537 	 * the assumption that validators are checking the address itself, and
538 	 * not the flags.
539 	 */
540 	ivi.ivi_addr = ifa->ifa_address;
541 	ivi.ivi_dev = ifa->ifa_dev;
542 	ivi.extack = extack;
543 	ret = blocking_notifier_call_chain(&inetaddr_validator_chain,
544 					   NETDEV_UP, &ivi);
545 	ret = notifier_to_errno(ret);
546 	if (ret) {
547 		inet_free_ifa(ifa);
548 		return ret;
549 	}
550 
551 	if (!(ifa->ifa_flags & IFA_F_SECONDARY))
552 		ifap = last_primary;
553 
554 	rcu_assign_pointer(ifa->ifa_next, *ifap);
555 	rcu_assign_pointer(*ifap, ifa);
556 
557 	inet_hash_insert(dev_net(in_dev->dev), ifa);
558 
559 	cancel_delayed_work(&net->ipv4.addr_chk_work);
560 	queue_delayed_work(system_power_efficient_wq, &net->ipv4.addr_chk_work, 0);
561 
562 	/* Send message first, then call notifier.
563 	   The notifier will trigger a FIB update, so that
564 	   netlink listeners will know about the new ifaddr */
565 	rtmsg_ifa(RTM_NEWADDR, ifa, nlh, portid);
566 	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
567 
568 	return 0;
569 }
570 
571 static int inet_insert_ifa(struct in_ifaddr *ifa)
572 {
573 	if (!ifa->ifa_local) {
574 		inet_free_ifa(ifa);
575 		return 0;
576 	}
577 
578 	return __inet_insert_ifa(ifa, NULL, 0, NULL);
579 }
580 
581 static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
582 {
583 	struct in_device *in_dev = __in_dev_get_rtnl_net(dev);
584 
585 	ipv4_devconf_setall(in_dev);
586 	neigh_parms_data_state_setall(in_dev->arp_parms);
587 
588 	if (ipv4_is_loopback(ifa->ifa_local))
589 		ifa->ifa_scope = RT_SCOPE_HOST;
590 	return inet_insert_ifa(ifa);
591 }
592 
593 /* Caller must hold RCU or RTNL:
594  * we don't take a reference on the found in_device.
595  */
596 struct in_device *inetdev_by_index(struct net *net, int ifindex)
597 {
598 	struct net_device *dev;
599 	struct in_device *in_dev = NULL;
600 
601 	rcu_read_lock();
602 	dev = dev_get_by_index_rcu(net, ifindex);
603 	if (dev)
604 		in_dev = rcu_dereference_rtnl(dev->ip_ptr);
605 	rcu_read_unlock();
606 	return in_dev;
607 }
608 EXPORT_SYMBOL(inetdev_by_index);
609 
610 /* Called only from RTNL semaphored context. No locks. */
611 
612 struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
613 				    __be32 mask)
614 {
615 	struct in_ifaddr *ifa;
616 
617 	ASSERT_RTNL();
618 
619 	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
620 		if (ifa->ifa_mask == mask && inet_ifa_match(prefix, ifa))
621 			return ifa;
622 	}
623 	return NULL;
624 }
625 
626 static int ip_mc_autojoin_config(struct net *net, bool join,
627 				 const struct in_ifaddr *ifa)
628 {
629 #if defined(CONFIG_IP_MULTICAST)
630 	struct ip_mreqn mreq = {
631 		.imr_multiaddr.s_addr = ifa->ifa_address,
632 		.imr_ifindex = ifa->ifa_dev->dev->ifindex,
633 	};
634 	struct sock *sk = net->ipv4.mc_autojoin_sk;
635 	int ret;
636 
637 	ASSERT_RTNL_NET(net);
638 
639 	lock_sock(sk);
640 	if (join)
641 		ret = ip_mc_join_group(sk, &mreq);
642 	else
643 		ret = ip_mc_leave_group(sk, &mreq);
644 	release_sock(sk);
645 
646 	return ret;
647 #else
648 	return -EOPNOTSUPP;
649 #endif
650 }
651 
652 static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
653 			    struct netlink_ext_ack *extack)
654 {
655 	struct net *net = sock_net(skb->sk);
656 	struct in_ifaddr __rcu **ifap;
657 	struct nlattr *tb[IFA_MAX+1];
658 	struct in_device *in_dev;
659 	struct ifaddrmsg *ifm;
660 	struct in_ifaddr *ifa;
661 	int err;
662 
663 	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
664 				     ifa_ipv4_policy, extack);
665 	if (err < 0)
666 		goto out;
667 
668 	ifm = nlmsg_data(nlh);
669 
670 	rtnl_net_lock(net);
671 
672 	in_dev = inetdev_by_index(net, ifm->ifa_index);
673 	if (!in_dev) {
674 		NL_SET_ERR_MSG(extack, "ipv4: Device not found");
675 		err = -ENODEV;
676 		goto unlock;
677 	}
678 
679 	for (ifap = &in_dev->ifa_list;
680 	     (ifa = rtnl_net_dereference(net, *ifap)) != NULL;
681 	     ifap = &ifa->ifa_next) {
682 		if (tb[IFA_LOCAL] &&
683 		    ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL]))
684 			continue;
685 
686 		if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
687 			continue;
688 
689 		if (tb[IFA_ADDRESS] &&
690 		    (ifm->ifa_prefixlen != ifa->ifa_prefixlen ||
691 		    !inet_ifa_match(nla_get_in_addr(tb[IFA_ADDRESS]), ifa)))
692 			continue;
693 
694 		if (ipv4_is_multicast(ifa->ifa_address))
695 			ip_mc_autojoin_config(net, false, ifa);
696 
697 		__inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
698 		goto unlock;
699 	}
700 
701 	NL_SET_ERR_MSG(extack, "ipv4: Address not found");
702 	err = -EADDRNOTAVAIL;
703 unlock:
704 	rtnl_net_unlock(net);
705 out:
706 	return err;
707 }
708 
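/* Periodic lifetime enforcement.  check_lifetime() is (re)queued on the
 * power-efficient workqueue whenever an address is inserted or its lifetimes
 * change.  It first scans each hash bucket under RCU to see whether anything
 * actually expired, and only then takes the per-netns RTNL to delete
 * addresses whose valid_lft ran out and to mark addresses past their
 * preferred_lft as IFA_F_DEPRECATED.
 */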
709 static void check_lifetime(struct work_struct *work)
710 {
711 	unsigned long now, next, next_sec, next_sched;
712 	struct in_ifaddr *ifa;
713 	struct hlist_node *n;
714 	struct net *net;
715 	int i;
716 
717 	net = container_of(to_delayed_work(work), struct net, ipv4.addr_chk_work);
718 	now = jiffies;
719 	next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
720 
721 	for (i = 0; i < IN4_ADDR_HSIZE; i++) {
722 		struct hlist_head *head = &net->ipv4.inet_addr_lst[i];
723 		bool change_needed = false;
724 
725 		rcu_read_lock();
726 		hlist_for_each_entry_rcu(ifa, head, addr_lst) {
727 			unsigned long age, tstamp;
728 			u32 preferred_lft;
729 			u32 valid_lft;
730 			u32 flags;
731 
732 			flags = READ_ONCE(ifa->ifa_flags);
733 			if (flags & IFA_F_PERMANENT)
734 				continue;
735 
736 			preferred_lft = READ_ONCE(ifa->ifa_preferred_lft);
737 			valid_lft = READ_ONCE(ifa->ifa_valid_lft);
738 			tstamp = READ_ONCE(ifa->ifa_tstamp);
739 			/* We try to batch several events at once. */
740 			age = (now - tstamp +
741 			       ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
742 
743 			if (valid_lft != INFINITY_LIFE_TIME &&
744 			    age >= valid_lft) {
745 				change_needed = true;
746 			} else if (preferred_lft ==
747 				   INFINITY_LIFE_TIME) {
748 				continue;
749 			} else if (age >= preferred_lft) {
750 				if (time_before(tstamp + valid_lft * HZ, next))
751 					next = tstamp + valid_lft * HZ;
752 
753 				if (!(flags & IFA_F_DEPRECATED))
754 					change_needed = true;
755 			} else if (time_before(tstamp + preferred_lft * HZ,
756 					       next)) {
757 				next = tstamp + preferred_lft * HZ;
758 			}
759 		}
760 		rcu_read_unlock();
761 		if (!change_needed)
762 			continue;
763 
764 		rtnl_net_lock(net);
765 		hlist_for_each_entry_safe(ifa, n, head, addr_lst) {
766 			unsigned long age;
767 
768 			if (ifa->ifa_flags & IFA_F_PERMANENT)
769 				continue;
770 
771 			/* We try to batch several events at once. */
772 			age = (now - ifa->ifa_tstamp +
773 			       ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
774 
775 			if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
776 			    age >= ifa->ifa_valid_lft) {
777 				struct in_ifaddr __rcu **ifap;
778 				struct in_ifaddr *tmp;
779 
780 				ifap = &ifa->ifa_dev->ifa_list;
781 				tmp = rtnl_net_dereference(net, *ifap);
782 				while (tmp) {
783 					if (tmp == ifa) {
784 						inet_del_ifa(ifa->ifa_dev,
785 							     ifap, 1);
786 						break;
787 					}
788 					ifap = &tmp->ifa_next;
789 					tmp = rtnl_net_dereference(net, *ifap);
790 				}
791 			} else if (ifa->ifa_preferred_lft !=
792 				   INFINITY_LIFE_TIME &&
793 				   age >= ifa->ifa_preferred_lft &&
794 				   !(ifa->ifa_flags & IFA_F_DEPRECATED)) {
795 				ifa->ifa_flags |= IFA_F_DEPRECATED;
796 				rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
797 			}
798 		}
799 		rtnl_net_unlock(net);
800 	}
801 
802 	next_sec = round_jiffies_up(next);
803 	next_sched = next;
804 
805 	/* If rounded timeout is accurate enough, accept it. */
806 	if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
807 		next_sched = next_sec;
808 
809 	now = jiffies;
810 	/* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
811 	if (time_before(next_sched, now + ADDRCONF_TIMER_FUZZ_MAX))
812 		next_sched = now + ADDRCONF_TIMER_FUZZ_MAX;
813 
814 	queue_delayed_work(system_power_efficient_wq, &net->ipv4.addr_chk_work,
815 			   next_sched - now);
816 }
817 
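/* Lifetimes arrive from userspace in seconds (IFA_CACHEINFO).  Roughly: an
 * infinite valid lifetime marks the address IFA_F_PERMANENT so that
 * check_lifetime() skips it, a finite one is stored in ifa_valid_lft; a
 * preferred lifetime of zero deprecates the address immediately, otherwise
 * it is stored in ifa_preferred_lft.
 */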
818 static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft,
819 			     __u32 prefered_lft)
820 {
821 	unsigned long timeout;
822 	u32 flags;
823 
824 	flags = ifa->ifa_flags & ~(IFA_F_PERMANENT | IFA_F_DEPRECATED);
825 
826 	timeout = addrconf_timeout_fixup(valid_lft, HZ);
827 	if (addrconf_finite_timeout(timeout))
828 		WRITE_ONCE(ifa->ifa_valid_lft, timeout);
829 	else
830 		flags |= IFA_F_PERMANENT;
831 
832 	timeout = addrconf_timeout_fixup(prefered_lft, HZ);
833 	if (addrconf_finite_timeout(timeout)) {
834 		if (timeout == 0)
835 			flags |= IFA_F_DEPRECATED;
836 		WRITE_ONCE(ifa->ifa_preferred_lft, timeout);
837 	}
838 	WRITE_ONCE(ifa->ifa_flags, flags);
839 	WRITE_ONCE(ifa->ifa_tstamp, jiffies);
840 	if (!ifa->ifa_cstamp)
841 		WRITE_ONCE(ifa->ifa_cstamp, ifa->ifa_tstamp);
842 }
843 
844 static int inet_validate_rtm(struct nlmsghdr *nlh, struct nlattr **tb,
845 			     struct netlink_ext_ack *extack,
846 			     __u32 *valid_lft, __u32 *prefered_lft)
847 {
848 	struct ifaddrmsg *ifm = nlmsg_data(nlh);
849 	int err;
850 
851 	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
852 				     ifa_ipv4_policy, extack);
853 	if (err < 0)
854 		return err;
855 
856 	if (ifm->ifa_prefixlen > 32) {
857 		NL_SET_ERR_MSG(extack, "ipv4: Invalid prefix length");
858 		return -EINVAL;
859 	}
860 
861 	if (!tb[IFA_LOCAL]) {
862 		NL_SET_ERR_MSG(extack, "ipv4: Local address is not supplied");
863 		return -EINVAL;
864 	}
865 
866 	if (tb[IFA_CACHEINFO]) {
867 		struct ifa_cacheinfo *ci;
868 
869 		ci = nla_data(tb[IFA_CACHEINFO]);
870 		if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
871 			NL_SET_ERR_MSG(extack, "ipv4: address lifetime invalid");
872 			return -EINVAL;
873 		}
874 
875 		*valid_lft = ci->ifa_valid;
876 		*prefered_lft = ci->ifa_prefered;
877 	}
878 
879 	return 0;
880 }
881 
882 static struct in_ifaddr *inet_rtm_to_ifa(struct net *net, struct nlmsghdr *nlh,
883 					 struct nlattr **tb,
884 					 struct netlink_ext_ack *extack)
885 {
886 	struct ifaddrmsg *ifm = nlmsg_data(nlh);
887 	struct in_device *in_dev;
888 	struct net_device *dev;
889 	struct in_ifaddr *ifa;
890 	int err;
891 
892 	dev = __dev_get_by_index(net, ifm->ifa_index);
893 	err = -ENODEV;
894 	if (!dev) {
895 		NL_SET_ERR_MSG(extack, "ipv4: Device not found");
896 		goto errout;
897 	}
898 
899 	in_dev = __in_dev_get_rtnl_net(dev);
900 	err = -ENOBUFS;
901 	if (!in_dev)
902 		goto errout;
903 
904 	ifa = inet_alloc_ifa(in_dev);
905 	if (!ifa)
906 		/*
907 		 * A potential indev allocation can be left alive; it stays
908 		 * assigned to its device and is destroyed with it.
909 		 */
910 		goto errout;
911 
912 	ipv4_devconf_setall(in_dev);
913 	neigh_parms_data_state_setall(in_dev->arp_parms);
914 
915 	if (!tb[IFA_ADDRESS])
916 		tb[IFA_ADDRESS] = tb[IFA_LOCAL];
917 
918 	ifa->ifa_prefixlen = ifm->ifa_prefixlen;
919 	ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
920 	ifa->ifa_flags = nla_get_u32_default(tb[IFA_FLAGS], ifm->ifa_flags);
921 	ifa->ifa_scope = ifm->ifa_scope;
922 	ifa->ifa_local = nla_get_in_addr(tb[IFA_LOCAL]);
923 	ifa->ifa_address = nla_get_in_addr(tb[IFA_ADDRESS]);
924 
925 	if (tb[IFA_BROADCAST])
926 		ifa->ifa_broadcast = nla_get_in_addr(tb[IFA_BROADCAST]);
927 
928 	if (tb[IFA_LABEL])
929 		nla_strscpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
930 	else
931 		memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
932 
933 	if (tb[IFA_RT_PRIORITY])
934 		ifa->ifa_rt_priority = nla_get_u32(tb[IFA_RT_PRIORITY]);
935 
936 	if (tb[IFA_PROTO])
937 		ifa->ifa_proto = nla_get_u8(tb[IFA_PROTO]);
938 
939 	return ifa;
940 
941 errout:
942 	return ERR_PTR(err);
943 }
944 
945 static struct in_ifaddr *find_matching_ifa(struct net *net, struct in_ifaddr *ifa)
946 {
947 	struct in_device *in_dev = ifa->ifa_dev;
948 	struct in_ifaddr *ifa1;
949 
950 	in_dev_for_each_ifa_rtnl_net(net, ifa1, in_dev) {
951 		if (ifa1->ifa_mask == ifa->ifa_mask &&
952 		    inet_ifa_match(ifa1->ifa_address, ifa) &&
953 		    ifa1->ifa_local == ifa->ifa_local)
954 			return ifa1;
955 	}
956 
957 	return NULL;
958 }
959 
960 static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
961 			    struct netlink_ext_ack *extack)
962 {
963 	__u32 prefered_lft = INFINITY_LIFE_TIME;
964 	__u32 valid_lft = INFINITY_LIFE_TIME;
965 	struct net *net = sock_net(skb->sk);
966 	struct in_ifaddr *ifa_existing;
967 	struct nlattr *tb[IFA_MAX + 1];
968 	struct in_ifaddr *ifa;
969 	int ret;
970 
971 	ret = inet_validate_rtm(nlh, tb, extack, &valid_lft, &prefered_lft);
972 	if (ret < 0)
973 		return ret;
974 
975 	if (!nla_get_in_addr(tb[IFA_LOCAL]))
976 		return 0;
977 
978 	rtnl_net_lock(net);
979 
980 	ifa = inet_rtm_to_ifa(net, nlh, tb, extack);
981 	if (IS_ERR(ifa)) {
982 		ret = PTR_ERR(ifa);
983 		goto unlock;
984 	}
985 
986 	ifa_existing = find_matching_ifa(net, ifa);
987 	if (!ifa_existing) {
988 		/* It would be best to check for !NLM_F_CREATE here but
989 		 * userspace already relies on not having to provide this.
990 		 */
991 		set_ifa_lifetime(ifa, valid_lft, prefered_lft);
992 		if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) {
993 			ret = ip_mc_autojoin_config(net, true, ifa);
994 			if (ret < 0) {
995 				NL_SET_ERR_MSG(extack, "ipv4: Multicast auto join failed");
996 				inet_free_ifa(ifa);
997 				goto unlock;
998 			}
999 		}
1000 
1001 		ret = __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid, extack);
1002 	} else {
1003 		u32 new_metric = ifa->ifa_rt_priority;
1004 		u8 new_proto = ifa->ifa_proto;
1005 
1006 		inet_free_ifa(ifa);
1007 
1008 		if (nlh->nlmsg_flags & NLM_F_EXCL ||
1009 		    !(nlh->nlmsg_flags & NLM_F_REPLACE)) {
1010 			NL_SET_ERR_MSG(extack, "ipv4: Address already assigned");
1011 			ret = -EEXIST;
1012 			goto unlock;
1013 		}
1014 		ifa = ifa_existing;
1015 
1016 		if (ifa->ifa_rt_priority != new_metric) {
1017 			fib_modify_prefix_metric(ifa, new_metric);
1018 			ifa->ifa_rt_priority = new_metric;
1019 		}
1020 
1021 		ifa->ifa_proto = new_proto;
1022 
1023 		set_ifa_lifetime(ifa, valid_lft, prefered_lft);
1024 		cancel_delayed_work(&net->ipv4.addr_chk_work);
1025 		queue_delayed_work(system_power_efficient_wq,
1026 				   &net->ipv4.addr_chk_work, 0);
1027 		rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
1028 	}
1029 
1030 unlock:
1031 	rtnl_net_unlock(net);
1032 
1033 	return ret;
1034 }
1035 
1036 /*
1037  *	Determine a default network mask, based on the IP address.
1038  */
1039 
1040 static int inet_abc_len(__be32 addr)
1041 {
1042 	int rc = -1;	/* Something else, probably a multicast. */
1043 
1044 	if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
1045 		rc = 0;
1046 	else {
1047 		__u32 haddr = ntohl(addr);
1048 		if (IN_CLASSA(haddr))
1049 			rc = 8;
1050 		else if (IN_CLASSB(haddr))
1051 			rc = 16;
1052 		else if (IN_CLASSC(haddr))
1053 			rc = 24;
1054 		else if (IN_CLASSE(haddr))
1055 			rc = 32;
1056 	}
1057 
1058 	return rc;
1059 }
1060 
1061 
1062 int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr)
1063 {
1064 	struct sockaddr_in sin_orig;
1065 	struct sockaddr_in *sin = (struct sockaddr_in *)&ifr->ifr_addr;
1066 	struct in_ifaddr __rcu **ifap = NULL;
1067 	struct in_device *in_dev;
1068 	struct in_ifaddr *ifa = NULL;
1069 	struct net_device *dev;
1070 	char *colon;
1071 	int ret = -EFAULT;
1072 	int tryaddrmatch = 0;
1073 
1074 	ifr->ifr_name[IFNAMSIZ - 1] = 0;
1075 
1076 	/* save original address for comparison */
1077 	memcpy(&sin_orig, sin, sizeof(*sin));
1078 
1079 	colon = strchr(ifr->ifr_name, ':');
1080 	if (colon)
1081 		*colon = 0;
1082 
1083 	dev_load(net, ifr->ifr_name);
1084 
1085 	switch (cmd) {
1086 	case SIOCGIFADDR:	/* Get interface address */
1087 	case SIOCGIFBRDADDR:	/* Get the broadcast address */
1088 	case SIOCGIFDSTADDR:	/* Get the destination address */
1089 	case SIOCGIFNETMASK:	/* Get the netmask for the interface */
1090 		/* Note that these ioctls will not sleep,
1091 		   so that we do not impose a lock.
1092 		   One day we will be forced to put shlock here (I mean SMP)
1093 		 */
1094 		tryaddrmatch = (sin_orig.sin_family == AF_INET);
1095 		memset(sin, 0, sizeof(*sin));
1096 		sin->sin_family = AF_INET;
1097 		break;
1098 
1099 	case SIOCSIFFLAGS:
1100 		ret = -EPERM;
1101 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1102 			goto out;
1103 		break;
1104 	case SIOCSIFADDR:	/* Set interface address (and family) */
1105 	case SIOCSIFBRDADDR:	/* Set the broadcast address */
1106 	case SIOCSIFDSTADDR:	/* Set the destination address */
1107 	case SIOCSIFNETMASK: 	/* Set the netmask for the interface */
1108 		ret = -EPERM;
1109 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1110 			goto out;
1111 		ret = -EINVAL;
1112 		if (sin->sin_family != AF_INET)
1113 			goto out;
1114 		break;
1115 	default:
1116 		ret = -EINVAL;
1117 		goto out;
1118 	}
1119 
1120 	rtnl_net_lock(net);
1121 
1122 	ret = -ENODEV;
1123 	dev = __dev_get_by_name(net, ifr->ifr_name);
1124 	if (!dev)
1125 		goto done;
1126 
1127 	if (colon)
1128 		*colon = ':';
1129 
1130 	in_dev = __in_dev_get_rtnl_net(dev);
1131 	if (in_dev) {
1132 		if (tryaddrmatch) {
1133 			/* Matthias Andree */
1134 			/* compare label and address (4.4BSD style) */
1135 			/* note: we only do this for a limited set of ioctls
1136 			   and only if the original address family was AF_INET.
1137 			   This is checked above. */
1138 
1139 			for (ifap = &in_dev->ifa_list;
1140 			     (ifa = rtnl_net_dereference(net, *ifap)) != NULL;
1141 			     ifap = &ifa->ifa_next) {
1142 				if (!strcmp(ifr->ifr_name, ifa->ifa_label) &&
1143 				    sin_orig.sin_addr.s_addr ==
1144 							ifa->ifa_local) {
1145 					break; /* found */
1146 				}
1147 			}
1148 		}
1149 		/* we didn't get a match, maybe the application is
1150 		   4.3BSD-style and passed in junk so we fall back to
1151 		   comparing just the label */
1152 		if (!ifa) {
1153 			for (ifap = &in_dev->ifa_list;
1154 			     (ifa = rtnl_net_dereference(net, *ifap)) != NULL;
1155 			     ifap = &ifa->ifa_next)
1156 				if (!strcmp(ifr->ifr_name, ifa->ifa_label))
1157 					break;
1158 		}
1159 	}
1160 
1161 	ret = -EADDRNOTAVAIL;
1162 	if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS)
1163 		goto done;
1164 
1165 	switch (cmd) {
1166 	case SIOCGIFADDR:	/* Get interface address */
1167 		ret = 0;
1168 		sin->sin_addr.s_addr = ifa->ifa_local;
1169 		break;
1170 
1171 	case SIOCGIFBRDADDR:	/* Get the broadcast address */
1172 		ret = 0;
1173 		sin->sin_addr.s_addr = ifa->ifa_broadcast;
1174 		break;
1175 
1176 	case SIOCGIFDSTADDR:	/* Get the destination address */
1177 		ret = 0;
1178 		sin->sin_addr.s_addr = ifa->ifa_address;
1179 		break;
1180 
1181 	case SIOCGIFNETMASK:	/* Get the netmask for the interface */
1182 		ret = 0;
1183 		sin->sin_addr.s_addr = ifa->ifa_mask;
1184 		break;
1185 
1186 	case SIOCSIFFLAGS:
1187 		if (colon) {
1188 			ret = -EADDRNOTAVAIL;
1189 			if (!ifa)
1190 				break;
1191 			ret = 0;
1192 			if (!(ifr->ifr_flags & IFF_UP))
1193 				inet_del_ifa(in_dev, ifap, 1);
1194 			break;
1195 		}
1196 
1197 		/* NETDEV_UP/DOWN/CHANGE could touch a peer dev */
1198 		ASSERT_RTNL();
1199 		ret = dev_change_flags(dev, ifr->ifr_flags, NULL);
1200 		break;
1201 
1202 	case SIOCSIFADDR:	/* Set interface address (and family) */
1203 		ret = -EINVAL;
1204 		if (inet_abc_len(sin->sin_addr.s_addr) < 0)
1205 			break;
1206 
1207 		if (!ifa) {
1208 			ret = -ENOBUFS;
1209 			if (!in_dev)
1210 				break;
1211 			ifa = inet_alloc_ifa(in_dev);
1212 			if (!ifa)
1213 				break;
1214 
1215 			if (colon)
1216 				memcpy(ifa->ifa_label, ifr->ifr_name, IFNAMSIZ);
1217 			else
1218 				memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1219 		} else {
1220 			ret = 0;
1221 			if (ifa->ifa_local == sin->sin_addr.s_addr)
1222 				break;
1223 			inet_del_ifa(in_dev, ifap, 0);
1224 			ifa->ifa_broadcast = 0;
1225 			ifa->ifa_scope = 0;
1226 		}
1227 
1228 		ifa->ifa_address = ifa->ifa_local = sin->sin_addr.s_addr;
1229 
1230 		if (!(dev->flags & IFF_POINTOPOINT)) {
1231 			ifa->ifa_prefixlen = inet_abc_len(ifa->ifa_address);
1232 			ifa->ifa_mask = inet_make_mask(ifa->ifa_prefixlen);
1233 			if ((dev->flags & IFF_BROADCAST) &&
1234 			    ifa->ifa_prefixlen < 31)
1235 				ifa->ifa_broadcast = ifa->ifa_address |
1236 						     ~ifa->ifa_mask;
1237 		} else {
1238 			ifa->ifa_prefixlen = 32;
1239 			ifa->ifa_mask = inet_make_mask(32);
1240 		}
1241 		set_ifa_lifetime(ifa, INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
1242 		ret = inet_set_ifa(dev, ifa);
1243 		break;
1244 
1245 	case SIOCSIFBRDADDR:	/* Set the broadcast address */
1246 		ret = 0;
1247 		if (ifa->ifa_broadcast != sin->sin_addr.s_addr) {
1248 			inet_del_ifa(in_dev, ifap, 0);
1249 			ifa->ifa_broadcast = sin->sin_addr.s_addr;
1250 			inet_insert_ifa(ifa);
1251 		}
1252 		break;
1253 
1254 	case SIOCSIFDSTADDR:	/* Set the destination address */
1255 		ret = 0;
1256 		if (ifa->ifa_address == sin->sin_addr.s_addr)
1257 			break;
1258 		ret = -EINVAL;
1259 		if (inet_abc_len(sin->sin_addr.s_addr) < 0)
1260 			break;
1261 		ret = 0;
1262 		inet_del_ifa(in_dev, ifap, 0);
1263 		ifa->ifa_address = sin->sin_addr.s_addr;
1264 		inet_insert_ifa(ifa);
1265 		break;
1266 
1267 	case SIOCSIFNETMASK: 	/* Set the netmask for the interface */
1268 
1269 		/*
1270 		 *	The mask we set must be legal.
1271 		 */
1272 		ret = -EINVAL;
1273 		if (bad_mask(sin->sin_addr.s_addr, 0))
1274 			break;
1275 		ret = 0;
1276 		if (ifa->ifa_mask != sin->sin_addr.s_addr) {
1277 			__be32 old_mask = ifa->ifa_mask;
1278 			inet_del_ifa(in_dev, ifap, 0);
1279 			ifa->ifa_mask = sin->sin_addr.s_addr;
1280 			ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask);
1281 
1282 			/* See if current broadcast address matches
1283 			 * with current netmask, then recalculate
1284 			 * the broadcast address. Otherwise it's a
1285 			 * funny address, so don't touch it since
1286 			 * the user seems to know what (s)he's doing...
1287 			 */
1288 			if ((dev->flags & IFF_BROADCAST) &&
1289 			    (ifa->ifa_prefixlen < 31) &&
1290 			    (ifa->ifa_broadcast ==
1291 			     (ifa->ifa_local|~old_mask))) {
1292 				ifa->ifa_broadcast = (ifa->ifa_local |
1293 						      ~sin->sin_addr.s_addr);
1294 			}
1295 			inet_insert_ifa(ifa);
1296 		}
1297 		break;
1298 	}
1299 done:
1300 	rtnl_net_unlock(net);
1301 out:
1302 	return ret;
1303 }
1304 
1305 int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size)
1306 {
1307 	struct in_device *in_dev = __in_dev_get_rtnl_net(dev);
1308 	const struct in_ifaddr *ifa;
1309 	struct ifreq ifr;
1310 	int done = 0;
1311 
1312 	if (WARN_ON(size > sizeof(struct ifreq)))
1313 		goto out;
1314 
1315 	if (!in_dev)
1316 		goto out;
1317 
1318 	in_dev_for_each_ifa_rtnl_net(dev_net(dev), ifa, in_dev) {
1319 		if (!buf) {
1320 			done += size;
1321 			continue;
1322 		}
1323 		if (len < size)
1324 			break;
1325 		memset(&ifr, 0, sizeof(struct ifreq));
1326 		strcpy(ifr.ifr_name, ifa->ifa_label);
1327 
1328 		(*(struct sockaddr_in *)&ifr.ifr_addr).sin_family = AF_INET;
1329 		(*(struct sockaddr_in *)&ifr.ifr_addr).sin_addr.s_addr =
1330 								ifa->ifa_local;
1331 
1332 		if (copy_to_user(buf + done, &ifr, size)) {
1333 			done = -EFAULT;
1334 			break;
1335 		}
1336 		len  -= size;
1337 		done += size;
1338 	}
1339 out:
1340 	return done;
1341 }
1342 
1343 static __be32 in_dev_select_addr(const struct in_device *in_dev,
1344 				 int scope)
1345 {
1346 	const struct in_ifaddr *ifa;
1347 
1348 	in_dev_for_each_ifa_rcu(ifa, in_dev) {
1349 		if (READ_ONCE(ifa->ifa_flags) & IFA_F_SECONDARY)
1350 			continue;
1351 		if (ifa->ifa_scope != RT_SCOPE_LINK &&
1352 		    ifa->ifa_scope <= scope)
1353 			return ifa->ifa_local;
1354 	}
1355 
1356 	return 0;
1357 }
1358 
1359 __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
1360 {
1361 	const struct in_ifaddr *ifa;
1362 	__be32 addr = 0;
1363 	unsigned char localnet_scope = RT_SCOPE_HOST;
1364 	struct in_device *in_dev;
1365 	struct net *net;
1366 	int master_idx;
1367 
1368 	rcu_read_lock();
1369 	net = dev_net_rcu(dev);
1370 	in_dev = __in_dev_get_rcu(dev);
1371 	if (!in_dev)
1372 		goto no_in_dev;
1373 
1374 	if (unlikely(IN_DEV_ROUTE_LOCALNET(in_dev)))
1375 		localnet_scope = RT_SCOPE_LINK;
1376 
1377 	in_dev_for_each_ifa_rcu(ifa, in_dev) {
1378 		if (READ_ONCE(ifa->ifa_flags) & IFA_F_SECONDARY)
1379 			continue;
1380 		if (min(ifa->ifa_scope, localnet_scope) > scope)
1381 			continue;
1382 		if (!dst || inet_ifa_match(dst, ifa)) {
1383 			addr = ifa->ifa_local;
1384 			break;
1385 		}
1386 		if (!addr)
1387 			addr = ifa->ifa_local;
1388 	}
1389 
1390 	if (addr)
1391 		goto out_unlock;
1392 no_in_dev:
1393 	master_idx = l3mdev_master_ifindex_rcu(dev);
1394 
1395 	/* For VRFs, the VRF device takes the place of the loopback device,
1396 	 * with addresses on it being preferred.  Note in such cases the
1397 	 * loopback device will be among the devices that fail the master_idx
1398 	 * equality check in the loop below.
1399 	 */
1400 	if (master_idx &&
1401 	    (dev = dev_get_by_index_rcu(net, master_idx)) &&
1402 	    (in_dev = __in_dev_get_rcu(dev))) {
1403 		addr = in_dev_select_addr(in_dev, scope);
1404 		if (addr)
1405 			goto out_unlock;
1406 	}
1407 
1408 	/* Non-loopback addresses on loopback should be preferred
1409 	   in this case. It is important that lo is the first interface
1410 	   in dev_base list.
1411 	 */
1412 	for_each_netdev_rcu(net, dev) {
1413 		if (l3mdev_master_ifindex_rcu(dev) != master_idx)
1414 			continue;
1415 
1416 		in_dev = __in_dev_get_rcu(dev);
1417 		if (!in_dev)
1418 			continue;
1419 
1420 		addr = in_dev_select_addr(in_dev, scope);
1421 		if (addr)
1422 			goto out_unlock;
1423 	}
1424 out_unlock:
1425 	rcu_read_unlock();
1426 	return addr;
1427 }
1428 EXPORT_SYMBOL(inet_select_addr);
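
/* Usage sketch: picking a source address for replies sent out on "dev"
 * towards "dst" could look like
 *
 *	saddr = inet_select_addr(dev, dst, RT_SCOPE_LINK);
 *
 * which returns the first primary address on dev that shares a subnet with
 * dst and has an acceptable scope, falling back to any suitably scoped
 * primary address on dev and finally to other devices in the same
 * namespace/VRF, as handled above.
 */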
1429 
1430 static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst,
1431 			      __be32 local, int scope)
1432 {
1433 	unsigned char localnet_scope = RT_SCOPE_HOST;
1434 	const struct in_ifaddr *ifa;
1435 	__be32 addr = 0;
1436 	int same = 0;
1437 
1438 	if (unlikely(IN_DEV_ROUTE_LOCALNET(in_dev)))
1439 		localnet_scope = RT_SCOPE_LINK;
1440 
1441 	in_dev_for_each_ifa_rcu(ifa, in_dev) {
1442 		unsigned char min_scope = min(ifa->ifa_scope, localnet_scope);
1443 
1444 		if (!addr &&
1445 		    (local == ifa->ifa_local || !local) &&
1446 		    min_scope <= scope) {
1447 			addr = ifa->ifa_local;
1448 			if (same)
1449 				break;
1450 		}
1451 		if (!same) {
1452 			same = (!local || inet_ifa_match(local, ifa)) &&
1453 				(!dst || inet_ifa_match(dst, ifa));
1454 			if (same && addr) {
1455 				if (local || !dst)
1456 					break;
1457 				/* Is the selected addr into dst subnet? */
1458 				if (inet_ifa_match(addr, ifa))
1459 					break;
1460 				/* No, then can we use new local src? */
1461 				if (min_scope <= scope) {
1462 					addr = ifa->ifa_local;
1463 					break;
1464 				}
1465 				/* search for large dst subnet for addr */
1466 				same = 0;
1467 			}
1468 		}
1469 	}
1470 
1471 	return same ? addr : 0;
1472 }
1473 
1474 /*
1475  * Confirm that local IP address exists using wildcards:
1476  * - net: netns to check, cannot be NULL
1477  * - in_dev: only on this interface, NULL=any interface
1478  * - dst: only in the same subnet as dst, 0=any dst
1479  * - local: address, 0=autoselect the local address
1480  * - scope: maximum allowed scope value for the local address
1481  */
1482 __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev,
1483 			 __be32 dst, __be32 local, int scope)
1484 {
1485 	__be32 addr = 0;
1486 	struct net_device *dev;
1487 
1488 	if (in_dev)
1489 		return confirm_addr_indev(in_dev, dst, local, scope);
1490 
1491 	rcu_read_lock();
1492 	for_each_netdev_rcu(net, dev) {
1493 		in_dev = __in_dev_get_rcu(dev);
1494 		if (in_dev) {
1495 			addr = confirm_addr_indev(in_dev, dst, local, scope);
1496 			if (addr)
1497 				break;
1498 		}
1499 	}
1500 	rcu_read_unlock();
1501 
1502 	return addr;
1503 }
1504 EXPORT_SYMBOL(inet_confirm_addr);
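
/* Usage sketch of the wildcard rules above:
 *
 *	inet_confirm_addr(net, NULL, 0, local, RT_SCOPE_HOST)
 *
 * returns a non-zero address only if "local" is configured on some
 * interface in "net" (any scope up to host scope), while passing a specific
 * in_dev and a non-zero dst restricts the check to that interface and to
 * addresses sharing dst's subnet.
 */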
1505 
1506 /*
1507  *	Device notifier
1508  */
1509 
1510 int register_inetaddr_notifier(struct notifier_block *nb)
1511 {
1512 	return blocking_notifier_chain_register(&inetaddr_chain, nb);
1513 }
1514 EXPORT_SYMBOL(register_inetaddr_notifier);
1515 
1516 int unregister_inetaddr_notifier(struct notifier_block *nb)
1517 {
1518 	return blocking_notifier_chain_unregister(&inetaddr_chain, nb);
1519 }
1520 EXPORT_SYMBOL(unregister_inetaddr_notifier);
1521 
1522 int register_inetaddr_validator_notifier(struct notifier_block *nb)
1523 {
1524 	return blocking_notifier_chain_register(&inetaddr_validator_chain, nb);
1525 }
1526 EXPORT_SYMBOL(register_inetaddr_validator_notifier);
1527 
1528 int unregister_inetaddr_validator_notifier(struct notifier_block *nb)
1529 {
1530 	return blocking_notifier_chain_unregister(&inetaddr_validator_chain,
1531 	    nb);
1532 }
1533 EXPORT_SYMBOL(unregister_inetaddr_validator_notifier);
1534 
1535 /* Rename ifa_labels for a device name change. Make some effort to preserve
1536  * existing alias numbering and to create unique labels if possible.
1537 */
1538 static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
1539 {
1540 	struct in_ifaddr *ifa;
1541 	int named = 0;
1542 
1543 	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
1544 		char old[IFNAMSIZ], *dot;
1545 
1546 		memcpy(old, ifa->ifa_label, IFNAMSIZ);
1547 		memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1548 		if (named++ == 0)
1549 			goto skip;
1550 		dot = strchr(old, ':');
1551 		if (!dot) {
1552 			sprintf(old, ":%d", named);
1553 			dot = old;
1554 		}
1555 		if (strlen(dot) + strlen(dev->name) < IFNAMSIZ)
1556 			strcat(ifa->ifa_label, dot);
1557 		else
1558 			strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
1559 skip:
1560 		rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
1561 	}
1562 }
1563 
1564 static void inetdev_send_gratuitous_arp(struct net_device *dev,
1565 					struct in_device *in_dev)
1566 
1567 {
1568 	const struct in_ifaddr *ifa;
1569 
1570 	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
1571 		arp_send(ARPOP_REQUEST, ETH_P_ARP,
1572 			 ifa->ifa_local, dev,
1573 			 ifa->ifa_local, NULL,
1574 			 dev->dev_addr, NULL);
1575 	}
1576 }
1577 
1578 /* Called only under RTNL semaphore */
1579 
1580 static int inetdev_event(struct notifier_block *this, unsigned long event,
1581 			 void *ptr)
1582 {
1583 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1584 	struct in_device *in_dev = __in_dev_get_rtnl(dev);
1585 
1586 	ASSERT_RTNL();
1587 
1588 	if (!in_dev) {
1589 		if (event == NETDEV_REGISTER) {
1590 			in_dev = inetdev_init(dev);
1591 			if (IS_ERR(in_dev))
1592 				return notifier_from_errno(PTR_ERR(in_dev));
1593 			if (dev->flags & IFF_LOOPBACK) {
1594 				IN_DEV_CONF_SET(in_dev, NOXFRM, 1);
1595 				IN_DEV_CONF_SET(in_dev, NOPOLICY, 1);
1596 			}
1597 		} else if (event == NETDEV_CHANGEMTU) {
1598 			/* Re-enabling IP */
1599 			if (inetdev_valid_mtu(dev->mtu))
1600 				in_dev = inetdev_init(dev);
1601 		}
1602 		goto out;
1603 	}
1604 
1605 	switch (event) {
1606 	case NETDEV_REGISTER:
1607 		pr_debug("%s: bug\n", __func__);
1608 		RCU_INIT_POINTER(dev->ip_ptr, NULL);
1609 		break;
1610 	case NETDEV_UP:
1611 		if (!inetdev_valid_mtu(dev->mtu))
1612 			break;
1613 		if (dev->flags & IFF_LOOPBACK) {
1614 			struct in_ifaddr *ifa = inet_alloc_ifa(in_dev);
1615 
1616 			if (ifa) {
1617 				ifa->ifa_local =
1618 				  ifa->ifa_address = htonl(INADDR_LOOPBACK);
1619 				ifa->ifa_prefixlen = 8;
1620 				ifa->ifa_mask = inet_make_mask(8);
1621 				ifa->ifa_scope = RT_SCOPE_HOST;
1622 				memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1623 				set_ifa_lifetime(ifa, INFINITY_LIFE_TIME,
1624 						 INFINITY_LIFE_TIME);
1625 				ipv4_devconf_setall(in_dev);
1626 				neigh_parms_data_state_setall(in_dev->arp_parms);
1627 				inet_insert_ifa(ifa);
1628 			}
1629 		}
1630 		ip_mc_up(in_dev);
1631 		fallthrough;
1632 	case NETDEV_CHANGEADDR:
1633 		if (!IN_DEV_ARP_NOTIFY(in_dev))
1634 			break;
1635 		fallthrough;
1636 	case NETDEV_NOTIFY_PEERS:
1637 		/* Send gratuitous ARP to notify of link change */
1638 		inetdev_send_gratuitous_arp(dev, in_dev);
1639 		break;
1640 	case NETDEV_DOWN:
1641 		ip_mc_down(in_dev);
1642 		break;
1643 	case NETDEV_PRE_TYPE_CHANGE:
1644 		ip_mc_unmap(in_dev);
1645 		break;
1646 	case NETDEV_POST_TYPE_CHANGE:
1647 		ip_mc_remap(in_dev);
1648 		break;
1649 	case NETDEV_CHANGEMTU:
1650 		if (inetdev_valid_mtu(dev->mtu))
1651 			break;
1652 		/* disable IP when MTU is not enough */
1653 		fallthrough;
1654 	case NETDEV_UNREGISTER:
1655 		inetdev_destroy(in_dev);
1656 		break;
1657 	case NETDEV_CHANGENAME:
1658 		/* Do not notify about the label change; this event is
1659 		 * not interesting to applications using netlink.
1660 		 */
1661 		inetdev_changename(dev, in_dev);
1662 
1663 		devinet_sysctl_unregister(in_dev);
1664 		devinet_sysctl_register(in_dev);
1665 		break;
1666 	}
1667 out:
1668 	return NOTIFY_DONE;
1669 }
1670 
1671 static struct notifier_block ip_netdev_notifier = {
1672 	.notifier_call = inetdev_event,
1673 };
1674 
1675 static size_t inet_nlmsg_size(void)
1676 {
1677 	return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
1678 	       + nla_total_size(4) /* IFA_ADDRESS */
1679 	       + nla_total_size(4) /* IFA_LOCAL */
1680 	       + nla_total_size(4) /* IFA_BROADCAST */
1681 	       + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
1682 	       + nla_total_size(4)  /* IFA_FLAGS */
1683 	       + nla_total_size(1)  /* IFA_PROTO */
1684 	       + nla_total_size(4)  /* IFA_RT_PRIORITY */
1685 	       + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */
1686 }
1687 
1688 static inline u32 cstamp_delta(unsigned long cstamp)
1689 {
1690 	return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
1691 }
1692 
1693 static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
1694 			 unsigned long tstamp, u32 preferred, u32 valid)
1695 {
1696 	struct ifa_cacheinfo ci;
1697 
1698 	ci.cstamp = cstamp_delta(cstamp);
1699 	ci.tstamp = cstamp_delta(tstamp);
1700 	ci.ifa_prefered = preferred;
1701 	ci.ifa_valid = valid;
1702 
1703 	return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
1704 }
1705 
1706 static int inet_fill_ifaddr(struct sk_buff *skb, const struct in_ifaddr *ifa,
1707 			    struct inet_fill_args *args)
1708 {
1709 	struct ifaddrmsg *ifm;
1710 	struct nlmsghdr  *nlh;
1711 	unsigned long tstamp;
1712 	u32 preferred, valid;
1713 	u32 flags;
1714 
1715 	nlh = nlmsg_put(skb, args->portid, args->seq, args->event, sizeof(*ifm),
1716 			args->flags);
1717 	if (!nlh)
1718 		return -EMSGSIZE;
1719 
1720 	ifm = nlmsg_data(nlh);
1721 	ifm->ifa_family = AF_INET;
1722 	ifm->ifa_prefixlen = ifa->ifa_prefixlen;
1723 
1724 	flags = READ_ONCE(ifa->ifa_flags);
1725 	/* Warning: ifm->ifa_flags is a __u8; it holds only 8 bits.
1726 	 * The 32-bit value is given in the IFA_FLAGS attribute.
1727 	 */
1728 	ifm->ifa_flags = (__u8)flags;
1729 
1730 	ifm->ifa_scope = ifa->ifa_scope;
1731 	ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
1732 
1733 	if (args->netnsid >= 0 &&
1734 	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
1735 		goto nla_put_failure;
1736 
1737 	tstamp = READ_ONCE(ifa->ifa_tstamp);
1738 	if (!(flags & IFA_F_PERMANENT)) {
1739 		preferred = READ_ONCE(ifa->ifa_preferred_lft);
1740 		valid = READ_ONCE(ifa->ifa_valid_lft);
1741 		if (preferred != INFINITY_LIFE_TIME) {
1742 			long tval = (jiffies - tstamp) / HZ;
1743 
1744 			if (preferred > tval)
1745 				preferred -= tval;
1746 			else
1747 				preferred = 0;
1748 			if (valid != INFINITY_LIFE_TIME) {
1749 				if (valid > tval)
1750 					valid -= tval;
1751 				else
1752 					valid = 0;
1753 			}
1754 		}
1755 	} else {
1756 		preferred = INFINITY_LIFE_TIME;
1757 		valid = INFINITY_LIFE_TIME;
1758 	}
1759 	if ((ifa->ifa_address &&
1760 	     nla_put_in_addr(skb, IFA_ADDRESS, ifa->ifa_address)) ||
1761 	    (ifa->ifa_local &&
1762 	     nla_put_in_addr(skb, IFA_LOCAL, ifa->ifa_local)) ||
1763 	    (ifa->ifa_broadcast &&
1764 	     nla_put_in_addr(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
1765 	    (ifa->ifa_label[0] &&
1766 	     nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
1767 	    (ifa->ifa_proto &&
1768 	     nla_put_u8(skb, IFA_PROTO, ifa->ifa_proto)) ||
1769 	    nla_put_u32(skb, IFA_FLAGS, flags) ||
1770 	    (ifa->ifa_rt_priority &&
1771 	     nla_put_u32(skb, IFA_RT_PRIORITY, ifa->ifa_rt_priority)) ||
1772 	    put_cacheinfo(skb, READ_ONCE(ifa->ifa_cstamp), tstamp,
1773 			  preferred, valid))
1774 		goto nla_put_failure;
1775 
1776 	nlmsg_end(skb, nlh);
1777 	return 0;
1778 
1779 nla_put_failure:
1780 	nlmsg_cancel(skb, nlh);
1781 	return -EMSGSIZE;
1782 }
1783 
1784 static int inet_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
1785 				      struct inet_fill_args *fillargs,
1786 				      struct net **tgt_net, struct sock *sk,
1787 				      struct netlink_callback *cb)
1788 {
1789 	struct netlink_ext_ack *extack = cb->extack;
1790 	struct nlattr *tb[IFA_MAX+1];
1791 	struct ifaddrmsg *ifm;
1792 	int err, i;
1793 
1794 	ifm = nlmsg_payload(nlh, sizeof(*ifm));
1795 	if (!ifm) {
1796 		NL_SET_ERR_MSG(extack, "ipv4: Invalid header for address dump request");
1797 		return -EINVAL;
1798 	}
1799 
1800 	if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
1801 		NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for address dump request");
1802 		return -EINVAL;
1803 	}
1804 
1805 	fillargs->ifindex = ifm->ifa_index;
1806 	if (fillargs->ifindex) {
1807 		cb->answer_flags |= NLM_F_DUMP_FILTERED;
1808 		fillargs->flags |= NLM_F_DUMP_FILTERED;
1809 	}
1810 
1811 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
1812 					    ifa_ipv4_policy, extack);
1813 	if (err < 0)
1814 		return err;
1815 
1816 	for (i = 0; i <= IFA_MAX; ++i) {
1817 		if (!tb[i])
1818 			continue;
1819 
1820 		if (i == IFA_TARGET_NETNSID) {
1821 			struct net *net;
1822 
1823 			fillargs->netnsid = nla_get_s32(tb[i]);
1824 
1825 			net = rtnl_get_net_ns_capable(sk, fillargs->netnsid);
1826 			if (IS_ERR(net)) {
1827 				fillargs->netnsid = -1;
1828 				NL_SET_ERR_MSG(extack, "ipv4: Invalid target network namespace id");
1829 				return PTR_ERR(net);
1830 			}
1831 			*tgt_net = net;
1832 		} else {
1833 			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in dump request");
1834 			return -EINVAL;
1835 		}
1836 	}
1837 
1838 	return 0;
1839 }
1840 
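/* Dump the multicast addresses of one in_device, skipping entries
 * already sent and saving the resume point back into *s_ip_idx.
 * Runs under rcu_read_lock().
 */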
1841 static int in_dev_dump_ifmcaddr(struct in_device *in_dev, struct sk_buff *skb,
1842 				struct netlink_callback *cb, int *s_ip_idx,
1843 				struct inet_fill_args *fillargs)
1844 {
1845 	struct ip_mc_list *im;
1846 	int ip_idx = 0;
1847 	int err;
1848 
1849 	for (im = rcu_dereference(in_dev->mc_list);
1850 	     im;
1851 	     im = rcu_dereference(im->next_rcu)) {
1852 		if (ip_idx < *s_ip_idx) {
1853 			ip_idx++;
1854 			continue;
1855 		}
1856 		err = inet_fill_ifmcaddr(skb, in_dev->dev, im, fillargs);
1857 		if (err < 0)
1858 			goto done;
1859 
1860 		nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1861 		ip_idx++;
1862 	}
1863 	err = 0;
1864 	ip_idx = 0;
1865 done:
1866 	*s_ip_idx = ip_idx;
1867 	return err;
1868 }
1869 
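/* Dump the unicast addresses of one in_device; same *s_ip_idx resume
 * semantics as the multicast walker above.
 */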
1870 static int in_dev_dump_ifaddr(struct in_device *in_dev, struct sk_buff *skb,
1871 			      struct netlink_callback *cb, int *s_ip_idx,
1872 			      struct inet_fill_args *fillargs)
1873 {
1874 	struct in_ifaddr *ifa;
1875 	int ip_idx = 0;
1876 	int err;
1877 
1878 	in_dev_for_each_ifa_rcu(ifa, in_dev) {
1879 		if (ip_idx < *s_ip_idx) {
1880 			ip_idx++;
1881 			continue;
1882 		}
1883 		err = inet_fill_ifaddr(skb, ifa, fillargs);
1884 		if (err < 0)
1885 			goto done;
1886 
1887 		nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1888 		ip_idx++;
1889 	}
1890 	err = 0;
1891 	ip_idx = 0;
1892 done:
1893 	*s_ip_idx = ip_idx;
1894 
1895 	return err;
1896 }
1897 
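/* Dispatch a per-device dump to the unicast (RTM_NEWADDR) or multicast
 * (RTM_GETMULTICAST) walker, depending on the requested event.
 */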
1898 static int in_dev_dump_addr(struct in_device *in_dev, struct sk_buff *skb,
1899 			    struct netlink_callback *cb, int *s_ip_idx,
1900 			    struct inet_fill_args *fillargs)
1901 {
1902 	switch (fillargs->event) {
1903 	case RTM_NEWADDR:
1904 		return in_dev_dump_ifaddr(in_dev, skb, cb, s_ip_idx, fillargs);
1905 	case RTM_GETMULTICAST:
1906 		return in_dev_dump_ifmcaddr(in_dev, skb, cb, s_ip_idx,
1907 					    fillargs);
1908 	default:
1909 		return -EINVAL;
1910 	}
1911 }
1912 
1913 /* Combine dev_addr_genid and dev_base_seq to detect changes.
1914  */
1915 static u32 inet_base_seq(const struct net *net)
1916 {
1917 	u32 res = atomic_read(&net->ipv4.dev_addr_genid) +
1918 		  READ_ONCE(net->dev_base_seq);
1919 
1920 	/* Must not return 0 (see nl_dump_check_consistent()).
1921 	 * Choose a value far away from 0.
1922 	 */
1923 	if (!res)
1924 		res = 0x80000000;
1925 	return res;
1926 }
1927 
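/* Common dump handler for RTM_GETADDR and RTM_GETMULTICAST.  Runs
 * under RCU, honours strict-check filtering by ifindex or target
 * netns, and seeds cb->seq from inet_base_seq() so that dumps racing
 * with address changes can be flagged as inconsistent to userspace.
 */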
1928 static int inet_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
1929 			  int event)
1930 {
1931 	const struct nlmsghdr *nlh = cb->nlh;
1932 	struct inet_fill_args fillargs = {
1933 		.portid = NETLINK_CB(cb->skb).portid,
1934 		.seq = nlh->nlmsg_seq,
1935 		.event = event,
1936 		.flags = NLM_F_MULTI,
1937 		.netnsid = -1,
1938 	};
1939 	struct net *net = sock_net(skb->sk);
1940 	struct net *tgt_net = net;
1941 	struct {
1942 		unsigned long ifindex;
1943 		int ip_idx;
1944 	} *ctx = (void *)cb->ctx;
1945 	struct in_device *in_dev;
1946 	struct net_device *dev;
1947 	int err = 0;
1948 
1949 	rcu_read_lock();
1950 	if (cb->strict_check) {
1951 		err = inet_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
1952 						 skb->sk, cb);
1953 		if (err < 0)
1954 			goto done;
1955 
1956 		if (fillargs.ifindex) {
1957 			dev = dev_get_by_index_rcu(tgt_net, fillargs.ifindex);
1958 			if (!dev) {
1959 				err = -ENODEV;
1960 				goto done;
1961 			}
1962 			in_dev = __in_dev_get_rcu(dev);
1963 			if (!in_dev)
1964 				goto done;
1965 			err = in_dev_dump_addr(in_dev, skb, cb, &ctx->ip_idx,
1966 					       &fillargs);
1967 			goto done;
1968 		}
1969 	}
1970 
1971 	cb->seq = inet_base_seq(tgt_net);
1972 
1973 	for_each_netdev_dump(tgt_net, dev, ctx->ifindex) {
1974 		in_dev = __in_dev_get_rcu(dev);
1975 		if (!in_dev)
1976 			continue;
1977 		err = in_dev_dump_addr(in_dev, skb, cb, &ctx->ip_idx,
1978 				       &fillargs);
1979 		if (err < 0)
1980 			goto done;
1981 	}
1982 done:
1983 	if (fillargs.netnsid >= 0)
1984 		put_net(tgt_net);
1985 	rcu_read_unlock();
1986 	return err;
1987 }
1988 
1989 static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1990 {
1991 	return inet_dump_addr(skb, cb, RTM_NEWADDR);
1992 }
1993 
1994 static int inet_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb)
1995 {
1996 	return inet_dump_addr(skb, cb, RTM_GETMULTICAST);
1997 }
1998 
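/* Notify RTNLGRP_IPV4_IFADDR subscribers of an address event.  If the
 * skb cannot be allocated or filled, the error is propagated to the
 * group via rtnl_set_sk_err() so listeners do not miss it silently.
 */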
1999 static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
2000 		      u32 portid)
2001 {
2002 	struct inet_fill_args fillargs = {
2003 		.portid = portid,
2004 		.seq = nlh ? nlh->nlmsg_seq : 0,
2005 		.event = event,
2006 		.flags = 0,
2007 		.netnsid = -1,
2008 	};
2009 	struct sk_buff *skb;
2010 	int err = -ENOBUFS;
2011 	struct net *net;
2012 
2013 	net = dev_net(ifa->ifa_dev->dev);
2014 	skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL);
2015 	if (!skb)
2016 		goto errout;
2017 
2018 	err = inet_fill_ifaddr(skb, ifa, &fillargs);
2019 	if (err < 0) {
2020 		/* -EMSGSIZE implies BUG in inet_nlmsg_size() */
2021 		WARN_ON(err == -EMSGSIZE);
2022 		kfree_skb(skb);
2023 		goto errout;
2024 	}
2025 	rtnl_notify(skb, net, portid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
2026 	return;
2027 errout:
2028 	rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
2029 }
2030 
2031 static size_t inet_get_link_af_size(const struct net_device *dev,
2032 				    u32 ext_filter_mask)
2033 {
2034 	struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
2035 
2036 	if (!in_dev)
2037 		return 0;
2038 
2039 	return nla_total_size(IPV4_DEVCONF_MAX * 4); /* IFLA_INET_CONF */
2040 }
2041 
2042 static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
2043 			     u32 ext_filter_mask)
2044 {
2045 	struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
2046 	struct nlattr *nla;
2047 	int i;
2048 
2049 	if (!in_dev)
2050 		return -ENODATA;
2051 
2052 	nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
2053 	if (!nla)
2054 		return -EMSGSIZE;
2055 
2056 	for (i = 0; i < IPV4_DEVCONF_MAX; i++)
2057 		((u32 *) nla_data(nla))[i] = READ_ONCE(in_dev->cnf.data[i]);
2058 
2059 	return 0;
2060 }
2061 
2062 static const struct nla_policy inet_af_policy[IFLA_INET_MAX+1] = {
2063 	[IFLA_INET_CONF]	= { .type = NLA_NESTED },
2064 };
2065 
2066 static int inet_validate_link_af(const struct net_device *dev,
2067 				 const struct nlattr *nla,
2068 				 struct netlink_ext_ack *extack)
2069 {
2070 	struct nlattr *a, *tb[IFLA_INET_MAX+1];
2071 	int err, rem;
2072 
2073 	if (dev && !__in_dev_get_rtnl(dev))
2074 		return -EAFNOSUPPORT;
2075 
2076 	err = nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla,
2077 					  inet_af_policy, extack);
2078 	if (err < 0)
2079 		return err;
2080 
2081 	if (tb[IFLA_INET_CONF]) {
2082 		nla_for_each_nested(a, tb[IFLA_INET_CONF], rem) {
2083 			int cfgid = nla_type(a);
2084 
2085 			if (nla_len(a) < 4)
2086 				return -EINVAL;
2087 
2088 			if (cfgid <= 0 || cfgid > IPV4_DEVCONF_MAX)
2089 				return -EINVAL;
2090 		}
2091 	}
2092 
2093 	return 0;
2094 }
2095 
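/* Apply IFLA_INET_CONF values supplied via the rtnetlink AF_INET
 * link-af interface; the attributes are expected to have passed
 * inet_validate_link_af() earlier in the setlink path.
 */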
2096 static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla,
2097 			    struct netlink_ext_ack *extack)
2098 {
2099 	struct in_device *in_dev = __in_dev_get_rtnl(dev);
2100 	struct nlattr *a, *tb[IFLA_INET_MAX+1];
2101 	int rem;
2102 
2103 	if (!in_dev)
2104 		return -EAFNOSUPPORT;
2105 
2106 	if (nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla, NULL, NULL) < 0)
2107 		return -EINVAL;
2108 
2109 	if (tb[IFLA_INET_CONF]) {
2110 		nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
2111 			ipv4_devconf_set(in_dev, nla_type(a), nla_get_u32(a));
2112 	}
2113 
2114 	return 0;
2115 }
2116 
2117 static int inet_netconf_msgsize_devconf(int type)
2118 {
2119 	int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
2120 		   + nla_total_size(4);	/* NETCONFA_IFINDEX */
2121 	bool all = false;
2122 
2123 	if (type == NETCONFA_ALL)
2124 		all = true;
2125 
2126 	if (all || type == NETCONFA_FORWARDING)
2127 		size += nla_total_size(4);
2128 	if (all || type == NETCONFA_RP_FILTER)
2129 		size += nla_total_size(4);
2130 	if (all || type == NETCONFA_MC_FORWARDING)
2131 		size += nla_total_size(4);
2132 	if (all || type == NETCONFA_BC_FORWARDING)
2133 		size += nla_total_size(4);
2134 	if (all || type == NETCONFA_PROXY_NEIGH)
2135 		size += nla_total_size(4);
2136 	if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
2137 		size += nla_total_size(4);
2138 
2139 	return size;
2140 }
2141 
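/* Build one RTM_NEWNETCONF message.  With type == NETCONFA_ALL every
 * supported attribute is included, otherwise only the requested one.
 * A NULL @devconf yields a message carrying just NETCONFA_IFINDEX,
 * as used for RTM_DELNETCONF notifications.
 */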
2142 static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
2143 				     const struct ipv4_devconf *devconf,
2144 				     u32 portid, u32 seq, int event,
2145 				     unsigned int flags, int type)
2146 {
2147 	struct nlmsghdr  *nlh;
2148 	struct netconfmsg *ncm;
2149 	bool all = false;
2150 
2151 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
2152 			flags);
2153 	if (!nlh)
2154 		return -EMSGSIZE;
2155 
2156 	if (type == NETCONFA_ALL)
2157 		all = true;
2158 
2159 	ncm = nlmsg_data(nlh);
2160 	ncm->ncm_family = AF_INET;
2161 
2162 	if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
2163 		goto nla_put_failure;
2164 
2165 	if (!devconf)
2166 		goto out;
2167 
2168 	if ((all || type == NETCONFA_FORWARDING) &&
2169 	    nla_put_s32(skb, NETCONFA_FORWARDING,
2170 			IPV4_DEVCONF_RO(*devconf, FORWARDING)) < 0)
2171 		goto nla_put_failure;
2172 	if ((all || type == NETCONFA_RP_FILTER) &&
2173 	    nla_put_s32(skb, NETCONFA_RP_FILTER,
2174 			IPV4_DEVCONF_RO(*devconf, RP_FILTER)) < 0)
2175 		goto nla_put_failure;
2176 	if ((all || type == NETCONFA_MC_FORWARDING) &&
2177 	    nla_put_s32(skb, NETCONFA_MC_FORWARDING,
2178 			IPV4_DEVCONF_RO(*devconf, MC_FORWARDING)) < 0)
2179 		goto nla_put_failure;
2180 	if ((all || type == NETCONFA_BC_FORWARDING) &&
2181 	    nla_put_s32(skb, NETCONFA_BC_FORWARDING,
2182 			IPV4_DEVCONF_RO(*devconf, BC_FORWARDING)) < 0)
2183 		goto nla_put_failure;
2184 	if ((all || type == NETCONFA_PROXY_NEIGH) &&
2185 	    nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
2186 			IPV4_DEVCONF_RO(*devconf, PROXY_ARP)) < 0)
2187 		goto nla_put_failure;
2188 	if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
2189 	    nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
2190 			IPV4_DEVCONF_RO(*devconf,
2191 					IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
2192 		goto nla_put_failure;
2193 
2194 out:
2195 	nlmsg_end(skb, nlh);
2196 	return 0;
2197 
2198 nla_put_failure:
2199 	nlmsg_cancel(skb, nlh);
2200 	return -EMSGSIZE;
2201 }
2202 
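/* Broadcast a devconf change to RTNLGRP_IPV4_NETCONF listeners. */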
2203 void inet_netconf_notify_devconf(struct net *net, int event, int type,
2204 				 int ifindex, struct ipv4_devconf *devconf)
2205 {
2206 	struct sk_buff *skb;
2207 	int err = -ENOBUFS;
2208 
2209 	skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_KERNEL);
2210 	if (!skb)
2211 		goto errout;
2212 
2213 	err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
2214 					event, 0, type);
2215 	if (err < 0) {
2216 		/* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
2217 		WARN_ON(err == -EMSGSIZE);
2218 		kfree_skb(skb);
2219 		goto errout;
2220 	}
2221 	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_NETCONF, NULL, GFP_KERNEL);
2222 	return;
2223 errout:
2224 	rtnl_set_sk_err(net, RTNLGRP_IPV4_NETCONF, err);
2225 }
2226 
2227 static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
2228 	[NETCONFA_IFINDEX]	= { .len = sizeof(int) },
2229 	[NETCONFA_FORWARDING]	= { .len = sizeof(int) },
2230 	[NETCONFA_RP_FILTER]	= { .len = sizeof(int) },
2231 	[NETCONFA_PROXY_NEIGH]	= { .len = sizeof(int) },
2232 	[NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN]	= { .len = sizeof(int) },
2233 };
2234 
2235 static int inet_netconf_valid_get_req(struct sk_buff *skb,
2236 				      const struct nlmsghdr *nlh,
2237 				      struct nlattr **tb,
2238 				      struct netlink_ext_ack *extack)
2239 {
2240 	int i, err;
2241 
2242 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) {
2243 		NL_SET_ERR_MSG(extack, "ipv4: Invalid header for netconf get request");
2244 		return -EINVAL;
2245 	}
2246 
2247 	if (!netlink_strict_get_check(skb))
2248 		return nlmsg_parse_deprecated(nlh, sizeof(struct netconfmsg),
2249 					      tb, NETCONFA_MAX,
2250 					      devconf_ipv4_policy, extack);
2251 
2252 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct netconfmsg),
2253 					    tb, NETCONFA_MAX,
2254 					    devconf_ipv4_policy, extack);
2255 	if (err)
2256 		return err;
2257 
2258 	for (i = 0; i <= NETCONFA_MAX; i++) {
2259 		if (!tb[i])
2260 			continue;
2261 
2262 		switch (i) {
2263 		case NETCONFA_IFINDEX:
2264 			break;
2265 		default:
2266 			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in netconf get request");
2267 			return -EINVAL;
2268 		}
2269 	}
2270 
2271 	return 0;
2272 }
2273 
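/* RTM_GETNETCONF doit handler: NETCONFA_IFINDEX selects a device or
 * one of the special "all"/"default" tables, and the complete set of
 * netconf values is returned in a single unicast reply.
 */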
2274 static int inet_netconf_get_devconf(struct sk_buff *in_skb,
2275 				    struct nlmsghdr *nlh,
2276 				    struct netlink_ext_ack *extack)
2277 {
2278 	struct net *net = sock_net(in_skb->sk);
2279 	struct nlattr *tb[NETCONFA_MAX + 1];
2280 	const struct ipv4_devconf *devconf;
2281 	struct in_device *in_dev = NULL;
2282 	struct net_device *dev = NULL;
2283 	struct sk_buff *skb;
2284 	int ifindex;
2285 	int err;
2286 
2287 	err = inet_netconf_valid_get_req(in_skb, nlh, tb, extack);
2288 	if (err)
2289 		return err;
2290 
2291 	if (!tb[NETCONFA_IFINDEX])
2292 		return -EINVAL;
2293 
2294 	ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
2295 	switch (ifindex) {
2296 	case NETCONFA_IFINDEX_ALL:
2297 		devconf = net->ipv4.devconf_all;
2298 		break;
2299 	case NETCONFA_IFINDEX_DEFAULT:
2300 		devconf = net->ipv4.devconf_dflt;
2301 		break;
2302 	default:
2303 		err = -ENODEV;
2304 		dev = dev_get_by_index(net, ifindex);
2305 		if (dev)
2306 			in_dev = in_dev_get(dev);
2307 		if (!in_dev)
2308 			goto errout;
2309 		devconf = &in_dev->cnf;
2310 		break;
2311 	}
2312 
2313 	err = -ENOBUFS;
2314 	skb = nlmsg_new(inet_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
2315 	if (!skb)
2316 		goto errout;
2317 
2318 	err = inet_netconf_fill_devconf(skb, ifindex, devconf,
2319 					NETLINK_CB(in_skb).portid,
2320 					nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
2321 					NETCONFA_ALL);
2322 	if (err < 0) {
2323 		/* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
2324 		WARN_ON(err == -EMSGSIZE);
2325 		kfree_skb(skb);
2326 		goto errout;
2327 	}
2328 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2329 errout:
2330 	if (in_dev)
2331 		in_dev_put(in_dev);
2332 	dev_put(dev);
2333 	return err;
2334 }
2335 
2336 static int inet_netconf_dump_devconf(struct sk_buff *skb,
2337 				     struct netlink_callback *cb)
2338 {
2339 	const struct nlmsghdr *nlh = cb->nlh;
2340 	struct net *net = sock_net(skb->sk);
2341 	struct {
2342 		unsigned long ifindex;
2343 		unsigned int all_default;
2344 	} *ctx = (void *)cb->ctx;
2345 	const struct in_device *in_dev;
2346 	struct net_device *dev;
2347 	int err = 0;
2348 
2349 	if (cb->strict_check) {
2350 		struct netlink_ext_ack *extack = cb->extack;
2351 		struct netconfmsg *ncm;
2352 
2353 		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) {
2354 			NL_SET_ERR_MSG(extack, "ipv4: Invalid header for netconf dump request");
2355 			return -EINVAL;
2356 		}
2357 
2358 		if (nlmsg_attrlen(nlh, sizeof(*ncm))) {
2359 			NL_SET_ERR_MSG(extack, "ipv4: Invalid data after header in netconf dump request");
2360 			return -EINVAL;
2361 		}
2362 	}
2363 
2364 	rcu_read_lock();
2365 	for_each_netdev_dump(net, dev, ctx->ifindex) {
2366 		in_dev = __in_dev_get_rcu(dev);
2367 		if (!in_dev)
2368 			continue;
2369 		err = inet_netconf_fill_devconf(skb, dev->ifindex,
2370 						&in_dev->cnf,
2371 						NETLINK_CB(cb->skb).portid,
2372 						nlh->nlmsg_seq,
2373 						RTM_NEWNETCONF, NLM_F_MULTI,
2374 						NETCONFA_ALL);
2375 		if (err < 0)
2376 			goto done;
2377 	}
2378 	if (ctx->all_default == 0) {
2379 		err = inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
2380 						net->ipv4.devconf_all,
2381 						NETLINK_CB(cb->skb).portid,
2382 						nlh->nlmsg_seq,
2383 						RTM_NEWNETCONF, NLM_F_MULTI,
2384 						NETCONFA_ALL);
2385 		if (err < 0)
2386 			goto done;
2387 		ctx->all_default++;
2388 	}
2389 	if (ctx->all_default == 1) {
2390 		err = inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
2391 						net->ipv4.devconf_dflt,
2392 						NETLINK_CB(cb->skb).portid,
2393 						nlh->nlmsg_seq,
2394 						RTM_NEWNETCONF, NLM_F_MULTI,
2395 						NETCONFA_ALL);
2396 		if (err < 0)
2397 			goto done;
2398 		ctx->all_default++;
2399 	}
2400 done:
2401 	rcu_read_unlock();
2402 	return err;
2403 }
2404 
2405 #ifdef CONFIG_SYSCTL
2406 
2407 static void devinet_copy_dflt_conf(struct net *net, int i)
2408 {
2409 	struct net_device *dev;
2410 
2411 	rcu_read_lock();
2412 	for_each_netdev_rcu(net, dev) {
2413 		struct in_device *in_dev;
2414 
2415 		in_dev = __in_dev_get_rcu(dev);
2416 		if (in_dev && !test_bit(i, in_dev->cnf.state))
2417 			in_dev->cnf.data[i] = net->ipv4.devconf_dflt->data[i];
2418 	}
2419 	rcu_read_unlock();
2420 }
2421 
2422 /* called with RTNL locked */
2423 static void inet_forward_change(struct net *net)
2424 {
2425 	struct net_device *dev;
2426 	int on = IPV4_DEVCONF_ALL(net, FORWARDING);
2427 
2428 	IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on;
2429 	IPV4_DEVCONF_DFLT(net, FORWARDING) = on;
2430 	inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2431 				    NETCONFA_FORWARDING,
2432 				    NETCONFA_IFINDEX_ALL,
2433 				    net->ipv4.devconf_all);
2434 	inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2435 				    NETCONFA_FORWARDING,
2436 				    NETCONFA_IFINDEX_DEFAULT,
2437 				    net->ipv4.devconf_dflt);
2438 
2439 	for_each_netdev(net, dev) {
2440 		struct in_device *in_dev;
2441 
2442 		if (on)
2443 			dev_disable_lro(dev);
2444 
2445 		in_dev = __in_dev_get_rtnl_net(dev);
2446 		if (in_dev) {
2447 			IN_DEV_CONF_SET(in_dev, FORWARDING, on);
2448 			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2449 						    NETCONFA_FORWARDING,
2450 						    dev->ifindex, &in_dev->cnf);
2451 		}
2452 	}
2453 }
2454 
2455 static int devinet_conf_ifindex(struct net *net, struct ipv4_devconf *cnf)
2456 {
2457 	if (cnf == net->ipv4.devconf_dflt)
2458 		return NETCONFA_IFINDEX_DEFAULT;
2459 	else if (cnf == net->ipv4.devconf_all)
2460 		return NETCONFA_IFINDEX_ALL;
2461 	else {
2462 		struct in_device *idev
2463 			= container_of(cnf, struct in_device, cnf);
2464 		return idev->dev->ifindex;
2465 	}
2466 }
2467 
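/* proc handler shared by most per-device sysctls.  Marks the value as
 * explicitly set (so later writes to the "default" template no longer
 * override it), flushes the routing cache where the semantics require
 * it, and emits netconf notifications for the attributes userspace can
 * monitor (rp_filter, proxy_arp, ignore_routes_with_linkdown).
 */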
2468 static int devinet_conf_proc(const struct ctl_table *ctl, int write,
2469 			     void *buffer, size_t *lenp, loff_t *ppos)
2470 {
2471 	int old_value = *(int *)ctl->data;
2472 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2473 	int new_value = *(int *)ctl->data;
2474 
2475 	if (write) {
2476 		struct ipv4_devconf *cnf = ctl->extra1;
2477 		struct net *net = ctl->extra2;
2478 		int i = (int *)ctl->data - cnf->data;
2479 		int ifindex;
2480 
2481 		set_bit(i, cnf->state);
2482 
2483 		if (cnf == net->ipv4.devconf_dflt)
2484 			devinet_copy_dflt_conf(net, i);
2485 		if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1 ||
2486 		    i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
2487 			if ((new_value == 0) && (old_value != 0))
2488 				rt_cache_flush(net);
2489 
2490 		if (i == IPV4_DEVCONF_BC_FORWARDING - 1 &&
2491 		    new_value != old_value)
2492 			rt_cache_flush(net);
2493 
2494 		if (i == IPV4_DEVCONF_RP_FILTER - 1 &&
2495 		    new_value != old_value) {
2496 			ifindex = devinet_conf_ifindex(net, cnf);
2497 			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2498 						    NETCONFA_RP_FILTER,
2499 						    ifindex, cnf);
2500 		}
2501 		if (i == IPV4_DEVCONF_PROXY_ARP - 1 &&
2502 		    new_value != old_value) {
2503 			ifindex = devinet_conf_ifindex(net, cnf);
2504 			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2505 						    NETCONFA_PROXY_NEIGH,
2506 						    ifindex, cnf);
2507 		}
2508 		if (i == IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN - 1 &&
2509 		    new_value != old_value) {
2510 			ifindex = devinet_conf_ifindex(net, cnf);
2511 			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2512 						    NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
2513 						    ifindex, cnf);
2514 		}
2515 	}
2516 
2517 	return ret;
2518 }
2519 
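/* proc handler for the "forwarding" sysctls.  Writes require
 * CAP_NET_ADMIN in the owning user namespace.  Changing anything other
 * than the per-netns default takes the per-netns RTNL lock (restarting
 * the syscall if it cannot be acquired), may disable LRO, flushes the
 * routing cache and sends the corresponding netconf notifications.
 */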
2520 static int devinet_sysctl_forward(const struct ctl_table *ctl, int write,
2521 				  void *buffer, size_t *lenp, loff_t *ppos)
2522 {
2523 	int *valp = ctl->data;
2524 	int val = *valp;
2525 	loff_t pos = *ppos;
2526 	struct net *net = ctl->extra2;
2527 	int ret;
2528 
2529 	if (write && !ns_capable(net->user_ns, CAP_NET_ADMIN))
2530 		return -EPERM;
2531 
2532 	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2533 
2534 	if (write && *valp != val) {
2535 		if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
2536 			if (!rtnl_net_trylock(net)) {
2537 				/* Restore the original values before restarting */
2538 				*valp = val;
2539 				*ppos = pos;
2540 				return restart_syscall();
2541 			}
2542 			if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
2543 				inet_forward_change(net);
2544 			} else {
2545 				struct ipv4_devconf *cnf = ctl->extra1;
2546 				struct in_device *idev =
2547 					container_of(cnf, struct in_device, cnf);
2548 				if (*valp)
2549 					dev_disable_lro(idev->dev);
2550 				inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2551 							    NETCONFA_FORWARDING,
2552 							    idev->dev->ifindex,
2553 							    cnf);
2554 			}
2555 			rtnl_net_unlock(net);
2556 			rt_cache_flush(net);
2557 		} else
2558 			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2559 						    NETCONFA_FORWARDING,
2560 						    NETCONFA_IFINDEX_DEFAULT,
2561 						    net->ipv4.devconf_dflt);
2562 	}
2563 
2564 	return ret;
2565 }
2566 
2567 static int ipv4_doint_and_flush(const struct ctl_table *ctl, int write,
2568 				void *buffer, size_t *lenp, loff_t *ppos)
2569 {
2570 	int *valp = ctl->data;
2571 	int val = *valp;
2572 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2573 	struct net *net = ctl->extra2;
2574 
2575 	if (write && *valp != val)
2576 		rt_cache_flush(net);
2577 
2578 	return ret;
2579 }
2580 
2581 #define DEVINET_SYSCTL_ENTRY(attr, name, mval, proc) \
2582 	{ \
2583 		.procname	= name, \
2584 		.data		= ipv4_devconf.data + \
2585 				  IPV4_DEVCONF_ ## attr - 1, \
2586 		.maxlen		= sizeof(int), \
2587 		.mode		= mval, \
2588 		.proc_handler	= proc, \
2589 		.extra1		= &ipv4_devconf, \
2590 	}
2591 
2592 #define DEVINET_SYSCTL_RW_ENTRY(attr, name) \
2593 	DEVINET_SYSCTL_ENTRY(attr, name, 0644, devinet_conf_proc)
2594 
2595 #define DEVINET_SYSCTL_RO_ENTRY(attr, name) \
2596 	DEVINET_SYSCTL_ENTRY(attr, name, 0444, devinet_conf_proc)
2597 
2598 #define DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, proc) \
2599 	DEVINET_SYSCTL_ENTRY(attr, name, 0644, proc)
2600 
2601 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
2602 	DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
2603 
2604 static struct devinet_sysctl_table {
2605 	struct ctl_table_header *sysctl_header;
2606 	struct ctl_table devinet_vars[IPV4_DEVCONF_MAX];
2607 } devinet_sysctl = {
2608 	.devinet_vars = {
2609 		DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding",
2610 					     devinet_sysctl_forward),
2611 		DEVINET_SYSCTL_RO_ENTRY(MC_FORWARDING, "mc_forwarding"),
2612 		DEVINET_SYSCTL_RW_ENTRY(BC_FORWARDING, "bc_forwarding"),
2613 
2614 		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_REDIRECTS, "accept_redirects"),
2615 		DEVINET_SYSCTL_RW_ENTRY(SECURE_REDIRECTS, "secure_redirects"),
2616 		DEVINET_SYSCTL_RW_ENTRY(SHARED_MEDIA, "shared_media"),
2617 		DEVINET_SYSCTL_RW_ENTRY(RP_FILTER, "rp_filter"),
2618 		DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS, "send_redirects"),
2619 		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
2620 					"accept_source_route"),
2621 		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"),
2622 		DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
2623 		DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
2624 		DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
2625 		DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
2626 		DEVINET_SYSCTL_RW_ENTRY(LOG_MARTIANS, "log_martians"),
2627 		DEVINET_SYSCTL_RW_ENTRY(TAG, "tag"),
2628 		DEVINET_SYSCTL_RW_ENTRY(ARPFILTER, "arp_filter"),
2629 		DEVINET_SYSCTL_RW_ENTRY(ARP_ANNOUNCE, "arp_announce"),
2630 		DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
2631 		DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
2632 		DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
2633 		DEVINET_SYSCTL_RW_ENTRY(ARP_EVICT_NOCARRIER,
2634 					"arp_evict_nocarrier"),
2635 		DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
2636 		DEVINET_SYSCTL_RW_ENTRY(FORCE_IGMP_VERSION,
2637 					"force_igmp_version"),
2638 		DEVINET_SYSCTL_RW_ENTRY(IGMPV2_UNSOLICITED_REPORT_INTERVAL,
2639 					"igmpv2_unsolicited_report_interval"),
2640 		DEVINET_SYSCTL_RW_ENTRY(IGMPV3_UNSOLICITED_REPORT_INTERVAL,
2641 					"igmpv3_unsolicited_report_interval"),
2642 		DEVINET_SYSCTL_RW_ENTRY(IGNORE_ROUTES_WITH_LINKDOWN,
2643 					"ignore_routes_with_linkdown"),
2644 		DEVINET_SYSCTL_RW_ENTRY(DROP_GRATUITOUS_ARP,
2645 					"drop_gratuitous_arp"),
2646 
2647 		DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
2648 		DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
2649 		DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES,
2650 					      "promote_secondaries"),
2651 		DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET,
2652 					      "route_localnet"),
2653 		DEVINET_SYSCTL_FLUSHING_ENTRY(DROP_UNICAST_IN_L2_MULTICAST,
2654 					      "drop_unicast_in_l2_multicast"),
2655 	},
2656 };
2657 
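/* Clone the devinet_sysctl template, point every entry at @p's data,
 * register it under net/ipv4/conf/<dev_name> and announce the new
 * table with a NETCONFA_ALL netconf notification.
 */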
2658 static int __devinet_sysctl_register(struct net *net, char *dev_name,
2659 				     int ifindex, struct ipv4_devconf *p)
2660 {
2661 	int i;
2662 	struct devinet_sysctl_table *t;
2663 	char path[sizeof("net/ipv4/conf/") + IFNAMSIZ];
2664 
2665 	t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL_ACCOUNT);
2666 	if (!t)
2667 		goto out;
2668 
2669 	for (i = 0; i < ARRAY_SIZE(t->devinet_vars); i++) {
2670 		t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf;
2671 		t->devinet_vars[i].extra1 = p;
2672 		t->devinet_vars[i].extra2 = net;
2673 	}
2674 
2675 	snprintf(path, sizeof(path), "net/ipv4/conf/%s", dev_name);
2676 
2677 	t->sysctl_header = register_net_sysctl(net, path, t->devinet_vars);
2678 	if (!t->sysctl_header)
2679 		goto free;
2680 
2681 	p->sysctl = t;
2682 
2683 	inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL,
2684 				    ifindex, p);
2685 	return 0;
2686 
2687 free:
2688 	kfree(t);
2689 out:
2690 	return -ENOMEM;
2691 }
2692 
2693 static void __devinet_sysctl_unregister(struct net *net,
2694 					struct ipv4_devconf *cnf, int ifindex)
2695 {
2696 	struct devinet_sysctl_table *t = cnf->sysctl;
2697 
2698 	if (t) {
2699 		cnf->sysctl = NULL;
2700 		unregister_net_sysctl_table(t->sysctl_header);
2701 		kfree(t);
2702 	}
2703 
2704 	inet_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL);
2705 }
2706 
2707 static int devinet_sysctl_register(struct in_device *idev)
2708 {
2709 	int err;
2710 
2711 	if (!sysctl_dev_name_is_allowed(idev->dev->name))
2712 		return -EINVAL;
2713 
2714 	err = neigh_sysctl_register(idev->dev, idev->arp_parms, NULL);
2715 	if (err)
2716 		return err;
2717 	err = __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
2718 					idev->dev->ifindex, &idev->cnf);
2719 	if (err)
2720 		neigh_sysctl_unregister(idev->arp_parms);
2721 	return err;
2722 }
2723 
2724 static void devinet_sysctl_unregister(struct in_device *idev)
2725 {
2726 	struct net *net = dev_net(idev->dev);
2727 
2728 	__devinet_sysctl_unregister(net, &idev->cnf, idev->dev->ifindex);
2729 	neigh_sysctl_unregister(idev->arp_parms);
2730 }
2731 
2732 static struct ctl_table ctl_forward_entry[] = {
2733 	{
2734 		.procname	= "ip_forward",
2735 		.data		= &ipv4_devconf.data[
2736 					IPV4_DEVCONF_FORWARDING - 1],
2737 		.maxlen		= sizeof(int),
2738 		.mode		= 0644,
2739 		.proc_handler	= devinet_sysctl_forward,
2740 		.extra1		= &ipv4_devconf,
2741 		.extra2		= &init_net,
2742 	},
2743 };
2744 #endif
2745 
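/* Per-netns initialisation: allocate the address hash table and the
 * "all"/"default" devconf templates (optionally inherited from
 * init_net or the current netns, see net.core.devconf_inherit_init_net),
 * register the matching sysctl trees plus the ip_forward entry, and
 * arm the deferred address-lifetime checker.
 */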
2746 static __net_init int devinet_init_net(struct net *net)
2747 {
2748 #ifdef CONFIG_SYSCTL
2749 	struct ctl_table_header *forw_hdr;
2750 	struct ctl_table *tbl;
2751 #endif
2752 	struct ipv4_devconf *all, *dflt;
2753 	int err;
2754 	int i;
2755 
2756 	err = -ENOMEM;
2757 	net->ipv4.inet_addr_lst = kmalloc_array(IN4_ADDR_HSIZE,
2758 						sizeof(struct hlist_head),
2759 						GFP_KERNEL);
2760 	if (!net->ipv4.inet_addr_lst)
2761 		goto err_alloc_hash;
2762 
2763 	all = kmemdup(&ipv4_devconf, sizeof(ipv4_devconf), GFP_KERNEL);
2764 	if (!all)
2765 		goto err_alloc_all;
2766 
2767 	dflt = kmemdup(&ipv4_devconf_dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
2768 	if (!dflt)
2769 		goto err_alloc_dflt;
2770 
2771 #ifdef CONFIG_SYSCTL
2772 	tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
2773 	if (!tbl)
2774 		goto err_alloc_ctl;
2775 
2776 	tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
2777 	tbl[0].extra1 = all;
2778 	tbl[0].extra2 = net;
2779 #endif
2780 
2781 	if (!net_eq(net, &init_net)) {
2782 		switch (net_inherit_devconf()) {
2783 		case 3:
2784 			/* copy from the current netns */
2785 			memcpy(all, current->nsproxy->net_ns->ipv4.devconf_all,
2786 			       sizeof(ipv4_devconf));
2787 			memcpy(dflt,
2788 			       current->nsproxy->net_ns->ipv4.devconf_dflt,
2789 			       sizeof(ipv4_devconf_dflt));
2790 			break;
2791 		case 0:
2792 		case 1:
2793 			/* copy from init_net */
2794 			memcpy(all, init_net.ipv4.devconf_all,
2795 			       sizeof(ipv4_devconf));
2796 			memcpy(dflt, init_net.ipv4.devconf_dflt,
2797 			       sizeof(ipv4_devconf_dflt));
2798 			break;
2799 		case 2:
2800 			/* use compiled values */
2801 			break;
2802 		}
2803 	}
2804 
2805 #ifdef CONFIG_SYSCTL
2806 	err = __devinet_sysctl_register(net, "all", NETCONFA_IFINDEX_ALL, all);
2807 	if (err < 0)
2808 		goto err_reg_all;
2809 
2810 	err = __devinet_sysctl_register(net, "default",
2811 					NETCONFA_IFINDEX_DEFAULT, dflt);
2812 	if (err < 0)
2813 		goto err_reg_dflt;
2814 
2815 	err = -ENOMEM;
2816 	forw_hdr = register_net_sysctl_sz(net, "net/ipv4", tbl,
2817 					  ARRAY_SIZE(ctl_forward_entry));
2818 	if (!forw_hdr)
2819 		goto err_reg_ctl;
2820 	net->ipv4.forw_hdr = forw_hdr;
2821 #endif
2822 
2823 	for (i = 0; i < IN4_ADDR_HSIZE; i++)
2824 		INIT_HLIST_HEAD(&net->ipv4.inet_addr_lst[i]);
2825 
2826 	INIT_DEFERRABLE_WORK(&net->ipv4.addr_chk_work, check_lifetime);
2827 
2828 	net->ipv4.devconf_all = all;
2829 	net->ipv4.devconf_dflt = dflt;
2830 	return 0;
2831 
2832 #ifdef CONFIG_SYSCTL
2833 err_reg_ctl:
2834 	__devinet_sysctl_unregister(net, dflt, NETCONFA_IFINDEX_DEFAULT);
2835 err_reg_dflt:
2836 	__devinet_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
2837 err_reg_all:
2838 	kfree(tbl);
2839 err_alloc_ctl:
2840 #endif
2841 	kfree(dflt);
2842 err_alloc_dflt:
2843 	kfree(all);
2844 err_alloc_all:
2845 	kfree(net->ipv4.inet_addr_lst);
2846 err_alloc_hash:
2847 	return err;
2848 }
2849 
2850 static __net_exit void devinet_exit_net(struct net *net)
2851 {
2852 #ifdef CONFIG_SYSCTL
2853 	const struct ctl_table *tbl;
2854 #endif
2855 
2856 	cancel_delayed_work_sync(&net->ipv4.addr_chk_work);
2857 
2858 #ifdef CONFIG_SYSCTL
2859 	tbl = net->ipv4.forw_hdr->ctl_table_arg;
2860 	unregister_net_sysctl_table(net->ipv4.forw_hdr);
2861 	__devinet_sysctl_unregister(net, net->ipv4.devconf_dflt,
2862 				    NETCONFA_IFINDEX_DEFAULT);
2863 	__devinet_sysctl_unregister(net, net->ipv4.devconf_all,
2864 				    NETCONFA_IFINDEX_ALL);
2865 	kfree(tbl);
2866 #endif
2867 	kfree(net->ipv4.devconf_dflt);
2868 	kfree(net->ipv4.devconf_all);
2869 	kfree(net->ipv4.inet_addr_lst);
2870 }
2871 
2872 static __net_initdata struct pernet_operations devinet_ops = {
2873 	.init = devinet_init_net,
2874 	.exit = devinet_exit_net,
2875 };
2876 
2877 static struct rtnl_af_ops inet_af_ops __read_mostly = {
2878 	.family		  = AF_INET,
2879 	.fill_link_af	  = inet_fill_link_af,
2880 	.get_link_af_size = inet_get_link_af_size,
2881 	.validate_link_af = inet_validate_link_af,
2882 	.set_link_af	  = inet_set_link_af,
2883 };
2884 
2885 static const struct rtnl_msg_handler devinet_rtnl_msg_handlers[] __initconst = {
2886 	{.protocol = PF_INET, .msgtype = RTM_NEWADDR, .doit = inet_rtm_newaddr,
2887 	 .flags = RTNL_FLAG_DOIT_PERNET},
2888 	{.protocol = PF_INET, .msgtype = RTM_DELADDR, .doit = inet_rtm_deladdr,
2889 	 .flags = RTNL_FLAG_DOIT_PERNET},
2890 	{.protocol = PF_INET, .msgtype = RTM_GETADDR, .dumpit = inet_dump_ifaddr,
2891 	 .flags = RTNL_FLAG_DUMP_UNLOCKED | RTNL_FLAG_DUMP_SPLIT_NLM_DONE},
2892 	{.protocol = PF_INET, .msgtype = RTM_GETNETCONF,
2893 	 .doit = inet_netconf_get_devconf, .dumpit = inet_netconf_dump_devconf,
2894 	 .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED},
2895 	{.owner = THIS_MODULE, .protocol = PF_INET, .msgtype = RTM_GETMULTICAST,
2896 	 .dumpit = inet_dump_ifmcaddr, .flags = RTNL_FLAG_DUMP_UNLOCKED},
2897 };
2898 
2899 void __init devinet_init(void)
2900 {
2901 	register_pernet_subsys(&devinet_ops);
2902 	register_netdevice_notifier(&ip_netdev_notifier);
2903 
2904 	if (rtnl_af_register(&inet_af_ops))
2905 		panic("Unable to register inet_af_ops\n");
2906 
2907 	rtnl_register_many(devinet_rtnl_msg_handlers);
2908 }
2909