xref: /linux/net/ipv4/devinet.c (revision fa8fca88714c3a4a74f972ed37328e2f0bbef9fa)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	NET3	IP device support routines.
4  *
5  *	Derived from the IP parts of dev.c 1.0.19
6  * 		Authors:	Ross Biro
7  *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
8  *				Mark Evans, <evansmp@uhura.aston.ac.uk>
9  *
10  *	Additional Authors:
11  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
12  *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
13  *
14  *	Changes:
15  *		Alexey Kuznetsov:	pa_* fields are replaced with ifaddr
16  *					lists.
17  *		Cyrus Durgin:		updated for kmod
18  *		Matthias Andree:	in devinet_ioctl, compare label and
19  *					address (4.4BSD alias style support),
20  *					fall back to comparing just the label
21  *					if no match found.
22  */
23 
24 
25 #include <linux/uaccess.h>
26 #include <linux/bitops.h>
27 #include <linux/capability.h>
28 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/kernel.h>
31 #include <linux/sched/signal.h>
32 #include <linux/string.h>
33 #include <linux/mm.h>
34 #include <linux/socket.h>
35 #include <linux/sockios.h>
36 #include <linux/in.h>
37 #include <linux/errno.h>
38 #include <linux/interrupt.h>
39 #include <linux/if_addr.h>
40 #include <linux/if_ether.h>
41 #include <linux/inet.h>
42 #include <linux/netdevice.h>
43 #include <linux/etherdevice.h>
44 #include <linux/skbuff.h>
45 #include <linux/init.h>
46 #include <linux/notifier.h>
47 #include <linux/inetdevice.h>
48 #include <linux/igmp.h>
49 #include "igmp_internal.h"
50 #include <linux/slab.h>
51 #include <linux/hash.h>
52 #ifdef CONFIG_SYSCTL
53 #include <linux/sysctl.h>
54 #endif
55 #include <linux/kmod.h>
56 #include <linux/netconf.h>
57 
58 #include <net/arp.h>
59 #include <net/ip.h>
60 #include <net/route.h>
61 #include <net/ip_fib.h>
62 #include <net/rtnetlink.h>
63 #include <net/net_namespace.h>
64 #include <net/addrconf.h>
65 
/* Address flags that are only meaningful for IPv6; they are masked off
 * IPv4 addresses in __inet_insert_ifa().
 */
#define IPV6ONLY_FLAGS	\
		(IFA_F_NODAD | IFA_F_OPTIMISTIC | IFA_F_DADFAILED | \
		 IFA_F_HOMEADDRESS | IFA_F_TENTATIVE | \
		 IFA_F_MANAGETEMPADDR | IFA_F_STABLE_PRIVACY)
70 
/* Built-in defaults for the per-device IPv4 configuration knobs. */
static struct ipv4_devconf ipv4_devconf = {
	.data = {
		[IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
		[IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
		[IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] =  1000 /*ms*/,
		[IPV4_DEVCONF_ARP_EVICT_NOCARRIER - 1] = 1,
	},
};
82 
/* Template configuration copied into every new in_device (see the
 * memcpy of net->ipv4.devconf_dflt in inetdev_init()).
 */
static struct ipv4_devconf ipv4_devconf_dflt = {
	.data = {
		[IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
		[IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1,
		[IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
		[IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] =  1000 /*ms*/,
		[IPV4_DEVCONF_ARP_EVICT_NOCARRIER - 1] = 1,
	},
};

#define IPV4_DEVCONF_DFLT(net, attr) \
	IPV4_DEVCONF((*net->ipv4.devconf_dflt), attr)
98 
/* Netlink attribute policy for RTM_NEWADDR/RTM_DELADDR requests. */
static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
	[IFA_LOCAL]     	= { .type = NLA_U32 },
	[IFA_ADDRESS]   	= { .type = NLA_U32 },
	[IFA_BROADCAST] 	= { .type = NLA_U32 },
	[IFA_LABEL]     	= { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
	[IFA_CACHEINFO]		= { .len = sizeof(struct ifa_cacheinfo) },
	[IFA_FLAGS]		= { .type = NLA_U32 },
	[IFA_RT_PRIORITY]	= { .type = NLA_U32 },
	[IFA_TARGET_NETNSID]	= { .type = NLA_S32 },
	[IFA_PROTO]		= { .type = NLA_U8 },
};

/* Size of the per-netns hash table mapping local address -> in_ifaddr. */
#define IN4_ADDR_HSIZE_SHIFT	8
#define IN4_ADDR_HSIZE		(1U << IN4_ADDR_HSIZE_SHIFT)
113 
114 static u32 inet_addr_hash(const struct net *net, __be32 addr)
115 {
116 	u32 val = __ipv4_addr_hash(addr, net_hash_mix(net));
117 
118 	return hash_32(val, IN4_ADDR_HSIZE_SHIFT);
119 }
120 
121 static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
122 {
123 	u32 hash = inet_addr_hash(net, ifa->ifa_local);
124 
125 	ASSERT_RTNL();
126 	hlist_add_head_rcu(&ifa->addr_lst, &net->ipv4.inet_addr_lst[hash]);
127 }
128 
/* Unhash @ifa from the per-netns address table (RCU-safe removal).
 * Caller must hold RTNL.
 */
static void inet_hash_remove(struct in_ifaddr *ifa)
{
	ASSERT_RTNL();
	hlist_del_init_rcu(&ifa->addr_lst);
}
134 
/**
 * __ip_dev_find - find the first device with a given source address.
 * @net: the net namespace
 * @addr: the source address
 * @devref: if true, take a reference on the found device
 *
 * If a caller uses devref=false, it should be protected by RCU, or RTNL
 *
 * Return: the matching net_device, or NULL if @addr is not local.
 */
struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
{
	struct net_device *result = NULL;
	struct in_ifaddr *ifa;

	rcu_read_lock();
	ifa = inet_lookup_ifaddr_rcu(net, addr);
	if (!ifa) {
		struct flowi4 fl4 = { .daddr = addr };
		struct fib_result res = { 0 };
		struct fib_table *local;

		/* Fallback to FIB local table so that communication
		 * over loopback subnets work.
		 */
		local = fib_get_table(net, RT_TABLE_LOCAL);
		if (local &&
		    !fib_table_lookup(local, &fl4, &res, FIB_LOOKUP_NOREF) &&
		    res.type == RTN_LOCAL)
			result = FIB_RES_DEV(res);
	} else {
		result = ifa->ifa_dev->dev;
	}
	/* Take the reference while still under RCU so the device
	 * cannot go away between lookup and dev_hold().
	 */
	if (result && devref)
		dev_hold(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL(__ip_dev_find);
172 
173 /* called under RCU lock */
174 struct in_ifaddr *inet_lookup_ifaddr_rcu(struct net *net, __be32 addr)
175 {
176 	u32 hash = inet_addr_hash(net, addr);
177 	struct in_ifaddr *ifa;
178 
179 	hlist_for_each_entry_rcu(ifa, &net->ipv4.inet_addr_lst[hash], addr_lst)
180 		if (ifa->ifa_local == addr)
181 			return ifa;
182 
183 	return NULL;
184 }
185 
/* Broadcast an RTM_NEWADDR/RTM_DELADDR netlink message for an address. */
static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32);

/* Notifier chains: address up/down events, and pre-commit validation. */
static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
static BLOCKING_NOTIFIER_HEAD(inetaddr_validator_chain);
static void inet_del_ifa(struct in_device *in_dev,
			 struct in_ifaddr __rcu **ifap,
			 int destroy);
#ifdef CONFIG_SYSCTL
static int devinet_sysctl_register(struct in_device *idev);
static void devinet_sysctl_unregister(struct in_device *idev);
#else
/* Without CONFIG_SYSCTL the sysctl hooks compile to no-ops. */
static int devinet_sysctl_register(struct in_device *idev)
{
	return 0;
}
static void devinet_sysctl_unregister(struct in_device *idev)
{
}
#endif
205 
206 /* Locks all the inet devices. */
207 
208 static struct in_ifaddr *inet_alloc_ifa(struct in_device *in_dev)
209 {
210 	struct in_ifaddr *ifa;
211 
212 	ifa = kzalloc_obj(*ifa, GFP_KERNEL_ACCOUNT);
213 	if (!ifa)
214 		return NULL;
215 
216 	in_dev_hold(in_dev);
217 	ifa->ifa_dev = in_dev;
218 
219 	INIT_HLIST_NODE(&ifa->addr_lst);
220 
221 	return ifa;
222 }
223 
/* RCU callback: drop the in_device reference taken in inet_alloc_ifa()
 * and free the address itself.
 */
static void inet_rcu_free_ifa(struct rcu_head *head)
{
	struct in_ifaddr *ifa = container_of(head, struct in_ifaddr, rcu_head);

	in_dev_put(ifa->ifa_dev);
	kfree(ifa);
}
231 
/* Schedule @ifa for freeing after a (shortened) grace period. */
static void inet_free_ifa(struct in_ifaddr *ifa)
{
	/* Our reference to ifa->ifa_dev must be freed ASAP
	 * to release the reference to the netdev the same way.
	 * in_dev_put() -> in_dev_finish_destroy() -> netdev_put()
	 */
	call_rcu_hurry(&ifa->rcu_head, inet_rcu_free_ifa);
}
240 
/* RCU callback: free the in_device and its multicast hash table. */
static void in_dev_free_rcu(struct rcu_head *head)
{
	struct in_device *idev = container_of(head, struct in_device, rcu_head);

	/* No readers remain after the grace period, so plain access is safe. */
	kfree(rcu_dereference_protected(idev->mc_hash, 1));
	kfree(idev);
}
248 
/* Final teardown of @idev once its last reference is dropped
 * (reached via in_dev_put(), see comment in inet_free_ifa()).
 * Releases the netdev reference and frees the structure after RCU.
 */
void in_dev_finish_destroy(struct in_device *idev)
{
	struct net_device *dev = idev->dev;

	/* All addresses and multicast state must be gone by now. */
	WARN_ON(idev->ifa_list);
	WARN_ON(idev->mc_list);
#ifdef NET_REFCNT_DEBUG
	pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL");
#endif
	netdev_put(dev, &idev->dev_tracker);
	if (!idev->dead)
		pr_err("Freeing alive in_device %p\n", idev);
	else
		call_rcu(&idev->rcu_head, in_dev_free_rcu);
}
EXPORT_SYMBOL(in_dev_finish_destroy);
265 
/* Create and attach the IPv4 state (in_device) for @dev.
 * Caller must hold RTNL.  Returns the new in_device or an ERR_PTR.
 */
static struct in_device *inetdev_init(struct net_device *dev)
{
	struct in_device *in_dev;
	int err = -ENOMEM;

	ASSERT_RTNL();

	in_dev = kzalloc_obj(*in_dev);
	if (!in_dev)
		goto out;
	/* Start from the per-netns default configuration template. */
	memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt,
			sizeof(in_dev->cnf));
	in_dev->cnf.sysctl = NULL;
	in_dev->dev = dev;
	in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl);
	if (!in_dev->arp_parms)
		goto out_kfree;
	if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
		netif_disable_lro(dev);
	/* Reference in_dev->dev */
	netdev_hold(dev, &in_dev->dev_tracker, GFP_KERNEL);
	/* Account for reference dev->ip_ptr (below) */
	refcount_set(&in_dev->refcnt, 1);

	/* blackhole_netdev gets no sysctl/multicast machinery. */
	if (dev != blackhole_netdev) {
		err = devinet_sysctl_register(in_dev);
		if (err) {
			in_dev->dead = 1;
			neigh_parms_release(&arp_tbl, in_dev->arp_parms);
			in_dev_put(in_dev);
			in_dev = NULL;
			goto out;
		}
		ip_mc_init_dev(in_dev);
		if (dev->flags & IFF_UP)
			ip_mc_up(in_dev);
	}

	/* we can receive as soon as ip_ptr is set -- do this last */
	rcu_assign_pointer(dev->ip_ptr, in_dev);
out:
	return in_dev ?: ERR_PTR(err);
out_kfree:
	kfree(in_dev);
	in_dev = NULL;
	goto out;
}
313 
/* Tear down the IPv4 state of a device: delete every address, detach
 * dev->ip_ptr and drop the in_device's initial reference.
 * Caller must hold RTNL.
 */
static void inetdev_destroy(struct in_device *in_dev)
{
	struct net_device *dev;
	struct in_ifaddr *ifa;

	ASSERT_RTNL();

	dev = in_dev->dev;

	/* Mark dead first so concurrent users see the teardown. */
	in_dev->dead = 1;

	ip_mc_destroy_dev(in_dev);

	while ((ifa = rtnl_dereference(in_dev->ifa_list)) != NULL) {
		inet_del_ifa(in_dev, &in_dev->ifa_list, 0);
		inet_free_ifa(ifa);
	}

	RCU_INIT_POINTER(dev->ip_ptr, NULL);

	devinet_sysctl_unregister(in_dev);
	neigh_parms_release(&arp_tbl, in_dev->arp_parms);
	arp_ifdown(dev);

	/* Drop the reference held by dev->ip_ptr. */
	in_dev_put(in_dev);
}
340 
341 static int __init inet_blackhole_dev_init(void)
342 {
343 	struct in_device *in_dev;
344 
345 	rtnl_lock();
346 	in_dev = inetdev_init(blackhole_netdev);
347 	rtnl_unlock();
348 
349 	return PTR_ERR_OR_ZERO(in_dev);
350 }
351 late_initcall(inet_blackhole_dev_init);
352 
353 int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
354 {
355 	const struct in_ifaddr *ifa;
356 
357 	rcu_read_lock();
358 	in_dev_for_each_ifa_rcu(ifa, in_dev) {
359 		if (inet_ifa_match(a, ifa)) {
360 			if (!b || inet_ifa_match(b, ifa)) {
361 				rcu_read_unlock();
362 				return 1;
363 			}
364 		}
365 	}
366 	rcu_read_unlock();
367 	return 0;
368 }
369 
/* Remove the address at *ifap from its in_device, handling secondary
 * address deletion or promotion, FIB updates, and netlink/notifier
 * announcements.  @destroy non-zero also frees the removed in_ifaddr.
 * Caller must hold RTNL.
 */
static void __inet_del_ifa(struct in_device *in_dev,
			   struct in_ifaddr __rcu **ifap,
			   int destroy, struct nlmsghdr *nlh, u32 portid)
{
	struct in_ifaddr *promote = NULL;
	struct in_ifaddr *ifa, *ifa1;
	struct in_ifaddr __rcu **last_prim;
	struct in_ifaddr *prev_prom = NULL;
	int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);

	ASSERT_RTNL();

	ifa1 = rtnl_dereference(*ifap);
	last_prim = ifap;
	/* Device is going away: skip secondary handling entirely. */
	if (in_dev->dead)
		goto no_promotions;

	/* 1. Deleting primary ifaddr forces deletion all secondaries
	 * unless alias promotion is set
	 */

	if (!(ifa1->ifa_flags & IFA_F_SECONDARY)) {
		struct in_ifaddr __rcu **ifap1 = &ifa1->ifa_next;

		while ((ifa = rtnl_dereference(*ifap1)) != NULL) {
			/* Remember the last primary of suitable scope:
			 * a promoted secondary is re-linked after it.
			 */
			if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
			    ifa1->ifa_scope <= ifa->ifa_scope)
				last_prim = &ifa->ifa_next;

			/* Skip entries that are not secondaries of ifa1's subnet. */
			if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
			    ifa1->ifa_mask != ifa->ifa_mask ||
			    !inet_ifa_match(ifa1->ifa_address, ifa)) {
				ifap1 = &ifa->ifa_next;
				prev_prom = ifa;
				continue;
			}

			if (!do_promote) {
				inet_hash_remove(ifa);
				*ifap1 = ifa->ifa_next;

				rtmsg_ifa(RTM_DELADDR, ifa, nlh, portid);
				blocking_notifier_call_chain(&inetaddr_chain,
						NETDEV_DOWN, ifa);
				inet_free_ifa(ifa);
			} else {
				/* First matching secondary becomes the new primary. */
				promote = ifa;
				break;
			}
		}
	}

	/* On promotion all secondaries from subnet are changing
	 * the primary IP, we must remove all their routes silently
	 * and later to add them back with new prefsrc. Do this
	 * while all addresses are on the device list.
	 */
	for (ifa = promote; ifa; ifa = rtnl_dereference(ifa->ifa_next)) {
		if (ifa1->ifa_mask == ifa->ifa_mask &&
		    inet_ifa_match(ifa1->ifa_address, ifa))
			fib_del_ifaddr(ifa, ifa1);
	}

no_promotions:
	/* 2. Unlink it */

	*ifap = ifa1->ifa_next;
	inet_hash_remove(ifa1);

	/* 3. Announce address deletion */

	/* Send message first, then call notifier.
	   At first sight, FIB update triggered by notifier
	   will refer to already deleted ifaddr, that could confuse
	   netlink listeners. It is not true: look, gated sees
	   that route deleted and if it still thinks that ifaddr
	   is valid, it will try to restore deleted routes... Grr.
	   So that, this order is correct.
	 */
	rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid);
	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);

	if (promote) {
		struct in_ifaddr *next_sec;

		next_sec = rtnl_dereference(promote->ifa_next);
		if (prev_prom) {
			struct in_ifaddr *last_sec;

			/* Move the promoted entry out of the secondary run
			 * and re-link it right after the last primary.
			 */
			rcu_assign_pointer(prev_prom->ifa_next, next_sec);

			last_sec = rtnl_dereference(*last_prim);
			rcu_assign_pointer(promote->ifa_next, last_sec);
			rcu_assign_pointer(*last_prim, promote);
		}

		promote->ifa_flags &= ~IFA_F_SECONDARY;
		rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid);
		blocking_notifier_call_chain(&inetaddr_chain,
				NETDEV_UP, promote);
		/* Re-add the routes removed above, now with the new prefsrc. */
		for (ifa = next_sec; ifa;
		     ifa = rtnl_dereference(ifa->ifa_next)) {
			if (ifa1->ifa_mask != ifa->ifa_mask ||
			    !inet_ifa_match(ifa1->ifa_address, ifa))
					continue;
			fib_add_ifaddr(ifa);
		}

	}
	if (destroy)
		inet_free_ifa(ifa1);
}
482 
/* Delete an address without a netlink request context (no nlh/portid). */
static void inet_del_ifa(struct in_device *in_dev,
			 struct in_ifaddr __rcu **ifap,
			 int destroy)
{
	__inet_del_ifa(in_dev, ifap, destroy, NULL, 0);
}
489 
/* Insert @ifa into its in_device's address list, classifying it as
 * primary or secondary, running validators, hashing it, and announcing
 * it via netlink and the notifier chain.  Consumes @ifa on failure.
 * Caller must hold RTNL.  Returns 0 or a negative errno.
 */
static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
			     u32 portid, struct netlink_ext_ack *extack)
{
	struct in_ifaddr __rcu **last_primary, **ifap;
	struct in_device *in_dev = ifa->ifa_dev;
	struct net *net = dev_net(in_dev->dev);
	struct in_validator_info ivi;
	struct in_ifaddr *ifa1;
	int ret;

	ASSERT_RTNL();

	ifa->ifa_flags &= ~IFA_F_SECONDARY;
	last_primary = &in_dev->ifa_list;

	/* Don't set IPv6 only flags to IPv4 addresses */
	ifa->ifa_flags &= ~IPV6ONLY_FLAGS;

	ifap = &in_dev->ifa_list;
	ifa1 = rtnl_dereference(*ifap);

	/* Walk the whole list: find the insertion point for a primary,
	 * detect duplicates, and decide whether ifa is secondary.
	 */
	while (ifa1) {
		if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
		    ifa->ifa_scope <= ifa1->ifa_scope)
			last_primary = &ifa1->ifa_next;
		if (ifa1->ifa_mask == ifa->ifa_mask &&
		    inet_ifa_match(ifa1->ifa_address, ifa)) {
			if (ifa1->ifa_local == ifa->ifa_local) {
				inet_free_ifa(ifa);
				return -EEXIST;
			}
			if (ifa1->ifa_scope != ifa->ifa_scope) {
				NL_SET_ERR_MSG(extack, "ipv4: Invalid scope value");
				inet_free_ifa(ifa);
				return -EINVAL;
			}
			/* Same subnet as an existing address: ifa is an alias. */
			ifa->ifa_flags |= IFA_F_SECONDARY;
		}

		ifap = &ifa1->ifa_next;
		ifa1 = rtnl_dereference(*ifap);
	}

	/* Allow any devices that wish to register ifaddr validators to weigh
	 * in now, before changes are committed.  The rtnl lock is serializing
	 * access here, so the state should not change between a validator call
	 * and a final notify on commit.  This isn't invoked on promotion under
	 * the assumption that validators are checking the address itself, and
	 * not the flags.
	 */
	ivi.ivi_addr = ifa->ifa_address;
	ivi.ivi_dev = ifa->ifa_dev;
	ivi.extack = extack;
	ret = blocking_notifier_call_chain(&inetaddr_validator_chain,
					   NETDEV_UP, &ivi);
	ret = notifier_to_errno(ret);
	if (ret) {
		inet_free_ifa(ifa);
		return ret;
	}

	/* Primaries are kept in scope order ahead of secondaries. */
	if (!(ifa->ifa_flags & IFA_F_SECONDARY))
		ifap = last_primary;

	rcu_assign_pointer(ifa->ifa_next, *ifap);
	rcu_assign_pointer(*ifap, ifa);

	inet_hash_insert(dev_net(in_dev->dev), ifa);

	/* Kick the lifetime worker so the new address is accounted for. */
	cancel_delayed_work(&net->ipv4.addr_chk_work);
	queue_delayed_work(system_power_efficient_wq, &net->ipv4.addr_chk_work, 0);

	/* Send message first, then call notifier.
	   Notifier will trigger FIB update, so that
	   listeners of netlink will know about new ifaddr */
	rtmsg_ifa(RTM_NEWADDR, ifa, nlh, portid);
	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);

	return 0;
}
570 
571 static int inet_insert_ifa(struct in_ifaddr *ifa)
572 {
573 	if (!ifa->ifa_local) {
574 		inet_free_ifa(ifa);
575 		return 0;
576 	}
577 
578 	return __inet_insert_ifa(ifa, NULL, 0, NULL);
579 }
580 
/* Prepare @dev's in_device and insert @ifa on it.
 * Loopback addresses are forced to host scope before insertion.
 */
static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
{
	struct in_device *in_dev = __in_dev_get_rtnl_net(dev);

	/* Mark all config values as explicitly set for this device. */
	ipv4_devconf_setall(in_dev);
	neigh_parms_data_state_setall(in_dev->arp_parms);

	if (ipv4_is_loopback(ifa->ifa_local))
		ifa->ifa_scope = RT_SCOPE_HOST;
	return inet_insert_ifa(ifa);
}
592 
/* Caller must hold RCU or RTNL :
 * We dont take a reference on found in_device
 *
 * Return: the in_device for @ifindex, or NULL if the device does not
 * exist or has no IPv4 state.
 */
struct in_device *inetdev_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct in_device *in_dev = NULL;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		in_dev = rcu_dereference_rtnl(dev->ip_ptr);
	rcu_read_unlock();
	return in_dev;
}
EXPORT_SYMBOL(inetdev_by_index);
609 
610 /* Called only from RTNL semaphored context. No locks. */
611 
612 struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
613 				    __be32 mask)
614 {
615 	struct in_ifaddr *ifa;
616 
617 	ASSERT_RTNL();
618 
619 	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
620 		if (ifa->ifa_mask == mask && inet_ifa_match(prefix, ifa))
621 			return ifa;
622 	}
623 	return NULL;
624 }
625 
/* Join (@join true) or leave the multicast group ifa->ifa_address on
 * ifa's device, using the per-netns autojoin kernel socket.
 * Returns 0 or a negative errno; -EOPNOTSUPP without CONFIG_IP_MULTICAST.
 */
static int ip_mc_autojoin_config(struct net *net, bool join,
				 const struct in_ifaddr *ifa)
{
#if defined(CONFIG_IP_MULTICAST)
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr = ifa->ifa_address,
		.imr_ifindex = ifa->ifa_dev->dev->ifindex,
	};
	struct sock *sk = net->ipv4.mc_autojoin_sk;
	int ret;

	ASSERT_RTNL_NET(net);

	lock_sock(sk);
	if (join)
		ret = ip_mc_join_group(sk, &mreq);
	else
		ret = ip_mc_leave_group(sk, &mreq);
	release_sock(sk);

	return ret;
#else
	return -EOPNOTSUPP;
#endif
}
651 
/* RTM_DELADDR handler: find the address matching the request's
 * IFA_LOCAL / IFA_LABEL / IFA_ADDRESS attributes on the given device
 * and delete it.  Returns 0 or a negative errno.
 */
static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct in_ifaddr __rcu **ifap;
	struct nlattr *tb[IFA_MAX+1];
	struct in_device *in_dev;
	struct ifaddrmsg *ifm;
	struct in_ifaddr *ifa;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
				     ifa_ipv4_policy, extack);
	if (err < 0)
		goto out;

	ifm = nlmsg_data(nlh);

	rtnl_net_lock(net);

	in_dev = inetdev_by_index(net, ifm->ifa_index);
	if (!in_dev) {
		NL_SET_ERR_MSG(extack, "ipv4: Device not found");
		err = -ENODEV;
		goto unlock;
	}

	/* Each supplied attribute narrows the match; absent ones are
	 * wildcards.  The first address passing all filters is deleted.
	 */
	for (ifap = &in_dev->ifa_list;
	     (ifa = rtnl_net_dereference(net, *ifap)) != NULL;
	     ifap = &ifa->ifa_next) {
		if (tb[IFA_LOCAL] &&
		    ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL]))
			continue;

		if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
			continue;

		if (tb[IFA_ADDRESS] &&
		    (ifm->ifa_prefixlen != ifa->ifa_prefixlen ||
		    !inet_ifa_match(nla_get_in_addr(tb[IFA_ADDRESS]), ifa)))
			continue;

		/* Undo the group join done on IFA_F_MCAUTOJOIN insert. */
		if (ipv4_is_multicast(ifa->ifa_address))
			ip_mc_autojoin_config(net, false, ifa);

		__inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
		goto unlock;
	}

	NL_SET_ERR_MSG(extack, "ipv4: Address not found");
	err = -EADDRNOTAVAIL;
unlock:
	rtnl_net_unlock(net);
out:
	return err;
}
708 
/* Periodic worker: delete addresses whose valid lifetime has expired,
 * mark as deprecated those past their preferred lifetime, and
 * reschedule itself for the next relevant deadline.
 *
 * Each hash bucket is first scanned locklessly under RCU; only if that
 * scan finds work to do is the (expensive) RTNL lock taken for the
 * second, mutating pass.
 */
static void check_lifetime(struct work_struct *work)
{
	unsigned long now, next, next_sec, next_sched;
	struct in_ifaddr *ifa;
	struct hlist_node *n;
	struct net *net;
	int i;

	net = container_of(to_delayed_work(work), struct net, ipv4.addr_chk_work);
	now = jiffies;
	next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);

	for (i = 0; i < IN4_ADDR_HSIZE; i++) {
		struct hlist_head *head = &net->ipv4.inet_addr_lst[i];
		bool change_needed = false;

		/* Pass 1 (RCU, read-only): decide if this bucket needs
		 * changes and compute the earliest next deadline.
		 */
		rcu_read_lock();
		hlist_for_each_entry_rcu(ifa, head, addr_lst) {
			unsigned long age, tstamp;
			u32 preferred_lft;
			u32 valid_lft;
			u32 flags;

			flags = READ_ONCE(ifa->ifa_flags);
			if (flags & IFA_F_PERMANENT)
				continue;

			preferred_lft = READ_ONCE(ifa->ifa_preferred_lft);
			valid_lft = READ_ONCE(ifa->ifa_valid_lft);
			tstamp = READ_ONCE(ifa->ifa_tstamp);
			/* We try to batch several events at once. */
			age = (now - tstamp +
			       ADDRCONF_TIMER_FUZZ_MINUS) / HZ;

			if (valid_lft != INFINITY_LIFE_TIME &&
			    age >= valid_lft) {
				change_needed = true;
			} else if (preferred_lft ==
				   INFINITY_LIFE_TIME) {
				continue;
			} else if (age >= preferred_lft) {
				if (time_before(tstamp + valid_lft * HZ, next))
					next = tstamp + valid_lft * HZ;

				if (!(flags & IFA_F_DEPRECATED))
					change_needed = true;
			} else if (time_before(tstamp + preferred_lft * HZ,
					       next)) {
				next = tstamp + preferred_lft * HZ;
			}
		}
		rcu_read_unlock();
		if (!change_needed)
			continue;

		/* Pass 2 (RTNL): re-check under the lock and mutate. */
		rtnl_net_lock(net);
		hlist_for_each_entry_safe(ifa, n, head, addr_lst) {
			unsigned long age;

			if (ifa->ifa_flags & IFA_F_PERMANENT)
				continue;

			/* We try to batch several events at once. */
			age = (now - ifa->ifa_tstamp +
			       ADDRCONF_TIMER_FUZZ_MINUS) / HZ;

			if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
			    age >= ifa->ifa_valid_lft) {
				struct in_ifaddr __rcu **ifap;
				struct in_ifaddr *tmp;

				/* Locate ifa's link pointer on its device
				 * list; inet_del_ifa() needs it.
				 */
				ifap = &ifa->ifa_dev->ifa_list;
				tmp = rtnl_net_dereference(net, *ifap);
				while (tmp) {
					if (tmp == ifa) {
						inet_del_ifa(ifa->ifa_dev,
							     ifap, 1);
						break;
					}
					ifap = &tmp->ifa_next;
					tmp = rtnl_net_dereference(net, *ifap);
				}
			} else if (ifa->ifa_preferred_lft !=
				   INFINITY_LIFE_TIME &&
				   age >= ifa->ifa_preferred_lft &&
				   !(ifa->ifa_flags & IFA_F_DEPRECATED)) {
				ifa->ifa_flags |= IFA_F_DEPRECATED;
				rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
			}
		}
		rtnl_net_unlock(net);
	}

	next_sec = round_jiffies_up(next);
	next_sched = next;

	/* If rounded timeout is accurate enough, accept it. */
	if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
		next_sched = next_sec;

	now = jiffies;
	/* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
	if (time_before(next_sched, now + ADDRCONF_TIMER_FUZZ_MAX))
		next_sched = now + ADDRCONF_TIMER_FUZZ_MAX;

	queue_delayed_work(system_power_efficient_wq, &net->ipv4.addr_chk_work,
			   next_sched - now);
}
817 
/* Apply valid/preferred lifetimes (in seconds, INFINITY_LIFE_TIME for
 * "forever") to @ifa and stamp its modification/creation times.
 * An infinite valid lifetime makes the address IFA_F_PERMANENT; a zero
 * preferred lifetime marks it IFA_F_DEPRECATED immediately.
 */
static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft,
			     __u32 prefered_lft)
{
	unsigned long timeout;
	u32 flags;

	flags = ifa->ifa_flags & ~(IFA_F_PERMANENT | IFA_F_DEPRECATED);

	timeout = addrconf_timeout_fixup(valid_lft, HZ);
	if (addrconf_finite_timeout(timeout))
		WRITE_ONCE(ifa->ifa_valid_lft, timeout);
	else
		flags |= IFA_F_PERMANENT;

	timeout = addrconf_timeout_fixup(prefered_lft, HZ);
	if (addrconf_finite_timeout(timeout)) {
		if (timeout == 0)
			flags |= IFA_F_DEPRECATED;
		WRITE_ONCE(ifa->ifa_preferred_lft, timeout);
	}
	WRITE_ONCE(ifa->ifa_flags, flags);
	WRITE_ONCE(ifa->ifa_tstamp, jiffies);
	/* Creation time is only set once, on the first configuration. */
	if (!ifa->ifa_cstamp)
		WRITE_ONCE(ifa->ifa_cstamp, ifa->ifa_tstamp);
}
843 
844 static int inet_validate_rtm(struct nlmsghdr *nlh, struct nlattr **tb,
845 			     struct netlink_ext_ack *extack,
846 			     __u32 *valid_lft, __u32 *prefered_lft)
847 {
848 	struct ifaddrmsg *ifm = nlmsg_data(nlh);
849 	int err;
850 
851 	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
852 				     ifa_ipv4_policy, extack);
853 	if (err < 0)
854 		return err;
855 
856 	if (ifm->ifa_prefixlen > 32) {
857 		NL_SET_ERR_MSG(extack, "ipv4: Invalid prefix length");
858 		return -EINVAL;
859 	}
860 
861 	if (!tb[IFA_LOCAL]) {
862 		NL_SET_ERR_MSG(extack, "ipv4: Local address is not supplied");
863 		return -EINVAL;
864 	}
865 
866 	if (tb[IFA_CACHEINFO]) {
867 		struct ifa_cacheinfo *ci;
868 
869 		ci = nla_data(tb[IFA_CACHEINFO]);
870 		if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
871 			NL_SET_ERR_MSG(extack, "ipv4: address lifetime invalid");
872 			return -EINVAL;
873 		}
874 
875 		*valid_lft = ci->ifa_valid;
876 		*prefered_lft = ci->ifa_prefered;
877 	}
878 
879 	return 0;
880 }
881 
/* Build a new in_ifaddr from a validated RTM_NEWADDR request.
 * Returns the address (not yet inserted) or an ERR_PTR on failure.
 * Caller must hold RTNL.
 */
static struct in_ifaddr *inet_rtm_to_ifa(struct net *net, struct nlmsghdr *nlh,
					 struct nlattr **tb,
					 struct netlink_ext_ack *extack)
{
	struct ifaddrmsg *ifm = nlmsg_data(nlh);
	struct in_device *in_dev;
	struct net_device *dev;
	struct in_ifaddr *ifa;
	int err;

	dev = __dev_get_by_index(net, ifm->ifa_index);
	err = -ENODEV;
	if (!dev) {
		NL_SET_ERR_MSG(extack, "ipv4: Device not found");
		goto errout;
	}

	in_dev = __in_dev_get_rtnl_net(dev);
	err = -ENOBUFS;
	if (!in_dev)
		goto errout;

	ifa = inet_alloc_ifa(in_dev);
	if (!ifa)
		/*
		 * A potential indev allocation can be left alive, it stays
		 * assigned to its device and is destroyed with it.
		 */
		goto errout;

	ipv4_devconf_setall(in_dev);
	neigh_parms_data_state_setall(in_dev->arp_parms);

	/* IFA_ADDRESS defaults to IFA_LOCAL (non-pointopoint case). */
	if (!tb[IFA_ADDRESS])
		tb[IFA_ADDRESS] = tb[IFA_LOCAL];

	ifa->ifa_prefixlen = ifm->ifa_prefixlen;
	ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
	ifa->ifa_flags = nla_get_u32_default(tb[IFA_FLAGS], ifm->ifa_flags);
	ifa->ifa_scope = ifm->ifa_scope;
	ifa->ifa_local = nla_get_in_addr(tb[IFA_LOCAL]);
	ifa->ifa_address = nla_get_in_addr(tb[IFA_ADDRESS]);

	if (tb[IFA_BROADCAST])
		ifa->ifa_broadcast = nla_get_in_addr(tb[IFA_BROADCAST]);

	/* Label defaults to the device name when not supplied. */
	if (tb[IFA_LABEL])
		nla_strscpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
	else
		memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);

	if (tb[IFA_RT_PRIORITY])
		ifa->ifa_rt_priority = nla_get_u32(tb[IFA_RT_PRIORITY]);

	if (tb[IFA_PROTO])
		ifa->ifa_proto = nla_get_u8(tb[IFA_PROTO]);

	return ifa;

errout:
	return ERR_PTR(err);
}
944 
945 static struct in_ifaddr *find_matching_ifa(struct net *net, struct in_ifaddr *ifa)
946 {
947 	struct in_device *in_dev = ifa->ifa_dev;
948 	struct in_ifaddr *ifa1;
949 
950 	in_dev_for_each_ifa_rtnl_net(net, ifa1, in_dev) {
951 		if (ifa1->ifa_mask == ifa->ifa_mask &&
952 		    inet_ifa_match(ifa1->ifa_address, ifa) &&
953 		    ifa1->ifa_local == ifa->ifa_local)
954 			return ifa1;
955 	}
956 
957 	return NULL;
958 }
959 
/* RTM_NEWADDR handler: create a new address, or — with NLM_F_REPLACE —
 * update the metric, proto and lifetimes of an existing one.
 * Returns 0 or a negative errno.
 */
static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct netlink_ext_ack *extack)
{
	__u32 prefered_lft = INFINITY_LIFE_TIME;
	__u32 valid_lft = INFINITY_LIFE_TIME;
	struct net *net = sock_net(skb->sk);
	struct in_ifaddr *ifa_existing;
	struct nlattr *tb[IFA_MAX + 1];
	struct in_ifaddr *ifa;
	int ret;

	ret = inet_validate_rtm(nlh, tb, extack, &valid_lft, &prefered_lft);
	if (ret < 0)
		return ret;

	/* A zero local address is silently accepted as a no-op. */
	if (!nla_get_in_addr(tb[IFA_LOCAL]))
		return 0;

	rtnl_net_lock(net);

	ifa = inet_rtm_to_ifa(net, nlh, tb, extack);
	if (IS_ERR(ifa)) {
		ret = PTR_ERR(ifa);
		goto unlock;
	}

	ifa_existing = find_matching_ifa(net, ifa);
	if (!ifa_existing) {
		/* It would be best to check for !NLM_F_CREATE here but
		 * userspace already relies on not having to provide this.
		 */
		set_ifa_lifetime(ifa, valid_lft, prefered_lft);
		if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) {
			ret = ip_mc_autojoin_config(net, true, ifa);
			if (ret < 0) {
				NL_SET_ERR_MSG(extack, "ipv4: Multicast auto join failed");
				inet_free_ifa(ifa);
				goto unlock;
			}
		}

		ret = __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid, extack);
	} else {
		/* Save the requested values before freeing the duplicate. */
		u32 new_metric = ifa->ifa_rt_priority;
		u8 new_proto = ifa->ifa_proto;

		inet_free_ifa(ifa);

		if (nlh->nlmsg_flags & NLM_F_EXCL ||
		    !(nlh->nlmsg_flags & NLM_F_REPLACE)) {
			NL_SET_ERR_MSG(extack, "ipv4: Address already assigned");
			ret = -EEXIST;
			goto unlock;
		}
		ifa = ifa_existing;

		if (ifa->ifa_rt_priority != new_metric) {
			fib_modify_prefix_metric(ifa, new_metric);
			ifa->ifa_rt_priority = new_metric;
		}

		ifa->ifa_proto = new_proto;

		set_ifa_lifetime(ifa, valid_lft, prefered_lft);
		/* Re-run the lifetime worker with the new deadlines. */
		cancel_delayed_work(&net->ipv4.addr_chk_work);
		queue_delayed_work(system_power_efficient_wq,
				   &net->ipv4.addr_chk_work, 0);
		rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
	}

unlock:
	rtnl_net_unlock(net);

	return ret;
}
1035 
1036 /*
1037  *	Determine a default network mask, based on the IP address.
1038  */
1039 
1040 static int inet_abc_len(__be32 addr)
1041 {
1042 	int rc = -1;	/* Something else, probably a multicast. */
1043 
1044 	if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
1045 		rc = 0;
1046 	else {
1047 		__u32 haddr = ntohl(addr);
1048 		if (IN_CLASSA(haddr))
1049 			rc = 8;
1050 		else if (IN_CLASSB(haddr))
1051 			rc = 16;
1052 		else if (IN_CLASSC(haddr))
1053 			rc = 24;
1054 		else if (IN_CLASSE(haddr))
1055 			rc = 32;
1056 	}
1057 
1058 	return rc;
1059 }
1060 
1061 
/* devinet_ioctl - handle the legacy AF_INET interface-address ioctls.
 *
 * @net: namespace the request applies to
 * @cmd: SIOCGIF{ADDR,BRDADDR,DSTADDR,NETMASK}, the matching SIOCSIF*
 *	 set requests, or SIOCSIFFLAGS (anything else gets -EINVAL)
 * @ifr: ifr_name names the device, optionally with a ":alias" suffix;
 *	 ifr_addr carries the address for sets and receives it for gets
 *
 * Set requests require CAP_NET_ADMIN in @net's user namespace.
 * Returns 0 on success or a negative errno.
 */
int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr)
{
	struct sockaddr_in sin_orig;
	struct sockaddr_in *sin = (struct sockaddr_in *)&ifr->ifr_addr;
	struct in_ifaddr __rcu **ifap = NULL;
	struct in_device *in_dev;
	struct in_ifaddr *ifa = NULL;
	struct net_device *dev;
	char *colon;
	int ret = -EFAULT;
	int tryaddrmatch = 0;

	ifr->ifr_name[IFNAMSIZ - 1] = 0;

	/* save original address for comparison */
	memcpy(&sin_orig, sin, sizeof(*sin));

	/* Strip a "dev:alias" suffix for the device lookup; it is
	 * restored further down for ifa_label matching.
	 */
	colon = strchr(ifr->ifr_name, ':');
	if (colon)
		*colon = 0;

	dev_load(net, ifr->ifr_name);

	switch (cmd) {
	case SIOCGIFADDR:	/* Get interface address */
	case SIOCGIFBRDADDR:	/* Get the broadcast address */
	case SIOCGIFDSTADDR:	/* Get the destination address */
	case SIOCGIFNETMASK:	/* Get the netmask for the interface */
		/* Note that these ioctls will not sleep,
		   so that we do not impose a lock.
		   One day we will be forced to put shlock here (I mean SMP)
		 */
		tryaddrmatch = (sin_orig.sin_family == AF_INET);
		memset(sin, 0, sizeof(*sin));
		sin->sin_family = AF_INET;
		break;

	case SIOCSIFFLAGS:
		ret = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto out;
		break;
	case SIOCSIFADDR:	/* Set interface address (and family) */
	case SIOCSIFBRDADDR:	/* Set the broadcast address */
	case SIOCSIFDSTADDR:	/* Set the destination address */
	case SIOCSIFNETMASK: 	/* Set the netmask for the interface */
		ret = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto out;
		ret = -EINVAL;
		if (sin->sin_family != AF_INET)
			goto out;
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	rtnl_net_lock(net);

	ret = -ENODEV;
	dev = __dev_get_by_name(net, ifr->ifr_name);
	if (!dev)
		goto done;

	/* Restore the alias suffix so the label comparisons below see
	 * the full "dev:alias" name.
	 */
	if (colon)
		*colon = ':';

	in_dev = __in_dev_get_rtnl_net(dev);
	if (in_dev) {
		if (tryaddrmatch) {
			/* Matthias Andree */
			/* compare label and address (4.4BSD style) */
			/* note: we only do this for a limited set of ioctls
			   and only if the original address family was AF_INET.
			   This is checked above. */

			for (ifap = &in_dev->ifa_list;
			     (ifa = rtnl_net_dereference(net, *ifap)) != NULL;
			     ifap = &ifa->ifa_next) {
				if (!strcmp(ifr->ifr_name, ifa->ifa_label) &&
				    sin_orig.sin_addr.s_addr ==
							ifa->ifa_local) {
					break; /* found */
				}
			}
		}
		/* we didn't get a match, maybe the application is
		   4.3BSD-style and passed in junk so we fall back to
		   comparing just the label */
		if (!ifa) {
			for (ifap = &in_dev->ifa_list;
			     (ifa = rtnl_net_dereference(net, *ifap)) != NULL;
			     ifap = &ifa->ifa_next)
				if (!strcmp(ifr->ifr_name, ifa->ifa_label))
					break;
		}
	}

	/* Only SIOCSIFADDR may create a new address; SIOCSIFFLAGS
	 * handles the missing-alias case itself below.
	 */
	ret = -EADDRNOTAVAIL;
	if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS)
		goto done;

	switch (cmd) {
	case SIOCGIFADDR:	/* Get interface address */
		ret = 0;
		sin->sin_addr.s_addr = ifa->ifa_local;
		break;

	case SIOCGIFBRDADDR:	/* Get the broadcast address */
		ret = 0;
		sin->sin_addr.s_addr = ifa->ifa_broadcast;
		break;

	case SIOCGIFDSTADDR:	/* Get the destination address */
		ret = 0;
		sin->sin_addr.s_addr = ifa->ifa_address;
		break;

	case SIOCGIFNETMASK:	/* Get the netmask for the interface */
		ret = 0;
		sin->sin_addr.s_addr = ifa->ifa_mask;
		break;

	case SIOCSIFFLAGS:
		if (colon) {
			/* Downing an alias only deletes that alias's
			 * address; the device itself stays untouched.
			 */
			ret = -EADDRNOTAVAIL;
			if (!ifa)
				break;
			ret = 0;
			if (!(ifr->ifr_flags & IFF_UP))
				inet_del_ifa(in_dev, ifap, 1);
			break;
		}

		/* NETDEV_UP/DOWN/CHANGE could touch a peer dev */
		ASSERT_RTNL();
		ret = dev_change_flags(dev, ifr->ifr_flags, NULL);
		break;

	case SIOCSIFADDR:	/* Set interface address (and family) */
		ret = -EINVAL;
		if (inet_abc_len(sin->sin_addr.s_addr) < 0)
			break;

		if (!ifa) {
			ret = -ENOBUFS;
			if (!in_dev)
				break;
			ifa = inet_alloc_ifa(in_dev);
			if (!ifa)
				break;

			if (colon)
				memcpy(ifa->ifa_label, ifr->ifr_name, IFNAMSIZ);
			else
				memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
		} else {
			ret = 0;
			if (ifa->ifa_local == sin->sin_addr.s_addr)
				break;
			inet_del_ifa(in_dev, ifap, 0);
			ifa->ifa_broadcast = 0;
			ifa->ifa_scope = 0;
		}

		ifa->ifa_address = ifa->ifa_local = sin->sin_addr.s_addr;

		if (!(dev->flags & IFF_POINTOPOINT)) {
			/* Derive classful prefix/mask; broadcast only
			 * makes sense for prefixes shorter than /31.
			 */
			ifa->ifa_prefixlen = inet_abc_len(ifa->ifa_address);
			ifa->ifa_mask = inet_make_mask(ifa->ifa_prefixlen);
			if ((dev->flags & IFF_BROADCAST) &&
			    ifa->ifa_prefixlen < 31)
				ifa->ifa_broadcast = ifa->ifa_address |
						     ~ifa->ifa_mask;
		} else {
			ifa->ifa_prefixlen = 32;
			ifa->ifa_mask = inet_make_mask(32);
		}
		set_ifa_lifetime(ifa, INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
		ret = inet_set_ifa(dev, ifa);
		break;

	case SIOCSIFBRDADDR:	/* Set the broadcast address */
		ret = 0;
		if (ifa->ifa_broadcast != sin->sin_addr.s_addr) {
			inet_del_ifa(in_dev, ifap, 0);
			ifa->ifa_broadcast = sin->sin_addr.s_addr;
			inet_insert_ifa(ifa);
		}
		break;

	case SIOCSIFDSTADDR:	/* Set the destination address */
		ret = 0;
		if (ifa->ifa_address == sin->sin_addr.s_addr)
			break;
		ret = -EINVAL;
		if (inet_abc_len(sin->sin_addr.s_addr) < 0)
			break;
		ret = 0;
		inet_del_ifa(in_dev, ifap, 0);
		ifa->ifa_address = sin->sin_addr.s_addr;
		inet_insert_ifa(ifa);
		break;

	case SIOCSIFNETMASK: 	/* Set the netmask for the interface */

		/*
		 *	The mask we set must be legal.
		 */
		ret = -EINVAL;
		if (bad_mask(sin->sin_addr.s_addr, 0))
			break;
		ret = 0;
		if (ifa->ifa_mask != sin->sin_addr.s_addr) {
			__be32 old_mask = ifa->ifa_mask;
			inet_del_ifa(in_dev, ifap, 0);
			ifa->ifa_mask = sin->sin_addr.s_addr;
			ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask);

			/* See if current broadcast address matches
			 * with current netmask, then recalculate
			 * the broadcast address. Otherwise it's a
			 * funny address, so don't touch it since
			 * the user seems to know what (s)he's doing...
			 */
			if ((dev->flags & IFF_BROADCAST) &&
			    (ifa->ifa_prefixlen < 31) &&
			    (ifa->ifa_broadcast ==
			     (ifa->ifa_local|~old_mask))) {
				ifa->ifa_broadcast = (ifa->ifa_local |
						      ~sin->sin_addr.s_addr);
			}
			inet_insert_ifa(ifa);
		}
		break;
	}
done:
	rtnl_net_unlock(net);
out:
	return ret;
}
1304 
/* inet_gifconf - fill a SIOCGIFCONF user buffer with @dev's addresses.
 *
 * @buf:  userspace destination; NULL means "just count the space needed"
 * @len:  bytes remaining in the user buffer
 * @size: bytes to emit per entry (must not exceed sizeof(struct ifreq))
 *
 * Returns the number of bytes written (or that would be written when
 * @buf is NULL), or -EFAULT if the copy to userspace failed.
 */
int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size)
{
	struct in_device *in_dev = __in_dev_get_rtnl_net(dev);
	const struct in_ifaddr *ifa;
	struct ifreq ifr;
	int done = 0;

	if (WARN_ON(size > sizeof(struct ifreq)))
		goto out;

	if (!in_dev)
		goto out;

	in_dev_for_each_ifa_rtnl_net(dev_net(dev), ifa, in_dev) {
		/* Sizing pass: account for the entry without copying. */
		if (!buf) {
			done += size;
			continue;
		}
		/* Stop once the remaining user buffer cannot hold a
		 * full entry; the bytes written so far are still valid.
		 */
		if (len < size)
			break;
		memset(&ifr, 0, sizeof(struct ifreq));
		strcpy(ifr.ifr_name, ifa->ifa_label);

		(*(struct sockaddr_in *)&ifr.ifr_addr).sin_family = AF_INET;
		(*(struct sockaddr_in *)&ifr.ifr_addr).sin_addr.s_addr =
								ifa->ifa_local;

		if (copy_to_user(buf + done, &ifr, size)) {
			done = -EFAULT;
			break;
		}
		len  -= size;
		done += size;
	}
out:
	return done;
}
1342 
1343 static __be32 in_dev_select_addr(const struct in_device *in_dev,
1344 				 int scope)
1345 {
1346 	const struct in_ifaddr *ifa;
1347 
1348 	in_dev_for_each_ifa_rcu(ifa, in_dev) {
1349 		if (READ_ONCE(ifa->ifa_flags) & IFA_F_SECONDARY)
1350 			continue;
1351 		if (ifa->ifa_scope != RT_SCOPE_LINK &&
1352 		    ifa->ifa_scope <= scope)
1353 			return ifa->ifa_local;
1354 	}
1355 
1356 	return 0;
1357 }
1358 
/* inet_select_addr - pick a source address for talking to @dst via @dev.
 *
 * Preference order, all under RCU:
 *  1. a primary address on @dev in @dst's subnet (or the first primary
 *     address within @scope when @dst is 0);
 *  2. for VRF slaves, an address on the L3 master device;
 *  3. any suitable address on another device in the same L3 domain
 *     (loopback tends to win here because it is first in dev_base).
 *
 * Returns the chosen address or 0 if none was found.
 */
__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
{
	const struct in_ifaddr *ifa;
	__be32 addr = 0;
	unsigned char localnet_scope = RT_SCOPE_HOST;
	struct in_device *in_dev;
	struct net *net;
	int master_idx;

	rcu_read_lock();
	net = dev_net_rcu(dev);
	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev)
		goto no_in_dev;

	/* route_localnet widens what counts as an acceptable scope for
	 * 127/8-style addresses on this device.
	 */
	if (unlikely(IN_DEV_ROUTE_LOCALNET(in_dev)))
		localnet_scope = RT_SCOPE_LINK;

	in_dev_for_each_ifa_rcu(ifa, in_dev) {
		if (READ_ONCE(ifa->ifa_flags) & IFA_F_SECONDARY)
			continue;
		if (min(ifa->ifa_scope, localnet_scope) > scope)
			continue;
		if (!dst || inet_ifa_match(dst, ifa)) {
			addr = ifa->ifa_local;
			break;
		}
		/* Remember the first in-scope address as a fallback in
		 * case nothing matches @dst's subnet.
		 */
		if (!addr)
			addr = ifa->ifa_local;
	}

	if (addr)
		goto out_unlock;
no_in_dev:
	master_idx = l3mdev_master_ifindex_rcu(dev);

	/* For VRFs, the VRF device takes the place of the loopback device,
	 * with addresses on it being preferred.  Note in such cases the
	 * loopback device will be among the devices that fail the master_idx
	 * equality check in the loop below.
	 */
	if (master_idx &&
	    (dev = dev_get_by_index_rcu(net, master_idx)) &&
	    (in_dev = __in_dev_get_rcu(dev))) {
		addr = in_dev_select_addr(in_dev, scope);
		if (addr)
			goto out_unlock;
	}

	/* Not loopback addresses on loopback should be preferred
	   in this case. It is important that lo is the first interface
	   in dev_base list.
	 */
	for_each_netdev_rcu(net, dev) {
		if (l3mdev_master_ifindex_rcu(dev) != master_idx)
			continue;

		in_dev = __in_dev_get_rcu(dev);
		if (!in_dev)
			continue;

		addr = in_dev_select_addr(in_dev, scope);
		if (addr)
			goto out_unlock;
	}
out_unlock:
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(inet_select_addr);
1429 
/* Check whether @in_dev carries a local address that confirms the
 * (@dst, @local, @scope) triple:
 *  - @local (0 = autoselect) must match an address within @scope;
 *  - @dst (0 = any) must fall inside some address's subnet.
 * The two conditions may be satisfied by different addresses on the
 * same device; the loop tracks them with @addr (candidate source) and
 * @same (subnet confirmation) and keeps scanning until both hold.
 * Returns the confirmed address, or 0 if no combination matches.
 */
static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst,
			      __be32 local, int scope)
{
	unsigned char localnet_scope = RT_SCOPE_HOST;
	const struct in_ifaddr *ifa;
	__be32 addr = 0;
	int same = 0;

	if (unlikely(IN_DEV_ROUTE_LOCALNET(in_dev)))
		localnet_scope = RT_SCOPE_LINK;

	in_dev_for_each_ifa_rcu(ifa, in_dev) {
		unsigned char min_scope = min(ifa->ifa_scope, localnet_scope);

		if (!addr &&
		    (local == ifa->ifa_local || !local) &&
		    min_scope <= scope) {
			addr = ifa->ifa_local;
			if (same)
				break;
		}
		if (!same) {
			same = (!local || inet_ifa_match(local, ifa)) &&
				(!dst || inet_ifa_match(dst, ifa));
			if (same && addr) {
				if (local || !dst)
					break;
				/* Is the selected addr into dst subnet? */
				if (inet_ifa_match(addr, ifa))
					break;
				/* No, then can we use new local src? */
				if (min_scope <= scope) {
					addr = ifa->ifa_local;
					break;
				}
				/* search for large dst subnet for addr */
				same = 0;
			}
		}
	}

	return same ? addr : 0;
}
1473 
1474 /*
1475  * Confirm that local IP address exists using wildcards:
1476  * - net: netns to check, cannot be NULL
1477  * - in_dev: only on this interface, NULL=any interface
1478  * - dst: only in the same subnet as dst, 0=any dst
1479  * - local: address, 0=autoselect the local address
1480  * - scope: maximum allowed scope value for the local address
1481  */
1482 __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev,
1483 			 __be32 dst, __be32 local, int scope)
1484 {
1485 	__be32 addr = 0;
1486 	struct net_device *dev;
1487 
1488 	if (in_dev)
1489 		return confirm_addr_indev(in_dev, dst, local, scope);
1490 
1491 	rcu_read_lock();
1492 	for_each_netdev_rcu(net, dev) {
1493 		in_dev = __in_dev_get_rcu(dev);
1494 		if (in_dev) {
1495 			addr = confirm_addr_indev(in_dev, dst, local, scope);
1496 			if (addr)
1497 				break;
1498 		}
1499 	}
1500 	rcu_read_unlock();
1501 
1502 	return addr;
1503 }
1504 EXPORT_SYMBOL(inet_confirm_addr);
1505 
1506 /*
1507  *	Device notifier
1508  */
1509 
/* Subscribe @nb to IPv4 address change notifications (blocking chain). */
int register_inetaddr_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&inetaddr_chain, nb);
}
EXPORT_SYMBOL(register_inetaddr_notifier);
1515 
/* Remove @nb from the IPv4 address notification chain. */
int unregister_inetaddr_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&inetaddr_chain, nb);
}
EXPORT_SYMBOL(unregister_inetaddr_notifier);
1521 
/* Subscribe @nb to the chain that can veto IPv4 address additions. */
int register_inetaddr_validator_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&inetaddr_validator_chain, nb);
}
EXPORT_SYMBOL(register_inetaddr_validator_notifier);
1527 
/* Remove @nb from the IPv4 address validator chain. */
int unregister_inetaddr_validator_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&inetaddr_validator_chain,
	    nb);
}
EXPORT_SYMBOL(unregister_inetaddr_validator_notifier);
1534 
1535 /* Rename ifa_labels for a device name change. Make some effort to preserve
1536  * existing alias numbering and to create unique labels if possible.
1537 */
static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
{
	struct in_ifaddr *ifa;
	int named = 0;

	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
		char old[IFNAMSIZ], *dot;

		memcpy(old, ifa->ifa_label, IFNAMSIZ);
		memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
		/* The first address keeps the bare device name. */
		if (named++ == 0)
			goto skip;
		dot = strchr(old, ':');
		/* Old label had no ":alias" suffix: synthesize one from
		 * the running count to keep labels unique.
		 */
		if (!dot) {
			sprintf(old, ":%d", named);
			dot = old;
		}
		/* Append the suffix; if the combined name would not fit,
		 * overwrite the tail of the new name instead.
		 */
		if (strlen(dot) + strlen(dev->name) < IFNAMSIZ)
			strcat(ifa->ifa_label, dot);
		else
			strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
skip:
		rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
	}
}
1563 
1564 static void inetdev_send_gratuitous_arp(struct net_device *dev,
1565 					struct in_device *in_dev)
1566 
1567 {
1568 	const struct in_ifaddr *ifa;
1569 
1570 	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
1571 		arp_send(ARPOP_REQUEST, ETH_P_ARP,
1572 			 ifa->ifa_local, dev,
1573 			 ifa->ifa_local, NULL,
1574 			 dev->dev_addr, NULL);
1575 	}
1576 }
1577 
1578 /* Called only under RTNL semaphore */
1579 
/* Netdevice notifier callback: keep the per-device IPv4 state
 * (struct in_device) in sync with device lifecycle events.
 * Runs under RTNL (see ASSERT_RTNL below).
 */
static int inetdev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct in_device *in_dev = __in_dev_get_rtnl(dev);

	ASSERT_RTNL();

	/* No IPv4 state yet: create it on REGISTER, or on a CHANGEMTU
	 * that brings the MTU back into a usable range.
	 */
	if (!in_dev) {
		if (event == NETDEV_REGISTER) {
			in_dev = inetdev_init(dev);
			if (IS_ERR(in_dev))
				return notifier_from_errno(PTR_ERR(in_dev));
			if (dev->flags & IFF_LOOPBACK) {
				IN_DEV_CONF_SET(in_dev, NOXFRM, 1);
				IN_DEV_CONF_SET(in_dev, NOPOLICY, 1);
			}
		} else if (event == NETDEV_CHANGEMTU) {
			/* Re-enabling IP */
			if (inetdev_valid_mtu(dev->mtu))
				in_dev = inetdev_init(dev);
		}
		goto out;
	}

	switch (event) {
	case NETDEV_REGISTER:
		/* in_dev should never exist before registration. */
		pr_debug("%s: bug\n", __func__);
		RCU_INIT_POINTER(dev->ip_ptr, NULL);
		break;
	case NETDEV_UP:
		if (!inetdev_valid_mtu(dev->mtu))
			break;
		/* Auto-assign 127.0.0.1/8 when loopback comes up. */
		if (dev->flags & IFF_LOOPBACK) {
			struct in_ifaddr *ifa = inet_alloc_ifa(in_dev);

			if (ifa) {
				ifa->ifa_local =
				  ifa->ifa_address = htonl(INADDR_LOOPBACK);
				ifa->ifa_prefixlen = 8;
				ifa->ifa_mask = inet_make_mask(8);
				ifa->ifa_scope = RT_SCOPE_HOST;
				memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
				set_ifa_lifetime(ifa, INFINITY_LIFE_TIME,
						 INFINITY_LIFE_TIME);
				ipv4_devconf_setall(in_dev);
				neigh_parms_data_state_setall(in_dev->arp_parms);
				inet_insert_ifa(ifa);
			}
		}
		ip_mc_up(in_dev);
		fallthrough;
	case NETDEV_CHANGEADDR:
		if (!IN_DEV_ARP_NOTIFY(in_dev))
			break;
		fallthrough;
	case NETDEV_NOTIFY_PEERS:
		/* Send gratuitous ARP to notify of link change */
		inetdev_send_gratuitous_arp(dev, in_dev);
		break;
	case NETDEV_DOWN:
		ip_mc_down(in_dev);
		break;
	case NETDEV_PRE_TYPE_CHANGE:
		ip_mc_unmap(in_dev);
		break;
	case NETDEV_POST_TYPE_CHANGE:
		ip_mc_remap(in_dev);
		break;
	case NETDEV_CHANGEMTU:
		if (inetdev_valid_mtu(dev->mtu))
			break;
		/* disable IP when MTU is not enough */
		fallthrough;
	case NETDEV_UNREGISTER:
		inetdev_destroy(in_dev);
		break;
	case NETDEV_CHANGENAME:
		/* Do not notify about label change, this event is
		 * not interesting to applications using netlink.
		 */
		inetdev_changename(dev, in_dev);

		devinet_sysctl_unregister(in_dev);
		devinet_sysctl_register(in_dev);
		break;
	}
out:
	return NOTIFY_DONE;
}
1670 
/* Hooks inetdev_event() into the netdevice notifier chain. */
static struct notifier_block ip_netdev_notifier = {
	.notifier_call = inetdev_event,
};
1674 
1675 static size_t inet_nlmsg_size(void)
1676 {
1677 	return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
1678 	       + nla_total_size(4) /* IFA_ADDRESS */
1679 	       + nla_total_size(4) /* IFA_LOCAL */
1680 	       + nla_total_size(4) /* IFA_BROADCAST */
1681 	       + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
1682 	       + nla_total_size(4)  /* IFA_FLAGS */
1683 	       + nla_total_size(1)  /* IFA_PROTO */
1684 	       + nla_total_size(4)  /* IFA_RT_PRIORITY */
1685 	       + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */
1686 }
1687 
1688 static inline u32 cstamp_delta(unsigned long cstamp)
1689 {
1690 	return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
1691 }
1692 
1693 static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
1694 			 unsigned long tstamp, u32 preferred, u32 valid)
1695 {
1696 	struct ifa_cacheinfo ci;
1697 
1698 	ci.cstamp = cstamp_delta(cstamp);
1699 	ci.tstamp = cstamp_delta(tstamp);
1700 	ci.ifa_prefered = preferred;
1701 	ci.ifa_valid = valid;
1702 
1703 	return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
1704 }
1705 
/* Serialize one address into an RTM_* netlink message described by
 * @args (event, portid, seq, flags, optional netnsid).
 * Returns 0 on success or -EMSGSIZE when @skb has no room; on failure
 * the partially written message is cancelled.
 */
static int inet_fill_ifaddr(struct sk_buff *skb, const struct in_ifaddr *ifa,
			    struct inet_fill_args *args)
{
	struct ifaddrmsg *ifm;
	struct nlmsghdr  *nlh;
	unsigned long tstamp;
	u32 preferred, valid;
	u32 flags;

	nlh = nlmsg_put(skb, args->portid, args->seq, args->event, sizeof(*ifm),
			args->flags);
	if (!nlh)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifa_family = AF_INET;
	ifm->ifa_prefixlen = ifa->ifa_prefixlen;

	flags = READ_ONCE(ifa->ifa_flags);
	/* Warning : ifm->ifa_flags is an __u8, it holds only 8 bits.
	 * The 32bit value is given in IFA_FLAGS attribute.
	 */
	ifm->ifa_flags = (__u8)flags;

	ifm->ifa_scope = ifa->ifa_scope;
	ifm->ifa_index = ifa->ifa_dev->dev->ifindex;

	if (args->netnsid >= 0 &&
	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
		goto nla_put_failure;

	/* Report remaining (not absolute) lifetimes, aged by the time
	 * since the last update stamp and clamped at zero.  Note that
	 * valid_lft is only aged when preferred_lft is finite.
	 */
	tstamp = READ_ONCE(ifa->ifa_tstamp);
	if (!(flags & IFA_F_PERMANENT)) {
		preferred = READ_ONCE(ifa->ifa_preferred_lft);
		valid = READ_ONCE(ifa->ifa_valid_lft);
		if (preferred != INFINITY_LIFE_TIME) {
			long tval = (jiffies - tstamp) / HZ;

			if (preferred > tval)
				preferred -= tval;
			else
				preferred = 0;
			if (valid != INFINITY_LIFE_TIME) {
				if (valid > tval)
					valid -= tval;
				else
					valid = 0;
			}
		}
	} else {
		preferred = INFINITY_LIFE_TIME;
		valid = INFINITY_LIFE_TIME;
	}
	/* Optional attributes are only emitted when non-zero. */
	if ((ifa->ifa_address &&
	     nla_put_in_addr(skb, IFA_ADDRESS, ifa->ifa_address)) ||
	    (ifa->ifa_local &&
	     nla_put_in_addr(skb, IFA_LOCAL, ifa->ifa_local)) ||
	    (ifa->ifa_broadcast &&
	     nla_put_in_addr(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
	    (ifa->ifa_label[0] &&
	     nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
	    (ifa->ifa_proto &&
	     nla_put_u8(skb, IFA_PROTO, ifa->ifa_proto)) ||
	    nla_put_u32(skb, IFA_FLAGS, flags) ||
	    (ifa->ifa_rt_priority &&
	     nla_put_u32(skb, IFA_RT_PRIORITY, ifa->ifa_rt_priority)) ||
	    put_cacheinfo(skb, READ_ONCE(ifa->ifa_cstamp), tstamp,
			  preferred, valid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
1783 
/* Strict validation of an address dump request: the header must carry
 * no stray values, and the only attribute accepted is
 * IFA_TARGET_NETNSID (which redirects the dump to another namespace,
 * returned via @tgt_net with a reference the caller must drop).
 * Also records an ifindex filter in @fillargs if one was given.
 */
static int inet_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
				      struct inet_fill_args *fillargs,
				      struct net **tgt_net, struct sock *sk,
				      struct netlink_callback *cb)
{
	struct netlink_ext_ack *extack = cb->extack;
	struct nlattr *tb[IFA_MAX+1];
	struct ifaddrmsg *ifm;
	int err, i;

	ifm = nlmsg_payload(nlh, sizeof(*ifm));
	if (!ifm) {
		NL_SET_ERR_MSG(extack, "ipv4: Invalid header for address dump request");
		return -EINVAL;
	}

	if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
		NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for address dump request");
		return -EINVAL;
	}

	fillargs->ifindex = ifm->ifa_index;
	if (fillargs->ifindex) {
		cb->answer_flags |= NLM_F_DUMP_FILTERED;
		fillargs->flags |= NLM_F_DUMP_FILTERED;
	}

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
					    ifa_ipv4_policy, extack);
	if (err < 0)
		return err;

	for (i = 0; i <= IFA_MAX; ++i) {
		if (!tb[i])
			continue;

		if (i == IFA_TARGET_NETNSID) {
			struct net *net;

			fillargs->netnsid = nla_get_s32(tb[i]);

			net = rtnl_get_net_ns_capable(sk, fillargs->netnsid);
			if (IS_ERR(net)) {
				fillargs->netnsid = -1;
				NL_SET_ERR_MSG(extack, "ipv4: Invalid target network namespace id");
				return PTR_ERR(net);
			}
			*tgt_net = net;
		} else {
			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in dump request");
			return -EINVAL;
		}
	}

	return 0;
}
1840 
/* Dump @in_dev's multicast groups, resuming at *@s_ip_idx.  On a
 * complete pass *@s_ip_idx is reset to 0 so the next device starts
 * from its first entry; when the skb fills up it holds the index to
 * resume from.  Returns 0 or the negative fill error.
 */
static int in_dev_dump_ifmcaddr(struct in_device *in_dev, struct sk_buff *skb,
				struct netlink_callback *cb, int *s_ip_idx,
				struct inet_fill_args *fillargs)
{
	struct ip_mc_list *im;
	int ip_idx = 0;
	int err;

	for (im = rcu_dereference(in_dev->mc_list);
	     im;
	     im = rcu_dereference(im->next_rcu)) {
		/* Skip entries already emitted in a previous round. */
		if (ip_idx < *s_ip_idx) {
			ip_idx++;
			continue;
		}
		err = inet_fill_ifmcaddr(skb, in_dev->dev, im, fillargs);
		if (err < 0)
			goto done;

		nl_dump_check_consistent(cb, nlmsg_hdr(skb));
		ip_idx++;
	}
	err = 0;
	ip_idx = 0;
done:
	*s_ip_idx = ip_idx;
	return err;
}
1869 
/* Dump @in_dev's unicast addresses, resuming at *@s_ip_idx.  Same
 * resume-index contract as in_dev_dump_ifmcaddr(): *@s_ip_idx is
 * reset to 0 after a complete pass, otherwise left at the entry to
 * resume from.  Returns 0 or the negative fill error.
 */
static int in_dev_dump_ifaddr(struct in_device *in_dev, struct sk_buff *skb,
			      struct netlink_callback *cb, int *s_ip_idx,
			      struct inet_fill_args *fillargs)
{
	struct in_ifaddr *ifa;
	int ip_idx = 0;
	int err;

	in_dev_for_each_ifa_rcu(ifa, in_dev) {
		/* Skip entries already emitted in a previous round. */
		if (ip_idx < *s_ip_idx) {
			ip_idx++;
			continue;
		}
		err = inet_fill_ifaddr(skb, ifa, fillargs);
		if (err < 0)
			goto done;

		nl_dump_check_consistent(cb, nlmsg_hdr(skb));
		ip_idx++;
	}
	err = 0;
	ip_idx = 0;
done:
	*s_ip_idx = ip_idx;

	return err;
}
1897 
1898 static int in_dev_dump_addr(struct in_device *in_dev, struct sk_buff *skb,
1899 			    struct netlink_callback *cb, int *s_ip_idx,
1900 			    struct inet_fill_args *fillargs)
1901 {
1902 	switch (fillargs->event) {
1903 	case RTM_NEWADDR:
1904 		return in_dev_dump_ifaddr(in_dev, skb, cb, s_ip_idx, fillargs);
1905 	case RTM_GETMULTICAST:
1906 		return in_dev_dump_ifmcaddr(in_dev, skb, cb, s_ip_idx,
1907 					    fillargs);
1908 	default:
1909 		return -EINVAL;
1910 	}
1911 }
1912 
1913 /* Combine dev_addr_genid and dev_base_seq to detect changes.
1914  */
1915 static u32 inet_base_seq(const struct net *net)
1916 {
1917 	u32 res = atomic_read(&net->ipv4.dev_addr_genid) +
1918 		  READ_ONCE(net->dev_base_seq);
1919 
1920 	/* Must not return 0 (see nl_dump_check_consistent()).
1921 	 * Chose a value far away from 0.
1922 	 */
1923 	if (!res)
1924 		res = 0x80000000;
1925 	return res;
1926 }
1927 
/* Core of the RTM_GETADDR / RTM_GETMULTICAST dump: walks devices under
 * RCU, delegating per-device work to in_dev_dump_addr().  The resume
 * state (device index + per-device address index) lives in cb->ctx and
 * persists across successive dump invocations.  With strict checking a
 * request may filter to one ifindex and/or target another netns (whose
 * reference is dropped at "done").
 */
static int inet_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
			  int event)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct inet_fill_args fillargs = {
		.portid = NETLINK_CB(cb->skb).portid,
		.seq = nlh->nlmsg_seq,
		.event = event,
		.flags = NLM_F_MULTI,
		.netnsid = -1,
	};
	struct net *net = sock_net(skb->sk);
	struct net *tgt_net = net;
	struct {
		unsigned long ifindex;
		int ip_idx;
	} *ctx = (void *)cb->ctx;
	struct in_device *in_dev;
	struct net_device *dev;
	int err = 0;

	rcu_read_lock();
	if (cb->strict_check) {
		err = inet_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
						 skb->sk, cb);
		if (err < 0)
			goto done;

		/* Single-device dump: no iteration needed. */
		if (fillargs.ifindex) {
			dev = dev_get_by_index_rcu(tgt_net, fillargs.ifindex);
			if (!dev) {
				err = -ENODEV;
				goto done;
			}
			in_dev = __in_dev_get_rcu(dev);
			if (!in_dev)
				goto done;
			err = in_dev_dump_addr(in_dev, skb, cb, &ctx->ip_idx,
					       &fillargs);
			goto done;
		}
	}

	cb->seq = inet_base_seq(tgt_net);

	for_each_netdev_dump(tgt_net, dev, ctx->ifindex) {
		in_dev = __in_dev_get_rcu(dev);
		if (!in_dev)
			continue;
		err = in_dev_dump_addr(in_dev, skb, cb, &ctx->ip_idx,
				       &fillargs);
		if (err < 0)
			goto done;
	}
done:
	/* Drop the reference taken for an IFA_TARGET_NETNSID request. */
	if (fillargs.netnsid >= 0)
		put_net(tgt_net);
	rcu_read_unlock();
	return err;
}
1988 
/* RTM_GETADDR dump handler: unicast addresses only. */
static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
{
	return inet_dump_addr(skb, cb, RTM_NEWADDR);
}
1993 
/* RTM_GETMULTICAST dump handler: multicast group memberships. */
static int inet_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb)
{
	return inet_dump_addr(skb, cb, RTM_GETMULTICAST);
}
1998 
/* Notify RTNLGRP_IPV4_IFADDR listeners of an address change.
 * @nlh/@portid identify the originating request (if any) so the
 * requester gets its echo; both may be NULL/0 for kernel-internal
 * changes.  Allocation or fill failures are reported to the group
 * via rtnl_set_sk_err() rather than returned.
 */
static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
		      u32 portid)
{
	struct inet_fill_args fillargs = {
		.portid = portid,
		.seq = nlh ? nlh->nlmsg_seq : 0,
		.event = event,
		.flags = 0,
		.netnsid = -1,
	};
	struct sk_buff *skb;
	int err = -ENOBUFS;
	struct net *net;

	net = dev_net(ifa->ifa_dev->dev);
	skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL);
	if (!skb)
		goto errout;

	err = inet_fill_ifaddr(skb, ifa, &fillargs);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in inet_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, portid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
}
2030 
2031 static size_t inet_get_link_af_size(const struct net_device *dev,
2032 				    u32 ext_filter_mask)
2033 {
2034 	struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
2035 
2036 	if (!in_dev)
2037 		return 0;
2038 
2039 	return nla_total_size(IPV4_DEVCONF_MAX * 4); /* IFLA_INET_CONF */
2040 }
2041 
2042 static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
2043 			     u32 ext_filter_mask)
2044 {
2045 	struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
2046 	struct nlattr *nla;
2047 	int i;
2048 
2049 	if (!in_dev)
2050 		return -ENODATA;
2051 
2052 	nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
2053 	if (!nla)
2054 		return -EMSGSIZE;
2055 
2056 	for (i = 0; i < IPV4_DEVCONF_MAX; i++)
2057 		((u32 *) nla_data(nla))[i] = READ_ONCE(in_dev->cnf.data[i]);
2058 
2059 	return 0;
2060 }
2061 
/* Policy for the AF_INET slice of IFLA_AF_SPEC: only the nested
 * IFLA_INET_CONF attribute is recognized.
 */
static const struct nla_policy inet_af_policy[IFLA_INET_MAX+1] = {
	[IFLA_INET_CONF]	= { .type = NLA_NESTED },
};
2065 
/* Validation ranges for per-device IPv4 settings arriving inside
 * IFLA_INET_CONF.  MC_FORWARDING is NLA_REJECT: it cannot be set
 * through this interface.
 */
static const struct nla_policy inet_devconf_policy[IPV4_DEVCONF_MAX + 1] = {
	[IPV4_DEVCONF_FORWARDING]	= NLA_POLICY_RANGE(NLA_U32, 0, 1),
	[IPV4_DEVCONF_MC_FORWARDING]	= { .type = NLA_REJECT },
	[IPV4_DEVCONF_PROXY_ARP]	= NLA_POLICY_RANGE(NLA_U32, 0, 1),
	[IPV4_DEVCONF_ACCEPT_REDIRECTS]	= NLA_POLICY_RANGE(NLA_U32, 0, 1),
	[IPV4_DEVCONF_SECURE_REDIRECTS]	= NLA_POLICY_RANGE(NLA_U32, 0, 1),
	[IPV4_DEVCONF_SEND_REDIRECTS]	= NLA_POLICY_RANGE(NLA_U32, 0, 1),
	[IPV4_DEVCONF_SHARED_MEDIA]	= NLA_POLICY_RANGE(NLA_U32, 0, 1),
	[IPV4_DEVCONF_RP_FILTER]	= NLA_POLICY_RANGE(NLA_U32, 0, 2),
	[IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE] = NLA_POLICY_RANGE(NLA_U32, 0, 1),
	[IPV4_DEVCONF_BOOTP_RELAY]	= NLA_POLICY_RANGE(NLA_U32, 0, 1),
	[IPV4_DEVCONF_LOG_MARTIANS]	= NLA_POLICY_RANGE(NLA_U32, 0, 1),
	[IPV4_DEVCONF_TAG]		= { .type = NLA_U32 },
	[IPV4_DEVCONF_ARPFILTER]	= NLA_POLICY_RANGE(NLA_U32, 0, 1),
	[IPV4_DEVCONF_MEDIUM_ID]	= NLA_POLICY_MIN(NLA_S32, -1),
	[IPV4_DEVCONF_NOXFRM]		= NLA_POLICY_RANGE(NLA_U32, 0, 1),
	[IPV4_DEVCONF_NOPOLICY]		= NLA_POLICY_RANGE(NLA_U32, 0, 1),
	[IPV4_DEVCONF_FORCE_IGMP_VERSION] = NLA_POLICY_RANGE(NLA_U32, 0, 3),
	[IPV4_DEVCONF_ARP_ANNOUNCE]	= NLA_POLICY_RANGE(NLA_U32, 0, 2),
	[IPV4_DEVCONF_ARP_IGNORE]	= NLA_POLICY_RANGE(NLA_U32, 0, 8),
	[IPV4_DEVCONF_PROMOTE_SECONDARIES] = NLA_POLICY_RANGE(NLA_U32, 0, 1),
	[IPV4_DEVCONF_ARP_ACCEPT]	= NLA_POLICY_RANGE(NLA_U32, 0, 2),
	[IPV4_DEVCONF_ARP_NOTIFY]	= NLA_POLICY_RANGE(NLA_U32, 0, 1),
	[IPV4_DEVCONF_ACCEPT_LOCAL]	= NLA_POLICY_RANGE(NLA_U32, 0, 1),
	[IPV4_DEVCONF_SRC_VMARK]	= NLA_POLICY_RANGE(NLA_U32, 0, 1),
	[IPV4_DEVCONF_PROXY_ARP_PVLAN]	= NLA_POLICY_RANGE(NLA_U32, 0, 1),
	[IPV4_DEVCONF_ROUTE_LOCALNET]	= NLA_POLICY_RANGE(NLA_U32, 0, 1),
	[IPV4_DEVCONF_BC_FORWARDING]	= NLA_POLICY_RANGE(NLA_U32, 0, 1),
	[IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL] = { .type = NLA_U32 },
	[IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL] = { .type = NLA_U32 },
	[IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] =
		NLA_POLICY_RANGE(NLA_U32, 0, 1),
	[IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] =
		NLA_POLICY_RANGE(NLA_U32, 0, 1),
	[IPV4_DEVCONF_DROP_GRATUITOUS_ARP] = NLA_POLICY_RANGE(NLA_U32, 0, 1),
	[IPV4_DEVCONF_ARP_EVICT_NOCARRIER] = NLA_POLICY_RANGE(NLA_U32, 0, 1),
};
2103 
2104 static int inet_validate_link_af(const struct net_device *dev,
2105 				 const struct nlattr *nla,
2106 				 struct netlink_ext_ack *extack)
2107 {
2108 	struct nlattr *tb[IFLA_INET_MAX + 1], *nested_tb[IPV4_DEVCONF_MAX + 1];
2109 	int err;
2110 
2111 	if (dev && !__in_dev_get_rtnl(dev))
2112 		return -EAFNOSUPPORT;
2113 
2114 	err = nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla,
2115 					  inet_af_policy, extack);
2116 	if (err < 0)
2117 		return err;
2118 
2119 	if (tb[IFLA_INET_CONF]) {
2120 		err = nla_parse_nested(nested_tb, IPV4_DEVCONF_MAX,
2121 				       tb[IFLA_INET_CONF], inet_devconf_policy,
2122 				       extack);
2123 
2124 		if (err < 0)
2125 			return err;
2126 	}
2127 
2128 	return 0;
2129 }
2130 
2131 static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla,
2132 			    struct netlink_ext_ack *extack)
2133 {
2134 	struct in_device *in_dev = __in_dev_get_rtnl(dev);
2135 	struct nlattr *a, *tb[IFLA_INET_MAX+1];
2136 	int rem;
2137 
2138 	if (!in_dev)
2139 		return -EAFNOSUPPORT;
2140 
2141 	if (nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla, NULL, NULL) < 0)
2142 		return -EINVAL;
2143 
2144 	if (tb[IFLA_INET_CONF]) {
2145 		nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
2146 			ipv4_devconf_set(in_dev, nla_type(a), nla_get_u32(a));
2147 	}
2148 
2149 	return 0;
2150 }
2151 
2152 static int inet_netconf_msgsize_devconf(int type)
2153 {
2154 	int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
2155 		   + nla_total_size(4);	/* NETCONFA_IFINDEX */
2156 	bool all = false;
2157 
2158 	if (type == NETCONFA_ALL)
2159 		all = true;
2160 
2161 	if (all || type == NETCONFA_FORWARDING)
2162 		size += nla_total_size(4);
2163 	if (all || type == NETCONFA_RP_FILTER)
2164 		size += nla_total_size(4);
2165 	if (all || type == NETCONFA_MC_FORWARDING)
2166 		size += nla_total_size(4);
2167 	if (all || type == NETCONFA_BC_FORWARDING)
2168 		size += nla_total_size(4);
2169 	if (all || type == NETCONFA_PROXY_NEIGH)
2170 		size += nla_total_size(4);
2171 	if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
2172 		size += nla_total_size(4);
2173 
2174 	return size;
2175 }
2176 
/* Fill one RTM_*NETCONF message into @skb for the given devconf.
 *
 * @ifindex: real ifindex, or NETCONFA_IFINDEX_ALL / NETCONFA_IFINDEX_DEFAULT.
 * @devconf: may be NULL (used for RTM_DELNETCONF); then only
 *	     NETCONFA_IFINDEX is emitted.
 * @type:    a single NETCONFA_* attribute to include, or NETCONFA_ALL
 *	     for every supported attribute.
 *
 * Returns 0 on success, -EMSGSIZE when the skb has no room; sizing is
 * provided by inet_netconf_msgsize_devconf().
 */
static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
				     const struct ipv4_devconf *devconf,
				     u32 portid, u32 seq, int event,
				     unsigned int flags, int type)
{
	struct nlmsghdr  *nlh;
	struct netconfmsg *ncm;
	bool all = false;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	if (type == NETCONFA_ALL)
		all = true;

	ncm = nlmsg_data(nlh);
	ncm->ncm_family = AF_INET;

	if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
		goto nla_put_failure;

	/* No devconf: header-only message (e.g. deletion notification). */
	if (!devconf)
		goto out;

	if ((all || type == NETCONFA_FORWARDING) &&
	    nla_put_s32(skb, NETCONFA_FORWARDING,
			IPV4_DEVCONF_RO(*devconf, FORWARDING)) < 0)
		goto nla_put_failure;
	if ((all || type == NETCONFA_RP_FILTER) &&
	    nla_put_s32(skb, NETCONFA_RP_FILTER,
			IPV4_DEVCONF_RO(*devconf, RP_FILTER)) < 0)
		goto nla_put_failure;
	if ((all || type == NETCONFA_MC_FORWARDING) &&
	    nla_put_s32(skb, NETCONFA_MC_FORWARDING,
			IPV4_DEVCONF_RO(*devconf, MC_FORWARDING)) < 0)
		goto nla_put_failure;
	if ((all || type == NETCONFA_BC_FORWARDING) &&
	    nla_put_s32(skb, NETCONFA_BC_FORWARDING,
			IPV4_DEVCONF_RO(*devconf, BC_FORWARDING)) < 0)
		goto nla_put_failure;
	/* NETCONFA_PROXY_NEIGH intentionally maps to the PROXY_ARP knob. */
	if ((all || type == NETCONFA_PROXY_NEIGH) &&
	    nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
			IPV4_DEVCONF_RO(*devconf, PROXY_ARP)) < 0)
		goto nla_put_failure;
	if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
	    nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
			IPV4_DEVCONF_RO(*devconf,
					IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
		goto nla_put_failure;

out:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
2237 
2238 void inet_netconf_notify_devconf(struct net *net, int event, int type,
2239 				 int ifindex, struct ipv4_devconf *devconf)
2240 {
2241 	struct sk_buff *skb;
2242 	int err = -ENOBUFS;
2243 
2244 	skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_KERNEL);
2245 	if (!skb)
2246 		goto errout;
2247 
2248 	err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
2249 					event, 0, type);
2250 	if (err < 0) {
2251 		/* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
2252 		WARN_ON(err == -EMSGSIZE);
2253 		kfree_skb(skb);
2254 		goto errout;
2255 	}
2256 	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_NETCONF, NULL, GFP_KERNEL);
2257 	return;
2258 errout:
2259 	rtnl_set_sk_err(net, RTNLGRP_IPV4_NETCONF, err);
2260 }
2261 
/* Attribute policy for RTM_GETNETCONF requests.  Entries use .len with
 * sizeof(int) payloads rather than NLA_U32 validation.
 */
static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
	[NETCONFA_IFINDEX]	= { .len = sizeof(int) },
	[NETCONFA_FORWARDING]	= { .len = sizeof(int) },
	[NETCONFA_RP_FILTER]	= { .len = sizeof(int) },
	[NETCONFA_PROXY_NEIGH]	= { .len = sizeof(int) },
	[NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN]	= { .len = sizeof(int) },
};
2269 
2270 static int inet_netconf_valid_get_req(struct sk_buff *skb,
2271 				      const struct nlmsghdr *nlh,
2272 				      struct nlattr **tb,
2273 				      struct netlink_ext_ack *extack)
2274 {
2275 	int i, err;
2276 
2277 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) {
2278 		NL_SET_ERR_MSG(extack, "ipv4: Invalid header for netconf get request");
2279 		return -EINVAL;
2280 	}
2281 
2282 	if (!netlink_strict_get_check(skb))
2283 		return nlmsg_parse_deprecated(nlh, sizeof(struct netconfmsg),
2284 					      tb, NETCONFA_MAX,
2285 					      devconf_ipv4_policy, extack);
2286 
2287 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct netconfmsg),
2288 					    tb, NETCONFA_MAX,
2289 					    devconf_ipv4_policy, extack);
2290 	if (err)
2291 		return err;
2292 
2293 	for (i = 0; i <= NETCONFA_MAX; i++) {
2294 		if (!tb[i])
2295 			continue;
2296 
2297 		switch (i) {
2298 		case NETCONFA_IFINDEX:
2299 			break;
2300 		default:
2301 			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in netconf get request");
2302 			return -EINVAL;
2303 		}
2304 	}
2305 
2306 	return 0;
2307 }
2308 
/* RTM_GETNETCONF doit handler: return the full devconf for a single
 * target (real device, "all", or "default") as an RTM_NEWNETCONF
 * unicast reply.
 *
 * Registered with RTNL_FLAG_DOIT_UNLOCKED; device and in_device
 * lifetimes are held via dev_get_by_index()/in_dev_get() references
 * released on the errout path.
 */
static int inet_netconf_get_devconf(struct sk_buff *in_skb,
				    struct nlmsghdr *nlh,
				    struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[NETCONFA_MAX + 1];
	const struct ipv4_devconf *devconf;
	struct in_device *in_dev = NULL;
	struct net_device *dev = NULL;
	struct sk_buff *skb;
	int ifindex;
	int err;

	err = inet_netconf_valid_get_req(in_skb, nlh, tb, extack);
	if (err)
		return err;

	/* The target must be specified explicitly. */
	if (!tb[NETCONFA_IFINDEX])
		return -EINVAL;

	ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
	switch (ifindex) {
	case NETCONFA_IFINDEX_ALL:
		devconf = net->ipv4.devconf_all;
		break;
	case NETCONFA_IFINDEX_DEFAULT:
		devconf = net->ipv4.devconf_dflt;
		break;
	default:
		err = -ENODEV;
		dev = dev_get_by_index(net, ifindex);
		if (dev)
			in_dev = in_dev_get(dev);
		if (!in_dev)
			goto errout;
		devconf = &in_dev->cnf;
		break;
	}

	err = -ENOBUFS;
	/* Size for NETCONFA_ALL: the reply always carries every attribute. */
	skb = nlmsg_new(inet_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
	if (!skb)
		goto errout;

	err = inet_netconf_fill_devconf(skb, ifindex, devconf,
					NETLINK_CB(in_skb).portid,
					nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
					NETCONFA_ALL);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
	if (in_dev)
		in_dev_put(in_dev);
	dev_put(dev);	/* dev may be NULL; dev_put() tolerates that */
	return err;
}
2370 
/* RTM_GETNETCONF dumpit handler: emit one RTM_NEWNETCONF message per
 * device, then one for "all" and one for "default".
 *
 * Resume state lives in cb->ctx: @ifindex tracks progress through the
 * netdev list (consumed by for_each_netdev_dump()), and @all_default
 * counts how many of the two pseudo entries ("all", then "default")
 * have been sent, so a restarted dump picks up where it left off.
 */
static int inet_netconf_dump_devconf(struct sk_buff *skb,
				     struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct net *net = sock_net(skb->sk);
	struct {
		unsigned long ifindex;
		unsigned int all_default;
	} *ctx = (void *)cb->ctx;
	const struct in_device *in_dev;
	struct net_device *dev;
	int err = 0;

	if (cb->strict_check) {
		struct netlink_ext_ack *extack = cb->extack;
		struct netconfmsg *ncm;

		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) {
			NL_SET_ERR_MSG(extack, "ipv4: Invalid header for netconf dump request");
			return -EINVAL;
		}

		/* Dump requests take no attributes at all. */
		if (nlmsg_attrlen(nlh, sizeof(*ncm))) {
			NL_SET_ERR_MSG(extack, "ipv4: Invalid data after header in netconf dump request");
			return -EINVAL;
		}
	}

	rcu_read_lock();
	for_each_netdev_dump(net, dev, ctx->ifindex) {
		in_dev = __in_dev_get_rcu(dev);
		if (!in_dev)
			continue;
		err = inet_netconf_fill_devconf(skb, dev->ifindex,
						&in_dev->cnf,
						NETLINK_CB(cb->skb).portid,
						nlh->nlmsg_seq,
						RTM_NEWNETCONF, NLM_F_MULTI,
						NETCONFA_ALL);
		if (err < 0)
			goto done;
	}
	if (ctx->all_default == 0) {
		err = inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
						net->ipv4.devconf_all,
						NETLINK_CB(cb->skb).portid,
						nlh->nlmsg_seq,
						RTM_NEWNETCONF, NLM_F_MULTI,
						NETCONFA_ALL);
		if (err < 0)
			goto done;
		ctx->all_default++;
	}
	if (ctx->all_default == 1) {
		err = inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
						net->ipv4.devconf_dflt,
						NETLINK_CB(cb->skb).portid,
						nlh->nlmsg_seq,
						RTM_NEWNETCONF, NLM_F_MULTI,
						NETCONFA_ALL);
		if (err < 0)
			goto done;
		ctx->all_default++;
	}
done:
	rcu_read_unlock();
	return err;
}
2439 
2440 #ifdef CONFIG_SYSCTL
2441 
2442 static void devinet_copy_dflt_conf(struct net *net, int i)
2443 {
2444 	struct net_device *dev;
2445 
2446 	rcu_read_lock();
2447 	for_each_netdev_rcu(net, dev) {
2448 		struct in_device *in_dev;
2449 
2450 		in_dev = __in_dev_get_rcu(dev);
2451 		if (in_dev && !test_bit(i, in_dev->cnf.state))
2452 			in_dev->cnf.data[i] = net->ipv4.devconf_dflt->data[i];
2453 	}
2454 	rcu_read_unlock();
2455 }
2456 
2457 /* called with RTNL locked */
2458 static void inet_forward_change(struct net *net)
2459 {
2460 	struct net_device *dev;
2461 	int on = IPV4_DEVCONF_ALL(net, FORWARDING);
2462 
2463 	IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on;
2464 	IPV4_DEVCONF_DFLT(net, FORWARDING) = on;
2465 	inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2466 				    NETCONFA_FORWARDING,
2467 				    NETCONFA_IFINDEX_ALL,
2468 				    net->ipv4.devconf_all);
2469 	inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2470 				    NETCONFA_FORWARDING,
2471 				    NETCONFA_IFINDEX_DEFAULT,
2472 				    net->ipv4.devconf_dflt);
2473 
2474 	for_each_netdev(net, dev) {
2475 		struct in_device *in_dev;
2476 
2477 		if (on)
2478 			dev_disable_lro(dev);
2479 
2480 		in_dev = __in_dev_get_rtnl_net(dev);
2481 		if (in_dev) {
2482 			IN_DEV_CONF_SET(in_dev, FORWARDING, on);
2483 			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
2484 						    NETCONFA_FORWARDING,
2485 						    dev->ifindex, &in_dev->cnf);
2486 		}
2487 	}
2488 }
2489 
2490 static int devinet_conf_ifindex(struct net *net, struct ipv4_devconf *cnf)
2491 {
2492 	if (cnf == net->ipv4.devconf_dflt)
2493 		return NETCONFA_IFINDEX_DEFAULT;
2494 	else if (cnf == net->ipv4.devconf_all)
2495 		return NETCONFA_IFINDEX_ALL;
2496 	else {
2497 		struct in_device *idev
2498 			= container_of(cnf, struct in_device, cnf);
2499 		return idev->dev->ifindex;
2500 	}
2501 }
2502 
/* Generic proc handler for per-devconf sysctl knobs.
 *
 * On a successful write it:
 *  - marks the entry as explicitly set (cnf->state bit), so default
 *    propagation skips it;
 *  - mirrors writes to the "default" config onto untouched interfaces;
 *  - flushes the route cache for knobs that affect routing decisions;
 *  - emits a netconf notification for the knobs exported over netlink.
 */
static int devinet_conf_proc(const struct ctl_table *ctl, int write,
			     void *buffer, size_t *lenp, loff_t *ppos)
{
	int old_value = *(int *)ctl->data;
	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
	int new_value = *(int *)ctl->data;

	if (write) {
		struct ipv4_devconf *cnf = ctl->extra1;
		struct net *net = ctl->extra2;
		/* Index of this knob within cnf->data[] (0-based, hence the
		 * IPV4_DEVCONF_* - 1 comparisons below).
		 */
		int i = (int *)ctl->data - cnf->data;
		int ifindex;

		set_bit(i, cnf->state);

		if (cnf == net->ipv4.devconf_dflt)
			devinet_copy_dflt_conf(net, i);
		if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1 ||
		    i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
			if ((new_value == 0) && (old_value != 0))
				rt_cache_flush(net);

		if (i == IPV4_DEVCONF_BC_FORWARDING - 1 &&
		    new_value != old_value)
			rt_cache_flush(net);

		if (i == IPV4_DEVCONF_RP_FILTER - 1 &&
		    new_value != old_value) {
			ifindex = devinet_conf_ifindex(net, cnf);
			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
						    NETCONFA_RP_FILTER,
						    ifindex, cnf);
		}
		if (i == IPV4_DEVCONF_PROXY_ARP - 1 &&
		    new_value != old_value) {
			ifindex = devinet_conf_ifindex(net, cnf);
			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
						    NETCONFA_PROXY_NEIGH,
						    ifindex, cnf);
		}
		if (i == IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN - 1 &&
		    new_value != old_value) {
			ifindex = devinet_conf_ifindex(net, cnf);
			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
						    NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
						    ifindex, cnf);
		}
	}

	return ret;
}
2554 
/* Proc handler for the "forwarding" sysctls (all/default/per-device and
 * the net.ipv4.ip_forward alias).
 *
 * Writes require CAP_NET_ADMIN in the netns's user namespace.  A change
 * to anything other than the "default" entry needs the per-net RTNL; if
 * the trylock fails, the written value and file position are rolled
 * back and the syscall is restarted rather than blocking here.
 */
static int devinet_sysctl_forward(const struct ctl_table *ctl, int write,
				  void *buffer, size_t *lenp, loff_t *ppos)
{
	int *valp = ctl->data;
	int val = *valp;
	loff_t pos = *ppos;
	struct net *net = ctl->extra2;
	int ret;

	if (write && !ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	if (write && *valp != val) {
		if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
			if (!rtnl_net_trylock(net)) {
				/* Restore the original values before restarting */
				*valp = val;
				*ppos = pos;
				return restart_syscall();
			}
			if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
				/* "all": fan the change out to every device. */
				inet_forward_change(net);
			} else {
				struct ipv4_devconf *cnf = ctl->extra1;
				struct in_device *idev =
					container_of(cnf, struct in_device, cnf);
				if (*valp)
					dev_disable_lro(idev->dev);
				inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
							    NETCONFA_FORWARDING,
							    idev->dev->ifindex,
							    cnf);
			}
			rtnl_net_unlock(net);
			rt_cache_flush(net);
		} else
			/* "default" changes need no lock and no flush. */
			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
						    NETCONFA_FORWARDING,
						    NETCONFA_IFINDEX_DEFAULT,
						    net->ipv4.devconf_dflt);
	}

	return ret;
}
2601 
2602 static int ipv4_doint_and_flush(const struct ctl_table *ctl, int write,
2603 				void *buffer, size_t *lenp, loff_t *ppos)
2604 {
2605 	int *valp = ctl->data;
2606 	int val = *valp;
2607 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2608 	struct net *net = ctl->extra2;
2609 
2610 	if (write && *valp != val)
2611 		rt_cache_flush(net);
2612 
2613 	return ret;
2614 }
2615 
/* Build one ctl_table entry for devconf knob IPV4_DEVCONF_<attr>.
 * .data/.extra1 initially point into the global ipv4_devconf template;
 * __devinet_sysctl_register() rebases them onto the per-net/per-device
 * copy before registration.
 */
#define DEVINET_SYSCTL_ENTRY(attr, name, mval, proc) \
	{ \
		.procname	= name, \
		.data		= ipv4_devconf.data + \
				  IPV4_DEVCONF_ ## attr - 1, \
		.maxlen		= sizeof(int), \
		.mode		= mval, \
		.proc_handler	= proc, \
		.extra1		= &ipv4_devconf, \
	}

/* Writable entry using the generic devinet_conf_proc handler. */
#define DEVINET_SYSCTL_RW_ENTRY(attr, name) \
	DEVINET_SYSCTL_ENTRY(attr, name, 0644, devinet_conf_proc)

/* Read-only entry (mode 0444), same generic handler. */
#define DEVINET_SYSCTL_RO_ENTRY(attr, name) \
	DEVINET_SYSCTL_ENTRY(attr, name, 0444, devinet_conf_proc)

/* Writable entry with a custom proc handler. */
#define DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, proc) \
	DEVINET_SYSCTL_ENTRY(attr, name, 0644, proc)

/* Writable entry whose changes flush the route cache. */
#define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
	DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
2638 
/* Template for the net/ipv4/conf/<dev>/ sysctl directory.  It is
 * kmemdup'ed and rebased per registration in
 * __devinet_sysctl_register(), never registered directly.
 */
static struct devinet_sysctl_table {
	struct ctl_table_header *sysctl_header;
	struct ctl_table devinet_vars[IPV4_DEVCONF_MAX];
} devinet_sysctl = {
	.devinet_vars = {
		DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding",
					     devinet_sysctl_forward),
		DEVINET_SYSCTL_RO_ENTRY(MC_FORWARDING, "mc_forwarding"),
		DEVINET_SYSCTL_RW_ENTRY(BC_FORWARDING, "bc_forwarding"),

		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_REDIRECTS, "accept_redirects"),
		DEVINET_SYSCTL_RW_ENTRY(SECURE_REDIRECTS, "secure_redirects"),
		DEVINET_SYSCTL_RW_ENTRY(SHARED_MEDIA, "shared_media"),
		DEVINET_SYSCTL_RW_ENTRY(RP_FILTER, "rp_filter"),
		DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS, "send_redirects"),
		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
					"accept_source_route"),
		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"),
		DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
		DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
		DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
		DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
		DEVINET_SYSCTL_RW_ENTRY(LOG_MARTIANS, "log_martians"),
		DEVINET_SYSCTL_RW_ENTRY(TAG, "tag"),
		DEVINET_SYSCTL_RW_ENTRY(ARPFILTER, "arp_filter"),
		DEVINET_SYSCTL_RW_ENTRY(ARP_ANNOUNCE, "arp_announce"),
		DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
		DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
		DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
		DEVINET_SYSCTL_RW_ENTRY(ARP_EVICT_NOCARRIER,
					"arp_evict_nocarrier"),
		DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
		DEVINET_SYSCTL_RW_ENTRY(FORCE_IGMP_VERSION,
					"force_igmp_version"),
		DEVINET_SYSCTL_RW_ENTRY(IGMPV2_UNSOLICITED_REPORT_INTERVAL,
					"igmpv2_unsolicited_report_interval"),
		DEVINET_SYSCTL_RW_ENTRY(IGMPV3_UNSOLICITED_REPORT_INTERVAL,
					"igmpv3_unsolicited_report_interval"),
		DEVINET_SYSCTL_RW_ENTRY(IGNORE_ROUTES_WITH_LINKDOWN,
					"ignore_routes_with_linkdown"),
		DEVINET_SYSCTL_RW_ENTRY(DROP_GRATUITOUS_ARP,
					"drop_gratuitous_arp"),

		/* Entries below flush the route cache on change. */
		DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
		DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
		DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES,
					      "promote_secondaries"),
		DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET,
					      "route_localnet"),
		DEVINET_SYSCTL_FLUSHING_ENTRY(DROP_UNICAST_IN_L2_MULTICAST,
					      "drop_unicast_in_l2_multicast"),
	},
};
2692 
/* Register a net/ipv4/conf/<dev_name>/ sysctl directory backed by @p.
 *
 * Duplicates the devinet_sysctl template, rebases each entry's .data
 * from the global ipv4_devconf onto @p, and stores the resulting table
 * in p->sysctl for later unregistration.  On success a NETCONFA_ALL
 * netconf notification announces the new config.
 *
 * Returns 0 on success, -ENOMEM on allocation or registration failure.
 */
static int __devinet_sysctl_register(struct net *net, char *dev_name,
				     int ifindex, struct ipv4_devconf *p)
{
	int i;
	struct devinet_sysctl_table *t;
	char path[sizeof("net/ipv4/conf/") + IFNAMSIZ];

	t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL_ACCOUNT);
	if (!t)
		goto out;

	for (i = 0; i < ARRAY_SIZE(t->devinet_vars); i++) {
		/* Rebase .data from the template devconf onto @p. */
		t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf;
		t->devinet_vars[i].extra1 = p;
		t->devinet_vars[i].extra2 = net;
	}

	snprintf(path, sizeof(path), "net/ipv4/conf/%s", dev_name);

	t->sysctl_header = register_net_sysctl(net, path, t->devinet_vars);
	if (!t->sysctl_header)
		goto free;

	p->sysctl = t;

	inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL,
				    ifindex, p);
	return 0;

free:
	kfree(t);
out:
	return -ENOMEM;
}
2727 
/* Tear down the sysctl table registered for @cnf (if any) and announce
 * the removal with an RTM_DELNETCONF notification (NULL devconf, so
 * only the ifindex is reported).
 */
static void __devinet_sysctl_unregister(struct net *net,
					struct ipv4_devconf *cnf, int ifindex)
{
	struct devinet_sysctl_table *t = cnf->sysctl;

	if (t) {
		/* Clear the back-pointer before freeing the table. */
		cnf->sysctl = NULL;
		unregister_net_sysctl_table(t->sysctl_header);
		kfree(t);
	}

	inet_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL);
}
2741 
2742 static int devinet_sysctl_register(struct in_device *idev)
2743 {
2744 	int err;
2745 
2746 	if (!sysctl_dev_name_is_allowed(idev->dev->name))
2747 		return -EINVAL;
2748 
2749 	err = neigh_sysctl_register(idev->dev, idev->arp_parms, NULL);
2750 	if (err)
2751 		return err;
2752 	err = __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
2753 					idev->dev->ifindex, &idev->cnf);
2754 	if (err)
2755 		neigh_sysctl_unregister(idev->arp_parms);
2756 	return err;
2757 }
2758 
/* Counterpart of devinet_sysctl_register(): remove the devconf tree
 * first, then the neighbour (ARP) tree.
 */
static void devinet_sysctl_unregister(struct in_device *idev)
{
	struct net *net = dev_net(idev->dev);

	__devinet_sysctl_unregister(net, &idev->cnf, idev->dev->ifindex);
	neigh_sysctl_unregister(idev->arp_parms);
}
2766 
/* Template for net.ipv4.ip_forward — an alias for the "all" FORWARDING
 * knob.  devinet_init_net() duplicates it and repoints .data/.extra1/
 * .extra2 at the per-netns "all" devconf before registering.
 */
static struct ctl_table ctl_forward_entry[] = {
	{
		.procname	= "ip_forward",
		.data		= &ipv4_devconf.data[
					IPV4_DEVCONF_FORWARDING - 1],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= devinet_sysctl_forward,
		.extra1		= &ipv4_devconf,
		.extra2		= &init_net,
	},
};
2779 #endif
2780 
/* Per-netns init: allocate the address hash table, the "all" and
 * "default" devconf copies, and (with sysctl) register the
 * net/ipv4/conf/{all,default} trees plus the ip_forward alias.
 *
 * New non-init namespaces seed their devconf according to the
 * net_inherit_devconf() policy before registration.  Error unwinding
 * releases resources in strict reverse order of acquisition.
 */
static __net_init int devinet_init_net(struct net *net)
{
#ifdef CONFIG_SYSCTL
	struct ctl_table_header *forw_hdr;
	struct ctl_table *tbl;
#endif
	struct ipv4_devconf *all, *dflt;
	int err;
	int i;

	err = -ENOMEM;
	/* Per-netns IPv4 address hash table, IN4_ADDR_HSIZE buckets. */
	net->ipv4.inet_addr_lst = kmalloc_objs(struct hlist_head,
					       IN4_ADDR_HSIZE);
	if (!net->ipv4.inet_addr_lst)
		goto err_alloc_hash;

	all = kmemdup(&ipv4_devconf, sizeof(ipv4_devconf), GFP_KERNEL);
	if (!all)
		goto err_alloc_all;

	dflt = kmemdup(&ipv4_devconf_dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
	if (!dflt)
		goto err_alloc_dflt;

#ifdef CONFIG_SYSCTL
	tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
	if (!tbl)
		goto err_alloc_ctl;

	/* Repoint the ip_forward alias at this netns's "all" devconf. */
	tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
	tbl[0].extra1 = all;
	tbl[0].extra2 = net;
#endif

	if (!net_eq(net, &init_net)) {
		switch (net_inherit_devconf()) {
		case 3:
			/* copy from the current netns */
			memcpy(all, current->nsproxy->net_ns->ipv4.devconf_all,
			       sizeof(ipv4_devconf));
			memcpy(dflt,
			       current->nsproxy->net_ns->ipv4.devconf_dflt,
			       sizeof(ipv4_devconf_dflt));
			break;
		case 0:
		case 1:
			/* copy from init_net */
			memcpy(all, init_net.ipv4.devconf_all,
			       sizeof(ipv4_devconf));
			memcpy(dflt, init_net.ipv4.devconf_dflt,
			       sizeof(ipv4_devconf_dflt));
			break;
		case 2:
			/* use compiled values */
			break;
		}
	}

#ifdef CONFIG_SYSCTL
	err = __devinet_sysctl_register(net, "all", NETCONFA_IFINDEX_ALL, all);
	if (err < 0)
		goto err_reg_all;

	err = __devinet_sysctl_register(net, "default",
					NETCONFA_IFINDEX_DEFAULT, dflt);
	if (err < 0)
		goto err_reg_dflt;

	err = -ENOMEM;
	forw_hdr = register_net_sysctl_sz(net, "net/ipv4", tbl,
					  ARRAY_SIZE(ctl_forward_entry));
	if (!forw_hdr)
		goto err_reg_ctl;
	net->ipv4.forw_hdr = forw_hdr;
#endif

	for (i = 0; i < IN4_ADDR_HSIZE; i++)
		INIT_HLIST_HEAD(&net->ipv4.inet_addr_lst[i]);

	/* Periodic worker expiring addresses with finite lifetimes. */
	INIT_DEFERRABLE_WORK(&net->ipv4.addr_chk_work, check_lifetime);

	net->ipv4.devconf_all = all;
	net->ipv4.devconf_dflt = dflt;
	return 0;

#ifdef CONFIG_SYSCTL
err_reg_ctl:
	__devinet_sysctl_unregister(net, dflt, NETCONFA_IFINDEX_DEFAULT);
err_reg_dflt:
	__devinet_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
err_reg_all:
	kfree(tbl);
err_alloc_ctl:
#endif
	kfree(dflt);
err_alloc_dflt:
	kfree(all);
err_alloc_all:
	kfree(net->ipv4.inet_addr_lst);
err_alloc_hash:
	return err;
}
2883 
/* Per-netns teardown: mirror of devinet_init_net() in reverse order. */
static __net_exit void devinet_exit_net(struct net *net)
{
#ifdef CONFIG_SYSCTL
	const struct ctl_table *tbl;
#endif

	cancel_delayed_work_sync(&net->ipv4.addr_chk_work);

#ifdef CONFIG_SYSCTL
	/* Grab the duplicated ip_forward table before unregistering the
	 * header so it can still be freed afterwards.
	 */
	tbl = net->ipv4.forw_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.forw_hdr);
	__devinet_sysctl_unregister(net, net->ipv4.devconf_dflt,
				    NETCONFA_IFINDEX_DEFAULT);
	__devinet_sysctl_unregister(net, net->ipv4.devconf_all,
				    NETCONFA_IFINDEX_ALL);
	kfree(tbl);
#endif
	kfree(net->ipv4.devconf_dflt);
	kfree(net->ipv4.devconf_all);
	kfree(net->ipv4.inet_addr_lst);
}
2905 
/* Pernet hooks wiring devinet setup/teardown into netns lifetime. */
static __net_initdata struct pernet_operations devinet_ops = {
	.init = devinet_init_net,
	.exit = devinet_exit_net,
};
2910 
/* rtnetlink per-address-family ops for AF_INET (IFLA_AF_SPEC handling). */
static struct rtnl_af_ops inet_af_ops __read_mostly = {
	.family		  = AF_INET,
	.fill_link_af	  = inet_fill_link_af,
	.get_link_af_size = inet_get_link_af_size,
	.validate_link_af = inet_validate_link_af,
	.set_link_af	  = inet_set_link_af,
};
2918 
/* rtnetlink message handlers registered by devinet_init(); flags pick
 * per-netns RTNL or fully unlocked execution per handler.
 */
static const struct rtnl_msg_handler devinet_rtnl_msg_handlers[] __initconst = {
	{.protocol = PF_INET, .msgtype = RTM_NEWADDR, .doit = inet_rtm_newaddr,
	 .flags = RTNL_FLAG_DOIT_PERNET},
	{.protocol = PF_INET, .msgtype = RTM_DELADDR, .doit = inet_rtm_deladdr,
	 .flags = RTNL_FLAG_DOIT_PERNET},
	{.protocol = PF_INET, .msgtype = RTM_GETADDR, .dumpit = inet_dump_ifaddr,
	 .flags = RTNL_FLAG_DUMP_UNLOCKED | RTNL_FLAG_DUMP_SPLIT_NLM_DONE},
	{.protocol = PF_INET, .msgtype = RTM_GETNETCONF,
	 .doit = inet_netconf_get_devconf, .dumpit = inet_netconf_dump_devconf,
	 .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED},
	{.owner = THIS_MODULE, .protocol = PF_INET, .msgtype = RTM_GETMULTICAST,
	 .dumpit = inet_dump_ifmcaddr, .flags = RTNL_FLAG_DUMP_UNLOCKED},
};
2932 
/* Boot-time init: register the pernet ops, the netdevice notifier, the
 * AF_INET rtnl_af ops (fatal on failure), and the rtnetlink handlers.
 */
void __init devinet_init(void)
{
	register_pernet_subsys(&devinet_ops);
	register_netdevice_notifier(&ip_netdev_notifier);

	if (rtnl_af_register(&inet_af_ops))
		panic("Unable to register inet_af_ops\n");

	rtnl_register_many(devinet_rtnl_msg_handlers);
}
2943