xref: /linux/net/core/net_namespace.c (revision 1a9239bb4253f9076b5b4b2a1a4e8d7defd77a95)
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>
#include <linux/uidgid.h>
#include <linux/cookie.h>
#include <linux/proc_fs.h>

#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 *	Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

/* Protects net_namespace_list. Nests inside rtnl_lock() */
DECLARE_RWSEM(net_rwsem);
EXPORT_SYMBOL_GPL(net_rwsem);

#ifdef CONFIG_KEYS
static struct key_tag init_net_key_domain = { .usage = REFCOUNT_INIT(1) };
#endif

struct net init_net;
EXPORT_SYMBOL(init_net);

static bool init_net_initialized;
/*
 * pernet_ops_rwsem: protects: pernet_list, net_generic_ids,
 * init_net_initialized and first_device pointer.
 * This is an internal net namespace object. Please don't use it
 * outside.
 */
DECLARE_RWSEM(pernet_ops_rwsem);

#define MIN_PERNET_OPS_ID	\
	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

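/*
 * Worked example, assuming a typical 64-bit build: struct net_generic
 * starts with a header of an unsigned int len plus a struct rcu_head
 * (two pointers), i.e. 24 bytes after padding, so MIN_PERNET_OPS_ID is
 * (24 + 8 - 1) / 8 = 3. Slots 0-2 of ptr[] overlay that header, which
 * is what the "+1 for len +2 for rcu_head" note above accounts for:
 * of the 13 initial slots, ids 3..12 are usable.
 */
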
static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

DEFINE_COOKIE(net_cookie);

static struct net_generic *net_alloc_generic(void)
{
	unsigned int gen_ptrs = READ_ONCE(max_gen_ptrs);
	unsigned int generic_size;
	struct net_generic *ng;

	generic_size = offsetof(struct net_generic, ptr[gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->s.len = gen_ptrs;

	return ng;
}

static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(id < MIN_PERNET_OPS_ID);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&pernet_ops_rwsem));
	if (old_ng->s.len > id) {
		old_ng->ptr[id] = data;
		return 0;
	}

	ng = net_alloc_generic();
	if (!ng)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * net_generic() explores the net->gen array inside an rcu
	 * read section. Besides, once set, the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
	ng->ptr[id] = data;

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, s.rcu);
	return 0;
}

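/*
 * Reader side, for comparison: net_generic() (<net/netns/generic.h>)
 * takes the RCU read lock itself and dereferences net->gen, so a
 * subsystem simply does (foo_net_id being its registered id, a
 * hypothetical name here):
 *
 *	struct foo_net *fn = net_generic(net, foo_net_id);
 *
 * The copy-and-replace above is what keeps such lockless lookups safe
 * while the array grows.
 */
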
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	struct net_generic *ng;
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

	if (ops->id) {
		ng = rcu_dereference_protected(net->gen,
					       lockdep_is_held(&pernet_ops_rwsem));
		ng->ptr[*ops->id] = NULL;
	}

cleanup:
	kfree(data);

out:
	return err;
}

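/*
 * In other words: for ops that set ->id and ->size, ops_init() above
 * installs a zeroed per-net zone before calling ->init, so an init
 * method can rely on its zone existing. A minimal sketch (subsystem
 * name "foo" and its fields are hypothetical):
 *
 *	static int __net_init foo_init_net(struct net *net)
 *	{
 *		struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *		fn->counter = 0;	// already zeroed; shown for clarity
 *		return 0;
 *	}
 */
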
static void ops_pre_exit_list(const struct pernet_operations *ops,
			      struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->pre_exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->pre_exit(net);
	}
}

static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list) {
			ops->exit(net);
			cond_resched();
		}
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			kfree(net_generic(net, *ops->id));
	}
}

/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0;

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() will not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}

/* Must be called from RCU-critical section or with nsid_lock held */
static int __peernet2id(const struct net *net, struct net *peer)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	return NETNSA_NSID_NOT_ASSIGNED;
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
			      struct nlmsghdr *nlh, gfp_t gfp);
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
{
	int id;

	if (refcount_read(&net->ns.count) == 0)
		return NETNSA_NSID_NOT_ASSIGNED;

	spin_lock_bh(&net->nsid_lock);
	id = __peernet2id(net, peer);
	if (id >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		return id;
	}

	/* When peer is obtained from RCU lists, we may race with
	 * its cleanup. Check whether it's alive, and this guarantees
	 * we never hash a peer back to net->netns_ids, after it has
	 * just been idr_remove()'d from there in cleanup_net().
	 */
	if (!maybe_get_net(peer)) {
		spin_unlock_bh(&net->nsid_lock);
		return NETNSA_NSID_NOT_ASSIGNED;
	}

	id = alloc_netid(net, peer, -1);
	spin_unlock_bh(&net->nsid_lock);

	put_net(peer);
	if (id < 0)
		return NETNSA_NSID_NOT_ASSIGNED;

	rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL, gfp);

	return id;
}
EXPORT_SYMBOL_GPL(peernet2id_alloc);

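/*
 * Typical caller, as a sketch: this mirrors how rtnetlink fills
 * IFLA_LINK_NETNSID when a device's link partner lives in another
 * namespace (treat it as illustrative, not a quote):
 *
 *	id = peernet2id_alloc(dev_net(dev), link_net, GFP_ATOMIC);
 *	if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
 *		goto nla_put_failure;
 */
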
/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(const struct net *net, struct net *peer)
{
	int id;

	rcu_read_lock();
	id = __peernet2id(net, peer);
	rcu_read_unlock();

	return id;
}
EXPORT_SYMBOL(peernet2id);

/* This function returns true if the peer netns has an id assigned in the
 * current netns.
 */
bool peernet_has_id(const struct net *net, struct net *peer)
{
	return peernet2id(net, peer) >= 0;
}

struct net *get_net_ns_by_id(const struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		peer = maybe_get_net(peer);
	rcu_read_unlock();

	return peer;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_id);

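/*
 * Caller contract sketch: a non-NULL return holds a reference that
 * must be dropped with put_net(). For illustration:
 *
 *	peer = get_net_ns_by_id(net, nsid);
 *	if (!peer)
 *		return -ENOENT;
 *	...
 *	put_net(peer);
 */
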
static __net_init void preinit_net_sysctl(struct net *net)
{
	net->core.sysctl_somaxconn = SOMAXCONN;
	/* Limits per socket sk_omem_alloc usage.
	 * TCP zerocopy regular usage needs 128 KB.
	 */
	net->core.sysctl_optmem_max = 128 * 1024;
	net->core.sysctl_txrehash = SOCK_TXREHASH_ENABLED;
	net->core.sysctl_tstamp_allow_data = 1;
}

/* init code that must occur even if setup_net() is not called. */
static __net_init void preinit_net(struct net *net, struct user_namespace *user_ns)
{
	refcount_set(&net->passive, 1);
	refcount_set(&net->ns.count, 1);
	ref_tracker_dir_init(&net->refcnt_tracker, 128, "net refcnt");
	ref_tracker_dir_init(&net->notrefcnt_tracker, 128, "net notrefcnt");

	get_random_bytes(&net->hash_mix, sizeof(u32));
	net->dev_base_seq = 1;
	net->user_ns = user_ns;

	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);
	mutex_init(&net->ipv4.ra_mutex);

#ifdef CONFIG_DEBUG_NET_SMALL_RTNL
	mutex_init(&net->rtnl_mutex);
	lock_set_cmp_fn(&net->rtnl_mutex, rtnl_net_lock_cmp_fn, NULL);
#endif

	INIT_LIST_HEAD(&net->ptype_all);
	INIT_LIST_HEAD(&net->ptype_specific);
	preinit_net_sysctl(net);
}

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net)
{
	/* Must be called with pernet_ops_rwsem held */
	const struct pernet_operations *ops, *saved_ops;
	LIST_HEAD(net_exit_list);
	LIST_HEAD(dev_kill_list);
	int error = 0;

	preempt_disable();
	net->net_cookie = gen_cookie_next(&net_cookie);
	preempt_enable();

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
	down_write(&net_rwsem);
	list_add_tail_rcu(&net->list, &net_namespace_list);
	up_write(&net_rwsem);
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_pre_exit_list(ops, &net_exit_list);

	synchronize_rcu();

	ops = saved_ops;
	rtnl_lock();
	list_for_each_entry_continue_reverse(ops, &pernet_list, list) {
		if (ops->exit_batch_rtnl)
			ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list);
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	rcu_barrier();
	goto out;
}

#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

static struct kmem_cache *net_cachep __ro_after_init;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

#ifdef CONFIG_KEYS
	net->key_domain = kzalloc(sizeof(struct key_tag), GFP_KERNEL);
	if (!net->key_domain)
		goto out_free_2;
	refcount_set(&net->key_domain->usage, 1);
#endif

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

#ifdef CONFIG_KEYS
out_free_2:
	kmem_cache_free(net_cachep, net);
	net = NULL;
#endif
out_free:
	kfree(ng);
	goto out;
}

static LLIST_HEAD(defer_free_list);

static void net_complete_free(void)
{
	struct llist_node *kill_list;
	struct net *net, *next;

	/* Get the list of namespaces to free from last round. */
	kill_list = llist_del_all(&defer_free_list);

	llist_for_each_entry_safe(net, next, kill_list, defer_free_list)
		kmem_cache_free(net_cachep, net);
}

void net_passive_dec(struct net *net)
{
	if (refcount_dec_and_test(&net->passive)) {
		kfree(rcu_access_pointer(net->gen));

		/* There should not be any trackers left there. */
		ref_tracker_dir_exit(&net->notrefcnt_tracker);

		/* Wait for an extra rcu_barrier() before final free. */
		llist_add(&net->defer_free_list, &defer_free_list);
	}
}

void net_drop_ns(void *p)
{
	struct net *net = (struct net *)p;

	if (net)
		net_passive_dec(net);
}

struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	net = net_alloc();
	if (!net) {
		rv = -ENOMEM;
		goto dec_ucounts;
	}

	preinit_net(net, user_ns);
	net->ucounts = ucounts;
	get_user_ns(user_ns);

	rv = down_read_killable(&pernet_ops_rwsem);
	if (rv < 0)
		goto put_userns;

	rv = setup_net(net);

	up_read(&pernet_ops_rwsem);

	if (rv < 0) {
put_userns:
#ifdef CONFIG_KEYS
		key_remove_domain(net->key_domain);
#endif
		put_user_ns(user_ns);
		net_passive_dec(net);
dec_ucounts:
		dec_net_namespaces(ucounts);
		return ERR_PTR(rv);
	}
	return net;
}

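/*
 * For orientation: userspace reaches copy_net_ns() via clone(2) or
 * unshare(2) with CLONE_NEWNET, e.g. (sketch):
 *
 *	if (unshare(CLONE_NEWNET) == 0)
 *		;	// the calling task now owns a fresh netns
 */
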
/**
 * net_ns_get_ownership - get sysfs ownership data for @net
 * @net: network namespace in question (can be NULL)
 * @uid: kernel user ID for sysfs objects
 * @gid: kernel group ID for sysfs objects
 *
 * Returns the uid/gid pair of root in the user namespace associated with the
 * given network namespace.
 */
void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid)
{
	if (net) {
		kuid_t ns_root_uid = make_kuid(net->user_ns, 0);
		kgid_t ns_root_gid = make_kgid(net->user_ns, 0);

		if (uid_valid(ns_root_uid))
			*uid = ns_root_uid;

		if (gid_valid(ns_root_gid))
			*gid = ns_root_gid;
	} else {
		*uid = GLOBAL_ROOT_UID;
		*gid = GLOBAL_ROOT_GID;
	}
}
EXPORT_SYMBOL_GPL(net_ns_get_ownership);

static void unhash_nsid(struct net *net, struct net *last)
{
	struct net *tmp;
	/* This function is only called from the cleanup_net() work,
	 * and that work is the only one that may delete a net from
	 * net_namespace_list. So, while the code below is executing,
	 * the list may only grow. Thus, we do not need to use
	 * for_each_net_rcu() or net_rwsem.
	 */
	for_each_net(tmp) {
		int id;

		spin_lock_bh(&tmp->nsid_lock);
		id = __peernet2id(tmp, net);
		if (id >= 0)
			idr_remove(&tmp->netns_ids, id);
		spin_unlock_bh(&tmp->nsid_lock);
		if (id >= 0)
			rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
					  GFP_KERNEL);
		if (tmp == last)
			break;
	}
	spin_lock_bh(&net->nsid_lock);
	idr_destroy(&net->netns_ids);
	spin_unlock_bh(&net->nsid_lock);
}

static LLIST_HEAD(cleanup_list);

struct task_struct *cleanup_net_task;

static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp, *last;
	struct llist_node *net_kill_list;
	LIST_HEAD(net_exit_list);
	LIST_HEAD(dev_kill_list);

	cleanup_net_task = current;

	/* Atomically snapshot the list of namespaces to cleanup */
	net_kill_list = llist_del_all(&cleanup_list);

	down_read(&pernet_ops_rwsem);

	/* Don't let anyone else find us. */
	down_write(&net_rwsem);
	llist_for_each_entry(net, net_kill_list, cleanup_list)
		list_del_rcu(&net->list);
	/* Cache the last net. After we unlock rtnl, no net newly added
	 * to net_namespace_list can assign an nsid to a net from
	 * net_kill_list (see peernet2id_alloc()).
	 * So, we skip them in unhash_nsid().
	 *
	 * Note that unhash_nsid() does not delete nsid links between
	 * net_kill_list's nets, as they've already been deleted from
	 * net_namespace_list. But this would be useless anyway, as
	 * netns_ids are destroyed there.
	 */
	last = list_last_entry(&net_namespace_list, struct net, list);
	up_write(&net_rwsem);

	llist_for_each_entry(net, net_kill_list, cleanup_list) {
		unhash_nsid(net, last);
		list_add_tail(&net->exit_list, &net_exit_list);
	}

	/* Run all of the network namespace pre_exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_pre_exit_list(ops, &net_exit_list);

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 * Also the pre_exit() and exit() methods need this barrier.
	 */
	synchronize_rcu_expedited();

	rtnl_lock();
	list_for_each_entry_reverse(ops, &pernet_list, list) {
		if (ops->exit_batch_rtnl)
			ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list);
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	up_read(&pernet_ops_rwsem);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	net_complete_free();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		dec_net_namespaces(net->ucounts);
#ifdef CONFIG_KEYS
		key_remove_domain(net->key_domain);
#endif
		put_user_ns(net->user_ns);
		net_passive_dec(net);
	}
	cleanup_net_task = NULL;
}

/**
 * net_ns_barrier - wait until concurrent net_cleanup_work is done
 *
 * cleanup_net runs from work queue and will first remove namespaces
 * from the global list, then run net exit functions.
 *
 * Call this in module exit path to make sure that all netns
 * ->exit ops have been invoked before the function is removed.
 */
void net_ns_barrier(void)
{
	down_write(&pernet_ops_rwsem);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL(net_ns_barrier);

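/*
 * Intended call site, as a sketch (module and teardown names are
 * hypothetical):
 *
 *	static void __exit foo_module_exit(void)
 *	{
 *		foo_unregister_hooks();		// stop new per-net work
 *		net_ns_barrier();		// flush in-flight cleanup_net()
 *	}
 */
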
static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
	ref_tracker_dir_exit(&net->refcnt_tracker);
	/* Cleanup the network namespace in process context */
	if (llist_add(&net->cleanup_list, &cleanup_list))
		queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

/**
 * get_net_ns - increment the refcount of the network namespace
 * @ns: common namespace (net)
 *
 * Returns the net's common namespace or ERR_PTR() if ref is zero.
 */
struct ns_common *get_net_ns(struct ns_common *ns)
{
	struct net *net;

	net = maybe_get_net(container_of(ns, struct net, ns));
	if (net)
		return &net->ns;
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(get_net_ns);

struct net *get_net_ns_by_fd(int fd)
{
	CLASS(fd, f)(fd);

	if (fd_empty(f))
		return ERR_PTR(-EBADF);

	if (proc_ns_file(fd_file(f))) {
		struct ns_common *ns = get_proc_ns(file_inode(fd_file(f)));

		if (ns->ops == &netns_operations)
			return get_net(container_of(ns, struct net, ns));
	}

	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
#endif

struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;

		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

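/*
 * Both lookups above return ERR_PTR() values, never NULL, so callers
 * follow the usual pattern (sketch):
 *
 *	net = get_net_ns_by_fd(fd);	// or get_net_ns_by_pid(pid)
 *	if (IS_ERR(net))
 *		return PTR_ERR(net);
 *	...
 *	put_net(net);
 */
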
static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};

static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]		= { .type = NLA_UNSPEC },
	[NETNSA_NSID]		= { .type = NLA_S32 },
	[NETNSA_PID]		= { .type = NLA_U32 },
	[NETNSA_FD]		= { .type = NLA_U32 },
	[NETNSA_TARGET_NSID]	= { .type = NLA_S32 },
};

static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct nlattr *nla;
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg), tb,
				     NETNSA_MAX, rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID]) {
		NL_SET_ERR_MSG(extack, "nsid is missing");
		return -EINVAL;
	}
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}
	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	spin_lock_bh(&net->nsid_lock);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack,
			       "Peer netns already has a nsid assigned");
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock_bh(&net->nsid_lock);
	if (err >= 0) {
		rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
				  nlh, GFP_KERNEL);
		err = 0;
	} else if (err == -ENOSPC && nsid >= 0) {
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
		NL_SET_ERR_MSG(extack, "The specified nsid is already used");
	}
out:
	put_net(peer);
	return err;
}

static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       + nla_total_size(sizeof(s32)) /* NETNSA_CURRENT_NSID */
	       ;
}

struct net_fill_args {
	u32 portid;
	u32 seq;
	int flags;
	int cmd;
	int nsid;
	bool add_ref;
	int ref_nsid;
};

static int rtnl_net_fill(struct sk_buff *skb, struct net_fill_args *args)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, args->portid, args->seq, args->cmd, sizeof(*rth),
			args->flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, args->nsid))
		goto nla_put_failure;

	if (args->add_ref &&
	    nla_put_s32(skb, NETNSA_CURRENT_NSID, args->ref_nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int rtnl_net_valid_getid_req(struct sk_buff *skb,
				    const struct nlmsghdr *nlh,
				    struct nlattr **tb,
				    struct netlink_ext_ack *extack)
{
	int i, err;

	if (!netlink_strict_get_check(skb))
		return nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg),
					      tb, NETNSA_MAX, rtnl_net_policy,
					      extack);

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
					    NETNSA_MAX, rtnl_net_policy,
					    extack);
	if (err)
		return err;

	for (i = 0; i <= NETNSA_MAX; i++) {
		if (!tb[i])
			continue;

		switch (i) {
		case NETNSA_PID:
		case NETNSA_FD:
		case NETNSA_NSID:
		case NETNSA_TARGET_NSID:
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported attribute in peer netns getid request");
			return -EINVAL;
		}
	}

	return 0;
}

static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct net_fill_args fillargs = {
		.portid = NETLINK_CB(skb).portid,
		.seq = nlh->nlmsg_seq,
		.cmd = RTM_NEWNSID,
	};
	struct net *peer, *target = net;
	struct nlattr *nla;
	struct sk_buff *msg;
	int err;

	err = rtnl_net_valid_getid_req(skb, nlh, tb, extack);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else if (tb[NETNSA_NSID]) {
		peer = get_net_ns_by_id(net, nla_get_s32(tb[NETNSA_NSID]));
		if (!peer)
			peer = ERR_PTR(-ENOENT);
		nla = tb[NETNSA_NSID];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}

	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	if (tb[NETNSA_TARGET_NSID]) {
		int id = nla_get_s32(tb[NETNSA_TARGET_NSID]);

		target = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, id);
		if (IS_ERR(target)) {
			NL_SET_BAD_ATTR(extack, tb[NETNSA_TARGET_NSID]);
			NL_SET_ERR_MSG(extack,
				       "Target netns reference is invalid");
			err = PTR_ERR(target);
			goto out;
		}
		fillargs.add_ref = true;
		fillargs.ref_nsid = peernet2id(net, peer);
	}

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	fillargs.nsid = peernet2id(target, peer);
	err = rtnl_net_fill(msg, &fillargs);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	if (fillargs.add_ref)
		put_net(target);
	put_net(peer);
	return err;
}

struct rtnl_net_dump_cb {
	struct net *tgt_net;
	struct net *ref_net;
	struct sk_buff *skb;
	struct net_fill_args fillargs;
	int idx;
	int s_idx;
};

/* Runs in RCU-critical section. */
static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	net_cb->fillargs.nsid = id;
	if (net_cb->fillargs.add_ref)
		net_cb->fillargs.ref_nsid = __peernet2id(net_cb->ref_net, peer);
	ret = rtnl_net_fill(net_cb->skb, &net_cb->fillargs);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

static int rtnl_valid_dump_net_req(const struct nlmsghdr *nlh, struct sock *sk,
				   struct rtnl_net_dump_cb *net_cb,
				   struct netlink_callback *cb)
{
	struct netlink_ext_ack *extack = cb->extack;
	struct nlattr *tb[NETNSA_MAX + 1];
	int err, i;

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
					    NETNSA_MAX, rtnl_net_policy,
					    extack);
	if (err < 0)
		return err;

	for (i = 0; i <= NETNSA_MAX; i++) {
		if (!tb[i])
			continue;

		if (i == NETNSA_TARGET_NSID) {
			struct net *net;

			net = rtnl_get_net_ns_capable(sk, nla_get_s32(tb[i]));
			if (IS_ERR(net)) {
				NL_SET_BAD_ATTR(extack, tb[i]);
				NL_SET_ERR_MSG(extack,
					       "Invalid target network namespace id");
				return PTR_ERR(net);
			}
			net_cb->fillargs.add_ref = true;
			net_cb->ref_net = net_cb->tgt_net;
			net_cb->tgt_net = net;
		} else {
			NL_SET_BAD_ATTR(extack, tb[i]);
			NL_SET_ERR_MSG(extack,
				       "Unsupported attribute in dump request");
			return -EINVAL;
		}
	}

	return 0;
}

static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtnl_net_dump_cb net_cb = {
		.tgt_net = sock_net(skb->sk),
		.skb = skb,
		.fillargs = {
			.portid = NETLINK_CB(cb->skb).portid,
			.seq = cb->nlh->nlmsg_seq,
			.flags = NLM_F_MULTI,
			.cmd = RTM_NEWNSID,
		},
		.idx = 0,
		.s_idx = cb->args[0],
	};
	int err = 0;

	if (cb->strict_check) {
		err = rtnl_valid_dump_net_req(cb->nlh, skb->sk, &net_cb, cb);
		if (err < 0)
			goto end;
	}

	rcu_read_lock();
	idr_for_each(&net_cb.tgt_net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	rcu_read_unlock();

	cb->args[0] = net_cb.idx;
end:
	if (net_cb.fillargs.add_ref)
		put_net(net_cb.tgt_net);
	return err;
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
			      struct nlmsghdr *nlh, gfp_t gfp)
{
	struct net_fill_args fillargs = {
		.portid = portid,
		.seq = nlh ? nlh->nlmsg_seq : 0,
		.cmd = cmd,
		.nsid = id,
	};
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), gfp);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, &fillargs);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, gfp);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

#ifdef CONFIG_NET_NS
static void __init netns_ipv4_struct_check(void)
{
	/* TX readonly hotpath cache lines */
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_early_retrans);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_tso_win_divisor);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_tso_rtt_log);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_autocorking);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_min_snd_mss);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_notsent_lowat);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_limit_output_bytes);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_min_rtt_wlen);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_wmem);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_ip_fwd_use_pmtu);
	CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_tx, 33);

	/* TXRX readonly hotpath cache lines */
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_txrx,
				      sysctl_tcp_moderate_rcvbuf);
	CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_txrx, 1);

	/* RX readonly hotpath cache line */
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_ip_early_demux);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_tcp_early_demux);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_tcp_l3mdev_accept);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_tcp_reordering);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_tcp_rmem);
	CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_rx, 22);
}
#endif

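/*
 * For context, the groups asserted above are laid out in struct
 * netns_ipv4 with the __cacheline_group_begin()/__cacheline_group_end()
 * markers from <linux/cache.h>, roughly (sketch, field list abridged):
 *
 *	__cacheline_group_begin(netns_ipv4_read_tx);
 *	u8 sysctl_tcp_early_retrans;
 *	...
 *	__cacheline_group_end(netns_ipv4_read_tx);
 *
 * The asserts catch accidental reordering that would split a hot
 * read-mostly group across cache lines.
 */
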
static const struct rtnl_msg_handler net_ns_rtnl_msg_handlers[] __initconst = {
	{.msgtype = RTM_NEWNSID, .doit = rtnl_net_newid,
	 .flags = RTNL_FLAG_DOIT_UNLOCKED},
	{.msgtype = RTM_GETNSID, .doit = rtnl_net_getid,
	 .dumpit = rtnl_net_dumpid,
	 .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED},
};

void __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	netns_ipv4_struct_check();
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
					SMP_CACHE_BYTES,
					SLAB_PANIC|SLAB_ACCOUNT, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

#ifdef CONFIG_KEYS
	init_net.key_domain = &init_net_key_domain;
#endif
	preinit_net(&init_net, &init_user_ns);

	down_write(&pernet_ops_rwsem);
	if (setup_net(&init_net))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;
	up_write(&pernet_ops_rwsem);

	if (register_pernet_subsys(&net_ns_ops))
		panic("Could not register network namespace subsystems");

	rtnl_register_many(net_ns_rtnl_msg_handlers);
}

static void free_exit_list(struct pernet_operations *ops, struct list_head *net_exit_list)
{
	ops_pre_exit_list(ops, net_exit_list);
	synchronize_rcu();

	if (ops->exit_batch_rtnl) {
		LIST_HEAD(dev_kill_list);

		rtnl_lock();
		ops->exit_batch_rtnl(net_exit_list, &dev_kill_list);
		unregister_netdevice_many(&dev_kill_list);
		rtnl_unlock();
	}
	ops_exit_list(ops, net_exit_list);

	ops_free_list(ops, net_exit_list);
}

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || ops->id) {
		/* We hold the write-locked pernet_ops_rwsem, so parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If we get an error, clean up all the namespaces we initialized */
	list_del(&ops->list);
	free_exit_list(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	/* See comment in __register_pernet_operations() */
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);

	free_exit_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_add_tail(&ops->list, list);
		return 0;
	}

	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_del(&ops->list);
	} else {
		LIST_HEAD(net_exit_list);

		list_add(&init_net.exit_list, &net_exit_list);
		free_exit_list(ops, &net_exit_list);
	}
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (WARN_ON(!!ops->id ^ !!ops->size))
		return -EINVAL;

	if (ops->id) {
		error = ida_alloc_min(&net_generic_ids, MIN_PERNET_OPS_ID,
				GFP_KERNEL);
		if (error < 0)
			return error;
		*ops->id = error;
		/* The read of max_gen_ptrs below does not require READ_ONCE()
		 * as writers already hold pernet_ops_rwsem. But the
		 * WRITE_ONCE() is needed to pair with the READ_ONCE() in
		 * net_alloc_generic().
		 */
		WRITE_ONCE(max_gen_ptrs, max(max_gen_ptrs, *ops->id + 1));
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_free(&net_generic_ids, *ops->id);
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_free(&net_generic_ids, *ops->id);
}

/**
 *	register_pernet_subsys - register a network namespace subsystem
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a subsystem which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered, all network namespace init functions are
 *	called for every existing network namespace, allowing kernel
 *	modules to have a race-free view of the set of network namespaces.
 *
 *	When a new network namespace is created, all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed, all of the exit methods
 *	are called in the reverse of the order in which they were
 *	registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;

	down_write(&pernet_ops_rwsem);
	error = register_pernet_operations(first_device, ops);
	up_write(&pernet_ops_rwsem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);

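/*
 * Registration sketch for a module-side caller (subsystem "foo" and
 * its types are hypothetical; the ops fields are the real API):
 *
 *	static unsigned int foo_net_id;
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_init_net,
 *		.exit = foo_exit_net,
 *		.id   = &foo_net_id,
 *		.size = sizeof(struct foo_net),
 *	};
 *
 *	static int __init foo_module_init(void)
 *	{
 *		return register_pernet_subsys(&foo_net_ops);
 *	}
 *
 * With ->id/->size set, register_pernet_operations() above reserves
 * the id and ops_init() allocates the per-net zone for every existing
 * namespace before foo_init_net() runs.
 */
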
/**
 *	unregister_pernet_subsys - unregister a network namespace subsystem
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition, run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	down_write(&pernet_ops_rwsem);
	unregister_pernet_operations(ops);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 *	register_pernet_device - register a network namespace device
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a device which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered, all network namespace init functions are
 *	called for every existing network namespace, allowing kernel
 *	modules to have a race-free view of the set of network namespaces.
 *
 *	When a new network namespace is created, all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed, all of the exit methods
 *	are called in the reverse of the order in which they were
 *	registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;

	down_write(&pernet_ops_rwsem);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	up_write(&pernet_ops_rwsem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 *	unregister_pernet_device - unregister a network namespace netdevice
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition, run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	down_write(&pernet_ops_rwsem);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}

static int netns_install(struct nsset *nsset, struct ns_common *ns)
{
	struct nsproxy *nsproxy = nsset->nsproxy;
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

static struct user_namespace *netns_owner(struct ns_common *ns)
{
	return to_net_ns(ns)->user_ns;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
	.owner		= netns_owner,
};
#endif