1 // SPDX-License-Identifier: GPL-2.0-only
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3
4 #include <linux/workqueue.h>
5 #include <linux/rtnetlink.h>
6 #include <linux/cache.h>
7 #include <linux/slab.h>
8 #include <linux/list.h>
9 #include <linux/delay.h>
10 #include <linux/sched.h>
11 #include <linux/idr.h>
12 #include <linux/rculist.h>
13 #include <linux/nsproxy.h>
14 #include <linux/fs.h>
15 #include <linux/proc_ns.h>
16 #include <linux/file.h>
17 #include <linux/export.h>
18 #include <linux/user_namespace.h>
19 #include <linux/net_namespace.h>
20 #include <linux/sched/task.h>
21 #include <linux/uidgid.h>
22 #include <linux/cookie.h>
23 #include <linux/proc_fs.h>
24
25 #include <net/sock.h>
26 #include <net/netlink.h>
27 #include <net/net_namespace.h>
28 #include <net/netns/generic.h>
29
30 /*
31 * Our network namespace constructor/destructor lists
32 */
33
34 static LIST_HEAD(pernet_list);
35 static struct list_head *first_device = &pernet_list;
36
37 LIST_HEAD(net_namespace_list);
38 EXPORT_SYMBOL_GPL(net_namespace_list);
39
40 /* Protects net_namespace_list. Nests inside rtnl_lock() */
41 DECLARE_RWSEM(net_rwsem);
42 EXPORT_SYMBOL_GPL(net_rwsem);
43
44 #ifdef CONFIG_KEYS
45 static struct key_tag init_net_key_domain = { .usage = REFCOUNT_INIT(1) };
46 #endif
47
48 struct net init_net;
49 EXPORT_SYMBOL(init_net);
50
51 static bool init_net_initialized;
52 /*
53 * pernet_ops_rwsem: protects: pernet_list, net_generic_ids,
54 * init_net_initialized and first_device pointer.
55  * This is an internal net namespace object. Please don't use it
56  * outside.
57 */
58 DECLARE_RWSEM(pernet_ops_rwsem);
59
60 #define MIN_PERNET_OPS_ID \
61 ((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))
62
63 #define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */
64
65 static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
66
67 DEFINE_COOKIE(net_cookie);
68
69 static struct net_generic *net_alloc_generic(void)
70 {
71 unsigned int gen_ptrs = READ_ONCE(max_gen_ptrs);
72 unsigned int generic_size;
73 struct net_generic *ng;
74
75 generic_size = offsetof(struct net_generic, ptr[gen_ptrs]);
76
77 ng = kzalloc(generic_size, GFP_KERNEL);
78 if (ng)
79 ng->s.len = gen_ptrs;
80
81 return ng;
82 }
83
84 static int net_assign_generic(struct net *net, unsigned int id, void *data)
85 {
86 struct net_generic *ng, *old_ng;
87
88 BUG_ON(id < MIN_PERNET_OPS_ID);
89
90 old_ng = rcu_dereference_protected(net->gen,
91 lockdep_is_held(&pernet_ops_rwsem));
92 if (old_ng->s.len > id) {
93 old_ng->ptr[id] = data;
94 return 0;
95 }
96
97 ng = net_alloc_generic();
98 if (!ng)
99 return -ENOMEM;
100
101 /*
102 * Some synchronisation notes:
103 *
104 * The net_generic explores the net->gen array inside rcu
105 * read section. Besides once set the net->gen->ptr[x]
106 * pointer never changes (see rules in netns/generic.h).
107 *
108 * That said, we simply duplicate this array and schedule
109 * the old copy for kfree after a grace period.
110 */
111
112 memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
113 (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
114 ng->ptr[id] = data;
115
116 rcu_assign_pointer(net->gen, ng);
117 kfree_rcu(old_ng, s.rcu);
118 return 0;
119 }
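/*
 * A minimal reader-side sketch for the synchronisation notes above
 * (illustrative only; "foo_net_id" and "struct foo_net" are hypothetical
 * names, not part of this file). Consumers fetch their slot through
 * net_generic() from <net/netns/generic.h>, which dereferences net->gen
 * under RCU, so the pointer swap done here stays invisible to them:
 *
 *	static unsigned int foo_net_id __read_mostly;
 *
 *	static struct foo_net *foo_pernet(struct net *net)
 *	{
 *		return net_generic(net, foo_net_id);
 *	}
 */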
120
121 static int ops_init(const struct pernet_operations *ops, struct net *net)
122 {
123 struct net_generic *ng;
124 int err = -ENOMEM;
125 void *data = NULL;
126
127 if (ops->id) {
128 data = kzalloc(ops->size, GFP_KERNEL);
129 if (!data)
130 goto out;
131
132 err = net_assign_generic(net, *ops->id, data);
133 if (err)
134 goto cleanup;
135 }
136 err = 0;
137 if (ops->init)
138 err = ops->init(net);
139 if (!err)
140 return 0;
141
142 if (ops->id) {
143 ng = rcu_dereference_protected(net->gen,
144 lockdep_is_held(&pernet_ops_rwsem));
145 ng->ptr[*ops->id] = NULL;
146 }
147
148 cleanup:
149 kfree(data);
150
151 out:
152 return err;
153 }
154
155 static void ops_pre_exit_list(const struct pernet_operations *ops,
156 struct list_head *net_exit_list)
157 {
158 struct net *net;
159
160 if (ops->pre_exit) {
161 list_for_each_entry(net, net_exit_list, exit_list)
162 ops->pre_exit(net);
163 }
164 }
165
166 static void ops_exit_list(const struct pernet_operations *ops,
167 struct list_head *net_exit_list)
168 {
169 struct net *net;
170 if (ops->exit) {
171 list_for_each_entry(net, net_exit_list, exit_list) {
172 ops->exit(net);
173 cond_resched();
174 }
175 }
176 if (ops->exit_batch)
177 ops->exit_batch(net_exit_list);
178 }
179
180 static void ops_free_list(const struct pernet_operations *ops,
181 struct list_head *net_exit_list)
182 {
183 struct net *net;
184
185 if (ops->id) {
186 list_for_each_entry(net, net_exit_list, exit_list)
187 kfree(net_generic(net, *ops->id));
188 }
189 }
190
191 /* should be called with nsid_lock held */
192 static int alloc_netid(struct net *net, struct net *peer, int reqid)
193 {
194 int min = 0, max = 0;
195
196 if (reqid >= 0) {
197 min = reqid;
198 max = reqid + 1;
199 }
200
201 return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
202 }
203
204 /* This function is used by idr_for_each(). If net is equal to peer, the
205 * function returns the id so that idr_for_each() stops. Because we cannot
206  * return the id 0 (idr_for_each() will not stop), we return the magic value
207 * NET_ID_ZERO (-1) for it.
208 */
209 #define NET_ID_ZERO -1
210 static int net_eq_idr(int id, void *net, void *peer)
211 {
212 if (net_eq(net, peer))
213 return id ? : NET_ID_ZERO;
214 return 0;
215 }
216
217 /* Must be called from RCU-critical section or with nsid_lock held */
218 static int __peernet2id(const struct net *net, struct net *peer)
219 {
220 int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
221
222 /* Magic value for id 0. */
223 if (id == NET_ID_ZERO)
224 return 0;
225 if (id > 0)
226 return id;
227
228 return NETNSA_NSID_NOT_ASSIGNED;
229 }
230
231 static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
232 struct nlmsghdr *nlh, gfp_t gfp);
233 /* This function returns the id of a peer netns. If no id is assigned, one will
234 * be allocated and returned.
235 */
236 int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
237 {
238 int id;
239
240 if (refcount_read(&net->ns.count) == 0)
241 return NETNSA_NSID_NOT_ASSIGNED;
242
243 spin_lock_bh(&net->nsid_lock);
244 id = __peernet2id(net, peer);
245 if (id >= 0) {
246 spin_unlock_bh(&net->nsid_lock);
247 return id;
248 }
249
250 /* When peer is obtained from RCU lists, we may race with
251 * its cleanup. Check whether it's alive, and this guarantees
252 * we never hash a peer back to net->netns_ids, after it has
253 * just been idr_remove()'d from there in cleanup_net().
254 */
255 if (!maybe_get_net(peer)) {
256 spin_unlock_bh(&net->nsid_lock);
257 return NETNSA_NSID_NOT_ASSIGNED;
258 }
259
260 id = alloc_netid(net, peer, -1);
261 spin_unlock_bh(&net->nsid_lock);
262
263 put_net(peer);
264 if (id < 0)
265 return NETNSA_NSID_NOT_ASSIGNED;
266
267 rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL, gfp);
268
269 return id;
270 }
271 EXPORT_SYMBOL_GPL(peernet2id_alloc);
272
273 /* This function returns, if assigned, the id of a peer netns. */
274 int peernet2id(const struct net *net, struct net *peer)
275 {
276 int id;
277
278 rcu_read_lock();
279 id = __peernet2id(net, peer);
280 rcu_read_unlock();
281
282 return id;
283 }
284 EXPORT_SYMBOL(peernet2id);
285
286 /* This function returns true if the peer netns has an id assigned in the
287 * current netns.
288 */
289 bool peernet_has_id(const struct net *net, struct net *peer)
290 {
291 return peernet2id(net, peer) >= 0;
292 }
293
294 struct net *get_net_ns_by_id(const struct net *net, int id)
295 {
296 struct net *peer;
297
298 if (id < 0)
299 return NULL;
300
301 rcu_read_lock();
302 peer = idr_find(&net->netns_ids, id);
303 if (peer)
304 peer = maybe_get_net(peer);
305 rcu_read_unlock();
306
307 return peer;
308 }
309 EXPORT_SYMBOL_GPL(get_net_ns_by_id);
310
311 static __net_init void preinit_net_sysctl(struct net *net)
312 {
313 net->core.sysctl_somaxconn = SOMAXCONN;
314 /* Limits per socket sk_omem_alloc usage.
315 * TCP zerocopy regular usage needs 128 KB.
316 */
317 net->core.sysctl_optmem_max = 128 * 1024;
318 net->core.sysctl_txrehash = SOCK_TXREHASH_ENABLED;
319 net->core.sysctl_tstamp_allow_data = 1;
320 }
321
322 /* init code that must occur even if setup_net() is not called. */
323 static __net_init void preinit_net(struct net *net, struct user_namespace *user_ns)
324 {
325 refcount_set(&net->passive, 1);
326 refcount_set(&net->ns.count, 1);
327 ref_tracker_dir_init(&net->refcnt_tracker, 128, "net refcnt");
328 ref_tracker_dir_init(&net->notrefcnt_tracker, 128, "net notrefcnt");
329
330 get_random_bytes(&net->hash_mix, sizeof(u32));
331 net->dev_base_seq = 1;
332 net->user_ns = user_ns;
333
334 idr_init(&net->netns_ids);
335 spin_lock_init(&net->nsid_lock);
336 mutex_init(&net->ipv4.ra_mutex);
337
338 #ifdef CONFIG_DEBUG_NET_SMALL_RTNL
339 mutex_init(&net->rtnl_mutex);
340 lock_set_cmp_fn(&net->rtnl_mutex, rtnl_net_lock_cmp_fn, NULL);
341 #endif
342
343 preinit_net_sysctl(net);
344 }
345
346 /*
347 * setup_net runs the initializers for the network namespace object.
348 */
349 static __net_init int setup_net(struct net *net)
350 {
351 /* Must be called with pernet_ops_rwsem held */
352 const struct pernet_operations *ops, *saved_ops;
353 LIST_HEAD(net_exit_list);
354 LIST_HEAD(dev_kill_list);
355 int error = 0;
356
357 preempt_disable();
358 net->net_cookie = gen_cookie_next(&net_cookie);
359 preempt_enable();
360
361 list_for_each_entry(ops, &pernet_list, list) {
362 error = ops_init(ops, net);
363 if (error < 0)
364 goto out_undo;
365 }
366 down_write(&net_rwsem);
367 list_add_tail_rcu(&net->list, &net_namespace_list);
368 up_write(&net_rwsem);
369 out:
370 return error;
371
372 out_undo:
373 /* Walk through the list backwards calling the exit functions
374 * for the pernet modules whose init functions did not fail.
375 */
376 list_add(&net->exit_list, &net_exit_list);
377 saved_ops = ops;
378 list_for_each_entry_continue_reverse(ops, &pernet_list, list)
379 ops_pre_exit_list(ops, &net_exit_list);
380
381 synchronize_rcu();
382
383 ops = saved_ops;
384 rtnl_lock();
385 list_for_each_entry_continue_reverse(ops, &pernet_list, list) {
386 if (ops->exit_batch_rtnl)
387 ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list);
388 }
389 unregister_netdevice_many(&dev_kill_list);
390 rtnl_unlock();
391
392 ops = saved_ops;
393 list_for_each_entry_continue_reverse(ops, &pernet_list, list)
394 ops_exit_list(ops, &net_exit_list);
395
396 ops = saved_ops;
397 list_for_each_entry_continue_reverse(ops, &pernet_list, list)
398 ops_free_list(ops, &net_exit_list);
399
400 rcu_barrier();
401 goto out;
402 }
403
404 #ifdef CONFIG_NET_NS
405 static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
406 {
407 return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
408 }
409
410 static void dec_net_namespaces(struct ucounts *ucounts)
411 {
412 dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
413 }
414
415 static struct kmem_cache *net_cachep __ro_after_init;
416 static struct workqueue_struct *netns_wq;
417
418 static struct net *net_alloc(void)
419 {
420 struct net *net = NULL;
421 struct net_generic *ng;
422
423 ng = net_alloc_generic();
424 if (!ng)
425 goto out;
426
427 net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
428 if (!net)
429 goto out_free;
430
431 #ifdef CONFIG_KEYS
432 net->key_domain = kzalloc(sizeof(struct key_tag), GFP_KERNEL);
433 if (!net->key_domain)
434 goto out_free_2;
435 refcount_set(&net->key_domain->usage, 1);
436 #endif
437
438 rcu_assign_pointer(net->gen, ng);
439 out:
440 return net;
441
442 #ifdef CONFIG_KEYS
443 out_free_2:
444 kmem_cache_free(net_cachep, net);
445 net = NULL;
446 #endif
447 out_free:
448 kfree(ng);
449 goto out;
450 }
451
452 static LLIST_HEAD(defer_free_list);
453
454 static void net_complete_free(void)
455 {
456 struct llist_node *kill_list;
457 struct net *net, *next;
458
459 /* Get the list of namespaces to free from last round. */
460 kill_list = llist_del_all(&defer_free_list);
461
462 llist_for_each_entry_safe(net, next, kill_list, defer_free_list)
463 kmem_cache_free(net_cachep, net);
464
465 }
466
467 static void net_free(struct net *net)
468 {
469 if (refcount_dec_and_test(&net->passive)) {
470 kfree(rcu_access_pointer(net->gen));
471
472 /* There should not be any trackers left there. */
473 ref_tracker_dir_exit(&net->notrefcnt_tracker);
474
475 /* Wait for an extra rcu_barrier() before final free. */
476 llist_add(&net->defer_free_list, &defer_free_list);
477 }
478 }
479
480 void net_drop_ns(void *p)
481 {
482 struct net *net = (struct net *)p;
483
484 if (net)
485 net_free(net);
486 }
487
488 struct net *copy_net_ns(unsigned long flags,
489 struct user_namespace *user_ns, struct net *old_net)
490 {
491 struct ucounts *ucounts;
492 struct net *net;
493 int rv;
494
495 if (!(flags & CLONE_NEWNET))
496 return get_net(old_net);
497
498 ucounts = inc_net_namespaces(user_ns);
499 if (!ucounts)
500 return ERR_PTR(-ENOSPC);
501
502 net = net_alloc();
503 if (!net) {
504 rv = -ENOMEM;
505 goto dec_ucounts;
506 }
507
508 preinit_net(net, user_ns);
509 net->ucounts = ucounts;
510 get_user_ns(user_ns);
511
512 rv = down_read_killable(&pernet_ops_rwsem);
513 if (rv < 0)
514 goto put_userns;
515
516 rv = setup_net(net);
517
518 up_read(&pernet_ops_rwsem);
519
520 if (rv < 0) {
521 put_userns:
522 #ifdef CONFIG_KEYS
523 key_remove_domain(net->key_domain);
524 #endif
525 put_user_ns(user_ns);
526 net_free(net);
527 dec_ucounts:
528 dec_net_namespaces(ucounts);
529 return ERR_PTR(rv);
530 }
531 return net;
532 }
533
534 /**
535 * net_ns_get_ownership - get sysfs ownership data for @net
536 * @net: network namespace in question (can be NULL)
537 * @uid: kernel user ID for sysfs objects
538 * @gid: kernel group ID for sysfs objects
539 *
540 * Returns the uid/gid pair of root in the user namespace associated with the
541 * given network namespace.
542 */
543 void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid)
544 {
545 if (net) {
546 kuid_t ns_root_uid = make_kuid(net->user_ns, 0);
547 kgid_t ns_root_gid = make_kgid(net->user_ns, 0);
548
549 if (uid_valid(ns_root_uid))
550 *uid = ns_root_uid;
551
552 if (gid_valid(ns_root_gid))
553 *gid = ns_root_gid;
554 } else {
555 *uid = GLOBAL_ROOT_UID;
556 *gid = GLOBAL_ROOT_GID;
557 }
558 }
559 EXPORT_SYMBOL_GPL(net_ns_get_ownership);
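/*
 * Illustrative caller sketch (hypothetical, not taken from this file):
 * sysfs-facing code maps the namespace's root uid/gid into kernel ids
 * before applying ownership to the objects it exposes.
 *
 *	kuid_t uid;
 *	kgid_t gid;
 *
 *	net_ns_get_ownership(dev_net(dev), &uid, &gid);
 *	... then apply uid/gid to the corresponding sysfs entries ...
 */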
560
561 static void unhash_nsid(struct net *net, struct net *last)
562 {
563 struct net *tmp;
564 /* This function is only called from cleanup_net() work,
565  * and this work is the only process that may delete
566 * a net from net_namespace_list. So, when the below
567 * is executing, the list may only grow. Thus, we do not
568 * use for_each_net_rcu() or net_rwsem.
569 */
570 for_each_net(tmp) {
571 int id;
572
573 spin_lock_bh(&tmp->nsid_lock);
574 id = __peernet2id(tmp, net);
575 if (id >= 0)
576 idr_remove(&tmp->netns_ids, id);
577 spin_unlock_bh(&tmp->nsid_lock);
578 if (id >= 0)
579 rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
580 GFP_KERNEL);
581 if (tmp == last)
582 break;
583 }
584 spin_lock_bh(&net->nsid_lock);
585 idr_destroy(&net->netns_ids);
586 spin_unlock_bh(&net->nsid_lock);
587 }
588
589 static LLIST_HEAD(cleanup_list);
590
591 static void cleanup_net(struct work_struct *work)
592 {
593 const struct pernet_operations *ops;
594 struct net *net, *tmp, *last;
595 struct llist_node *net_kill_list;
596 LIST_HEAD(net_exit_list);
597 LIST_HEAD(dev_kill_list);
598
599 /* Atomically snapshot the list of namespaces to clean up */
600 net_kill_list = llist_del_all(&cleanup_list);
601
602 down_read(&pernet_ops_rwsem);
603
604 /* Don't let anyone else find us. */
605 down_write(&net_rwsem);
606 llist_for_each_entry(net, net_kill_list, cleanup_list)
607 list_del_rcu(&net->list);
608 /* Cache the last net. After we unlock net_rwsem, no new net
609  * added to net_namespace_list can assign an nsid to a net
610  * from net_kill_list (see peernet2id_alloc()).
611 * So, we skip them in unhash_nsid().
612 *
613  * Note that unhash_nsid() does not delete nsid links
614  * between net_kill_list's nets, as they've already been
615  * deleted from net_namespace_list. But this would be
616 * useless anyway, as netns_ids are destroyed there.
617 */
618 last = list_last_entry(&net_namespace_list, struct net, list);
619 up_write(&net_rwsem);
620
621 llist_for_each_entry(net, net_kill_list, cleanup_list) {
622 unhash_nsid(net, last);
623 list_add_tail(&net->exit_list, &net_exit_list);
624 }
625
626 /* Run all of the network namespace pre_exit methods */
627 list_for_each_entry_reverse(ops, &pernet_list, list)
628 ops_pre_exit_list(ops, &net_exit_list);
629
630 /*
631 * Another CPU might be rcu-iterating the list, wait for it.
632 * This needs to be before calling the exit() notifiers, so
633 * the rcu_barrier() below isn't sufficient alone.
634 * Also the pre_exit() and exit() methods need this barrier.
635 */
636 synchronize_rcu_expedited();
637
638 rtnl_lock();
639 list_for_each_entry_reverse(ops, &pernet_list, list) {
640 if (ops->exit_batch_rtnl)
641 ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list);
642 }
643 unregister_netdevice_many(&dev_kill_list);
644 rtnl_unlock();
645
646 /* Run all of the network namespace exit methods */
647 list_for_each_entry_reverse(ops, &pernet_list, list)
648 ops_exit_list(ops, &net_exit_list);
649
650 /* Free the net generic variables */
651 list_for_each_entry_reverse(ops, &pernet_list, list)
652 ops_free_list(ops, &net_exit_list);
653
654 up_read(&pernet_ops_rwsem);
655
656 /* Ensure there are no outstanding rcu callbacks using this
657 * network namespace.
658 */
659 rcu_barrier();
660
661 net_complete_free();
662
663 /* Finally it is safe to free my network namespace structure */
664 list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
665 list_del_init(&net->exit_list);
666 dec_net_namespaces(net->ucounts);
667 #ifdef CONFIG_KEYS
668 key_remove_domain(net->key_domain);
669 #endif
670 put_user_ns(net->user_ns);
671 net_free(net);
672 }
673 }
674
675 /**
676 * net_ns_barrier - wait until concurrent net_cleanup_work is done
677 *
678 * cleanup_net runs from work queue and will first remove namespaces
679 * from the global list, then run net exit functions.
680 *
681 * Call this in module exit path to make sure that all netns
682 * ->exit ops have been invoked before the function is removed.
683 */
684 void net_ns_barrier(void)
685 {
686 down_write(&pernet_ops_rwsem);
687 up_write(&pernet_ops_rwsem);
688 }
689 EXPORT_SYMBOL(net_ns_barrier);
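/*
 * Illustrative module-exit sketch of the pattern described above
 * (hypothetical foo_* names, not an in-tree user): wait for any in-flight
 * cleanup_net() work before tearing down state that pernet ->exit
 * handlers may still touch.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		net_ns_barrier();
 *		foo_free_global_state();
 *	}
 */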
690
691 static DECLARE_WORK(net_cleanup_work, cleanup_net);
692
693 void __put_net(struct net *net)
694 {
695 ref_tracker_dir_exit(&net->refcnt_tracker);
696 /* Cleanup the network namespace in process context */
697 if (llist_add(&net->cleanup_list, &cleanup_list))
698 queue_work(netns_wq, &net_cleanup_work);
699 }
700 EXPORT_SYMBOL_GPL(__put_net);
701
702 /**
703 * get_net_ns - increment the refcount of the network namespace
704 * @ns: common namespace (net)
705 *
706 * Returns the net's common namespace or ERR_PTR() if ref is zero.
707 */
708 struct ns_common *get_net_ns(struct ns_common *ns)
709 {
710 struct net *net;
711
712 net = maybe_get_net(container_of(ns, struct net, ns));
713 if (net)
714 return &net->ns;
715 return ERR_PTR(-EINVAL);
716 }
717 EXPORT_SYMBOL_GPL(get_net_ns);
718
719 struct net *get_net_ns_by_fd(int fd)
720 {
721 CLASS(fd, f)(fd);
722
723 if (fd_empty(f))
724 return ERR_PTR(-EBADF);
725
726 if (proc_ns_file(fd_file(f))) {
727 struct ns_common *ns = get_proc_ns(file_inode(fd_file(f)));
728 if (ns->ops == &netns_operations)
729 return get_net(container_of(ns, struct net, ns));
730 }
731
732 return ERR_PTR(-EINVAL);
733 }
734 EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
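/*
 * Illustrative caller sketch (not from this file): resolve a namespace
 * from a userspace-supplied fd and drop the reference when done.
 *
 *	struct net *net = get_net_ns_by_fd(fd);
 *
 *	if (IS_ERR(net))
 *		return PTR_ERR(net);
 *	... operate on the namespace ...
 *	put_net(net);
 */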
735 #endif
736
737 struct net *get_net_ns_by_pid(pid_t pid)
738 {
739 struct task_struct *tsk;
740 struct net *net;
741
742 /* Lookup the network namespace */
743 net = ERR_PTR(-ESRCH);
744 rcu_read_lock();
745 tsk = find_task_by_vpid(pid);
746 if (tsk) {
747 struct nsproxy *nsproxy;
748 task_lock(tsk);
749 nsproxy = tsk->nsproxy;
750 if (nsproxy)
751 net = get_net(nsproxy->net_ns);
752 task_unlock(tsk);
753 }
754 rcu_read_unlock();
755 return net;
756 }
757 EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
758
759 static __net_init int net_ns_net_init(struct net *net)
760 {
761 #ifdef CONFIG_NET_NS
762 net->ns.ops = &netns_operations;
763 #endif
764 return ns_alloc_inum(&net->ns);
765 }
766
767 static __net_exit void net_ns_net_exit(struct net *net)
768 {
769 ns_free_inum(&net->ns);
770 }
771
772 static struct pernet_operations __net_initdata net_ns_ops = {
773 .init = net_ns_net_init,
774 .exit = net_ns_net_exit,
775 };
776
777 static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
778 [NETNSA_NONE] = { .type = NLA_UNSPEC },
779 [NETNSA_NSID] = { .type = NLA_S32 },
780 [NETNSA_PID] = { .type = NLA_U32 },
781 [NETNSA_FD] = { .type = NLA_U32 },
782 [NETNSA_TARGET_NSID] = { .type = NLA_S32 },
783 };
784
785 static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
786 struct netlink_ext_ack *extack)
787 {
788 struct net *net = sock_net(skb->sk);
789 struct nlattr *tb[NETNSA_MAX + 1];
790 struct nlattr *nla;
791 struct net *peer;
792 int nsid, err;
793
794 err = nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg), tb,
795 NETNSA_MAX, rtnl_net_policy, extack);
796 if (err < 0)
797 return err;
798 if (!tb[NETNSA_NSID]) {
799 NL_SET_ERR_MSG(extack, "nsid is missing");
800 return -EINVAL;
801 }
802 nsid = nla_get_s32(tb[NETNSA_NSID]);
803
804 if (tb[NETNSA_PID]) {
805 peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
806 nla = tb[NETNSA_PID];
807 } else if (tb[NETNSA_FD]) {
808 peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
809 nla = tb[NETNSA_FD];
810 } else {
811 NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
812 return -EINVAL;
813 }
814 if (IS_ERR(peer)) {
815 NL_SET_BAD_ATTR(extack, nla);
816 NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
817 return PTR_ERR(peer);
818 }
819
820 spin_lock_bh(&net->nsid_lock);
821 if (__peernet2id(net, peer) >= 0) {
822 spin_unlock_bh(&net->nsid_lock);
823 err = -EEXIST;
824 NL_SET_BAD_ATTR(extack, nla);
825 NL_SET_ERR_MSG(extack,
826 "Peer netns already has a nsid assigned");
827 goto out;
828 }
829
830 err = alloc_netid(net, peer, nsid);
831 spin_unlock_bh(&net->nsid_lock);
832 if (err >= 0) {
833 rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
834 nlh, GFP_KERNEL);
835 err = 0;
836 } else if (err == -ENOSPC && nsid >= 0) {
837 err = -EEXIST;
838 NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
839 NL_SET_ERR_MSG(extack, "The specified nsid is already used");
840 }
841 out:
842 put_net(peer);
843 return err;
844 }
845
846 static int rtnl_net_get_size(void)
847 {
848 return NLMSG_ALIGN(sizeof(struct rtgenmsg))
849 + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
850 + nla_total_size(sizeof(s32)) /* NETNSA_CURRENT_NSID */
851 ;
852 }
853
854 struct net_fill_args {
855 u32 portid;
856 u32 seq;
857 int flags;
858 int cmd;
859 int nsid;
860 bool add_ref;
861 int ref_nsid;
862 };
863
864 static int rtnl_net_fill(struct sk_buff *skb, struct net_fill_args *args)
865 {
866 struct nlmsghdr *nlh;
867 struct rtgenmsg *rth;
868
869 nlh = nlmsg_put(skb, args->portid, args->seq, args->cmd, sizeof(*rth),
870 args->flags);
871 if (!nlh)
872 return -EMSGSIZE;
873
874 rth = nlmsg_data(nlh);
875 rth->rtgen_family = AF_UNSPEC;
876
877 if (nla_put_s32(skb, NETNSA_NSID, args->nsid))
878 goto nla_put_failure;
879
880 if (args->add_ref &&
881 nla_put_s32(skb, NETNSA_CURRENT_NSID, args->ref_nsid))
882 goto nla_put_failure;
883
884 nlmsg_end(skb, nlh);
885 return 0;
886
887 nla_put_failure:
888 nlmsg_cancel(skb, nlh);
889 return -EMSGSIZE;
890 }
891
892 static int rtnl_net_valid_getid_req(struct sk_buff *skb,
893 const struct nlmsghdr *nlh,
894 struct nlattr **tb,
895 struct netlink_ext_ack *extack)
896 {
897 int i, err;
898
899 if (!netlink_strict_get_check(skb))
900 return nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg),
901 tb, NETNSA_MAX, rtnl_net_policy,
902 extack);
903
904 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
905 NETNSA_MAX, rtnl_net_policy,
906 extack);
907 if (err)
908 return err;
909
910 for (i = 0; i <= NETNSA_MAX; i++) {
911 if (!tb[i])
912 continue;
913
914 switch (i) {
915 case NETNSA_PID:
916 case NETNSA_FD:
917 case NETNSA_NSID:
918 case NETNSA_TARGET_NSID:
919 break;
920 default:
921 NL_SET_ERR_MSG(extack, "Unsupported attribute in peer netns getid request");
922 return -EINVAL;
923 }
924 }
925
926 return 0;
927 }
928
929 static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
930 struct netlink_ext_ack *extack)
931 {
932 struct net *net = sock_net(skb->sk);
933 struct nlattr *tb[NETNSA_MAX + 1];
934 struct net_fill_args fillargs = {
935 .portid = NETLINK_CB(skb).portid,
936 .seq = nlh->nlmsg_seq,
937 .cmd = RTM_NEWNSID,
938 };
939 struct net *peer, *target = net;
940 struct nlattr *nla;
941 struct sk_buff *msg;
942 int err;
943
944 err = rtnl_net_valid_getid_req(skb, nlh, tb, extack);
945 if (err < 0)
946 return err;
947 if (tb[NETNSA_PID]) {
948 peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
949 nla = tb[NETNSA_PID];
950 } else if (tb[NETNSA_FD]) {
951 peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
952 nla = tb[NETNSA_FD];
953 } else if (tb[NETNSA_NSID]) {
954 peer = get_net_ns_by_id(net, nla_get_s32(tb[NETNSA_NSID]));
955 if (!peer)
956 peer = ERR_PTR(-ENOENT);
957 nla = tb[NETNSA_NSID];
958 } else {
959 NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
960 return -EINVAL;
961 }
962
963 if (IS_ERR(peer)) {
964 NL_SET_BAD_ATTR(extack, nla);
965 NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
966 return PTR_ERR(peer);
967 }
968
969 if (tb[NETNSA_TARGET_NSID]) {
970 int id = nla_get_s32(tb[NETNSA_TARGET_NSID]);
971
972 target = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, id);
973 if (IS_ERR(target)) {
974 NL_SET_BAD_ATTR(extack, tb[NETNSA_TARGET_NSID]);
975 NL_SET_ERR_MSG(extack,
976 "Target netns reference is invalid");
977 err = PTR_ERR(target);
978 goto out;
979 }
980 fillargs.add_ref = true;
981 fillargs.ref_nsid = peernet2id(net, peer);
982 }
983
984 msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
985 if (!msg) {
986 err = -ENOMEM;
987 goto out;
988 }
989
990 fillargs.nsid = peernet2id(target, peer);
991 err = rtnl_net_fill(msg, &fillargs);
992 if (err < 0)
993 goto err_out;
994
995 err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
996 goto out;
997
998 err_out:
999 nlmsg_free(msg);
1000 out:
1001 if (fillargs.add_ref)
1002 put_net(target);
1003 put_net(peer);
1004 return err;
1005 }
1006
1007 struct rtnl_net_dump_cb {
1008 struct net *tgt_net;
1009 struct net *ref_net;
1010 struct sk_buff *skb;
1011 struct net_fill_args fillargs;
1012 int idx;
1013 int s_idx;
1014 };
1015
1016 /* Runs in RCU-critical section. */
1017 static int rtnl_net_dumpid_one(int id, void *peer, void *data)
1018 {
1019 struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
1020 int ret;
1021
1022 if (net_cb->idx < net_cb->s_idx)
1023 goto cont;
1024
1025 net_cb->fillargs.nsid = id;
1026 if (net_cb->fillargs.add_ref)
1027 net_cb->fillargs.ref_nsid = __peernet2id(net_cb->ref_net, peer);
1028 ret = rtnl_net_fill(net_cb->skb, &net_cb->fillargs);
1029 if (ret < 0)
1030 return ret;
1031
1032 cont:
1033 net_cb->idx++;
1034 return 0;
1035 }
1036
1037 static int rtnl_valid_dump_net_req(const struct nlmsghdr *nlh, struct sock *sk,
1038 struct rtnl_net_dump_cb *net_cb,
1039 struct netlink_callback *cb)
1040 {
1041 struct netlink_ext_ack *extack = cb->extack;
1042 struct nlattr *tb[NETNSA_MAX + 1];
1043 int err, i;
1044
1045 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
1046 NETNSA_MAX, rtnl_net_policy,
1047 extack);
1048 if (err < 0)
1049 return err;
1050
1051 for (i = 0; i <= NETNSA_MAX; i++) {
1052 if (!tb[i])
1053 continue;
1054
1055 if (i == NETNSA_TARGET_NSID) {
1056 struct net *net;
1057
1058 net = rtnl_get_net_ns_capable(sk, nla_get_s32(tb[i]));
1059 if (IS_ERR(net)) {
1060 NL_SET_BAD_ATTR(extack, tb[i]);
1061 NL_SET_ERR_MSG(extack,
1062 "Invalid target network namespace id");
1063 return PTR_ERR(net);
1064 }
1065 net_cb->fillargs.add_ref = true;
1066 net_cb->ref_net = net_cb->tgt_net;
1067 net_cb->tgt_net = net;
1068 } else {
1069 NL_SET_BAD_ATTR(extack, tb[i]);
1070 NL_SET_ERR_MSG(extack,
1071 "Unsupported attribute in dump request");
1072 return -EINVAL;
1073 }
1074 }
1075
1076 return 0;
1077 }
1078
1079 static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
1080 {
1081 struct rtnl_net_dump_cb net_cb = {
1082 .tgt_net = sock_net(skb->sk),
1083 .skb = skb,
1084 .fillargs = {
1085 .portid = NETLINK_CB(cb->skb).portid,
1086 .seq = cb->nlh->nlmsg_seq,
1087 .flags = NLM_F_MULTI,
1088 .cmd = RTM_NEWNSID,
1089 },
1090 .idx = 0,
1091 .s_idx = cb->args[0],
1092 };
1093 int err = 0;
1094
1095 if (cb->strict_check) {
1096 err = rtnl_valid_dump_net_req(cb->nlh, skb->sk, &net_cb, cb);
1097 if (err < 0)
1098 goto end;
1099 }
1100
1101 rcu_read_lock();
1102 idr_for_each(&net_cb.tgt_net->netns_ids, rtnl_net_dumpid_one, &net_cb);
1103 rcu_read_unlock();
1104
1105 cb->args[0] = net_cb.idx;
1106 end:
1107 if (net_cb.fillargs.add_ref)
1108 put_net(net_cb.tgt_net);
1109 return err;
1110 }
1111
1112 static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
1113 struct nlmsghdr *nlh, gfp_t gfp)
1114 {
1115 struct net_fill_args fillargs = {
1116 .portid = portid,
1117 .seq = nlh ? nlh->nlmsg_seq : 0,
1118 .cmd = cmd,
1119 .nsid = id,
1120 };
1121 struct sk_buff *msg;
1122 int err = -ENOMEM;
1123
1124 msg = nlmsg_new(rtnl_net_get_size(), gfp);
1125 if (!msg)
1126 goto out;
1127
1128 err = rtnl_net_fill(msg, &fillargs);
1129 if (err < 0)
1130 goto err_out;
1131
1132 rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, gfp);
1133 return;
1134
1135 err_out:
1136 nlmsg_free(msg);
1137 out:
1138 rtnl_set_sk_err(net, RTNLGRP_NSID, err);
1139 }
1140
1141 #ifdef CONFIG_NET_NS
1142 static void __init netns_ipv4_struct_check(void)
1143 {
1144 /* TX readonly hotpath cache lines */
1145 CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1146 sysctl_tcp_early_retrans);
1147 CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1148 sysctl_tcp_tso_win_divisor);
1149 CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1150 sysctl_tcp_tso_rtt_log);
1151 CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1152 sysctl_tcp_autocorking);
1153 CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1154 sysctl_tcp_min_snd_mss);
1155 CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1156 sysctl_tcp_notsent_lowat);
1157 CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1158 sysctl_tcp_limit_output_bytes);
1159 CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1160 sysctl_tcp_min_rtt_wlen);
1161 CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1162 sysctl_tcp_wmem);
1163 CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1164 sysctl_ip_fwd_use_pmtu);
1165 CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_tx, 33);
1166
1167 /* TXRX readonly hotpath cache lines */
1168 CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_txrx,
1169 sysctl_tcp_moderate_rcvbuf);
1170 CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_txrx, 1);
1171
1172 /* RX readonly hotpath cache line */
1173 CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
1174 sysctl_ip_early_demux);
1175 CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
1176 sysctl_tcp_early_demux);
1177 CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
1178 sysctl_tcp_l3mdev_accept);
1179 CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
1180 sysctl_tcp_reordering);
1181 CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
1182 sysctl_tcp_rmem);
1183 CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_rx, 22);
1184 }
1185 #endif
1186
1187 static const struct rtnl_msg_handler net_ns_rtnl_msg_handlers[] __initconst = {
1188 {.msgtype = RTM_NEWNSID, .doit = rtnl_net_newid,
1189 .flags = RTNL_FLAG_DOIT_UNLOCKED},
1190 {.msgtype = RTM_GETNSID, .doit = rtnl_net_getid,
1191 .dumpit = rtnl_net_dumpid,
1192 .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED},
1193 };
1194
1195 void __init net_ns_init(void)
1196 {
1197 struct net_generic *ng;
1198
1199 #ifdef CONFIG_NET_NS
1200 netns_ipv4_struct_check();
1201 net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
1202 SMP_CACHE_BYTES,
1203 SLAB_PANIC|SLAB_ACCOUNT, NULL);
1204
1205 /* Create workqueue for cleanup */
1206 netns_wq = create_singlethread_workqueue("netns");
1207 if (!netns_wq)
1208 panic("Could not create netns workq");
1209 #endif
1210
1211 ng = net_alloc_generic();
1212 if (!ng)
1213 panic("Could not allocate generic netns");
1214
1215 rcu_assign_pointer(init_net.gen, ng);
1216
1217 #ifdef CONFIG_KEYS
1218 init_net.key_domain = &init_net_key_domain;
1219 #endif
1220 preinit_net(&init_net, &init_user_ns);
1221
1222 down_write(&pernet_ops_rwsem);
1223 if (setup_net(&init_net))
1224 panic("Could not setup the initial network namespace");
1225
1226 init_net_initialized = true;
1227 up_write(&pernet_ops_rwsem);
1228
1229 if (register_pernet_subsys(&net_ns_ops))
1230 panic("Could not register network namespace subsystems");
1231
1232 rtnl_register_many(net_ns_rtnl_msg_handlers);
1233 }
1234
1235 static void free_exit_list(struct pernet_operations *ops, struct list_head *net_exit_list)
1236 {
1237 ops_pre_exit_list(ops, net_exit_list);
1238 synchronize_rcu();
1239
1240 if (ops->exit_batch_rtnl) {
1241 LIST_HEAD(dev_kill_list);
1242
1243 rtnl_lock();
1244 ops->exit_batch_rtnl(net_exit_list, &dev_kill_list);
1245 unregister_netdevice_many(&dev_kill_list);
1246 rtnl_unlock();
1247 }
1248 ops_exit_list(ops, net_exit_list);
1249
1250 ops_free_list(ops, net_exit_list);
1251 }
1252
1253 #ifdef CONFIG_NET_NS
1254 static int __register_pernet_operations(struct list_head *list,
1255 struct pernet_operations *ops)
1256 {
1257 struct net *net;
1258 int error;
1259 LIST_HEAD(net_exit_list);
1260
1261 list_add_tail(&ops->list, list);
1262 if (ops->init || ops->id) {
1263 /* We hold pernet_ops_rwsem write-locked, so parallel
1264 * setup_net() and cleanup_net() are not possible.
1265 */
1266 for_each_net(net) {
1267 error = ops_init(ops, net);
1268 if (error)
1269 goto out_undo;
1270 list_add_tail(&net->exit_list, &net_exit_list);
1271 }
1272 }
1273 return 0;
1274
1275 out_undo:
1276 /* If we hit an error, clean up all the namespaces we initialized */
1277 list_del(&ops->list);
1278 free_exit_list(ops, &net_exit_list);
1279 return error;
1280 }
1281
1282 static void __unregister_pernet_operations(struct pernet_operations *ops)
1283 {
1284 struct net *net;
1285 LIST_HEAD(net_exit_list);
1286
1287 list_del(&ops->list);
1288 /* See comment in __register_pernet_operations() */
1289 for_each_net(net)
1290 list_add_tail(&net->exit_list, &net_exit_list);
1291
1292 free_exit_list(ops, &net_exit_list);
1293 }
1294
1295 #else
1296
1297 static int __register_pernet_operations(struct list_head *list,
1298 struct pernet_operations *ops)
1299 {
1300 if (!init_net_initialized) {
1301 list_add_tail(&ops->list, list);
1302 return 0;
1303 }
1304
1305 return ops_init(ops, &init_net);
1306 }
1307
1308 static void __unregister_pernet_operations(struct pernet_operations *ops)
1309 {
1310 if (!init_net_initialized) {
1311 list_del(&ops->list);
1312 } else {
1313 LIST_HEAD(net_exit_list);
1314 list_add(&init_net.exit_list, &net_exit_list);
1315 free_exit_list(ops, &net_exit_list);
1316 }
1317 }
1318
1319 #endif /* CONFIG_NET_NS */
1320
1321 static DEFINE_IDA(net_generic_ids);
1322
1323 static int register_pernet_operations(struct list_head *list,
1324 struct pernet_operations *ops)
1325 {
1326 int error;
1327
1328 if (WARN_ON(!!ops->id ^ !!ops->size))
1329 return -EINVAL;
1330
1331 if (ops->id) {
1332 error = ida_alloc_min(&net_generic_ids, MIN_PERNET_OPS_ID,
1333 GFP_KERNEL);
1334 if (error < 0)
1335 return error;
1336 *ops->id = error;
1337 /* This does not require READ_ONCE as writers already hold
1338  * pernet_ops_rwsem. But WRITE_ONCE is needed to pair with
1339  * the READ_ONCE() in net_alloc_generic().
1340 */
1341 WRITE_ONCE(max_gen_ptrs, max(max_gen_ptrs, *ops->id + 1));
1342 }
1343 error = __register_pernet_operations(list, ops);
1344 if (error) {
1345 rcu_barrier();
1346 if (ops->id)
1347 ida_free(&net_generic_ids, *ops->id);
1348 }
1349
1350 return error;
1351 }
1352
1353 static void unregister_pernet_operations(struct pernet_operations *ops)
1354 {
1355 __unregister_pernet_operations(ops);
1356 rcu_barrier();
1357 if (ops->id)
1358 ida_free(&net_generic_ids, *ops->id);
1359 }
1360
1361 /**
1362 * register_pernet_subsys - register a network namespace subsystem
1363 * @ops: pernet operations structure for the subsystem
1364 *
1365 * Register a subsystem which has init and exit functions
1366 * that are called when network namespaces are created and
1367 * destroyed respectively.
1368 *
1369  * When registered, all network namespace init functions are
1370  * called for every existing network namespace, allowing kernel
1371  * modules to have a race-free view of the set of network namespaces.
1372 *
1373 * When a new network namespace is created all of the init
1374 * methods are called in the order in which they were registered.
1375 *
1376 * When a network namespace is destroyed all of the exit methods
1377 * are called in the reverse of the order with which they were
1378 * registered.
1379 */
1380 int register_pernet_subsys(struct pernet_operations *ops)
1381 {
1382 int error;
1383 down_write(&pernet_ops_rwsem);
1384 error = register_pernet_operations(first_device, ops);
1385 up_write(&pernet_ops_rwsem);
1386 return error;
1387 }
1388 EXPORT_SYMBOL_GPL(register_pernet_subsys);
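/*
 * A minimal, illustrative registration sketch (hypothetical foo_* names,
 * not an in-tree user): setting ->id and ->size asks ops_init() above to
 * allocate a zeroed per-namespace area and hook it into net_generic()
 * before ->init runs.
 *
 *	static unsigned int foo_net_id __read_mostly;
 *
 *	struct foo_net {
 *		int some_setting;
 *	};
 *
 *	static int __net_init foo_net_init(struct net *net)
 *	{
 *		struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *		fn->some_setting = 1;
 *		return 0;
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_net_init,
 *		.id   = &foo_net_id,
 *		.size = sizeof(struct foo_net),
 *	};
 *
 *	err = register_pernet_subsys(&foo_net_ops);
 */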
1389
1390 /**
1391 * unregister_pernet_subsys - unregister a network namespace subsystem
1392 * @ops: pernet operations structure to manipulate
1393 *
1394 * Remove the pernet operations structure from the list to be
1395 * used when network namespaces are created or destroyed. In
1396 * addition run the exit method for all existing network
1397 * namespaces.
1398 */
1399 void unregister_pernet_subsys(struct pernet_operations *ops)
1400 {
1401 down_write(&pernet_ops_rwsem);
1402 unregister_pernet_operations(ops);
1403 up_write(&pernet_ops_rwsem);
1404 }
1405 EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
1406
1407 /**
1408 * register_pernet_device - register a network namespace device
1409 * @ops: pernet operations structure for the subsystem
1410 *
1411 * Register a device which has init and exit functions
1412 * that are called when network namespaces are created and
1413 * destroyed respectively.
1414 *
1415  * When registered, all network namespace init functions are
1416  * called for every existing network namespace, allowing kernel
1417  * modules to have a race-free view of the set of network namespaces.
1418 *
1419 * When a new network namespace is created all of the init
1420 * methods are called in the order in which they were registered.
1421 *
1422 * When a network namespace is destroyed all of the exit methods
1423 * are called in the reverse of the order with which they were
1424 * registered.
1425 */
1426 int register_pernet_device(struct pernet_operations *ops)
1427 {
1428 int error;
1429 down_write(&pernet_ops_rwsem);
1430 error = register_pernet_operations(&pernet_list, ops);
1431 if (!error && (first_device == &pernet_list))
1432 first_device = &ops->list;
1433 up_write(&pernet_ops_rwsem);
1434 return error;
1435 }
1436 EXPORT_SYMBOL_GPL(register_pernet_device);
1437
1438 /**
1439 * unregister_pernet_device - unregister a network namespace netdevice
1440 * @ops: pernet operations structure to manipulate
1441 *
1442 * Remove the pernet operations structure from the list to be
1443 * used when network namespaces are created or destroyed. In
1444 * addition run the exit method for all existing network
1445 * namespaces.
1446 */
1447 void unregister_pernet_device(struct pernet_operations *ops)
1448 {
1449 down_write(&pernet_ops_rwsem);
1450 if (&ops->list == first_device)
1451 first_device = first_device->next;
1452 unregister_pernet_operations(ops);
1453 up_write(&pernet_ops_rwsem);
1454 }
1455 EXPORT_SYMBOL_GPL(unregister_pernet_device);
1456
1457 #ifdef CONFIG_NET_NS
1458 static struct ns_common *netns_get(struct task_struct *task)
1459 {
1460 struct net *net = NULL;
1461 struct nsproxy *nsproxy;
1462
1463 task_lock(task);
1464 nsproxy = task->nsproxy;
1465 if (nsproxy)
1466 net = get_net(nsproxy->net_ns);
1467 task_unlock(task);
1468
1469 return net ? &net->ns : NULL;
1470 }
1471
1472 static inline struct net *to_net_ns(struct ns_common *ns)
1473 {
1474 return container_of(ns, struct net, ns);
1475 }
1476
1477 static void netns_put(struct ns_common *ns)
1478 {
1479 put_net(to_net_ns(ns));
1480 }
1481
1482 static int netns_install(struct nsset *nsset, struct ns_common *ns)
1483 {
1484 struct nsproxy *nsproxy = nsset->nsproxy;
1485 struct net *net = to_net_ns(ns);
1486
1487 if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
1488 !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN))
1489 return -EPERM;
1490
1491 put_net(nsproxy->net_ns);
1492 nsproxy->net_ns = get_net(net);
1493 return 0;
1494 }
1495
1496 static struct user_namespace *netns_owner(struct ns_common *ns)
1497 {
1498 return to_net_ns(ns)->user_ns;
1499 }
1500
1501 const struct proc_ns_operations netns_operations = {
1502 .name = "net",
1503 .type = CLONE_NEWNET,
1504 .get = netns_get,
1505 .put = netns_put,
1506 .install = netns_install,
1507 .owner = netns_owner,
1508 };
1509 #endif
1510