// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>
#include <linux/uidgid.h>
#include <linux/cookie.h>
#include <linux/proc_fs.h>

#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 * Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
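/* first_device marks where pernet devices begin in pernet_list: subsystems are
 * inserted before it, so subsystem init runs before device init and device
 * exit runs before subsystem exit.
 */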

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

/* Protects net_namespace_list. Nests inside rtnl_lock() */
DECLARE_RWSEM(net_rwsem);
EXPORT_SYMBOL_GPL(net_rwsem);

#ifdef CONFIG_KEYS
static struct key_tag init_net_key_domain = { .usage = REFCOUNT_INIT(1) };
#endif

struct net init_net;
EXPORT_SYMBOL(init_net);

static bool init_net_initialized;
/*
 * pernet_ops_rwsem protects: pernet_list, net_generic_ids,
 * init_net_initialized and the first_device pointer.
 * This is an internal net namespace object. Please don't use it
 * outside.
 */
DECLARE_RWSEM(pernet_ops_rwsem);

#define MIN_PERNET_OPS_ID	\
	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

DEFINE_COOKIE(net_cookie);

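/* Allocate a zeroed net_generic array sized for the current max_gen_ptrs. */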
static struct net_generic *net_alloc_generic(void)
{
	unsigned int gen_ptrs = READ_ONCE(max_gen_ptrs);
	unsigned int generic_size;
	struct net_generic *ng;

	generic_size = offsetof(struct net_generic, ptr[gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->s.len = gen_ptrs;

	return ng;
}

static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(id < MIN_PERNET_OPS_ID);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&pernet_ops_rwsem));
	if (old_ng->s.len > id) {
		old_ng->ptr[id] = data;
		return 0;
	}

	ng = net_alloc_generic();
	if (!ng)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * net_generic() explores the net->gen array inside an RCU
	 * read-side section. Besides, once set, the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
	ng->ptr[id] = data;

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, s.rcu);
	return 0;
}

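/* Allocate the per-net data for @ops (if it has an id) and run its ->init()
 * hook for @net, undoing the allocation on failure.
 */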
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	struct net_generic *ng;
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

	if (ops->id) {
		ng = rcu_dereference_protected(net->gen,
					       lockdep_is_held(&pernet_ops_rwsem));
		ng->ptr[*ops->id] = NULL;
	}

cleanup:
	kfree(data);

out:
	return err;
}

static void ops_pre_exit_list(const struct pernet_operations *ops,
			      struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->pre_exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->pre_exit(net);
	}
}

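/* Run the ->exit_rtnl() hooks of the ops preceding @ops on @ops_list for each
 * net on @net_exit_list, under RTNL and the per-net RTNL lock, then unregister
 * the collected devices in one batch.
 */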
static void ops_exit_rtnl_list(const struct list_head *ops_list,
			       const struct pernet_operations *ops,
			       struct list_head *net_exit_list)
{
	const struct pernet_operations *saved_ops = ops;
	LIST_HEAD(dev_kill_list);
	struct net *net;

	rtnl_lock();

	list_for_each_entry(net, net_exit_list, exit_list) {
		__rtnl_net_lock(net);

		ops = saved_ops;
		list_for_each_entry_continue_reverse(ops, ops_list, list) {
			if (ops->exit_rtnl)
				ops->exit_rtnl(net, &dev_kill_list);
		}

		__rtnl_net_unlock(net);
	}

	unregister_netdevice_many(&dev_kill_list);

	rtnl_unlock();
}

static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	if (ops->exit) {
		struct net *net;

		list_for_each_entry(net, net_exit_list, exit_list) {
			ops->exit(net);
			cond_resched();
		}
	}

	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			kfree(net_generic(net, *ops->id));
	}
}

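/* Undo, in reverse registration order, every pernet_operations on @ops_list
 * that precedes @ops (or all of them if @ops is NULL): run the pre_exit hooks,
 * wait for an RCU grace period, then run exit_rtnl, exit/exit_batch and
 * finally free the per-net data.
 */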
static void ops_undo_list(const struct list_head *ops_list,
			  const struct pernet_operations *ops,
			  struct list_head *net_exit_list,
			  bool expedite_rcu)
{
	const struct pernet_operations *saved_ops;
	bool hold_rtnl = false;

	if (!ops)
		ops = list_entry(ops_list, typeof(*ops), list);

	saved_ops = ops;

	list_for_each_entry_continue_reverse(ops, ops_list, list) {
		hold_rtnl |= !!ops->exit_rtnl;
		ops_pre_exit_list(ops, net_exit_list);
	}

	/* Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so the
	 * rcu_barrier() after ops_undo_list() isn't sufficient alone.
	 * Also the pre_exit() and exit() methods need this barrier.
	 */
	if (expedite_rcu)
		synchronize_rcu_expedited();
	else
		synchronize_rcu();

	if (hold_rtnl)
		ops_exit_rtnl_list(ops_list, saved_ops, net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, ops_list, list)
		ops_exit_list(ops, net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, ops_list, list)
		ops_free_list(ops, net_exit_list);
}

static void ops_undo_single(struct pernet_operations *ops,
			    struct list_head *net_exit_list)
{
	LIST_HEAD(ops_list);

	list_add(&ops->list, &ops_list);
	ops_undo_list(&ops_list, NULL, net_exit_list, false);
	list_del(&ops->list);
}

/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0;

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() will not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}

/* Must be called from RCU-critical section or with nsid_lock held */
static int __peernet2id(const struct net *net, struct net *peer)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	return NETNSA_NSID_NOT_ASSIGNED;
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
			      struct nlmsghdr *nlh, gfp_t gfp);
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
{
	int id;

	if (refcount_read(&net->ns.count) == 0)
		return NETNSA_NSID_NOT_ASSIGNED;

	spin_lock_bh(&net->nsid_lock);
	id = __peernet2id(net, peer);
	if (id >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		return id;
	}

	/* When peer is obtained from RCU lists, we may race with
	 * its cleanup. Check whether it's alive; this guarantees
	 * we never hash a peer back into net->netns_ids after it has
	 * just been idr_remove()'d from there in cleanup_net().
	 */
	if (!maybe_get_net(peer)) {
		spin_unlock_bh(&net->nsid_lock);
		return NETNSA_NSID_NOT_ASSIGNED;
	}

	id = alloc_netid(net, peer, -1);
	spin_unlock_bh(&net->nsid_lock);

	put_net(peer);
	if (id < 0)
		return NETNSA_NSID_NOT_ASSIGNED;

	rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL, gfp);

	return id;
}
EXPORT_SYMBOL_GPL(peernet2id_alloc);

/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(const struct net *net, struct net *peer)
{
	int id;

	rcu_read_lock();
	id = __peernet2id(net, peer);
	rcu_read_unlock();

	return id;
}
EXPORT_SYMBOL(peernet2id);

/* This function returns true if the peer netns has an id assigned in the
 * current netns.
 */
bool peernet_has_id(const struct net *net, struct net *peer)
{
	return peernet2id(net, peer) >= 0;
}

struct net *get_net_ns_by_id(const struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		peer = maybe_get_net(peer);
	rcu_read_unlock();

	return peer;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_id);

static __net_init void preinit_net_sysctl(struct net *net)
{
	net->core.sysctl_somaxconn = SOMAXCONN;
	/* Limits per socket sk_omem_alloc usage.
	 * TCP zerocopy regular usage needs 128 KB.
	 */
	net->core.sysctl_optmem_max = 128 * 1024;
	net->core.sysctl_txrehash = SOCK_TXREHASH_ENABLED;
	net->core.sysctl_tstamp_allow_data = 1;
}

/* init code that must occur even if setup_net() is not called. */
static __net_init void preinit_net(struct net *net, struct user_namespace *user_ns)
{
	refcount_set(&net->passive, 1);
	refcount_set(&net->ns.count, 1);
	ref_tracker_dir_init(&net->refcnt_tracker, 128, "net refcnt");
	ref_tracker_dir_init(&net->notrefcnt_tracker, 128, "net notrefcnt");

	get_random_bytes(&net->hash_mix, sizeof(u32));
	net->dev_base_seq = 1;
	net->user_ns = user_ns;

	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);
	mutex_init(&net->ipv4.ra_mutex);

#ifdef CONFIG_DEBUG_NET_SMALL_RTNL
	mutex_init(&net->rtnl_mutex);
	lock_set_cmp_fn(&net->rtnl_mutex, rtnl_net_lock_cmp_fn, NULL);
#endif

	INIT_LIST_HEAD(&net->ptype_all);
	INIT_LIST_HEAD(&net->ptype_specific);
	preinit_net_sysctl(net);
}

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net)
{
	/* Must be called with pernet_ops_rwsem held */
	const struct pernet_operations *ops;
	LIST_HEAD(net_exit_list);
	int error = 0;

	preempt_disable();
	net->net_cookie = gen_cookie_next(&net_cookie);
	preempt_enable();

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
	down_write(&net_rwsem);
	list_add_tail_rcu(&net->list, &net_namespace_list);
	up_write(&net_rwsem);
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	ops_undo_list(&pernet_list, ops, &net_exit_list, false);
	rcu_barrier();
	goto out;
}

#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

static struct kmem_cache *net_cachep __ro_after_init;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

#ifdef CONFIG_KEYS
	net->key_domain = kzalloc(sizeof(struct key_tag), GFP_KERNEL);
	if (!net->key_domain)
		goto out_free_2;
	refcount_set(&net->key_domain->usage, 1);
#endif

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

#ifdef CONFIG_KEYS
out_free_2:
	kmem_cache_free(net_cachep, net);
	net = NULL;
#endif
out_free:
	kfree(ng);
	goto out;
}

static LLIST_HEAD(defer_free_list);

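/* Free the struct net instances deferred by net_passive_dec() in the previous
 * cleanup round; called from cleanup_net() after an extra rcu_barrier().
 */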
static void net_complete_free(void)
{
	struct llist_node *kill_list;
	struct net *net, *next;

	/* Get the list of namespaces to free from last round. */
	kill_list = llist_del_all(&defer_free_list);

	llist_for_each_entry_safe(net, next, kill_list, defer_free_list)
		kmem_cache_free(net_cachep, net);
}

void net_passive_dec(struct net *net)
{
	if (refcount_dec_and_test(&net->passive)) {
		kfree(rcu_access_pointer(net->gen));

		/* There should not be any trackers left there. */
		ref_tracker_dir_exit(&net->notrefcnt_tracker);

		/* Wait for an extra rcu_barrier() before final free. */
		llist_add(&net->defer_free_list, &defer_free_list);
	}
}

void net_drop_ns(void *p)
{
	struct net *net = (struct net *)p;

	if (net)
		net_passive_dec(net);
}

struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	net = net_alloc();
	if (!net) {
		rv = -ENOMEM;
		goto dec_ucounts;
	}

	preinit_net(net, user_ns);
	net->ucounts = ucounts;
	get_user_ns(user_ns);

	rv = down_read_killable(&pernet_ops_rwsem);
	if (rv < 0)
		goto put_userns;

	rv = setup_net(net);

	up_read(&pernet_ops_rwsem);

	if (rv < 0) {
put_userns:
#ifdef CONFIG_KEYS
		key_remove_domain(net->key_domain);
#endif
		put_user_ns(user_ns);
		net_passive_dec(net);
dec_ucounts:
		dec_net_namespaces(ucounts);
		return ERR_PTR(rv);
	}
	return net;
}

/**
 * net_ns_get_ownership - get sysfs ownership data for @net
 * @net: network namespace in question (can be NULL)
 * @uid: kernel user ID for sysfs objects
 * @gid: kernel group ID for sysfs objects
 *
 * Returns the uid/gid pair of root in the user namespace associated with the
 * given network namespace.
 */
void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid)
{
	if (net) {
		kuid_t ns_root_uid = make_kuid(net->user_ns, 0);
		kgid_t ns_root_gid = make_kgid(net->user_ns, 0);

		if (uid_valid(ns_root_uid))
			*uid = ns_root_uid;

		if (gid_valid(ns_root_gid))
			*gid = ns_root_gid;
	} else {
		*uid = GLOBAL_ROOT_UID;
		*gid = GLOBAL_ROOT_GID;
	}
}
EXPORT_SYMBOL_GPL(net_ns_get_ownership);

static void unhash_nsid(struct net *net, struct net *last)
{
	struct net *tmp;
	/* This function is only called from the cleanup_net() work,
	 * which is the only place that may delete a net from
	 * net_namespace_list. So while the code below is executing,
	 * the list may only grow. Thus, we do not use
	 * for_each_net_rcu() or net_rwsem.
	 */
	for_each_net(tmp) {
		int id;

		spin_lock_bh(&tmp->nsid_lock);
		id = __peernet2id(tmp, net);
		if (id >= 0)
			idr_remove(&tmp->netns_ids, id);
		spin_unlock_bh(&tmp->nsid_lock);
		if (id >= 0)
			rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
					  GFP_KERNEL);
		if (tmp == last)
			break;
	}
	spin_lock_bh(&net->nsid_lock);
	idr_destroy(&net->netns_ids);
	spin_unlock_bh(&net->nsid_lock);
}

static LLIST_HEAD(cleanup_list);

struct task_struct *cleanup_net_task;

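/* cleanup_net() runs from the netns workqueue and tears down every namespace
 * queued on cleanup_list: it unhashes their nsids, runs the pernet exit path
 * and drops the remaining references.
 */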
static void cleanup_net(struct work_struct *work)
{
	struct llist_node *net_kill_list;
	struct net *net, *tmp, *last;
	LIST_HEAD(net_exit_list);

	WRITE_ONCE(cleanup_net_task, current);

	/* Atomically snapshot the list of namespaces to cleanup */
	net_kill_list = llist_del_all(&cleanup_list);

	down_read(&pernet_ops_rwsem);

	/* Don't let anyone else find us. */
	down_write(&net_rwsem);
	llist_for_each_entry(net, net_kill_list, cleanup_list)
		list_del_rcu(&net->list);
	/* Cache the last net. After we unlock net_rwsem, no net newly
	 * added to net_namespace_list can assign an nsid to a net from
	 * net_kill_list (see peernet2id_alloc()), so we skip those nets
	 * in unhash_nsid().
	 *
	 * Note that unhash_nsid() does not delete nsid links between
	 * net_kill_list's nets, as they've already been deleted from
	 * net_namespace_list. But this would be useless anyway, as
	 * their netns_ids are destroyed there.
	 */
	last = list_last_entry(&net_namespace_list, struct net, list);
	up_write(&net_rwsem);

	llist_for_each_entry(net, net_kill_list, cleanup_list) {
		unhash_nsid(net, last);
		list_add_tail(&net->exit_list, &net_exit_list);
	}

	ops_undo_list(&pernet_list, NULL, &net_exit_list, true);

	up_read(&pernet_ops_rwsem);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	net_complete_free();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		dec_net_namespaces(net->ucounts);
#ifdef CONFIG_KEYS
		key_remove_domain(net->key_domain);
#endif
		put_user_ns(net->user_ns);
		net_passive_dec(net);
	}
	WRITE_ONCE(cleanup_net_task, NULL);
}

/**
 * net_ns_barrier - wait until concurrent net_cleanup_work is done
 *
 * cleanup_net runs from work queue and will first remove namespaces
 * from the global list, then run net exit functions.
 *
 * Call this in module exit path to make sure that all netns
 * ->exit ops have been invoked before the function is removed.
 */
void net_ns_barrier(void)
{
	down_write(&pernet_ops_rwsem);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL(net_ns_barrier);

static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
	ref_tracker_dir_exit(&net->refcnt_tracker);
	/* Cleanup the network namespace in process context */
	if (llist_add(&net->cleanup_list, &cleanup_list))
		queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

/**
 * get_net_ns - increment the refcount of the network namespace
 * @ns: common namespace (net)
 *
 * Returns the net's common namespace or ERR_PTR() if ref is zero.
 */
struct ns_common *get_net_ns(struct ns_common *ns)
{
	struct net *net;

	net = maybe_get_net(container_of(ns, struct net, ns));
	if (net)
		return &net->ns;
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(get_net_ns);

struct net *get_net_ns_by_fd(int fd)
{
	CLASS(fd, f)(fd);

	if (fd_empty(f))
		return ERR_PTR(-EBADF);

	if (proc_ns_file(fd_file(f))) {
		struct ns_common *ns = get_proc_ns(file_inode(fd_file(f)));

		if (ns->ops == &netns_operations)
			return get_net(container_of(ns, struct net, ns));
	}

	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
#endif

struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;

		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};

static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]		= { .type = NLA_UNSPEC },
	[NETNSA_NSID]		= { .type = NLA_S32 },
	[NETNSA_PID]		= { .type = NLA_U32 },
	[NETNSA_FD]		= { .type = NLA_U32 },
	[NETNSA_TARGET_NSID]	= { .type = NLA_S32 },
};

static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct nlattr *nla;
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg), tb,
				     NETNSA_MAX, rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID]) {
		NL_SET_ERR_MSG(extack, "nsid is missing");
		return -EINVAL;
	}
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}
	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	spin_lock_bh(&net->nsid_lock);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack,
			       "Peer netns already has a nsid assigned");
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock_bh(&net->nsid_lock);
	if (err >= 0) {
		rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
				  nlh, GFP_KERNEL);
		err = 0;
	} else if (err == -ENOSPC && nsid >= 0) {
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
		NL_SET_ERR_MSG(extack, "The specified nsid is already used");
	}
out:
	put_net(peer);
	return err;
}

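/* Size of an RTM_NEWNSID/RTM_DELNSID notification payload. */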
static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       + nla_total_size(sizeof(s32)) /* NETNSA_CURRENT_NSID */
	       ;
}

struct net_fill_args {
	u32 portid;
	u32 seq;
	int flags;
	int cmd;
	int nsid;
	bool add_ref;
	int ref_nsid;
};

static int rtnl_net_fill(struct sk_buff *skb, struct net_fill_args *args)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, args->portid, args->seq, args->cmd, sizeof(*rth),
			args->flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, args->nsid))
		goto nla_put_failure;

	if (args->add_ref &&
	    nla_put_s32(skb, NETNSA_CURRENT_NSID, args->ref_nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int rtnl_net_valid_getid_req(struct sk_buff *skb,
				    const struct nlmsghdr *nlh,
				    struct nlattr **tb,
				    struct netlink_ext_ack *extack)
{
	int i, err;

	if (!netlink_strict_get_check(skb))
		return nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg),
					      tb, NETNSA_MAX, rtnl_net_policy,
					      extack);

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
					    NETNSA_MAX, rtnl_net_policy,
					    extack);
	if (err)
		return err;

	for (i = 0; i <= NETNSA_MAX; i++) {
		if (!tb[i])
			continue;

		switch (i) {
		case NETNSA_PID:
		case NETNSA_FD:
		case NETNSA_NSID:
		case NETNSA_TARGET_NSID:
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported attribute in peer netns getid request");
			return -EINVAL;
		}
	}

	return 0;
}

static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct net_fill_args fillargs = {
		.portid = NETLINK_CB(skb).portid,
		.seq = nlh->nlmsg_seq,
		.cmd = RTM_NEWNSID,
	};
	struct net *peer, *target = net;
	struct nlattr *nla;
	struct sk_buff *msg;
	int err;

	err = rtnl_net_valid_getid_req(skb, nlh, tb, extack);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else if (tb[NETNSA_NSID]) {
		peer = get_net_ns_by_id(net, nla_get_s32(tb[NETNSA_NSID]));
		if (!peer)
			peer = ERR_PTR(-ENOENT);
		nla = tb[NETNSA_NSID];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}

	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	if (tb[NETNSA_TARGET_NSID]) {
		int id = nla_get_s32(tb[NETNSA_TARGET_NSID]);

		target = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, id);
		if (IS_ERR(target)) {
			NL_SET_BAD_ATTR(extack, tb[NETNSA_TARGET_NSID]);
			NL_SET_ERR_MSG(extack,
				       "Target netns reference is invalid");
			err = PTR_ERR(target);
			goto out;
		}
		fillargs.add_ref = true;
		fillargs.ref_nsid = peernet2id(net, peer);
	}

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	fillargs.nsid = peernet2id(target, peer);
	err = rtnl_net_fill(msg, &fillargs);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	if (fillargs.add_ref)
		put_net(target);
	put_net(peer);
	return err;
}

struct rtnl_net_dump_cb {
	struct net *tgt_net;
	struct net *ref_net;
	struct sk_buff *skb;
	struct net_fill_args fillargs;
	int idx;
	int s_idx;
};

/* Runs in RCU-critical section. */
static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	net_cb->fillargs.nsid = id;
	if (net_cb->fillargs.add_ref)
		net_cb->fillargs.ref_nsid = __peernet2id(net_cb->ref_net, peer);
	ret = rtnl_net_fill(net_cb->skb, &net_cb->fillargs);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

static int rtnl_valid_dump_net_req(const struct nlmsghdr *nlh, struct sock *sk,
				   struct rtnl_net_dump_cb *net_cb,
				   struct netlink_callback *cb)
{
	struct netlink_ext_ack *extack = cb->extack;
	struct nlattr *tb[NETNSA_MAX + 1];
	int err, i;

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
					    NETNSA_MAX, rtnl_net_policy,
					    extack);
	if (err < 0)
		return err;

	for (i = 0; i <= NETNSA_MAX; i++) {
		if (!tb[i])
			continue;

		if (i == NETNSA_TARGET_NSID) {
			struct net *net;

			net = rtnl_get_net_ns_capable(sk, nla_get_s32(tb[i]));
			if (IS_ERR(net)) {
				NL_SET_BAD_ATTR(extack, tb[i]);
				NL_SET_ERR_MSG(extack,
					       "Invalid target network namespace id");
				return PTR_ERR(net);
			}
			net_cb->fillargs.add_ref = true;
			net_cb->ref_net = net_cb->tgt_net;
			net_cb->tgt_net = net;
		} else {
			NL_SET_BAD_ATTR(extack, tb[i]);
			NL_SET_ERR_MSG(extack,
				       "Unsupported attribute in dump request");
			return -EINVAL;
		}
	}

	return 0;
}

static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtnl_net_dump_cb net_cb = {
		.tgt_net = sock_net(skb->sk),
		.skb = skb,
		.fillargs = {
			.portid = NETLINK_CB(cb->skb).portid,
			.seq = cb->nlh->nlmsg_seq,
			.flags = NLM_F_MULTI,
			.cmd = RTM_NEWNSID,
		},
		.idx = 0,
		.s_idx = cb->args[0],
	};
	int err = 0;

	if (cb->strict_check) {
		err = rtnl_valid_dump_net_req(cb->nlh, skb->sk, &net_cb, cb);
		if (err < 0)
			goto end;
	}

	rcu_read_lock();
	idr_for_each(&net_cb.tgt_net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	rcu_read_unlock();

	cb->args[0] = net_cb.idx;
end:
	if (net_cb.fillargs.add_ref)
		put_net(net_cb.tgt_net);
	return err;
}

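/* Send an RTM_NEWNSID/RTM_DELNSID notification to RTNLGRP_NSID listeners. */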
static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
			      struct nlmsghdr *nlh, gfp_t gfp)
{
	struct net_fill_args fillargs = {
		.portid = portid,
		.seq = nlh ? nlh->nlmsg_seq : 0,
		.cmd = cmd,
		.nsid = id,
	};
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), gfp);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, &fillargs);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, gfp);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

#ifdef CONFIG_NET_NS
static void __init netns_ipv4_struct_check(void)
{
	/* TX readonly hotpath cache lines */
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_early_retrans);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_tso_win_divisor);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_tso_rtt_log);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_autocorking);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_min_snd_mss);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_notsent_lowat);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_limit_output_bytes);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_min_rtt_wlen);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_wmem);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_ip_fwd_use_pmtu);
	CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_tx, 33);

	/* TXRX readonly hotpath cache lines */
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_txrx,
				      sysctl_tcp_moderate_rcvbuf);
	CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_txrx, 1);

	/* RX readonly hotpath cache line */
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_ip_early_demux);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_tcp_early_demux);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_tcp_l3mdev_accept);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_tcp_reordering);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_tcp_rmem);
	CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_rx, 22);
}
#endif

static const struct rtnl_msg_handler net_ns_rtnl_msg_handlers[] __initconst = {
	{.msgtype = RTM_NEWNSID, .doit = rtnl_net_newid,
	 .flags = RTNL_FLAG_DOIT_UNLOCKED},
	{.msgtype = RTM_GETNSID, .doit = rtnl_net_getid,
	 .dumpit = rtnl_net_dumpid,
	 .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED},
};

void __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	netns_ipv4_struct_check();
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
				       SMP_CACHE_BYTES,
				       SLAB_PANIC|SLAB_ACCOUNT, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

#ifdef CONFIG_KEYS
	init_net.key_domain = &init_net_key_domain;
#endif
	preinit_net(&init_net, &init_user_ns);

	down_write(&pernet_ops_rwsem);
	if (setup_net(&init_net))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;
	up_write(&pernet_ops_rwsem);

	if (register_pernet_subsys(&net_ns_ops))
		panic("Could not register network namespace subsystems");

	rtnl_register_many(net_ns_rtnl_msg_handlers);
}

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	LIST_HEAD(net_exit_list);
	struct net *net;
	int error;

	list_add_tail(&ops->list, list);
	if (ops->init || ops->id) {
		/* We hold pernet_ops_rwsem for writing, so parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* On error, clean up all the namespaces we initialized */
	list_del(&ops->list);
	ops_undo_single(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	LIST_HEAD(net_exit_list);
	struct net *net;

	/* See comment in __register_pernet_operations() */
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);

	list_del(&ops->list);
	ops_undo_single(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_add_tail(&ops->list, list);
		return 0;
	}

	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_del(&ops->list);
	} else {
		LIST_HEAD(net_exit_list);

		list_add(&init_net.exit_list, &net_exit_list);
		ops_undo_single(ops, &net_exit_list);
	}
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (WARN_ON(!!ops->id ^ !!ops->size))
		return -EINVAL;

	if (ops->id) {
		error = ida_alloc_min(&net_generic_ids, MIN_PERNET_OPS_ID,
				      GFP_KERNEL);
		if (error < 0)
			return error;
		*ops->id = error;
		/* max_gen_ptrs does not need READ_ONCE here because all
		 * writers hold pernet_ops_rwsem, but the WRITE_ONCE is
		 * needed to pair with the READ_ONCE in net_alloc_generic().
		 */
		WRITE_ONCE(max_gen_ptrs, max(max_gen_ptrs, *ops->id + 1));
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_free(&net_generic_ids, *ops->id);
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_free(&net_generic_ids, *ops->id);
}

/**
 * register_pernet_subsys - register a network namespace subsystem
 * @ops: pernet operations structure for the subsystem
 *
 * Register a subsystem which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered, all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order in which they were
 * registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;

	down_write(&pernet_ops_rwsem);
	error = register_pernet_operations(first_device, ops);
	up_write(&pernet_ops_rwsem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);
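
/* For illustration only (not part of this file): a pernet subsystem typically
 * pairs register_pernet_subsys() with a private id and per-net state retrieved
 * via net_generic(). A minimal, hypothetical sketch:
 *
 *	struct foo_net { int counter; };	// hypothetical per-net state
 *	static unsigned int foo_net_id;
 *
 *	static int __net_init foo_net_init(struct net *net)
 *	{
 *		struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *		fn->counter = 0;		// ->size memory is pre-zeroed
 *		return 0;
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_net_init,
 *		.id   = &foo_net_id,
 *		.size = sizeof(struct foo_net),
 *	};
 *
 *	// module init: register_pernet_subsys(&foo_net_ops);
 *	// module exit: unregister_pernet_subsys(&foo_net_ops);
 */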

/**
 * unregister_pernet_subsys - unregister a network namespace subsystem
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	down_write(&pernet_ops_rwsem);
	unregister_pernet_operations(ops);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 * register_pernet_device - register a network namespace device
 * @ops: pernet operations structure for the subsystem
 *
 * Register a device which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered, all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order in which they were
 * registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;

	down_write(&pernet_ops_rwsem);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	up_write(&pernet_ops_rwsem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 * unregister_pernet_device - unregister a network namespace netdevice
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	down_write(&pernet_ops_rwsem);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}

static int netns_install(struct nsset *nsset, struct ns_common *ns)
{
	struct nsproxy *nsproxy = nsset->nsproxy;
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

static struct user_namespace *netns_owner(struct ns_common *ns)
{
	return to_net_ns(ns)->user_ns;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
	.owner		= netns_owner,
};
#endif