#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 * Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

struct net init_net = {
	.dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

static struct net_generic *net_alloc_generic(void)
{
	struct net_generic *ng;
	size_t generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->len = max_gen_ptrs;

	return ng;
}

static int net_assign_generic(struct net *net, int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(!mutex_is_locked(&net_mutex));
	BUG_ON(id == 0);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&net_mutex));
	ng = old_ng;
	if (old_ng->len >= id)
		goto assign;

	ng = net_alloc_generic();
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * net_generic() explores the net->gen array inside an RCU
	 * read section. Besides, once set, the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void *));

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, rcu);
assign:
	ng->ptr[id - 1] = data;
	return 0;
}
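
/*
 * Usage sketch (not part of this file): a pernet subsystem that sets
 * .id and .size in its pernet_operations gets a zeroed per-namespace
 * blob allocated by ops_init() and stored via net_assign_generic()
 * above; it later fetches that blob with net_generic(). The names
 * foo_net, foo_net_id and foo_pernet_ops below are hypothetical; only
 * struct pernet_operations and net_generic() are real APIs.
 *
 *	struct foo_net {
 *		int some_counter;
 *	};
 *
 *	static int foo_net_id __read_mostly;
 *
 *	static struct pernet_operations foo_pernet_ops = {
 *		.id   = &foo_net_id,
 *		.size = sizeof(struct foo_net),
 *	};
 *
 *	static void foo_bump(struct net *net)
 *	{
 *		// look up this namespace's private foo_net blob
 *		struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *		fn->some_counter++;
 *	}
 */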
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id && ops->size) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

cleanup:
	kfree(data);

out:
	return err;
}

static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (ops->id && ops->size) {
		int id = *ops->id;

		kfree(net_generic(net, id));
	}
}

static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->exit(net);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->size && ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops_free(ops, net);
	}
}

static void rtnl_net_notifyid(struct net *net, struct net *peer, int cmd,
			      int id);
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0, id;

	ASSERT_RTNL();

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	id = idr_alloc(&net->netns_ids, peer, min, max, GFP_KERNEL);
	if (id >= 0)
		rtnl_net_notifyid(net, peer, RTM_NEWNSID, id);

	return id;
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return id 0 (idr_for_each() would not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}

static int __peernet2id(struct net *net, struct net *peer, bool alloc)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);

	ASSERT_RTNL();

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	if (alloc)
		return alloc_netid(net, peer, -1);

	return -ENOENT;
}

/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id(struct net *net, struct net *peer)
{
	/* Allocate an id only if the peer netns is still alive. */
	bool alloc = atomic_read(&peer->count) != 0;
	int id;

	id = __peernet2id(net, peer, alloc);
	return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
}
EXPORT_SYMBOL(peernet2id);

struct net *get_net_ns_by_id(struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		get_net(peer);
	rcu_read_unlock();

	return peer;
}
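
/*
 * Usage sketch (not part of this file): under RTNL, a caller can map a
 * peer namespace to the nsid visible from "net" and later map that nsid
 * back to a referenced namespace. foo_show_peer() is hypothetical;
 * peernet2id(), get_net_ns_by_id() and put_net() are the real APIs.
 *
 *	static void foo_show_peer(struct net *net, struct net *peer)
 *	{
 *		int nsid = peernet2id(net, peer);
 *
 *		if (nsid == NETNSA_NSID_NOT_ASSIGNED)
 *			return;		// no id assigned or allocatable
 *
 *		peer = get_net_ns_by_id(net, nsid);
 *		if (peer) {
 *			// ... use peer ...
 *			put_net(peer);	// drop the reference we took
 *		}
 *	}
 */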
/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
	/* Must be called with net_mutex held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	atomic_set(&net->count, 1);
	atomic_set(&net->passive, 1);
	net->dev_base_seq = 1;
	net->user_ns = user_ns;
	idr_init(&net->netns_ids);

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	rcu_barrier();
	goto out;
}


#ifdef CONFIG_NET_NS
static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

static void net_free(struct net *net)
{
	kfree(rcu_access_pointer(net->gen));
	kmem_cache_free(net_cachep, net);
}

void net_drop_ns(void *p)
{
	struct net *ns = p;

	if (ns && atomic_dec_and_test(&ns->passive))
		net_free(ns);
}

struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	net = net_alloc();
	if (!net)
		return ERR_PTR(-ENOMEM);

	get_user_ns(user_ns);

	mutex_lock(&net_mutex);
	rv = setup_net(net, user_ns);
	if (rv == 0) {
		rtnl_lock();
		list_add_tail_rcu(&net->list, &net_namespace_list);
		rtnl_unlock();
	}
	mutex_unlock(&net_mutex);
	if (rv < 0) {
		put_user_ns(user_ns);
		net_drop_ns(net);
		return ERR_PTR(rv);
	}
	return net;
}

static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */
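
/*
 * Note on the two reference counts set up in setup_net() above:
 * net->count is the liveness count; when it drops to zero, __put_net()
 * queues the namespace for cleanup_net() below. net->passive keeps the
 * struct net memory itself valid for lockless readers until
 * net_drop_ns() releases the last passive reference. For comparison,
 * the maybe_get_net() helper in include/net/net_namespace.h takes a
 * liveness reference only if the namespace is not already dying; it
 * looks roughly like:
 *
 *	static inline struct net *maybe_get_net(struct net *net)
 *	{
 *		// only acquire if the refcount has not already hit zero
 *		if (!atomic_inc_not_zero(&net->count))
 *			net = NULL;
 *		return net;
 *	}
 */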
static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp;
	struct list_head net_kill_list;
	LIST_HEAD(net_exit_list);

	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);

	mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
	list_for_each_entry(net, &net_kill_list, cleanup_list) {
		list_del_rcu(&net->list);
		list_add_tail(&net->exit_list, &net_exit_list);
		for_each_net(tmp) {
			int id = __peernet2id(tmp, net, false);

			if (id >= 0) {
				rtnl_net_notifyid(tmp, net, RTM_DELNSID, id);
				idr_remove(&tmp->netns_ids, id);
			}
		}
		idr_destroy(&net->netns_ids);
	}
	rtnl_unlock();

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	mutex_unlock(&net_mutex);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		put_user_ns(net->user_ns);
		net_drop_ns(net);
	}
}
static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

struct net *get_net_ns_by_fd(int fd)
{
	struct file *file;
	struct ns_common *ns;
	struct net *net;

	file = proc_ns_fget(fd);
	if (IS_ERR(file))
		return ERR_CAST(file);

	ns = get_proc_ns(file_inode(file));
	if (ns->ops == &netns_operations)
		net = get_net(container_of(ns, struct net, ns));
	else
		net = ERR_PTR(-EINVAL);

	fput(file);
	return net;
}

#else
struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);

struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;

		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};

static struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]	= { .type = NLA_UNSPEC },
	[NETNSA_NSID]	= { .type = NLA_S32 },
	[NETNSA_PID]	= { .type = NLA_U32 },
	[NETNSA_FD]	= { .type = NLA_U32 },
};
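
/*
 * Userspace note (a sketch, not part of this file): the RTM_*NSID
 * handlers below back iproute2's netns id commands. Assuming a
 * namespace named "vpn" already exists, something like:
 *
 *	ip netns set vpn 42	# RTM_NEWNSID: assign nsid 42 to "vpn"
 *	ip netns list-id	# RTM_GETNSID dump: list assigned nsids
 *
 * The request identifies the peer namespace via NETNSA_PID or
 * NETNSA_FD, plus NETNSA_NSID for the requested id (negative means
 * auto-allocate, see alloc_netid()).
 */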
static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID])
		return -EINVAL;
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;
	if (IS_ERR(peer))
		return PTR_ERR(peer);

	if (__peernet2id(net, peer, false) >= 0) {
		err = -EEXIST;
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	if (err > 0)
		err = 0;
out:
	put_net(peer);
	return err;
}

static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       ;
}

static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
			 int cmd, struct net *net, struct net *peer,
			 int nsid)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;
	int id;

	ASSERT_RTNL();

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nsid >= 0) {
		id = nsid;
	} else {
		id = __peernet2id(net, peer, false);
		if (id < 0)
			id = NETNSA_NSID_NOT_ASSIGNED;
	}
	if (nla_put_s32(skb, NETNSA_NSID, id))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct sk_buff *msg;
	struct net *peer;
	int err;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;

	if (IS_ERR(peer))
		return PTR_ERR(peer);

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
			    RTM_GETNSID, net, peer, -1);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	put_net(peer);
	return err;
}

struct rtnl_net_dump_cb {
	struct net *net;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
	int s_idx;
};

static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
			    net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			    RTM_NEWNSID, net_cb->net, peer, id);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}
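
/*
 * Note on the dump below: netlink dumps are restartable. When the
 * destination skb fills up, rtnl_net_dumpid() returns and records the
 * number of entries already emitted in cb->args[0]; on the next
 * invocation that value becomes s_idx, so rtnl_net_dumpid_one() skips
 * the entries already sent and the dump resumes where it left off
 * instead of starting over.
 */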
static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_net_dump_cb net_cb = {
		.net = net,
		.skb = skb,
		.cb = cb,
		.idx = 0,
		.s_idx = cb->args[0],
	};

	ASSERT_RTNL();

	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);

	cb->args[0] = net_cb.idx;
	return skb->len;
}

static void rtnl_net_notifyid(struct net *net, struct net *peer, int cmd,
			      int id)
{
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, peer, id);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
				       SMP_CACHE_BYTES,
				       SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	mutex_lock(&net_mutex);
	if (setup_net(&init_net, &init_user_ns))
		panic("Could not setup the initial network namespace");

	rtnl_lock();
	list_add_tail_rcu(&init_net.list, &net_namespace_list);
	rtnl_unlock();

	mutex_unlock(&net_mutex);

	register_pernet_subsys(&net_ns_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      NULL);

	return 0;
}

pure_initcall(net_ns_init);

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error, clean up all namespaces I initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	LIST_HEAD(net_exit_list);

	list_add(&init_net.exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);
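
/*
 * Note on the ida usage below: ida_get_new_above() can fail with
 * -EAGAIN when the ida needs more preallocated memory; the caller is
 * expected to call ida_pre_get() (which may sleep to allocate) and
 * retry, which is exactly the "again:" loop in
 * register_pernet_operations(). Ids start at 1 because id 0 is
 * rejected by net_assign_generic().
 */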
static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		error = ida_get_new_above(&net_generic_ids, 1, ops->id);
		if (error < 0) {
			if (error == -EAGAIN) {
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
		max_gen_ptrs = max_t(unsigned int, max_gen_ptrs, *ops->id);
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}

/**
 * register_pernet_subsys - register a network namespace subsystem
 * @ops: pernet operations structure for the subsystem
 *
 * Register a subsystem which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order in which they were
 * registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;

	mutex_lock(&net_mutex);
	error = register_pernet_operations(first_device, ops);
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);

/**
 * unregister_pernet_subsys - unregister a network namespace subsystem
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
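
/*
 * Usage sketch (not part of this file): a module typically registers a
 * pernet subsystem from its init path and unregisters it on exit, so
 * its .init runs for every current and future namespace. foo_net_ops,
 * foo_init_net and foo_exit_net are hypothetical names.
 *
 *	static __net_init int foo_init_net(struct net *net)
 *	{
 *		return 0;	// allocate per-netns state here
 *	}
 *
 *	static __net_exit void foo_exit_net(struct net *net)
 *	{
 *		// release per-netns state here
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_init_net,
 *		.exit = foo_exit_net,
 *	};
 *
 *	static int __init foo_module_init(void)
 *	{
 *		return register_pernet_subsys(&foo_net_ops);
 *	}
 *
 *	static void __exit foo_module_exit(void)
 *	{
 *		unregister_pernet_subsys(&foo_net_ops);
 *	}
 */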
/**
 * register_pernet_device - register a network namespace device
 * @ops: pernet operations structure for the subsystem
 *
 * Register a device which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order in which they were
 * registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;

	mutex_lock(&net_mutex);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 * unregister_pernet_device - unregister a network namespace netdevice
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}

static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
};
#endif
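
/*
 * Userspace sketch (not part of this file): netns_install() above is
 * what runs in the kernel when a process calls setns(2) on a network
 * namespace file descriptor, e.g.:
 *
 *	int fd = open("/proc/1234/ns/net", O_RDONLY);
 *
 *	if (fd >= 0 && setns(fd, CLONE_NEWNET) == 0) {
 *		// now running in the target network namespace
 *	}
 *
 * (iproute2-managed namespaces expose the same objects under
 * /var/run/netns/<name>.) The checks in netns_install() require
 * CAP_SYS_ADMIN over both the target namespace's user namespace and
 * the caller's own.
 */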