#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>

#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 * Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

/* Protects net_namespace_list. Nests inside rtnl_lock() */
DECLARE_RWSEM(net_rwsem);
EXPORT_SYMBOL_GPL(net_rwsem);

struct net init_net = {
	.count		= REFCOUNT_INIT(1),
	.dev_base_head	= LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);

static bool init_net_initialized;
/*
 * pernet_ops_rwsem: protects: pernet_list, net_generic_ids,
 * init_net_initialized and the first_device pointer.
 * This is an internal net namespace object. Please, don't use it
 * outside.
 */
DECLARE_RWSEM(pernet_ops_rwsem);
EXPORT_SYMBOL_GPL(pernet_ops_rwsem);

#define MIN_PERNET_OPS_ID	\
	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

static struct net_generic *net_alloc_generic(void)
{
	struct net_generic *ng;
	unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->s.len = max_gen_ptrs;

	return ng;
}
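/*
 * Sizing sketch (illustrative note, not authoritative): struct net_generic
 * is a union, so its bookkeeping header (s.len plus s.rcu) overlays the
 * start of the ptr[] array; the first MIN_PERNET_OPS_ID slots are therefore
 * unusable and valid ids start there. On a 64-bit build that works out to
 *
 *	MIN_PERNET_OPS_ID == 3		(1 slot for len, 2 for the rcu_head)
 *	offsetof(struct net_generic, ptr[13]) == 13 * sizeof(void *)
 *
 * which is exactly what the "+1 for len +2 for rcu_head" note above counts.
 */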
static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(id < MIN_PERNET_OPS_ID);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&pernet_ops_rwsem));
	if (old_ng->s.len > id) {
		old_ng->ptr[id] = data;
		return 0;
	}

	ng = net_alloc_generic();
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * net_generic() explores the net->gen array inside an rcu
	 * read section. Besides, once set, the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
	ng->ptr[id] = data;

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, s.rcu);
	return 0;
}

static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id && ops->size) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

cleanup:
	kfree(data);

out:
	return err;
}

static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (ops->id && ops->size)
		kfree(net_generic(net, *ops->id));
}

static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->exit(net);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->size && ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops_free(ops, net);
	}
}

/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0;

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() would not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}

/* Should be called with nsid_lock held. If a new id is assigned, the bool
 * *alloc is set to true, so the caller knows that the new id must be
 * notified via rtnl.
 */
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
	bool alloc_it = *alloc;

	*alloc = false;

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	if (alloc_it) {
		id = alloc_netid(net, peer, -1);
		*alloc = true;
		return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
	}

	return NETNSA_NSID_NOT_ASSIGNED;
}

/* should be called with nsid_lock held */
static int __peernet2id(struct net *net, struct net *peer)
{
	bool no = false;

	return __peernet2id_alloc(net, peer, &no);
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id);
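/*
 * Consumer sketch (hedged; the attribute shown is just one real-world
 * example of the pattern): nsids are per-observer, so the same peer may
 * have different ids in different nets. rtnetlink uses peernet2id_alloc()
 * below when it reports a device whose link lives in another namespace,
 * along the lines of:
 *
 *	int id = peernet2id_alloc(dev_net(dev), link_net);
 *
 *	if (id >= 0)
 *		nla_put_s32(skb, IFLA_LINK_NETNSID, id);
 */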
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer)
{
	bool alloc = false, alive = false;
	int id;

	if (refcount_read(&net->count) == 0)
		return NETNSA_NSID_NOT_ASSIGNED;
	spin_lock_bh(&net->nsid_lock);
	/*
	 * When peer is obtained from RCU lists, we may race with
	 * its cleanup. Check whether it's alive; this guarantees
	 * we never hash a peer back into net->netns_ids after it
	 * has just been idr_remove()'d from there in cleanup_net().
	 */
	if (maybe_get_net(peer))
		alive = alloc = true;
	id = __peernet2id_alloc(net, peer, &alloc);
	spin_unlock_bh(&net->nsid_lock);
	if (alloc && id >= 0)
		rtnl_net_notifyid(net, RTM_NEWNSID, id);
	if (alive)
		put_net(peer);
	return id;
}
EXPORT_SYMBOL_GPL(peernet2id_alloc);

/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
	int id;

	spin_lock_bh(&net->nsid_lock);
	id = __peernet2id(net, peer);
	spin_unlock_bh(&net->nsid_lock);
	return id;
}
EXPORT_SYMBOL(peernet2id);

/* This function returns true if the peer netns has an id assigned in the
 * current netns.
 */
bool peernet_has_id(struct net *net, struct net *peer)
{
	return peernet2id(net, peer) >= 0;
}

struct net *get_net_ns_by_id(struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		peer = maybe_get_net(peer);
	rcu_read_unlock();

	return peer;
}
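/*
 * Usage sketch (hypothetical caller): get_net_ns_by_id() returns either
 * NULL or a reference the caller owns, because maybe_get_net() only
 * succeeds while net->count is still nonzero (roughly a
 * refcount_inc_not_zero()), so a lookup racing with cleanup_net() fails
 * instead of resurrecting a dying net:
 *
 *	struct net *peer = get_net_ns_by_id(net, id);
 *
 *	if (peer) {
 *		... use peer ...
 *		put_net(peer);
 *	}
 */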
/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
	/* Must be called with pernet_ops_rwsem held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	refcount_set(&net->count, 1);
	refcount_set(&net->passive, 1);
	net->dev_base_seq = 1;
	net->user_ns = user_ns;
	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);
	mutex_init(&net->ipv4.ra_mutex);

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
	down_write(&net_rwsem);
	list_add_tail_rcu(&net->list, &net_namespace_list);
	up_write(&net_rwsem);
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	rcu_barrier();
	goto out;
}

static int __net_init net_defaults_init_net(struct net *net)
{
	net->core.sysctl_somaxconn = SOMAXCONN;
	return 0;
}

static struct pernet_operations net_defaults_ops = {
	.init = net_defaults_init_net,
};

static __init int net_defaults_init(void)
{
	if (register_pernet_subsys(&net_defaults_ops))
		panic("Cannot initialize net default settings");

	return 0;
}

core_initcall(net_defaults_init);

#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

static struct kmem_cache *net_cachep __ro_after_init;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

static void net_free(struct net *net)
{
	kfree(rcu_access_pointer(net->gen));
	kmem_cache_free(net_cachep, net);
}

void net_drop_ns(void *p)
{
	struct net *ns = p;

	if (ns && refcount_dec_and_test(&ns->passive))
		net_free(ns);
}

struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	net = net_alloc();
	if (!net) {
		rv = -ENOMEM;
		goto dec_ucounts;
	}
	refcount_set(&net->passive, 1);
	net->ucounts = ucounts;
	get_user_ns(user_ns);

	rv = down_read_killable(&pernet_ops_rwsem);
	if (rv < 0)
		goto put_userns;

	rv = setup_net(net, user_ns);

	up_read(&pernet_ops_rwsem);

	if (rv < 0) {
put_userns:
		put_user_ns(user_ns);
		net_drop_ns(net);
dec_ucounts:
		dec_net_namespaces(ucounts);
		return ERR_PTR(rv);
	}
	return net;
}
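/*
 * Call-path sketch (hedged; the actual plumbing lives in kernel/nsproxy.c):
 * copy_net_ns() is reached from clone(2)/unshare(2) with CLONE_NEWNET.
 * From userspace, roughly:
 *
 *	if (unshare(CLONE_NEWNET) == 0) {
 *		// the task now runs in a freshly setup_net()'d namespace
 *	}
 *
 * Without CLONE_NEWNET the old namespace is simply get_net()'d and shared.
 */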
static void unhash_nsid(struct net *net, struct net *last)
{
	struct net *tmp;

	/* This function is only called from cleanup_net() work,
	 * and this work is the only process that may delete
	 * a net from net_namespace_list. So, while the code below
	 * is executing, the list may only grow. Thus, we do not
	 * use for_each_net_rcu() or net_rwsem.
	 */
	for_each_net(tmp) {
		int id;

		spin_lock_bh(&tmp->nsid_lock);
		id = __peernet2id(tmp, net);
		if (id >= 0)
			idr_remove(&tmp->netns_ids, id);
		spin_unlock_bh(&tmp->nsid_lock);
		if (id >= 0)
			rtnl_net_notifyid(tmp, RTM_DELNSID, id);
		if (tmp == last)
			break;
	}
	spin_lock_bh(&net->nsid_lock);
	idr_destroy(&net->netns_ids);
	spin_unlock_bh(&net->nsid_lock);
}

static LLIST_HEAD(cleanup_list);

static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp, *last;
	struct llist_node *net_kill_list;
	LIST_HEAD(net_exit_list);

	/* Atomically snapshot the list of namespaces to clean up */
	net_kill_list = llist_del_all(&cleanup_list);

	down_read(&pernet_ops_rwsem);

	/* Don't let anyone else find us. */
	down_write(&net_rwsem);
	llist_for_each_entry(net, net_kill_list, cleanup_list)
		list_del_rcu(&net->list);
	/* Cache the last net. After we unlock rtnl, no new net
	 * added to net_namespace_list can assign an nsid pointer
	 * to a net from net_kill_list (see peernet2id_alloc()).
	 * So, we skip them in unhash_nsid().
	 *
	 * Note that unhash_nsid() does not delete nsid links
	 * between net_kill_list's nets, as they've already been
	 * deleted from net_namespace_list. But this would be
	 * useless anyway, as their netns_ids are destroyed there.
	 */
	last = list_last_entry(&net_namespace_list, struct net, list);
	up_write(&net_rwsem);

	llist_for_each_entry(net, net_kill_list, cleanup_list) {
		unhash_nsid(net, last);
		list_add_tail(&net->exit_list, &net_exit_list);
	}

	/*
	 * Another CPU might be rcu-iterating the list; wait for it.
	 * This needs to happen before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	up_read(&pernet_ops_rwsem);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free the network namespace structures */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		dec_net_namespaces(net->ucounts);
		put_user_ns(net->user_ns);
		net_drop_ns(net);
	}
}
/**
 * net_ns_barrier - wait until concurrent net_cleanup_work is done
 *
 * cleanup_net runs from a work queue and will first remove namespaces
 * from the global list, then run net exit functions.
 *
 * Call this in a module exit path to make sure that all netns
 * ->exit ops have been invoked before the function is removed.
 */
void net_ns_barrier(void)
{
	down_write(&pernet_ops_rwsem);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL(net_ns_barrier);

static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
	/* Clean up the network namespace in process context */
	if (llist_add(&net->cleanup_list, &cleanup_list))
		queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

struct net *get_net_ns_by_fd(int fd)
{
	struct file *file;
	struct ns_common *ns;
	struct net *net;

	file = proc_ns_fget(fd);
	if (IS_ERR(file))
		return ERR_CAST(file);

	ns = get_proc_ns(file_inode(file));
	if (ns->ops == &netns_operations)
		net = get_net(container_of(ns, struct net, ns));
	else
		net = ERR_PTR(-EINVAL);

	fput(file);
	return net;
}

#else
struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);

struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;

		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};

static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]	= { .type = NLA_UNSPEC },
	[NETNSA_NSID]	= { .type = NLA_S32 },
	[NETNSA_PID]	= { .type = NLA_U32 },
	[NETNSA_FD]	= { .type = NLA_U32 },
};
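/*
 * Message-shape sketch (illustrative): an RTM_NEWNSID request is a
 * struct rtgenmsg followed by attributes validated against
 * rtnl_net_policy above, e.g. to assign nsid 7 to a peer named by pid:
 *
 *	[ struct nlmsghdr   nlmsg_type = RTM_NEWNSID ]
 *	[ struct rtgenmsg   rtgen_family = AF_UNSPEC ]
 *	[ NETNSA_NSID (s32) = 7     ]
 *	[ NETNSA_PID  (u32) = <pid> ]
 *
 * iproute2's "ip netns set NAME 7" emits a request of this form.
 */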
static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct nlattr *nla;
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID]) {
		NL_SET_ERR_MSG(extack, "nsid is missing");
		return -EINVAL;
	}
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}
	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	spin_lock_bh(&net->nsid_lock);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack,
			       "Peer netns already has a nsid assigned");
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock_bh(&net->nsid_lock);
	if (err >= 0) {
		rtnl_net_notifyid(net, RTM_NEWNSID, err);
		err = 0;
	} else if (err == -ENOSPC && nsid >= 0) {
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
		NL_SET_ERR_MSG(extack, "The specified nsid is already used");
	}
out:
	put_net(peer);
	return err;
}

static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       ;
}

static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
			 int cmd, struct net *net, int nsid)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct nlattr *nla;
	struct sk_buff *msg;
	struct net *peer;
	int err, id;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}

	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	id = peernet2id(net, peer);
	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
			    RTM_NEWNSID, net, id);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	put_net(peer);
	return err;
}

struct rtnl_net_dump_cb {
	struct net *net;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
	int s_idx;
};

static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
			    net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			    RTM_NEWNSID, net_cb->net, id);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_net_dump_cb net_cb = {
		.net = net,
		.skb = skb,
		.cb = cb,
		.idx = 0,
		.s_idx = cb->args[0],
	};

	spin_lock_bh(&net->nsid_lock);
	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	spin_unlock_bh(&net->nsid_lock);

	cb->args[0] = net_cb.idx;
	return skb->len;
}
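/*
 * Dump-resume note (a sketch of the common netlink dump convention as
 * applied here): when the dump skb fills up, the core re-invokes
 * rtnl_net_dumpid(), which picks up where it left off via cb->args[0]:
 *
 *	.s_idx = cb->args[0],		// entries already sent last pass
 *	...
 *	cb->args[0] = net_cb.idx;	// resume point for the next pass
 *
 * rtnl_net_dumpid_one() counts every entry but only fills those past s_idx.
 */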
static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
				       SMP_CACHE_BYTES,
				       SLAB_PANIC|SLAB_ACCOUNT, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	down_write(&pernet_ops_rwsem);
	if (setup_net(&init_net, &init_user_ns))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;
	up_write(&pernet_ops_rwsem);

	register_pernet_subsys(&net_ns_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      RTNL_FLAG_DOIT_UNLOCKED);

	return 0;
}

pure_initcall(net_ns_init);
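/*
 * Ordering note (hedged): pure_initcall() runs at the earliest initcall
 * level, ahead of core_initcall(), so init_net is fully set up by
 * net_ns_init() before net_defaults_init() above registers its pernet ops
 * and long before any module calls register_pernet_subsys() or
 * register_pernet_device().
 */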
#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		/* We hold the write-locked pernet_ops_rwsem, so parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* On error, clean up all the namespaces we initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	/* See the comment in __register_pernet_operations() */
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_add_tail(&ops->list, list);
		return 0;
	}

	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_del(&ops->list);
	} else {
		LIST_HEAD(net_exit_list);

		list_add(&init_net.exit_list, &net_exit_list);
		ops_exit_list(ops, &net_exit_list);
		ops_free_list(ops, &net_exit_list);
	}
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		error = ida_get_new_above(&net_generic_ids, MIN_PERNET_OPS_ID,
					  ops->id);
		if (error < 0) {
			if (error == -EAGAIN) {
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
		max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}
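/*
 * Usage sketch (hypothetical subsystem; the "foo" names are illustrative,
 * not part of this file): a subsystem that wants per-netns state sets
 * .id/.size so that ops_init() kzalloc's the state and parks it in
 * net->gen, where net_generic() can fetch it later:
 *
 *	static unsigned int foo_net_id __read_mostly;
 *
 *	struct foo_net {
 *		int sysctl_foo;
 *	};
 *
 *	static int __net_init foo_init_net(struct net *net)
 *	{
 *		struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *		fn->sysctl_foo = 1;
 *		return 0;
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_init_net,
 *		.id   = &foo_net_id,
 *		.size = sizeof(struct foo_net),
 *	};
 *
 * The module would then call register_pernet_subsys(&foo_net_ops) at init
 * and unregister_pernet_subsys(&foo_net_ops) at exit.
 */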
/**
 * register_pernet_subsys - register a network namespace subsystem
 * @ops: pernet operations structure for the subsystem
 *
 * Register a subsystem which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered, all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created, all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed, all of the exit methods
 * are called in the reverse of the order in which they were
 * registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;

	down_write(&pernet_ops_rwsem);
	error = register_pernet_operations(first_device, ops);
	up_write(&pernet_ops_rwsem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);

/**
 * unregister_pernet_subsys - unregister a network namespace subsystem
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition, run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	down_write(&pernet_ops_rwsem);
	unregister_pernet_operations(ops);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 * register_pernet_device - register a network namespace device
 * @ops: pernet operations structure for the subsystem
 *
 * Register a device which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered, all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created, all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed, all of the exit methods
 * are called in the reverse of the order in which they were
 * registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;

	down_write(&pernet_ops_rwsem);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	up_write(&pernet_ops_rwsem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 * unregister_pernet_device - unregister a network namespace netdevice
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition, run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	down_write(&pernet_ops_rwsem);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);
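/*
 * Ordering note (a cautious reading of the first_device bookkeeping
 * above): subsystems are inserted at first_device, i.e. ahead of every
 * registered device in pernet_list, while devices append at the tail.
 * Because exit methods run in reverse list order, devices are torn down
 * before the subsystems they may depend on.
 */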
#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}

static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

static struct user_namespace *netns_owner(struct ns_common *ns)
{
	return to_net_ns(ns)->user_ns;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
	.owner		= netns_owner,
};
#endif
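/*
 * Flow sketch (hedged; the syscall glue lives in kernel/nsproxy.c and the
 * proc ns file code): netns_operations is what makes setns(2) work for
 * network namespaces. From userspace, roughly:
 *
 *	int fd = open("/run/netns/blue", O_RDONLY);	// iproute2-style bind mount
 *
 *	setns(fd, CLONE_NEWNET);	// ends up in netns_install()
 *
 * netns_install() demands CAP_SYS_ADMIN over both the target net's user
 * namespace and the caller's own, as checked above.
 */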