// SPDX-License-Identifier: GPL-2.0
/*
 * linux/ipc/namespace.c
 * Copyright (C) 2006 Pavel Emelyanov <xemul@openvz.org> OpenVZ, SWsoft Inc.
 */

#include <linux/ipc.h>
#include <linux/msg.h>
#include <linux/ipc_namespace.h>
#include <linux/rcupdate.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>
#include <linux/nstree.h>
#include <linux/sched/task.h>

#include "util.h"

/*
 * The work queue is used to avoid the cost of synchronize_rcu in kern_unmount.
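 *
 * Dying namespaces are queued on free_ipc_list by put_ipc_ns(); free_ipc()
 * then pays a single synchronize_rcu() for the whole batch before freeing
 * them.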
 */
static void free_ipc(struct work_struct *unused);
static DECLARE_WORK(free_ipc_work, free_ipc);

static struct ucounts *inc_ipc_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_IPC_NAMESPACES);
}

static void dec_ipc_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_IPC_NAMESPACES);
}

static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns,
					   struct ipc_namespace *old_ns)
{
	struct ipc_namespace *ns;
	struct ucounts *ucounts;
	int err;

	err = -ENOSPC;
again:
	ucounts = inc_ipc_namespaces(user_ns);
	if (!ucounts) {
		/*
		 * IPC namespaces are freed asynchronously, by free_ipc_work.
		 * If frees were pending, flush_work will wait, and
		 * return true. Fail the allocation if no frees are pending.
		 */
		if (flush_work(&free_ipc_work))
			goto again;
		goto fail;
	}

	err = -ENOMEM;
	ns = kzalloc(sizeof(struct ipc_namespace), GFP_KERNEL_ACCOUNT);
	if (ns == NULL)
		goto fail_dec;

	err = ns_common_init(ns);
	if (err)
		goto fail_free;

	ns_tree_gen_id(ns);
	ns->user_ns = get_user_ns(user_ns);
	ns->ucounts = ucounts;

	err = mq_init_ns(ns);
	if (err)
		goto fail_put;

	err = -ENOMEM;
	if (!setup_mq_sysctls(ns))
		goto fail_put;

	if (!setup_ipc_sysctls(ns))
		goto fail_mq;

	err = msg_init_ns(ns);
	if (err)
		goto fail_ipc;

	sem_init_ns(ns);
	shm_init_ns(ns);
	ns_tree_add_raw(ns);

	return ns;

fail_ipc:
	retire_ipc_sysctls(ns);
fail_mq:
	retire_mq_sysctls(ns);

fail_put:
	put_user_ns(ns->user_ns);
	ns_common_free(ns);
fail_free:
	kfree(ns);
fail_dec:
	dec_ipc_namespaces(ucounts);
fail:
	return ERR_PTR(err);
}

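/*
 * Called when a new nsproxy is being set up (clone, unshare, ...): without
 * CLONE_NEWIPC the caller simply takes a reference on the existing ipc
 * namespace instead of creating a new one.
 */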
struct ipc_namespace *copy_ipcs(u64 flags,
	struct user_namespace *user_ns, struct ipc_namespace *ns)
{
	if (!(flags & CLONE_NEWIPC))
		return get_ipc_ns(ns);
	return create_ipc_ns(user_ns, ns);
}

/*
 * free_ipcs - free all ipcs of one type
 * @ns: the namespace to remove the ipcs from
 * @ids: the table of ipcs to free
 * @free: the function called to free each individual ipc
 *
 * Called for each kind of ipc when an ipc_namespace exits.
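 *
 * (For example, sem_exit_ns() in ipc/sem.c tears down all semaphore sets
 * with free_ipcs(ns, &sem_ids(ns), freeary); msg and shm do the analogous
 * thing with their own tables and callbacks.)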
 */
void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
	       void (*free)(struct ipc_namespace *, struct kern_ipc_perm *))
{
	struct kern_ipc_perm *perm;
	int next_id;
	int total, in_use;

	down_write(&ids->rwsem);

	in_use = ids->in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		perm = idr_find(&ids->ipcs_idr, next_id);
		if (perm == NULL)
			continue;
		rcu_read_lock();
		ipc_lock_object(perm);
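		/*
		 * The free() callback is expected to remove the object and
		 * to drop both the object lock and the RCU read lock itself.
		 */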
		free(ns, perm);
		total++;
	}
	up_write(&ids->rwsem);
}

static void free_ipc_ns(struct ipc_namespace *ns)
{
	/*
	 * Caller needs to wait for an RCU grace period to have passed
	 * after making the mount point inaccessible to new accesses.
	 */
	mntput(ns->mq_mnt);
	sem_exit_ns(ns);
	msg_exit_ns(ns);
	shm_exit_ns(ns);

	retire_mq_sysctls(ns);
	retire_ipc_sysctls(ns);

	dec_ipc_namespaces(ns->ucounts);
	put_user_ns(ns->user_ns);
	ns_common_free(ns);
	kfree(ns);
}

static LLIST_HEAD(free_ipc_list);
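/*
 * Effectively a batched kern_unmount(): make every queued mqueue mount
 * short-term first, wait for a single RCU grace period, and only then
 * release the mounts and free the namespaces via free_ipc_ns().
 */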
static void free_ipc(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&free_ipc_list);
	struct ipc_namespace *n, *t;

	llist_for_each_entry_safe(n, t, node, mnt_llist)
		mnt_make_shortterm(n->mq_mnt);

	/* Wait for any last users to have gone away. */
	synchronize_rcu();

	llist_for_each_entry_safe(n, t, node, mnt_llist)
		free_ipc_ns(n);
}

/*
 * put_ipc_ns - drop a reference to an ipc namespace.
 * @ns: the namespace to put
 *
 * If this is the last task in the namespace exiting, and
 * it is dropping the refcount to 0, then it can race with
 * a task in another ipc namespace but in a mount namespace
 * which has this ipcns's mqueuefs mounted, doing some action
 * with one of the mqueuefs files. That can raise the refcount.
 * So dropping the refcount, and raising the refcount when
 * accessing it through the VFS, are protected with mq_lock.
 *
 * (Clearly, a task raising the refcount on its own ipc_ns
 * needn't take mq_lock since it can't race with the last task
 * in the ipcns exiting).
 */
void put_ipc_ns(struct ipc_namespace *ns)
{
	if (ns_ref_put_and_lock(ns, &mq_lock)) {
		mq_clear_sbinfo(ns);
		spin_unlock(&mq_lock);

		ns_tree_remove(ns);
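		/*
		 * llist_add() returns true only when the list was previously
		 * empty, so the work item is scheduled once per batch of
		 * dying namespaces.
		 */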
		if (llist_add(&ns->mnt_llist, &free_ipc_list))
			schedule_work(&free_ipc_work);
	}
}

static struct ns_common *ipcns_get(struct task_struct *task)
{
	struct ipc_namespace *ns = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		ns = get_ipc_ns(nsproxy->ipc_ns);
	task_unlock(task);

	return ns ? &ns->ns : NULL;
}

static void ipcns_put(struct ns_common *ns)
{
	return put_ipc_ns(to_ipc_ns(ns));
}

static int ipcns_install(struct nsset *nsset, struct ns_common *new)
{
	struct nsproxy *nsproxy = nsset->nsproxy;
	struct ipc_namespace *ns = to_ipc_ns(new);
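	/*
	 * Installing requires CAP_SYS_ADMIN over both the user namespace
	 * that owns the target ipc namespace and the caller's own user
	 * namespace.
	 */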
	if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	put_ipc_ns(nsproxy->ipc_ns);
	nsproxy->ipc_ns = get_ipc_ns(ns);
	return 0;
}

static struct user_namespace *ipcns_owner(struct ns_common *ns)
{
	return to_ipc_ns(ns)->user_ns;
}

const struct proc_ns_operations ipcns_operations = {
	.name		= "ipc",
	.get		= ipcns_get,
	.put		= ipcns_put,
	.install	= ipcns_install,
	.owner		= ipcns_owner,
};