// SPDX-License-Identifier: GPL-2.0-only

#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>
#include <linux/highuid.h>
#include <linux/cred.h>
#include <linux/securebits.h>
#include <linux/security.h>
#include <linux/keyctl.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/projid.h>
#include <linux/fs_struct.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/nstree.h>

static struct kmem_cache *user_ns_cachep __ro_after_init;
static DEFINE_MUTEX(userns_state_mutex);

static bool new_idmap_permitted(const struct file *file,
				struct user_namespace *ns, int cap_setid,
				struct uid_gid_map *map);
static void free_user_ns(struct work_struct *work);

static struct ucounts *inc_user_namespaces(struct user_namespace *ns, kuid_t uid)
{
	return inc_ucount(ns, uid, UCOUNT_USER_NAMESPACES);
}

static void dec_user_namespaces(struct ucounts *ucounts)
{
	return dec_ucount(ucounts, UCOUNT_USER_NAMESPACES);
}

static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
{
	/* Start with the same capabilities as init but useless for doing
	 * anything as the capabilities are bound to the new user namespace.
	 */
	cred->securebits = SECUREBITS_DEFAULT;
	cred->cap_inheritable = CAP_EMPTY_SET;
	cred->cap_permitted = CAP_FULL_SET;
	cred->cap_effective = CAP_FULL_SET;
	cred->cap_ambient = CAP_EMPTY_SET;
	cred->cap_bset = CAP_FULL_SET;
#ifdef CONFIG_KEYS
	key_put(cred->request_key_auth);
	cred->request_key_auth = NULL;
#endif
	/* tgcred will be cleared in our caller because CLONE_THREAD won't be set */
	cred->user_ns = user_ns;
}

static unsigned long enforced_nproc_rlimit(void)
{
	unsigned long limit = RLIM_INFINITY;

	/* Is RLIMIT_NPROC currently enforced? */
	if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) ||
	    (current_user_ns() != &init_user_ns))
		limit = rlimit(RLIMIT_NPROC);

	return limit;
}

/*
 * Create a new user namespace, deriving the creator from the user in the
 * passed credentials, and replacing that user with the new root user for the
 * new namespace.
 *
 * This is called by copy_creds(), which will finish setting the target task's
 * credentials.
 */
int create_user_ns(struct cred *new)
{
	struct user_namespace *ns, *parent_ns = new->user_ns;
	kuid_t owner = new->euid;
	kgid_t group = new->egid;
	struct ucounts *ucounts;
	int ret, i;

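	/* Bound how deeply user namespaces can be nested. */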
	ret = -ENOSPC;
	if (parent_ns->level > 32)
		goto fail;

	ucounts = inc_user_namespaces(parent_ns, owner);
	if (!ucounts)
		goto fail;

	/*
	 * Verify that we cannot violate the file access policy specified
	 * by the root directory, by verifying that the root directory is
	 * at the root of the mount namespace, which allows all files to
	 * be accessed.
	 */
	ret = -EPERM;
	if (current_chrooted())
		goto fail_dec;

	/* The creator needs a mapping in the parent user namespace
	 * or else we won't be able to reasonably tell userspace who
	 * created a user_namespace.
	 */
	ret = -EPERM;
	if (!kuid_has_mapping(parent_ns, owner) ||
	    !kgid_has_mapping(parent_ns, group))
		goto fail_dec;

	ret = security_create_user_ns(new);
	if (ret < 0)
		goto fail_dec;

	ret = -ENOMEM;
	ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
	if (!ns)
		goto fail_dec;

	ns->parent_could_setfcap = cap_raised(new->cap_effective, CAP_SETFCAP);

	ret = ns_common_init(ns);
	if (ret)
		goto fail_free;

	/* Leave the new->user_ns reference with the new user namespace. */
	ns->parent = parent_ns;
	ns->level = parent_ns->level + 1;
	ns->owner = owner;
	ns->group = group;
	INIT_WORK(&ns->work, free_user_ns);
	for (i = 0; i < UCOUNT_COUNTS; i++) {
		ns->ucount_max[i] = INT_MAX;
	}
	set_userns_rlimit_max(ns, UCOUNT_RLIMIT_NPROC, enforced_nproc_rlimit());
	set_userns_rlimit_max(ns, UCOUNT_RLIMIT_MSGQUEUE, rlimit(RLIMIT_MSGQUEUE));
	set_userns_rlimit_max(ns, UCOUNT_RLIMIT_SIGPENDING, rlimit(RLIMIT_SIGPENDING));
	set_userns_rlimit_max(ns, UCOUNT_RLIMIT_MEMLOCK, rlimit(RLIMIT_MEMLOCK));
	ns->ucounts = ucounts;

	/* Inherit USERNS_SETGROUPS_ALLOWED from our parent */
	mutex_lock(&userns_state_mutex);
	ns->flags = parent_ns->flags;
	mutex_unlock(&userns_state_mutex);

#ifdef CONFIG_KEYS
	INIT_LIST_HEAD(&ns->keyring_name_list);
	init_rwsem(&ns->keyring_sem);
#endif
	ret = -ENOMEM;
	if (!setup_userns_sysctls(ns))
		goto fail_keyring;

	set_cred_user_ns(new, ns);
	ns_tree_add(ns);
	return 0;
fail_keyring:
#ifdef CONFIG_PERSISTENT_KEYRINGS
	key_put(ns->persistent_keyring_register);
#endif
	ns_common_free(ns);
fail_free:
	kmem_cache_free(user_ns_cachep, ns);
fail_dec:
	dec_user_namespaces(ucounts);
fail:
	return ret;
}

int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
{
	struct cred *cred;
	int err = -ENOMEM;

	if (!(unshare_flags & CLONE_NEWUSER))
		return 0;

	cred = prepare_creds();
	if (cred) {
		err = create_user_ns(cred);
		if (err)
			put_cred(cred);
		else
			*new_cred = cred;
	}

	return err;
}

static void free_user_ns(struct work_struct *work)
{
	struct user_namespace *parent, *ns =
		container_of(work, struct user_namespace, work);

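	/* Release this namespace, then walk up the parent chain, freeing each
	 * ancestor whose last reference is dropped here.
	 */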
	do {
		struct ucounts *ucounts = ns->ucounts;
		parent = ns->parent;
		ns_tree_remove(ns);
		if (ns->gid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
			kfree(ns->gid_map.forward);
			kfree(ns->gid_map.reverse);
		}
		if (ns->uid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
			kfree(ns->uid_map.forward);
			kfree(ns->uid_map.reverse);
		}
		if (ns->projid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
			kfree(ns->projid_map.forward);
			kfree(ns->projid_map.reverse);
		}
#if IS_ENABLED(CONFIG_BINFMT_MISC)
		kfree(ns->binfmt_misc);
#endif
		retire_userns_sysctls(ns);
		key_free_user_ns(ns);
		ns_common_free(ns);
		/* Concurrent nstree traversal depends on a grace period. */
		kfree_rcu(ns, ns.ns_rcu);
		dec_user_namespaces(ucounts);
		ns = parent;
	} while (ns_ref_put(parent));
}

void __put_user_ns(struct user_namespace *ns)
{
	schedule_work(&ns->work);
}
EXPORT_SYMBOL(__put_user_ns);

/*
 * struct idmap_key - holds the information necessary to find an idmapping in a
 * sorted idmap array. It is passed to cmp_map_id() as the first argument.
 */
struct idmap_key {
	bool map_up; /* true -> id from kid; false -> kid from id */
	u32 id; /* id to find */
	u32 count;
};

/*
 * cmp_map_id - Function to be passed to bsearch() to find the requested
 * idmapping. Expects struct idmap_key to be passed via @k.
 */
static int cmp_map_id(const void *k, const void *e)
{
	u32 first, last, id2;
	const struct idmap_key *key = k;
	const struct uid_gid_extent *el = e;

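	/* A match requires the whole [id, id + count - 1] range to lie inside one extent. */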
	id2 = key->id + key->count - 1;

	/* handle map_id_{down,up}() */
	if (key->map_up)
		first = el->lower_first;
	else
		first = el->first;

	last = first + el->count - 1;

	if (key->id >= first && key->id <= last &&
	    (id2 >= first && id2 <= last))
		return 0;

	if (key->id < first || id2 < first)
		return -1;

	return 1;
}

/*
 * map_id_range_down_max - Find idmap via binary search in ordered idmap array.
 * Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
 */
static struct uid_gid_extent *
map_id_range_down_max(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
{
	struct idmap_key key;

	key.map_up = false;
	key.count = count;
	key.id = id;

	return bsearch(&key, map->forward, extents,
		       sizeof(struct uid_gid_extent), cmp_map_id);
}

/*
 * map_id_range_down_base - Find idmap via linear search in the static extent
 * array. Can only be called if the number of mappings is equal to or less than
 * UID_GID_MAP_MAX_BASE_EXTENTS.
 */
static struct uid_gid_extent *
map_id_range_down_base(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
{
	unsigned idx;
	u32 first, last, id2;

	id2 = id + count - 1;

	/* Find the matching extent */
	for (idx = 0; idx < extents; idx++) {
		first = map->extent[idx].first;
		last = first + map->extent[idx].count - 1;
		if (id >= first && id <= last &&
		    (id2 >= first && id2 <= last))
			return &map->extent[idx];
	}
	return NULL;
}

static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
{
	struct uid_gid_extent *extent;
	unsigned extents = map->nr_extents;
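	/* Pairs with the smp_wmb() in map_write(): read nr_extents before the extents themselves. */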
	smp_rmb();

	if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
		extent = map_id_range_down_base(extents, map, id, count);
	else
		extent = map_id_range_down_max(extents, map, id, count);

	/* Map the id or note failure */
	if (extent)
		id = (id - extent->first) + extent->lower_first;
	else
		id = (u32) -1;

	return id;
}

u32 map_id_down(struct uid_gid_map *map, u32 id)
{
	return map_id_range_down(map, id, 1);
}

/*
 * map_id_range_up_base - Find idmap via linear search in the static extent
 * array. Can only be called if the number of mappings is equal to or less than
 * UID_GID_MAP_MAX_BASE_EXTENTS.
 */
static struct uid_gid_extent *
map_id_range_up_base(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
{
	unsigned idx;
	u32 first, last, id2;

	id2 = id + count - 1;

	/* Find the matching extent */
	for (idx = 0; idx < extents; idx++) {
		first = map->extent[idx].lower_first;
		last = first + map->extent[idx].count - 1;
		if (id >= first && id <= last &&
		    (id2 >= first && id2 <= last))
			return &map->extent[idx];
	}
	return NULL;
}

/*
 * map_id_range_up_max - Find idmap via binary search in ordered idmap array.
 * Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
 */
static struct uid_gid_extent *
map_id_range_up_max(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
{
	struct idmap_key key;

	key.map_up = true;
	key.count = count;
	key.id = id;

	return bsearch(&key, map->reverse, extents,
		       sizeof(struct uid_gid_extent), cmp_map_id);
}

u32 map_id_range_up(struct uid_gid_map *map, u32 id, u32 count)
{
	struct uid_gid_extent *extent;
	unsigned extents = map->nr_extents;
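	/* Pairs with the smp_wmb() in map_write(). */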
	smp_rmb();

	if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
		extent = map_id_range_up_base(extents, map, id, count);
	else
		extent = map_id_range_up_max(extents, map, id, count);

	/* Map the id or note failure */
	if (extent)
		id = (id - extent->lower_first) + extent->first;
	else
		id = (u32) -1;

	return id;
}

u32 map_id_up(struct uid_gid_map *map, u32 id)
{
	return map_id_range_up(map, id, 1);
}

/**
 * make_kuid - Map a user-namespace uid pair into a kuid.
 * @ns: User namespace that the uid is in
 * @uid: User identifier
 *
 * Maps a user-namespace uid pair into a kernel internal kuid,
 * and returns that kuid.
 *
 * When there is no mapping defined for the user-namespace uid
 * pair INVALID_UID is returned. Callers are expected to test
 * for and handle INVALID_UID being returned. INVALID_UID
 * may be tested for using uid_valid().
 */
kuid_t make_kuid(struct user_namespace *ns, uid_t uid)
{
	/* Map the uid to a global kernel uid */
	return KUIDT_INIT(map_id_down(&ns->uid_map, uid));
}
EXPORT_SYMBOL(make_kuid);

/**
 * from_kuid - Create a uid from a kuid user-namespace pair.
 * @targ: The user namespace we want a uid in.
 * @kuid: The kernel internal uid to start with.
 *
 * Map @kuid into the user-namespace specified by @targ and
 * return the resulting uid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * If @kuid has no mapping in @targ (uid_t)-1 is returned.
 */
uid_t from_kuid(struct user_namespace *targ, kuid_t kuid)
{
	/* Map the uid from a global kernel uid */
	return map_id_up(&targ->uid_map, __kuid_val(kuid));
}
EXPORT_SYMBOL(from_kuid);

/**
 * from_kuid_munged - Create a uid from a kuid user-namespace pair.
 * @targ: The user namespace we want a uid in.
 * @kuid: The kernel internal uid to start with.
 *
 * Map @kuid into the user-namespace specified by @targ and
 * return the resulting uid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * Unlike from_kuid, from_kuid_munged never fails and always
 * returns a valid uid. This makes from_kuid_munged appropriate
 * for use in syscalls like stat and getuid where failing the
 * system call and failing to provide a valid uid are not
 * options.
 *
 * If @kuid has no mapping in @targ overflowuid is returned.
 */
uid_t from_kuid_munged(struct user_namespace *targ, kuid_t kuid)
{
	uid_t uid;
	uid = from_kuid(targ, kuid);

	if (uid == (uid_t) -1)
		uid = overflowuid;
	return uid;
}
EXPORT_SYMBOL(from_kuid_munged);

/**
 * make_kgid - Map a user-namespace gid pair into a kgid.
 * @ns: User namespace that the gid is in
 * @gid: group identifier
 *
 * Maps a user-namespace gid pair into a kernel internal kgid,
 * and returns that kgid.
 *
 * When there is no mapping defined for the user-namespace gid
 * pair INVALID_GID is returned. Callers are expected to test
 * for and handle INVALID_GID being returned. INVALID_GID may be
 * tested for using gid_valid().
 */
kgid_t make_kgid(struct user_namespace *ns, gid_t gid)
{
	/* Map the gid to a global kernel gid */
	return KGIDT_INIT(map_id_down(&ns->gid_map, gid));
}
EXPORT_SYMBOL(make_kgid);

/**
 * from_kgid - Create a gid from a kgid user-namespace pair.
 * @targ: The user namespace we want a gid in.
 * @kgid: The kernel internal gid to start with.
 *
 * Map @kgid into the user-namespace specified by @targ and
 * return the resulting gid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * If @kgid has no mapping in @targ (gid_t)-1 is returned.
 */
gid_t from_kgid(struct user_namespace *targ, kgid_t kgid)
{
	/* Map the gid from a global kernel gid */
	return map_id_up(&targ->gid_map, __kgid_val(kgid));
}
EXPORT_SYMBOL(from_kgid);

/**
 * from_kgid_munged - Create a gid from a kgid user-namespace pair.
 * @targ: The user namespace we want a gid in.
 * @kgid: The kernel internal gid to start with.
 *
 * Map @kgid into the user-namespace specified by @targ and
 * return the resulting gid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * Unlike from_kgid, from_kgid_munged never fails and always
 * returns a valid gid. This makes from_kgid_munged appropriate
 * for use in syscalls like stat and getgid where failing the
 * system call and failing to provide a valid gid are not options.
 *
 * If @kgid has no mapping in @targ overflowgid is returned.
 */
gid_t from_kgid_munged(struct user_namespace *targ, kgid_t kgid)
{
	gid_t gid;
	gid = from_kgid(targ, kgid);

	if (gid == (gid_t) -1)
		gid = overflowgid;
	return gid;
}
EXPORT_SYMBOL(from_kgid_munged);

/**
 * make_kprojid - Map a user-namespace projid pair into a kprojid.
 * @ns: User namespace that the projid is in
 * @projid: Project identifier
 *
 * Maps a user-namespace projid pair into a kernel internal kprojid,
 * and returns that kprojid.
 *
 * When there is no mapping defined for the user-namespace projid
 * pair INVALID_PROJID is returned. Callers are expected to test
 * for and handle INVALID_PROJID being returned. INVALID_PROJID
 * may be tested for using projid_valid().
 */
kprojid_t make_kprojid(struct user_namespace *ns, projid_t projid)
{
	/* Map the projid to a global kernel projid */
	return KPROJIDT_INIT(map_id_down(&ns->projid_map, projid));
}
EXPORT_SYMBOL(make_kprojid);

/**
 * from_kprojid - Create a projid from a kprojid user-namespace pair.
 * @targ: The user namespace we want a projid in.
 * @kprojid: The kernel internal project identifier to start with.
 *
 * Map @kprojid into the user-namespace specified by @targ and
 * return the resulting projid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * If @kprojid has no mapping in @targ (projid_t)-1 is returned.
 */
projid_t from_kprojid(struct user_namespace *targ, kprojid_t kprojid)
{
	/* Map the projid from a global kernel projid */
	return map_id_up(&targ->projid_map, __kprojid_val(kprojid));
}
EXPORT_SYMBOL(from_kprojid);

/**
 * from_kprojid_munged - Create a projid from a kprojid user-namespace pair.
 * @targ: The user namespace we want a projid in.
 * @kprojid: The kernel internal projid to start with.
 *
 * Map @kprojid into the user-namespace specified by @targ and
 * return the resulting projid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * Unlike from_kprojid, from_kprojid_munged never fails and always
 * returns a valid projid. This makes from_kprojid_munged
 * appropriate for use in syscalls like stat where failing the
 * system call and failing to provide a valid projid are not
 * options.
 *
 * If @kprojid has no mapping in @targ OVERFLOW_PROJID is returned.
 */
projid_t from_kprojid_munged(struct user_namespace *targ, kprojid_t kprojid)
{
	projid_t projid;
	projid = from_kprojid(targ, kprojid);

	if (projid == (projid_t) -1)
		projid = OVERFLOW_PROJID;
	return projid;
}
EXPORT_SYMBOL(from_kprojid_munged);

static int uid_m_show(struct seq_file *seq, void *v)
{
	struct user_namespace *ns = seq->private;
	struct uid_gid_extent *extent = v;
	struct user_namespace *lower_ns;
	uid_t lower;

	lower_ns = seq_user_ns(seq);
	if ((lower_ns == ns) && lower_ns->parent)
		lower_ns = lower_ns->parent;

	lower = from_kuid(lower_ns, KUIDT_INIT(extent->lower_first));

	seq_printf(seq, "%10u %10u %10u\n",
		   extent->first,
		   lower,
		   extent->count);

	return 0;
}

static int gid_m_show(struct seq_file *seq, void *v)
{
	struct user_namespace *ns = seq->private;
	struct uid_gid_extent *extent = v;
	struct user_namespace *lower_ns;
	gid_t lower;

	lower_ns = seq_user_ns(seq);
	if ((lower_ns == ns) && lower_ns->parent)
		lower_ns = lower_ns->parent;

	lower = from_kgid(lower_ns, KGIDT_INIT(extent->lower_first));

	seq_printf(seq, "%10u %10u %10u\n",
		   extent->first,
		   lower,
		   extent->count);

	return 0;
}

static int projid_m_show(struct seq_file *seq, void *v)
{
	struct user_namespace *ns = seq->private;
	struct uid_gid_extent *extent = v;
	struct user_namespace *lower_ns;
	projid_t lower;

	lower_ns = seq_user_ns(seq);
	if ((lower_ns == ns) && lower_ns->parent)
		lower_ns = lower_ns->parent;

	lower = from_kprojid(lower_ns, KPROJIDT_INIT(extent->lower_first));

	seq_printf(seq, "%10u %10u %10u\n",
		   extent->first,
		   lower,
		   extent->count);

	return 0;
}

static void *m_start(struct seq_file *seq, loff_t *ppos,
		     struct uid_gid_map *map)
{
	loff_t pos = *ppos;
	unsigned extents = map->nr_extents;
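	/* Pairs with the smp_wmb() in map_write(). */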
	smp_rmb();

	if (pos >= extents)
		return NULL;

	if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
		return &map->extent[pos];

	return &map->forward[pos];
}

static void *uid_m_start(struct seq_file *seq, loff_t *ppos)
{
	struct user_namespace *ns = seq->private;

	return m_start(seq, ppos, &ns->uid_map);
}

static void *gid_m_start(struct seq_file *seq, loff_t *ppos)
{
	struct user_namespace *ns = seq->private;

	return m_start(seq, ppos, &ns->gid_map);
}

static void *projid_m_start(struct seq_file *seq, loff_t *ppos)
{
	struct user_namespace *ns = seq->private;

	return m_start(seq, ppos, &ns->projid_map);
}

static void *m_next(struct seq_file *seq, void *v, loff_t *pos)
{
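	/* Every ->start() indexes directly by position, so just restart it at the new offset. */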
	(*pos)++;
	return seq->op->start(seq, pos);
}

static void m_stop(struct seq_file *seq, void *v)
{
	return;
}

const struct seq_operations proc_uid_seq_operations = {
	.start = uid_m_start,
	.stop = m_stop,
	.next = m_next,
	.show = uid_m_show,
};

const struct seq_operations proc_gid_seq_operations = {
	.start = gid_m_start,
	.stop = m_stop,
	.next = m_next,
	.show = gid_m_show,
};

const struct seq_operations proc_projid_seq_operations = {
	.start = projid_m_start,
	.stop = m_stop,
	.next = m_next,
	.show = projid_m_show,
};

static bool mappings_overlap(struct uid_gid_map *new_map,
			     struct uid_gid_extent *extent)
{
	u32 upper_first, lower_first, upper_last, lower_last;
	unsigned idx;

	upper_first = extent->first;
	lower_first = extent->lower_first;
	upper_last = upper_first + extent->count - 1;
	lower_last = lower_first + extent->count - 1;

	for (idx = 0; idx < new_map->nr_extents; idx++) {
		u32 prev_upper_first, prev_lower_first;
		u32 prev_upper_last, prev_lower_last;
		struct uid_gid_extent *prev;

		if (new_map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
			prev = &new_map->extent[idx];
		else
			prev = &new_map->forward[idx];

		prev_upper_first = prev->first;
		prev_lower_first = prev->lower_first;
		prev_upper_last = prev_upper_first + prev->count - 1;
		prev_lower_last = prev_lower_first + prev->count - 1;

		/* Does the upper range intersect a previous extent? */
		if ((prev_upper_first <= upper_last) &&
		    (prev_upper_last >= upper_first))
			return true;

		/* Does the lower range intersect a previous extent? */
		if ((prev_lower_first <= lower_last) &&
		    (prev_lower_last >= lower_first))
			return true;
	}
	return false;
}

/*
 * insert_extent - Safely insert a new idmap extent into struct uid_gid_map.
 * Takes care to allocate a 4K block of memory if the number of mappings exceeds
 * UID_GID_MAP_MAX_BASE_EXTENTS.
 */
static int insert_extent(struct uid_gid_map *map, struct uid_gid_extent *extent)
{
	struct uid_gid_extent *dest;

	if (map->nr_extents == UID_GID_MAP_MAX_BASE_EXTENTS) {
		struct uid_gid_extent *forward;

		/* Allocate memory for 340 mappings. */
		forward = kmalloc_objs(struct uid_gid_extent,
				       UID_GID_MAP_MAX_EXTENTS);
		if (!forward)
			return -ENOMEM;

		/* Copy over memory. Only set up memory for the forward pointer.
		 * Defer the memory setup for the reverse pointer.
		 */
		memcpy(forward, map->extent,
		       map->nr_extents * sizeof(map->extent[0]));

		map->forward = forward;
		map->reverse = NULL;
	}

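	/* Below UID_GID_MAP_MAX_BASE_EXTENTS the inline extent array is still
	 * in use; past it, new entries go into the allocated forward array.
	 */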
	if (map->nr_extents < UID_GID_MAP_MAX_BASE_EXTENTS)
		dest = &map->extent[map->nr_extents];
	else
		dest = &map->forward[map->nr_extents];

	*dest = *extent;
	map->nr_extents++;
	return 0;
}

/* cmp function to sort() forward mappings */
static int cmp_extents_forward(const void *a, const void *b)
{
	const struct uid_gid_extent *e1 = a;
	const struct uid_gid_extent *e2 = b;

	if (e1->first < e2->first)
		return -1;

	if (e1->first > e2->first)
		return 1;

	return 0;
}

/* cmp function to sort() reverse mappings */
static int cmp_extents_reverse(const void *a, const void *b)
{
	const struct uid_gid_extent *e1 = a;
	const struct uid_gid_extent *e2 = b;

	if (e1->lower_first < e2->lower_first)
		return -1;

	if (e1->lower_first > e2->lower_first)
		return 1;

	return 0;
}

/*
 * sort_idmaps - Sorts an array of idmap entries.
 * Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
 */
static int sort_idmaps(struct uid_gid_map *map)
{
	if (map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
		return 0;

	/* Sort forward array. */
	sort(map->forward, map->nr_extents, sizeof(struct uid_gid_extent),
	     cmp_extents_forward, NULL);

	/* Only copy the memory from forward we actually need. */
	map->reverse = kmemdup_array(map->forward, map->nr_extents,
				     sizeof(struct uid_gid_extent), GFP_KERNEL);
	if (!map->reverse)
		return -ENOMEM;

	/* Sort reverse array. */
	sort(map->reverse, map->nr_extents, sizeof(struct uid_gid_extent),
	     cmp_extents_reverse, NULL);

	return 0;
}

/**
 * verify_root_map() - check the uid 0 mapping
 * @file: idmapping file
 * @map_ns: user namespace of the target process
 * @new_map: requested idmap
 *
 * If a process requests mapping parent uid 0 into the new ns, verify that the
 * process writing the map had the CAP_SETFCAP capability as the target process
 * will be able to write fscaps that are valid in ancestor user namespaces.
 *
 * Return: true if the mapping is allowed, false if not.
 */
static bool verify_root_map(const struct file *file,
			    struct user_namespace *map_ns,
			    struct uid_gid_map *new_map)
{
	int idx;
	const struct user_namespace *file_ns = file->f_cred->user_ns;
	struct uid_gid_extent *extent0 = NULL;

	for (idx = 0; idx < new_map->nr_extents; idx++) {
		if (new_map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
			extent0 = &new_map->extent[idx];
		else
			extent0 = &new_map->forward[idx];
		if (extent0->lower_first == 0)
			break;

		extent0 = NULL;
	}

	if (!extent0)
		return true;

	if (map_ns == file_ns) {
		/* The process unshared its ns and is writing to its own
		 * /proc/self/uid_map. User already has full capabilities in
		 * the new namespace. Verify that the parent had CAP_SETFCAP
		 * when it unshared.
		 */
		if (!file_ns->parent_could_setfcap)
			return false;
	} else {
		/* Process p1 is writing to uid_map of p2, who is in a child
		 * user namespace to p1's. Verify that the opener of the map
		 * file has CAP_SETFCAP against the parent of the new map
		 * namespace.
		 */
		if (!file_ns_capable(file, map_ns->parent, CAP_SETFCAP))
			return false;
	}

	return true;
}

static ssize_t map_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos,
			 int cap_setid,
			 struct uid_gid_map *map,
			 struct uid_gid_map *parent_map)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *map_ns = seq->private;
	struct uid_gid_map new_map;
	unsigned idx;
	struct uid_gid_extent extent;
	char *kbuf, *pos, *next_line;
	ssize_t ret;

	/* Only allow < page size writes at the beginning of the file */
	if ((*ppos != 0) || (count >= PAGE_SIZE))
		return -EINVAL;

	/* Slurp in the user data */
	kbuf = memdup_user_nul(buf, count);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	/*
	 * The userns_state_mutex serializes all writes to any given map.
	 *
	 * Any map is only ever written once.
	 *
	 * An id map fits within 1 cache line on most architectures.
	 *
	 * On read nothing needs to be done unless you are on an
	 * architecture with a crazy cache coherency model like alpha.
	 *
	 * There is a one time data dependency between reading the
	 * count of the extents and the values of the extents. The
	 * desired behavior is to see the values of the extents that
	 * were written before the count of the extents.
	 *
	 * To achieve this smp_wmb() is used to guarantee the write
	 * order and smp_rmb() is used to guarantee that we don't have
	 * crazy architectures returning stale data.
	 */
	mutex_lock(&userns_state_mutex);

	memset(&new_map, 0, sizeof(struct uid_gid_map));

	ret = -EPERM;
	/* Only allow one successful write to the map */
	if (map->nr_extents != 0)
		goto out;

	/*
	 * Adjusting namespace settings requires capabilities on the target.
	 */
	if (cap_valid(cap_setid) && !file_ns_capable(file, map_ns, CAP_SYS_ADMIN))
		goto out;

	/* Parse the user data */
	ret = -EINVAL;
	pos = kbuf;
	for (; pos; pos = next_line) {

		/* Find the end of line and ensure I don't look past it */
		next_line = strchr(pos, '\n');
		if (next_line) {
			*next_line = '\0';
			next_line++;
			if (*next_line == '\0')
				next_line = NULL;
		}

		pos = skip_spaces(pos);
		extent.first = simple_strtoul(pos, &pos, 10);
		if (!isspace(*pos))
			goto out;

		pos = skip_spaces(pos);
		extent.lower_first = simple_strtoul(pos, &pos, 10);
		if (!isspace(*pos))
			goto out;

		pos = skip_spaces(pos);
		extent.count = simple_strtoul(pos, &pos, 10);
		if (*pos && !isspace(*pos))
			goto out;

		/* Verify there is not trailing junk on the line */
		pos = skip_spaces(pos);
		if (*pos != '\0')
			goto out;

		/* Verify we have been given valid starting values */
		if ((extent.first == (u32) -1) ||
		    (extent.lower_first == (u32) -1))
			goto out;

		/* Verify count is not zero and does not cause the
		 * extent to wrap
		 */
		if ((extent.first + extent.count) <= extent.first)
			goto out;
		if ((extent.lower_first + extent.count) <=
		    extent.lower_first)
			goto out;

		/* Do the ranges in extent overlap any previous extents? */
		if (mappings_overlap(&new_map, &extent))
			goto out;

		if ((new_map.nr_extents + 1) == UID_GID_MAP_MAX_EXTENTS &&
		    (next_line != NULL))
			goto out;

		ret = insert_extent(&new_map, &extent);
		if (ret < 0)
			goto out;
		ret = -EINVAL;
	}
	/* Be very certain the new map actually exists */
	if (new_map.nr_extents == 0)
		goto out;

	ret = -EPERM;
	/* Validate the user is allowed to use the user ids being mapped to. */
	if (!new_idmap_permitted(file, map_ns, cap_setid, &new_map))
		goto out;

	ret = -EPERM;
	/* Map the lower ids from the parent user namespace to the
	 * kernel global id space.
	 */
	for (idx = 0; idx < new_map.nr_extents; idx++) {
		struct uid_gid_extent *e;
		u32 lower_first;

		if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
			e = &new_map.extent[idx];
		else
			e = &new_map.forward[idx];

		lower_first = map_id_range_down(parent_map,
						e->lower_first,
						e->count);

		/* Fail if we can not map the specified extent to
		 * the kernel global id space.
		 */
		if (lower_first == (u32) -1)
			goto out;

		e->lower_first = lower_first;
	}

	/*
	 * If we want to use binary search for lookup, this clones the extent
	 * array and sorts both copies.
	 */
	ret = sort_idmaps(&new_map);
	if (ret < 0)
		goto out;

	/* Install the map */
	if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) {
		memcpy(map->extent, new_map.extent,
		       new_map.nr_extents * sizeof(new_map.extent[0]));
	} else {
		map->forward = new_map.forward;
		map->reverse = new_map.reverse;
	}
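	/* Pairs with the smp_rmb() in the map readers: make the extents visible before the count. */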
	smp_wmb();
	map->nr_extents = new_map.nr_extents;

	*ppos = count;
	ret = count;
out:
	if (ret < 0 && new_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
		kfree(new_map.forward);
		kfree(new_map.reverse);
		map->forward = NULL;
		map->reverse = NULL;
		map->nr_extents = 0;
	}

	mutex_unlock(&userns_state_mutex);
	kfree(kbuf);
	return ret;
}

ssize_t proc_uid_map_write(struct file *file, const char __user *buf,
			   size_t size, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	struct user_namespace *seq_ns = seq_user_ns(seq);

	if (!ns->parent)
		return -EPERM;

	if ((seq_ns != ns) && (seq_ns != ns->parent))
		return -EPERM;

	return map_write(file, buf, size, ppos, CAP_SETUID,
			 &ns->uid_map, &ns->parent->uid_map);
}

ssize_t proc_gid_map_write(struct file *file, const char __user *buf,
			   size_t size, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	struct user_namespace *seq_ns = seq_user_ns(seq);

	if (!ns->parent)
		return -EPERM;

	if ((seq_ns != ns) && (seq_ns != ns->parent))
		return -EPERM;

	return map_write(file, buf, size, ppos, CAP_SETGID,
			 &ns->gid_map, &ns->parent->gid_map);
}

ssize_t proc_projid_map_write(struct file *file, const char __user *buf,
			      size_t size, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	struct user_namespace *seq_ns = seq_user_ns(seq);

	if (!ns->parent)
		return -EPERM;

	if ((seq_ns != ns) && (seq_ns != ns->parent))
		return -EPERM;

	/* Anyone can set any valid project id; no capability needed. */
	return map_write(file, buf, size, ppos, -1,
			 &ns->projid_map, &ns->parent->projid_map);
}

static bool new_idmap_permitted(const struct file *file,
				struct user_namespace *ns, int cap_setid,
				struct uid_gid_map *new_map)
{
	const struct cred *cred = file->f_cred;

	if (cap_setid == CAP_SETUID && !verify_root_map(file, ns, new_map))
		return false;

	/* Don't allow mappings that would allow anything that wouldn't
	 * be allowed without the establishment of unprivileged mappings.
	 */
	if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1) &&
	    uid_eq(ns->owner, cred->euid)) {
		u32 id = new_map->extent[0].lower_first;
		if (cap_setid == CAP_SETUID) {
			kuid_t uid = make_kuid(ns->parent, id);
			if (uid_eq(uid, cred->euid))
				return true;
		} else if (cap_setid == CAP_SETGID) {
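			/* An unprivileged gid mapping of the owner's own egid
			 * is only safe once setgroups() has been disabled for
			 * this namespace.
			 */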
			kgid_t gid = make_kgid(ns->parent, id);
			if (!(ns->flags & USERNS_SETGROUPS_ALLOWED) &&
			    gid_eq(gid, cred->egid))
				return true;
		}
	}

	/* Allow anyone to set a mapping that doesn't require privilege */
	if (!cap_valid(cap_setid))
		return true;

	/* Allow the specified ids if we have the appropriate capability
	 * (CAP_SETUID or CAP_SETGID) over the parent user namespace.
	 * And the opener of the id file also has the appropriate capability.
	 */
	if (ns_capable(ns->parent, cap_setid) &&
	    file_ns_capable(file, ns->parent, cap_setid))
		return true;

	return false;
}

int proc_setgroups_show(struct seq_file *seq, void *v)
{
	struct user_namespace *ns = seq->private;
	unsigned long userns_flags = READ_ONCE(ns->flags);

	seq_printf(seq, "%s\n",
		   (userns_flags & USERNS_SETGROUPS_ALLOWED) ?
		   "allow" : "deny");
	return 0;
}

ssize_t proc_setgroups_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	char kbuf[8], *pos;
	bool setgroups_allowed;
	ssize_t ret;

	/* Only allow a very narrow range of strings to be written */
	ret = -EINVAL;
	if ((*ppos != 0) || (count >= sizeof(kbuf)))
		goto out;

	/* What was written? */
	ret = -EFAULT;
	if (copy_from_user(kbuf, buf, count))
		goto out;
	kbuf[count] = '\0';
	pos = kbuf;

	/* What is being requested? */
	ret = -EINVAL;
	if (strncmp(pos, "allow", 5) == 0) {
		pos += 5;
		setgroups_allowed = true;
	}
	else if (strncmp(pos, "deny", 4) == 0) {
		pos += 4;
		setgroups_allowed = false;
	}
	else
		goto out;

	/* Verify there is not trailing junk on the line */
	pos = skip_spaces(pos);
	if (*pos != '\0')
		goto out;

	ret = -EPERM;
	mutex_lock(&userns_state_mutex);
	if (setgroups_allowed) {
		/* Enabling setgroups after setgroups has been disabled
		 * is not allowed.
		 */
		if (!(ns->flags & USERNS_SETGROUPS_ALLOWED))
			goto out_unlock;
	} else {
		/* Permanently disabling setgroups after setgroups has
		 * been enabled by writing the gid_map is not allowed.
		 */
		if (ns->gid_map.nr_extents != 0)
			goto out_unlock;
		ns->flags &= ~USERNS_SETGROUPS_ALLOWED;
	}
	mutex_unlock(&userns_state_mutex);

	/* Report a successful write */
	*ppos = count;
	ret = count;
out:
	return ret;
out_unlock:
	mutex_unlock(&userns_state_mutex);
	goto out;
}

bool userns_may_setgroups(const struct user_namespace *ns)
{
	bool allowed;

	mutex_lock(&userns_state_mutex);
	/* It is not safe to use setgroups until a gid mapping in
	 * the user namespace has been established.
	 */
	allowed = ns->gid_map.nr_extents != 0;
	/* Is setgroups allowed? */
	allowed = allowed && (ns->flags & USERNS_SETGROUPS_ALLOWED);
	mutex_unlock(&userns_state_mutex);

	return allowed;
}

/*
 * Returns true if @child is the same namespace or a descendant of
 * @ancestor.
 */
bool in_userns(const struct user_namespace *ancestor,
	       const struct user_namespace *child)
{
	const struct user_namespace *ns;
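	/* Levels strictly increase from ancestor to descendant, so walking
	 * parents until the levels match is sufficient.
	 */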
	for (ns = child; ns->level > ancestor->level; ns = ns->parent)
		;
	return (ns == ancestor);
}

bool current_in_userns(const struct user_namespace *target_ns)
{
	return in_userns(target_ns, current_user_ns());
}
EXPORT_SYMBOL(current_in_userns);

static struct ns_common *userns_get(struct task_struct *task)
{
	struct user_namespace *user_ns;

	rcu_read_lock();
	user_ns = get_user_ns(__task_cred(task)->user_ns);
	rcu_read_unlock();

	return user_ns ? &user_ns->ns : NULL;
}

static void userns_put(struct ns_common *ns)
{
	put_user_ns(to_user_ns(ns));
}

static int userns_install(struct nsset *nsset, struct ns_common *ns)
{
	struct user_namespace *user_ns = to_user_ns(ns);
	struct cred *cred;

	/* Don't allow gaining capabilities by reentering
	 * the same user namespace.
	 */
	if (user_ns == current_user_ns())
		return -EINVAL;

	/* Tasks that share a thread group must share a user namespace */
	if (!thread_group_empty(current))
		return -EINVAL;

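	/* The fs_struct must not be shared with other tasks, so that the
	 * capabilities gained over the namespace cannot affect them through
	 * a shared root or working directory.
	 */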
	if (current->fs->users != 1)
		return -EINVAL;

	if (!ns_capable(user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cred = nsset_cred(nsset);
	if (!cred)
		return -EINVAL;

	put_user_ns(cred->user_ns);
	set_cred_user_ns(cred, get_user_ns(user_ns));

	if (set_cred_ucounts(cred) < 0)
		return -EINVAL;

	return 0;
}

struct ns_common *ns_get_owner(struct ns_common *ns)
{
	struct user_namespace *my_user_ns = current_user_ns();
	struct user_namespace *owner, *p;

	/* See if the owner is in the current user namespace */
	owner = p = ns->ops->owner(ns);
	for (;;) {
		if (!p)
			return ERR_PTR(-EPERM);
		if (p == my_user_ns)
			break;
		p = p->parent;
	}

	return &get_user_ns(owner)->ns;
}

static struct user_namespace *userns_owner(struct ns_common *ns)
{
	return to_user_ns(ns)->parent;
}

const struct proc_ns_operations userns_operations = {
	.name = "user",
	.get = userns_get,
	.put = userns_put,
	.install = userns_install,
	.owner = userns_owner,
	.get_parent = ns_get_owner,
};

static __init int user_namespaces_init(void)
{
	user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC | SLAB_ACCOUNT);
	ns_tree_add(&init_user_ns);
	return 0;
}
subsys_initcall(user_namespaces_init);