xref: /linux/kernel/user.c (revision ed3174d93c342b8b2eeba6bbd124707d55304a7b)
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files, etc. the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>

struct user_namespace init_user_ns = {
	.kref = {
		.refcount	= ATOMIC_INIT(2),
	},
	.root_user = &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))
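
/*
 * Worked example of the hash (values assume UIDHASH_BITS == 8, so
 * UIDHASH_SZ == 256 and UIDHASH_MASK == 255): uid 1000 maps to
 * ((1000 >> 8) + 1000) & 255 == (3 + 1000) & 255 == 235, i.e. bucket
 * 235 of the namespace's uidhash_table.  Folding the high bits back in
 * spreads uids that differ only above the mask across buckets.
 */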

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed.  Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that.
 */
static DEFINE_SPINLOCK(uidhash_lock);
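
/*
 * In practice this means the lock is only ever taken with interrupts
 * disabled: via spin_lock_irqsave()/spin_lock_irq(), or after an explicit
 * local_irq_save() as in free_uid() below; spin_lock_bh() is deliberately
 * avoided for the reason described above.
 */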

struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
#ifdef CONFIG_KEYS
	.uid_keyring	= &root_user_keyring,
	.session_keyring = &root_session_keyring,
#endif
#ifdef CONFIG_FAIR_USER_SCHED
	.tg		= &init_task_group,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

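/*
 * Look up a uid in one hash bucket.  On a hit, take a reference for the
 * caller, which must later be dropped with free_uid().
 */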
static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

#ifdef CONFIG_FAIR_USER_SCHED

static void sched_destroy_user(struct user_struct *up)
{
	sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
	int rc = 0;

	up->tg = sched_create_group();
	if (IS_ERR(up->tg))
		rc = -ENOMEM;

	return rc;
}

static void sched_switch_user(struct task_struct *p)
{
	sched_move_task(p);
}

#else	/* CONFIG_FAIR_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }
static void sched_switch_user(struct task_struct *p) { }

#endif	/* CONFIG_FAIR_USER_SCHED */

#if defined(CONFIG_FAIR_USER_SCHED) && defined(CONFIG_SYSFS)

static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
static DEFINE_MUTEX(uids_mutex);

static inline void uids_mutex_lock(void)
{
	mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
	mutex_unlock(&uids_mutex);
}

/* uid directory attributes */
static ssize_t cpu_shares_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
}

static ssize_t cpu_shares_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long shares;
	int rc;

	/* Reject malformed input; 'shares' would otherwise be used
	 * uninitialized when sscanf() matches nothing. */
	if (sscanf(buf, "%lu", &shares) != 1)
		return -EINVAL;

	rc = sched_group_set_shares(up->tg, shares);

	return (rc ? rc : size);
}
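
/*
 * Example from userspace (illustrative; assumes CONFIG_FAIR_USER_SCHED=y,
 * that a task owned by uid 1000 already exists, and that the default
 * share is 1024):
 *
 *	# cat /sys/kernel/uids/1000/cpu_share
 *	1024
 *	# echo 2048 > /sys/kernel/uids/1000/cpu_share
 *
 * which doubles the CPU share of uid 1000's task group.
 */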

static struct kobj_attribute cpu_share_attr =
	__ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);

/* default attributes per uid directory */
static struct attribute *uids_attributes[] = {
	&cpu_share_attr.attr,
	NULL
};

/* the lifetime of user_struct is not managed by the core (for now),
 * so the kobject release callback has nothing to free */
static void uids_release(struct kobject *kobj)
{
}

static struct kobj_type uids_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = uids_attributes,
	.release = uids_release,
};

/* create /sys/kernel/uids/<uid>/cpu_share file for this user */
static int uids_user_create(struct user_struct *up)
{
	struct kobject *kobj = &up->kobj;
	int error;

	memset(kobj, 0, sizeof(struct kobject));
	kobj->kset = uids_kset;
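
	/* Note: even on failure, kobject_init_and_add() leaves the kobject
	 * initialized, so the error path must drop it with kobject_put(). */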
	error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
	if (error) {
		kobject_put(kobj);
		goto done;
	}

	kobject_uevent(kobj, KOBJ_ADD);
done:
	return error;
}

/* create these entries in sysfs:
 * 	"/sys/kernel/uids" directory
 * 	"/sys/kernel/uids/0" directory (for root user)
 * 	"/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_sysfs_init(void)
{
	uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
	if (!uids_kset)
		return -ENOMEM;

	return uids_user_create(&root_user);
}

/* work function to remove the sysfs directory for a user and free up
 * the corresponding structures.
 */
static void remove_user_sysfs_dir(struct work_struct *w)
{
	struct user_struct *up = container_of(w, struct user_struct, work);
	unsigned long flags;
	int remove_user = 0;

	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
	 * atomic.
	 */
	uids_mutex_lock();

	local_irq_save(flags);

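	/* free_uid() took an extra reference on up before scheduling this
	 * work, so drop it again here.  If it falls to zero, unhash the
	 * user and tear everything down; if alloc_uid() resurrected the
	 * entry in the meantime, back off and do nothing. */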
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		remove_user = 1;
		spin_unlock_irqrestore(&uidhash_lock, flags);
	} else {
		local_irq_restore(flags);
	}

	if (!remove_user)
		goto done;

	kobject_uevent(&up->kobj, KOBJ_REMOVE);
	kobject_del(&up->kobj);
	kobject_put(&up->kobj);

	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);

done:
	uids_mutex_unlock();
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock is released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	/* restore the count */
	atomic_inc(&up->__count);
	spin_unlock_irqrestore(&uidhash_lock, flags);

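	/* Defer the teardown to process context: kobject_del() and friends
	 * may sleep, so they cannot run here with interrupts disabled or
	 * from softirq context. */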
	INIT_WORK(&up->work, remove_user_sysfs_dir);
	schedule_work(&up->work);
}

#else	/* CONFIG_FAIR_USER_SCHED && CONFIG_SYSFS */

int uids_sysfs_init(void) { return 0; }
static inline int uids_user_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

#endif	/* CONFIG_FAIR_USER_SCHED && CONFIG_SYSFS */

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.
 * The caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current->nsproxy->user_ns;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

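/*
 * Typical use (a hypothetical caller, for illustration only):
 *
 *	struct user_struct *u = find_user(uid);
 *	if (u) {
 *		printk(KERN_DEBUG "uid %u: %d processes\n",
 *		       uid, atomic_read(&u->processes));
 *		free_uid(u);
 *	}
 */
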
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

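	/* atomic_dec_and_lock() takes uidhash_lock without disabling
	 * interrupts itself, so disable them by hand first; see the
	 * locking rules documented above uidhash_lock. */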
	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}

struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up, *new;

	/* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
	 * atomic.
	 */
	uids_mutex_lock();

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);
		atomic_set(&new->processes, 0);
		atomic_set(&new->files, 0);
		atomic_set(&new->sigpending, 0);
#ifdef CONFIG_INOTIFY_USER
		atomic_set(&new->inotify_watches, 0);
		atomic_set(&new->inotify_devs, 0);
#endif
#ifdef CONFIG_POSIX_MQUEUE
		new->mq_bytes = 0;
#endif
		new->locked_shm = 0;

		if (alloc_uid_keyring(new, current) < 0)
			goto out_free_user;

		if (sched_create_user(new) < 0)
			goto out_put_keys;

		if (uids_user_create(new))
			goto out_destroy_sched;

		/*
		 * Before adding this, check whether we raced with
		 * someone else adding the same user.
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* This case is not possible when CONFIG_FAIR_USER_SCHED
			 * is defined, since we serialize alloc_uid() using
			 * uids_mutex.  Hence there is no need to call
			 * sched_destroy_user() or remove_user_sysfs_dir().
			 */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	uids_mutex_unlock();

	return up;

out_destroy_sched:
	sched_destroy_user(new);
out_put_keys:
	key_put(new->uid_keyring);
	key_put(new->session_keyring);
out_free_user:
	kmem_cache_free(uid_cachep, new);
out_unlock:
	uids_mutex_unlock();
	return NULL;
}

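/*
 * Hypothetical caller sketch, for illustration only (the real callers
 * live in the fork() and setuid() paths):
 *
 *	struct user_struct *u = alloc_uid(current->nsproxy->user_ns, uid);
 *	if (!u)
 *		return -ENOMEM;
 *
 * and later a matching free_uid(u) drops the reference.
 */
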
void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/* What if a process setreuid()'s and this brings the
	 * new uid over its NPROC rlimit?  We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it.  -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	switch_uid_keyring(new_user);
	current->user = new_user;
	sched_switch_user(current);

	/*
	 * We need to synchronize with __sigqueue_alloc()
	 * doing a get_uid(p->user).  If that saw the old
	 * user value, we need to wait until it has exited
	 * its critical region before we can free the old
	 * structure.
	 */
	smp_mb();
	spin_unlock_wait(&current->sighand->siglock);

	free_uid(old_user);
	suid_keys(current);
}

#ifdef CONFIG_USER_NS
void release_uids(struct user_namespace *ns)
{
	int i;
	unsigned long flags;
	struct hlist_head *head;
	struct hlist_node *nd;

	spin_lock_irqsave(&uidhash_lock, flags);
	/*
	 * Collapse the chains so that the user_struct-s will
	 * still be alive, but no longer in the hashes.  Subsequent
	 * free_uid() calls will free them.
	 */
	for (i = 0; i < UIDHASH_SZ; i++) {
		head = ns->uidhash_table + i;
		while (!hlist_empty(head)) {
			nd = head->first;
			hlist_del_init(nd);
		}
	}
	spin_unlock_irqrestore(&uidhash_lock, flags);

	free_uid(ns->root_user);
}
#endif	/* CONFIG_USER_NS */

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);