/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */
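
/*
 * Illustrative sketch (not part of this file): these per-user counters
 * are what fork-time limit checks consult.  Simplified from the
 * RLIMIT_NPROC test in kernel/fork.c:
 *
 *	if (atomic_read(&p->real_cred->user->processes) >=
 *			task_rlimit(p, RLIMIT_NPROC))
 *		goto bad_fork_free;
 *
 * (The real check also lets CAP_SYS_ADMIN/CAP_SYS_RESOURCE and the
 * init user exceed the limit.)
 */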

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
	.uid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.gid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.kref = {
		.refcount	= ATOMIC_INIT(3),
	},
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
};
EXPORT_SYMBOL_GPL(init_user_ns);
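
/*
 * How a mapping extent above is interpreted (illustrative sketch of the
 * lookup done in kernel/user_namespace.c): an ID in [first, first + count)
 * maps to the parent-namespace range starting at lower_first.  With
 * first == lower_first == 0 and count == 4294967295U (2^32 - 1), the
 * initial namespace maps every valid ID to itself:
 *
 *	if (id >= extent->first && id - extent->first < extent->count)
 *		mapped = extent->lower_first + (id - extent->first);
 *	else
 *		mapped = (u32) -1;	// no extent covers this ID
 */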

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))
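
/*
 * Worked example: with CONFIG_BASE_SMALL unset, UIDHASH_BITS is 7, so
 * UIDHASH_SZ is 128 and UIDHASH_MASK is 127.  For uid 1000:
 *
 *	((1000 >> 7) + 1000) & 127 == (7 + 1000) & 127 == 111
 *
 * Folding the high bits in before masking keeps UIDs that differ by a
 * multiple of UIDHASH_SZ apart: 1000, 1128 and 1256 would all land in
 * bucket 104 with a plain mask, but hash to 111, 112 and 113 here.
 */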

static struct kmem_cache *uid_cachep;
struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);
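
/*
 * Concretely (illustrative, not code from this file), the comment above
 * rules out the _bh lock variants.  A pattern like:
 *
 *	local_irq_save(flags);
 *	spin_lock_bh(&uidhash_lock);
 *	...
 *	spin_unlock_bh(&uidhash_lock);	// runs local_bh_enable()
 *	local_irq_restore(flags);
 *
 * would be a bug: spin_unlock_bh() may execute pending softirqs, which
 * can re-enable interrupts behind the caller's back.  This file
 * therefore uses the _irq/_irqsave variants throughout.
 */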

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm     = 0,
	.uid		= GLOBAL_ROOT_UID,
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

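/*
 * The atomic_inc() below cannot resurrect a dying user_struct: lookups
 * run under uidhash_lock, and free_uid() drops the final reference with
 * atomic_dec_and_lock() on the same lock, removing the entry from the
 * hash before the lock is released.  So any entry found here still has
 * a non-zero count.
 */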
static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
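
/*
 * Typical caller pattern (illustrative sketch; user_is_known() is a
 * made-up helper, not a kernel function):
 *
 *	static bool user_is_known(kuid_t uid)
 *	{
 *		struct user_struct *u = find_user(uid);
 *
 *		if (!u)
 *			return false;
 *		free_uid(u);	// drop the reference find_user() took
 *		return true;
 *	}
 */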
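
/*
 * atomic_dec_and_lock() below behaves like this simplified sketch of
 * lib/dec_and_lock.c:
 *
 *	if (atomic_add_unless(&up->__count, -1, 1))
 *		return 0;	// fast path: count stayed above zero, no lock taken
 *	spin_lock(&uidhash_lock);
 *	if (atomic_dec_and_test(&up->__count))
 *		return 1;	// count hit zero; lock stays held for the caller
 *	spin_unlock(&uidhash_lock);
 *	return 0;
 *
 * i.e. uidhash_lock is only taken when the last reference goes away,
 * keeping the common-case put lock-free.
 */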
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}

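/*
 * Find the user_struct for the given UID, creating it if necessary.
 * The allocation is done optimistically outside uidhash_lock, since
 * GFP_KERNEL may sleep; the hash is then re-checked under the lock in
 * case another task inserted the same UID in the meantime.
 */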
struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			return NULL;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* We lost the race: free ours, return the winner's */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;
}

static int __init uid_cache_init(void)
{
	int n;

	/* SLAB_PANIC: creation cannot fail, so no error check is needed */
	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);