xref: /linux/kernel/user.c (revision 18b19abc3709b109676ffd1f48dcd332c2e477d4)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files, etc. the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/sched/user.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/binfmts.h>
#include <linux/proc_ns.h>

#if IS_ENABLED(CONFIG_BINFMT_MISC)
struct binfmt_misc init_binfmt_misc = {
	.entries = LIST_HEAD_INIT(init_binfmt_misc.entries),
	.enabled = true,
	.entries_lock = __RW_LOCK_UNLOCKED(init_binfmt_misc.entries_lock),
};
EXPORT_SYMBOL_GPL(init_binfmt_misc);
#endif

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
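/*
 * Each of the maps below is a single identity extent: IDs
 * 0..4294967294 map to themselves. The count is 2^32 - 1 rather
 * than 2^32 because (uid_t)-1 is reserved as the invalid ID.
 */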
struct user_namespace init_user_ns = {
	.uid_map = {
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
			.nr_extents = 1,
		},
	},
	.gid_map = {
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
			.nr_extents = 1,
		},
	},
	.projid_map = {
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
			.nr_extents = 1,
		},
	},
	.ns.ns_type = ns_common_type(&init_user_ns),
	.ns.__ns_ref = REFCOUNT_INIT(3),
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
	.ns.inum = ns_init_inum(&init_user_ns),
#ifdef CONFIG_USER_NS
	.ns.ops = &userns_operations,
#endif
	.flags = USERNS_INIT_FLAGS,
#ifdef CONFIG_KEYS
	.keyring_name_list = LIST_HEAD_INIT(init_user_ns.keyring_name_list),
	.keyring_sem = __RWSEM_INITIALIZER(init_user_ns.keyring_sem),
#endif
#if IS_ENABLED(CONFIG_BINFMT_MISC)
	.binfmt_misc = &init_binfmt_misc,
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_BITS	(IS_ENABLED(CONFIG_BASE_SMALL) ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))
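/*
 * Example: with the default UIDHASH_BITS of 7, UID 1000 hashes to
 * ((1000 >> 7) + 1000) & 127 = (7 + 1000) & 127 = 111. Folding the
 * high bits into the low ones spreads clustered UIDs across buckets.
 */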

static struct kmem_cache *uid_cachep;
static struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that.
 */
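/*
 * Illustration (not a real call site): the constraint above rules out
 * spin_lock_bh() here, because a caller may legitimately do
 *
 *	local_irq_save(flags);
 *	free_uid(up);
 *	local_irq_restore(flags);
 *
 * and the nested local_bh_enable() would then run softirq callbacks
 * with interrupts unexpectedly re-enabled. Hence the irqsave/irqrestore
 * variants are used throughout.
 */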
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
	.__count	= REFCOUNT_INIT(1),
	.uid		= GLOBAL_ROOT_UID,
	.ratelimit	= RATELIMIT_STATE_INIT(root_user.ratelimit, 0, 0),
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;

	hlist_for_each_entry(user, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			refcount_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

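/*
 * epoll watch accounting lives in a per-user percpu counter. These
 * wrappers compile to no-ops when CONFIG_EPOLL is disabled, so callers
 * need no #ifdefs of their own.
 */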
static int user_epoll_alloc(struct user_struct *up)
{
#ifdef CONFIG_EPOLL
	return percpu_counter_init(&up->epoll_watches, 0, GFP_KERNEL);
#else
	return 0;
#endif
}

static void user_epoll_free(struct user_struct *up)
{
#ifdef CONFIG_EPOLL
	percpu_counter_destroy(&up->epoll_watches);
#endif
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit. Removing the user from the hash before dropping
 * the lock ensures that a concurrent uid_hash_find() cannot take a new
 * reference on the dying structure.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	user_epoll_free(up);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

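/*
 * Drop a reference. refcount_dec_and_lock_irqsave() only takes
 * uidhash_lock on the final put (when the count would hit zero), so
 * the common case stays lock-free; free_user() then unhashes and
 * frees the structure and releases the lock.
 */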
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	if (refcount_dec_and_lock_irqsave(&up->__count, &uidhash_lock, &flags))
		free_user(up, flags);
}
EXPORT_SYMBOL_GPL(free_uid);

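/*
 * Look up the user_struct for @uid, creating it if necessary. Note the
 * optimistic pattern: the GFP_KERNEL allocation may sleep and so is
 * done without the (IRQ-disabling) spinlock held, which means that
 * after allocating we must re-check the hash for a racing insertion of
 * the same UID and back out if one is found.
 */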
struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			return NULL;

		new->uid = uid;
		refcount_set(&new->__count, 1);
		if (user_epoll_alloc(new)) {
			kmem_cache_free(uid_cachep, new);
			return NULL;
		}
		ratelimit_state_init(&new->ratelimit, HZ, 100);
		ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already.
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			user_epoll_free(new);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;
}

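/*
 * Boot-time setup. SLAB_PANIC means a failed cache creation panics
 * rather than returning NULL, so the result needs no error check;
 * root_user is hashed immediately because the init task's cred
 * already holds a reference to it (see the REFCOUNT_INIT(1) above).
 */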
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	if (user_epoll_alloc(&root_user))
		panic("root_user epoll percpu counter alloc failed");

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}
subsys_initcall(uid_cache_init);