xref: /linux/kernel/user.c (revision 7fc2cd2e4b398c57c9cf961cfea05eadbf34c05c)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files, etc. the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */
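
/*
 * For orientation, a simplified sketch of the user_struct fields this
 * file touches; the real definition lives in <linux/sched/user.h> and
 * has more members than shown here:
 *
 *	struct user_struct {
 *		refcount_t		__count;	// usage count
 *		kuid_t			uid;
 *		struct percpu_counter	epoll_watches;	// CONFIG_EPOLL only
 *		struct ratelimit_state	ratelimit;
 *		struct hlist_node	uidhash_node;	// uidhash_table linkage
 *		...
 *	};
 */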

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/sched/user.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/binfmts.h>
#include <linux/proc_ns.h>

#if IS_ENABLED(CONFIG_BINFMT_MISC)
struct binfmt_misc init_binfmt_misc = {
	.entries = LIST_HEAD_INIT(init_binfmt_misc.entries),
	.enabled = true,
	.entries_lock = __RW_LOCK_UNLOCKED(init_binfmt_misc.entries_lock),
};
EXPORT_SYMBOL_GPL(init_binfmt_misc);
#endif

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
	.ns = NS_COMMON_INIT(init_user_ns),
	.uid_map = {
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
			.nr_extents = 1,
		},
	},
	.gid_map = {
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
			.nr_extents = 1,
		},
	},
	.projid_map = {
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
			.nr_extents = 1,
		},
	},
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
	.flags = USERNS_INIT_FLAGS,
#ifdef CONFIG_KEYS
	.keyring_name_list = LIST_HEAD_INIT(init_user_ns.keyring_name_list),
	.keyring_sem = __RWSEM_INITIALIZER(init_user_ns.keyring_sem),
#endif
#if IS_ENABLED(CONFIG_BINFMT_MISC)
	.binfmt_misc = &init_binfmt_misc,
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);
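
/*
 * Note: each map above holds a single identity extent (first == 0,
 * lower_first == 0, count == 4294967295 == UINT_MAX), so the initial
 * namespace maps every valid id to itself: uid 1000 seen from
 * init_user_ns is kuid 1000.  (A sketch of the semantics, not new
 * behaviour.)
 */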

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_BITS	(IS_ENABLED(CONFIG_BASE_SMALL) ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))
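
/*
 * Worked example (assuming CONFIG_BASE_SMALL is off, so UIDHASH_BITS is
 * 7 and UIDHASH_MASK is 127):
 *
 *	__uidhashfn(1000) = ((1000 >> 7) + 1000) & 127
 *	                  = (7 + 1000) & 127
 *	                  = 111
 *
 * while uid 1128, which is equal to 1000 modulo UIDHASH_SZ, gets
 * (8 + 1128) & 127 = 112.  Folding the high bits in helps spread uids
 * that collide modulo the table size across different buckets.
 */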

static struct kmem_cache *uid_cachep;
static struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error: it would run
 * softirq callbacks, which can unconditionally enable interrupts, and
 * the caller of free_uid() does not expect that.
 */
static DEFINE_SPINLOCK(uidhash_lock);
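
/*
 * Concretely, every taker of the lock in this file therefore uses one of
 * two interrupt-safe patterns (a restatement of the rule above, not new
 * code).  In process context, with interrupts known to be enabled:
 *
 *	spin_lock_irq(&uidhash_lock);
 *	...
 *	spin_unlock_irq(&uidhash_lock);
 *
 * and where the caller may already have interrupts disabled:
 *
 *	unsigned long flags;
 *	spin_lock_irqsave(&uidhash_lock, flags);
 *	...
 *	spin_unlock_irqrestore(&uidhash_lock, flags);
 *
 * Never spin_lock_bh(), for the reason given above.
 */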

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
	.__count	= REFCOUNT_INIT(1),
	.uid		= GLOBAL_ROOT_UID,
	.ratelimit	= RATELIMIT_STATE_INIT(root_user.ratelimit, 0, 0),
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;

	hlist_for_each_entry(user, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			refcount_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

static int user_epoll_alloc(struct user_struct *up)
{
#ifdef CONFIG_EPOLL
	return percpu_counter_init(&up->epoll_watches, 0, GFP_KERNEL);
#else
	return 0;
#endif
}

static void user_epoll_free(struct user_struct *up)
{
#ifdef CONFIG_EPOLL
	percpu_counter_destroy(&up->epoll_watches);
#endif
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	user_epoll_free(up);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
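
/*
 * Example of a hypothetical caller pairing the two (illustrative only):
 *
 *	struct user_struct *user = find_user(uid);
 *
 *	if (user) {
 *		... inspect user ...
 *		free_uid(user);		// drop the ref find_user() took
 *	}
 */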

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	if (refcount_dec_and_lock_irqsave(&up->__count, &uidhash_lock, &flags))
		free_user(up, flags);
}
EXPORT_SYMBOL_GPL(free_uid);

struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			return NULL;

		new->uid = uid;
		refcount_set(&new->__count, 1);
		if (user_epoll_alloc(new)) {
			kmem_cache_free(uid_cachep, new);
			return NULL;
		}
		ratelimit_state_init(&new->ratelimit, HZ, 100);
		ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already.
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			user_epoll_free(new);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;
}
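
/*
 * Sketch of a hypothetical caller, in the spirit of the setuid() paths:
 * take a ref on the target uid's user_struct and drop the old one.
 * (Illustrative only; the function and variable names are made up.)
 *
 *	static int switch_user(struct cred *new, kuid_t kuid)
 *	{
 *		struct user_struct *new_user = alloc_uid(kuid);
 *
 *		if (!new_user)
 *			return -EAGAIN;
 *
 *		free_uid(new->user);	// release the ref on the old user
 *		new->user = new_user;
 *		return 0;
 *	}
 */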

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	if (user_epoll_alloc(&root_user))
		panic("root_user epoll percpu counter alloc failed");

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}
subsys_initcall(uid_cache_init);