/*
 * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 * Licensed under the GPL
 */

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <asm/ptrace-abi.h>
#include <os.h>
#include <skas.h>
#include <sysdep/tls.h>
#include <asm/desc.h>
#include <stub-data.h>

/*
 * host_supports_tls starts out as -1 so that, if needed, we can detect when
 * it is still uninitialized.
 *
 * Both variables are initialized in an initcall and unchanged thereafter.
 */
static int host_supports_tls = -1;
int host_gdt_entry_tls_min;

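/*
 * Program one host TLS slot for @task: either queue the descriptor for the
 * seccomp stub to sync, or set it immediately on the host process via
 * ptrace (PTRACE_SET_THREAD_AREA).
 */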
static int do_set_thread_area(struct task_struct *task, struct user_desc *info)
{
	int ret;

	if (info->entry_number < host_gdt_entry_tls_min ||
	    info->entry_number >= host_gdt_entry_tls_min + GDT_ENTRY_TLS_ENTRIES)
		return -EINVAL;

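	/*
	 * With a seccomp stub, store the descriptor in the stub's shared
	 * data page and mark the slot so the stub syncs it to the host.
	 */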
	if (using_seccomp) {
		int idx = info->entry_number - host_gdt_entry_tls_min;
		struct stub_data *data = (void *)task->mm->context.id.stack;

		data->arch_data.tls[idx] = *info;
		data->arch_data.sync |= BIT(idx);

		return 0;
	}

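	/* Otherwise, program the slot in the host process right away. */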
	ret = os_set_thread_area(info, task->mm->context.id.pid);

	if (ret)
		printk(KERN_ERR "PTRACE_SET_THREAD_AREA failed, err = %d, "
		       "index = %d\n", ret, info->entry_number);

	return ret;
}

/*
 * sys_set_thread_area: get a yet unused TLS descriptor index when the caller
 * passes entry_number == -1.
 * XXX: Consider leaving one free slot for glibc usage in the first place. This
 * must be done here (and by changing GDT_ENTRY_TLS_* macros) and nowhere else.
 *
 * Also, this must be tested when compiling in SKAS mode with dynamic linking
 * and running against NPTL.
 */
static int get_free_idx(struct task_struct *task)
{
	struct thread_struct *t = &task->thread;
	int idx;

	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
		if (!t->arch.tls_array[idx].present)
			return idx + GDT_ENTRY_TLS_MIN;
	return -ESRCH;
}

static inline void clear_user_desc(struct user_desc *info)
{
	/* Postcondition: LDT_empty(info) returns true. */
	memset(info, 0, sizeof(*info));

	/*
	 * See LDT_empty() and the i386 sys_get_thread_area code: setting these
	 * two fields on a zeroed descriptor indeed yields an empty user_desc.
	 */
	info->read_exec_only = 1;
	info->seg_not_present = 1;
}

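/* Flush TLS entries to the host even if they are already marked flushed. */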
#define O_FORCE 1

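/*
 * Write the task's TLS descriptors through to the host. Entries already
 * flushed are skipped unless O_FORCE is set; non-present, never-flushed
 * entries are cleared on the host.
 */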
static int load_TLS(int flags, struct task_struct *to)
{
	int ret = 0;
	int idx;

	for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) {
		struct uml_tls_struct *curr =
			&to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];

		/*
		 * A non-present entry that was never flushed is cleared here
		 * and then flushed, which clears the host's copy as well. One
		 * that was already flushed is already empty on the host.
		 */
		if (!curr->present) {
			if (!curr->flushed) {
				clear_user_desc(&curr->tls);
				curr->tls.entry_number = idx;
			} else {
				WARN_ON(!LDT_empty(&curr->tls));
				continue;
			}
		}

		if (!(flags & O_FORCE) && curr->flushed)
			continue;

		ret = do_set_thread_area(current, &curr->tls);
		if (ret)
			goto out;

		curr->flushed = 1;
	}
out:
	return ret;
}

/*
 * Check whether the new process needs a flush, i.e. whether any descriptors
 * have not been flushed to the host yet.
 */
static inline int needs_TLS_update(struct task_struct *task)
{
	int i;
	int ret = 0;

	for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
		struct uml_tls_struct *curr =
			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

		/*
		 * Can't test curr->present, we may need to clear a descriptor
		 * which had a value.
		 */
		if (curr->flushed)
			continue;
		ret = 1;
		break;
	}
	return ret;
}

/*
 * On a newly forked process, the TLS descriptors haven't yet been flushed. So
 * we mark them as such and the first switch_to will do the job.
 */
void clear_flushed_tls(struct task_struct *task)
{
	int i;

	for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
		struct uml_tls_struct *curr =
			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

		/*
		 * Skipping entries which are not present is still correct: an
		 * entry which was never set on the host stays as flushed as it
		 * was.
		 */
		if (!curr->present)
			continue;

		curr->flushed = 0;
	}
}

/*
 * In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have a
 * common host process. So this is needed in SKAS0 too.
 *
 * However, if each thread had a different host process (and this was discussed
 * for SMP support) this wouldn't be needed.
 *
 * And this won't be needed when (and if) we add support for the host SKAS
 * patch.
 */

int arch_switch_tls(struct task_struct *to)
{
	if (!host_supports_tls)
		return 0;

	/*
	 * We have no need whatsoever to switch TLS for kernel threads; beyond
	 * that, doing so would also end up calling os_set_thread_area with
	 * userspace_pid[cpu] == 0, which gives an error.
	 */
	if (likely(to->mm))
		return load_TLS(O_FORCE, to);

	return 0;
}

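/*
 * Record a descriptor in the task's kernel-side shadow array; @flushed
 * tells whether the host already holds this value.
 */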
static int set_tls_entry(struct task_struct *task, struct user_desc *info,
			 int idx, int flushed)
{
	struct thread_struct *t = &task->thread;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;

	return 0;
}

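/*
 * Called on clone() with CLONE_SETTLS: install the user_desc at @tls into
 * the child. The entry is recorded as not flushed, so the first switch to
 * the child writes it to the host.
 */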
int arch_set_tls(struct task_struct *new, unsigned long tls)
{
	struct user_desc info;
	int idx, ret = -EFAULT;

	if (copy_from_user(&info, (void __user *) tls, sizeof(info)))
		goto out;

	ret = -EINVAL;
	if (LDT_empty(&info))
		goto out;

	idx = info.entry_number;

	ret = set_tls_entry(new, &info, idx, 0);
out:
	return ret;
}

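/*
 * Read a descriptor back from the shadow array. Non-present entries are
 * returned cleared, matching what an unset host GDT slot would hold.
 */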
static int get_tls_entry(struct task_struct *task, struct user_desc *info,
			 int idx)
{
	struct thread_struct *t = &task->thread;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
		goto clear;

	*info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;

out:
	/*
	 * Temporary debugging check, to make sure that things have been
	 * flushed. This could be triggered if load_TLS() failed.
	 */
	if (unlikely(task == current &&
		     !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
		printk(KERN_ERR "get_tls_entry: task with pid %d got here "
				"without flushed TLS.\n", current->pid);
	}

	return 0;
clear:
	/*
	 * When the TLS entry has not been set, the values returned to the user
	 * from the tls_array are 0 (because it is cleared at boot, see
	 * arch/i386/kernel/head.S:cpu_gdt_table). Emulate that.
	 */
	clear_user_desc(info);
	info->entry_number = idx;
	goto out;
}

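/*
 * sys_set_thread_area: if entry_number is -1, pick a free slot and report
 * it back to the user; the host is programmed immediately, so the entry is
 * recorded as flushed.
 */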
SYSCALL_DEFINE1(set_thread_area, struct user_desc __user *, user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	idx = info.entry_number;

	if (idx == -1) {
		idx = get_free_idx(current);
		if (idx < 0)
			return idx;
		info.entry_number = idx;
		/* Tell the user which slot we chose for him. */
		if (put_user(idx, &user_desc->entry_number))
			return -EFAULT;
	}

	ret = do_set_thread_area(current, &info);
	if (ret)
		return ret;
	return set_tls_entry(current, &info, idx, 1);
}

/*
 * Perform set_thread_area on behalf of the traced child.
 * Note: error handling is not done on the deferred load, and this differs from
 * i386. However, the only possible errors are caused by bugs.
 */
int ptrace_set_thread_area(struct task_struct *child, int idx,
			   struct user_desc __user *user_desc)
{
	struct user_desc info;

	if (!host_supports_tls)
		return -EIO;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	return set_tls_entry(child, &info, idx, 0);
}

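/*
 * sys_get_thread_area: answered from the kernel-side shadow copy; the host
 * is not consulted.
 */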
SYSCALL_DEFINE1(get_thread_area, struct user_desc __user *, user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (get_user(idx, &user_desc->entry_number))
		return -EFAULT;

	ret = get_tls_entry(current, &info, idx);
	if (ret < 0)
		goto out;

	if (copy_to_user(user_desc, &info, sizeof(info)))
		ret = -EFAULT;

out:
	return ret;
}

/*
 * Perform get_thread_area on behalf of the traced child.
 */
int ptrace_get_thread_area(struct task_struct *child, int idx,
			   struct user_desc __user *user_desc)
{
	struct user_desc info;
	int ret;

	if (!host_supports_tls)
		return -EIO;

	ret = get_tls_entry(child, &info, idx);
	if (ret < 0)
		goto out;

	if (copy_to_user(user_desc, &info, sizeof(info)))
		ret = -EFAULT;
out:
	return ret;
}

/*
 * This code is really i386-only, but it detects and logs x86_64 GDT indexes
 * if a 32-bit UML is running on a 64-bit host.
 */
static int __init __setup_host_supports_tls(void)
{
	check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
	if (host_supports_tls) {
		printk(KERN_INFO "Host TLS support detected\n");
		printk(KERN_INFO "Detected host type: ");
		switch (host_gdt_entry_tls_min) {
		case GDT_ENTRY_TLS_MIN_I386:
			printk(KERN_CONT "i386");
			break;
		case GDT_ENTRY_TLS_MIN_X86_64:
			printk(KERN_CONT "x86_64");
			break;
		}
		printk(KERN_CONT " (GDT indexes %d to %d)\n",
		       host_gdt_entry_tls_min,
		       host_gdt_entry_tls_min + GDT_ENTRY_TLS_ENTRIES);
	} else
		printk(KERN_ERR "Host TLS support NOT detected! "
				"TLS support inside UML will not work\n");
	return 0;
}

__initcall(__setup_host_supports_tls);