xref: /linux/arch/x86/um/tls_32.c (revision 6e7fd890f1d6ac83805409e9c346240de2705584)
/*
 * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 * Licensed under the GPL
 */

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <asm/ptrace-abi.h>
#include <os.h>
#include <skas.h>
#include <sysdep/tls.h>
#include <asm/desc.h>

/*
 * host_supports_tls starts out as -1 so that, if needed, we can detect
 * when it is still uninitialized.
 *
 * Both variables are initialized in an initcall and unchanged thereafter.
 */
static int host_supports_tls = -1;
int host_gdt_entry_tls_min;

static int do_set_thread_area(struct user_desc *info)
{
	int ret;
	u32 cpu;

	cpu = get_cpu();
	ret = os_set_thread_area(info, userspace_pid[cpu]);
	put_cpu();

	if (ret)
		printk(KERN_ERR "PTRACE_SET_THREAD_AREA failed, err = %d, "
		       "index = %d\n", ret, info->entry_number);

	return ret;
}

/*
 * get_free_idx: find a not yet used TLS descriptor index.
 * XXX: Consider leaving one free slot for glibc usage in the first place.
 * This must be done here (and by changing the GDT_ENTRY_TLS_* macros) and
 * nowhere else.
 *
 * Also, this must be tested when compiling in SKAS mode with dynamic linking
 * and running against NPTL.
 */
static int get_free_idx(struct task_struct *task)
{
	struct thread_struct *t = &task->thread;
	int idx;

	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
		if (!t->arch.tls_array[idx].present)
			return idx + GDT_ENTRY_TLS_MIN;
	return -ESRCH;
}

static inline void clear_user_desc(struct user_desc *info)
{
	/* Postcondition: LDT_empty(info) returns true. */
	memset(info, 0, sizeof(*info));

	/*
	 * Check LDT_empty() or the i386 sys_get_thread_area() code: this
	 * really does produce an empty user_desc.
	 */
	info->read_exec_only = 1;
	info->seg_not_present = 1;
}

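/*
 * Flag for load_TLS(): push every present entry to the host even if it
 * is already marked as flushed.
 */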
#define O_FORCE 1

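/*
 * Flush the shadow tls_array of @to into the host GDT. Non-present,
 * not-yet-flushed entries are cleared on the host as well.
 */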
static int load_TLS(int flags, struct task_struct *to)
{
	int ret = 0;
	int idx;

	for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) {
		struct uml_tls_struct *curr =
			&to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];

		/*
		 * If the entry is not present and not yet flushed, clear it
		 * here and flush it to the host, which will clear it there.
		 */
		if (!curr->present) {
			if (!curr->flushed) {
				clear_user_desc(&curr->tls);
				curr->tls.entry_number = idx;
			} else {
				WARN_ON(!LDT_empty(&curr->tls));
				continue;
			}
		}

		if (!(flags & O_FORCE) && curr->flushed)
			continue;

		ret = do_set_thread_area(&curr->tls);
		if (ret)
			goto out;

		curr->flushed = 1;
	}
out:
	return ret;
}

/*
 * Check whether we need to flush TLS entries for the new process, i.e.
 * whether any descriptors have not been flushed yet.
 */
static inline int needs_TLS_update(struct task_struct *task)
{
	int i;
	int ret = 0;

	for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
		struct uml_tls_struct *curr =
			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

		/*
		 * We can't test curr->present: we may need to clear a
		 * descriptor which previously had a value.
		 */
		if (curr->flushed)
			continue;
		ret = 1;
		break;
	}
	return ret;
}

/*
 * On a newly forked process, the TLS descriptors haven't been flushed yet.
 * Mark them as such, and the first switch_to will do the job.
 */
void clear_flushed_tls(struct task_struct *task)
{
	int i;

	for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
		struct uml_tls_struct *curr =
			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

		/*
		 * It is still correct to skip a non-present entry: if it
		 * wasn't present on the host, it remains as flushed as it
		 * was.
		 */
		if (!curr->present)
			continue;

		curr->flushed = 0;
	}
}

/*
 * In SKAS0 mode, currently, multiple guest threads sharing the same ->mm
 * have a common host process, so this is needed in SKAS0 too.
 *
 * However, if each thread had a different host process (as was discussed
 * for SMP support), this wouldn't be needed.
 *
 * It will also not be needed when (and if) we add support for the host
 * SKAS patch.
 */

int arch_switch_tls(struct task_struct *to)
{
	if (!host_supports_tls)
		return 0;

	/*
	 * There is no need whatsoever to switch TLS for kernel threads;
	 * besides, doing so would result in calling os_set_thread_area()
	 * with userspace_pid[cpu] == 0, which gives an error.
	 */
	if (likely(to->mm))
		return load_TLS(O_FORCE, to);

	return 0;
}

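/*
 * Record @info in the task's shadow tls_array; @flushed says whether the
 * host already has this descriptor installed.
 */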
static int set_tls_entry(struct task_struct *task, struct user_desc *info,
			 int idx, int flushed)
{
	struct thread_struct *t = &task->thread;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;

	return 0;
}

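/*
 * Install the TLS descriptor passed via clone()'s CLONE_SETTLS argument
 * for a newly created task. It is recorded as not flushed, so the first
 * switch to the new task pushes it to the host.
 */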
int arch_set_tls(struct task_struct *new, unsigned long tls)
{
	struct user_desc info;
	int idx, ret = -EFAULT;

	if (copy_from_user(&info, (void __user *) tls, sizeof(info)))
		goto out;

	ret = -EINVAL;
	if (LDT_empty(&info))
		goto out;

	idx = info.entry_number;

	ret = set_tls_entry(new, &info, idx, 0);
out:
	return ret;
}

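/*
 * Read the shadow copy of entry @idx into @info. A non-present entry
 * reads back as a cleared descriptor, emulating what i386 returns for an
 * unset GDT slot.
 */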
static int get_tls_entry(struct task_struct *task, struct user_desc *info,
			 int idx)
{
	struct thread_struct *t = &task->thread;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
		goto clear;

	*info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;

out:
	/*
	 * Temporary debugging check, to make sure that things have been
	 * flushed. This could be triggered if load_TLS() failed.
	 */
	if (unlikely(task == current &&
		     !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
		printk(KERN_ERR "get_tls_entry: task with pid %d got here "
				"without flushed TLS.\n", current->pid);
	}

	return 0;
clear:
	/*
	 * When the TLS entry has not been set, the values returned to
	 * userspace in the tls_array are 0 (because it's cleared at boot,
	 * see arch/i386/kernel/head.S:cpu_gdt_table). Emulate that.
	 */
	clear_user_desc(info);
	info->entry_number = idx;
	goto out;
}

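/*
 * set_thread_area() entry point: pick a free slot if entry_number is -1,
 * install the descriptor on the host right away, and record it as
 * flushed.
 */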
SYSCALL_DEFINE1(set_thread_area, struct user_desc __user *, user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	idx = info.entry_number;

	if (idx == -1) {
		idx = get_free_idx(current);
		if (idx < 0)
			return idx;
		info.entry_number = idx;
		/* Tell the user which slot we chose for him. */
		if (put_user(idx, &user_desc->entry_number))
			return -EFAULT;
	}

	ret = do_set_thread_area(&info);
	if (ret)
		return ret;
	return set_tls_entry(current, &info, idx, 1);
}

/*
 * Perform set_thread_area on behalf of the traced child.
 * Note: no error handling is done on the deferred load, and this differs
 * from i386. However, the only possible errors are caused by bugs.
 */
int ptrace_set_thread_area(struct task_struct *child, int idx,
			   struct user_desc __user *user_desc)
{
	struct user_desc info;

	if (!host_supports_tls)
		return -EIO;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	return set_tls_entry(child, &info, idx, 0);
}

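/*
 * get_thread_area() entry point: read back the shadow copy of the
 * requested slot.
 */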
SYSCALL_DEFINE1(get_thread_area, struct user_desc __user *, user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (get_user(idx, &user_desc->entry_number))
		return -EFAULT;

	ret = get_tls_entry(current, &info, idx);
	if (ret < 0)
		goto out;

	if (copy_to_user(user_desc, &info, sizeof(info)))
		ret = -EFAULT;

out:
	return ret;
}

/*
 * Perform get_thread_area on behalf of the traced child.
 */
int ptrace_get_thread_area(struct task_struct *child, int idx,
			   struct user_desc __user *user_desc)
{
	struct user_desc info;
	int ret;

	if (!host_supports_tls)
		return -EIO;

	ret = get_tls_entry(child, &info, idx);
	if (ret < 0)
		goto out;

	if (copy_to_user(user_desc, &info, sizeof(info)))
		ret = -EFAULT;
out:
	return ret;
}

/*
 * This code is really i386-only, but it detects and logs x86_64 GDT
 * indexes if a 32-bit UML is running on a 64-bit host.
 */
static int __init __setup_host_supports_tls(void)
{
	check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
	if (host_supports_tls) {
		printk(KERN_INFO "Host TLS support detected\n");
		printk(KERN_INFO "Detected host type: ");
		switch (host_gdt_entry_tls_min) {
		case GDT_ENTRY_TLS_MIN_I386:
			printk(KERN_CONT "i386");
			break;
		case GDT_ENTRY_TLS_MIN_X86_64:
			printk(KERN_CONT "x86_64");
			break;
		}
		printk(KERN_CONT " (GDT indexes %d to %d)\n",
		       host_gdt_entry_tls_min,
		       host_gdt_entry_tls_min + GDT_ENTRY_TLS_ENTRIES - 1);
	} else {
		printk(KERN_ERR "  Host TLS support NOT detected! "
				"TLS support inside UML will not work\n");
	}
	return 0;
}

__initcall(__setup_host_supports_tls);