xref: /linux/arch/um/kernel/process.c (revision 26b0d14106954ae46d2f4f7eec3481828a210f7d)
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <linux/tracehook.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"
#include "skas.h"

/*
 * This is a per-cpu array.  Each processor reads and writes only its
 * own entry, so no locking is needed: concurrent updates by other
 * processors touch only their own slots.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

static inline int external_pid(void)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return userspace_pid[0];
}

int pid_to_processor_id(int pid)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (cpu_tasks[i].pid == pid)
			return i;
	}
	return -1;
}

void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);

	return page;
}

int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	int pid;

	current->thread.request.u.thread.proc = fn;
	current->thread.request.u.thread.arg = arg;
	pid = do_fork(CLONE_VM | CLONE_UNTRACED | flags, 0,
		      &current->thread.regs, 0, NULL, NULL);
	return pid;
}
EXPORT_SYMBOL(kernel_thread);
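
/*
 * Usage sketch, illustrative only; do_some_work() is a made-up helper,
 * not part of this file.  The new thread shares the kernel address
 * space (CLONE_VM) and, if fn returns instead of exec-ing a process,
 * exits via do_exit(0) in new_thread_handler() below:
 *
 *	static int my_worker(void *arg)
 *	{
 *		return do_some_work(arg);
 *	}
 *
 *	pid = kernel_thread(my_worker, NULL, 0);
 */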

static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
		{ external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

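/*
 * Switch host context from "from" to "to" by swapping jmp_buf
 * contexts.  If, when the switch returns here, the now-current thread
 * has thread.saved_task set, we immediately switch again to that
 * saved task, repeating until none is pending.  The returned
 * prev_sched is the task we switched away from, consumed by
 * schedule_tail() in the handlers below.
 */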
void *__switch_to(struct task_struct *from, struct task_struct *to)
{
	to->thread.prev_sched = from;
	set_current(to);

	do {
		current->thread.saved_task = NULL;

		switch_threads(&from->thread.switch_buf,
			       &to->thread.switch_buf);

		arch_switch_to(current);

		if (current->thread.saved_task)
			show_regs(&(current->thread.regs));
		to = current->thread.saved_task;
		from = current;
	} while (current->thread.saved_task);

	return current->thread.prev_sched;
}

void interrupt_end(void)
{
	if (need_resched())
		schedule();
	if (test_thread_flag(TIF_SIGPENDING))
		do_signal();
	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
		tracehook_notify_resume(&current->thread.regs);
}

void exit_thread(void)
{
}

int get_current_pid(void)
{
	return task_pid_nr(current);
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * The return value is 1 if the kernel thread execs a process,
	 * 0 if it just exits
	 */
	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
	if (n == 1) {
		/* Handle any immediate reschedules or signals */
		interrupt_end();
		userspace(&current->thread.regs.regs);
	} else
		do_exit(0);
}
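
/*
 * A sketch of the "magic" above.  The real new_thread() lives in the
 * os-Linux layer; the version below is paraphrased from memory, so
 * treat names and details as approximate.  It plants the handler's
 * address and the top of the new kernel stack directly into the
 * jmp_buf, so that the first switch_threads() to that buffer
 * "returns" into the handler on the new stack:
 *
 *	void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
 *	{
 *		(*buf)[0].JB_IP = (unsigned long) handler;
 *		(*buf)[0].JB_SP = (unsigned long) stack + UM_THREAD_SIZE
 *				  - sizeof(void *);
 *	}
 */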

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed. It might be worth applying this
	 * to improve performance. -bb
	 */
	arch_switch_to(current);

	current->thread.prev_sched = NULL;

	/* Handle any immediate reschedules or signals */
	interrupt_end();

	userspace(&current->thread.regs.regs);
}

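/*
 * Set up the kernel half of a new task.  A fork child gets a copy of
 * the parent's userspace registers, with the syscall return value
 * forced to 0 (which is what makes fork() return 0 in the child), and
 * resumes in fork_handler(); a kernel thread gets a safe register set
 * and resumes in new_thread_handler().
 */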
int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long stack_top, struct task_struct *p,
		struct pt_regs *regs)
{
	void (*handler)(void);
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

	if (current->thread.forking) {
		memcpy(&p->thread.regs.regs, &regs->regs,
		       sizeof(p->thread.regs.regs));
		UPT_SET_SYSCALL_RETURN(&p->thread.regs.regs, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	} else {
		get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
		p->thread.request.u.thread = current->thread.request.u.thread;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (current->thread.forking) {
		clear_flushed_tls(p);

		/* Set a new TLS for the child thread? */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_copy_tls(p);
	}

	return ret;
}

void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

void default_idle(void)
{
	unsigned long long nsecs;

	/* endless idle loop with no priority at all */
	while (1) {
		/*
		 * although we are an idle CPU, we do not want to
		 * get into the scheduler unnecessarily.
		 */
		if (need_resched())
			schedule();

		tick_nohz_idle_enter();
		rcu_idle_enter();
		nsecs = disable_timer();
		idle_sleep(nsecs);
		rcu_idle_exit();
		tick_nohz_idle_exit();
	}
}

void cpu_idle(void)
{
	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
	default_idle();
}

int __cant_sleep(void)
{
	/* Is in_interrupt() really needed? */
	return in_atomic() || irqs_disabled() || in_interrupt();
}

int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread_info();
}
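
/*
 * Worked example (assumptions: 4K pages, so PAGE_MASK == ~0xfffUL,
 * and CONFIG_KERNEL_STACK_ORDER == 2, i.e. a 16K kernel stack):
 *
 *	PAGE_MASK << 2 == ~0x3fffUL
 *
 * so "stack" is sp rounded down to a 16K boundary.  thread_info sits
 * at the bottom of the kernel stack, so if that base is not this
 * task's thread_info, sp must point into userspace.
 */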

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}
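
/*
 * Sketch of how entries land in this table.  Registration normally
 * goes through the __uml_exitcall() macro in UML's shared init.h
 * (name from that header; details simplified here):
 *
 *	static void my_cleanup(void)
 *	{
 *		...
 *	}
 *
 *	__uml_exitcall(my_cleanup);
 *
 * Calls then run in reverse order of registration, destructor-style,
 * as the loop above walks the section backwards.
 */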

char *uml_strdup(const char *string)
{
	return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

int copy_to_user_proc(void __user *to, void *from, int size)
{
	return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
	return clear_user(buf, size);
}

int strlen_user_proc(char __user *str)
{
	return strlen_user(str);
}

int smp_sigio_handler(void)
{
#ifdef CONFIG_SMP
	int cpu = current_thread_info()->cpu;
	IPI_handler(cpu);
	if (cpu != 0)
		return 1;
#endif
	return 0;
}

int cpu(void)
{
	return current_thread_info()->cpu;
}

/* Level of host PTRACE_SYSEMU support currently in use */
static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}

static int sysemu_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", get_using_sysemu());
	return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sysemu_proc_show, NULL);
}

static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *pos)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}
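
/*
 * Usage sketch: only the first character is interpreted, so from a
 * shell inside the UML instance
 *
 *	# echo 1 > /proc/sysemu
 *	# cat /proc/sysemu
 *	1
 *
 * selects level 1.  The levels select how much host PTRACE_SYSEMU
 * support to use; the assumed mapping is 0 = none, 1 = PTRACE_SYSEMU,
 * 2 = PTRACE_SYSEMU_SINGLESTEP.
 */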

static const struct file_operations sysemu_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= sysemu_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= sysemu_proc_write,
};

int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;

	if (!sysemu_supported)
		return 0;

	ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);

	if (ent == NULL) {
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return 0;
	}

	return 0;
}

late_initcall(make_proc_sysemu);

/*
 * Returns 0 if the task isn't being singlestepped, 1 if it is and a
 * syscall is being stepped through, 2 otherwise.
 */
int singlestepping(void *t)
{
	struct task_struct *task = t ? t : current;

	if (!(task->ptrace & PT_DTRACE))
		return 0;

	if (task->thread.singlestep_syscall)
		return 1;

	return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/system.h.
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
#endif
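
/*
 * Worked example: with randomization enabled, sp is lowered by a
 * random offset in [0, 8191] and then rounded down to a 16-byte
 * boundary, e.g. (values made up):
 *
 *	sp = 0xbffff000, get_random_int() % 8192 == 0x1235
 *	0xbffff000 - 0x1235 == 0xbfffddcb
 *	0xbfffddcb & ~0xf  == 0xbfffddc0
 */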

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack_page, sp, ip;
	bool seen_sched = false;

	if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
		return 0;

	stack_page = (unsigned long) task_stack_page(p);
	/* Bail if the process has no kernel stack for some reason */
	if (stack_page == 0)
		return 0;

	sp = p->thread.switch_buf->JB_SP;
	/*
	 * Bail if the stack pointer is below the bottom of the kernel
	 * stack for some reason
	 */
	if (sp < stack_page)
		return 0;

	while (sp < stack_page + THREAD_SIZE) {
		ip = *((unsigned long *) sp);
		if (in_sched_functions(ip))
			/* Ignore everything until we're above the scheduler */
			seen_sched = true;
		else if (kernel_text_address(ip) && seen_sched)
			return ip;

		sp += sizeof(unsigned long);
	}

	return 0;
}

int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
	int cpu = current_thread_info()->cpu;

	return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
}