xref: /linux/arch/arm/kernel/process.c (revision d8327c784b51b57dac2c26cfad87dce0d68dfd98)
/*
 *  linux/arch/arm/kernel/process.c
 *
 *  Copyright (C) 1996-2000 Russell King - Converted to ARM.
 *  Original Copyright (C) 1995  Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <stdarg.h>

#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>

#include <asm/leds.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/mach/time.h>

extern const char *processor_modes[];
extern void setup_mm_for_reboot(char mode);

static volatile int hlt_counter;

#include <asm/arch/system.h>

void disable_hlt(void)
{
	hlt_counter++;
}

EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	hlt_counter--;
}

EXPORT_SYMBOL(enable_hlt);
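
/*
 * disable_hlt()/enable_hlt() let drivers keep the CPU out of the
 * low-power arch_idle() state while hlt_counter is non-zero.  A
 * hypothetical caller might bracket a latency-critical operation:
 *
 *	disable_hlt();
 *	... start I/O that cannot tolerate the core being halted ...
 *	enable_hlt();
 *
 * Calls must be balanced, since the counter is a plain increment
 * and decrement.
 */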

static int __init nohlt_setup(char *__unused)
{
	hlt_counter = 1;
	return 1;
}

static int __init hlt_setup(char *__unused)
{
	hlt_counter = 0;
	return 1;
}

__setup("nohlt", nohlt_setup);
__setup("hlt", hlt_setup);
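
/*
 * These are kernel command line options: booting with "nohlt" starts
 * the system with hlt_counter set, so the idle loop spins in
 * cpu_relax() instead of calling arch_idle(); "hlt" (the default
 * behaviour) allows the processor to be halted when idle.
 */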

/*
 * Hooks that platform or power-management code may install to
 * override the default idle and power-off behaviour.  They stay
 * NULL unless such code installs its own handlers.
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

/*
 * This is our default idle handler.  We need to disable interrupts
 * here to ensure we don't miss a wakeup call: arch_idle() is expected
 * to return once an interrupt becomes pending even though IRQs are
 * masked, and the interrupt is then taken after local_irq_enable(),
 * so a wakeup arriving after the need_resched() check is not lost.
 */
static void default_idle(void)
{
	if (hlt_counter)
		cpu_relax();
	else {
		local_irq_disable();
		if (!need_resched()) {
			timer_dyn_reprogram();
			arch_idle();
		}
		local_irq_enable();
	}
}

/*
 * The idle thread.  We try to conserve power, while trying to keep
 * overall latency low.  Each pass uses the machine-specific pm_idle
 * hook when one is installed, and falls back to default_idle()
 * otherwise.
 */
void cpu_idle(void)
{
	local_fiq_enable();

	/* endless idle loop with no priority at all */
	while (1) {
		void (*idle)(void) = pm_idle;

#ifdef CONFIG_HOTPLUG_CPU
		if (cpu_is_offline(smp_processor_id())) {
			leds_event(led_idle_start);
			cpu_die();
		}
#endif

		if (!idle)
			idle = default_idle;
		leds_event(led_idle_start);
		while (!need_resched())
			idle();
		leds_event(led_idle_end);
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
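
/*
 * A machine support file could install its own low-power idle routine
 * through the exported hook, for instance (hypothetical platform code):
 *
 *	static void mach_xyz_idle(void)
 *	{
 *		... enter a machine specific low-power state ...
 *	}
 *
 *	static int __init mach_xyz_init(void)
 *	{
 *		pm_idle = mach_xyz_idle;
 *		return 0;
 *	}
 *
 * cpu_idle() picks the hook up on the next pass round its loop.
 */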

static char reboot_mode = 'h';

int __init reboot_setup(char *str)
{
	reboot_mode = str[0];
	return 1;
}

__setup("reboot=", reboot_setup);
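
/*
 * Only the first character of the "reboot=" argument is kept: the
 * default 'h' requests a hard reset, while booting with e.g.
 * "reboot=s" asks for a soft reboot on machines whose
 * setup_mm_for_reboot()/arch_reset() implementations distinguish the
 * two modes.
 */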

void machine_halt(void)
{
}


void machine_power_off(void)
{
	if (pm_power_off)
		pm_power_off();
}


void machine_restart(char * __unused)
{
	/*
	 * Clean and disable cache, and turn off interrupts
	 */
	cpu_proc_fin();

	/*
	 * Tell the mm system that we are going to reboot -
	 * we may need it to insert some 1:1 mappings so that
	 * soft boot works.
	 */
	setup_mm_for_reboot(reboot_mode);

	/*
	 * Now call the architecture specific reboot code.
	 */
	arch_reset(reboot_mode);

	/*
	 * Whoops - the architecture was unable to reboot.
	 * Tell the user!
	 */
	mdelay(1000);
	printk("Reboot failed -- System halted\n");
	while (1);
}

void __show_regs(struct pt_regs *regs)
{
	unsigned long flags = condition_codes(regs);

	printk("CPU: %d\n", smp_processor_id());
	print_symbol("PC is at %s\n", instruction_pointer(regs));
	print_symbol("LR is at %s\n", regs->ARM_lr);
	printk("pc : [<%08lx>]    lr : [<%08lx>]    %s\n"
	       "sp : %08lx  ip : %08lx  fp : %08lx\n",
		instruction_pointer(regs),
		regs->ARM_lr, print_tainted(), regs->ARM_sp,
		regs->ARM_ip, regs->ARM_fp);
	printk("r10: %08lx  r9 : %08lx  r8 : %08lx\n",
		regs->ARM_r10, regs->ARM_r9,
		regs->ARM_r8);
	printk("r7 : %08lx  r6 : %08lx  r5 : %08lx  r4 : %08lx\n",
		regs->ARM_r7, regs->ARM_r6,
		regs->ARM_r5, regs->ARM_r4);
	printk("r3 : %08lx  r2 : %08lx  r1 : %08lx  r0 : %08lx\n",
		regs->ARM_r3, regs->ARM_r2,
		regs->ARM_r1, regs->ARM_r0);
	printk("Flags: %c%c%c%c",
		flags & PSR_N_BIT ? 'N' : 'n',
		flags & PSR_Z_BIT ? 'Z' : 'z',
		flags & PSR_C_BIT ? 'C' : 'c',
		flags & PSR_V_BIT ? 'V' : 'v');
	printk("  IRQs o%s  FIQs o%s  Mode %s%s  Segment %s\n",
		interrupts_enabled(regs) ? "n" : "ff",
		fast_interrupts_enabled(regs) ? "n" : "ff",
		processor_modes[processor_mode(regs)],
		thumb_mode(regs) ? " (T)" : "",
		get_fs() == get_ds() ? "kernel" : "user");
	{
		unsigned int ctrl, transbase, dac;
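		/*
		 * Read CP15 c1 (control register), c2 (translation
		 * table base) and c3 (domain access control) so they
		 * can be included in the dump below.
		 */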
		__asm__ (
		"	mrc p15, 0, %0, c1, c0\n"
		"	mrc p15, 0, %1, c2, c0\n"
		"	mrc p15, 0, %2, c3, c0\n"
		: "=r" (ctrl), "=r" (transbase), "=r" (dac));
		printk("Control: %04X  Table: %08X  DAC: %08X\n",
			ctrl, transbase, dac);
	}
}

void show_regs(struct pt_regs * regs)
{
	printk("\n");
	printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
	__show_regs(regs);
	__backtrace();
}

void show_fpregs(struct user_fp *regs)
{
	int i;

	for (i = 0; i < 8; i++) {
		unsigned long *p;
		char type;

		p = (unsigned long *)(regs->fpregs + i);

		switch (regs->ftype[i]) {
			case 1: type = 'f'; break;
			case 2: type = 'd'; break;
			case 3: type = 'e'; break;
			default: type = '?'; break;
		}
		if (regs->init_flag)
			type = '?';

		printk("  f%d(%c): %08lx %08lx %08lx%c",
			i, type, p[0], p[1], p[2], i & 1 ? '\n' : ' ');
	}

	printk("FPSR: %08lx FPCR: %08lx\n",
		(unsigned long)regs->fpsr,
		(unsigned long)regs->fpcr);
}

/*
 * Task structure and kernel stack allocation.
 */
static unsigned long *thread_info_head;
static unsigned int nr_thread_info;

#define EXTRA_TASK_STRUCT	4
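
/*
 * Keep up to EXTRA_TASK_STRUCT freed thread_info/kernel stack blocks
 * on a small free list (thread_info_head) so that a fork()/exit()
 * heavy workload does not always have to go back to the page
 * allocator for a THREAD_SIZE_ORDER allocation.
 */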

struct thread_info *alloc_thread_info(struct task_struct *task)
{
	struct thread_info *thread = NULL;

	if (EXTRA_TASK_STRUCT) {
		unsigned long *p = thread_info_head;

		if (p) {
			thread_info_head = (unsigned long *)p[0];
			nr_thread_info -= 1;
		}
		thread = (struct thread_info *)p;
	}

	if (!thread)
		thread = (struct thread_info *)
			   __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);

#ifdef CONFIG_DEBUG_STACK_USAGE
	/*
	 * The stack must be cleared if you want SYSRQ-T to
	 * give sensible stack usage information
	 */
	if (thread)
		memzero(thread, THREAD_SIZE);
#endif
	return thread;
}

void free_thread_info(struct thread_info *thread)
{
	if (EXTRA_TASK_STRUCT && nr_thread_info < EXTRA_TASK_STRUCT) {
		unsigned long *p = (unsigned long *)thread;
		p[0] = (unsigned long)thread_info_head;
		thread_info_head = p;
		nr_thread_info += 1;
	} else
		free_pages((unsigned long)thread, THREAD_SIZE_ORDER);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
}

static void default_fp_init(union fp_state *fp)
{
	memset(fp, 0, sizeof(union fp_state));
}

void (*fp_init)(union fp_state *) = default_fp_init;
EXPORT_SYMBOL(fp_init);
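
/*
 * A floating point emulator or hardware FP support code can replace
 * the exported fp_init pointer if it needs to set up a different
 * initial FP state; the default simply zeroes the fpstate that
 * flush_thread() passes in.
 */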

void flush_thread(void)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = current;

	memset(thread->used_cp, 0, sizeof(thread->used_cp));
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
#if defined(CONFIG_IWMMXT)
	iwmmxt_task_release(thread);
#endif
	fp_init(&thread->fpstate);
#if defined(CONFIG_VFP)
	vfp_flush_thread(&thread->vfpstate);
#endif
}

void release_thread(struct task_struct *dead_task)
{
#if defined(CONFIG_VFP)
	vfp_release_thread(&task_thread_info(dead_task)->vfpstate);
#endif
#if defined(CONFIG_IWMMXT)
	iwmmxt_task_release(task_thread_info(dead_task));
#endif
}

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

int
copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start,
	    unsigned long stk_sz, struct task_struct *p, struct pt_regs *regs)
{
	struct thread_info *thread = task_thread_info(p);
	struct pt_regs *childregs = task_pt_regs(p);

	*childregs = *regs;
	childregs->ARM_r0 = 0;
	childregs->ARM_sp = stack_start;

	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
	thread->cpu_context.sp = (unsigned long)childregs;
	thread->cpu_context.pc = (unsigned long)ret_from_fork;

	if (clone_flags & CLONE_SETTLS)
		thread->tp_value = regs->ARM_r3;

	return 0;
}
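
/*
 * The child starts life with a copy of the parent's registers, except
 * that r0 is zeroed so fork()/clone() return 0 in the child, and sp is
 * set to the requested stack.  cpu_context.pc is pointed at
 * ret_from_fork, so the first time the scheduler switches to the new
 * task it enters ret_from_fork and then restores childregs.  For
 * CLONE_SETTLS the new TLS value is taken from r3 of the parent's
 * registers.
 */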

/*
 * fill in the fpe structure for a core dump...
 */
int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
{
	struct thread_info *thread = current_thread_info();
	int used_math = thread->used_cp[1] | thread->used_cp[2];

	if (used_math)
		memcpy(fp, &thread->fpstate.soft, sizeof (*fp));

	return used_math != 0;
}
EXPORT_SYMBOL(dump_fpu);

/*
 * fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
	struct task_struct *tsk = current;

	dump->magic = CMAGIC;
	dump->start_code = tsk->mm->start_code;
	dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1);

	dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT;
	dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
	dump->u_ssize = 0;

	dump->u_debugreg[0] = tsk->thread.debug.bp[0].address;
	dump->u_debugreg[1] = tsk->thread.debug.bp[1].address;
	dump->u_debugreg[2] = tsk->thread.debug.bp[0].insn.arm;
	dump->u_debugreg[3] = tsk->thread.debug.bp[1].insn.arm;
	dump->u_debugreg[4] = tsk->thread.debug.nsaved;

	if (dump->start_stack < 0x04000000)
		dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;

	dump->regs = *regs;
	dump->u_fpvalid = dump_fpu (regs, &dump->u_fp);
}
EXPORT_SYMBOL(dump_thread);

/*
 * Shuffle the argument into the correct register before calling the
 * thread function.  r1 is the thread argument, r2 is the pointer to
 * the thread function, and r3 points to the exit function.
 */
extern void kernel_thread_helper(void);
asm(	".section .text\n"
"	.align\n"
"	.type	kernel_thread_helper, #function\n"
"kernel_thread_helper:\n"
"	mov	r0, r1\n"
"	mov	lr, r3\n"
"	mov	pc, r2\n"
"	.size	kernel_thread_helper, . - kernel_thread_helper\n"
"	.previous");

/*
 * Create a kernel thread.
 */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.ARM_r1 = (unsigned long)arg;
	regs.ARM_r2 = (unsigned long)fn;
	regs.ARM_r3 = (unsigned long)do_exit;
	regs.ARM_pc = (unsigned long)kernel_thread_helper;
	regs.ARM_cpsr = SVC_MODE;

	return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
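
/*
 * The new thread runs fn(arg) in kernel space and falls into do_exit()
 * when fn returns, since lr is preloaded with do_exit above.  A
 * hypothetical caller might do:
 *
 *	pid = kernel_thread(my_daemon, NULL, CLONE_FS | CLONE_FILES);
 *	if (pid < 0)
 *		... creation failed ...
 *
 * where my_daemon() is the thread function supplied by that caller.
 */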

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long fp, lr;
	unsigned long stack_start, stack_end;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_start = (unsigned long)end_of_stack(p);
	stack_end = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	fp = thread_saved_fp(p);
	do {
		if (fp < stack_start || fp > stack_end)
			return 0;
		lr = pc_pointer(((unsigned long *)fp)[-1]);
		if (!in_sched_functions(lr))
			return lr;
		fp = *(unsigned long *)(fp - 12);
	} while (count++ < 16);
	return 0;
}
EXPORT_SYMBOL(get_wchan);
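
/*
 * The frame walk above relies on the APCS frame layout, where the
 * frame pointer addresses a {fp, sp, lr, pc} save block: fp[-1] is the
 * saved lr and fp - 12 holds the caller's frame pointer.  The walk
 * gives up after 16 frames or as soon as fp leaves the task's stack.
 */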