/*
 * arch/s390/kernel/process.c
 *
 * This file handles the architecture dependent parts of process handling.
 *
 *    Copyright IBM Corp. 1999,2009
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *		 Hartmut Penner <hp@de.ibm.com>,
 *		 Denis Joseph Barrow,
 */

#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/kprobes.h>
#include <linux/random.h>
#include <linux/module.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/timer.h>
#include <asm/nmi.h>
#include <asm/compat.h>
#include <asm/smp.h>
#include "entry.h"

asmlinkage void ret_from_fork(void) asm ("ret_from_fork");

/*
 * Return saved PC of a blocked thread. Used in kernel/sched.
 * resume in entry.S does not create a new stack frame, it
 * just stores the registers %r6-%r15 to the frame given by
 * schedule. We want to return the address of the caller of
 * schedule, so we have to walk the backchain one time to
 * find the frame schedule() stored its return address in.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct stack_frame *sf, *low, *high;

	if (!tsk || !task_stack_page(tsk))
		return 0;
	low = task_stack_page(tsk);
	high = (struct stack_frame *) task_pt_regs(tsk);
	sf = (struct stack_frame *) (tsk->thread.ksp & PSW_ADDR_INSN);
	if (sf <= low || sf > high)
		return 0;
	sf = (struct stack_frame *) (sf->back_chain & PSW_ADDR_INSN);
	if (sf <= low || sf > high)
		return 0;
	return sf->gprs[8];
}
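
/*
 * For reference, a sketch of the stack frame layout assumed above; this
 * is written from memory of the s390 ABI, the authoritative definition
 * of struct stack_frame lives in the arch headers:
 *
 *	struct stack_frame {
 *		unsigned long back_chain;	link to the caller's frame
 *		unsigned long empty1[5];
 *		unsigned long gprs[10];		saved %r6-%r15
 *		unsigned int  empty2[8];
 *	};
 *
 * Since gprs[] holds %r6-%r15, gprs[8] is the saved %r14, i.e. the
 * return address stored by the function that owns the frame.
 */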

/*
 * The idle loop on an S390...
 */
static void default_idle(void)
{
	if (cpu_is_offline(smp_processor_id()))
		cpu_die();
	/*
	 * Disable interrupts before the final need_resched() check to
	 * close the race with a wakeup that arrives in between.
	 */
	local_irq_disable();
	if (need_resched()) {
		local_irq_enable();
		return;
	}
	/* A pending machine check has to be serviced before idling. */
	local_mcck_disable();
	if (test_thread_flag(TIF_MCCK_PENDING)) {
		local_mcck_enable();
		local_irq_enable();
		s390_handle_mcck();
		return;
	}
	trace_hardirqs_on();
	/* Don't trace preempt off for idle. */
	stop_critical_timings();
	/* Stop virtual timer and halt the cpu. */
	vtime_stop_cpu();
	/* Reenable preemption tracer. */
	start_critical_timings();
}

void cpu_idle(void)
{
	for (;;) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched())
			default_idle();
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

extern void __kprobes kernel_thread_starter(void);

asm(
	".section .kprobes.text, \"ax\"\n"
	".global kernel_thread_starter\n"
	"kernel_thread_starter:\n"
	"    la    2,0(10)\n"	/* %r2 = arg (gprs[10], see kernel_thread) */
	"    basr  14,9\n"	/* call fn (gprs[9]), return address in %r14 */
	"    la    2,0\n"	/* exit code 0 in %r2 */
	"    br    11\n"	/* branch to do_exit (gprs[11]) */
	".previous\n");

int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));
	regs.psw.mask = psw_kernel_bits |
		PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
	regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE;
	regs.gprs[9] = (unsigned long) fn;
	regs.gprs[10] = (unsigned long) arg;
	regs.gprs[11] = (unsigned long) do_exit;
	regs.orig_gpr2 = -1;

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
		       0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
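
/*
 * Usage sketch (hypothetical caller, not part of this file): start a
 * kernel thread running my_worker(); when my_worker() returns, the
 * kernel_thread_starter stub above branches to do_exit().
 *
 *	static int my_worker(void *data)
 *	{
 *		do_some_work();		(do_some_work is made up)
 *		return 0;
 *	}
 *
 *	pid = kernel_thread(my_worker, NULL, 0);
 *	if (pid < 0)
 *		printk(KERN_ERR "kernel_thread failed: %d\n", pid);
 */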

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
}

void flush_thread(void)
{
}

void release_thread(struct task_struct *dead_task)
{
}

int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	struct thread_info *ti;
	struct fake_frame
	{
		struct stack_frame sf;
		struct pt_regs childregs;
	} *frame;

	frame = container_of(task_pt_regs(p), struct fake_frame, childregs);
	p->thread.ksp = (unsigned long) frame;
	/* Copy the user register frame to the kernel stack of the new process. */
	frame->childregs = *regs;
	frame->childregs.gprs[2] = 0;	/* child returns 0 on fork. */
	frame->childregs.gprs[15] = new_stackp;
	frame->sf.back_chain = 0;

	/* new return point is ret_from_fork */
	frame->sf.gprs[8] = (unsigned long) ret_from_fork;

	/* fake return stack for resume(), don't go back to schedule */
	frame->sf.gprs[9] = (unsigned long) frame;

	/* Save access registers to new thread structure. */
	save_access_regs(&p->thread.acrs[0]);

#ifndef CONFIG_64BIT
	/*
	 * Save fprs to current->thread.fp_regs to merge them with
	 * the emulated registers and then copy the result to the child.
	 */
	save_fp_regs(&current->thread.fp_regs);
	memcpy(&p->thread.fp_regs, &current->thread.fp_regs,
	       sizeof(s390_fp_regs));
	/* Set a new TLS? */
	if (clone_flags & CLONE_SETTLS)
		p->thread.acrs[0] = regs->gprs[6];
#else /* CONFIG_64BIT */
	/* Save the fpu registers to new thread structure. */
	save_fp_regs(&p->thread.fp_regs);
	/* Set a new TLS? */
	if (clone_flags & CLONE_SETTLS) {
		if (is_compat_task()) {
			p->thread.acrs[0] = (unsigned int) regs->gprs[6];
		} else {
			p->thread.acrs[0] = (unsigned int)(regs->gprs[6] >> 32);
			p->thread.acrs[1] = (unsigned int) regs->gprs[6];
		}
	}
#endif /* CONFIG_64BIT */
	/* Start new process with ar4 pointing to the correct address space. */
	p->thread.mm_segment = get_fs();
	/* Don't copy debug registers. */
	memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
	memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
	clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
	clear_tsk_thread_flag(p, TIF_PER_TRAP);
	/* Initialize per thread user and system timer values. */
	ti = task_thread_info(p);
	ti->user_timer = 0;
	ti->system_timer = 0;
	return 0;
}
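
/*
 * For illustration, the picture copy_thread() builds at the top of the
 * child's kernel stack (derived from the code above, not a definition):
 *
 *	(high addresses)
 *	+----------------------+
 *	| struct pt_regs       |  childregs: copied user state, gpr2 = 0,
 *	|                      |  gpr15 = new_stackp
 *	+----------------------+ <- task_pt_regs(p)
 *	| struct stack_frame   |  sf: gprs[8] = ret_from_fork,
 *	|                      |      gprs[9] = frame, back_chain = 0
 *	+----------------------+ <- frame == p->thread.ksp
 *	(low addresses)
 *
 * The first resume() to the child thus "returns" into ret_from_fork
 * with the user register frame already in place.
 */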

SYSCALL_DEFINE0(fork)
{
	struct pt_regs *regs = task_pt_regs(current);
	return do_fork(SIGCHLD, regs->gprs[15], regs, 0, NULL, NULL);
}

/*
 * Note the s390 argument order: the new stack pointer comes first,
 * before the clone flags, unlike on several other architectures.
 */
SYSCALL_DEFINE4(clone, unsigned long, newsp, unsigned long, clone_flags,
		int __user *, parent_tidptr, int __user *, child_tidptr)
{
	struct pt_regs *regs = task_pt_regs(current);

	if (!newsp)
		newsp = regs->gprs[15];
	return do_fork(clone_flags, newsp, regs, 0,
		       parent_tidptr, child_tidptr);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
SYSCALL_DEFINE0(vfork)
{
	struct pt_regs *regs = task_pt_regs(current);
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
		       regs->gprs[15], regs, 0, NULL, NULL);
}

asmlinkage void execve_tail(void)
{
	current->thread.fp_regs.fpc = 0;
	if (MACHINE_HAS_IEEE)
		/* Clear the hardware floating-point control register too. */
		asm volatile("sfpc %0,%0" : : "d" (0));
}

/*
 * sys_execve() executes a new program.
 */
SYSCALL_DEFINE3(execve, const char __user *, name,
		const char __user *const __user *, argv,
		const char __user *const __user *, envp)
{
	struct pt_regs *regs = task_pt_regs(current);
	char *filename;
	long rc;

	filename = getname(name);
	rc = PTR_ERR(filename);
	if (IS_ERR(filename))
		return rc;
	rc = do_execve(filename, argv, envp, regs);
	if (rc)
		goto out;
	execve_tail();
	rc = regs->gprs[2];
out:
	putname(filename);
	return rc;
}

/*
 * Fill in the FPU structure for a core dump.
 */
int dump_fpu(struct pt_regs *regs, s390_fp_regs *fpregs)
{
#ifndef CONFIG_64BIT
	/*
	 * Save fprs to current->thread.fp_regs to merge them with
	 * the emulated registers and then copy the result to the dump.
	 */
	save_fp_regs(&current->thread.fp_regs);
	memcpy(fpregs, &current->thread.fp_regs, sizeof(s390_fp_regs));
#else /* CONFIG_64BIT */
	save_fp_regs(fpregs);
#endif /* CONFIG_64BIT */
	return 1;
}
EXPORT_SYMBOL(dump_fpu);

unsigned long get_wchan(struct task_struct *p)
{
	struct stack_frame *sf, *low, *high;
	unsigned long return_address;
	int count;

	if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
		return 0;
	low = task_stack_page(p);
	high = (struct stack_frame *) task_pt_regs(p);
	sf = (struct stack_frame *) (p->thread.ksp & PSW_ADDR_INSN);
	if (sf <= low || sf > high)
		return 0;
	/* Walk at most 16 frames in case the backchain is corrupted. */
	for (count = 0; count < 16; count++) {
		sf = (struct stack_frame *) (sf->back_chain & PSW_ADDR_INSN);
		if (sf <= low || sf > high)
			return 0;
		return_address = sf->gprs[8] & PSW_ADDR_INSN;
		if (!in_sched_functions(return_address))
			return return_address;
	}
	return 0;
}

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}
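
/*
 * Worked example for arch_align_stack(), assuming 4KB pages
 * (PAGE_SHIFT == 12): get_random_int() & ~PAGE_MASK yields a random
 * offset of 0..4095 bytes, so the stack top moves down by less than
 * one page; "& ~0xf" then rounds the result down to a 16-byte boundary.
 */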

static inline unsigned long brk_rnd(void)
{
	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
	else
		return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
}
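
/*
 * Checking the arithmetic of the comment in brk_rnd(), assuming 4KB
 * pages (PAGE_SHIFT == 12): 0x7ff + 1 == 2048 pages == 8MB of
 * randomization range for 32-bit tasks, and 0x3ffff + 1 == 262144
 * pages == 1GB for 64-bit tasks.
 */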

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());

	/* Guard against wraparound of the randomized address. */
	if (ret < mm->brk)
		return mm->brk;
	return ret;
}

unsigned long randomize_et_dyn(unsigned long base)
{
	unsigned long ret = PAGE_ALIGN(base + brk_rnd());

	if (!(current->flags & PF_RANDOMIZE))
		return base;
	/* Guard against wraparound. */
	if (ret < base)
		return base;
	return ret;
}