/*
 * Copyright (C) 2002 - 2004 Jeff Dike (jdike@addtoit.com)
 * Licensed under the GPL
 */

#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <signal.h>
#include <setjmp.h>
#include <sched.h>
#include "ptrace_user.h"
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/user.h>
#include <sys/time.h>
#include <asm/unistd.h>
#include <asm/types.h>
#include "user.h"
#include "sysdep/ptrace.h"
#include "user_util.h"
#include "kern_util.h"
#include "skas.h"
#include "stub-data.h"
#include "mm_id.h"
#include "sysdep/sigcontext.h"
#include "sysdep/stub.h"
#include "os.h"
#include "proc_mm.h"
#include "skas_ptrace.h"
#include "chan_user.h"
#include "registers.h"
#include "mem.h"
#include "uml-config.h"
#include "process.h"
#include "longjmp.h"

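/* A pid equal to our own process group means the SIGWINCH is meant for
 * UML itself - register a winch interrupt for the descriptor and report
 * that it was handled.
 */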
int is_skas_winch(int pid, int fd, void *data)
{
	if(pid != os_getpgrp())
		return(0);

	register_winch_irq(-1, fd, -1, data);
	return(1);
}

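/* Wait for the stub process to stop with SIGUSR1 or SIGTRAP.  Unless
 * "sig" is -1, the stub is first continued with that signal; stops caused
 * by SIGVTALRM or SIGWINCH are skipped by continuing again with no signal.
 * Any other outcome dumps the stub's registers and panics.
 */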
void wait_stub_done(int pid, int sig, char * fname)
{
	int n, status, err;

	do {
		if ( sig != -1 ) {
			err = ptrace(PTRACE_CONT, pid, 0, sig);
			if(err)
				panic("%s : continue failed, errno = %d\n",
				      fname, errno);
		}
		sig = 0;

		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
	} while((n >= 0) && WIFSTOPPED(status) &&
	        ((WSTOPSIG(status) == SIGVTALRM) ||
		 /* running UML inside a detached screen can cause
		  * SIGWINCHes
		  */
		 (WSTOPSIG(status) == SIGWINCH)));

	if((n < 0) || !WIFSTOPPED(status) ||
	   (WSTOPSIG(status) != SIGUSR1 && WSTOPSIG(status) != SIGTRAP)){
		unsigned long regs[HOST_FRAME_SIZE];

		if(ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
			printk("Failed to get registers from stub, "
			       "errno = %d\n", errno);
		else {
			int i;

			printk("Stub registers -\n");
			for(i = 0; i < HOST_FRAME_SIZE; i++)
				printk("\t%d - %lx\n", i, regs[i]);
		}
		panic("%s : failed to wait for SIGUSR1/SIGTRAP, "
		      "pid = %d, n = %d, errno = %d, status = 0x%x\n",
		      fname, pid, n, errno, status);
	}
}

extern unsigned long current_stub_stack(void);

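/* Fetch the fault information for the last fault taken by the process.
 * If the host provides PTRACE_FAULTINFO, it is read directly via ptrace;
 * otherwise the stub's SIGSEGV handler has already written a struct
 * faultinfo to the start of the stub stack page, and it is copied from
 * there once the stub has been waited for.
 */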
void get_skas_faultinfo(int pid, struct faultinfo * fi)
{
	int err;

	if(ptrace_faultinfo){
		err = ptrace(PTRACE_FAULTINFO, pid, 0, fi);
		if(err)
			panic("get_skas_faultinfo - PTRACE_FAULTINFO failed, "
			      "errno = %d\n", errno);

		/* Special handling for i386, which has different structs */
		if (sizeof(struct ptrace_faultinfo) < sizeof(struct faultinfo))
			memset((char *)fi + sizeof(struct ptrace_faultinfo), 0,
			       sizeof(struct faultinfo) -
			       sizeof(struct ptrace_faultinfo));
	}
	else {
		wait_stub_done(pid, SIGSEGV, "get_skas_faultinfo");

		/* faultinfo is prepared by the stub-segv-handler at start of
		 * the stub stack page. We just have to copy it.
		 */
		memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));
	}
}

static void handle_segv(int pid, union uml_pt_regs * regs)
{
	get_skas_faultinfo(pid, &regs->skas.faultinfo);
	segv(regs->skas.faultinfo, 0, 1, NULL);
}

/* handle_trap() uses the caller's snapshot of using_sysemu, passed in as
 * local_using_sysemu, rather than re-reading the current value.
 */
static void handle_trap(int pid, union uml_pt_regs *regs, int local_using_sysemu)
{
	int err, status;

	/* Mark this as a syscall */
	UPT_SYSCALL_NR(regs) = PT_SYSCALL_NR(regs->skas.regs);

	if (!local_using_sysemu)
	{
		err = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_NR_OFFSET,
			     __NR_getpid);
		if(err < 0)
			panic("handle_trap - nullifying syscall failed, "
			      "errno = %d\n", errno);

		err = ptrace(PTRACE_SYSCALL, pid, 0, 0);
		if(err < 0)
			panic("handle_trap - continuing to end of syscall failed, "
			      "errno = %d\n", errno);

		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED));
		if((err < 0) || !WIFSTOPPED(status) ||
		   (WSTOPSIG(status) != SIGTRAP + 0x80))
			panic("handle_trap - failed to wait at end of syscall, "
			      "errno = %d, status = %d\n", errno, status);
	}

	handle_syscall(regs);
}

extern int __syscall_stub_start;

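/* Entry point of the process cloned by start_userspace().  It asks to be
 * traced and enables the timer.  When /proc/mm is not available it maps
 * the syscall stub code page and, if a stub stack was passed in, the stub
 * data page at their fixed addresses; without PTRACE_FAULTINFO it also
 * installs the stub's SIGSEGV handler on that stack.  Finally it stops
 * itself so the tracer can take over.
 */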
static int userspace_tramp(void *stack)
{
	void *addr;

	ptrace(PTRACE_TRACEME, 0, 0, 0);

	init_new_thread_signals(1);
	enable_timer();

	if(!proc_mm){
		/* This has a pte, but it can't be mapped in with the usual
		 * tlb_flush mechanism because this is part of that mechanism
		 */
		int fd;
		__u64 offset;
		fd = phys_mapping(to_phys(&__syscall_stub_start), &offset);
		addr = mmap64((void *) UML_CONFIG_STUB_CODE, page_size(),
			      PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
		if(addr == MAP_FAILED){
			printk("mapping mmap stub failed, errno = %d\n",
			       errno);
			exit(1);
		}

		if(stack != NULL){
			fd = phys_mapping(to_phys(stack), &offset);
			addr = mmap((void *) UML_CONFIG_STUB_DATA, page_size(),
				    PROT_READ | PROT_WRITE,
				    MAP_FIXED | MAP_SHARED, fd, offset);
			if(addr == MAP_FAILED){
				printk("mapping segfault stack failed, "
				       "errno = %d\n", errno);
				exit(1);
			}
		}
	}
	if(!ptrace_faultinfo && (stack != NULL)){
		unsigned long v = UML_CONFIG_STUB_CODE +
				  (unsigned long) stub_segv_handler -
				  (unsigned long) &__syscall_stub_start;

		set_sigstack((void *) UML_CONFIG_STUB_DATA, page_size());
		set_handler(SIGSEGV, (void *) v, SA_ONSTACK,
			    SIGIO, SIGWINCH, SIGALRM, SIGVTALRM,
			    SIGUSR1, -1);
	}

	os_stop_process(os_getpid());
	return(0);
}

/* Each element set once, and only accessed by a single processor anyway */
#undef NR_CPUS
#define NR_CPUS 1
int userspace_pid[NR_CPUS];

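/* Create the process which will run userspace code.  A temporary stack
 * is mmapped for the clone, the child (userspace_tramp) is waited for
 * until it stops itself, syscall stops are marked with
 * PTRACE_O_TRACESYSGOOD, and the temporary stack is unmapped again.
 * Returns the new pid.
 */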
int start_userspace(unsigned long stub_stack)
{
	void *stack;
	unsigned long sp;
	int pid, status, n, flags;

	stack = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if(stack == MAP_FAILED)
		panic("start_userspace : mmap failed, errno = %d", errno);
	sp = (unsigned long) stack + PAGE_SIZE - sizeof(void *);

	flags = CLONE_FILES | SIGCHLD;
	if(proc_mm) flags |= CLONE_VM;
	pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack);
	if(pid < 0)
		panic("start_userspace : clone failed, errno = %d", errno);

	do {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
		if(n < 0)
			panic("start_userspace : wait failed, errno = %d",
			      errno);
	} while(WIFSTOPPED(status) && (WSTOPSIG(status) == SIGVTALRM));

	if(!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP))
		panic("start_userspace : expected SIGSTOP, got status = %d",
		      status);

	if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL, (void *)PTRACE_O_TRACESYSGOOD) < 0)
		panic("start_userspace : PTRACE_OLDSETOPTIONS failed, errno = %d\n",
		      errno);

	if(munmap(stack, PAGE_SIZE) < 0)
		panic("start_userspace : munmap failed, errno = %d\n", errno);

	return(pid);
}

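/* The main tracing loop.  Each iteration restores the process's registers,
 * resumes it with the ptrace operation chosen from the current sysemu and
 * single-stepping settings, and decodes its next stop: SIGSEGV and syscall
 * traps are handled here, other signals are passed to the signal code.
 */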
void userspace(union uml_pt_regs *regs)
{
	int err, status, op, pid = userspace_pid[0];
	int local_using_sysemu; /* To prevent races if using_sysemu changes under us. */

	while(1){
		restore_registers(pid, regs);

		/* Now we set local_using_sysemu to be used for one loop */
		local_using_sysemu = get_using_sysemu();

		op = SELECT_PTRACE_OPERATION(local_using_sysemu, singlestepping(NULL));

		err = ptrace(op, pid, 0, 0);
		if(err)
			panic("userspace - could not resume userspace process, "
			      "pid = %d, ptrace operation = %d, errno = %d\n",
			      pid, op, errno);

		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED));
		if(err < 0)
			panic("userspace - waitpid failed, errno = %d\n",
			      errno);

		regs->skas.is_user = 1;
		save_registers(pid, regs);
		UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */

		if(WIFSTOPPED(status)){
			switch(WSTOPSIG(status)){
			case SIGSEGV:
				if(PTRACE_FULL_FAULTINFO || !ptrace_faultinfo)
					user_signal(SIGSEGV, regs, pid);
				else handle_segv(pid, regs);
				break;
			case SIGTRAP + 0x80:
				handle_trap(pid, regs, local_using_sysemu);
				break;
			case SIGTRAP:
				relay_signal(SIGTRAP, regs);
				break;
			case SIGIO:
			case SIGVTALRM:
			case SIGILL:
			case SIGBUS:
			case SIGFPE:
			case SIGWINCH:
				user_signal(WSTOPSIG(status), regs, pid);
				break;
			default:
				printk("userspace - child stopped with signal "
				       "%d\n", WSTOPSIG(status));
			}
			pid = userspace_pid[0];
			interrupt_end();

			/* Avoid -ERESTARTSYS handling in host */
			if(PT_SYSCALL_NR_OFFSET != PT_SYSCALL_RET_OFFSET)
				PT_SYSCALL_NR(regs->skas.regs) = -1;
		}
	}
}

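/* Fork handling when /proc/mm is not available.  The existing stub is
 * pointed at the clone stub and run: it clones a new process, and both
 * stubs mmap the new stub stack using the fd and offset prepared below.
 * The parent's stub stack then holds the child's pid and the child's stub
 * stack holds a success marker; both are checked before the new pid is
 * returned.
 */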
int copy_context_skas0(unsigned long new_stack, int pid)
{
	int err;
	unsigned long regs[HOST_FRAME_SIZE];
	unsigned long fp_regs[HOST_FP_SIZE];
	unsigned long current_stack = current_stub_stack();
	struct stub_data *data = (struct stub_data *) current_stack;
	struct stub_data *child_data = (struct stub_data *) new_stack;
	__u64 new_offset;
	int new_fd = phys_mapping(to_phys((void *)new_stack), &new_offset);

	/* prepare offset and fd of child's stack as arguments for parent's
	 * and child's mmap2 calls
	 */
	*data = ((struct stub_data) { .offset	= MMAP_OFFSET(new_offset),
				      .fd	= new_fd,
				      .timer    = ((struct itimerval)
					            { { 0, 1000000 / hz() },
						      { 0, 1000000 / hz() }})});
	get_safe_registers(regs, fp_regs);

	/* Set parent's instruction pointer to start of clone-stub */
	regs[REGS_IP_INDEX] = UML_CONFIG_STUB_CODE +
				(unsigned long) stub_clone_handler -
				(unsigned long) &__syscall_stub_start;
	regs[REGS_SP_INDEX] = UML_CONFIG_STUB_DATA + PAGE_SIZE -
		sizeof(void *);
#ifdef __SIGNAL_FRAMESIZE
	regs[REGS_SP_INDEX] -= __SIGNAL_FRAMESIZE;
#endif
	err = ptrace_setregs(pid, regs);
	if(err < 0)
		panic("copy_context_skas0 : PTRACE_SETREGS failed, "
		      "pid = %d, errno = %d\n", pid, errno);

	err = ptrace_setfpregs(pid, fp_regs);
	if(err < 0)
		panic("copy_context_skas0 : PTRACE_SETFPREGS failed, "
		      "pid = %d, errno = %d\n", pid, errno);

	/* set a well-known return code for detection of child write failure */
	child_data->err = 12345678;

	/* Wait until the parent stub has finished its work, then read the
	 * child's pid from the parent's stack and check it for errors.
	 */
	wait_stub_done(pid, 0, "copy_context_skas0");

	pid = data->err;
	if(pid < 0)
		panic("copy_context_skas0 - stub-parent reports error %d\n",
		      pid);

	/* Wait until the child has finished too, then read the child's
	 * result from its stack and check it.
	 */
	wait_stub_done(pid, -1, "copy_context_skas0");
	if (child_data->err != UML_CONFIG_STUB_DATA)
		panic("copy_context_skas0 - stub-child reports error %d\n",
		      child_data->err);

	if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
		   (void *)PTRACE_O_TRACESYSGOOD) < 0)
		panic("copy_context_skas0 : PTRACE_OLDSETOPTIONS failed, "
		      "errno = %d\n", errno);

	return pid;
}

/*
 * This is used only if stub pages are needed while proc_mm is
 * available.  Opening /proc/mm creates a new mm_context, which lacks
 * the stub pages.  Thus, we map them in using the /proc/mm fd.
 */
void map_stub_pages(int fd, unsigned long code,
		    unsigned long data, unsigned long stack)
{
	struct proc_mm_op mmop;
	int n;
	__u64 code_offset;
	int code_fd = phys_mapping(to_phys((void *) &__syscall_stub_start),
				   &code_offset);

	mmop = ((struct proc_mm_op) { .op        = MM_MMAP,
				      .u         =
				      { .mmap    =
					{ .addr    = code,
					  .len     = PAGE_SIZE,
					  .prot    = PROT_EXEC,
					  .flags   = MAP_FIXED | MAP_PRIVATE,
					  .fd      = code_fd,
					  .offset  = code_offset
	} } });
	n = os_write_file(fd, &mmop, sizeof(mmop));
	if(n != sizeof(mmop))
		panic("map_stub_pages : /proc/mm map for code failed, "
		      "err = %d\n", -n);

	if ( stack ) {
		__u64 map_offset;
		int map_fd = phys_mapping(to_phys((void *)stack), &map_offset);
		mmop = ((struct proc_mm_op)
				{ .op        = MM_MMAP,
				  .u         =
				  { .mmap    =
				    { .addr    = data,
				      .len     = PAGE_SIZE,
				      .prot    = PROT_READ | PROT_WRITE,
				      .flags   = MAP_FIXED | MAP_SHARED,
				      .fd      = map_fd,
				      .offset  = map_offset
		} } });
		n = os_write_file(fd, &mmop, sizeof(mmop));
		if(n != sizeof(mmop))
			panic("map_stub_pages : /proc/mm map for data failed, "
			      "err = %d\n", -n);
	}
}

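/* Record the sigsetjmp buffers used for switching to and from the new
 * thread, then call new_thread_proc() with the new stack and handler.
 * The sigsetjmp is done with signals blocked - see the comment below
 * for why.
 */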
void new_thread(void *stack, void **switch_buf_ptr, void **fork_buf_ptr,
		void (*handler)(int))
{
	unsigned long flags;
	sigjmp_buf switch_buf, fork_buf;
	int enable;

	*switch_buf_ptr = &switch_buf;
	*fork_buf_ptr = &fork_buf;

	/* Somewhat subtle - siglongjmp restores the signal mask before doing
	 * the longjmp.  This means that when jumping from one stack to another
	 * when the target stack has interrupts enabled, an interrupt may occur
	 * on the source stack.  This is bad when starting up a process because
	 * it's not supposed to get timer ticks until it has been scheduled.
	 * So, we disable interrupts around the sigsetjmp to ensure that
	 * they can't happen until we get back here where they are safe.
	 */
	flags = get_signals();
	block_signals();
	if(UML_SIGSETJMP(&fork_buf, enable) == 0)
		new_thread_proc(stack, handler);

	remove_sigstack();

	set_signals(flags);
}

#define INIT_JMP_NEW_THREAD 0
#define INIT_JMP_REMOVE_SIGSTACK 1
#define INIT_JMP_CALLBACK 2
#define INIT_JMP_HALT 3
#define INIT_JMP_REBOOT 4

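/* Park the current thread: record its jump buffer in *sw and jump back
 * to the fork buffer, asking it to remove the alternate signal stack.
 * The thread resumes when something siglongjmps to the recorded buffer.
 */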
void thread_wait(void *sw, void *fb)
{
	sigjmp_buf buf, **switch_buf = sw, *fork_buf;
	int enable;

	*switch_buf = &buf;
	fork_buf = fb;
	if(UML_SIGSETJMP(&buf, enable) == 0)
		siglongjmp(*fork_buf, INIT_JMP_REMOVE_SIGSTACK);
}

void switch_threads(void *me, void *next)
{
	sigjmp_buf my_buf, **me_ptr = me, *next_buf = next;
	int enable;

	*me_ptr = &my_buf;
	if(UML_SIGSETJMP(&my_buf, enable) == 0)
		UML_SIGLONGJMP(next_buf, 1);
}

static sigjmp_buf initial_jmpbuf;

/* XXX Make these percpu */
static void (*cb_proc)(void *arg);
static void *cb_arg;
static sigjmp_buf *cb_back;

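/* The idle thread's jump buffer is the hub for thread creation, callbacks,
 * and shutdown: siglongjmps back into it with one of the INIT_JMP_* codes
 * above select what happens next.  Returns 0 on halt and 1 on reboot;
 * otherwise it jumps to the switch buffer and does not return.
 */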
int start_idle_thread(void *stack, void *switch_buf_ptr, void **fork_buf_ptr)
{
	sigjmp_buf **switch_buf = switch_buf_ptr;
	int n, enable;

	set_handler(SIGWINCH, (__sighandler_t) sig_handler,
		    SA_ONSTACK | SA_RESTART, SIGUSR1, SIGIO, SIGALRM,
		    SIGVTALRM, -1);

	*fork_buf_ptr = &initial_jmpbuf;
	n = UML_SIGSETJMP(&initial_jmpbuf, enable);
	switch(n){
	case INIT_JMP_NEW_THREAD:
		new_thread_proc((void *) stack, new_thread_handler);
		break;
	case INIT_JMP_REMOVE_SIGSTACK:
		remove_sigstack();
		break;
	case INIT_JMP_CALLBACK:
		(*cb_proc)(cb_arg);
		UML_SIGLONGJMP(cb_back, 1);
		break;
	case INIT_JMP_HALT:
		kmalloc_ok = 0;
		return(0);
	case INIT_JMP_REBOOT:
		kmalloc_ok = 0;
		return(1);
	default:
		panic("Bad sigsetjmp return in start_idle_thread - %d\n", n);
	}
	UML_SIGLONGJMP(*switch_buf, 1);
}

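/* Run a callback on the initial thread's stack: the callback and its
 * argument are stashed in the cb_* variables, then a jump to
 * initial_jmpbuf with INIT_JMP_CALLBACK runs it and jumps back here.
 */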
void initial_thread_cb_skas(void (*proc)(void *), void *arg)
{
	sigjmp_buf here;
	int enable;

	cb_proc = proc;
	cb_arg = arg;
	cb_back = &here;

	block_signals();
	if(UML_SIGSETJMP(&here, enable) == 0)
		UML_SIGLONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK);
	unblock_signals();

	cb_proc = NULL;
	cb_arg = NULL;
	cb_back = NULL;
}

void halt_skas(void)
{
	block_signals();
	UML_SIGLONGJMP(&initial_jmpbuf, INIT_JMP_HALT);
}

void reboot_skas(void)
{
	block_signals();
	UML_SIGLONGJMP(&initial_jmpbuf, INIT_JMP_REBOOT);
}

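/* Switch the address space the userspace process runs in.  With /proc/mm
 * this is a PTRACE_SWITCH_MM on the existing process; without it, each mm
 * has its own process, so only the pid needs to be switched.
 */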
void switch_mm_skas(struct mm_id *mm_idp)
{
	int err;

#warning need cpu pid in switch_mm_skas
	if(proc_mm){
		err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0,
			     mm_idp->u.mm_fd);
		if(err)
			panic("switch_mm_skas - PTRACE_SWITCH_MM failed, "
			      "errno = %d\n", errno);
	}
	else userspace_pid[0] = mm_idp->u.pid;
}