/*
 * Copyright (C) 2002- 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <stdlib.h>
#include <unistd.h>
#include <sched.h>
#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <asm/unistd.h>
#include "as-layout.h"
#include "chan_user.h"
#include "kern_util.h"
#include "mem.h"
#include "os.h"
#include "proc_mm.h"
#include "ptrace_user.h"
#include "registers.h"
#include "skas.h"
#include "skas_ptrace.h"
#include "sysdep/stub.h"

int is_skas_winch(int pid, int fd, void *data)
{
	if (pid != getpgrp())
		return 0;

	register_winch_irq(-1, fd, -1, data, 0);
	return 1;
}

static int ptrace_dump_regs(int pid)
{
	unsigned long regs[MAX_REG_NR];
	int i;

	if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
		return -errno;

	printk(UM_KERN_ERR "Stub registers -\n");
	for (i = 0; i < ARRAY_SIZE(regs); i++)
		printk(UM_KERN_ERR "\t%d - %lx\n", i, regs[i]);

	return 0;
}

/*
 * Signals that are OK to receive in the stub - we'll just continue it.
 * SIGWINCH will happen when UML is inside a detached screen.
 */
#define STUB_SIG_MASK ((1 << SIGVTALRM) | (1 << SIGWINCH))

/* Signals that the stub will finish with - anything else is an error */
#define STUB_DONE_MASK (1 << SIGTRAP)

void wait_stub_done(int pid)
{
	int n, status, err;

	while (1) {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
		if ((n < 0) || !WIFSTOPPED(status))
			goto bad_wait;

		if (((1 << WSTOPSIG(status)) & STUB_SIG_MASK) == 0)
			break;

		err = ptrace(PTRACE_CONT, pid, 0, 0);
		if (err) {
			printk(UM_KERN_ERR "wait_stub_done : continue failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}
	}

	if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
		return;

bad_wait:
	err = ptrace_dump_regs(pid);
	if (err)
		printk(UM_KERN_ERR "Failed to get registers from stub, "
		       "errno = %d\n", -err);
	printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, "
	       "pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno,
	       status);
	fatal_sigsegv();
}

extern unsigned long current_stub_stack(void);
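
/*
 * get_skas_faultinfo - fetch the fault information for the SIGSEGV the
 * process just took. With PTRACE_FAULTINFO it is read directly from the
 * host; otherwise the process is continued into the stub's SIGSEGV
 * handler, which leaves the faultinfo at the start of the stub stack
 * page, from where it is copied once the stub is done.
 */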
static void get_skas_faultinfo(int pid, struct faultinfo *fi)
{
	int err;

	if (ptrace_faultinfo) {
		err = ptrace(PTRACE_FAULTINFO, pid, 0, fi);
		if (err) {
			printk(UM_KERN_ERR "get_skas_faultinfo - "
			       "PTRACE_FAULTINFO failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		/* Special handling for i386, which has different structs */
		if (sizeof(struct ptrace_faultinfo) < sizeof(struct faultinfo))
			memset((char *)fi + sizeof(struct ptrace_faultinfo), 0,
			       sizeof(struct faultinfo) -
			       sizeof(struct ptrace_faultinfo));
	}
	else {
		unsigned long fpregs[FP_SIZE];

		err = get_fp_registers(pid, fpregs);
		if (err < 0) {
			printk(UM_KERN_ERR "get_fp_registers returned %d\n",
			       err);
			fatal_sigsegv();
		}
		err = ptrace(PTRACE_CONT, pid, 0, SIGSEGV);
		if (err) {
			printk(UM_KERN_ERR "Failed to continue stub, pid = %d, "
			       "errno = %d\n", pid, errno);
			fatal_sigsegv();
		}
		wait_stub_done(pid);

		/*
		 * faultinfo is prepared by the stub-segv-handler at start of
		 * the stub stack page. We just have to copy it.
		 */
		memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));

		err = put_fp_registers(pid, fpregs);
		if (err < 0) {
			printk(UM_KERN_ERR "put_fp_registers returned %d\n",
			       err);
			fatal_sigsegv();
		}
	}
}

static void handle_segv(int pid, struct uml_pt_regs *regs)
{
	get_skas_faultinfo(pid, &regs->faultinfo);
	segv(regs->faultinfo, 0, 1, NULL);
}

/*
 * To use the same value of using_sysemu as the caller, ask it that value
 * (in local_using_sysemu).
 */
static void handle_trap(int pid, struct uml_pt_regs *regs,
			int local_using_sysemu)
{
	int err, status;

	if ((UPT_IP(regs) >= STUB_START) && (UPT_IP(regs) < STUB_END))
		fatal_sigsegv();

	/* Mark this as a syscall */
	UPT_SYSCALL_NR(regs) = PT_SYSCALL_NR(regs->gp);

	if (!local_using_sysemu)
	{
		err = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_NR_OFFSET,
			     __NR_getpid);
		if (err < 0) {
			printk(UM_KERN_ERR "handle_trap - nullifying syscall "
			       "failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		err = ptrace(PTRACE_SYSCALL, pid, 0, 0);
		if (err < 0) {
			printk(UM_KERN_ERR "handle_trap - continuing to end of "
			       "syscall failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
		if ((err < 0) || !WIFSTOPPED(status) ||
		    (WSTOPSIG(status) != SIGTRAP + 0x80)) {
			err = ptrace_dump_regs(pid);
			if (err)
				printk(UM_KERN_ERR "Failed to get registers "
				       "from process, errno = %d\n", -err);
			printk(UM_KERN_ERR "handle_trap - failed to wait at "
			       "end of syscall, errno = %d, status = %d\n",
			       errno, status);
			fatal_sigsegv();
		}
	}

	handle_syscall(regs);
}

extern int __syscall_stub_start;
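
/*
 * userspace_tramp - entry point of the newly cloned host process. It
 * puts itself under ptrace, arms the timer, maps the syscall stub code
 * (and, without proc_mm, the stub data page), installs the stub SIGSEGV
 * handler on its own signal stack when PTRACE_FAULTINFO is unavailable,
 * and finally stops itself with SIGSTOP so start_userspace() can pick it
 * up.
 */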
static int userspace_tramp(void *stack)
{
	void *addr;
	int err;

	ptrace(PTRACE_TRACEME, 0, 0, 0);

	signal(SIGTERM, SIG_DFL);
	signal(SIGWINCH, SIG_IGN);
	err = set_interval();
	if (err) {
		printk(UM_KERN_ERR "userspace_tramp - setting timer failed, "
		       "errno = %d\n", err);
		exit(1);
	}

	if (!proc_mm) {
		/*
		 * This has a pte, but it can't be mapped in with the usual
		 * tlb_flush mechanism because this is part of that mechanism
		 */
		int fd;
		unsigned long long offset;
		fd = phys_mapping(to_phys(&__syscall_stub_start), &offset);
		addr = mmap64((void *) STUB_CODE, UM_KERN_PAGE_SIZE,
			      PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
		if (addr == MAP_FAILED) {
			printk(UM_KERN_ERR "mapping mmap stub at 0x%lx failed, "
			       "errno = %d\n", STUB_CODE, errno);
			exit(1);
		}

		if (stack != NULL) {
			fd = phys_mapping(to_phys(stack), &offset);
			addr = mmap((void *) STUB_DATA,
				    UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
				    MAP_FIXED | MAP_SHARED, fd, offset);
			if (addr == MAP_FAILED) {
				printk(UM_KERN_ERR "mapping segfault stack "
				       "at 0x%lx failed, errno = %d\n",
				       STUB_DATA, errno);
				exit(1);
			}
		}
	}
	if (!ptrace_faultinfo && (stack != NULL)) {
		struct sigaction sa;

		unsigned long v = STUB_CODE +
				  (unsigned long) stub_segv_handler -
				  (unsigned long) &__syscall_stub_start;

		set_sigstack((void *) STUB_DATA, UM_KERN_PAGE_SIZE);
		sigemptyset(&sa.sa_mask);
		sa.sa_flags = SA_ONSTACK | SA_NODEFER | SA_SIGINFO;
		sa.sa_sigaction = (void *) v;
		sa.sa_restorer = NULL;
		if (sigaction(SIGSEGV, &sa, NULL) < 0) {
			printk(UM_KERN_ERR "userspace_tramp - setting SIGSEGV "
			       "handler failed - errno = %d\n", errno);
			exit(1);
		}
	}

	kill(os_getpid(), SIGSTOP);
	return 0;
}

/* Each element set once, and only accessed by a single processor anyway */
#undef NR_CPUS
#define NR_CPUS 1
int userspace_pid[NR_CPUS];

int start_userspace(unsigned long stub_stack)
{
	void *stack;
	unsigned long sp;
	int pid, status, n, flags, err;

	stack = mmap(NULL, UM_KERN_PAGE_SIZE,
		     PROT_READ | PROT_WRITE | PROT_EXEC,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (stack == MAP_FAILED) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : mmap failed, "
		       "errno = %d\n", errno);
		return err;
	}

	sp = (unsigned long) stack + UM_KERN_PAGE_SIZE - sizeof(void *);

	flags = CLONE_FILES;
	if (proc_mm)
		flags |= CLONE_VM;
	else
		flags |= SIGCHLD;

	pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack);
	if (pid < 0) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : clone failed, "
		       "errno = %d\n", errno);
		return err;
	}

	do {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
		if (n < 0) {
			err = -errno;
			printk(UM_KERN_ERR "start_userspace : wait failed, "
			       "errno = %d\n", errno);
			goto out_kill;
		}
	} while (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGVTALRM));

	if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP)) {
		err = -EINVAL;
		printk(UM_KERN_ERR "start_userspace : expected SIGSTOP, got "
		       "status = %d\n", status);
		goto out_kill;
	}

	if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
		   (void *) PTRACE_O_TRACESYSGOOD) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : PTRACE_OLDSETOPTIONS "
		       "failed, errno = %d\n", errno);
		goto out_kill;
	}

	if (munmap(stack, UM_KERN_PAGE_SIZE) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : munmap failed, "
		       "errno = %d\n", errno);
		goto out_kill;
	}

	return pid;

 out_kill:
	os_kill_ptraced_process(pid, 1);
	return err;
}
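
/*
 * userspace - the main tracing loop. Each iteration loads the process'
 * registers, resumes it with the ptrace operation chosen from sysemu and
 * single-stepping state, waits for it to stop, reads the registers back
 * and dispatches on the stop signal: page faults, syscall traps, timer
 * ticks and I/O signals are handled here; anything unexpected is fatal.
 */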
void userspace(struct uml_pt_regs *regs)
{
	struct itimerval timer;
	unsigned long long nsecs, now;
	int err, status, op, pid = userspace_pid[0];
	/* To prevent races if using_sysemu changes under us. */
	int local_using_sysemu;

	if (getitimer(ITIMER_VIRTUAL, &timer))
		printk(UM_KERN_ERR "Failed to get itimer, errno = %d\n", errno);
	nsecs = timer.it_value.tv_sec * UM_NSEC_PER_SEC +
		timer.it_value.tv_usec * UM_NSEC_PER_USEC;
	nsecs += os_nsecs();

	while (1) {
		/*
		 * This can legitimately fail if the process loads a
		 * bogus value into a segment register. It will
		 * segfault and PTRACE_GETREGS will read that value
		 * out of the process. However, PTRACE_SETREGS will
		 * fail. In this case, there is nothing to do but
		 * just kill the process.
		 */
		if (ptrace(PTRACE_SETREGS, pid, 0, regs->gp))
			fatal_sigsegv();

		if (put_fp_registers(pid, regs->fp))
			fatal_sigsegv();

		/* Now we set local_using_sysemu to be used for one loop */
		local_using_sysemu = get_using_sysemu();

		op = SELECT_PTRACE_OPERATION(local_using_sysemu,
					     singlestepping(NULL));

		if (ptrace(op, pid, 0, 0)) {
			printk(UM_KERN_ERR "userspace - ptrace continue "
			       "failed, op = %d, errno = %d\n", op, errno);
			fatal_sigsegv();
		}

		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
		if (err < 0) {
			printk(UM_KERN_ERR "userspace - wait failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}

		regs->is_user = 1;
		if (ptrace(PTRACE_GETREGS, pid, 0, regs->gp)) {
			printk(UM_KERN_ERR "userspace - PTRACE_GETREGS failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}

		if (get_fp_registers(pid, regs->fp)) {
			printk(UM_KERN_ERR "userspace - get_fp_registers failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}

		UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */

		if (WIFSTOPPED(status)) {
			int sig = WSTOPSIG(status);
			switch (sig) {
			case SIGSEGV:
				if (PTRACE_FULL_FAULTINFO ||
				    !ptrace_faultinfo) {
					get_skas_faultinfo(pid,
							   &regs->faultinfo);
					(*sig_info[SIGSEGV])(SIGSEGV, regs);
				}
				else handle_segv(pid, regs);
				break;
			case SIGTRAP + 0x80:
				handle_trap(pid, regs, local_using_sysemu);
				break;
			case SIGTRAP:
				relay_signal(SIGTRAP, regs);
				break;
			case SIGVTALRM:
				now = os_nsecs();
				if (now < nsecs)
					break;
				block_signals();
				(*sig_info[sig])(sig, regs);
				unblock_signals();
				nsecs = timer.it_value.tv_sec *
					UM_NSEC_PER_SEC +
					timer.it_value.tv_usec *
					UM_NSEC_PER_USEC;
				nsecs += os_nsecs();
				break;
			case SIGIO:
			case SIGILL:
			case SIGBUS:
			case SIGFPE:
			case SIGWINCH:
				block_signals();
				(*sig_info[sig])(sig, regs);
				unblock_signals();
				break;
			default:
				printk(UM_KERN_ERR "userspace - child stopped "
				       "with signal %d\n", sig);
				fatal_sigsegv();
			}
			pid = userspace_pid[0];
			interrupt_end();

			/* Avoid -ERESTARTSYS handling in host */
			if (PT_SYSCALL_NR_OFFSET != PT_SYSCALL_RET_OFFSET)
				PT_SYSCALL_NR(regs->gp) = -1;
		}
	}
}

static unsigned long thread_regs[MAX_REG_NR];
static unsigned long thread_fp_regs[FP_SIZE];

static int __init init_thread_regs(void)
{
	get_safe_registers(thread_regs, thread_fp_regs);
	/* Set parent's instruction pointer to start of clone-stub */
	thread_regs[REGS_IP_INDEX] = STUB_CODE +
				(unsigned long) stub_clone_handler -
				(unsigned long) &__syscall_stub_start;
	thread_regs[REGS_SP_INDEX] = STUB_DATA + UM_KERN_PAGE_SIZE -
		sizeof(void *);
#ifdef __SIGNAL_FRAMESIZE
	thread_regs[REGS_SP_INDEX] -= __SIGNAL_FRAMESIZE;
#endif
	return 0;
}

__initcall(init_thread_regs);
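
/*
 * copy_context_skas0 - create the host process for a new address space
 * when proc_mm is not available. The existing process is pointed at the
 * clone stub; the stub parent reports the new child's pid through its
 * stub stack, and the child is expected to map its own stub data page
 * (reporting STUB_DATA back on success) before both stop with SIGTRAP.
 */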
int copy_context_skas0(unsigned long new_stack, int pid)
{
	struct timeval tv = { .tv_sec = 0, .tv_usec = UM_USEC_PER_SEC / UM_HZ };
	int err;
	unsigned long current_stack = current_stub_stack();
	struct stub_data *data = (struct stub_data *) current_stack;
	struct stub_data *child_data = (struct stub_data *) new_stack;
	unsigned long long new_offset;
	int new_fd = phys_mapping(to_phys((void *)new_stack), &new_offset);

	/*
	 * prepare offset and fd of child's stack as argument for parent's
	 * and child's mmap2 calls
	 */
	*data = ((struct stub_data) { .offset	= MMAP_OFFSET(new_offset),
				      .fd	= new_fd,
				      .timer	= ((struct itimerval)
						   { .it_value = tv,
						     .it_interval = tv }) });

	err = ptrace_setregs(pid, thread_regs);
	if (err < 0) {
		err = -errno;
		printk(UM_KERN_ERR "copy_context_skas0 : PTRACE_SETREGS "
		       "failed, pid = %d, errno = %d\n", pid, -err);
		return err;
	}

	err = put_fp_registers(pid, thread_fp_regs);
	if (err < 0) {
		printk(UM_KERN_ERR "copy_context_skas0 : put_fp_registers "
		       "failed, pid = %d, err = %d\n", pid, err);
		return err;
	}

	/* set a well known return code for detection of child write failure */
	child_data->err = 12345678;

	/*
	 * Wait until the parent has finished its work: read the child's pid
	 * from the parent's stack and check it for errors.
	 */
	err = ptrace(PTRACE_CONT, pid, 0, 0);
	if (err) {
		err = -errno;
		printk(UM_KERN_ERR "Failed to continue new process, pid = %d, "
		       "errno = %d\n", pid, errno);
		return err;
	}

	wait_stub_done(pid);

	pid = data->err;
	if (pid < 0) {
		printk(UM_KERN_ERR "copy_context_skas0 - stub-parent reports "
		       "error %d\n", -pid);
		return pid;
	}

	/*
	 * Wait until the child has finished too: read the child's result
	 * from the child's stack and check it.
	 */
	wait_stub_done(pid);
	if (child_data->err != STUB_DATA) {
		printk(UM_KERN_ERR "copy_context_skas0 - stub-child reports "
		       "error %ld\n", child_data->err);
		err = child_data->err;
		goto out_kill;
	}

	if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
		   (void *)PTRACE_O_TRACESYSGOOD) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "copy_context_skas0 : PTRACE_OLDSETOPTIONS "
		       "failed, errno = %d\n", errno);
		goto out_kill;
	}

	return pid;

 out_kill:
	os_kill_ptraced_process(pid, 1);
	return err;
}

/*
 * This is used only if stub pages are needed while proc_mm is
 * available. Opening /proc/mm creates a new mm_context, which lacks
 * the stub pages. Thus, we map them using the /proc/mm fd.
 */
int map_stub_pages(int fd, unsigned long code, unsigned long data,
		   unsigned long stack)
{
	struct proc_mm_op mmop;
	int n;
	unsigned long long code_offset;
	int code_fd = phys_mapping(to_phys((void *) &__syscall_stub_start),
				   &code_offset);

	mmop = ((struct proc_mm_op) { .op	= MM_MMAP,
				      .u	=
				      { .mmap	=
					{ .addr	  = code,
					  .len	  = UM_KERN_PAGE_SIZE,
					  .prot	  = PROT_EXEC,
					  .flags  = MAP_FIXED | MAP_PRIVATE,
					  .fd	  = code_fd,
					  .offset = code_offset
					} } });
	CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
	if (n != sizeof(mmop)) {
		n = errno;
		printk(UM_KERN_ERR "mmap args - addr = 0x%lx, fd = %d, "
		       "offset = %llx\n", code, code_fd,
		       (unsigned long long) code_offset);
		printk(UM_KERN_ERR "map_stub_pages : /proc/mm map for code "
		       "failed, err = %d\n", n);
		return -n;
	}

	if (stack) {
		unsigned long long map_offset;
		int map_fd = phys_mapping(to_phys((void *)stack), &map_offset);
		mmop = ((struct proc_mm_op)
				{ .op	= MM_MMAP,
				  .u	=
				  { .mmap	=
				    { .addr	= data,
				      .len	= UM_KERN_PAGE_SIZE,
				      .prot	= PROT_READ | PROT_WRITE,
				      .flags	= MAP_FIXED | MAP_SHARED,
				      .fd	= map_fd,
				      .offset	= map_offset
				    } } });
		CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
		if (n != sizeof(mmop)) {
			n = errno;
			printk(UM_KERN_ERR "map_stub_pages : /proc/mm map for "
			       "data failed, err = %d\n", n);
			return -n;
		}
	}

	return 0;
}
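
/*
 * Kernel-side threads are switched with setjmp/longjmp: new_thread()
 * seeds a jmp_buf with a handler and a fresh stack, switch_threads()
 * jumps between two buffers, and start_idle_thread() dispatches on the
 * INIT_JMP_* code passed back through initial_jmpbuf.
 */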
void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
{
	(*buf)[0].JB_IP = (unsigned long) handler;
	(*buf)[0].JB_SP = (unsigned long) stack + UM_THREAD_SIZE -
		sizeof(void *);
}

#define INIT_JMP_NEW_THREAD 0
#define INIT_JMP_CALLBACK 1
#define INIT_JMP_HALT 2
#define INIT_JMP_REBOOT 3

void switch_threads(jmp_buf *me, jmp_buf *you)
{
	if (UML_SETJMP(me) == 0)
		UML_LONGJMP(you, 1);
}

static jmp_buf initial_jmpbuf;

/* XXX Make these percpu */
static void (*cb_proc)(void *arg);
static void *cb_arg;
static jmp_buf *cb_back;
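
/*
 * start_idle_thread - set up initial_jmpbuf on the initial stack and
 * jump to the first thread. Later longjmps back here run a registered
 * callback (INIT_JMP_CALLBACK) or return 0 for halt and 1 for reboot.
 */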
int start_idle_thread(void *stack, jmp_buf *switch_buf)
{
	int n;

	set_handler(SIGWINCH);

	/*
	 * Can't use UML_SETJMP or UML_LONGJMP here because they save
	 * and restore signals, with the possible side-effect of
	 * trying to handle any signals which came when they were
	 * blocked, which can't be done on this stack.
	 * Signals must be blocked when jumping back here and restored
	 * after returning to the jumper.
	 */
	n = setjmp(initial_jmpbuf);
	switch (n) {
	case INIT_JMP_NEW_THREAD:
		(*switch_buf)[0].JB_IP = (unsigned long) new_thread_handler;
		(*switch_buf)[0].JB_SP = (unsigned long) stack +
			UM_THREAD_SIZE - sizeof(void *);
		break;
	case INIT_JMP_CALLBACK:
		(*cb_proc)(cb_arg);
		longjmp(*cb_back, 1);
		break;
	case INIT_JMP_HALT:
		kmalloc_ok = 0;
		return 0;
	case INIT_JMP_REBOOT:
		kmalloc_ok = 0;
		return 1;
	default:
		printk(UM_KERN_ERR "Bad setjmp return in "
		       "start_idle_thread - %d\n", n);
		fatal_sigsegv();
	}
	longjmp(*switch_buf, 1);
}

void initial_thread_cb_skas(void (*proc)(void *), void *arg)
{
	jmp_buf here;

	cb_proc = proc;
	cb_arg = arg;
	cb_back = &here;

	block_signals();
	if (UML_SETJMP(&here) == 0)
		UML_LONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK);
	unblock_signals();

	cb_proc = NULL;
	cb_arg = NULL;
	cb_back = NULL;
}

void halt_skas(void)
{
	block_signals();
	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_HALT);
}

void reboot_skas(void)
{
	block_signals();
	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_REBOOT);
}

void __switch_mm(struct mm_id *mm_idp)
{
	int err;

	/* FIXME: need cpu pid in __switch_mm */
	if (proc_mm) {
		err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0,
			     mm_idp->u.mm_fd);
		if (err) {
			printk(UM_KERN_ERR "__switch_mm - PTRACE_SWITCH_MM "
			       "failed, errno = %d\n", errno);
			fatal_sigsegv();
		}
	}
	else userspace_pid[0] = mm_idp->u.pid;
}