/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <stdlib.h>
#include <unistd.h>
#include <sched.h>
#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <asm/unistd.h>
#include "as-layout.h"
#include "chan_user.h"
#include "kern_constants.h"
#include "kern_util.h"
#include "mem.h"
#include "os.h"
#include "process.h"
#include "proc_mm.h"
#include "ptrace_user.h"
#include "registers.h"
#include "skas.h"
#include "skas_ptrace.h"
#include "user.h"
#include "sysdep/stub.h"

int is_skas_winch(int pid, int fd, void *data)
{
	if (pid != getpgrp())
		return 0;

	register_winch_irq(-1, fd, -1, data, 0);
	return 1;
}

static int ptrace_dump_regs(int pid)
{
	unsigned long regs[MAX_REG_NR];
	int i;

	if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
		return -errno;

	printk(UM_KERN_ERR "Stub registers -\n");
	for (i = 0; i < ARRAY_SIZE(regs); i++)
		printk(UM_KERN_ERR "\t%d - %lx\n", i, regs[i]);

	return 0;
}

/*
 * Signals that are OK to receive in the stub - we'll just continue it.
 * SIGWINCH will happen when UML is inside a detached screen.
 */
#define STUB_SIG_MASK (1 << SIGVTALRM)

/* Signals that the stub will finish with - anything else is an error */
#define STUB_DONE_MASK (1 << SIGTRAP)

void wait_stub_done(int pid)
{
	int n, status, err;

	while (1) {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
		if ((n < 0) || !WIFSTOPPED(status))
			goto bad_wait;

		if (((1 << WSTOPSIG(status)) & STUB_SIG_MASK) == 0)
			break;

		err = ptrace(PTRACE_CONT, pid, 0, 0);
		if (err) {
			printk(UM_KERN_ERR "wait_stub_done : continue failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}
	}

	if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
		return;

bad_wait:
	err = ptrace_dump_regs(pid);
	if (err)
		printk(UM_KERN_ERR "Failed to get registers from stub, "
		       "errno = %d\n", -err);
	printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, "
	       "pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno,
	       status);
	fatal_sigsegv();
}

extern unsigned long current_stub_stack(void);

void get_skas_faultinfo(int pid, struct faultinfo * fi)
{
	int err;

	if (ptrace_faultinfo) {
		err = ptrace(PTRACE_FAULTINFO, pid, 0, fi);
		if (err) {
			printk(UM_KERN_ERR "get_skas_faultinfo - "
			       "PTRACE_FAULTINFO failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		/* Special handling for i386, which has different structs */
		if (sizeof(struct ptrace_faultinfo) < sizeof(struct faultinfo))
			memset((char *)fi + sizeof(struct ptrace_faultinfo), 0,
			       sizeof(struct faultinfo) -
			       sizeof(struct ptrace_faultinfo));
	}
	else {
		err = ptrace(PTRACE_CONT, pid, 0, SIGSEGV);
		if (err) {
			printk(UM_KERN_ERR "Failed to continue stub, pid = %d, "
			       "errno = %d\n", pid, errno);
			fatal_sigsegv();
		}
		wait_stub_done(pid);

		/*
		 * faultinfo is prepared by the stub-segv-handler at start of
		 * the stub stack page. We just have to copy it.
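		 * (The stub's SIGSEGV handler extracts the fault address and
		 * error code from its sigcontext and writes them to the start
		 * of the stub stack before trapping back into us.)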
		 */
		memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));
	}
}

static void handle_segv(int pid, struct uml_pt_regs * regs)
{
	get_skas_faultinfo(pid, &regs->faultinfo);
	segv(regs->faultinfo, 0, 1, NULL);
}

/*
 * To use the same value of using_sysemu as the caller, ask it that value
 * (in local_using_sysemu)
 */
static void handle_trap(int pid, struct uml_pt_regs *regs,
			int local_using_sysemu)
{
	int err, status;

	if ((UPT_IP(regs) >= STUB_START) && (UPT_IP(regs) < STUB_END))
		fatal_sigsegv();

	/* Mark this as a syscall */
	UPT_SYSCALL_NR(regs) = PT_SYSCALL_NR(regs->gp);

	if (!local_using_sysemu)
	{
		err = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_NR_OFFSET,
			     __NR_getpid);
		if (err < 0) {
			printk(UM_KERN_ERR "handle_trap - nullifying syscall "
			       "failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		err = ptrace(PTRACE_SYSCALL, pid, 0, 0);
		if (err < 0) {
			printk(UM_KERN_ERR "handle_trap - continuing to end of "
			       "syscall failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
		if ((err < 0) || !WIFSTOPPED(status) ||
		    (WSTOPSIG(status) != SIGTRAP + 0x80)) {
			err = ptrace_dump_regs(pid);
			if (err)
				printk(UM_KERN_ERR "Failed to get registers "
				       "from process, errno = %d\n", -err);
			printk(UM_KERN_ERR "handle_trap - failed to wait at "
			       "end of syscall, errno = %d, status = %d\n",
			       errno, status);
			fatal_sigsegv();
		}
	}

	handle_syscall(regs);
}

extern int __syscall_stub_start;

static int userspace_tramp(void *stack)
{
	void *addr;
	int err;

	ptrace(PTRACE_TRACEME, 0, 0, 0);

	signal(SIGTERM, SIG_DFL);
	signal(SIGWINCH, SIG_IGN);
	err = set_interval();
	if (err) {
		printk(UM_KERN_ERR "userspace_tramp - setting timer failed, "
		       "errno = %d\n", err);
		exit(1);
	}

	if (!proc_mm) {
		/*
		 * This has a pte, but it can't be mapped in with the usual
		 * tlb_flush mechanism because this is part of that mechanism
		 */
		int fd;
		unsigned long long offset;
		fd = phys_mapping(to_phys(&__syscall_stub_start), &offset);
		addr = mmap64((void *) STUB_CODE, UM_KERN_PAGE_SIZE,
			      PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
		if (addr == MAP_FAILED) {
			printk(UM_KERN_ERR "mapping mmap stub at 0x%lx failed, "
			       "errno = %d\n", STUB_CODE, errno);
			exit(1);
		}

		if (stack != NULL) {
			fd = phys_mapping(to_phys(stack), &offset);
			addr = mmap((void *) STUB_DATA,
				    UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
				    MAP_FIXED | MAP_SHARED, fd, offset);
			if (addr == MAP_FAILED) {
				printk(UM_KERN_ERR "mapping segfault stack "
				       "at 0x%lx failed, errno = %d\n",
				       STUB_DATA, errno);
				exit(1);
			}
		}
	}
	if (!ptrace_faultinfo && (stack != NULL)) {
		struct sigaction sa;

		unsigned long v = STUB_CODE +
				  (unsigned long) stub_segv_handler -
				  (unsigned long) &__syscall_stub_start;

		set_sigstack((void *) STUB_DATA, UM_KERN_PAGE_SIZE);
		sigemptyset(&sa.sa_mask);
		sa.sa_flags = SA_ONSTACK | SA_NODEFER;
		sa.sa_handler = (void *) v;
		sa.sa_restorer = NULL;
		if (sigaction(SIGSEGV, &sa, NULL) < 0) {
			printk(UM_KERN_ERR "userspace_tramp - setting SIGSEGV "
			       "handler failed - errno = %d\n", errno);
			exit(1);
		}
	}

	kill(os_getpid(), SIGSTOP);
	return 0;
}

/* Each element set once, and only accessed by a single processor anyway */
#undef NR_CPUS
#define NR_CPUS 1
int userspace_pid[NR_CPUS];

int start_userspace(unsigned long stub_stack)
{
	void *stack;
	unsigned long sp;
	int pid, status, n, flags, err;

	stack = mmap(NULL, UM_KERN_PAGE_SIZE,
		     PROT_READ | PROT_WRITE | PROT_EXEC,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (stack == MAP_FAILED) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : mmap failed, "
		       "errno = %d\n", errno);
		return err;
	}

	sp = (unsigned long) stack + UM_KERN_PAGE_SIZE - sizeof(void *);

	flags = CLONE_FILES;
	if (proc_mm)
		flags |= CLONE_VM;
	else
		flags |= SIGCHLD;

	pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack);
	if (pid < 0) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : clone failed, "
		       "errno = %d\n", errno);
		return err;
	}

	do {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
		if (n < 0) {
			err = -errno;
			printk(UM_KERN_ERR "start_userspace : wait failed, "
			       "errno = %d\n", errno);
			goto out_kill;
		}
	} while (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGVTALRM));

	if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP)) {
		err = -EINVAL;
		printk(UM_KERN_ERR "start_userspace : expected SIGSTOP, got "
		       "status = %d\n", status);
		goto out_kill;
	}

	if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
		   (void *) PTRACE_O_TRACESYSGOOD) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : PTRACE_OLDSETOPTIONS "
		       "failed, errno = %d\n", errno);
		goto out_kill;
	}

	if (munmap(stack, UM_KERN_PAGE_SIZE) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : munmap failed, "
		       "errno = %d\n", errno);
		goto out_kill;
	}

	return pid;

out_kill:
	os_kill_ptraced_process(pid, 1);
	return err;
}

void userspace(struct uml_pt_regs *regs)
{
	struct itimerval timer;
	unsigned long long nsecs, now;
	int err, status, op, pid = userspace_pid[0];
	/* To prevent races if using_sysemu changes under us.*/
	int local_using_sysemu;

	if (getitimer(ITIMER_VIRTUAL, &timer))
		printk(UM_KERN_ERR "Failed to get itimer, errno = %d\n", errno);
	nsecs = timer.it_value.tv_sec * UM_NSEC_PER_SEC +
		timer.it_value.tv_usec * UM_NSEC_PER_USEC;
	nsecs += os_nsecs();

	while (1) {
		/*
		 * This can legitimately fail if the process loads a
		 * bogus value into a segment register.  It will
		 * segfault and PTRACE_GETREGS will read that value
		 * out of the process.  However, PTRACE_SETREGS will
		 * fail.  In this case, there is nothing to do but
		 * just kill the process.
		 */
		if (ptrace(PTRACE_SETREGS, pid, 0, regs->gp))
			fatal_sigsegv();

		/* Now we set local_using_sysemu to be used for one loop */
		local_using_sysemu = get_using_sysemu();

		op = SELECT_PTRACE_OPERATION(local_using_sysemu,
					     singlestepping(NULL));

		if (ptrace(op, pid, 0, 0)) {
			printk(UM_KERN_ERR "userspace - ptrace continue "
			       "failed, op = %d, errno = %d\n", op, errno);
			fatal_sigsegv();
		}

		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
		if (err < 0) {
			printk(UM_KERN_ERR "userspace - wait failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}

		regs->is_user = 1;
		if (ptrace(PTRACE_GETREGS, pid, 0, regs->gp)) {
			printk(UM_KERN_ERR "userspace - PTRACE_GETREGS failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}

		UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */

		if (WIFSTOPPED(status)) {
			int sig = WSTOPSIG(status);
			switch (sig) {
			case SIGSEGV:
				if (PTRACE_FULL_FAULTINFO ||
				    !ptrace_faultinfo) {
					get_skas_faultinfo(pid,
							   &regs->faultinfo);
					(*sig_info[SIGSEGV])(SIGSEGV, regs);
				}
				else handle_segv(pid, regs);
				break;
			case SIGTRAP + 0x80:
				handle_trap(pid, regs, local_using_sysemu);
				break;
			case SIGTRAP:
				relay_signal(SIGTRAP, regs);
				break;
			case SIGVTALRM:
				now = os_nsecs();
				if (now < nsecs)
					break;
				block_signals();
				(*sig_info[sig])(sig, regs);
				unblock_signals();
				nsecs = timer.it_value.tv_sec *
					UM_NSEC_PER_SEC +
					timer.it_value.tv_usec *
					UM_NSEC_PER_USEC;
				nsecs += os_nsecs();
				break;
			case SIGIO:
			case SIGILL:
			case SIGBUS:
			case SIGFPE:
			case SIGWINCH:
				block_signals();
				(*sig_info[sig])(sig, regs);
				unblock_signals();
				break;
			default:
				printk(UM_KERN_ERR "userspace - child stopped "
				       "with signal %d\n", sig);
				fatal_sigsegv();
			}
			pid = userspace_pid[0];
			interrupt_end();

			/* Avoid -ERESTARTSYS handling in host */
			if (PT_SYSCALL_NR_OFFSET != PT_SYSCALL_RET_OFFSET)
				PT_SYSCALL_NR(regs->gp) = -1;
		}
	}
}

static unsigned long thread_regs[MAX_REG_NR];

static int __init init_thread_regs(void)
{
	get_safe_registers(thread_regs);
	/* Set parent's instruction pointer to start of clone-stub */
	thread_regs[REGS_IP_INDEX] = STUB_CODE +
				(unsigned long) stub_clone_handler -
				(unsigned long) &__syscall_stub_start;
	thread_regs[REGS_SP_INDEX] = STUB_DATA + UM_KERN_PAGE_SIZE -
		sizeof(void *);
#ifdef __SIGNAL_FRAMESIZE
	thread_regs[REGS_SP_INDEX] -= __SIGNAL_FRAMESIZE;
#endif
	return 0;
}

__initcall(init_thread_regs);

int copy_context_skas0(unsigned long new_stack, int pid)
{
	struct timeval tv = { .tv_sec = 0, .tv_usec = UM_USEC_PER_SEC / UM_HZ };
	int err;
	unsigned long current_stack = current_stub_stack();
	struct stub_data *data = (struct stub_data *) current_stack;
	struct stub_data *child_data = (struct stub_data *) new_stack;
	unsigned long long new_offset;
	int new_fd = phys_mapping(to_phys((void *)new_stack), &new_offset);

	/*
	 * prepare offset and fd of child's stack as argument for parent's
	 * and child's mmap2 calls
	 */
	*data = ((struct stub_data) { .offset = MMAP_OFFSET(new_offset),
				      .fd = new_fd,
				      .timer = ((struct itimerval)
						{ .it_value = tv,
						  .it_interval = tv }) });

	err = ptrace_setregs(pid, thread_regs);
	if (err < 0) {
		err = -errno;
		printk(UM_KERN_ERR "copy_context_skas0 : PTRACE_SETREGS "
		       "failed, pid = %d, errno = %d\n", pid, -err);
		return err;
	}

	/* set a well known return code for detection of child write failure */
	child_data->err = 12345678;

	/*
	 * Wait, until parent has finished its work: read child's pid from
	 * parent's stack, and check, if bad result.
	 */
	err = ptrace(PTRACE_CONT, pid, 0, 0);
	if (err) {
		err = -errno;
		printk(UM_KERN_ERR "Failed to continue new process, pid = %d, "
		       "errno = %d\n", pid, errno);
		return err;
	}

	wait_stub_done(pid);

	pid = data->err;
	if (pid < 0) {
		printk(UM_KERN_ERR "copy_context_skas0 - stub-parent reports "
		       "error %d\n", -pid);
		return pid;
	}

	/*
	 * Wait, until child has finished too: read child's result from
	 * child's stack and check it.
	 */
	wait_stub_done(pid);
	if (child_data->err != STUB_DATA) {
		printk(UM_KERN_ERR "copy_context_skas0 - stub-child reports "
		       "error %ld\n", child_data->err);
		err = child_data->err;
		goto out_kill;
	}

	if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
		   (void *)PTRACE_O_TRACESYSGOOD) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "copy_context_skas0 : PTRACE_OLDSETOPTIONS "
		       "failed, errno = %d\n", errno);
		goto out_kill;
	}

	return pid;

out_kill:
	os_kill_ptraced_process(pid, 1);
	return err;
}

/*
 * This is used only, if stub pages are needed, while proc_mm is
 * available. Opening /proc/mm creates a new mm_context, which lacks
 * the stub-pages. Thus, we map them using /proc/mm-fd
 */
int map_stub_pages(int fd, unsigned long code, unsigned long data,
		   unsigned long stack)
{
	struct proc_mm_op mmop;
	int n;
	unsigned long long code_offset;
	int code_fd = phys_mapping(to_phys((void *) &__syscall_stub_start),
				   &code_offset);

	mmop = ((struct proc_mm_op) { .op = MM_MMAP,
				      .u =
				      { .mmap =
					{ .addr = code,
					  .len = UM_KERN_PAGE_SIZE,
					  .prot = PROT_EXEC,
					  .flags = MAP_FIXED | MAP_PRIVATE,
					  .fd = code_fd,
					  .offset = code_offset
					} } });
	CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
	if (n != sizeof(mmop)) {
		n = errno;
		printk(UM_KERN_ERR "mmap args - addr = 0x%lx, fd = %d, "
		       "offset = %llx\n", code, code_fd,
		       (unsigned long long) code_offset);
		printk(UM_KERN_ERR "map_stub_pages : /proc/mm map for code "
		       "failed, err = %d\n", n);
		return -n;
	}

	if (stack) {
		unsigned long long map_offset;
		int map_fd = phys_mapping(to_phys((void *)stack), &map_offset);
		mmop = ((struct proc_mm_op)
				{ .op = MM_MMAP,
				  .u =
				  { .mmap =
				    { .addr = data,
				      .len = UM_KERN_PAGE_SIZE,
				      .prot = PROT_READ | PROT_WRITE,
				      .flags = MAP_FIXED | MAP_SHARED,
				      .fd = map_fd,
				      .offset = map_offset
				    } } });
		CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
		if (n != sizeof(mmop)) {
			n = errno;
			printk(UM_KERN_ERR "map_stub_pages : /proc/mm map for "
			       "data failed, err = %d\n", n);
			return -n;
		}
	}

	return 0;
}

void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
{
	(*buf)[0].JB_IP = (unsigned long) handler;
	(*buf)[0].JB_SP = (unsigned long) stack + UM_THREAD_SIZE -
		sizeof(void *);
}

#define INIT_JMP_NEW_THREAD 0
#define INIT_JMP_CALLBACK 1
#define INIT_JMP_HALT 2
#define INIT_JMP_REBOOT 3

void switch_threads(jmp_buf *me, jmp_buf *you)
{
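	/*
	 * Save this thread's context in *me and jump to the thread saved
	 * in *you.  When somebody later longjmps back into *me, setjmp
	 * returns non-zero and we simply fall out of the function.
	 */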
	if (UML_SETJMP(me) == 0)
		UML_LONGJMP(you, 1);
}

static jmp_buf initial_jmpbuf;

/* XXX Make these percpu */
static void (*cb_proc)(void *arg);
static void *cb_arg;
static jmp_buf *cb_back;

int start_idle_thread(void *stack, jmp_buf *switch_buf)
{
	int n;

	set_handler(SIGWINCH, (__sighandler_t) sig_handler,
		    SA_ONSTACK | SA_RESTART, SIGUSR1, SIGIO, SIGVTALRM, -1);

	/*
	 * Can't use UML_SETJMP or UML_LONGJMP here because they save
	 * and restore signals, with the possible side-effect of
	 * trying to handle any signals which came when they were
	 * blocked, which can't be done on this stack.
	 * Signals must be blocked when jumping back here and restored
	 * after returning to the jumper.
	 */
	n = setjmp(initial_jmpbuf);
	switch (n) {
	case INIT_JMP_NEW_THREAD:
		(*switch_buf)[0].JB_IP = (unsigned long) new_thread_handler;
		(*switch_buf)[0].JB_SP = (unsigned long) stack +
			UM_THREAD_SIZE - sizeof(void *);
		break;
	case INIT_JMP_CALLBACK:
		(*cb_proc)(cb_arg);
		longjmp(*cb_back, 1);
		break;
	case INIT_JMP_HALT:
		kmalloc_ok = 0;
		return 0;
	case INIT_JMP_REBOOT:
		kmalloc_ok = 0;
		return 1;
	default:
		printk(UM_KERN_ERR "Bad sigsetjmp return in "
		       "start_idle_thread - %d\n", n);
		fatal_sigsegv();
	}
	longjmp(*switch_buf, 1);
}

void initial_thread_cb_skas(void (*proc)(void *), void *arg)
{
	jmp_buf here;

	cb_proc = proc;
	cb_arg = arg;
	cb_back = &here;

	block_signals();
	if (UML_SETJMP(&here) == 0)
		UML_LONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK);
	unblock_signals();

	cb_proc = NULL;
	cb_arg = NULL;
	cb_back = NULL;
}

void halt_skas(void)
{
	block_signals();
	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_HALT);
}

void reboot_skas(void)
{
	block_signals();
	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_REBOOT);
}

void __switch_mm(struct mm_id *mm_idp)
{
	int err;

	/* FIXME: need cpu pid in __switch_mm */
	if (proc_mm) {
		err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0,
			     mm_idp->u.mm_fd);
		if (err) {
			printk(UM_KERN_ERR "__switch_mm - PTRACE_SWITCH_MM "
			       "failed, errno = %d\n", errno);
			fatal_sigsegv();
		}
	}
	else userspace_pid[0] = mm_idp->u.pid;
}