/*
 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/sched.h"
#include "linux/slab.h"
#include "linux/ptrace.h"
#include "linux/proc_fs.h"
#include "linux/file.h"
#include "linux/errno.h"
#include "linux/init.h"
#include "asm/uaccess.h"
#include "asm/atomic.h"
#include "kern_util.h"
#include "skas.h"
#include "os.h"
#include "user_util.h"
#include "tlb.h"
#include "kern.h"
#include "mode.h"
#include "registers.h"

/* The SKAS side of a context switch - hand control over via the tasks'
 * switch_bufs, adjusting the timer when the idle thread (pid 0) is
 * switched away from or to.
 */
void switch_to_skas(void *prev, void *next)
{
	struct task_struct *from, *to;

	from = prev;
	to = next;

	/* XXX need to check runqueues[cpu].idle */
	if(current->pid == 0)
		switch_timers(0);

	switch_threads(&from->thread.mode.skas.switch_buf,
		       &to->thread.mode.skas.switch_buf);

	arch_switch_to_skas(current->thread.prev_sched, current);

	if(current->pid == 0)
		switch_timers(1);
}

extern void schedule_tail(struct task_struct *prev);

/* This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if(current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/* The return value is 1 if the kernel thread execs a process,
	 * 0 if it just exits
	 */
	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
	if(n == 1){
		/* Handle any immediate reschedules or signals */
		interrupt_end();
		userspace(&current->thread.regs.regs);
	}
	else do_exit(0);
}

/* Nothing to release in SKAS mode */
void release_thread_skas(struct task_struct *task)
{
}

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();
	if(current->thread.prev_sched == NULL)
		panic("blech");

	schedule_tail(current->thread.prev_sched);

	/* XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to_skas isn't needed - removing it could improve
	 * performance. -bb */
	arch_switch_to_skas(current->thread.prev_sched, current);

	current->thread.prev_sched = NULL;

	/* Handle any immediate reschedules or signals */
	interrupt_end();

	userspace(&current->thread.regs.regs);
}
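/* A minimal sketch of the "called magically" mechanism used by the two
 * handlers above, assuming a setjmp/longjmp-style switch_buf (the names
 * and details here are illustrative, not the exact UML implementation):
 *
 *	if(setjmp(task->thread.mode.skas.switch_buf) == 0)
 *		... context saved; another task runs via longjmp ...
 *	else
 *		handler();	// a longjmp to switch_buf lands here
 *
 * new_thread() initializes switch_buf so that the first switch_threads()
 * longjmp into the new task starts the handler on that task's stack.
 */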
int copy_thread_skas(int nr, unsigned long clone_flags, unsigned long sp,
		     unsigned long stack_top, struct task_struct * p,
		     struct pt_regs *regs)
{
	void (*handler)(void);

	if(current->thread.forking){
		memcpy(&p->thread.regs.regs.skas, &regs->regs.skas,
		       sizeof(p->thread.regs.regs.skas));
		REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.skas.regs, 0);
		if(sp != 0) REGS_SP(p->thread.regs.regs.skas.regs) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	}
	else {
		init_thread_registers(&p->thread.regs.regs);
		p->thread.request.u.thread = current->thread.request.u.thread;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.mode.skas.switch_buf,
		   handler);
	return(0);
}

/* Create a new host address space by opening /proc/mm, mapping in the
 * stub pages if this configuration needs them.
 */
int new_mm(unsigned long stack)
{
	int fd;

	fd = os_open_file("/proc/mm", of_cloexec(of_write(OPENFLAGS())), 0);
	if(fd < 0)
		return(fd);

	if(skas_needs_stub)
		map_stub_pages(fd, CONFIG_STUB_CODE, CONFIG_STUB_DATA, stack);

	return(fd);
}

/* Record the host pid of this CPU's idle thread, then idle */
void init_idle_skas(void)
{
	cpu_tasks[current_thread->cpu].pid = os_getpid();
	default_idle();
}

extern void start_kernel(void);

/* The body of the initial kernel thread - runs start_kernel() with
 * signals blocked.
 */
static int start_kernel_proc(void *unused)
{
	int pid;

	block_signals();
	pid = os_getpid();

	cpu_tasks[0].pid = pid;
	cpu_tasks[0].task = current;
#ifdef CONFIG_SMP
	cpu_online_map = cpumask_of_cpu(0);
#endif
	start_kernel();
	return(0);
}

extern int userspace_pid[];

/* Boot entry point for SKAS mode - start the userspace process if
 * /proc/mm is available, then run start_kernel_proc() as the idle thread.
 */
int start_uml_skas(void)
{
	if(proc_mm)
		userspace_pid[0] = start_userspace(0);

	init_new_thread_signals();

	init_task.thread.request.u.thread.proc = start_kernel_proc;
	init_task.thread.request.u.thread.arg = NULL;
	return(start_idle_thread(task_stack_page(&init_task),
				 &init_task.thread.mode.skas.switch_buf));
}

int external_pid_skas(struct task_struct *task)
{
#warning Need to look up userspace_pid by cpu
	return(userspace_pid[0]);
}

int thread_pid_skas(struct task_struct *task)
{
#warning Need to look up userspace_pid by cpu
	return(userspace_pid[0]);
}

/* Kill the host processes backing UML tasks - the single userspace
 * process when /proc/mm is in use, otherwise one ptraced child per mm.
 */
void kill_off_processes_skas(void)
{
	if(proc_mm)
#warning need to loop over userspace_pids in kill_off_processes_skas
		os_kill_ptraced_process(userspace_pid[0], 1);
	else {
		struct task_struct *p;
		int pid, me;

		me = os_getpid();
		for_each_process(p){
			if(p->mm == NULL)
				continue;

			pid = p->mm->context.skas.id.u.pid;
			os_kill_ptraced_process(pid, 1);
		}
	}
}

unsigned long current_stub_stack(void)
{
	if(current->mm == NULL)
		return(0);

	return(current->mm->context.skas.id.stack);
}
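/* Illustrative note on the per-cpu lookups flagged by the #warnings in
 * external_pid_skas() and thread_pid_skas() above: once userspace_pid[]
 * is maintained per CPU, both would presumably reduce to something like
 *
 *	return(userspace_pid[task_cpu(task)]);
 *
 * (a sketch only - task_cpu() is the generic kernel helper, and the
 * per-cpu bookkeeping itself is not implemented here).
 */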