// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <linux/resume_user_mode.h>
#include <asm/current.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/exec.h>
#include <linux/uaccess.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>
#include <registers.h>
#include <linux/time-internal.h>
#include <linux/elfcore.h>

/*
 * This is a per-cpu array. A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { NULL } };

void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);

	return page;
}

static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task) { task });
}

struct task_struct *__switch_to(struct task_struct *from, struct task_struct *to)
{
	to->thread.prev_sched = from;
	set_current(to);

	switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
	arch_switch_to(current);

	return current->thread.prev_sched;
}

void interrupt_end(void)
{
	struct pt_regs *regs = &current->thread.regs;

	if (need_resched())
		schedule();
	if (test_thread_flag(TIF_SIGPENDING) ||
	    test_thread_flag(TIF_NOTIFY_SIGNAL))
		do_signal(regs);
	if (test_thread_flag(TIF_NOTIFY_RESUME))
		resume_user_mode_work(regs);
}

int get_current_pid(void)
{
	return task_pid_nr(current);
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *);
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * callback returns only if the kernel thread execs a process
	 */
	fn(arg);
	userspace(&current->thread.regs.regs,
		  current_thread_info()->aux_fp_regs);
}
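/*
 * Illustrative sketch, not part of this file: the jmp_buf "magic" above is
 * set up by new_thread() on the os-Linux side, which stuffs the handler
 * address and the top of the kernel stack into the buffer so that the next
 * longjmp to it "returns" into the handler. Roughly, from memory of
 * arch/um/os-Linux/skas/process.c (treat names such as JB_IP and
 * UM_THREAD_SIZE as approximations from the per-arch sysdep headers):
 *
 *	void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
 *	{
 *		(*buf)[0].JB_IP = (unsigned long) handler;
 *		(*buf)[0].JB_SP = (unsigned long) stack + UM_THREAD_SIZE -
 *				  sizeof(void *);
 *	}
 *
 *	void switch_threads(jmp_buf *me, jmp_buf *you)
 *	{
 *		if (UML_SETJMP(me) == 0)
 *			UML_LONGJMP(you, 1);
 *	}
 *
 * This is what __switch_to() above relies on: the outgoing thread saves
 * its context with setjmp and the incoming one is resumed with longjmp.
 */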
/* Called magically, see new_thread_handler above */
static void fork_handler(void)
{
	force_flush_all();

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed. We could want to apply this to
	 * improve performance. -bb
	 */
	arch_switch_to(current);

	current->thread.prev_sched = NULL;

	userspace(&current->thread.regs.regs,
		  current_thread_info()->aux_fp_regs);
}

int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long sp = args->stack;
	unsigned long tls = args->tls;
	void (*handler)(void);
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

	if (!args->fn) {
		memcpy(&p->thread.regs.regs, current_pt_regs(),
		       sizeof(p->thread.regs.regs));
		PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	} else {
		get_safe_registers(p->thread.regs.regs.gp,
				   p->thread.regs.regs.fp);
		p->thread.request.u.thread.proc = args->fn;
		p->thread.request.u.thread.arg = args->fn_arg;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (!args->fn) {
		clear_flushed_tls(p);

		/*
		 * Set a new TLS for the child thread?
		 */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_set_tls(p, tls);
	}

	return ret;
}

void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

void um_idle_sleep(void)
{
	if (time_travel_mode != TT_MODE_OFF)
		time_travel_sleep();
	else
		os_idle_sleep();
}

void arch_cpu_idle(void)
{
	um_idle_sleep();
}

int __uml_cant_sleep(void)
{
	return in_atomic() || irqs_disabled() || in_interrupt();
	/* Is in_interrupt() really needed? */
}

int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread_info();
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}
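/*
 * Usage sketch for the exitcall walk above. Assumption: the
 * __uml_exitcall() helper from arch/um/include/shared/init.h, which places
 * a function pointer into the section bounded by __uml_exitcall_begin and
 * __uml_exitcall_end. my_cleanup and my_fd are hypothetical names:
 *
 *	static int my_fd;
 *
 *	static void my_cleanup(void)
 *	{
 *		os_close_file(my_fd);
 *	}
 *	__uml_exitcall(my_cleanup);
 *
 * Since the loop above starts at __uml_exitcall_end and walks backwards,
 * hooks run in reverse registration order, like atexit(3) handlers.
 */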
char *uml_strdup(const char *string)
{
	return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int singlestepping(void)
{
	return test_thread_flag(TIF_SINGLESTEP);
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/exec.h
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_u32_below(8192);
	return sp & ~0xf;
}
#endif

unsigned long __get_wchan(struct task_struct *p)
{
	unsigned long stack_page, sp, ip;
	bool seen_sched = false;

	stack_page = (unsigned long) task_stack_page(p);
	/* Bail if the process has no kernel stack for some reason */
	if (stack_page == 0)
		return 0;

	sp = p->thread.switch_buf->JB_SP;
	/*
	 * Bail if the stack pointer is below the bottom of the kernel
	 * stack for some reason
	 */
	if (sp < stack_page)
		return 0;

	while (sp < stack_page + THREAD_SIZE) {
		ip = *((unsigned long *) sp);
		if (in_sched_functions(ip))
			/* Ignore everything until we're above the scheduler */
			seen_sched = true;
		else if (kernel_text_address(ip) && seen_sched)
			return ip;

		sp += sizeof(unsigned long);
	}

	return 0;
}

int elf_core_copy_task_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
	int cpu = current_thread_info()->cpu;

	return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu);
}
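/*
 * For reference, a hedged sketch of what save_i387_registers() does on the
 * os-Linux side: the FPU state lives not in the UML kernel but in the
 * traced host process identified by userspace_pid[cpu], so it is fetched
 * with ptrace. Roughly, from memory of arch/um/os-Linux/registers.c on
 * x86 (details approximate):
 *
 *	int save_i387_registers(int pid, unsigned long *fp_regs)
 *	{
 *		if (ptrace(PTRACE_GETFPREGS, pid, 0, fp_regs) < 0)
 *			return -errno;
 *		return 0;
 *	}
 */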