/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include "linux/kernel.h"
#include "linux/sched.h"
#include "linux/interrupt.h"
#include "linux/string.h"
#include "linux/mm.h"
#include "linux/slab.h"
#include "linux/utsname.h"
#include "linux/fs.h"
#include "linux/utime.h"
#include "linux/smp_lock.h"
#include "linux/module.h"
#include "linux/init.h"
#include "linux/capability.h"
#include "linux/vmalloc.h"
#include "linux/spinlock.h"
#include "linux/proc_fs.h"
#include "linux/ptrace.h"
#include "linux/random.h"
#include "linux/personality.h"
#include "asm/unistd.h"
#include "asm/mman.h"
#include "asm/segment.h"
#include "asm/stat.h"
#include "asm/pgtable.h"
#include "asm/processor.h"
#include "asm/tlbflush.h"
#include "asm/uaccess.h"
#include "asm/user.h"
#include "user_util.h"
#include "kern_util.h"
#include "kern.h"
#include "signal_kern.h"
#include "init.h"
#include "irq_user.h"
#include "mem_user.h"
#include "tlb.h"
#include "frame_kern.h"
#include "sigcontext.h"
#include "os.h"
#include "mode.h"
#include "mode_kern.h"
#include "choose-mode.h"
#include "um_malloc.h"

/* This is a per-cpu array.  A processor only modifies its own entry and it
 * only cares about its own entry, so it's OK if another processor is
 * modifying its own entry concurrently.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

int external_pid(void *t)
{
	struct task_struct *task = t ? t : current;

	return(CHOOSE_MODE_PROC(external_pid_tt, external_pid_skas, task));
}

int pid_to_processor_id(int pid)
{
	int i;

	for(i = 0; i < ncpus; i++){
		if(cpu_tasks[i].pid == pid)
			return(i);
	}
	return(-1);
}

void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);
	if(page == 0)
		return(0);
	stack_protections(page);
	return(page);
}

int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	int pid;

	current->thread.request.u.thread.proc = fn;
	current->thread.request.u.thread.arg = arg;
	pid = do_fork(CLONE_VM | CLONE_UNTRACED | flags, 0,
		      &current->thread.regs, 0, NULL, NULL);
	if(pid < 0)
		panic("do_fork failed in kernel_thread, errno = %d", pid);
	return(pid);
}
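
/*
 * Usage sketch for kernel_thread() above (illustrative only, not part of
 * the original file; "cleaner" and "do_periodic_work" are hypothetical
 * names):
 *
 *	static int cleaner(void *unused)
 *	{
 *		do_periodic_work();
 *		return(0);
 *	}
 *
 *	int pid = kernel_thread(cleaner, NULL, SIGCHLD);
 *
 * Note that on failure kernel_thread() does not return - it panics - so
 * the caller need not check for a negative pid.
 */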
void set_current(void *t)
{
	struct task_struct *task = t;

	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
		{ external_pid(task), task });
}

void *_switch_to(void *prev, void *next, void *last)
{
	struct task_struct *from = prev;
	struct task_struct *to = next;

	to->thread.prev_sched = from;
	set_current(to);

	do {
		current->thread.saved_task = NULL;
		CHOOSE_MODE_PROC(switch_to_tt, switch_to_skas, prev, next);
		if(current->thread.saved_task)
			show_regs(&(current->thread.regs));
		next = current->thread.saved_task;
		prev = current;
	} while(current->thread.saved_task);

	return(current->thread.prev_sched);
}

void interrupt_end(void)
{
	if(need_resched())
		schedule();
	if(test_tsk_thread_flag(current, TIF_SIGPENDING))
		do_signal();
}

void release_thread(struct task_struct *task)
{
	CHOOSE_MODE(release_thread_tt(task), release_thread_skas(task));
}

void exit_thread(void)
{
	unprotect_stack((unsigned long) current_thread);
}

void *get_current(void)
{
	return(current);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		unsigned long stack_top, struct task_struct *p,
		struct pt_regs *regs)
{
	int ret;

	p->thread = (struct thread_struct) INIT_THREAD;
	ret = CHOOSE_MODE_PROC(copy_thread_tt, copy_thread_skas, nr,
			       clone_flags, sp, stack_top, p, regs);

	if (ret || !current->thread.forking)
		goto out;

	clear_flushed_tls(p);

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS)
		ret = arch_copy_tls(p);

out:
	return ret;
}

void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	CHOOSE_MODE_PROC(initial_thread_cb_tt, initial_thread_cb_skas, proc,
			 arg);
	kmalloc_ok = save_kmalloc_ok;
}

unsigned long stack_sp(unsigned long page)
{
	return(page + PAGE_SIZE - sizeof(void *));
}

int current_pid(void)
{
	return(current->pid);
}

void default_idle(void)
{
	CHOOSE_MODE(uml_idle_timer(), (void) 0);

	while(1){
		/* endless idle loop with no priority at all */

		/*
		 * although we are an idle CPU, we do not want to
		 * get into the scheduler unnecessarily.
		 */
		if(need_resched())
			schedule();

		idle_sleep(10);
	}
}

void cpu_idle(void)
{
	CHOOSE_MODE(init_idle_tt(), init_idle_skas());
}

int page_size(void)
{
	return(PAGE_SIZE);
}

/* Walk "task"'s page tables and return the physical address backing the
 * virtual address "addr", or ERR_PTR(-EINVAL) if the task has no mm or
 * any level of the walk finds a non-present entry.  If "pte_out" is
 * non-NULL, the matching PTE is copied out through it as well.
 */
void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
		      pte_t *pte_out)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t ptent;

	if(task->mm == NULL)
		return(ERR_PTR(-EINVAL));
	pgd = pgd_offset(task->mm, addr);
	if(!pgd_present(*pgd))
		return(ERR_PTR(-EINVAL));

	pud = pud_offset(pgd, addr);
	if(!pud_present(*pud))
		return(ERR_PTR(-EINVAL));

	pmd = pmd_offset(pud, addr);
	if(!pmd_present(*pmd))
		return(ERR_PTR(-EINVAL));

	pte = pte_offset_kernel(pmd, addr);
	ptent = *pte;
	if(!pte_present(ptent))
		return(ERR_PTR(-EINVAL));

	if(pte_out != NULL)
		*pte_out = ptent;
	return((void *) (pte_val(ptent) & PAGE_MASK) + (addr & ~PAGE_MASK));
}

char *current_cmd(void)
{
#if defined(CONFIG_SMP) || defined(CONFIG_HIGHMEM)
	return("(Unknown)");
#else
	void *addr = um_virt_to_phys(current, current->mm->arg_start, NULL);
	return IS_ERR(addr) ? "(Unknown)" : __va((unsigned long) addr);
#endif
}

void force_sigbus(void)
{
	printk(KERN_ERR "Killing pid %d because of a lack of memory\n",
	       current->pid);
	lock_kernel();
	sigaddset(&current->pending.signal, SIGBUS);
	recalc_sigpending();
	current->flags |= PF_SIGNALED;
	do_exit(SIGBUS | 0x80);
}

void dump_thread(struct pt_regs *regs, struct user *u)
{
}

void enable_hlt(void)
{
	panic("enable_hlt");
}

EXPORT_SYMBOL(enable_hlt);

void disable_hlt(void)
{
	panic("disable_hlt");
}

EXPORT_SYMBOL(disable_hlt);

void *um_kmalloc(int size)
{
	return kmalloc(size, GFP_KERNEL);
}

void *um_kmalloc_atomic(int size)
{
	return kmalloc(size, GFP_ATOMIC);
}

void *um_vmalloc(int size)
{
	return vmalloc(size);
}

void *um_vmalloc_atomic(int size)
{
	return __vmalloc(size, GFP_ATOMIC | __GFP_HIGHMEM, PAGE_KERNEL);
}

int __cant_sleep(void)
{
	return in_atomic() || irqs_disabled() || in_interrupt();
	/* Is in_interrupt() really needed? */
}

unsigned long get_fault_addr(void)
{
	return((unsigned long) current->thread.fault_addr);
}

EXPORT_SYMBOL(get_fault_addr);

void not_implemented(void)
{
	printk(KERN_DEBUG "Something isn't implemented in here\n");
}

EXPORT_SYMBOL(not_implemented);

/* Nonzero if "sp" lies outside the current kernel stack, i.e. we were
 * running in userspace context.
 */
int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return(stack != (unsigned long) current_thread);
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}

char *uml_strdup(char *string)
{
	return kstrdup(string, GFP_KERNEL);
}

int copy_to_user_proc(void __user *to, void *from, int size)
{
	return(copy_to_user(to, from, size));
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return(copy_from_user(to, from, size));
}

int clear_user_proc(void __user *buf, int size)
{
	return(clear_user(buf, size));
}

int strlen_user_proc(char __user *str)
{
	return(strlen_user(str));
}

int smp_sigio_handler(void)
{
#ifdef CONFIG_SMP
	int cpu = current_thread->cpu;
	IPI_handler(cpu);
	if(cpu != 0)
		return(1);
#endif
	return(0);
}

int cpu(void)
{
	return(current_thread->cpu);
}

static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}

static int proc_read_sysemu(char *buf, char **start, off_t offset, int size,
			    int *eof, void *data)
{
	if (snprintf(buf, size, "%d\n", get_using_sysemu()) < size) /* No overflow */
		*eof = 1;

	return strlen(buf);
}

static int proc_write_sysemu(struct file *file, const char __user *buf,
			     unsigned long count, void *data)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}

int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;

	if (!sysemu_supported)
		return 0;

	ent = create_proc_entry("sysemu", 0600, &proc_root);

	if (ent == NULL) {
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return(0);
	}

	ent->read_proc = proc_read_sysemu;
	ent->write_proc = proc_write_sysemu;

	return 0;
}

late_initcall(make_proc_sysemu);

int singlestepping(void *t)
{
	struct task_struct *task = t ? t : current;

	if (!(task->ptrace & PT_DTRACE))
		return(0);

	if (task->thread.singlestep_syscall)
		return(1);

	return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/system.h.
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
#endif
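
/*
 * Worked example for arch_align_stack() above (a sketch; the concrete
 * numbers are made up): with randomization enabled, sp == 0x40000000 and
 * get_random_int() % 8192 == 1000, the function returns
 * (0x40000000 - 1000) & ~0xf == 0x3ffffc18 & ~0xf == 0x3ffffc10, so the
 * starting stack is shifted down by up to 8191 bytes while staying
 * 16-byte aligned.
 */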