/* $Id: process.c,v 1.28 2004/05/05 16:54:23 lethal Exp $
 *
 *  linux/arch/sh/kernel/process.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  SuperH version:  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <linux/module.h>
#include <linux/unistd.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/slab.h>
#include <linux/a.out.h>
#include <linux/ptrace.h>
#include <linux/platform.h>
#include <linux/kallsyms.h>

#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/elf.h>
#if defined(CONFIG_SH_HS7751RVOIP)
#include <asm/hs7751rvoip/hs7751rvoip.h>
#elif defined(CONFIG_SH_RTS7751R2D)
#include <asm/rts7751r2d/rts7751r2d.h>
#endif

static int hlt_counter = 0;

int ubc_usercnt = 0;

#define HARD_IDLE_TIMEOUT (HZ / 3)

void disable_hlt(void)
{
	hlt_counter++;
}

EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	hlt_counter--;
}

EXPORT_SYMBOL(enable_hlt);
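/*
 * Illustrative note (not from the original source): code that cannot
 * tolerate the idle loop putting the CPU to sleep brackets the critical
 * region with the pair above, e.g.
 *
 *	disable_hlt();
 *	...timing-critical work...
 *	enable_hlt();
 *
 * While hlt_counter is non-zero, default_idle() below spins on
 * need_resched() instead of calling cpu_sleep().
 */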
void default_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		if (hlt_counter) {
			while (1)
				if (need_resched())
					break;
		} else {
			while (!need_resched())
				cpu_sleep();
		}

		schedule();
	}
}

void cpu_idle(void)
{
	default_idle();
}

void machine_restart(char * __unused)
{
	/* SR.BL=1 and invoke address error to let CPU reset (manual reset) */
	asm volatile("ldc %0, sr\n\t"
		     "mov.l @%1, %0" : : "r" (0x10000000), "r" (0x80000001));
}

EXPORT_SYMBOL(machine_restart);

void machine_halt(void)
{
#if defined(CONFIG_SH_HS7751RVOIP)
	unsigned short value;

	value = ctrl_inw(PA_OUTPORTR);
	ctrl_outw((value & 0xffdf), PA_OUTPORTR);
#elif defined(CONFIG_SH_RTS7751R2D)
	ctrl_outw(0x0001, PA_POWOFF);
#endif
	while (1)
		cpu_sleep();
}

EXPORT_SYMBOL(machine_halt);

void machine_power_off(void)
{
#if defined(CONFIG_SH_HS7751RVOIP)
	unsigned short value;

	value = ctrl_inw(PA_OUTPORTR);
	ctrl_outw((value & 0xffdf), PA_OUTPORTR);
#elif defined(CONFIG_SH_RTS7751R2D)
	ctrl_outw(0x0001, PA_POWOFF);
#endif
}

EXPORT_SYMBOL(machine_power_off);

void show_regs(struct pt_regs * regs)
{
	printk("\n");
	printk("Pid : %d, Comm: %20s\n", current->pid, current->comm);
	print_symbol("PC is at %s\n", regs->pc);
	printk("PC  : %08lx SP  : %08lx SR  : %08lx ",
	       regs->pc, regs->regs[15], regs->sr);
#ifdef CONFIG_MMU
	printk("TEA : %08lx    ", ctrl_inl(MMU_TEA));
#else
	printk("                  ");
#endif
	printk("%s\n", print_tainted());

	printk("R0  : %08lx R1  : %08lx R2  : %08lx R3  : %08lx\n",
	       regs->regs[0], regs->regs[1],
	       regs->regs[2], regs->regs[3]);
	printk("R4  : %08lx R5  : %08lx R6  : %08lx R7  : %08lx\n",
	       regs->regs[4], regs->regs[5],
	       regs->regs[6], regs->regs[7]);
	printk("R8  : %08lx R9  : %08lx R10 : %08lx R11 : %08lx\n",
	       regs->regs[8], regs->regs[9],
	       regs->regs[10], regs->regs[11]);
	printk("R12 : %08lx R13 : %08lx R14 : %08lx\n",
	       regs->regs[12], regs->regs[13],
	       regs->regs[14]);
	printk("MACH: %08lx MACL: %08lx GBR : %08lx PR  : %08lx\n",
	       regs->mach, regs->macl, regs->gbr, regs->pr);

	/*
	 * If we're in kernel mode, dump the stack too..
	 */
	if (!user_mode(regs)) {
		extern void show_task(unsigned long *sp);
		unsigned long sp = regs->regs[15];

		show_task((unsigned long *)sp);
	}
}

/*
 * Create a kernel thread.
 *
 * kernel_thread_helper() is the entry point of the new thread: it calls
 * fn(arg) (fn in r5, arg in r4) and then passes the return value on to
 * do_exit().
 */
extern void kernel_thread_helper(void);
__asm__(".align 5\n"
	"kernel_thread_helper:\n\t"
	"jsr	@r5\n\t"
	" nop\n\t"
	"mov.l	1f, r1\n\t"
	"jsr	@r1\n\t"
	" mov	r0, r4\n\t"
	".align 2\n\t"
	"1:.long do_exit");

int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{	/* Don't use this in BL=1(cli). Or else, CPU resets! */
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));
	regs.regs[4] = (unsigned long) arg;
	regs.regs[5] = (unsigned long) fn;

	regs.pc = (unsigned long) kernel_thread_helper;
	regs.sr = (1 << 30);

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
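/*
 * Illustrative example (not from the original source; my_worker() is a
 * hypothetical thread function):
 *
 *	static int my_worker(void *unused)
 *	{
 *		daemonize("my_worker");
 *		for (;;) {
 *			...do periodic work, sleep...
 *		}
 *		return 0;
 *	}
 *
 *	pid = kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES);
 */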
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	if (current->thread.ubc_pc) {
		current->thread.ubc_pc = 0;
		ubc_usercnt -= 1;
	}
}

void flush_thread(void)
{
#if defined(CONFIG_SH_FPU)
	struct task_struct *tsk = current;
	struct pt_regs *regs = (struct pt_regs *)
				((unsigned long)tsk->thread_info
				 + THREAD_SIZE - sizeof(struct pt_regs)
				 - sizeof(unsigned long));

	/* Forget lazy FPU state */
	clear_fpu(tsk, regs);
	clear_used_math();
#endif
}

void release_thread(struct task_struct *dead_task)
{
	/* do nothing */
}

/* Fill in the fpu structure for a core dump.. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
	int fpvalid = 0;

#if defined(CONFIG_SH_FPU)
	struct task_struct *tsk = current;

	fpvalid = !!tsk_used_math(tsk);
	if (fpvalid) {
		unlazy_fpu(tsk, regs);
		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
	}
#endif

	return fpvalid;
}

/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs ptregs;

	ptregs = *(struct pt_regs *)
		((unsigned long)tsk->thread_info + THREAD_SIZE
		 - sizeof(struct pt_regs)
#ifdef CONFIG_SH_DSP
		 - sizeof(struct pt_dspregs)
#endif
		 - sizeof(unsigned long));
	elf_core_copy_regs(regs, &ptregs);

	return 1;
}

int
dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *fpu)
{
	int fpvalid = 0;

#if defined(CONFIG_SH_FPU)
	fpvalid = !!tsk_used_math(tsk);
	if (fpvalid) {
		struct pt_regs *regs = (struct pt_regs *)
					((unsigned long)tsk->thread_info
					 + THREAD_SIZE - sizeof(struct pt_regs)
					 - sizeof(unsigned long));
		unlazy_fpu(tsk, regs);
		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
	}
#endif

	return fpvalid;
}

asmlinkage void ret_from_fork(void);

int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
#if defined(CONFIG_SH_FPU)
	struct task_struct *tsk = current;

	unlazy_fpu(tsk, regs);
	p->thread.fpu = tsk->thread.fpu;
	copy_to_stopped_child_used_math(p);
#endif

	/*
	 * Place the child's pt_regs at the same spot in its kernel stack
	 * that dump_task_regs() above expects to find them.
	 */
	childregs = ((struct pt_regs *)
		((unsigned long)p->thread_info + THREAD_SIZE
#ifdef CONFIG_SH_DSP
		 - sizeof(struct pt_dspregs)
#endif
		 - sizeof(unsigned long))) - 1;
	*childregs = *regs;

	if (user_mode(regs)) {
		childregs->regs[15] = usp;
	} else {
		childregs->regs[15] = (unsigned long)p->thread_info + THREAD_SIZE;
	}
	if (clone_flags & CLONE_SETTLS) {
		childregs->gbr = childregs->regs[0];
	}
	childregs->regs[0] = 0; /* Set return value for child */

	p->thread.sp = (unsigned long) childregs;
	p->thread.pc = (unsigned long) ret_from_fork;

	p->thread.ubc_pc = 0;

	return 0;
}

/*
 * fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
	dump->magic = CMAGIC;
	dump->start_code = current->mm->start_code;
	dump->start_data = current->mm->start_data;
	dump->start_stack = regs->regs[15] & ~(PAGE_SIZE - 1);
	dump->u_tsize = (current->mm->end_code - dump->start_code) >> PAGE_SHIFT;
	dump->u_dsize = (current->mm->brk + (PAGE_SIZE-1) - dump->start_data) >> PAGE_SHIFT;
	dump->u_ssize = (current->mm->start_stack - dump->start_stack +
			 PAGE_SIZE - 1) >> PAGE_SHIFT;
	/* Debug registers will come here. */

	dump->regs = *regs;

	dump->u_fpvalid = dump_fpu(regs, &dump->fpu);
}

/* Tracing by user break controller. */
static void
ubc_set_tracing(int asid, unsigned long pc)
{
	ctrl_outl(pc, UBC_BARA);

	/* We don't have any ASID settings for the SH-2! */
	if (cpu_data->type != CPU_SH7604)
		ctrl_outb(asid, UBC_BASRA);

	ctrl_outl(0, UBC_BAMRA);

	if (cpu_data->type == CPU_SH7729) {
		ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
		ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
	} else {
		ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA);
		ctrl_outw(BRCR_PCBA, UBC_BRCR);
	}
}
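/*
 * Note (editorial sketch; the ptrace side lives outside this file): a
 * single-stepped task gets its resume address recorded in thread.ubc_pc
 * and ubc_usercnt is bumped.  __switch_to() below then arms the UBC via
 * ubc_set_tracing() whenever such a task is scheduled in, and
 * break_point_trap() at the bottom of this file disarms it again and
 * delivers SIGTRAP once the break is hit.
 */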
/*
 * switch_to(x,y) should switch tasks from x to y.
 */
struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next)
{
#if defined(CONFIG_SH_FPU)
	struct pt_regs *regs = (struct pt_regs *)
				((unsigned long)prev->thread_info
				 + THREAD_SIZE - sizeof(struct pt_regs)
				 - sizeof(unsigned long));
	unlazy_fpu(prev, regs);
#endif

#ifdef CONFIG_PREEMPT
	{
		unsigned long flags;
		struct pt_regs *regs;

		local_irq_save(flags);
		regs = (struct pt_regs *)
			((unsigned long)prev->thread_info
			 + THREAD_SIZE - sizeof(struct pt_regs)
#ifdef CONFIG_SH_DSP
			 - sizeof(struct pt_dspregs)
#endif
			 - sizeof(unsigned long));
		if (user_mode(regs) && regs->regs[15] >= 0xc0000000) {
			int offset = (int)regs->regs[15];

			/* Reset stack pointer: clear critical region mark */
			regs->regs[15] = regs->regs[1];
			if (regs->pc < regs->regs[0])
				/* Go to rewind point */
				regs->pc = regs->regs[0] + offset;
		}
		local_irq_restore(flags);
	}
#endif

	/*
	 * Restore the kernel mode register
	 *	k7 (r7_bank1)
	 */
	asm volatile("ldc	%0, r7_bank"
		     : /* no output */
		     : "r" (next->thread_info));

#ifdef CONFIG_MMU
	/* If no tasks are using the UBC, we're done */
	if (ubc_usercnt == 0)
		/* nothing to do */;
	else if (next->thread.ubc_pc && next->mm) {
		ubc_set_tracing(next->mm->context & MMU_CONTEXT_ASID_MASK,
				next->thread.ubc_pc);
	} else {
		ctrl_outw(0, UBC_BBRA);
		ctrl_outw(0, UBC_BBRB);
	}
#endif

	return prev;
}

asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
			unsigned long r6, unsigned long r7,
			struct pt_regs regs)
{
#ifdef CONFIG_MMU
	return do_fork(SIGCHLD, regs.regs[15], &regs, 0, NULL, NULL);
#else
	/* fork almost works, enough to trick you into looking elsewhere :-( */
	return -EINVAL;
#endif
}

asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
			 unsigned long parent_tidptr,
			 unsigned long child_tidptr,
			 struct pt_regs regs)
{
	if (!newsp)
		newsp = regs.regs[15];
	return do_fork(clone_flags, newsp, &regs, 0,
		       (int __user *)parent_tidptr, (int __user *)child_tidptr);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
			 unsigned long r6, unsigned long r7,
			 struct pt_regs regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.regs[15], &regs,
		       0, NULL, NULL);
}
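/*
 * Note (editorial): in the SH calling convention the first four syscall
 * arguments arrive in r4-r7, and the syscall entry code leaves the saved
 * register frame on the kernel stack right after them, which is why the
 * wrappers above and below declare "struct pt_regs regs" by value rather
 * than taking a pointer.
 */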
/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(char *ufilename, char **uargv,
			  char **uenvp, unsigned long r7,
			  struct pt_regs regs)
{
	int error;
	char *filename;

	filename = getname((char __user *)ufilename);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;

	error = do_execve(filename,
			  (char __user * __user *)uargv,
			  (char __user * __user *)uenvp,
			  &regs);
	if (error == 0) {
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
	}
	putname(filename);
out:
	return error;
}

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long schedule_frame;
	unsigned long pc;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	/*
	 * The same comment as on the Alpha applies here, too ...
	 */
	pc = thread_saved_pc(p);
	if (in_sched_functions(pc)) {
		schedule_frame = ((unsigned long *)(long)p->thread.sp)[1];
		return (unsigned long)((unsigned long *)schedule_frame)[1];
	}
	return pc;
}

asmlinkage void break_point_trap(unsigned long r4, unsigned long r5,
				 unsigned long r6, unsigned long r7,
				 struct pt_regs regs)
{
	/* Clear tracing. */
	ctrl_outw(0, UBC_BBRA);
	ctrl_outw(0, UBC_BBRB);
	current->thread.ubc_pc = 0;
	ubc_usercnt -= 1;

	force_sig(SIGTRAP, current);
}

asmlinkage void break_point_trap_software(unsigned long r4, unsigned long r5,
					  unsigned long r6, unsigned long r7,
					  struct pt_regs regs)
{
	/*
	 * Back up over the 16-bit trap instruction so the task stops
	 * with PC pointing at the breakpoint itself.
	 */
	regs.pc -= 2;
	force_sig(SIGTRAP, current);
}
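/*
 * Editorial note (sketch, not from the original source): a software
 * breakpoint on SH is typically planted by overwriting the target
 * instruction with a "trapa", whose handler is break_point_trap_software()
 * above; the hardware single-step path instead goes through the UBC and
 * break_point_trap().
 */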