/* $Id: process.c,v 1.28 2004/05/05 16:54:23 lethal Exp $
 *
 * linux/arch/sh/kernel/process.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <linux/module.h>
#include <linux/unistd.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/slab.h>
#include <linux/a.out.h>
#include <linux/ptrace.h>
#include <linux/platform.h>
#include <linux/kallsyms.h>

#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/elf.h>
#if defined(CONFIG_SH_HS7751RVOIP)
#include <asm/hs7751rvoip/hs7751rvoip.h>
#elif defined(CONFIG_SH_RTS7751R2D)
#include <asm/rts7751r2d/rts7751r2d.h>
#endif

/*
 * When non-zero, cpu_idle() spins with cpu_relax() instead of entering
 * the low-power cpu_sleep() state.  Adjusted via disable_hlt()/enable_hlt().
 */
static int hlt_counter=0;

/* Number of tasks currently using the User Break Controller for tracing. */
int ubc_usercnt = 0;

#define HARD_IDLE_TIMEOUT (HZ / 3)

/* Forbid the idle loop from sleeping the CPU (nestable; pairs with enable_hlt). */
void disable_hlt(void)
{
	hlt_counter++;
}

EXPORT_SYMBOL(disable_hlt);

/* Re-allow the idle loop to sleep the CPU. */
void enable_hlt(void)
{
	hlt_counter--;
}

EXPORT_SYMBOL(enable_hlt);

/*
 * Architecture idle loop: wait (busy-spin or cpu_sleep, depending on
 * hlt_counter) until a reschedule is needed, then call the scheduler.
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		if (hlt_counter) {
			while (!need_resched())
				cpu_relax();
		} else {
			while (!need_resched())
				cpu_sleep();
		}

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

void machine_restart(char * __unused)
{
	/* SR.BL=1 and invoke address error to let CPU reset (manual reset) */
	asm volatile("ldc %0, sr\n\t"
		     "mov.l @%1, %0" : : "r" (0x10000000), "r" (0x80000001));
}

/*
 * Halt the machine: perform board-specific power-down I/O where the
 * board supports it, then sleep the CPU forever.
 */
void machine_halt(void)
{
#if defined(CONFIG_SH_HS7751RVOIP)
	unsigned short value;

	/* Clear bit 5 of the output port register to cut board power
	 * (NOTE(review): bit meaning inferred from the 0xffdf mask —
	 * confirm against the HS7751RVoIP board documentation). */
	value = ctrl_inw(PA_OUTPORTR);
	ctrl_outw((value & 0xffdf), PA_OUTPORTR);
#elif defined(CONFIG_SH_RTS7751R2D)
	ctrl_outw(0x0001, PA_POWOFF);
#endif
	while (1)
		cpu_sleep();
}

/*
 * Board-specific power-off hook; a no-op on boards with no supported
 * power-off mechanism.
 */
void machine_power_off(void)
{
#if defined(CONFIG_SH_HS7751RVOIP)
	unsigned short value;

	value = ctrl_inw(PA_OUTPORTR);
	ctrl_outw((value & 0xffdf), PA_OUTPORTR);
#elif defined(CONFIG_SH_RTS7751R2D)
	ctrl_outw(0x0001, PA_POWOFF);
#endif
}

/*
 * Dump the register state in *regs to the console, followed by a kernel
 * stack trace when the trap happened in kernel mode.
 */
void show_regs(struct pt_regs * regs)
{
	printk("\n");
	printk("Pid : %d, Comm: %20s\n", current->pid, current->comm);
	print_symbol("PC is at %s\n", regs->pc);
	printk("PC : %08lx SP : %08lx SR : %08lx ",
	       regs->pc, regs->regs[15], regs->sr);
#ifdef CONFIG_MMU
	/* TEA holds the faulting effective address on MMU exceptions. */
	printk("TEA : %08x ", ctrl_inl(MMU_TEA));
#else
	printk(" ");
#endif
	printk("%s\n", print_tainted());

	printk("R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
	       regs->regs[0],regs->regs[1],
	       regs->regs[2],regs->regs[3]);
	printk("R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n",
	       regs->regs[4],regs->regs[5],
	       regs->regs[6],regs->regs[7]);
	printk("R8 : %08lx R9 : %08lx R10 : %08lx R11 : %08lx\n",
	       regs->regs[8],regs->regs[9],
	       regs->regs[10],regs->regs[11]);
	printk("R12 : %08lx R13 : %08lx R14 : %08lx\n",
	       regs->regs[12],regs->regs[13],
	       regs->regs[14]);
	printk("MACH: %08lx MACL: %08lx GBR : %08lx PR : %08lx\n",
	       regs->mach, regs->macl, regs->gbr, regs->pr);

	/*
	 * If we're in kernel mode, dump the stack too..
	 */
	if (!user_mode(regs)) {
		extern void show_task(unsigned long *sp);
		unsigned long sp = regs->regs[15];

		show_task((unsigned long *)sp);
	}
}

/*
 * Create a kernel thread
 */

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * The trampoline below receives the thread function in r5 and its
 * argument in r4 (set up by kernel_thread() via pt_regs), calls the
 * function, and passes its return value (r0) to do_exit().
 */
extern void kernel_thread_helper(void);
__asm__(".align 5\n"
	"kernel_thread_helper:\n\t"
	"jsr @r5\n\t"
	" nop\n\t"
	"mov.l 1f, r1\n\t"
	"jsr @r1\n\t"
	" mov r0, r4\n\t"
	".align 2\n\t"
	"1:.long do_exit");

int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{	/* Don't use this in BL=1(cli). Or else, CPU resets! */
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));
	/* Hand fn/arg to kernel_thread_helper through r5/r4 (see above). */
	regs.regs[4] = (unsigned long) arg;
	regs.regs[5] = (unsigned long) fn;

	regs.pc = (unsigned long) kernel_thread_helper;
	/* NOTE(review): bit 30 of SR — presumably SR.MD (privileged mode)
	 * for the new thread; confirm against the SH SR bit layout. */
	regs.sr = (1 << 30);

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	/* Release this task's UBC slot if it was being hardware-traced. */
	if (current->thread.ubc_pc) {
		current->thread.ubc_pc = 0;
		ubc_usercnt -= 1;
	}
}

/* Called on exec: discard per-thread state not inherited across execve. */
void flush_thread(void)
{
#if defined(CONFIG_SH_FPU)
	struct task_struct *tsk = current;
	/* The saved user pt_regs sit at the top of the kernel stack,
	 * just below a trailing padding word. */
	struct pt_regs *regs = (struct pt_regs *)
		((unsigned long)tsk->thread_info
		 + THREAD_SIZE - sizeof(struct pt_regs)
		 - sizeof(unsigned long));

	/* Forget lazy FPU state */
	clear_fpu(tsk, regs);
	clear_used_math();
#endif
}

void release_thread(struct task_struct *dead_task)
{
	/* do nothing */
}

/* Fill in the fpu structure for a core dump..
 * Returns non-zero (and copies the FP state) only when the current task
 * has actually used the FPU; always 0 on FPU-less configurations. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
	int fpvalid = 0;

#if defined(CONFIG_SH_FPU)
	struct task_struct *tsk = current;

	fpvalid = !!tsk_used_math(tsk);
	if (fpvalid) {
		/* Flush live FPU registers into the thread struct first. */
		unlazy_fpu(tsk, regs);
		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
	}
#endif

	return fpvalid;
}

/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs ptregs;

	/* Copy the saved user-mode pt_regs from the top of tsk's kernel
	 * stack; with CONFIG_SH_DSP the DSP save area sits below them. */
	ptregs = *(struct pt_regs *)
		((unsigned long)tsk->thread_info + THREAD_SIZE
		 - sizeof(struct pt_regs)
#ifdef CONFIG_SH_DSP
		 - sizeof(struct pt_dspregs)
#endif
		 - sizeof(unsigned long));
	elf_core_copy_regs(regs, &ptregs);

	return 1;
}

/* Like dump_fpu(), but for an arbitrary (stopped) task instead of current. */
int
dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *fpu)
{
	int fpvalid = 0;

#if defined(CONFIG_SH_FPU)
	fpvalid = !!tsk_used_math(tsk);
	if (fpvalid) {
		struct pt_regs *regs = (struct pt_regs *)
			((unsigned long)tsk->thread_info
			 + THREAD_SIZE - sizeof(struct pt_regs)
			 - sizeof(unsigned long));
		unlazy_fpu(tsk, regs);
		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
	}
#endif

	return fpvalid;
}

asmlinkage void ret_from_fork(void);

/*
 * Set up the kernel stack and thread struct of a newly forked child so
 * that it resumes in ret_from_fork with a copy of the parent's register
 * frame.  Called from do_fork(); always returns 0.
 */
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
#if defined(CONFIG_SH_FPU)
	struct task_struct *tsk = current;

	/* Flush the parent's live FPU state so the child inherits a
	 * consistent snapshot. */
	unlazy_fpu(tsk, regs);
	p->thread.fpu = tsk->thread.fpu;
	copy_to_stopped_child_used_math(p);
#endif

	/* Place the child's pt_regs at the top of its kernel stack
	 * (below the DSP save area when configured, plus a pad word). */
	childregs = ((struct pt_regs *)
		(THREAD_SIZE + (unsigned long) p->thread_info)
#ifdef CONFIG_SH_DSP
		- sizeof(struct pt_dspregs)
#endif
		- sizeof(unsigned long)) - 1;
	*childregs = *regs;

	if (user_mode(regs)) {
		/* User fork/clone: child runs on the stack the caller gave us. */
		childregs->regs[15] = usp;
	} else {
		/* Kernel thread: stack starts at the top of the stack area. */
		childregs->regs[15] = (unsigned long)p->thread_info + THREAD_SIZE;
	}
	if (clone_flags & CLONE_SETTLS) {
		/* The TLS value arrives in r0 and becomes the child's GBR
		 * (NOTE(review): r0-as-TLS-argument assumed from this copy —
		 * confirm against the SH clone ABI). */
		childregs->gbr = childregs->regs[0];
	}
	childregs->regs[0] = 0; /* Set return value for child */

	p->thread.sp = (unsigned long) childregs;
	p->thread.pc = (unsigned long) ret_from_fork;

	/* The child does not inherit UBC single-step state. */
	p->thread.ubc_pc = 0;

	return 0;
}

/*
 * fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
	dump->magic = CMAGIC;
	dump->start_code = current->mm->start_code;
	dump->start_data = current->mm->start_data;
	/* Stack start rounded down to a page boundary. */
	dump->start_stack = regs->regs[15] & ~(PAGE_SIZE - 1);
	dump->u_tsize = (current->mm->end_code - dump->start_code) >> PAGE_SHIFT;
	dump->u_dsize = (current->mm->brk + (PAGE_SIZE-1) - dump->start_data) >> PAGE_SHIFT;
	dump->u_ssize = (current->mm->start_stack - dump->start_stack +
			 PAGE_SIZE - 1) >> PAGE_SHIFT;
	/* Debug registers will come here. */

	dump->regs = *regs;

	dump->u_fpvalid = dump_fpu(regs, &dump->fpu);
}

/* Tracing by user break controller.
 * Arms UBC channel A to raise a break on an instruction fetch at 'pc'
 * for address space 'asid'. */
static void
ubc_set_tracing(int asid, unsigned long pc)
{
	/* Break address register A = the PC to break on. */
	ctrl_outl(pc, UBC_BARA);

	/* We don't have any ASID settings for the SH-2! */
	if (cpu_data->type != CPU_SH7604)
		ctrl_outb(asid, UBC_BASRA);

	/* No address masking: require an exact match. */
	ctrl_outl(0, UBC_BAMRA);

	/* SH7729 needs different bus-cycle/control settings than the rest
	 * (and a 32-bit BRCR write). */
	if (cpu_data->type == CPU_SH7729) {
		ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
		ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
	} else {
		ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA);
		ctrl_outw(BRCR_PCBA, UBC_BRCR);
	}
}

/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 */
struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next)
{
#if defined(CONFIG_SH_FPU)
	struct pt_regs *regs = (struct pt_regs *)
		((unsigned long)prev->thread_info
		 + THREAD_SIZE - sizeof(struct pt_regs)
		 - sizeof(unsigned long));
	/* Lazily save the outgoing task's FPU state. */
	unlazy_fpu(prev, regs);
#endif

#ifdef CONFIG_PREEMPT
	{
		unsigned long flags;
		struct pt_regs *regs;

		local_irq_save(flags);
		regs = (struct pt_regs *)
			((unsigned long)prev->thread_info
			 + THREAD_SIZE - sizeof(struct pt_regs)
#ifdef CONFIG_SH_DSP
			 - sizeof(struct pt_dspregs)
#endif
			 - sizeof(unsigned long));
		/* NOTE(review): this looks like a gUSA (user-space atomicity)
		 * rollback — a user SP >= 0xc0000000 marks a critical region,
		 * with the real SP in r1 and the region start in r0.  Confirm
		 * against the SH gUSA convention before relying on this. */
		if (user_mode(regs) && regs->regs[15] >= 0xc0000000) {
			int offset = (int)regs->regs[15];

			/* Reset stack pointer: clear critical region mark */
			regs->regs[15] = regs->regs[1];
			if (regs->pc < regs->regs[0])
				/* Go to rewind point */
				regs->pc = regs->regs[0] + offset;
		}
		local_irq_restore(flags);
	}
#endif

	/*
	 * Restore the kernel mode register
	 * k7 (r7_bank1)
	 */
	asm volatile("ldc %0, r7_bank"
		     : /* no output */
		     : "r" (next->thread_info));

#ifdef CONFIG_MMU
	/* If no tasks are using the UBC, we're done */
	if (ubc_usercnt == 0)
		/* If no tasks are using the UBC, we're done */;
	else if (next->thread.ubc_pc && next->mm) {
		/* Arm the UBC to break at the incoming task's saved PC,
		 * scoped to its address space ID. */
		ubc_set_tracing(next->mm->context & MMU_CONTEXT_ASID_MASK,
				next->thread.ubc_pc);
	} else {
		/* Incoming task is not traced: disable both break channels. */
		ctrl_outw(0, UBC_BBRA);
		ctrl_outw(0, UBC_BBRB);
	}
#endif

	return prev;
}

/* fork(2): the child continues on a copy of the parent's stack (r15). */
asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
			unsigned long r6, unsigned long r7,
			struct pt_regs regs)
{
#ifdef CONFIG_MMU
	return do_fork(SIGCHLD, regs.regs[15], &regs, 0, NULL, NULL);
#else
	/* fork almost works, enough to trick you into looking elsewhere :-( */
	return -EINVAL;
#endif
}

/* clone(2): a NULL newsp means "keep using the caller's stack pointer". */
asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
			 unsigned long parent_tidptr,
			 unsigned long child_tidptr,
			 struct pt_regs regs)
{
	if (!newsp)
		newsp = regs.regs[15];
	return do_fork(clone_flags, newsp, &regs, 0,
		       (int __user *)parent_tidptr, (int __user *)child_tidptr);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
			 unsigned long r6, unsigned long r7,
			 struct pt_regs regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.regs[15], &regs,
		       0, NULL, NULL);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(char *ufilename, char **uargv,
			  char **uenvp, unsigned long r7,
			  struct pt_regs regs)
{
	int error;
	char *filename;

	/* Copy the pathname in from user space (returns ERR_PTR on failure). */
	filename = getname((char __user *)ufilename);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;

	error = do_execve(filename,
			  (char __user * __user *)uargv,
			  (char __user * __user *)uenvp,
			  &regs);
	if (error == 0) {
		/* Successful exec clears the delayed-trace flag. */
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
	}
	putname(filename);
out:
	return error;
}

/*
 * Return the "wait channel" (a PC inside the function the task sleeps
 * in) for a blocked task, or 0 when it cannot be determined.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long schedule_frame;
	unsigned long pc;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	/*
	 * The same comment as on the Alpha applies here, too ...
	 */
	pc = thread_saved_pc(p);
	if (in_sched_functions(pc)) {
		/* NOTE(review): hand-walks one stack frame to find the
		 * caller of schedule(); depends on the compiler's frame
		 * layout — fragile by nature. */
		schedule_frame = ((unsigned long *)(long)p->thread.sp)[1];
		return (unsigned long)((unsigned long *)schedule_frame)[1];
	}
	return pc;
}

/* Hardware (UBC) breakpoint trap: disarm the UBC, release the slot,
 * and deliver SIGTRAP to the traced task. */
asmlinkage void break_point_trap(unsigned long r4, unsigned long r5,
				 unsigned long r6, unsigned long r7,
				 struct pt_regs regs)
{
	/* Clear tracing. */
	ctrl_outw(0, UBC_BBRA);
	ctrl_outw(0, UBC_BBRB);
	current->thread.ubc_pc = 0;
	ubc_usercnt -= 1;

	force_sig(SIGTRAP, current);
}

/* Software breakpoint trap: rewind the PC by 2 (one SH instruction) so
 * the PC reported to the debugger points at the breakpoint itself.
 * (regs is the live syscall register frame via the asmlinkage
 * pass-by-value convention, so this write takes effect.) */
asmlinkage void break_point_trap_software(unsigned long r4, unsigned long r5,
					  unsigned long r6, unsigned long r7,
					  struct pt_regs regs)
{
	regs.pc -= 2;
	force_sig(SIGTRAP, current);
}