// TODO verify coprocessor handling
/*
 * arch/xtensa/kernel/process.c
 *
 * Xtensa Processor version.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 *
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * Chris Zankel <chris@zankel.net>
 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
 * Kevin Chea
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/mqueue.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/platform.h>
#include <asm/mmu.h>
#include <asm/irq.h>
#include <asm/atomic.h>
#include <asm/asm-offsets.h>
#include <asm/coprocessor.h>

extern void ret_from_fork(void);

static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
EXPORT_SYMBOL(init_mm);

union thread_union init_thread_union
        __attribute__((__section__(".data.init_task"))) =
{ INIT_THREAD_INFO(init_task) };

struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

struct task_struct *current_set[NR_CPUS] = {&init_task, };

void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);


#if XCHAL_CP_NUM > 0

/*
 * Coprocessor ownership.
 */

coprocessor_info_t coprocessor_info[] = {
        { 0, XTENSA_CPE_CP0_OFFSET },
        { 0, XTENSA_CPE_CP1_OFFSET },
        { 0, XTENSA_CPE_CP2_OFFSET },
        { 0, XTENSA_CPE_CP3_OFFSET },
        { 0, XTENSA_CPE_CP4_OFFSET },
        { 0, XTENSA_CPE_CP5_OFFSET },
        { 0, XTENSA_CPE_CP6_OFFSET },
        { 0, XTENSA_CPE_CP7_OFFSET },
};

#endif

/*
 * Power-management idle function, if one is provided by the platform.
 */

void cpu_idle(void)
{
        local_irq_enable();

        /* endless idle loop with no priority at all */
        while (1) {
                while (!need_resched())
                        platform_idle();
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}

/*
 * Free current thread data structures etc.
 */

void exit_thread(void)
{
        release_coprocessors(current);  /* Empty macro if no CPs are defined */
}

void flush_thread(void)
{
        release_coprocessors(current);  /* Empty macro if no CPs are defined */
}

/*
 * Copy thread.
 *
 * The stack layout for the new thread looks like this:
 *
 *      +------------------------+ <- sp in childregs (= tos)
 *      |       childregs        |
 *      +------------------------+ <- thread.sp = sp in dummy-frame
 *      |       dummy-frame      |    (saved in dummy-frame spill-area)
 *      +------------------------+
 *
 * We create a dummy frame to return to ret_from_fork:
 *   a0 points to ret_from_fork (simulating a call4)
 *   sp points to itself (thread.sp)
 *   a2, a3 are unused.
 *
 * Note: This is a pristine frame, so we don't need any spill region on top of
 *       childregs.
 */

int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
                unsigned long unused,
                struct task_struct * p, struct pt_regs * regs)
{
        struct pt_regs *childregs;
        unsigned long tos;
        int user_mode = user_mode(regs);

        /* Set up new TSS. */
        tos = (unsigned long)task_stack_page(p) + THREAD_SIZE;
        if (user_mode)
                childregs = (struct pt_regs*)(tos - PT_USER_SIZE);
        else
                childregs = (struct pt_regs*)tos - 1;

        *childregs = *regs;

        /* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */
        *((int*)childregs - 3) = (unsigned long)childregs;
        *((int*)childregs - 4) = 0;

        childregs->areg[1] = tos;
        childregs->areg[2] = 0;
        p->set_child_tid = p->clear_child_tid = NULL;
        p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1);
        p->thread.sp = (unsigned long)childregs;
        if (user_mode(regs)) {

                int len = childregs->wmask & ~0xf;
                childregs->areg[1] = usp;
                memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
                       &regs->areg[XCHAL_NUM_AREGS - len/4], len);

                if (clone_flags & CLONE_SETTLS)
                        childregs->areg[2] = childregs->areg[6];

        } else {
                /* In kernel space, we start a new thread with a new stack. */
                childregs->wmask = 1;
        }
        return 0;
}


/*
 * Create a kernel thread
 */

int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
        long retval;
        __asm__ __volatile__
                ("mov   a5, %4\n\t"             /* preserve fn in a5 */
                 "mov   a6, %3\n\t"             /* preserve and set up arg in a6 */
                 "movi  a2, %1\n\t"             /* load __NR_clone for syscall */
                 "mov   a3, sp\n\t"             /* sp check and sys_clone */
                 "mov   a4, %5\n\t"             /* load flags for syscall */
                 "syscall\n\t"
                 "beq   a3, sp, 1f\n\t"         /* branch if parent */
                 "callx4        a5\n\t"         /* call fn */
                 "movi  a2, %2\n\t"             /* load __NR_exit for syscall */
                 "mov   a3, a6\n\t"             /* load fn return value */
                 "syscall\n"
                 "1:\n\t"
                 "mov   %0, a2\n\t"             /* parent: return the syscall result */
                 : "=r" (retval)
                 : "i" (__NR_clone), "i" (__NR_exit),
                   "r" (arg), "r" (fn),
                   "r" (flags | CLONE_VM)
                 : "a2", "a3", "a4", "a5", "a6");
        return retval;
}
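
/*
 * Usage sketch (illustration only, not part of the original file; the
 * names example_thread_fn and example_start are hypothetical).  A caller
 * passes a thread function, an argument, and extra clone flags; CLONE_VM
 * is always added by kernel_thread() itself, and the parent gets the
 * clone syscall's result back as the return value.
 */
#if 0
static int example_thread_fn(void *data)
{
        /* Runs in the new kernel thread; when it returns, the asm stub
         * above issues the __NR_exit syscall with this return value. */
        return 0;
}

static void example_start(void)
{
        kernel_thread(example_thread_fn, NULL, CLONE_FS | CLONE_FILES);
}
#endif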

/*
 * These bracket the sleeping functions..
 */

unsigned long get_wchan(struct task_struct *p)
{
        unsigned long sp, pc;
        unsigned long stack_page = (unsigned long) task_stack_page(p);
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        sp = p->thread.sp;
        pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);

        do {
                if (sp < stack_page + sizeof(struct task_struct) ||
                    sp >= (stack_page + THREAD_SIZE) ||
                    pc == 0)
                        return 0;
                if (!in_sched_functions(pc))
                        return pc;

                /* Stack layout: sp-4: ra, sp-3: sp' (in words) */

                pc = MAKE_PC_FROM_RA(*((unsigned long *)sp - 4), sp);
                sp = *((unsigned long *)sp - 3);
        } while (count++ < 16);
        return 0;
}

/*
 * do_copy_regs() gathers information from 'struct pt_regs' and
 * 'current->thread.areg[]' to fill in the xtensa_gregset_t
 * structure.
 *
 * xtensa_gregset_t and 'struct pt_regs' are vastly different formats
 * of processor registers.  Besides different ordering,
 * xtensa_gregset_t contains non-live register information that
 * 'struct pt_regs' does not.  Exception handling (primarily) uses
 * 'struct pt_regs'.  Core files and ptrace use xtensa_gregset_t.
 */

void do_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
                   struct task_struct *tsk)
{
        int i, n, wb_offset;

        elfregs->xchal_config_id0 = XCHAL_HW_CONFIGID0;
        elfregs->xchal_config_id1 = XCHAL_HW_CONFIGID1;

        __asm__ __volatile__ ("rsr %0, 176\n" : "=a" (i));
        elfregs->cpux = i;
        __asm__ __volatile__ ("rsr %0, 208\n" : "=a" (i));
        elfregs->cpuy = i;

        /* Note: PS.EXCM is not set while the user task is running; it is
         * set in regs->ps only for exception-handling convenience.
         */

        elfregs->pc = regs->pc;
        elfregs->ps = (regs->ps & ~XCHAL_PS_EXCM_MASK);
        elfregs->exccause = regs->exccause;
        elfregs->excvaddr = regs->excvaddr;
        elfregs->windowbase = regs->windowbase;
        elfregs->windowstart = regs->windowstart;
        elfregs->lbeg = regs->lbeg;
        elfregs->lend = regs->lend;
        elfregs->lcount = regs->lcount;
        elfregs->sar = regs->sar;
        elfregs->syscall = regs->syscall;

        /* Copy the register file.
         * The layout looks like this:
         *
         *   |  a0 ... a15  |  Z ... Z  |  arX ... arY  |
         *    current window    unused     saved frames
         */

        memset (elfregs->ar, 0, sizeof(elfregs->ar));

        wb_offset = regs->windowbase * 4;
        n = (regs->wmask&1)? 4 : (regs->wmask&2)? 8 : (regs->wmask&4)? 12 : 16;

        for (i = 0; i < n; i++)
                elfregs->ar[(wb_offset + i) % XCHAL_NUM_AREGS] = regs->areg[i];

        n = (regs->wmask >> 4) * 4;

        for (i = XCHAL_NUM_AREGS - n; n > 0; i++, n--)
                elfregs->ar[(wb_offset + i) % XCHAL_NUM_AREGS] = regs->areg[i];
}
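
/*
 * Worked example for the two copy loops above (illustrative; assumes a
 * configuration with XCHAL_NUM_AREGS == 64).  With regs->windowbase == 2
 * and regs->wmask == 0x21, wb_offset is 8, the live window is 4 registers
 * (wmask & 1), and two 4-register caller frames sit at the top of
 * regs->areg[] (wmask >> 4 == 2, so the second loop copies 8 registers
 * starting at i == 56).  The loops then place areg[0..3] at ar[8..11] and
 * areg[56..63] at ar[0..7]: every register lands at its absolute AR index,
 * (windowbase * 4 + i) mod XCHAL_NUM_AREGS.
 */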

void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
{
        do_copy_regs ((xtensa_gregset_t *)elfregs, regs, current);
}


/* The inverse of do_copy_regs().  No error or sanity checking. */

void do_restore_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
                      struct task_struct *tsk)
{
        int i, n, wb_offset;

        /* Note: PS.EXCM is not set while the user task is running; it
         * needs to be set in regs->ps for exception-handling convenience.
         */

        regs->pc = elfregs->pc;
        regs->ps = (elfregs->ps | XCHAL_PS_EXCM_MASK);
        regs->exccause = elfregs->exccause;
        regs->excvaddr = elfregs->excvaddr;
        regs->windowbase = elfregs->windowbase;
        regs->windowstart = elfregs->windowstart;
        regs->lbeg = elfregs->lbeg;
        regs->lend = elfregs->lend;
        regs->lcount = elfregs->lcount;
        regs->sar = elfregs->sar;
        regs->syscall = elfregs->syscall;

        /* Clear everything. */

        memset (regs->areg, 0, sizeof(regs->areg));

        /* Copy regs from the live window frame. */

        wb_offset = regs->windowbase * 4;
        n = (regs->wmask&1)? 4 : (regs->wmask&2)? 8 : (regs->wmask&4)? 12 : 16;

        for (i = 0; i < n; i++)
                regs->areg[(wb_offset+i) % XCHAL_NUM_AREGS] = elfregs->ar[i];

        n = (regs->wmask >> 4) * 4;

        for (i = XCHAL_NUM_AREGS - n; n > 0; i++, n--)
                regs->areg[(wb_offset+i) % XCHAL_NUM_AREGS] = elfregs->ar[i];
}

/*
 * do_save_fpregs() gathers information from 'struct pt_regs' and
 * 'current->thread' to fill in the elf_fpregset_t structure.
 *
 * Core files and ptrace use elf_fpregset_t.
 */

void do_save_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
                     struct task_struct *tsk)
{
#if XCHAL_HAVE_CP

        extern unsigned char _xtensa_reginfo_tables[];
        extern unsigned _xtensa_reginfo_table_size;
        int i;
        unsigned long flags;

        /* Before dumping coprocessor state from memory,
         * ensure any live coprocessor contents for this
         * task are first saved to memory:
         */
        local_irq_save(flags);

        for (i = 0; i < XCHAL_CP_MAX; i++) {
                if (tsk == coprocessor_info[i].owner) {
                        enable_coprocessor(i);
                        save_coprocessor_registers(
                            tsk->thread.cp_save + coprocessor_info[i].offset, i);
                        disable_coprocessor(i);
                }
        }

        local_irq_restore(flags);

        /* Now dump coprocessor & extra state: */
        memcpy((unsigned char*)fpregs,
               _xtensa_reginfo_tables, _xtensa_reginfo_table_size);
        memcpy((unsigned char*)fpregs + _xtensa_reginfo_table_size,
               tsk->thread.cp_save, XTENSA_CP_EXTRA_SIZE);
#endif
}
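
/*
 * Resulting elf_fpregset_t layout (sketch, derived from the two memcpy
 * calls above):
 *
 *      +--------------------------------+  offset 0
 *      | copy of _xtensa_reginfo_tables |  _xtensa_reginfo_table_size bytes
 *      +--------------------------------+
 *      | coprocessor / "extra" state    |  XTENSA_CP_EXTRA_SIZE bytes
 *      | (from tsk->thread.cp_save)     |
 *      +--------------------------------+
 *
 * do_restore_fpregs() below relies on this ordering: it first compares the
 * reginfo table, then copies the state portion back into tsk->thread.cp_save.
 */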

/*
 * The inverse of do_save_fpregs().
 * Copies coprocessor and extra state from fpregs into regs and tsk->thread.
 * Returns 0 on success, non-zero if the layout doesn't match.
 */

int do_restore_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
                       struct task_struct *tsk)
{
#if XCHAL_HAVE_CP

        extern unsigned char _xtensa_reginfo_tables[];
        extern unsigned _xtensa_reginfo_table_size;
        int i;
        unsigned long flags;

        /* Make sure the save-area layouts match.
         * FIXME: in the future we could allow restoring from
         * a different layout of the same registers, by comparing
         * fpregs' table with _xtensa_reginfo_tables and matching
         * entries and copying registers one at a time.
         * Not too sure yet whether that's very useful.
         */

        if (memcmp((unsigned char*)fpregs,
                   _xtensa_reginfo_tables, _xtensa_reginfo_table_size)) {
                return -1;
        }

        /* Before restoring coprocessor state from memory,
         * ensure any live coprocessor contents for this
         * task are first invalidated.
         */

        local_irq_save(flags);

        for (i = 0; i < XCHAL_CP_MAX; i++) {
                if (tsk == coprocessor_info[i].owner) {
                        enable_coprocessor(i);
                        save_coprocessor_registers(
                            tsk->thread.cp_save + coprocessor_info[i].offset, i);
                        coprocessor_info[i].owner = 0;
                        disable_coprocessor(i);
                }
        }

        local_irq_restore(flags);

        /* Now restore coprocessor & extra state: */

        memcpy(tsk->thread.cp_save,
               (unsigned char*)fpregs + _xtensa_reginfo_table_size,
               XTENSA_CP_EXTRA_SIZE);
#endif
        return 0;
}

/*
 * Fill in the CP structure for a core dump for a particular task.
 */

int
dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r)
{
        /* see asm/coprocessor.h for this magic number 16 */
#if XTENSA_CP_EXTRA_SIZE > 16
        do_save_fpregs (r, regs, task);

        /* For now, bit 16 means some extra state may be present: */
        // FIXME!! need to track to return more accurate mask
        return 0x10000 | XCHAL_CP_MASK;
#else
        return 0;       /* no coprocessors active on this processor */
#endif
}

/*
 * Fill in the CP structure for a core dump.
 * This includes any FPU coprocessor.
 * Here, we dump all coprocessors, and other ("extra") custom state.
 *
 * This function is called by elf_core_dump() in fs/binfmt_elf.c
 * (in which case 'regs' comes from calls to do_coredump, see signals.c).
 */

int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
{
        return dump_task_fpu(regs, current, r);
}
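
/*
 * Note on the value returned by dump_task_fpu() above (illustrative):
 * bit 16 flags that extra state may be present, and the low bits are
 * XCHAL_CP_MASK, one bit per configured coprocessor.  For example, a
 * configuration with XCHAL_CP_MASK == 0xc0 (coprocessors 6 and 7)
 * yields 0x10000 | 0xc0 == 0x100c0.
 */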