/*
 * arch/xtensa/kernel/process.c
 *
 * Xtensa Processor version.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 *
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * Chris Zankel <chris@zankel.net>
 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
 * Kevin Chea
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/mqueue.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/platform.h>
#include <asm/mmu.h>
#include <asm/irq.h>
#include <linux/atomic.h>
#include <asm/asm-offsets.h>
#include <asm/regs.h>

extern void ret_from_fork(void);

struct task_struct *current_set[NR_CPUS] = {&init_task, };

void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);


#if XTENSA_HAVE_COPROCESSORS

void coprocessor_release_all(struct thread_info *ti)
{
	unsigned long cpenable;
	int i;

	/* Make sure we don't switch tasks during this operation. */

	preempt_disable();

	/* Walk through all coprocessor owners and release any owned by
	 * the requested thread. */

	cpenable = ti->cpenable;

	for (i = 0; i < XCHAL_CP_MAX; i++) {
		if (coprocessor_owner[i] == ti) {
			coprocessor_owner[i] = 0;
			cpenable &= ~(1 << i);
		}
	}

	ti->cpenable = cpenable;
	coprocessor_clear_cpenable();

	preempt_enable();
}

void coprocessor_flush_all(struct thread_info *ti)
{
	unsigned long cpenable;
	int i;

	preempt_disable();

	cpenable = ti->cpenable;

	for (i = 0; i < XCHAL_CP_MAX; i++) {
		if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
			coprocessor_flush(ti, i);
		cpenable >>= 1;
	}

	preempt_enable();
}

#endif


/*
 * Power management idle function, if any is provided by the platform.
 */

void cpu_idle(void)
{
	local_irq_enable();

	/* endless idle loop with no priority at all */
	while (1) {
		rcu_idle_enter();
		while (!need_resched())
			platform_idle();
		rcu_idle_exit();
		schedule_preempt_disabled();
	}
}

/*
 * This is called when the thread calls exit().
 */
void exit_thread(void)
{
#if XTENSA_HAVE_COPROCESSORS
	coprocessor_release_all(current_thread_info());
#endif
}

/*
 * Flush thread state.  This is called when a thread does an execve().
 * Note that we flush the coprocessor registers in case the execve() fails.
 */
void flush_thread(void)
{
#if XTENSA_HAVE_COPROCESSORS
	struct thread_info *ti = current_thread_info();
	coprocessor_flush_all(ti);
	coprocessor_release_all(ti);
#endif
}
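
/*
 * Note that flush_thread() both flushes and releases the coprocessor
 * state: the flush writes any live coprocessor registers back to the
 * thread's save area (needed if the execve() fails and the old image
 * keeps running), and the release drops ownership and clears the
 * coprocessor-enable state for the new image.  exit_thread() only needs
 * the release, since a dying thread's state is never reloaded.
 */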

/*
 * This gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
#if XTENSA_HAVE_COPROCESSORS
	coprocessor_flush_all(task_thread_info(src));
#endif
	*dst = *src;
	return 0;
}

/*
 * Copy thread.
 *
 * The stack layout for the new thread looks like this:
 *
 *	+------------------------+ <- sp in childregs (= tos)
 *	|       childregs        |
 *	+------------------------+ <- thread.sp = sp in dummy-frame
 *	|      dummy-frame       |    (saved in dummy-frame spill-area)
 *	+------------------------+
 *
 * We create a dummy frame to return to ret_from_fork:
 *   a0 points to ret_from_fork (simulating a call4)
 *   sp points to itself (thread.sp)
 *   a2, a3 are unused.
 *
 * Note: This is a pristine frame, so we don't need any spill region on top of
 *       childregs.
 *
 * The fun part: if we're keeping the same VM (i.e. cloning a thread,
 * not an entire process), we're normally given a new usp, and we CANNOT share
 * any live address register windows.  If we just copy those live frames over,
 * the two threads (parent and child) will overflow the same frames onto the
 * parent stack at different times, likely corrupting the parent stack (esp.
 * if the parent returns from functions that called clone() and calls new
 * ones, before the child overflows its now-old copies of its parent's
 * windows).  One solution is to spill the windows to the parent stack, but
 * that is fairly involved.  It is much simpler to just not copy those live
 * frames across.
 */

int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	unsigned long tos;
	int user_mode = user_mode(regs);

#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
	struct thread_info *ti;
#endif

	/* Set up new TSS. */
	tos = (unsigned long)task_stack_page(p) + THREAD_SIZE;
	if (user_mode)
		childregs = (struct pt_regs *)(tos - PT_USER_SIZE);
	else
		childregs = (struct pt_regs *)tos - 1;

	/* This does not copy all the regs.  In a bout of brilliance or madness,
	   ARs beyond a0-a15 exist past the end of the struct. */
	*childregs = *regs;

	/* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */
	*((int *)childregs - 3) = (unsigned long)childregs;
	*((int *)childregs - 4) = 0;

	childregs->areg[2] = 0;
	p->set_child_tid = p->clear_child_tid = NULL;
	p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1);
	p->thread.sp = (unsigned long)childregs;

	if (user_mode) {

		childregs->areg[1] = usp;
		if (clone_flags & CLONE_VM) {
			childregs->wmask = 1;	/* can't share live windows */
		} else {
			int len = childregs->wmask & ~0xf;
			memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
			       &regs->areg[XCHAL_NUM_AREGS - len/4], len);
		}
		/* FIXME: we need to set THREADPTR in thread_info... */
		if (clone_flags & CLONE_SETTLS)
			childregs->areg[2] = childregs->areg[6];

	} else {
		/* In kernel space, we start a new thread with a new stack. */
		childregs->wmask = 1;
		childregs->areg[1] = tos;
	}

#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
	ti = task_thread_info(p);
	ti->cpenable = 0;
#endif

	return 0;
}
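
/*
 * A note on the dummy frame built above: a0 = 0 and a1 = childregs are
 * stored in the two spill slots just below childregs, and thread.ra is
 * encoded via MAKE_RA_FOR_CALL(..., 0x1), i.e. as the return address of
 * a call4.  When the new task is switched to for the first time it
 * therefore "returns" into ret_from_fork with its stack pointer at
 * childregs, exactly as described in the comment before copy_thread().
 */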

/*
 * These bracket the sleeping functions.
 */

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long sp, pc;
	unsigned long stack_page = (unsigned long) task_stack_page(p);
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.sp;
	pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);

	do {
		if (sp < stack_page + sizeof(struct task_struct) ||
		    sp >= (stack_page + THREAD_SIZE) ||
		    pc == 0)
			return 0;
		if (!in_sched_functions(pc))
			return pc;

		/* Stack layout: sp-4: ra, sp-3: sp'
		 * (word offsets into the register spill area just below sp).
		 */

		pc = MAKE_PC_FROM_RA(*((unsigned long *)sp - 4), sp);
		sp = *((unsigned long *)sp - 3);
	} while (count++ < 16);
	return 0;
}

/*
 * xtensa_gregset_t and 'struct pt_regs' are vastly different formats
 * of processor registers.  Besides different ordering,
 * xtensa_gregset_t contains non-live register information that
 * 'struct pt_regs' does not.  Exception handling (primarily) uses
 * 'struct pt_regs'.  Core files and ptrace use xtensa_gregset_t.
 */

void xtensa_elf_core_copy_regs(xtensa_gregset_t *elfregs, struct pt_regs *regs)
{
	unsigned long wb, ws, wm;
	int live, last;

	wb = regs->windowbase;
	ws = regs->windowstart;
	wm = regs->wmask;
	ws = ((ws >> wb) | (ws << (WSBITS - wb))) & ((1 << WSBITS) - 1);

	/* Don't leak any random bits. */

	memset(elfregs, 0, sizeof(*elfregs));

	/* Note: PS.EXCM is not set while the user task is running; its
	 * being set in regs->ps is for exception handling convenience.
	 */

	elfregs->pc = regs->pc;
	elfregs->ps = (regs->ps & ~(1 << PS_EXCM_BIT));
	elfregs->lbeg = regs->lbeg;
	elfregs->lend = regs->lend;
	elfregs->lcount = regs->lcount;
	elfregs->sar = regs->sar;
	elfregs->windowstart = ws;

	live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
	last = XCHAL_NUM_AREGS - (wm >> 4) * 4;
	memcpy(elfregs->a, regs->areg, live * 4);
	memcpy(elfregs->a + last, regs->areg + last, (wm >> 4) * 16);
}

int dump_fpu(void)
{
	return 0;
}

asmlinkage
long xtensa_clone(unsigned long clone_flags, unsigned long newsp,
		  void __user *parent_tid, void *child_tls,
		  void __user *child_tid, long a5,
		  struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->areg[1];
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * xtensa_execve() executes a new program.
 */

asmlinkage
long xtensa_execve(const char __user *name,
		   const char __user *const __user *argv,
		   const char __user *const __user *envp,
		   long a3, long a4, long a5,
		   struct pt_regs *regs)
{
	long error;
	struct filename *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename->name, argv, envp, regs);
	putname(filename);
out:
	return error;
}
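
/*
 * Both wrappers above take six register arguments followed by a
 * struct pt_regs pointer; the otherwise unused long parameters
 * (a3..a5 in xtensa_execve, a5 in xtensa_clone) only pad the
 * prototypes so that the pt_regs pointer supplied by the syscall
 * dispatcher always arrives as the seventh argument.
 */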