xref: /linux/arch/arc/kernel/process.c (revision 98838d95075a5295f3478ceba18bcccf472e30f4)
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Amit Bhor, Kanika Nema: Codito Technologies 2004
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/elf.h>
#include <linux/tick.h>

SYSCALL_DEFINE1(arc_settls, void *, user_tls_data_ptr)
{
	task_thread_info(current)->thr_ptr = (unsigned int)user_tls_data_ptr;
	return 0;
}

/*
 * We return the user space TLS data ptr as the sys-call return code.
 * Ideally it should be a copy_to_user(), but we can cheat: some sys-calls
 * legitimately return absurdly high values, and since the TLS data ptr is
 * never going to be in the 0xFFFF_xxxx range it won't be mistaken for a
 * sys-call error. This is also loads better than a copy_to_user(), which
 * is a guaranteed D-TLB miss.
 */
SYSCALL_DEFINE0(arc_gettls)
{
	return task_thread_info(current)->thr_ptr;
}

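/*
 * Illustrative sketch only, not part of this file: userland (e.g. a C
 * library's TLS setup) would typically reach the two calls above via raw
 * syscall(2), relying on the "won't collide with the error range" trick
 * documented above. "tls_block" below is a hypothetical pointer to the
 * thread's TLS area:
 *
 *	syscall(__NR_arc_settls, tls_block);
 *	void *tp = (void *)syscall(__NR_arc_gettls);
 */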
SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
{
	int uval;
	int ret;

	/*
	 * This is only for old cores lacking LLOCK/SCOND, which by definition
	 * can't possibly be SMP, so it doesn't need to be SMP safe.
	 * This also helps reduce the serializing overhead in the UP case.
	 */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
		return -EFAULT;

	preempt_disable();

	ret = __get_user(uval, uaddr);
	if (ret)
		goto done;

	if (uval != expected)
		ret = -EAGAIN;
	else
		ret = __put_user(new, uaddr);

done:
	preempt_enable();

	return ret;
}

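/*
 * Illustrative sketch only, not part of this file: on such LLOCK/SCOND-less
 * cores a userspace runtime could use the syscall above as its
 * compare-and-exchange primitive, e.g. a crude lock acquire ("lock" is
 * hypothetical, error handling omitted):
 *
 *	while (syscall(__NR_arc_usr_cmpxchg, &lock, 0, 1) != 0)
 *		;	// non-zero (-EAGAIN) means *uaddr != expected, retry
 *
 * A zero return means the word still held "expected" and was replaced
 * with "new" atomically w.r.t. preemption on this CPU.
 */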
void arch_cpu_idle(void)
{
	/* sleep, but enable all interrupts before committing */
	__asm__ __volatile__(
		"sleep %0	\n"
		:
		:"I"(ISA_SLEEP_ARG)); /* operand must be an immediate const, not "r" */
}

asmlinkage void ret_from_fork(void);

/*
 * Copy architecture-specific thread state
 *
 * Layout of the child's kernel mode stack, as set up at the end of this
 * function:
 *
 * |     ...        |
 * |     ...        |
 * |    unused      |
 * |                |
 * ------------------
 * |     r25        |   <==== top of Stack (thread.ksp)
 * ~                ~
 * |    --to--      |   (CALLEE Regs of kernel mode)
 * |     r13        |
 * ------------------
 * |     fp         |
 * |    blink       |   @ret_from_fork
 * ------------------
 * |                |
 * ~                ~
 * ~                ~
 * |                |
 * ------------------
 * |     r12        |
 * ~                ~
 * |    --to--      |   (scratch Regs of user mode)
 * |     r0         |
 * ------------------
 * |      SP        |
 * |    orig_r0     |
 * |    event/ECR   |
 * |    user_r25    |
 * ------------------  <===== END of PAGE
 */
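/*
 * Note (informational, mirrors the picture above): the three anchors
 * computed at the top of copy_thread() map onto the regions drawn above -
 * c_regs is the pt_regs block at the end of the page, childksp is the
 * fp/blink pair popped by __switch_to(), and c_callee is the callee_regs
 * block whose top (r25) becomes thread.ksp.
 */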
int copy_thread(unsigned long clone_flags,
		unsigned long usp, unsigned long kthread_arg,
		struct task_struct *p)
{
	struct pt_regs *c_regs;        /* child's pt_regs */
	unsigned long *childksp;       /* to unwind out of __switch_to() */
	struct callee_regs *c_callee;  /* child's callee regs */
	struct callee_regs *parent_callee;  /* parent's callee regs */
	struct pt_regs *regs = current_pt_regs();

	/* Mark the specific anchors to begin with (see pic above) */
	c_regs = task_pt_regs(p);
	childksp = (unsigned long *)c_regs - 2;  /* 2 words for FP/BLINK */
	c_callee = ((struct callee_regs *)childksp) - 1;

	/*
	 * __switch_to() uses thread.ksp to start unwinding the stack.
	 * For kernel threads we don't need to create callee regs, but the
	 * stack layout nevertheless needs to remain the same.
	 * Also, since __switch_to() unwinds callee regs anyway, we use
	 * that to populate the kernel thread entry-pt/args into callee regs,
	 * so that ret_from_kernel_thread() becomes simpler.
	 */
	p->thread.ksp = (unsigned long)c_callee;	/* THREAD_KSP */

	/* __switch_to expects FP(0), BLINK(return addr) at top */
	childksp[0] = 0;			/* fp */
	childksp[1] = (unsigned long)ret_from_fork; /* blink */

	if (unlikely(p->flags & PF_KTHREAD)) {
		memset(c_regs, 0, sizeof(struct pt_regs));

		c_callee->r13 = kthread_arg;
		c_callee->r14 = usp;  /* function */

		return 0;
	}
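
	/*
	 * Informational: for the kernel thread case above, the return path in
	 * entry.S (ret_from_fork / ret_from_kernel_thread, see the comment
	 * before this function) picks the entry point out of r14 and its
	 * argument out of r13, which is why they are stashed in the child's
	 * callee regs here.
	 */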

	/*--------- User Task Only --------------*/

	/* __switch_to expects FP(0), BLINK(return addr) at top of stack */
	childksp[0] = 0;				/* for POP fp */
	childksp[1] = (unsigned long)ret_from_fork;	/* for POP blink */

	/* Copy parent's pt_regs onto child's kernel mode stack */
	*c_regs = *regs;

	if (usp)
		c_regs->sp = usp;

	c_regs->r0 = 0;		/* fork returns 0 in child */

	parent_callee = ((struct callee_regs *)regs) - 1;
	*c_callee = *parent_callee;

	if (unlikely(clone_flags & CLONE_SETTLS)) {
		/*
		 * Set the task's userland TLS data ptr from the 4th arg (r3).
		 * Note: the clone() C-lib call passes args differently from
		 * the raw clone sys-call.
		 */
		task_thread_info(p)->thr_ptr = regs->r3;
	} else {
		/* Normal fork case: set parent's TLS ptr in child */
		task_thread_info(p)->thr_ptr =
		task_thread_info(current)->thr_ptr;
	}

	return 0;
}

/*
 * Do necessary setup to start up a new user task
 */
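/*
 * Informational: this is invoked from the generic ELF loader
 * (fs/binfmt_elf.c) once the binary is mapped, roughly as
 * start_thread(regs, elf_entry, bprm->p), i.e. pc is the program (or
 * interpreter) entry point and usp the freshly built user stack pointer.
 */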
void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long usp)
{
	regs->sp = usp;
	regs->ret = pc;

	/*
	 * [U]ser Mode bit set
	 * [L] ZOL loop inhibited to begin with - cleared by an LP insn
	 * Interrupts enabled
	 */
	regs->status32 = STATUS_U_MASK | STATUS_L_MASK | ISA_INIT_STATUS_BITS;

	/* bogus seed values for debugging */
	regs->lp_start = 0x10;
	regs->lp_end = 0x80;
}

/*
 * Some archs flush debug and FPU info here
 */
void flush_thread(void)
{
}

int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
	return 0;
}

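/*
 * Informational: the generic ELF loader calls elf_check_arch() on both the
 * main executable and any ELF interpreter before committing to the exec;
 * returning 0 here makes the exec fail with -ENOEXEC (and, for the OSABI
 * mismatch below, the offending task is additionally sent a SIGSEGV).
 */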
int elf_check_arch(const struct elf32_hdr *x)
{
	unsigned int eflags;

	if (x->e_machine != EM_ARC_INUSE) {
		pr_err("ELF not built for %s ISA\n",
			is_isa_arcompact() ? "ARCompact":"ARCv2");
		return 0;
	}

	eflags = x->e_flags;
	if ((eflags & EF_ARC_OSABI_MSK) != EF_ARC_OSABI_CURRENT) {
		pr_err("ABI mismatch - you need newer toolchain\n");
		force_sigsegv(SIGSEGV, current);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(elf_check_arch);