xref: /linux/arch/sparc/kernel/sys_sparc_32.c (revision cc4589ebfae6f8dbb5cf880a0a67eedab3416492)
/* linux/arch/sparc/kernel/sys_sparc_32.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/sparc
 * platform.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/ipc.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>

/* #define DEBUG_UNIMP_SYSCALL */

/* XXX Make this per-binary type, this way we can detect the type of
 * XXX a binary.  Every Sparc executable calls this very early on.
 */
asmlinkage unsigned long sys_getpagesize(void)
{
	return PAGE_SIZE; /* Possibly older binaries want 8192 on sun4's? */
}

#define COLOUR_ALIGN(addr)      (((addr)+SHMLBA-1)&~(SHMLBA-1))
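/* Rough illustration (SHMLBA itself depends on the cache of the running
 * machine, so the value used here is only an example): with SHMLBA == 0x10000,
 * COLOUR_ALIGN(0x12345) == (0x12345 + 0xffff) & ~0xffff == 0x20000, i.e. the
 * address is rounded up to the next cache-colour boundary.
 */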

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct * vmm;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
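		/* That is: a fixed shared mapping is rejected unless addr and
		 * the file offset (pgoff << PAGE_SHIFT) are congruent modulo
		 * SHMLBA, so every mapping of a given page ends up with the
		 * same cache colour.
		 */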
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	/* See asm-sparc/uaccess.h */
	if (len > TASK_SIZE - PAGE_SIZE)
		return -ENOMEM;
	if (ARCH_SUN4C && len > 0x20000000)
		return -ENOMEM;
	if (!addr)
		addr = TASK_UNMAPPED_BASE;

	if (flags & MAP_SHARED)
		addr = COLOUR_ALIGN(addr);
	else
		addr = PAGE_ALIGN(addr);

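	/* First-fit search: walk the VMA list starting at the first VMA that
	 * ends above addr and return the first hole big enough for len,
	 * re-aligning to a cache-colour boundary after each candidate when
	 * the mapping is shared.
	 */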
	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point:  (!vmm || addr < vmm->vm_end). */
		if (ARCH_SUN4C && addr < 0xe0000000 && 0x20000000 - len < addr) {
			addr = PAGE_OFFSET;
			vmm = find_vma(current->mm, PAGE_OFFSET);
		}
		if (TASK_SIZE - PAGE_SIZE - len < addr)
			return -ENOMEM;
		if (!vmm || addr + len <= vmm->vm_start)
			return addr;
		addr = vmm->vm_end;
		if (flags & MAP_SHARED)
			addr = COLOUR_ALIGN(addr);
	}
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
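/* Traditionally (SunOS and the SPARC syscall ABI) both descriptors come back
 * in registers, and that is what the code below implements: fd[0] is the
 * syscall return value (userspace %o0) and fd[1] is written into the saved
 * trap frame as u_regs[UREG_I1], which the caller sees as %o1 because of the
 * register window shift at trap time.  A libc pipe(fildes) stub is then
 * expected to store the two registers itself, roughly:
 *
 *	fildes[0] = <%o0>;  fildes[1] = <%o1>;
 *
 * (illustrative pseudo-code, not an actual libc excerpt).
 */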
asmlinkage int sparc_pipe(struct pt_regs *regs)
{
	int fd[2];
	int error;

	error = do_pipe_flags(fd, 0);
	if (error)
		goto out;
	regs->u_regs[UREG_I1] = fd[1];
	error = fd[0];
out:
	return error;
}

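/* Extra address-space checks applied to mmap requests.  On sparc32 this is
 * called from the generic mmap path (via the arch_mmap_check() hook, assuming
 * the usual wiring in asm/mman.h): sun4c mappings below 0xe0000000 must stay
 * under the 512MB (0x20000000) limit, and nothing may extend past
 * TASK_SIZE - PAGE_SIZE.
 */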
int sparc_mmap_check(unsigned long addr, unsigned long len)
{
	if (ARCH_SUN4C &&
	    (len > 0x20000000 ||
	     (addr < 0xe0000000 && addr + len > 0x20000000)))
		return -EINVAL;

	/* See asm-sparc/uaccess.h */
	if (len > TASK_SIZE - PAGE_SIZE || addr + len > TASK_SIZE - PAGE_SIZE)
		return -EINVAL;

	return 0;
}

/* Linux version of mmap */

asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long fd,
	unsigned long pgoff)
{
	/* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE
	   we have. */
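	/* Worked example (page sizes here are only illustrative): userspace
	 * always passes pgoff in 4096-byte units.  With 4K kernel pages
	 * (PAGE_SHIFT == 12) the shift below is zero and pgoff is passed
	 * through unchanged; with hypothetical 8K pages (PAGE_SHIFT == 13) a
	 * user pgoff of 6 (byte offset 6 * 4096 = 24576) becomes 6 >> 1 = 3,
	 * i.e. 3 * 8192 = 24576, the same byte offset.
	 */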
	return sys_mmap_pgoff(addr, len, prot, flags, fd,
			      pgoff >> (PAGE_SHIFT - 12));
}

asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long fd,
	unsigned long off)
{
	/* no alignment check? */
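	/* Here off is a raw byte offset; the shift below silently discards
	 * any bits under PAGE_SHIFT rather than returning -EINVAL, which is
	 * presumably what the question above refers to.
	 */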
	return sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
}

long sparc_remap_file_pages(unsigned long start, unsigned long size,
			   unsigned long prot, unsigned long pgoff,
			   unsigned long flags)
{
	/* This works on an existing mmap so we don't need to validate
	 * the range as that was done at the original mmap call.
	 */
	return sys_remap_file_pages(start, size, prot,
				    (pgoff >> (PAGE_SHIFT - 12)), flags);
}

/* we come to here via sys_nis_syscall so it can setup the regs argument */
asmlinkage unsigned long
c_sys_nis_syscall (struct pt_regs *regs)
{
	static int count = 0;

	if (count++ > 5)
		return -ENOSYS;
	printk ("%s[%d]: Unimplemented SPARC system call %d\n",
		current->comm, task_pid_nr(current), (int)regs->u_regs[1]);
#ifdef DEBUG_UNIMP_SYSCALL
	show_regs (regs);
#endif
	return -ENOSYS;
}

/* #define DEBUG_SPARC_BREAKPOINT */

asmlinkage void
sparc_breakpoint (struct pt_regs *regs)
{
	siginfo_t info;

	lock_kernel();
#ifdef DEBUG_SPARC_BREAKPOINT
	printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc);
#endif
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;
	info.si_addr = (void __user *)regs->pc;
	info.si_trapno = 0;
	force_sig_info(SIGTRAP, &info, current);

#ifdef DEBUG_SPARC_BREAKPOINT
	printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc);
#endif
	unlock_kernel();
}

asmlinkage int
sparc_sigaction (int sig, const struct old_sigaction __user *act,
		 struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

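	/* The syscall entry stub hands us the signal number negated; all
	 * callers are expected to do this by now, hence the one-time warning
	 * and the unconditional flip back to a positive value.
	 */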
	WARN_ON_ONCE(sig >= 0);
	sig = -sig;

	if (act) {
		unsigned long mask;

		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
			return -EFAULT;
		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
		__get_user(mask, &act->sa_mask);
		siginitset(&new_ka.sa.sa_mask, mask);
		new_ka.ka_restorer = NULL;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		/* In the clone() case we could copy half consistent
		 * state to the user, however this could sleep and
		 * deadlock us if we held the signal lock on SMP.  So for
		 * now I take the easy way out and do no locking.
		 */
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
			return -EFAULT;
		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
	}

	return ret;
}

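/* Like the generic rt_sigaction(), but SPARC userspace passes an extra
 * "restorer" argument: a pointer to its sigreturn trampoline, which is kept
 * in ka_restorer and used later when the signal frame is set up.
 */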
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 void __user *restorer,
		 size_t sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act) {
		new_ka.ka_restorer = restorer;
		if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
			return -EFAULT;
	}

	return ret;
}

asmlinkage int sys_getdomainname(char __user *name, int len)
{
	int nlen, err;

	if (len < 0)
		return -EINVAL;

	down_read(&uts_sem);

	nlen = strlen(utsname()->domainname) + 1;
	err = -EINVAL;
	if (nlen > len)
		goto out;

	err = -EFAULT;
	if (!copy_to_user(name, utsname()->domainname, nlen))
		err = 0;

out:
	up_read(&uts_sem);
	return err;
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	long __res;
	register long __g1 __asm__ ("g1") = __NR_execve;
	register long __o0 __asm__ ("o0") = (long)(filename);
	register long __o1 __asm__ ("o1") = (long)(argv);
	register long __o2 __asm__ ("o2") = (long)(envp);
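	/* Trap into the kernel the same way userspace does ("t 0x10" is the
	 * Linux syscall trap on sparc32).  Per the SPARC syscall convention
	 * the carry bit signals failure with a positive errno in %o0, so on
	 * error the result is negated into the usual kernel -errno form;
	 * execve only returns here at all if it failed.
	 */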
	asm volatile ("t 0x10\n\t"
		      "bcc 1f\n\t"
		      "mov %%o0, %0\n\t"
		      "sub %%g0, %%o0, %0\n\t"
		      "1:\n\t"
		      : "=r" (__res), "=&r" (__o0)
		      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1)
		      : "cc");
	return __res;
}
302