// SPDX-License-Identifier: GPL-2.0
/* linux/arch/sparc64/kernel/sys_sparc.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/sparc
 * platform.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/ipc.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/timex.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>

#include <asm/utrap.h>
#include <asm/unistd.h>

#include "entry.h"
#include "kernel.h"
#include "systbls.h"

/* #define DEBUG_UNIMP_SYSCALL */

SYSCALL_DEFINE0(getpagesize)
{
	return PAGE_SIZE;
}

/* Does addr --> addr+len fall within 4GB of the VA-space hole or
 * overflow past the end of the 64-bit address space?
 */
static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
{
	unsigned long va_exclude_start, va_exclude_end;

	va_exclude_start = VA_EXCLUDE_START;
	va_exclude_end   = VA_EXCLUDE_END;

	if (unlikely(len >= va_exclude_start))
		return 1;

	if (unlikely((addr + len) < addr))
		return 1;

	if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) ||
		     ((addr + len) >= va_exclude_start &&
		      (addr + len) < va_exclude_end)))
		return 1;

	return 0;
}
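
/* Note: the window checked by invalid_64bit_range() is the
 * spitfire/niagara VA hole widened by a 4GB guard on each side
 * (VA_EXCLUDE_START/VA_EXCLUDE_END come from asm/page_64.h).  The three
 * tests reject, in order: a length that by itself reaches the window,
 * an addr + len that wraps past the top of the 64-bit space, and a
 * range whose start or end lands inside the window.
 */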

/* These functions differ from the default implementations in
 * mm/mmap.c in two ways:
 *
 * 1) For file backed MAP_SHARED mmap()'s we D-cache color align,
 *    for fixed such mappings we just validate what the user gave us.
 * 2) For 64-bit tasks we avoid mapping anything within 4GB of
 *    the spitfire/niagara VA-hole.
 */

static inline unsigned long COLOR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);

	return base + off;
}
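
/* Note: COLOR_ALIGN() rounds the hint up to an SHMLBA boundary and then
 * adds the file offset's position within an SHMLBA-sized window, so a
 * shared mapping ends up with the same D-cache color as other mappings
 * of the same pages.  For illustration only (assuming a hypothetical
 * SHMLBA of 1MB): a hint of 0x10000 with pgoff 0 becomes 0x100000,
 * while a file offset sitting 0x2000 bytes into the window yields
 * 0x102000.
 */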

static unsigned long get_align_mask(struct file *filp, unsigned long flags)
{
	if (filp && is_file_hugepages(filp))
		return huge_page_mask_align(filp);
	if (filp || (flags & MAP_SHARED))
		return PAGE_MASK & (SHMLBA - 1);

	return 0;
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;
	int do_color_align;
	struct vm_unmapped_area_info info = {};
	bool file_hugepage = false;

	if (filp && is_file_hugepages(filp))
		file_hugepage = true;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if (!file_hugepage && (flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;
	if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
		return -ENOMEM;

	do_color_align = 0;
	if ((filp || (flags & MAP_SHARED)) && !file_hugepage)
		do_color_align = 1;

	if (addr) {
		if (do_color_align)
			addr = COLOR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	info.align_mask = get_align_mask(filp, flags);
	if (!file_hugepage)
		info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

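	/* The first pass searched between TASK_UNMAPPED_BASE and the
	 * bottom of the VA hole exclusion window; if it failed and this
	 * (64-bit) task's address space extends above the hole, retry
	 * in the region between VA_EXCLUDE_END and task_size.
	 */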
	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags, vm_flags_t vm_flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long task_size = STACK_TOP32;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info = {};
	bool file_hugepage = false;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	if (filp && is_file_hugepages(filp))
		file_hugepage = true;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if (!file_hugepage && (flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > task_size))
		return -ENOMEM;

	do_color_align = 0;
	if ((filp || (flags & MAP_SHARED)) && !file_hugepage)
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = get_align_mask(filp, flags);
	if (!file_hugepage)
		info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

/* Try to align the mapping as much as possible. */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long align_goal, addr = -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Ok, don't mess with it. */
		return mm_get_unmapped_area(current->mm, NULL, orig_addr, len, pgoff, flags);
	}
	flags &= ~MAP_SHARED;

	align_goal = PAGE_SIZE;
	if (len >= (4UL * 1024 * 1024))
		align_goal = (4UL * 1024 * 1024);
	else if (len >= (512UL * 1024))
		align_goal = (512UL * 1024);
	else if (len >= (64UL * 1024))
		align_goal = (64UL * 1024);

	do {
		addr = mm_get_unmapped_area(current->mm, NULL, orig_addr,
					    len + (align_goal - PAGE_SIZE), pgoff, flags);
		if (!(addr & ~PAGE_MASK)) {
			addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
			break;
		}

		if (align_goal == (4UL * 1024 * 1024))
			align_goal = (512UL * 1024);
		else if (align_goal == (512UL * 1024))
			align_goal = (64UL * 1024);
		else
			align_goal = PAGE_SIZE;
	} while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);

	/* Either the mapping is smaller than 64K, or no suitably aligned
	 * larger area could be obtained.
	 */
	if (addr & ~PAGE_MASK)
		addr = mm_get_unmapped_area(current->mm, NULL, orig_addr, len, pgoff, flags);

	return addr;
}
EXPORT_SYMBOL(get_fb_unmapped_area);
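
/* Note: the loop above searches for a region (align_goal - PAGE_SIZE)
 * bytes larger than requested and then rounds the returned address up
 * to align_goal, so the aligned mapping is guaranteed to fit inside the
 * region that was found.  If even the padded request cannot be
 * satisfied, the goal steps down (4MB -> 512K -> 64K -> PAGE_SIZE)
 * before the final, unaligned fallback search.
 */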

/* Essentially the same as PowerPC.  */
static unsigned long mmap_rnd(void)
{
	unsigned long rnd = 0UL;

	if (current->flags & PF_RANDOMIZE) {
		unsigned long val = get_random_long();
		if (test_thread_flag(TIF_32BIT))
			rnd = (val % (1UL << (23UL-PAGE_SHIFT)));
		else
			rnd = (val % (1UL << (30UL-PAGE_SHIFT)));
	}
	return rnd << PAGE_SHIFT;
}
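
/* Note: mmap_rnd() randomizes the mmap base within an 8MB window
 * (2^23 bytes) for 32-bit tasks and a 1GB window (2^30 bytes) for
 * 64-bit tasks, in page-sized steps, and only when PF_RANDOMIZE is set.
 */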

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = mmap_rnd();
	unsigned long gap;

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	gap = rlim_stack->rlim_cur;
	if (!test_thread_flag(TIF_32BIT) ||
	    (current->personality & ADDR_COMPAT_LAYOUT) ||
	    gap == RLIM_INFINITY ||
	    sysctl_legacy_va_layout) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		clear_bit(MMF_TOPDOWN, &mm->flags);
	} else {
		/* We know it's 32-bit */
		unsigned long task_size = STACK_TOP32;

		if (gap < 128 * 1024 * 1024)
			gap = 128 * 1024 * 1024;
		if (gap > (task_size / 6 * 5))
			gap = (task_size / 6 * 5);

		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
		set_bit(MMF_TOPDOWN, &mm->flags);
	}
}
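
/* Note: in the top-down case the gap reserved for the stack is clamped
 * to at least 128MB and at most 5/6 of the 32-bit task size, so
 * mmap_base = PAGE_ALIGN(STACK_TOP32 - gap - random_factor) always
 * leaves room for both the stack and the mmap area.
 */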

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
SYSCALL_DEFINE0(sparc_pipe)
{
	int fd[2];
	int error;

	error = do_pipe_flags(fd, 0);
	if (error)
		goto out;
	current_pt_regs()->u_regs[UREG_I1] = fd[1];
	error = fd[0];
out:
	return error;
}
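
/* Note: following the traditional SunOS convention, sparc_pipe()
 * returns both descriptors in registers: fd[0] as the syscall return
 * value and fd[1] in UREG_I1, rather than through a user-supplied
 * array as on most other Linux architectures.
 */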

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly.
 */

SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second,
		unsigned long, third, void __user *, ptr, long, fifth)
{
	long err;

	if (!IS_ENABLED(CONFIG_SYSVIPC))
		return -ENOSYS;

	/* No need for backward compatibility. We can start fresh... */
	if (call <= SEMTIMEDOP) {
		switch (call) {
		case SEMOP:
			err = ksys_semtimedop(first, ptr,
					      (unsigned int)second, NULL);
			goto out;
		case SEMTIMEDOP:
			err = ksys_semtimedop(first, ptr, (unsigned int)second,
				(const struct __kernel_timespec __user *)
					      (unsigned long) fifth);
			goto out;
		case SEMGET:
			err = ksys_semget(first, (int)second, (int)third);
			goto out;
		case SEMCTL: {
			err = ksys_old_semctl(first, second,
					      (int)third | IPC_64,
					      (unsigned long) ptr);
			goto out;
		}
		default:
			err = -ENOSYS;
			goto out;
		}
	}
	if (call <= MSGCTL) {
		switch (call) {
		case MSGSND:
			err = ksys_msgsnd(first, ptr, (size_t)second,
					 (int)third);
			goto out;
		case MSGRCV:
			err = ksys_msgrcv(first, ptr, (size_t)second, fifth,
					 (int)third);
			goto out;
		case MSGGET:
			err = ksys_msgget((key_t)first, (int)second);
			goto out;
		case MSGCTL:
			err = ksys_old_msgctl(first, (int)second | IPC_64, ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		}
	}
	if (call <= SHMCTL) {
		switch (call) {
		case SHMAT: {
			ulong raddr;
			err = do_shmat(first, ptr, (int)second, &raddr, SHMLBA);
			if (!err) {
				if (put_user(raddr,
					     (ulong __user *) third))
					err = -EFAULT;
			}
			goto out;
		}
		case SHMDT:
			err = ksys_shmdt(ptr);
			goto out;
		case SHMGET:
			err = ksys_shmget(first, (size_t)second, (int)third);
			goto out;
		case SHMCTL:
			err = ksys_old_shmctl(first, (int)second | IPC_64, ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		}
	} else {
		err = -ENOSYS;
	}
out:
	return err;
}

SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
{
	long ret;

	if (personality(current->personality) == PER_LINUX32 &&
	    personality(personality) == PER_LINUX)
		personality |= PER_LINUX32;
	ret = sys_personality(personality);
	if (personality(ret) == PER_LINUX32)
		ret &= ~PER_LINUX32;

	return ret;
}
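
/* Note: this wrapper keeps a PER_LINUX32 task that asks for PER_LINUX
 * in the 32-bit personality, and strips PER_LINUX32 from the value it
 * reports back, so compat tasks keep seeing plain PER_LINUX.
 */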

int sparc_mmap_check(unsigned long addr, unsigned long len)
{
	if (test_thread_flag(TIF_32BIT)) {
		if (len >= STACK_TOP32)
			return -EINVAL;

		if (addr > STACK_TOP32 - len)
			return -EINVAL;
	} else {
		if (len >= VA_EXCLUDE_START)
			return -EINVAL;

		if (invalid_64bit_range(addr, len))
			return -EINVAL;
	}

	return 0;
}

/* Linux version of mmap */
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags, unsigned long, fd,
		unsigned long, off)
{
	unsigned long retval = -EINVAL;

	if ((off + PAGE_ALIGN(len)) < off)
		goto out;
	if (off & ~PAGE_MASK)
		goto out;
	retval = ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
	return retval;
}

SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len)
{
	if (invalid_64bit_range(addr, len))
		return -EINVAL;

	return vm_munmap(addr, len);
}

SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	if (test_thread_flag(TIF_32BIT))
		return -EINVAL;
	return sys_mremap(addr, old_len, new_len, flags, new_addr);
}

SYSCALL_DEFINE0(nis_syscall)
{
	static int count;
	struct pt_regs *regs = current_pt_regs();

	/* Don't make the system unusable if something gets stuck here */
	if (count++ > 5)
		return -ENOSYS;

	printk("Unimplemented SPARC system call %ld\n", regs->u_regs[1]);
#ifdef DEBUG_UNIMP_SYSCALL
	show_regs(regs);
#endif

	return -ENOSYS;
}

/* #define DEBUG_SPARC_BREAKPOINT */

asmlinkage void sparc_breakpoint(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
#ifdef DEBUG_SPARC_BREAKPOINT
	printk("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
	force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->tpc);
#ifdef DEBUG_SPARC_BREAKPOINT
	printk("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
	exception_exit(prev_state);
}

SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
{
	int nlen, err;
	char tmp[__NEW_UTS_LEN + 1];

	if (len < 0)
		return -EINVAL;

	down_read(&uts_sem);

	nlen = strlen(utsname()->domainname) + 1;
	err = -EINVAL;
	if (nlen > len)
		goto out_unlock;
	memcpy(tmp, utsname()->domainname, nlen);

	up_read(&uts_sem);

	if (copy_to_user(name, tmp, nlen))
		return -EFAULT;
	return 0;

out_unlock:
	up_read(&uts_sem);
	return err;
}
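
/* Note: the domain name is copied into an on-stack buffer while holding
 * uts_sem for reading, and the semaphore is dropped before the
 * copy_to_user(), so a page fault during the user copy cannot sleep
 * with the lock held.
 */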

SYSCALL_DEFINE1(sparc_adjtimex, struct __kernel_timex __user *, txc_p)
{
	struct __kernel_timex txc;
	struct __kernel_old_timeval *tv = (void *)&txc.time;
	int ret;

	/* Copy the user data space into the kernel copy
	 * structure. But bear in mind that the structures
	 * may change
	 */
	if (copy_from_user(&txc, txc_p, sizeof(txc)))
		return -EFAULT;

	/*
	 * Override for the sparc64 specific timeval type: tv_usec
	 * is 32 bits wide here instead of the 64 bits used in
	 * __kernel_timex.
	 */
	txc.time.tv_usec = tv->tv_usec;
	ret = do_adjtimex(&txc);
	tv->tv_usec = txc.time.tv_usec;

	return copy_to_user(txc_p, &txc, sizeof(txc)) ? -EFAULT : ret;
}

SYSCALL_DEFINE2(sparc_clock_adjtime, const clockid_t, which_clock,
		struct __kernel_timex __user *, txc_p)
{
	struct __kernel_timex txc;
	struct __kernel_old_timeval *tv = (void *)&txc.time;
	int ret;

	if (!IS_ENABLED(CONFIG_POSIX_TIMERS)) {
		pr_err_once("process %d (%s) attempted a POSIX timer syscall "
		    "while CONFIG_POSIX_TIMERS is not set\n",
		    current->pid, current->comm);

		return -ENOSYS;
	}

	/* Copy the user data space into the kernel copy
	 * structure. But bear in mind that the structures
	 * may change
	 */
	if (copy_from_user(&txc, txc_p, sizeof(txc)))
		return -EFAULT;

	/*
	 * Override for the sparc64 specific timeval type: tv_usec
	 * is 32 bits wide here instead of the 64 bits used in
	 * __kernel_timex.
	 */
	txc.time.tv_usec = tv->tv_usec;
	ret = do_clock_adjtime(which_clock, &txc);
	tv->tv_usec = txc.time.tv_usec;

	return copy_to_user(txc_p, &txc, sizeof(txc)) ? -EFAULT : ret;
}

SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
		utrap_handler_t, new_p, utrap_handler_t, new_d,
		utrap_handler_t __user *, old_p,
		utrap_handler_t __user *, old_d)
{
	if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
		return -EINVAL;
	if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
		if (old_p) {
			if (!current_thread_info()->utraps) {
				if (put_user(NULL, old_p))
					return -EFAULT;
			} else {
				if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
					return -EFAULT;
			}
		}
		if (old_d) {
			if (put_user(NULL, old_d))
				return -EFAULT;
		}
		return 0;
	}
	if (!current_thread_info()->utraps) {
		current_thread_info()->utraps =
			kcalloc(UT_TRAP_INSTRUCTION_31 + 1, sizeof(long),
				GFP_KERNEL);
		if (!current_thread_info()->utraps)
			return -ENOMEM;
		current_thread_info()->utraps[0] = 1;
	} else {
		if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
		    current_thread_info()->utraps[0] > 1) {
			unsigned long *p = current_thread_info()->utraps;

			current_thread_info()->utraps =
				kmalloc_array(UT_TRAP_INSTRUCTION_31 + 1,
					      sizeof(long),
					      GFP_KERNEL);
			if (!current_thread_info()->utraps) {
				current_thread_info()->utraps = p;
				return -ENOMEM;
			}
			p[0]--;
			current_thread_info()->utraps[0] = 1;
			memcpy(current_thread_info()->utraps+1, p+1,
			       UT_TRAP_INSTRUCTION_31*sizeof(long));
		}
	}
	if (old_p) {
		if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
			return -EFAULT;
	}
	if (old_d) {
		if (put_user(NULL, old_d))
			return -EFAULT;
	}
	current_thread_info()->utraps[type] = (long)new_p;

	return 0;
}
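
/* Note on the utraps array: slot 0 holds a reference count shared by
 * tasks that inherited the table, and slots 1..UT_TRAP_INSTRUCTION_31
 * hold the handlers.  Installing a different handler while the table is
 * shared (refcount > 1) first duplicates it, which is why the copy path
 * above decrements p[0] and resets the new table's count to 1.
 */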

SYSCALL_DEFINE1(memory_ordering, unsigned long, model)
{
	struct pt_regs *regs = current_pt_regs();
	if (model >= 3)
		return -EINVAL;
	regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
	return 0;
}
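
/* Note: the requested memory model (0 = TSO, 1 = PSO, 2 = RMO per the
 * SPARC V9 PSTATE.MM encoding) is written into the TSTATE.MM field,
 * hence the shift by 14 above; values >= 3 are rejected because no
 * fourth model is defined.
 */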

SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
		struct sigaction __user *, oact, void __user *, restorer,
		size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act) {
		new_ka.ka_restorer = restorer;
		if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
			return -EFAULT;
	}

	return ret;
}
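
/* Note: unlike the generic rt_sigaction, this sparc variant takes the
 * signal return trampoline as an explicit "restorer" argument and
 * stores it in ka_restorer, instead of reading it from SA_RESTORER in
 * the sigaction flags.
 */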

SYSCALL_DEFINE0(kern_features)
{
	return KERN_FEATURE_MIXED_MODE_STACK;
}