// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) IBM Corporation, 2011
 *
 * Authors: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
 *          Anton Blanchard <anton@au.ibm.com>
 */
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <asm/switch_to.h>

int enter_vmx_usercopy(void)
{
	if (in_interrupt())
		return 0;

	preempt_disable();
	/*
	 * We need to disable page faults as they can call schedule and
	 * thus make us lose the VMX context. So on page faults, we just
	 * fail, which causes a fallback to the normal non-VMX copy.
	 */
	pagefault_disable();

	enable_kernel_altivec();

	return 1;
}
EXPORT_SYMBOL(enter_vmx_usercopy);

/*
 * This function must return 0 because it is tail-call optimised when
 * called from __copy_tofrom_user_power7, which returns 0 on success.
 */
int exit_vmx_usercopy(void)
{
	disable_kernel_altivec();
	pagefault_enable();
	preempt_enable_no_resched();
	/*
	 * We must never explicitly call schedule (including via
	 * preempt_enable()) while in a KUAP-unlocked user copy, because the
	 * AMR register will not be saved and restored across a context
	 * switch. However, preemptible kernels need to be preempted as soon
	 * as possible if need_resched is set and we are preemptible. The
	 * hack here is to schedule the decrementer to fire and reschedule
	 * for us if necessary.
	 */
	if (need_irq_preemption() && need_resched())
		set_dec(1);
	return 0;
}
EXPORT_SYMBOL(exit_vmx_usercopy);
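
/*
 * A minimal illustrative sketch of the calling convention above: the real
 * callers are assembly (__copy_tofrom_user_power7 in copyuser_power7.S),
 * but in C the pattern looks roughly like this.  vmx_copy_chunk() and
 * fallback_copy_tofrom_user() are hypothetical stand-ins for the
 * VMX-accelerated copy loop and the plain non-VMX copy, each assumed to
 * return the number of bytes left uncopied, so the sketch is kept out of
 * the build.
 */
#if 0
static unsigned long example_copy_tofrom_user_vmx(void *to, const void *from,
						   unsigned long n)
{
	/* In interrupt context, fall back to the plain, non-VMX copy. */
	if (!enter_vmx_usercopy())
		return fallback_copy_tofrom_user(to, from, n);

	/*
	 * Page faults are disabled here, so a faulting access in the VMX
	 * loop fails instead of scheduling away the VMX context, and we
	 * retry with the plain copy.
	 */
	if (vmx_copy_chunk(to, from, n)) {
		exit_vmx_usercopy();
		return fallback_copy_tofrom_user(to, from, n);
	}

	/* Tail call: exit_vmx_usercopy() returns 0, i.e. nothing left to copy. */
	return exit_vmx_usercopy();
}
#endif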

/*
 * This can be called from the kexec copy_page() path with the MMU off. The
 * kexec code sets preempt_count to HARDIRQ_OFFSET, so we return early here.
 * Since in_interrupt() is always inlined, __no_sanitize_address on this
 * function is sufficient to avoid KASAN shadow memory accesses in real
 * mode.
 */
int __no_sanitize_address enter_vmx_ops(void)
{
	if (in_interrupt())
		return 0;

	preempt_disable();

	enable_kernel_altivec();

	return 1;
}

/*
 * All calls to this function will be optimised into tail calls. We are
 * passed a pointer to the destination, which we return as required by a
 * memcpy implementation.
 */
void *exit_vmx_ops(void *dest)
{
	disable_kernel_altivec();
	preempt_enable();
	return dest;
}
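
/*
 * A minimal illustrative sketch of the memcpy-style convention above: the
 * real callers are assembly (e.g. memcpy_power7 in memcpy_power7.S and the
 * kexec copy_page() path), but in C the pattern looks roughly like this.
 * vmx_memcpy_chunk() and fallback_memcpy() are hypothetical stand-ins, so
 * the sketch is kept out of the build.
 */
#if 0
static void *example_memcpy_vmx(void *dest, const void *src, unsigned long n)
{
	/* In interrupt context (or kexec real mode), skip the VMX path. */
	if (!enter_vmx_ops())
		return fallback_memcpy(dest, src, n);

	vmx_memcpy_chunk(dest, src, n);

	/* Tail call: exit_vmx_ops() hands dest back, as memcpy must. */
	return exit_vmx_ops(dest);
}
#endif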