// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) IBM Corporation, 2011
 *
 * Authors: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
 *          Anton Blanchard <anton@au.ibm.com>
 */
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <asm/switch_to.h>

int enter_vmx_usercopy(void)
{
	if (in_interrupt())
		return 0;

	preempt_disable();
	/*
	 * We need to disable page faults as they can call schedule and
	 * thus make us lose the VMX context. So on page faults, we just
	 * fail which will cause a fallback to the normal non-vmx copy.
	 */
	pagefault_disable();

	enable_kernel_altivec();

	return 1;
}

/*
 * This function must return 0 because we tail call optimise when calling
 * from __copy_tofrom_user_power7 which returns 0 on success.
 */
int exit_vmx_usercopy(void)
{
	disable_kernel_altivec();
	pagefault_enable();
	preempt_enable_no_resched();
	/*
	 * Must never explicitly call schedule (including preempt_enable())
	 * while in a kuap-unlocked user copy, because the AMR register will
	 * not be saved and restored across context switch. However preempt
	 * kernels need to be preempted as soon as possible if need_resched is
	 * set and we are preemptible. The hack here is to schedule a
	 * decrementer to fire here and reschedule for us if necessary.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT) && need_resched())
		set_dec(1);
	return 0;
}

int enter_vmx_ops(void)
{
	if (in_interrupt())
		return 0;

	preempt_disable();

	enable_kernel_altivec();

	return 1;
}

/*
 * All calls to this function will be optimised into tail calls. We are
 * passed a pointer to the destination which we return as required by a
 * memcpy implementation.
 */
void *exit_vmx_ops(void *dest)
{
	disable_kernel_altivec();
	preempt_enable();
	return dest;
}
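
/*
 * Illustrative sketch only, not part of this file: roughly how a copy
 * routine is expected to use the helpers above. The real caller,
 * __copy_tofrom_user_power7, is hand-written assembly; vmx_copy_loop()
 * and scalar_copy_loop() below are hypothetical stand-ins for its
 * vectorised and fallback copy paths, each assumed to return the number
 * of bytes left uncopied (0 on success).
 */
#if 0	/* example only, never compiled */
static unsigned long example_copy_with_vmx(void *to, const void *from,
					   unsigned long n)
{
	unsigned long left;

	if (!enter_vmx_usercopy())
		/* In interrupt context: no VMX, use the scalar fallback. */
		return scalar_copy_loop(to, from, n);

	left = vmx_copy_loop(to, from, n);
	if (left) {
		exit_vmx_usercopy();
		return left;
	}

	/*
	 * On success the assembly caller simply tail-calls
	 * exit_vmx_usercopy(), whose 0 return value doubles as the
	 * "0 bytes uncopied" result.
	 */
	return exit_vmx_usercopy();
}
#endif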