/*
 * AT_SYSINFO entry point
 */

#include <asm/dwarf2.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>

/*
 * First get the common code for the sigreturn entry points.
 * This must come first.
 */
#include "sigreturn.S"

	.text
	.globl __kernel_vsyscall
	.type __kernel_vsyscall,@function
	ALIGN
__kernel_vsyscall:
	CFI_STARTPROC
	/*
	 * Reshuffle regs so that any of the entry instructions will
	 * preserve enough state.
	 *
	 * A really nice entry sequence would be:
	 *	pushl	%edx
	 *	pushl	%ecx
	 *	movl	%esp, %ecx
	 *
	 * Unfortunately, naughty Android versions between July and December
	 * 2015 actually hardcode the traditional Linux SYSENTER entry
	 * sequence.  That is severely broken for a number of reasons (ask
	 * anyone with an AMD CPU, for example).  Nonetheless, we try to keep
	 * it working approximately as well as it ever worked.
	 *
	 * This link may elucidate some of the history:
	 *   https://android-review.googlesource.com/#/q/Iac3295376d61ef83e713ac9b528f3b50aa780cd7
	 * Personally, I find it hard to understand what's going on there.
	 *
	 * Note to future userspace developers: DO NOT USE SYSENTER IN YOUR
	 * CODE.  Execute an indirect call to the address in the AT_SYSINFO
	 * auxv entry.  That is the ONLY correct way to make a fast 32-bit
	 * system call on Linux.  (Open-coding int $0x80 is also fine, but
	 * it's slow.)
	 */

	/* Save the registers that the fast system call paths can clobber. */
	pushl	%ecx
	CFI_ADJUST_CFA_OFFSET	4
	CFI_REL_OFFSET		ecx, 0
	pushl	%edx
	CFI_ADJUST_CFA_OFFSET	4
	CFI_REL_OFFSET		edx, 0
	pushl	%ebp
	CFI_ADJUST_CFA_OFFSET	4
	CFI_REL_OFFSET		ebp, 0

	#define SYSENTER_SEQUENCE	"movl %esp, %ebp; sysenter"
	#define SYSCALL_SEQUENCE	"movl %ecx, %ebp; syscall"

#ifdef CONFIG_X86_64
	/* If SYSENTER (Intel) or SYSCALL32 (AMD) is available, use it. */
	ALTERNATIVE_2 "", SYSENTER_SEQUENCE, X86_FEATURE_SYSENTER32, \
		      SYSCALL_SEQUENCE, X86_FEATURE_SYSCALL32
#else
	ALTERNATIVE "", SYSENTER_SEQUENCE, X86_FEATURE_SEP
#endif

	/* Enter using int $0x80 */
	int	$0x80
GLOBAL(int80_landing_pad)

	/*
	 * Restore EDX and ECX in case they were clobbered.  EBP is not
	 * clobbered (the kernel restores it), but it's cleaner and
	 * probably faster to pop it than to adjust ESP using addl.
	 */
	popl	%ebp
	CFI_RESTORE		ebp
	CFI_ADJUST_CFA_OFFSET	-4
	popl	%edx
	CFI_RESTORE		edx
	CFI_ADJUST_CFA_OFFSET	-4
	popl	%ecx
	CFI_RESTORE		ecx
	CFI_ADJUST_CFA_OFFSET	-4
	ret
	CFI_ENDPROC

	.size __kernel_vsyscall,.-__kernel_vsyscall
	.previous
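
/*
 * Illustrative sketch (comment only, not assembled into the vDSO): one
 * hedged example of the calling convention described above, i.e. an
 * indirect call through the AT_SYSINFO auxv entry.  It assumes the
 * program's startup code has already located AT_SYSINFO in the auxiliary
 * vector and stashed its value in a variable; the name `sysinfo_ptr` is
 * hypothetical, not something this file defines.  Arguments use the same
 * registers as int $0x80: eax = syscall number, and ebx/ecx/edx/esi/edi/ebp
 * carry arguments 1-6.
 *
 *	movl	$20, %eax		# __NR_getpid on i386
 *	call	*sysinfo_ptr		# indirect call; lands in __kernel_vsyscall
 *	# the pid is now in %eax
 *
 * Hardcoding sysenter or syscall at the call site instead is exactly the
 * Android mistake described in the comment above.
 */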