xref: /linux/arch/x86/include/asm/vsyscall.h (revision 36f353a1ebf88280f58d1ebfe2731251d9159456)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_VSYSCALL_H
#define _ASM_X86_VSYSCALL_H

#include <linux/seqlock.h>
#include <uapi/asm/vsyscall.h>
#include <asm/page_types.h>

#ifdef CONFIG_X86_VSYSCALL_EMULATION
extern void map_vsyscall(void);
extern void set_vsyscall_pgtable_user_bits(pgd_t *root);

/*
 * Called on an instruction fetch fault in the vsyscall page.
 * Returns true if the fault was handled.
 */
extern bool emulate_vsyscall(unsigned long error_code,
			     struct pt_regs *regs, unsigned long address);
#else
static inline void map_vsyscall(void) {}
static inline bool emulate_vsyscall(unsigned long error_code,
				    struct pt_regs *regs, unsigned long address)
{
	return false;
}
#endif

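/*
 * Usage sketch (illustrative, not part of this header): the x86 page
 * fault handler in arch/x86/mm/fault.c lets emulate_vsyscall() claim
 * faults that land in the vsyscall page, roughly along these lines.
 * Variable names here are assumptions for the example:
 *
 *	if (is_vsyscall_vaddr(address)) {
 *		if (emulate_vsyscall(error_code, regs, address))
 *			return;
 *	}
 */
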
/*
 * The (legacy) vsyscall page is the lone page in the kernel portion
 * of the address space that has user-accessible permissions.
 */
static inline bool is_vsyscall_vaddr(unsigned long vaddr)
{
	return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
}
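
/*
 * Usage sketch (assumption, modeled on fault_in_kernel_space() in
 * arch/x86/mm/fault.c): the vsyscall page sits above TASK_SIZE_MAX but
 * must still be treated as a user address, so callers special-case it:
 *
 *	if (IS_ENABLED(CONFIG_X86_64) && is_vsyscall_vaddr(address))
 *		return false;
 *	return address >= TASK_SIZE_MAX;
 */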

#endif /* _ASM_X86_VSYSCALL_H */