/* SPDX-License-Identifier: GPL-2.0 */
/* arch/arm64/include/asm/kvm_ptrauth.h: Guest/host ptrauth save/restore
 * Copyright 2019 Arm Limited
 * Authors: Mark Rutland <mark.rutland@arm.com>
 *          Amit Daniel Kachhap <amit.kachhap@arm.com>
 */

#ifndef __ASM_KVM_PTRAUTH_H
#define __ASM_KVM_PTRAUTH_H

#ifdef __ASSEMBLY__

#include <asm/sysreg.h>

#ifdef	CONFIG_ARM64_PTR_AUTH

#define PTRAUTH_REG_OFFSET(x)	(x - CPU_APIAKEYLO_EL1)

/*
 * The CPU_AP*_EL1 values exceed the immediate offset range (512) of the
 * stp instruction, so the macros below take CPU_APIAKEYLO_EL1 as the base
 * and calculate each key's offset from it, avoiding an extra add
 * instruction. These macros assume that the key offsets follow the order
 * of the sysreg enum in kvm_host.h.
 */
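/*
 * For illustration (a sketch, assuming the enum order above and that each
 * key half occupies one u64 slot in sys_regs[]): the relative offsets used
 * by the stp/ldp pairs below would be APIA = 0, APIB = 16, APDA = 32,
 * APDB = 48 and APGA = 64 bytes from CPU_APIAKEYLO_EL1, with each 16-byte
 * pair covering one LO/HI key.
 */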
.macro	ptrauth_save_state base, reg1, reg2
	mrs_s	\reg1, SYS_APIAKEYLO_EL1
	mrs_s	\reg2, SYS_APIAKEYHI_EL1
	stp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIAKEYLO_EL1)]
	mrs_s	\reg1, SYS_APIBKEYLO_EL1
	mrs_s	\reg2, SYS_APIBKEYHI_EL1
	stp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIBKEYLO_EL1)]
	mrs_s	\reg1, SYS_APDAKEYLO_EL1
	mrs_s	\reg2, SYS_APDAKEYHI_EL1
	stp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDAKEYLO_EL1)]
	mrs_s	\reg1, SYS_APDBKEYLO_EL1
	mrs_s	\reg2, SYS_APDBKEYHI_EL1
	stp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDBKEYLO_EL1)]
	mrs_s	\reg1, SYS_APGAKEYLO_EL1
	mrs_s	\reg2, SYS_APGAKEYHI_EL1
	stp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APGAKEYLO_EL1)]
.endm

.macro	ptrauth_restore_state base, reg1, reg2
	ldp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIAKEYLO_EL1)]
	msr_s	SYS_APIAKEYLO_EL1, \reg1
	msr_s	SYS_APIAKEYHI_EL1, \reg2
	ldp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIBKEYLO_EL1)]
	msr_s	SYS_APIBKEYLO_EL1, \reg1
	msr_s	SYS_APIBKEYHI_EL1, \reg2
	ldp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDAKEYLO_EL1)]
	msr_s	SYS_APDAKEYLO_EL1, \reg1
	msr_s	SYS_APDAKEYHI_EL1, \reg2
	ldp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDBKEYLO_EL1)]
	msr_s	SYS_APDBKEYLO_EL1, \reg1
	msr_s	SYS_APDBKEYHI_EL1, \reg2
	ldp	\reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APGAKEYLO_EL1)]
	msr_s	SYS_APGAKEYLO_EL1, \reg1
	msr_s	SYS_APGAKEYHI_EL1, \reg2
.endm

/*
 * Both the ptrauth_switch_to_guest and ptrauth_switch_to_host macros check
 * for the presence of ARM64_HAS_ADDRESS_AUTH, which is defined as
 * (ARM64_HAS_ADDRESS_AUTH_ARCH || ARM64_HAS_ADDRESS_AUTH_IMP_DEF), and
 * only then proceed with the save/restore of the Pointer Authentication
 * key registers, provided the feature is enabled for the guest.
 */
.macro ptrauth_switch_to_guest g_ctxt, reg1, reg2, reg3
alternative_if_not ARM64_HAS_ADDRESS_AUTH
	b	.L__skip_switch\@
alternative_else_nop_endif
	mrs	\reg1, hcr_el2
	and	\reg1, \reg1, #(HCR_API | HCR_APK)
	cbz	\reg1, .L__skip_switch\@
	add	\reg1, \g_ctxt, #CPU_APIAKEYLO_EL1
	ptrauth_restore_state	\reg1, \reg2, \reg3
.L__skip_switch\@:
.endm

.macro ptrauth_switch_to_host g_ctxt, h_ctxt, reg1, reg2, reg3
alternative_if_not ARM64_HAS_ADDRESS_AUTH
	b	.L__skip_switch\@
alternative_else_nop_endif
	mrs	\reg1, hcr_el2
	and	\reg1, \reg1, #(HCR_API | HCR_APK)
	cbz	\reg1, .L__skip_switch\@
	add	\reg1, \g_ctxt, #CPU_APIAKEYLO_EL1
	ptrauth_save_state	\reg1, \reg2, \reg3
	add	\reg1, \h_ctxt, #CPU_APIAKEYLO_EL1
	ptrauth_restore_state	\reg1, \reg2, \reg3
	isb
.L__skip_switch\@:
.endm
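
/*
 * Example usage (an illustrative sketch, not part of this header): these
 * macros are intended to be invoked from the hyp world-switch assembly
 * (e.g. arch/arm64/kvm/hyp/entry.S) around guest entry and exit. The
 * register and context choices below are hypothetical:
 *
 *	// entering the guest: restore the guest's keys if it may use them
 *	ptrauth_switch_to_guest x29, x0, x1, x2
 *
 *	// leaving the guest: save the guest's keys, restore the host's
 *	ptrauth_switch_to_host x1, x2, x3, x4, x5
 */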

#else /* !CONFIG_ARM64_PTR_AUTH */
.macro ptrauth_switch_to_guest g_ctxt, reg1, reg2, reg3
.endm
.macro ptrauth_switch_to_host g_ctxt, h_ctxt, reg1, reg2, reg3
.endm
#endif /* CONFIG_ARM64_PTR_AUTH */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_KVM_PTRAUTH_H */