/* SPDX-License-Identifier: GPL-2.0 */

#define DR7_RESET_VALUE	0x400

extern u64 sev_hv_features;
extern u64 sev_secrets_pa;

/* #VC handler runtime per-CPU data */
struct sev_es_runtime_data {
	struct ghcb ghcb_page;

	/*
	 * Reserve one page per CPU as backup storage for the unencrypted GHCB.
	 * It is needed when an NMI happens while the #VC handler uses the real
	 * GHCB, and the NMI handler itself is causing another #VC exception. In
	 * that case the GHCB content of the first handler needs to be backed up
	 * and restored.
	 */
	struct ghcb backup_ghcb;

	/*
	 * Mark the per-CPU GHCBs as in-use to detect nested #VC exceptions.
	 * There is no need for it to be atomic, because nothing is written to
	 * the GHCB between the read and the write of ghcb_active. So it is safe
	 * to use it when a nested #VC exception happens before the write.
	 *
	 * This is necessary for example in the #VC->NMI->#VC case when the NMI
	 * happens while the first #VC handler uses the GHCB. When the NMI code
	 * raises a second #VC exception it might overwrite the contents of the
	 * GHCB written by the first handler. To avoid this, the content of the
	 * GHCB is saved and restored when the GHCB is detected to be in use
	 * already.
	 */
	bool ghcb_active;
	bool backup_ghcb_active;

	/*
	 * Cached DR7 value - write it on DR7 writes and return it on reads.
	 * That value will never make it to the real hardware DR7 as debugging
	 * is currently unsupported in SEV-ES guests.
	 */
	unsigned long dr7;
};
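
/*
 * Illustrative sketch only (not the in-tree #VC handler code): DR7 accesses
 * intercepted via #VC operate on the cached ->dr7 field above and never
 * reach the hardware DR7 register:
 *
 *	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
 *
 *	if (dr7_write)
 *		data->dr7 = val;	// cache the guest-visible value only
 *	else
 *		val = data->dr7;	// reads return the cached value
 */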

struct ghcb_state {
	struct ghcb *ghcb;
};

extern struct svsm_ca boot_svsm_ca_page;

struct ghcb *__sev_get_ghcb(struct ghcb_state *state);
void __sev_put_ghcb(struct ghcb_state *state);
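
/*
 * Typical caller pattern, shown as an illustrative sketch only (the exit-code
 * handling around it is omitted and not part of this header):
 *
 *	struct ghcb_state state;
 *	struct ghcb *ghcb;
 *
 *	ghcb = __sev_get_ghcb(&state);
 *	... fill the GHCB and exit to the hypervisor ...
 *	__sev_put_ghcb(&state);
 *
 * If the per-CPU GHCB is already active (e.g. #VC -> NMI -> #VC),
 * __sev_get_ghcb() is expected to hand out the GHCB after saving its contents
 * to backup_ghcb, and __sev_put_ghcb() to restore them, as described in
 * struct sev_es_runtime_data above.
 */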

DECLARE_PER_CPU(struct sev_es_runtime_data *, runtime_data);
DECLARE_PER_CPU(struct sev_es_save_area *, sev_vmsa);

void early_set_pages_state(unsigned long vaddr, unsigned long paddr,
			   unsigned long npages, const struct psc_desc *desc);

DECLARE_PER_CPU(struct svsm_ca *, svsm_caa);
DECLARE_PER_CPU(u64, svsm_caa_pa);

extern u64 boot_svsm_caa_pa;

enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt *ctxt);
void vc_forward_exception(struct es_em_ctxt *ctxt);

static inline u64 sev_es_rd_ghcb_msr(void)
{
	return native_rdmsrq(MSR_AMD64_SEV_ES_GHCB);
}

static __always_inline void sev_es_wr_ghcb_msr(u64 val)
{
	u32 low, high;

	low = (u32)(val);
	high = (u32)(val >> 32);

	native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high);
}
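
/*
 * Illustrative sketch only: these accessors are typically used for the GHCB
 * MSR protocol, i.e. write a request, exit to the hypervisor, read back the
 * response (the GHCB_MSR_* request encodings and the VMGEXIT() macro are
 * assumed to be provided elsewhere, not by this header):
 *
 *	sev_es_wr_ghcb_msr(request);
 *	VMGEXIT();
 *	response = sev_es_rd_ghcb_msr();
 */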

enum es_result sev_es_ghcb_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt, bool write);

u64 get_hv_features(void);

const struct snp_cpuid_table *snp_cpuid_get_table(void);
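
/*
 * Illustrative sketch only: a lookup in the table returned by
 * snp_cpuid_get_table(). The field names (count, fn[].eax_in) are assumptions
 * about the SNP CPUID table layout and are not defined by this header:
 *
 *	const struct snp_cpuid_table *t = snp_cpuid_get_table();
 *	unsigned int i;
 *
 *	for (i = 0; i < t->count; i++) {
 *		if (t->fn[i].eax_in == leaf)
 *			return &t->fn[i];
 *	}
 */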