xref: /linux/arch/x86/coco/sev/internal.h (revision ca8f421ea0d3f1d39f773e14f68f93c978e470ef)
1c1e8980fSBorislav Petkov (AMD) /* SPDX-License-Identifier: GPL-2.0 */
2f01c6489SBorislav Petkov (AMD) #ifndef __X86_COCO_SEV_INTERNAL_H__
3f01c6489SBorislav Petkov (AMD) #define __X86_COCO_SEV_INTERNAL_H__
4c1e8980fSBorislav Petkov (AMD) 
/* Architectural reset value of DR7 (only bit 10, which is reserved-as-1, set). */
#define DR7_RESET_VALUE        0x400

/* Hypervisor feature bitmap, cached at boot (see get_hv_features() below). */
extern u64 sev_hv_features;
/* Physical address of the SNP secrets page — NOTE(review): inferred from name, confirm against setup code. */
extern u64 sev_secrets_pa;
9c1e8980fSBorislav Petkov (AMD) 
/* #VC handler runtime per-CPU data */
struct sev_es_runtime_data {
	/* The per-CPU GHCB (Guest-Hypervisor Communication Block) page itself. */
	struct ghcb ghcb_page;

	/*
	 * Reserve one page per CPU as backup storage for the unencrypted GHCB.
	 * It is needed when an NMI happens while the #VC handler uses the real
	 * GHCB, and the NMI handler itself is causing another #VC exception. In
	 * that case the GHCB content of the first handler needs to be backed up
	 * and restored.
	 */
	struct ghcb backup_ghcb;

	/*
	 * Mark the per-cpu GHCBs as in-use to detect nested #VC exceptions.
	 * There is no need for it to be atomic, because nothing is written to
	 * the GHCB between the read and the write of ghcb_active. So it is safe
	 * to use it when a nested #VC exception happens before the write.
	 *
	 * This is necessary for example in the #VC->NMI->#VC case when the NMI
	 * happens while the first #VC handler uses the GHCB. When the NMI code
	 * raises a second #VC handler it might overwrite the contents of the
	 * GHCB written by the first handler. To avoid this the content of the
	 * GHCB is saved and restored when the GHCB is detected to be in use
	 * already.
	 */
	bool ghcb_active;
	bool backup_ghcb_active;

	/*
	 * Cached DR7 value - write it on DR7 writes and return it on reads.
	 * That value will never make it to the real hardware DR7 as debugging
	 * is currently unsupported in SEV-ES guests.
	 */
	unsigned long dr7;
};
46c1e8980fSBorislav Petkov (AMD) 
/*
 * Saved state for a __sev_get_ghcb()/__sev_put_ghcb() pair: carries the GHCB
 * pointer handed out by __sev_get_ghcb() so the put side can release it.
 */
struct ghcb_state {
	struct ghcb *ghcb;
};
50c1e8980fSBorislav Petkov (AMD) 
51c1e8980fSBorislav Petkov (AMD) extern struct svsm_ca boot_svsm_ca_page;
52c1e8980fSBorislav Petkov (AMD) 
53c1e8980fSBorislav Petkov (AMD) struct ghcb *__sev_get_ghcb(struct ghcb_state *state);
54c1e8980fSBorislav Petkov (AMD) void __sev_put_ghcb(struct ghcb_state *state);
55c1e8980fSBorislav Petkov (AMD) 
56c1e8980fSBorislav Petkov (AMD) DECLARE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
57c1e8980fSBorislav Petkov (AMD) DECLARE_PER_CPU(struct sev_es_save_area *, sev_vmsa);
58c1e8980fSBorislav Petkov (AMD) 
59c1e8980fSBorislav Petkov (AMD) void early_set_pages_state(unsigned long vaddr, unsigned long paddr,
60c1e8980fSBorislav Petkov (AMD) 			   unsigned long npages, const struct psc_desc *desc);
61c1e8980fSBorislav Petkov (AMD) 
62c1e8980fSBorislav Petkov (AMD) DECLARE_PER_CPU(struct svsm_ca *, svsm_caa);
63c1e8980fSBorislav Petkov (AMD) DECLARE_PER_CPU(u64, svsm_caa_pa);
64c1e8980fSBorislav Petkov (AMD) 
65c1e8980fSBorislav Petkov (AMD) extern u64 boot_svsm_caa_pa;
66c1e8980fSBorislav Petkov (AMD) 
67c1e8980fSBorislav Petkov (AMD) enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt *ctxt);
68c1e8980fSBorislav Petkov (AMD) void vc_forward_exception(struct es_em_ctxt *ctxt);
69e21279b7SBorislav Petkov (AMD) void svsm_pval_pages(struct snp_psc_desc *desc);
70e21279b7SBorislav Petkov (AMD) int svsm_perform_call_protocol(struct svsm_call *call);
71e21279b7SBorislav Petkov (AMD) bool snp_svsm_vtpm_probe(void);
72c1e8980fSBorislav Petkov (AMD) 
/* Raw read of the GHCB MSR (MSR_AMD64_SEV_ES_GHCB). */
static inline u64 sev_es_rd_ghcb_msr(void)
{
	return native_rdmsrq(MSR_AMD64_SEV_ES_GHCB);
}
77c1e8980fSBorislav Petkov (AMD) 
78c1e8980fSBorislav Petkov (AMD) static __always_inline void sev_es_wr_ghcb_msr(u64 val)
79c1e8980fSBorislav Petkov (AMD) {
80c1e8980fSBorislav Petkov (AMD) 	u32 low, high;
81c1e8980fSBorislav Petkov (AMD) 
82c1e8980fSBorislav Petkov (AMD) 	low  = (u32)(val);
83c1e8980fSBorislav Petkov (AMD) 	high = (u32)(val >> 32);
84c1e8980fSBorislav Petkov (AMD) 
85c1e8980fSBorislav Petkov (AMD) 	native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high);
86c1e8980fSBorislav Petkov (AMD) }
87c1e8980fSBorislav Petkov (AMD) 
/* Handle an MSR-access #VC event; @write: true for WRMSR, false for RDMSR. */
enum es_result __vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt, bool write);

/* Query the hypervisor's advertised feature bits (cached in sev_hv_features). */
u64 get_hv_features(void);

/* Accessor for the SNP CPUID table. */
const struct snp_cpuid_table *snp_cpuid_get_table(void);
93e21279b7SBorislav Petkov (AMD) 
94e21279b7SBorislav Petkov (AMD) static inline struct svsm_ca *svsm_get_caa(void)
95e21279b7SBorislav Petkov (AMD) {
96e21279b7SBorislav Petkov (AMD) 	if (sev_cfg.use_cas)
97e21279b7SBorislav Petkov (AMD) 		return this_cpu_read(svsm_caa);
98e21279b7SBorislav Petkov (AMD) 	else
99e21279b7SBorislav Petkov (AMD) 		return rip_rel_ptr(&boot_svsm_ca_page);
100e21279b7SBorislav Petkov (AMD) }
101e21279b7SBorislav Petkov (AMD) 
102e21279b7SBorislav Petkov (AMD) static inline u64 svsm_get_caa_pa(void)
103e21279b7SBorislav Petkov (AMD) {
104e21279b7SBorislav Petkov (AMD) 	if (sev_cfg.use_cas)
105e21279b7SBorislav Petkov (AMD) 		return this_cpu_read(svsm_caa_pa);
106e21279b7SBorislav Petkov (AMD) 	else
107e21279b7SBorislav Petkov (AMD) 		return boot_svsm_caa_pa;
108e21279b7SBorislav Petkov (AMD) }
109e21279b7SBorislav Petkov (AMD) 
/*
 * Report a PVALIDATE failure and terminate the guest.
 *
 * @pfn:       page frame number that failed validation
 * @action:    PVALIDATE validate/rescind action
 * @page_size: PVALIDATE page-size operand
 * @ret:       PVALIDATE return code
 * @svsm_ret:  SVSM return code — presumably 0 when no SVSM is involved; verify
 *             against callers
 *
 * sev_es_terminate() requests guest termination, so this is expected not to
 * return.
 */
static inline void __pval_terminate(u64 pfn, bool action, unsigned int page_size,
				    int ret, u64 svsm_ret)
{
	WARN(1, "PVALIDATE failure: pfn: 0x%llx, action: %u, size: %u, ret: %d, svsm_ret: 0x%llx\n",
	     pfn, action, page_size, ret, svsm_ret);

	sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
}
118e21279b7SBorislav Petkov (AMD) 
119f01c6489SBorislav Petkov (AMD) #endif /* __X86_COCO_SEV_INTERNAL_H__ */
120