xref: /linux/arch/x86/coco/core.c (revision 2f804aca48322f02a8f44cca540663845ee80fb1)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Confidential Computing Platform Capability checks
 *
 * Copyright (C) 2021 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/export.h>
#include <linux/cc_platform.h>

#include <asm/coco.h>
#include <asm/processor.h>

enum cc_vendor cc_vendor __ro_after_init;
static u64 cc_mask __ro_after_init;

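/*
 * Attribute checks for the Intel case; at this revision the only Intel
 * confidential computing guest platform is TDX.
 */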
static bool intel_cc_platform_has(enum cc_attr attr)
{
	switch (attr) {
	case CC_ATTR_GUEST_UNROLL_STRING_IO:
	case CC_ATTR_HOTPLUG_DISABLED:
	case CC_ATTR_GUEST_MEM_ENCRYPT:
	case CC_ATTR_MEM_ENCRYPT:
		return true;
	default:
		return false;
	}
}

/*
 * Handle the SEV-SNP vTOM case where sme_me_mask is zero, and
 * the other levels of SME/SEV functionality, including C-bit
 * based SEV-SNP, are not enabled. With vTOM, memory below the
 * virtual top-of-memory boundary is private (encrypted) and
 * memory above it is shared, so encryption is still in effect
 * even though the C-bit (sme_me_mask) is not used.
 */
static __maybe_unused bool amd_cc_platform_vtom(enum cc_attr attr)
{
	switch (attr) {
	case CC_ATTR_GUEST_MEM_ENCRYPT:
	case CC_ATTR_MEM_ENCRYPT:
		return true;
	default:
		return false;
	}
}

/*
 * SME and SEV are very similar but they are not the same, so there are
 * times when the kernel will need to distinguish between SME and SEV. The
 * cc_platform_has() function is used for this.  When a distinction isn't
 * needed, the CC_ATTR_MEM_ENCRYPT attribute can be used.
 *
 * The trampoline code is a good example of this requirement.  Before
 * paging is activated, SME will access all memory as decrypted, but SEV
 * will access all memory as encrypted.  So, when APs are being brought
 * up under SME the trampoline area cannot be encrypted, whereas under SEV
 * the trampoline area must be encrypted.
 */

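/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * that prepares the real-mode trampoline could use cc_platform_has() to make
 * the SME vs. SEV distinction described above. The helper name below is made
 * up for the example; only the attribute check reflects this file's API.
 */
static __maybe_unused bool example_trampoline_must_be_encrypted(void)
{
	/*
	 * A SEV guest accesses the trampoline area encrypted, while under
	 * host-side SME the area has to remain decrypted for the APs.
	 */
	return cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT);
}
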
static bool amd_cc_platform_has(enum cc_attr attr)
{
#ifdef CONFIG_AMD_MEM_ENCRYPT

	if (sev_status & MSR_AMD64_SNP_VTOM)
		return amd_cc_platform_vtom(attr);

	switch (attr) {
	case CC_ATTR_MEM_ENCRYPT:
		return sme_me_mask;

	case CC_ATTR_HOST_MEM_ENCRYPT:
		return sme_me_mask && !(sev_status & MSR_AMD64_SEV_ENABLED);

	case CC_ATTR_GUEST_MEM_ENCRYPT:
		return sev_status & MSR_AMD64_SEV_ENABLED;

	case CC_ATTR_GUEST_STATE_ENCRYPT:
		return sev_status & MSR_AMD64_SEV_ES_ENABLED;

	/*
	 * With SEV, the rep string I/O instructions need to be unrolled
	 * but SEV-ES supports them through the #VC handler.
	 */
	case CC_ATTR_GUEST_UNROLL_STRING_IO:
		return (sev_status & MSR_AMD64_SEV_ENABLED) &&
			!(sev_status & MSR_AMD64_SEV_ES_ENABLED);

	case CC_ATTR_GUEST_SEV_SNP:
		return sev_status & MSR_AMD64_SEV_SNP_ENABLED;

	default:
		return false;
	}
#else
	return false;
#endif
}

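/*
 * cc_platform_has() - check whether the active confidential computing
 * platform provides attribute @attr. Dispatches to the vendor-specific
 * helper selected by cc_vendor; returns false when no vendor is set.
 */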
bool cc_platform_has(enum cc_attr attr)
{
	switch (cc_vendor) {
	case CC_VENDOR_AMD:
		return amd_cc_platform_has(attr);
	case CC_VENDOR_INTEL:
		return intel_cc_platform_has(attr);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(cc_platform_has);

u64 cc_mkenc(u64 val)
{
	/*
	 * Both AMD and Intel use a bit in the page table to indicate
	 * encryption status of the page.
	 *
	 * - for AMD, bit *set* means the page is encrypted
	 * - for AMD with vTOM and for Intel, *clear* means encrypted
	 */
	switch (cc_vendor) {
	case CC_VENDOR_AMD:
		if (sev_status & MSR_AMD64_SNP_VTOM)
			return val & ~cc_mask;
		else
			return val | cc_mask;
	case CC_VENDOR_INTEL:
		return val & ~cc_mask;
	default:
		return val;
	}
}

u64 cc_mkdec(u64 val)
{
	/* See comment in cc_mkenc() */
	switch (cc_vendor) {
	case CC_VENDOR_AMD:
		if (sev_status & MSR_AMD64_SNP_VTOM)
			return val | cc_mask;
		else
			return val & ~cc_mask;
	case CC_VENDOR_INTEL:
		return val | cc_mask;
	default:
		return val;
	}
}
EXPORT_SYMBOL_GPL(cc_mkdec);
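
/*
 * Illustrative sketch, not part of the original file: a hypothetical helper
 * showing how a page-table value is switched between the encrypted and the
 * decrypted encoding. With a C-bit style cc_mask, requesting encryption sets
 * the bit; on TDX, or AMD with vTOM, the same call clears it, since there
 * the bit marks a page as shared.
 */
static __maybe_unused u64 example_encode_page(u64 pte_val, bool encrypted)
{
	return encrypted ? cc_mkenc(pte_val) : cc_mkdec(pte_val);
}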

__init void cc_set_mask(u64 mask)
{
	cc_mask = mask;
}
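
/*
 * Illustrative sketch, not part of the original file: platform setup code is
 * expected to establish the vendor and the encryption mask before any of the
 * helpers above are used. Both values below are hypothetical; real callers
 * derive them from CPUID/MSR state (e.g. the SEV C-bit position or the TDX
 * shared bit).
 */
static __maybe_unused void example_coco_setup(void)
{
	cc_vendor = CC_VENDOR_AMD;	/* hypothetical: pretend to be an SEV guest */
	cc_set_mask(1ULL << 51);	/* hypothetical C-bit position */
}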