// SPDX-License-Identifier: GPL-2.0-only

#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/crash_dump.h>
#include <linux/nmi.h>
#include <asm/unaccepted_memory.h>

/* Protects unaccepted memory bitmap and accepting_list */
static DEFINE_SPINLOCK(unaccepted_memory_lock);

struct accept_range {
        struct list_head list;
        unsigned long start;
        unsigned long end;
};

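/* Ranges currently being accepted, used to serialize concurrent callers */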
static LIST_HEAD(accepting_list);

/*
 * accept_memory() -- Consult bitmap and accept the memory if needed.
 *
 * Only memory that is explicitly marked as unaccepted in the bitmap requires
 * an action. All the remaining memory is implicitly accepted and doesn't need
 * acceptance.
 *
 * No need to accept:
 *  - anything if the system has no unaccepted table;
 *  - memory that is below phys_base;
 *  - memory that is above the memory addressable by the bitmap;
 */
void accept_memory(phys_addr_t start, unsigned long size)
{
        struct efi_unaccepted_memory *unaccepted;
        unsigned long range_start, range_end;
        struct accept_range range, *entry;
        unsigned long flags;
        phys_addr_t end;
        u64 unit_size;

        unaccepted = efi_get_unaccepted_table();
        if (!unaccepted)
                return;

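        /*
         * Widen the range to whole pages: round 'start' down and 'end' up
         * so that partially covered pages at either end are accepted too.
         */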
        end = PAGE_ALIGN(start + size);
        start = PAGE_ALIGN_DOWN(start);

        unit_size = unaccepted->unit_size;

        /*
         * Only care for the part of the range that is represented
         * in the bitmap.
         */
        if (start < unaccepted->phys_base)
                start = unaccepted->phys_base;
        if (end < unaccepted->phys_base)
                return;

        /* Translate to offsets from the beginning of the bitmap */
        start -= unaccepted->phys_base;
        end -= unaccepted->phys_base;
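
        /*
         * From here on, 'start' and 'end' are byte offsets into the region
         * the bitmap covers; each bit represents unit_size bytes. For
         * example, with phys_base at 4GB and a 2MB unit_size, physical
         * address 4GB + 6MB becomes offset 6MB, i.e. bit 3 of the bitmap.
         */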

        /*
         * load_unaligned_zeropad() can lead to unwanted loads across page
         * boundaries. The unwanted loads are typically harmless. But, they
         * might be made to totally unrelated or even unmapped memory.
         * load_unaligned_zeropad() relies on exception fixup (#PF, #GP and now
         * #VE) to recover from these unwanted loads.
         *
         * But, this approach does not work for unaccepted memory. For TDX, a
         * load from unaccepted memory will not lead to a recoverable exception
         * within the guest. The guest will exit to the VMM where the only
         * recourse is to terminate the guest.
         *
         * There are two parts to fix this issue and comprehensively avoid
         * access to unaccepted memory. Together these ensure that an extra
         * "guard" page is accepted in addition to the memory that needs to be
         * used:
         *
         * 1. Implicitly extend the range_contains_unaccepted_memory(start, size)
         *    checks up to the next unit_size if 'start+size' is aligned on a
         *    unit_size boundary.
         *
         * 2. Implicitly extend accept_memory(start, size) to the next unit_size
         *    if 'start+size' is aligned on a unit_size boundary. (immediately
         *    following this comment)
         */
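        /*
         * For example, with a 2MB unit_size, a request that ends exactly on
         * a unit boundary also pulls in the following 2MB unit as the guard.
         */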
        if (!(end % unit_size))
                end += unit_size;

        /* Make sure not to overrun the bitmap */
        if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
                end = unaccepted->size * unit_size * BITS_PER_BYTE;

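        /* Convert byte offsets into bitmap-bit (unit_size) indices */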
        range.start = start / unit_size;
        range.end = DIV_ROUND_UP(end, unit_size);
retry:
        spin_lock_irqsave(&unaccepted_memory_lock, flags);

        /*
         * Check if anybody else is already accepting the same range of
         * memory.
         *
         * The check is done with unit_size granularity. It is crucial to
         * catch all accept requests to the same unit_size block, even if
         * they don't overlap on the physical address level.
         */
        list_for_each_entry(entry, &accepting_list, list) {
                if (entry->end <= range.start)
                        continue;
                if (entry->start >= range.end)
                        continue;

                /*
                 * Somebody else is accepting the range, or at least part
                 * of it.
                 *
                 * Drop the lock and retry until it is complete.
                 */
                spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
                goto retry;
        }

        /*
         * Register that the range is about to be accepted.
         * Make sure nobody else will accept it.
         */
        list_add(&range.list, &accepting_list);

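        /*
         * Walk only the runs of set (still-unaccepted) bits; already-accepted
         * holes inside [range.start, range.end) are skipped automatically.
         */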
        range_start = range.start;
        for_each_set_bitrange_from(range_start, range_end, unaccepted->bitmap,
                                   range.end) {
                unsigned long phys_start, phys_end;
                unsigned long len = range_end - range_start;

                phys_start = range_start * unit_size + unaccepted->phys_base;
                phys_end = range_end * unit_size + unaccepted->phys_base;

                /*
                 * Keep interrupts disabled until the accept operation is
                 * complete in order to prevent deadlocks.
                 *
                 * Enabling interrupts before calling arch_accept_memory()
                 * creates an opportunity for an interrupt handler to request
                 * acceptance for the same memory. The handler would spin
                 * forever with interrupts disabled, preventing the
                 * interrupted task from making progress with the acceptance
                 * process.
                 */
                spin_unlock(&unaccepted_memory_lock);

                arch_accept_memory(phys_start, phys_end);

                spin_lock(&unaccepted_memory_lock);
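                /* With the lock re-taken, record the accepted chunk */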
                bitmap_clear(unaccepted->bitmap, range_start, len);
        }

        list_del(&range.list);

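        /*
         * Accepting a large range can take long enough to trip the soft
         * lockup detector; let the watchdog know this CPU is still alive.
         */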
        touch_softlockup_watchdog();

        spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
}

bool range_contains_unaccepted_memory(phys_addr_t start, unsigned long size)
{
        struct efi_unaccepted_memory *unaccepted;
        unsigned long flags;
        bool ret = false;
        phys_addr_t end;
        u64 unit_size;

        unaccepted = efi_get_unaccepted_table();
        if (!unaccepted)
                return false;

        end = PAGE_ALIGN(start + size);
        start = PAGE_ALIGN_DOWN(start);

        unit_size = unaccepted->unit_size;

        /*
         * Only care for the part of the range that is represented
         * in the bitmap.
         */
        if (start < unaccepted->phys_base)
                start = unaccepted->phys_base;
        if (end < unaccepted->phys_base)
                return false;

        /* Translate to offsets from the beginning of the bitmap */
        start -= unaccepted->phys_base;
        end -= unaccepted->phys_base;

        /*
         * Also consider the unaccepted state of the *next* page. See fix #1 in
         * the comment on load_unaligned_zeropad() in accept_memory().
         */
        if (!(end % unit_size))
                end += unit_size;

        /* Make sure not to overrun the bitmap */
        if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
                end = unaccepted->size * unit_size * BITS_PER_BYTE;

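        /*
         * Scan one bit per unit_size step; the first set bit proves that the
         * range still contains unaccepted memory.
         */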
        spin_lock_irqsave(&unaccepted_memory_lock, flags);
        while (start < end) {
                if (test_bit(start / unit_size, unaccepted->bitmap)) {
                        ret = true;
                        break;
                }

                start += unit_size;
        }
        spin_unlock_irqrestore(&unaccepted_memory_lock, flags);

        return ret;
}

#ifdef CONFIG_PROC_VMCORE
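/*
 * kdump support: report unaccepted pages as not-RAM so the vmcore reader
 * skips them. Reading them through /proc/vmcore would require accepting
 * them first, and a stray read of unaccepted memory can be fatal in a
 * confidential guest.
 */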
static bool unaccepted_memory_vmcore_pfn_is_ram(struct vmcore_cb *cb,
                                                unsigned long pfn)
{
        return !pfn_is_unaccepted_memory(pfn);
}

static struct vmcore_cb vmcore_cb = {
        .pfn_is_ram = unaccepted_memory_vmcore_pfn_is_ram,
};

static int __init unaccepted_memory_init_kdump(void)
{
        register_vmcore_cb(&vmcore_cb);
        return 0;
}
core_initcall(unaccepted_memory_init_kdump);
#endif /* CONFIG_PROC_VMCORE */