xref: /linux/drivers/firmware/efi/unaccepted_memory.c (revision 247dbcdbf790c52fc76cf8e327cd0a5778e41e66)
1 // SPDX-License-Identifier: GPL-2.0-only
2 
3 #include <linux/efi.h>
4 #include <linux/memblock.h>
5 #include <linux/spinlock.h>
6 #include <linux/crash_dump.h>
7 #include <asm/unaccepted_memory.h>
8 
9 /* Protects unaccepted memory bitmap */
10 static DEFINE_SPINLOCK(unaccepted_memory_lock);
11 
/*
 * accept_memory() -- Consult bitmap and accept the memory if needed.
 *
 * Only memory that is explicitly marked as unaccepted in the bitmap requires
 * an action. All the remaining memory is implicitly accepted and doesn't need
 * acceptance.
 *
 * No need to accept:
 *  - anything if the system has no unaccepted table;
 *  - memory that is below phys_base;
 *  - memory that is above the memory that is addressable by the bitmap;
 */
void accept_memory(phys_addr_t start, phys_addr_t end)
{
	struct efi_unaccepted_memory *unaccepted;
	unsigned long range_start, range_end;
	unsigned long flags;
	u64 unit_size;

	/* No unaccepted table: all memory is implicitly accepted. */
	unaccepted = efi_get_unaccepted_table();
	if (!unaccepted)
		return;

	/* Bytes of physical memory represented by one bit of the bitmap. */
	unit_size = unaccepted->unit_size;

	/*
	 * Only care for the part of the range that is represented
	 * in the bitmap.
	 */
	if (start < unaccepted->phys_base)
		start = unaccepted->phys_base;
	if (end < unaccepted->phys_base)
		return;

	/* Translate to offsets from the beginning of the bitmap */
	start -= unaccepted->phys_base;
	end -= unaccepted->phys_base;

	/*
	 * load_unaligned_zeropad() can lead to unwanted loads across page
	 * boundaries. The unwanted loads are typically harmless. But, they
	 * might be made to totally unrelated or even unmapped memory.
	 * load_unaligned_zeropad() relies on exception fixup (#PF, #GP and now
	 * #VE) to recover from these unwanted loads.
	 *
	 * But, this approach does not work for unaccepted memory. For TDX, a
	 * load from unaccepted memory will not lead to a recoverable exception
	 * within the guest. The guest will exit to the VMM where the only
	 * recourse is to terminate the guest.
	 *
	 * There are two parts to fix this issue and comprehensively avoid
	 * access to unaccepted memory. Together these ensure that an extra
	 * "guard" page is accepted in addition to the memory that needs to be
	 * used:
	 *
	 * 1. Implicitly extend the range_contains_unaccepted_memory(start, end)
	 *    checks up to end+unit_size if 'end' is aligned on a unit_size
	 *    boundary.
	 *
	 * 2. Implicitly extend accept_memory(start, end) to end+unit_size if
	 *    'end' is aligned on a unit_size boundary. (immediately following
	 *    this comment)
	 */
	if (!(end % unit_size))
		end += unit_size;

	/* Make sure not to overrun the bitmap (the guard extension above may
	 * have pushed 'end' past the last represented unit). */
	if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
		end = unaccepted->size * unit_size * BITS_PER_BYTE;

	range_start = start / unit_size;

	/*
	 * The lock serializes bitmap reads/updates against
	 * range_contains_unaccepted_memory() so a bit is never observed clear
	 * before its memory has actually been accepted.
	 *
	 * NOTE(review): arch_accept_memory() runs with this spinlock held and
	 * IRQs disabled; accepting a very large range here could stall the CPU
	 * for a long time — confirm latency is acceptable for expected range
	 * sizes.
	 */
	spin_lock_irqsave(&unaccepted_memory_lock, flags);
	for_each_set_bitrange_from(range_start, range_end, unaccepted->bitmap,
				   DIV_ROUND_UP(end, unit_size)) {
		unsigned long phys_start, phys_end;
		unsigned long len = range_end - range_start;

		/* Convert bit indices back to physical addresses. */
		phys_start = range_start * unit_size + unaccepted->phys_base;
		phys_end = range_end * unit_size + unaccepted->phys_base;

		/*
		 * Accept first, then clear the bits: bits must never be clear
		 * while the underlying memory is still unaccepted.
		 */
		arch_accept_memory(phys_start, phys_end);
		bitmap_clear(unaccepted->bitmap, range_start, len);
	}
	spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
}
98 
99 bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end)
100 {
101 	struct efi_unaccepted_memory *unaccepted;
102 	unsigned long flags;
103 	bool ret = false;
104 	u64 unit_size;
105 
106 	unaccepted = efi_get_unaccepted_table();
107 	if (!unaccepted)
108 		return false;
109 
110 	unit_size = unaccepted->unit_size;
111 
112 	/*
113 	 * Only care for the part of the range that is represented
114 	 * in the bitmap.
115 	 */
116 	if (start < unaccepted->phys_base)
117 		start = unaccepted->phys_base;
118 	if (end < unaccepted->phys_base)
119 		return false;
120 
121 	/* Translate to offsets from the beginning of the bitmap */
122 	start -= unaccepted->phys_base;
123 	end -= unaccepted->phys_base;
124 
125 	/*
126 	 * Also consider the unaccepted state of the *next* page. See fix #1 in
127 	 * the comment on load_unaligned_zeropad() in accept_memory().
128 	 */
129 	if (!(end % unit_size))
130 		end += unit_size;
131 
132 	/* Make sure not to overrun the bitmap */
133 	if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
134 		end = unaccepted->size * unit_size * BITS_PER_BYTE;
135 
136 	spin_lock_irqsave(&unaccepted_memory_lock, flags);
137 	while (start < end) {
138 		if (test_bit(start / unit_size, unaccepted->bitmap)) {
139 			ret = true;
140 			break;
141 		}
142 
143 		start += unit_size;
144 	}
145 	spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
146 
147 	return ret;
148 }
149 
#ifdef CONFIG_PROC_VMCORE
/*
 * /proc/vmcore callback: treat unaccepted PFNs as not-RAM so the kdump
 * capture kernel never reads them. Touching unaccepted memory can be fatal
 * to the guest (see the load_unaligned_zeropad() comment in accept_memory()).
 */
static bool unaccepted_memory_vmcore_pfn_is_ram(struct vmcore_cb *cb,
						unsigned long pfn)
{
	return !pfn_is_unaccepted_memory(pfn);
}

/* Callback table handed to the crash-dump core. */
static struct vmcore_cb vmcore_cb = {
	.pfn_is_ram = unaccepted_memory_vmcore_pfn_is_ram,
};

/* Register the vmcore filter above; always returns success. */
static int __init unaccepted_memory_init_kdump(void)
{
	register_vmcore_cb(&vmcore_cb);
	return 0;
}
core_initcall(unaccepted_memory_init_kdump);
#endif /* CONFIG_PROC_VMCORE */
168