// SPDX-License-Identifier: GPL-2.0
/*
 * Common EFI memory map functions.
 */

#define pr_fmt(fmt) "efi: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <asm/early_ioremap.h>
#include <asm/efi.h>
#include <linux/memblock.h>
#include <linux/slab.h>

static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
{
	return memblock_phys_alloc(size, SMP_CACHE_BYTES);
}

static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
{
	unsigned int order = get_order(size);
	struct page *p = alloc_pages(GFP_KERNEL, order);

	if (!p)
		return 0;

	return PFN_PHYS(page_to_pfn(p));
}

/*
 * Release a previously installed map, using the allocator that matches the
 * EFI_MEMMAP_* flag it was created with.
 */
static void __init
__efi_memmap_free(u64 phys, unsigned long size, unsigned long flags)
{
	if (flags & EFI_MEMMAP_MEMBLOCK) {
		if (slab_is_available())
			memblock_free_late(phys, size);
		else
			memblock_phys_free(phys, size);
	} else if (flags & EFI_MEMMAP_SLAB) {
		struct page *p = pfn_to_page(PHYS_PFN(phys));
		unsigned int order = get_order(size);

		free_pages((unsigned long) page_address(p), order);
	}
}

/**
 * efi_memmap_alloc - Allocate memory for the EFI memory map
 * @num_entries: Number of entries in the allocated map.
 * @data: efi memmap installation parameters
 *
 * Depending on whether mm_init() has already been invoked or not,
 * either memblock or "normal" page allocation is used.
 *
 * Returns zero on success, a negative error code on failure.
 */
int __init efi_memmap_alloc(unsigned int num_entries,
			    struct efi_memory_map_data *data)
{
	/* Expect allocation parameters are zero initialized */
	WARN_ON(data->phys_map || data->size);

	data->size = num_entries * efi.memmap.desc_size;
	data->desc_version = efi.memmap.desc_version;
	data->desc_size = efi.memmap.desc_size;
	data->flags &= ~(EFI_MEMMAP_SLAB | EFI_MEMMAP_MEMBLOCK);
	data->flags |= efi.memmap.flags & EFI_MEMMAP_LATE;

	if (slab_is_available()) {
		data->flags |= EFI_MEMMAP_SLAB;
		data->phys_map = __efi_memmap_alloc_late(data->size);
	} else {
		data->flags |= EFI_MEMMAP_MEMBLOCK;
		data->phys_map = __efi_memmap_alloc_early(data->size);
	}

	if (!data->phys_map)
		return -ENOMEM;
	return 0;
}

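/*
 * Usage sketch (illustrative only, not part of this file): a caller zeroes
 * an efi_memory_map_data on the stack, sizes the request from the current
 * map and checks for -ENOMEM.  The one-entry headroom below is an assumed
 * example value, not something this API requires.
 *
 *	struct efi_memory_map_data data = {};
 *	unsigned int num_entries = efi.memmap.nr_map + 1;
 *
 *	if (efi_memmap_alloc(num_entries, &data)) {
 *		pr_err("failed to allocate new EFI memory map\n");
 *		return;
 *	}
 *
 * On success, data.phys_map holds the physical address of the new buffer
 * and data.flags records which allocator (memblock or slab) provided it.
 */
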
/**
 * efi_memmap_install - Install a new EFI memory map in efi.memmap
 * @data: efi memmap installation parameters
 *
 * Unlike efi_memmap_init_*(), this function does not allow the caller
 * to switch from early to late mappings. It simply uses the existing
 * mapping function and installs the new memmap.
 *
 * Returns zero on success, a negative error code on failure.
 */
int __init efi_memmap_install(struct efi_memory_map_data *data)
{
	unsigned long size = efi.memmap.desc_size * efi.memmap.nr_map;
	unsigned long flags = efi.memmap.flags;
	u64 phys = efi.memmap.phys_map;
	int ret;

	efi_memmap_unmap();

	if (efi_enabled(EFI_PARAVIRT))
		return 0;

	ret = __efi_memmap_init(data);
	if (ret)
		return ret;

	__efi_memmap_free(phys, size, flags);
	return 0;
}

/**
 * efi_memmap_split_count - Count number of additional EFI memmap entries
 * @md: EFI memory descriptor to split
 * @range: Address range (start, end) to split around
 *
 * Returns the number of additional EFI memmap entries required to
 * accommodate @range.
 */
int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range)
{
	u64 m_start, m_end;
	u64 start, end;
	int count = 0;

	start = md->phys_addr;
	end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;

	/* modifying range */
	m_start = range->start;
	m_end = range->end;

	if (m_start <= start) {
		/* split into 2 parts */
		if (start < m_end && m_end < end)
			count++;
	}

	if (start < m_start && m_start < end) {
		/* split into 3 parts */
		if (m_end < end)
			count += 2;
		/* split into 2 parts */
		if (end <= m_end)
			count++;
	}

	return count;
}

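/*
 * Sizing sketch (an assumption about how callers pair this helper with
 * efi_memmap_alloc(), modelled on the x86 boot services quirk): the new
 * map needs one slot per existing descriptor plus the extra slots created
 * by splitting the descriptor 'md' around the range 'mr.range'.
 *
 *	num_entries = efi_memmap_split_count(&md, &mr.range);
 *	num_entries += efi.memmap.nr_map;
 *
 *	if (efi_memmap_alloc(num_entries, &data) != 0)
 *		return;
 */
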
/**
 * efi_memmap_insert - Insert a memory region in an EFI memmap
 * @old_memmap: The existing EFI memory map structure
 * @buf: Address of buffer to store new map
 * @mem: Memory map entry to insert
 *
 * It is suggested that you call efi_memmap_split_count() first
 * to see how large @buf needs to be.
 */
void __init efi_memmap_insert(struct efi_memory_map *old_memmap, void *buf,
			      struct efi_mem_range *mem)
{
	u64 m_start, m_end, m_attr;
	efi_memory_desc_t *md;
	u64 start, end;
	void *old, *new;

	/* modifying range */
	m_start = mem->range.start;
	m_end = mem->range.end;
	m_attr = mem->attribute;

	/*
	 * The EFI memory map deals with regions in EFI_PAGE_SIZE
	 * units. Ensure that the region described by 'mem' is aligned
	 * correctly.
	 */
	if (!IS_ALIGNED(m_start, EFI_PAGE_SIZE) ||
	    !IS_ALIGNED(m_end + 1, EFI_PAGE_SIZE)) {
		WARN_ON(1);
		return;
	}

	for (old = old_memmap->map, new = buf;
	     old < old_memmap->map_end;
	     old += old_memmap->desc_size, new += old_memmap->desc_size) {

		/* copy original EFI memory descriptor */
		memcpy(new, old, old_memmap->desc_size);
		md = new;
		start = md->phys_addr;
		end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;

		if (m_start <= start && end <= m_end)
			md->attribute |= m_attr;

		if (m_start <= start &&
		    (start < m_end && m_end < end)) {
			/* first part */
			md->attribute |= m_attr;
			md->num_pages = (m_end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
			/* latter part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_end + 1;
			md->num_pages = (end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
		}

		if ((start < m_start && m_start < end) && m_end < end) {
			/* first part */
			md->num_pages = (m_start - md->phys_addr) >>
				EFI_PAGE_SHIFT;
			/* middle part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->attribute |= m_attr;
			md->phys_addr = m_start;
			md->num_pages = (m_end - m_start + 1) >>
				EFI_PAGE_SHIFT;
			/* last part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_end + 1;
			md->num_pages = (end - m_end) >>
				EFI_PAGE_SHIFT;
		}

		if ((start < m_start && m_start < end) &&
		    (end <= m_end)) {
			/* first part */
			md->num_pages = (m_start - md->phys_addr) >>
				EFI_PAGE_SHIFT;
			/* latter part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_start;
			md->num_pages = (end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
			md->attribute |= m_attr;
		}
	}
}
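
/*
 * End-to-end sketch (illustrative; 'md', 'addr' and 'size' are assumed to
 * describe an existing descriptor and an EFI_PAGE_SIZE-aligned sub-range of
 * it, and early_memremap() stands in for whatever mapping helper is
 * appropriate in the caller's context): split the descriptor, rewrite the
 * map into the freshly allocated buffer and publish it.
 *
 *	struct efi_memory_map_data data = {};
 *	struct efi_mem_range mr = {
 *		.range.start = addr,
 *		.range.end   = addr + size - 1,
 *		.attribute   = md.attribute | EFI_MEMORY_RUNTIME,
 *	};
 *	int num_entries;
 *	void *new;
 *
 *	num_entries = efi.memmap.nr_map + efi_memmap_split_count(&md, &mr.range);
 *	if (efi_memmap_alloc(num_entries, &data))
 *		return;
 *
 *	new = early_memremap(data.phys_map, data.size);
 *	if (!new)
 *		return;
 *
 *	efi_memmap_insert(&efi.memmap, new, &mr);
 *	early_memunmap(new, data.size);
 *	efi_memmap_install(&data);
 */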