// SPDX-License-Identifier: GPL-2.0-only
/*
 * Low level x86 E820 memory map handling functions.
 *
 * The firmware and the bootloader pass us the "E820 table", which is the
 * primary physical memory layout description available about x86 systems.
 *
 * The kernel takes the E820 memory layout and optionally modifies it with
 * quirks and other tweaks, and feeds that into the generic Linux memory
 * allocation code routines via a platform independent interface (memblock, etc.).
 */
#include <linux/memblock.h>
#include <linux/suspend.h>
#include <linux/acpi.h>
#include <linux/firmware-map.h>
#include <linux/sort.h>
#include <linux/kvm_types.h>

#include <asm/e820/api.h>
#include <asm/setup.h>

/*
 * We organize the E820 table into three main data structures:
 *
 * - 'e820_table_firmware': the original firmware version passed to us by the
 *   bootloader - not modified by the kernel. It is composed of two parts:
 *   the first 128 E820 memory entries in boot_params.e820_table and the
 *   remaining (if any) entries of the SETUP_E820_EXT nodes. We use this to:
 *
 *       - the hibernation code uses it to generate a kernel-independent CRC32
 *         checksum of the physical memory layout of a system.
 *
 * - 'e820_table_kexec': a slightly modified (by the kernel) firmware version
 *   passed to us by the bootloader - the major difference between
 *   e820_table_firmware[] and this one is that e820_table_kexec[]
 *   might be modified by the kexec itself to fake an mptable.
 *   We use this to:
 *
 *       - kexec, which is a bootloader in disguise, uses the original E820
 *         layout to pass to the kexec-ed kernel. This way the original kernel
 *         can have a restricted E820 map while the kexec()-ed kexec-kernel
 *         can have access to full memory - etc.
 *
 *       - Export the memory layout via /sys/firmware/memmap. kexec-tools uses
 *         the entries to create an E820 table for the kexec kernel.
 *
 *       - The kexec_file_load() in-kernel code uses the table for the kexec
 *         kernel.
 *
 * - 'e820_table': this is the main E820 table that is massaged by the
 *   low level x86 platform code, or modified by boot parameters, before
 *   being passed on to higher level MM layers.
 *
 * Once the E820 map has been converted to the standard Linux memory layout
 * information its role stops - modifying it has no effect and does not get
 * re-propagated. So its main role is temporary bootstrap storage of firmware
 * specific memory layout data during early bootup.
 */
__initdata static struct e820_table e820_table_init;
__initdata static struct e820_table e820_table_kexec_init;
__initdata static struct e820_table e820_table_firmware_init;

__refdata struct e820_table *e820_table = &e820_table_init;
__refdata struct e820_table *e820_table_kexec = &e820_table_kexec_init;
__refdata struct e820_table *e820_table_firmware = &e820_table_firmware_init;

/* For PCI or other memory-mapped resources */
unsigned long pci_mem_start = 0xaeedbabe;
#ifdef CONFIG_PCI
EXPORT_SYMBOL(pci_mem_start);
#endif

/*
 * This function checks if any part of the range <start,end> is mapped
 * with 'type':
 */
static bool _e820__mapped_any(struct e820_table *table,
			      u64 start, u64 end, enum e820_type type)
{
	u32 idx;

	for (idx = 0; idx < table->nr_entries; idx++) {
		struct e820_entry *entry = &table->entries[idx];

		if (type && entry->type != type)
			continue;
		if (entry->addr >= end || entry->addr + entry->size <= start)
			continue;
		return true;
	}
	return false;
}

bool e820__mapped_raw_any(u64 start, u64 end, enum e820_type type)
{
	return _e820__mapped_any(e820_table_firmware, start, end, type);
}
EXPORT_SYMBOL_FOR_KVM(e820__mapped_raw_any);

bool e820__mapped_any(u64 start, u64 end, enum e820_type type)
{
	return _e820__mapped_any(e820_table, start, end, type);
}
EXPORT_SYMBOL_GPL(e820__mapped_any);

/*
 * This function checks if the entire <start,end> range is mapped with 'type'.
 *
 * Note: this function only works correctly once the E820 table is sorted and
 * not-overlapping (at least for the range specified), which is the case normally.
 */
static struct e820_entry *__e820__mapped_all(u64 start, u64 end,
					     enum e820_type type)
{
	u32 idx;

	for (idx = 0; idx < e820_table->nr_entries; idx++) {
		struct e820_entry *entry = &e820_table->entries[idx];

		if (type && entry->type != type)
			continue;

		/* Does this entry overlap the <start,end> range (at least partially)? */
		if (entry->addr >= end || entry->addr + entry->size <= start)
			continue;

		/*
		 * If the region covers the beginning of <start,end>, move
		 * 'start' to the end of the region, since the range is
		 * covered up to that point:
		 */
		if (entry->addr <= start)
			start = entry->addr + entry->size;

		/*
		 * If 'start' is now at or beyond 'end', we're done, full
		 * coverage of the desired range exists:
		 */
		if (start >= end)
			return entry;
	}

	return NULL;
}

/*
 * This function checks if the entire range <start,end> is mapped with type.
 */
__init bool e820__mapped_all(u64 start, u64 end, enum e820_type type)
{
	return __e820__mapped_all(start, end, type);
}

/*
 * This function returns the type associated with the range <start,end>.
 */
int e820__get_entry_type(u64 start, u64 end)
{
	struct e820_entry *entry = __e820__mapped_all(start, end, 0);

	return entry ? entry->type : -EINVAL;
}
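
/*
 * Usage sketch for the lookup helpers above (illustrative only,
 * hypothetical caller and addresses):
 *
 *	// Does any part of the legacy VGA/BIOS hole contain RAM?
 *	if (e820__mapped_any(0xa0000, 0x100000, E820_TYPE_RAM))
 *		...;
 *
 *	// Is the second megabyte RAM in its entirety?
 *	if (e820__mapped_all(SZ_1M, 2 * SZ_1M, E820_TYPE_RAM))
 *		...;
 *
 * Passing type == 0 matches entries of any type, which is how
 * e820__get_entry_type() finds the entry covering a range.
 */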

/*
 * Add a memory region to the kernel E820 map.
 */
__init static void __e820__range_add(struct e820_table *table, u64 start, u64 size, enum e820_type type)
{
	u32 idx = table->nr_entries;
	struct e820_entry *entry_new;

	if (idx >= ARRAY_SIZE(table->entries)) {
		pr_err("E820 table full; ignoring [mem %#010llx-%#010llx]\n",
		       start, start + size - 1);
		return;
	}

	entry_new = table->entries + idx;

	entry_new->addr = start;
	entry_new->size = size;
	entry_new->type = type;

	table->nr_entries++;
}

__init void e820__range_add(u64 start, u64 size, enum e820_type type)
{
	__e820__range_add(e820_table, start, size, type);
}

__init static void e820_print_type(enum e820_type type)
{
	switch (type) {
	case E820_TYPE_RAM:		pr_cont(" System RAM");		break;
	case E820_TYPE_RESERVED:	pr_cont(" device reserved");	break;
	case E820_TYPE_SOFT_RESERVED:	pr_cont(" soft reserved");	break;
	case E820_TYPE_ACPI:		pr_cont(" ACPI data");		break;
	case E820_TYPE_NVS:		pr_cont(" ACPI NVS");		break;
	case E820_TYPE_UNUSABLE:	pr_cont(" unusable");		break;
	case E820_TYPE_PMEM:		/* Fall through: */
	case E820_TYPE_PRAM:		pr_cont(" persistent RAM (type %u)", type); break;
	default:			pr_cont(" type %u", type);	break;
	}
}

__init static void e820__print_table(const char *who)
{
	u64 range_end_prev = 0;
	u32 idx;

	for (idx = 0; idx < e820_table->nr_entries; idx++) {
		struct e820_entry *entry = e820_table->entries + idx;
		u64 range_start, range_end;

		range_start = entry->addr;
		range_end = entry->addr + entry->size;

		/* Out of order E820 maps should not happen: */
		if (range_start < range_end_prev)
			pr_info(FW_BUG "out of order E820 entry!\n");

		if (range_start > range_end_prev) {
			pr_info("%s: [gap %#018Lx-%#018Lx]\n",
				who,
				range_end_prev,
				range_start - 1);
		}

		pr_info("%s: [mem %#018Lx-%#018Lx] ", who, range_start, range_end - 1);
		e820_print_type(entry->type);
		pr_cont("\n");

		range_end_prev = range_end;
	}
}

/*
 * Sanitize an E820 map.
 *
 * Some E820 layouts include overlapping entries. The following
 * replaces the original E820 map with a new one, removing overlaps,
 * and resolving conflicting memory types in favor of the highest
 * numbered type.
 *
 * The input parameter 'entries' points to an array of 'struct
 * e820_entry' which on entry has elements in the range [0, *nr_entries)
 * valid, and which has space for up to max_nr_entries entries.
 * On return, the resulting sanitized E820 map entries will be
 * overwritten in the same location, starting at 'entries'.
 *
 * The integer pointed to by nr_entries must be valid on entry (the
 * current number of valid entries located at 'entries'). If the
 * sanitizing succeeds, *nr_entries is updated with the new number
 * of valid entries (no more than max_nr_entries).
 *
 * The return value from e820__update_table() is zero if it
 * successfully 'sanitized' the map entries passed in, and is -1
 * if it did nothing, which can happen if either of (1) it was
 * only passed one map entry, or (2) any of the input map entries
 * were invalid (start + size < start, meaning that the size was
 * so big the described memory range wrapped around through zero.)
 *
 * Visually we're performing the following
 * (1,2,3,4 = memory types)...
 *
 * Sample memory map (w/overlaps):
 *	   ____22__________________
 *	   ______________________4_
 *	   ____1111________________
 *	   _44_____________________
 *	   11111111________________
 *	   ____________________33__
 *	   ___________44___________
 *	   __________33333_________
 *	   ______________22________
 *	   ___________________2222_
 *	   _________111111111______
 *	   _____________________11_
 *	   _________________4______
 *
 * Sanitized equivalent (no overlap):
 *	   1_______________________
 *	   _44_____________________
 *	   ___1____________________
 *	   ____22__________________
 *	   ______11________________
 *	   _________1______________
 *	   __________3_____________
 *	   ___________44___________
 *	   _____________33_________
 *	   _______________2________
 *	   ________________1_______
 *	   _________________4______
 *	   ___________________2____
 *	   ____________________33__
 *	   ______________________4_
 */
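
/*
 * Worked example of the change-point algorithm below, on a hypothetical
 * two-entry input map:
 *
 *	[0x00000000-0x0009ffff] type 1 (RAM)
 *	[0x0009f000-0x000fffff] type 2 (reserved)
 *
 * This yields four change-points: 0x0 and 0x9f000 (starts), 0xa0000 and
 * 0x100000 (ends). Walking them in address order, type 2 wins inside the
 * overlap, producing the sanitized, non-overlapping output:
 *
 *	[0x00000000-0x0009efff] type 1 (RAM)
 *	[0x0009f000-0x000fffff] type 2 (reserved)
 */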

struct change_member {
	/* Pointer to the original entry: */
	struct e820_entry	*entry;
	/* Address for this change point: */
	u64			addr;
};

__initdata static struct change_member	change_point_list[2*E820_MAX_ENTRIES];
__initdata static struct change_member	*change_point[2*E820_MAX_ENTRIES];
__initdata static struct e820_entry	*overlap_list[E820_MAX_ENTRIES];
__initdata static struct e820_entry	new_entries[E820_MAX_ENTRIES];

__init static int cpcompare(const void *a, const void *b)
{
	struct change_member * const *app = a, * const *bpp = b;
	const struct change_member *ap = *app, *bp = *bpp;

	/*
	 * Inputs are pointers to two elements of change_point[]. If their
	 * addresses are not equal, their difference dominates. If the addresses
	 * are equal, then consider one that represents the end of its region
	 * to be greater than one that does not.
	 */
	if (ap->addr != bp->addr)
		return ap->addr > bp->addr ? 1 : -1;

	return (ap->addr != ap->entry->addr) - (bp->addr != bp->entry->addr);
}

/*
 * Can two consecutive E820 entries of the same E820 type be merged?
 */
static bool e820_type_mergeable(enum e820_type type)
{
	/*
	 * These types may indicate distinct platform ranges aligned to
	 * NUMA node, protection domain, performance domain, or other
	 * boundaries. Do not merge them.
	 */
	if (type == E820_TYPE_PRAM)
		return false;
	if (type == E820_TYPE_SOFT_RESERVED)
		return false;

	return true;
}

__init int e820__update_table(struct e820_table *table)
{
	struct e820_entry *entries = table->entries;
	u32 max_nr_entries = ARRAY_SIZE(table->entries);
	enum e820_type current_type, last_type;
	u64 last_addr;
	u32 new_nr_entries, overlap_entries;
	u32 idx, chg_idx, chg_nr;

	/* If there's only one memory region, don't bother: */
	if (table->nr_entries < 2)
		return -1;

	BUG_ON(table->nr_entries > max_nr_entries);

	/* Bail out if we find any unreasonable addresses in the map: */
	for (idx = 0; idx < table->nr_entries; idx++) {
		if (entries[idx].addr + entries[idx].size < entries[idx].addr)
			return -1;
	}

	/* Create pointers for initial change-point information (for sorting): */
	for (idx = 0; idx < 2 * table->nr_entries; idx++)
		change_point[idx] = &change_point_list[idx];

	/*
	 * Record all known change-points (starting and ending addresses),
	 * omitting empty memory regions:
	 */
	chg_idx = 0;
	for (idx = 0; idx < table->nr_entries; idx++) {
		if (entries[idx].size != 0) {
			change_point[chg_idx]->addr = entries[idx].addr;
			change_point[chg_idx++]->entry = &entries[idx];
			change_point[chg_idx]->addr = entries[idx].addr + entries[idx].size;
			change_point[chg_idx++]->entry = &entries[idx];
		}
	}
	chg_nr = chg_idx;

	/* Sort change-point list by memory addresses (low -> high): */
	sort(change_point, chg_nr, sizeof(*change_point), cpcompare, NULL);

	/* Create a new memory map, removing overlaps: */
	overlap_entries = 0;	/* Number of entries in the overlap table */
	new_nr_entries = 0;	/* Index for creating new map entries */
	last_type = 0;		/* Start with undefined memory type */
	last_addr = 0;		/* Start with 0 as last starting address */

	/* Loop through change-points, determining effect on the new map: */
	for (chg_idx = 0; chg_idx < chg_nr; chg_idx++) {
		/* Keep track of all overlapping entries */
		if (change_point[chg_idx]->addr == change_point[chg_idx]->entry->addr) {
			/* Add map entry to overlap list (> 1 entry implies an overlap) */
			overlap_list[overlap_entries++] = change_point[chg_idx]->entry;
		} else {
			/* Remove entry from list (order independent, so swap with last): */
			for (idx = 0; idx < overlap_entries; idx++) {
				if (overlap_list[idx] == change_point[chg_idx]->entry)
					overlap_list[idx] = overlap_list[overlap_entries-1];
			}
			overlap_entries--;
		}
		/*
		 * If there are overlapping entries, decide which
		 * "type" to use (larger value takes precedence --
		 * 1=usable, 2,3,4,4+=unusable)
		 */
		current_type = 0;
		for (idx = 0; idx < overlap_entries; idx++) {
			if (overlap_list[idx]->type > current_type)
				current_type = overlap_list[idx]->type;
		}

		/* Continue building up new map based on this information: */
		if (current_type != last_type || !e820_type_mergeable(current_type)) {
			if (last_type) {
				new_entries[new_nr_entries].size = change_point[chg_idx]->addr - last_addr;
				/* Move forward only if the new size was non-zero: */
				if (new_entries[new_nr_entries].size != 0)
					/* No more space left for new entries? */
					if (++new_nr_entries >= max_nr_entries)
						break;
			}
			if (current_type) {
				new_entries[new_nr_entries].addr = change_point[chg_idx]->addr;
				new_entries[new_nr_entries].type = current_type;
				last_addr = change_point[chg_idx]->addr;
			}
			last_type = current_type;
		}
	}

	/* Copy the new entries into the original location: */
	memcpy(entries, new_entries, new_nr_entries * sizeof(*entries));
	table->nr_entries = new_nr_entries;

	return 0;
}

/*
 * Copy the BIOS E820 map into the kernel's e820_table.
 *
 * Sanity-check it while we're at it..
 */
__init static int append_e820_table(struct boot_e820_entry *entries, u32 nr_entries)
{
	struct boot_e820_entry *entry = entries;

	/* If there aren't any entries, we'll want to fall back to another source: */
	if (!nr_entries)
		return -ENOENT;

	while (nr_entries) {
		u64 start = entry->addr;
		u64 size = entry->size;
		u64 end = start + size - 1;
		u32 type = entry->type;

		/* Ignore the remaining entries on 64-bit overflow: */
		if (start > end && likely(size))
			return -EINVAL;

		e820__range_add(start, size, type);

		entry++;
		nr_entries--;
	}
	return 0;
}

__init static u64
__e820__range_update(struct e820_table *table, u64 start, u64 size, enum e820_type old_type, enum e820_type new_type)
{
	u64 end;
	u32 idx;
	u64 real_updated_size = 0;

	BUG_ON(old_type == new_type);

	if (size > (ULLONG_MAX - start))
		size = ULLONG_MAX - start;

	end = start + size;
	printk(KERN_DEBUG "e820: update [mem %#010Lx-%#010Lx]", start, end - 1);
	e820_print_type(old_type);
	pr_cont(" ==>");
	e820_print_type(new_type);
	pr_cont("\n");

	for (idx = 0; idx < table->nr_entries; idx++) {
		struct e820_entry *entry = &table->entries[idx];
		u64 final_start, final_end;
		u64 entry_end;

		if (entry->type != old_type)
			continue;

		entry_end = entry->addr + entry->size;

		/* Is this entry completely covered by the new range? */
		if (entry->addr >= start && entry_end <= end) {
			entry->type = new_type;
			real_updated_size += entry->size;
			continue;
		}

		/* Is the new range completely covered by this entry? */
		if (entry->addr < start && entry_end > end) {
			__e820__range_add(table, start, size, new_type);
			__e820__range_add(table, end, entry_end - end, entry->type);
			entry->size = start - entry->addr;
			real_updated_size += size;
			continue;
		}

		/* Partially covered: */
		final_start = max(start, entry->addr);
		final_end = min(end, entry_end);
		if (final_start >= final_end)
			continue;

		__e820__range_add(table, final_start, final_end - final_start, new_type);

		real_updated_size += final_end - final_start;

		/*
		 * The leftover range could be the head or the tail of the
		 * entry, so update its size first:
		 */
		entry->size -= final_end - final_start;
		if (entry->addr < final_start)
			continue;

		entry->addr = final_end;
	}
	return real_updated_size;
}

__init u64 e820__range_update(u64 start, u64 size, enum e820_type old_type, enum e820_type new_type)
{
	return __e820__range_update(e820_table, start, size, old_type, new_type);
}
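
/*
 * Illustrative effect, with hypothetical addresses: updating the middle
 * of an entry splits it in three. Starting from:
 *
 *	[0x100000-0x3fffff] E820_TYPE_RAM
 *
 * the call:
 *
 *	e820__range_update(0x200000, 0x100000, E820_TYPE_RAM, E820_TYPE_RESERVED);
 *
 * returns 0x100000 (the number of bytes actually updated) and, after the
 * next e820__update_table() pass re-sorts the appended pieces, leaves:
 *
 *	[0x100000-0x1fffff] E820_TYPE_RAM
 *	[0x200000-0x2fffff] E820_TYPE_RESERVED
 *	[0x300000-0x3fffff] E820_TYPE_RAM
 */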

__init u64 e820__range_update_table(struct e820_table *t, u64 start, u64 size,
				    enum e820_type old_type, enum e820_type new_type)
{
	return __e820__range_update(t, start, size, old_type, new_type);
}

/* Remove a range of memory from the E820 table: */
__init void e820__range_remove(u64 start, u64 size, enum e820_type filter_type)
{
	u32 idx;
	u64 end;

	if (size > (ULLONG_MAX - start))
		size = ULLONG_MAX - start;

	end = start + size;
	printk(KERN_DEBUG "e820: remove [mem %#010Lx-%#010Lx]", start, end - 1);
	if (filter_type)
		e820_print_type(filter_type);
	pr_cont("\n");

	for (idx = 0; idx < e820_table->nr_entries; idx++) {
		struct e820_entry *entry = &e820_table->entries[idx];
		u64 final_start, final_end;
		u64 entry_end;

		if (filter_type && entry->type != filter_type)
			continue;

		entry_end = entry->addr + entry->size;

		/* Is this entry completely covered? */
		if (entry->addr >= start && entry_end <= end) {
			memset(entry, 0, sizeof(*entry));
			continue;
		}

		/* Is the removed range completely covered by this entry? */
		if (entry->addr < start && entry_end > end) {
			e820__range_add(end, entry_end - end, entry->type);
			entry->size = start - entry->addr;
			continue;
		}

		/* Partially covered: */
		final_start = max(start, entry->addr);
		final_end = min(end, entry_end);
		if (final_start >= final_end)
			continue;

		/*
		 * The leftover range could be the head or the tail of the
		 * entry, so update its size first:
		 */
		entry->size -= final_end - final_start;
		if (entry->addr < final_start)
			continue;

		entry->addr = final_end;
	}
}

__init void e820__update_table_print(void)
{
	if (e820__update_table(e820_table))
		return;

	pr_info("modified physical RAM map:\n");
	e820__print_table("modified");
}

__init static void e820__update_table_kexec(void)
{
	e820__update_table(e820_table_kexec);
}

#define MAX_GAP_END	SZ_4G

/*
 * Search for a gap in the E820 memory space from 0 to MAX_GAP_END (4GB).
 */
__init static int e820_search_gap(unsigned long *max_gap_start, unsigned long *max_gap_size)
{
	struct e820_entry *entry;
	u64 range_end_prev = 0;
	int found = 0;
	u32 idx;

	for (idx = 0; idx < e820_table->nr_entries; idx++) {
		u64 range_start, range_end;

		entry = e820_table->entries + idx;
		range_start = entry->addr;
		range_end = entry->addr + entry->size;

		/* Process any gap before this entry: */
		if (range_start > range_end_prev) {
			u64 gap_start = range_end_prev;
			u64 gap_end = range_start;
			u64 gap_size;

			if (gap_start < MAX_GAP_END) {
				/* Make sure the entirety of the gap is below MAX_GAP_END: */
				gap_end = min(gap_end, MAX_GAP_END);
				gap_size = gap_end - gap_start;

				if (gap_size >= *max_gap_size) {
					*max_gap_start = gap_start;
					*max_gap_size = gap_size;
					found = 1;
				}
			}
		}

		range_end_prev = range_end;
	}

	/* Is there a usable gap beyond the last entry? */
	if (entry->addr + entry->size < MAX_GAP_END) {
		u64 gap_start = entry->addr + entry->size;
		u64 gap_size = MAX_GAP_END - gap_start;

		if (gap_size >= *max_gap_size) {
			*max_gap_start = gap_start;
			*max_gap_size = gap_size;
			found = 1;
		}
	}

	return found;
}
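
/*
 * Illustrative run, with a hypothetical two-entry map:
 *
 *	[0x00000000-0x0009ffff] RAM
 *	[0x00100000-0x7fffffff] RAM
 *
 * The gap before the second entry, [0xa0000-0xfffff], is only 384 KiB -
 * smaller than the 4 MiB minimum that e820__setup_pci_gap() passes in -
 * while the trailing gap [0x80000000-0xffffffff] is 2 GiB and wins.
 */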

/*
 * Search for the biggest gap in the low 32 bits of the E820
 * memory space. We pass this space to the PCI subsystem, so
 * that it can assign MMIO resources to hotplug or unconfigured
 * devices in it.
 *
 * Hopefully the BIOS left enough space for us.
 */
__init void e820__setup_pci_gap(void)
{
	unsigned long max_gap_start, max_gap_size;
	int found;

	/* The minimum eligible gap size is 4MB: */
	max_gap_size = SZ_4M;
	found = e820_search_gap(&max_gap_start, &max_gap_size);

	if (!found) {
#ifdef CONFIG_X86_64
		max_gap_start = (max_pfn << PAGE_SHIFT) + SZ_1M;
		pr_err("Cannot find an available gap in the 32-bit address range\n");
		pr_err("PCI devices with unassigned 32-bit BARs may not work!\n");
#else
		max_gap_start = SZ_256M;
#endif
	}

	/*
	 * e820__reserve_resources_late() protects stolen RAM already:
	 */
	pci_mem_start = max_gap_start;

	pr_info("[gap %#010lx-%#010lx] available for PCI devices\n",
		max_gap_start, max_gap_start + max_gap_size - 1);
}

/*
 * Called late during init, in free_initmem().
 *
 * The initial e820_table and e820_table_kexec are largish __initdata arrays.
 *
 * Copy them to (usually much smaller) dynamically allocated areas that are
 * sized precisely after the number of e820 entries.
 *
 * This is done after we've performed all the fixes and tweaks to the tables.
 * All functions which modify them are __init functions, which won't exist
 * after free_initmem().
 */
__init void e820__reallocate_tables(void)
{
	struct e820_table *n;
	int size;

	size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table->nr_entries;
	n = kmemdup(e820_table, size, GFP_KERNEL);
	BUG_ON(!n);
	e820_table = n;

	size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table_kexec->nr_entries;
	n = kmemdup(e820_table_kexec, size, GFP_KERNEL);
	BUG_ON(!n);
	e820_table_kexec = n;

	size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table_firmware->nr_entries;
	n = kmemdup(e820_table_firmware, size, GFP_KERNEL);
	BUG_ON(!n);
	e820_table_firmware = n;
}
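
/*
 * For example, with a hypothetical 20-entry table the above allocates
 * offsetof(struct e820_table, entries) + 20 * sizeof(struct e820_entry)
 * bytes: just the header plus 20 slots, instead of the E820_MAX_ENTRIES
 * slots of the static __initdata tables.
 */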

/*
 * Because of the small fixed size of struct boot_params, only the first
 * 128 E820 memory entries are passed to the kernel via boot_params.e820_table,
 * the remaining (if any) entries are passed via the SETUP_E820_EXT node of
 * struct setup_data, which is parsed here.
 */
__init void e820__memory_setup_extended(u64 phys_addr, u32 data_len)
{
	int entries;
	struct boot_e820_entry *extmap;
	struct setup_data *sdata;

	sdata = early_memremap(phys_addr, data_len);
	entries = sdata->len / sizeof(*extmap);
	extmap = (struct boot_e820_entry *)(sdata->data);

	append_e820_table(extmap, entries);
	e820__update_table(e820_table);

	memcpy(e820_table_kexec, e820_table, sizeof(*e820_table_kexec));
	memcpy(e820_table_firmware, e820_table, sizeof(*e820_table_firmware));

	early_memunmap(sdata, data_len);
	pr_info("extended physical RAM map:\n");
	e820__print_table("extended");
}

/*
 * Find the ranges of physical addresses that do not correspond to
 * E820 RAM areas and register the corresponding pages as 'nosave' for
 * hibernation (32-bit) or software suspend and suspend to RAM (64-bit).
 *
 * This function requires the E820 map to be sorted and without any
 * overlapping entries.
 */
__init void e820__register_nosave_regions(unsigned long limit_pfn)
{
	u32 idx;
	u64 last_addr = 0;

	for (idx = 0; idx < e820_table->nr_entries; idx++) {
		struct e820_entry *entry = &e820_table->entries[idx];

		if (entry->type != E820_TYPE_RAM)
			continue;

		if (last_addr < entry->addr)
			register_nosave_region(PFN_DOWN(last_addr), PFN_UP(entry->addr));

		last_addr = entry->addr + entry->size;
	}

	register_nosave_region(PFN_DOWN(last_addr), limit_pfn);
}
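
/*
 * Illustrative outcome, for a hypothetical map with RAM at
 * [0x0-0x9ffff] and [0x100000-0x7fffffff] and limit_pfn == 0x80000:
 * the only hole is [0xa0000-0xfffff], so PFNs [0xa0, 0x100) are
 * registered as 'nosave', and the final call covers nothing because
 * the last RAM entry reaches the limit exactly.
 */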

#ifdef CONFIG_ACPI
/*
 * Register ACPI NVS memory regions, so that we can save/restore them during
 * hibernation and the subsequent resume:
 */
__init static int e820__register_nvs_regions(void)
{
	u32 idx;

	for (idx = 0; idx < e820_table->nr_entries; idx++) {
		struct e820_entry *entry = &e820_table->entries[idx];

		if (entry->type == E820_TYPE_NVS)
			acpi_nvs_register(entry->addr, entry->size);
	}

	return 0;
}
core_initcall(e820__register_nvs_regions);
#endif

/*
 * Allocate the requested number of bytes with the requested alignment
 * and return (the physical address of) the allocation to the caller. Also
 * register this range in the 'kexec' E820 table as a reserved range.
 *
 * This allows kexec to fake a new mptable, as if it came from the real
 * system.
 */
__init u64 e820__memblock_alloc_reserved(u64 size, u64 align)
{
	u64 addr;

	addr = memblock_phys_alloc(size, align);
	if (addr) {
		e820__range_update_table(e820_table_kexec, addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED);
		pr_info("update e820_table_kexec for e820__memblock_alloc_reserved()\n");
		e820__update_table_kexec();
	}

	return addr;
}

#ifdef CONFIG_X86_32
# ifdef CONFIG_X86_PAE
#  define MAX_ARCH_PFN		(1ULL<<(36-PAGE_SHIFT))
# else
#  define MAX_ARCH_PFN		(1ULL<<(32-PAGE_SHIFT))
# endif
#else /* CONFIG_X86_32 */
# define MAX_ARCH_PFN MAXMEM>>PAGE_SHIFT
#endif
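
/*
 * Worked out for PAGE_SHIFT == 12: a PAE kernel can address 2^36 bytes
 * (64 GB), so MAX_ARCH_PFN is 1ULL << 24 == 0x1000000; a non-PAE 32-bit
 * kernel gets 1ULL << 20 == 0x100000, i.e. the 4 GB address space limit.
 */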

/*
 * Find the highest page frame number we have available
 */
__init static unsigned long e820__end_ram_pfn(unsigned long limit_pfn)
{
	u32 idx;
	unsigned long last_pfn = 0;
	unsigned long max_arch_pfn = MAX_ARCH_PFN;

	for (idx = 0; idx < e820_table->nr_entries; idx++) {
		struct e820_entry *entry = &e820_table->entries[idx];
		unsigned long start_pfn;
		unsigned long end_pfn;

		if (entry->type != E820_TYPE_RAM &&
		    entry->type != E820_TYPE_ACPI)
			continue;

		start_pfn = entry->addr >> PAGE_SHIFT;
		end_pfn = (entry->addr + entry->size) >> PAGE_SHIFT;

		if (start_pfn >= limit_pfn)
			continue;
		if (end_pfn > limit_pfn) {
			last_pfn = limit_pfn;
			break;
		}
		if (end_pfn > last_pfn)
			last_pfn = end_pfn;
	}

	if (last_pfn > max_arch_pfn)
		last_pfn = max_arch_pfn;

	pr_info("last_pfn = %#lx max_arch_pfn = %#lx\n",
		last_pfn, max_arch_pfn);
	return last_pfn;
}

__init unsigned long e820__end_of_ram_pfn(void)
{
	return e820__end_ram_pfn(MAX_ARCH_PFN);
}

__init unsigned long e820__end_of_low_ram_pfn(void)
{
	return e820__end_ram_pfn(1UL << (32 - PAGE_SHIFT));
}

__initdata static int userdef;

/* The "mem=nopentium" boot option disables 4MB page tables on 32-bit kernels: */
__init static int parse_memopt(char *p)
{
	u64 mem_size;

	if (!p)
		return -EINVAL;

	if (!strcmp(p, "nopentium")) {
#ifdef CONFIG_X86_32
		setup_clear_cpu_cap(X86_FEATURE_PSE);
		return 0;
#else
		pr_warn("mem=nopentium ignored! (only supported on x86_32)\n");
		return -EINVAL;
#endif
	}

	userdef = 1;
	mem_size = memparse(p, &p);

	/* Don't remove all memory when getting "mem={invalid}" parameter: */
	if (mem_size == 0)
		return -EINVAL;

	e820__range_remove(mem_size, ULLONG_MAX - mem_size, E820_TYPE_RAM);

#ifdef CONFIG_MEMORY_HOTPLUG
	max_mem_size = mem_size;
#endif

	return 0;
}
early_param("mem", parse_memopt);
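
/*
 * For example, booting with "mem=512M" removes all E820_TYPE_RAM above
 * 512 MB via e820__range_remove() - non-RAM entries (MMIO, reserved)
 * above that limit are left in place.
 */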

__init static int parse_memmap_one(char *p)
{
	char *oldp;
	u64 start_at, mem_size;

	if (!p)
		return -EINVAL;

	if (!strncmp(p, "exactmap", 8)) {
		e820_table->nr_entries = 0;
		userdef = 1;
		return 0;
	}

	oldp = p;
	mem_size = memparse(p, &p);
	if (p == oldp)
		return -EINVAL;

	userdef = 1;
	if (*p == '@') {
		start_at = memparse(p+1, &p);
		e820__range_add(start_at, mem_size, E820_TYPE_RAM);
	} else if (*p == '#') {
		start_at = memparse(p+1, &p);
		e820__range_add(start_at, mem_size, E820_TYPE_ACPI);
	} else if (*p == '$') {
		start_at = memparse(p+1, &p);
		e820__range_add(start_at, mem_size, E820_TYPE_RESERVED);
	} else if (*p == '!') {
		start_at = memparse(p+1, &p);
		e820__range_add(start_at, mem_size, E820_TYPE_PRAM);
	} else if (*p == '%') {
		enum e820_type from = 0, to = 0;

		start_at = memparse(p + 1, &p);
		if (*p == '-')
			from = simple_strtoull(p + 1, &p, 0);
		if (*p == '+')
			to = simple_strtoull(p + 1, &p, 0);
		if (*p != '\0')
			return -EINVAL;
		if (from && to)
			e820__range_update(start_at, mem_size, from, to);
		else if (to)
			e820__range_add(start_at, mem_size, to);
		else
			e820__range_remove(start_at, mem_size, from);
	} else {
		e820__range_remove(mem_size, ULLONG_MAX - mem_size, E820_TYPE_RAM);
	}

	return *p == '\0' ? 0 : -EINVAL;
}
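
/*
 * Examples of the accepted syntax (see
 * Documentation/admin-guide/kernel-parameters.txt):
 *
 *	memmap=exactmap			start from an empty E820 table
 *	memmap=64K@0x12340000		mark a range as E820_TYPE_RAM
 *	memmap=64K#0x12340000		mark a range as E820_TYPE_ACPI
 *	memmap=64K$0x12340000		mark a range as E820_TYPE_RESERVED
 *	memmap=64K!0x12340000		mark a range as E820_TYPE_PRAM
 *	memmap=64K%0x12340000-1+2	convert a range from type 1 to type 2
 *	memmap=512M			limit usable RAM to 512 MB (like mem=)
 */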

__init static int parse_memmap_opt(char *str)
{
	while (str) {
		char *k = strchr(str, ',');

		if (k)
			*k++ = 0;

		parse_memmap_one(str);
		str = k;
	}

	return 0;
}
early_param("memmap", parse_memmap_opt);

/*
 * Called after parse_early_param(), after early parameters (such as mem=)
 * have been processed, in which case we already have an E820 table filled in
 * via the parameter callback function(s), but it's not sorted and printed yet:
 */
__init void e820__finish_early_params(void)
{
	if (userdef) {
		if (e820__update_table(e820_table) < 0)
			panic("Invalid user supplied memory map");

		pr_info("user-defined physical RAM map:\n");
		e820__print_table("user");
	}
}

__init static const char *e820_type_to_string(struct e820_entry *entry)
{
	switch (entry->type) {
	case E820_TYPE_RAM:		return "System RAM";
	case E820_TYPE_ACPI:		return "ACPI Tables";
	case E820_TYPE_NVS:		return "ACPI Non-volatile Storage";
	case E820_TYPE_UNUSABLE:	return "Unusable memory";
	case E820_TYPE_PRAM:		return "Persistent Memory (legacy)";
	case E820_TYPE_PMEM:		return "Persistent Memory";
	case E820_TYPE_RESERVED:	return "Reserved";
	case E820_TYPE_SOFT_RESERVED:	return "Soft Reserved";
	default:			return "Unknown E820 type";
	}
}

__init static unsigned long e820_type_to_iomem_type(struct e820_entry *entry)
{
	switch (entry->type) {
	case E820_TYPE_RAM:		return IORESOURCE_SYSTEM_RAM;
	case E820_TYPE_ACPI:		/* Fall-through: */
	case E820_TYPE_NVS:		/* Fall-through: */
	case E820_TYPE_UNUSABLE:	/* Fall-through: */
	case E820_TYPE_PRAM:		/* Fall-through: */
	case E820_TYPE_PMEM:		/* Fall-through: */
	case E820_TYPE_RESERVED:	/* Fall-through: */
	case E820_TYPE_SOFT_RESERVED:	/* Fall-through: */
	default:			return IORESOURCE_MEM;
	}
}

__init static unsigned long e820_type_to_iores_desc(struct e820_entry *entry)
{
	switch (entry->type) {
	case E820_TYPE_ACPI:		return IORES_DESC_ACPI_TABLES;
	case E820_TYPE_NVS:		return IORES_DESC_ACPI_NV_STORAGE;
	case E820_TYPE_PMEM:		return IORES_DESC_PERSISTENT_MEMORY;
	case E820_TYPE_PRAM:		return IORES_DESC_PERSISTENT_MEMORY_LEGACY;
	case E820_TYPE_RESERVED:	return IORES_DESC_RESERVED;
	case E820_TYPE_SOFT_RESERVED:	return IORES_DESC_SOFT_RESERVED;
	case E820_TYPE_RAM:		/* Fall-through: */
	case E820_TYPE_UNUSABLE:	/* Fall-through: */
	default:			return IORES_DESC_NONE;
	}
}

/*
 * We assign one resource entry for each E820 map entry:
 */
__initdata static struct resource *e820_res;

/*
 * Is this a device address region that should not be marked busy?
 * (Versus system address regions that we register & lock early.)
 */
__init static bool e820_device_region(enum e820_type type, struct resource *res)
{
	/* This is the legacy BIOS/DOS ROM-shadow + MMIO region: */
	if (res->start < SZ_1M)
		return false;

	/*
	 * Treat persistent memory and other special memory ranges like
	 * device memory, i.e. keep it available for exclusive use of a
	 * driver:
	 */
	switch (type) {
	case E820_TYPE_RESERVED:
	case E820_TYPE_SOFT_RESERVED:
	case E820_TYPE_PRAM:
	case E820_TYPE_PMEM:
		return true;
	case E820_TYPE_RAM:
	case E820_TYPE_ACPI:
	case E820_TYPE_NVS:
	case E820_TYPE_UNUSABLE:
	default:
		return false;
	}
}

/*
 * Mark E820 system regions as busy for the resource manager:
 */
__init void e820__reserve_resources(void)
{
	u32 idx;
	struct resource *res;
	u64 end;

	res = memblock_alloc_or_panic(sizeof(*res) * e820_table->nr_entries,
				      SMP_CACHE_BYTES);
	e820_res = res;

	for (idx = 0; idx < e820_table->nr_entries; idx++) {
		struct e820_entry *entry = e820_table->entries + idx;

		/* Skip entries whose end does not fit into resource_size_t: */
		end = entry->addr + entry->size - 1;
		if (end != (resource_size_t)end) {
			res++;
			continue;
		}
		res->start = entry->addr;
		res->end   = end;
		res->name  = e820_type_to_string(entry);
		res->flags = e820_type_to_iomem_type(entry);
		res->desc  = e820_type_to_iores_desc(entry);

		/*
		 * Skip and don't register device regions that could conflict
		 * with PCI device BAR resources. They get inserted later, in
		 * pcibios_resource_survey() -> e820__reserve_resources_late():
		 */
		if (!e820_device_region(entry->type, res)) {
			res->flags |= IORESOURCE_BUSY;
			insert_resource(&iomem_resource, res);
		}
		res++;
	}

	/* Expose the kexec e820 table to sysfs: */
	for (idx = 0; idx < e820_table_kexec->nr_entries; idx++) {
		struct e820_entry *entry = e820_table_kexec->entries + idx;

		firmware_map_add_early(entry->addr, entry->addr + entry->size, e820_type_to_string(entry));
	}
}

/*
 * How much should we pad the end of RAM, depending on where it is?
 */
__init static unsigned long ram_alignment(resource_size_t pos)
{
	unsigned long mb = pos >> 20;

	/* To 64kB in the first megabyte */
	if (!mb)
		return 64*1024;

	/* To 1MB in the first 16MB */
	if (mb < 16)
		return 1024*1024;

	/* To 64MB for anything above that */
	return 64*1024*1024;
}
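
/*
 * For example: a RAM entry ending at 0x3ffe0000 is padded up to the next
 * 64 MB boundary, 0x40000000, by e820__reserve_resources_late() below, so
 * the [0x3ffe0000-0x3fffffff] tail cannot be claimed by a device BAR.
 */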

#define MAX_RESOURCE_SIZE ((resource_size_t)-1)

__init void e820__reserve_resources_late(void)
{
	/*
	 * Register device address regions listed in the E820 map,
	 * these can be claimed by device drivers later on:
	 */
	for (u32 idx = 0; idx < e820_table->nr_entries; idx++) {
		struct resource *res = e820_res + idx;

		/* Skip already-added or uninitialized resources: */
		if (res->parent || !res->end)
			continue;

		/* Set aside soft-reserved resources for driver consideration: */
		if (res->desc == IORES_DESC_SOFT_RESERVED) {
			insert_resource_expand_to_fit(&soft_reserve_resource, res);
		} else {
			/* Publish the rest immediately: */
			insert_resource_expand_to_fit(&iomem_resource, res);
		}
	}

	/*
	 * Create additional 'gaps' at the end of RAM regions,
	 * rounding them up to 64k/1MB/64MB boundaries, should
	 * they be weirdly sized, and register extra, locked
	 * resource regions for them, to make sure drivers
	 * won't claim those addresses.
	 *
	 * These are basically blind guesses and heuristics to
	 * avoid resource conflicts with broken firmware that
	 * doesn't properly list 'stolen RAM' as a system region
	 * in the E820 map.
	 */
	for (u32 idx = 0; idx < e820_table->nr_entries; idx++) {
		struct e820_entry *entry = &e820_table->entries[idx];
		u64 start, end;

		if (entry->type != E820_TYPE_RAM)
			continue;

		start = entry->addr + entry->size;
		end = round_up(start, ram_alignment(start)) - 1;
		if (end > MAX_RESOURCE_SIZE)
			end = MAX_RESOURCE_SIZE;
		if (start >= end)
			continue;

		pr_info("e820: register RAM buffer resource [mem %#010llx-%#010llx]\n", start, end);
		reserve_region_with_split(&iomem_resource, start, end, "RAM buffer");
	}
}

/*
 * Pass the firmware (bootloader) E820 map to the kernel and process it:
 */
__init char *e820__memory_setup_default(void)
{
	char *who = "BIOS-e820";

	/*
	 * Try to copy the BIOS-supplied E820 map.
	 *
	 * Otherwise fake a memory map: one section from 0K to 640K,
	 * the next section from 1MB to the appropriate memory size.
	 */
	if (append_e820_table(boot_params.e820_table, boot_params.e820_entries) < 0) {
		u64 mem_size;

		/* Compare results from other methods and take the one that gives more RAM: */
		if (boot_params.alt_mem_k < boot_params.screen_info.ext_mem_k) {
			mem_size = boot_params.screen_info.ext_mem_k;
			who = "BIOS-88";
		} else {
			mem_size = boot_params.alt_mem_k;
			who = "BIOS-e801";
		}

		e820_table->nr_entries = 0;
		e820__range_add(0, LOWMEMSIZE(), E820_TYPE_RAM);
		e820__range_add(HIGH_MEMORY, mem_size << 10, E820_TYPE_RAM);
	}

	/* We just appended a lot of ranges, sanitize the table: */
	e820__update_table(e820_table);

	return who;
}

/*
 * Calls e820__memory_setup_default() in essence to pick up the firmware/bootloader
 * E820 map - with an optional platform quirk available for virtual platforms
 * to override this method of boot environment processing:
 */
__init void e820__memory_setup(void)
{
	char *who;

	/* This is a firmware interface ABI - make sure we don't break it: */
	BUILD_BUG_ON(sizeof(struct boot_e820_entry) != 20);

	who = x86_init.resources.memory_setup();

	memcpy(e820_table_kexec, e820_table, sizeof(*e820_table_kexec));
	memcpy(e820_table_firmware, e820_table, sizeof(*e820_table_firmware));

	pr_info("BIOS-provided physical RAM map:\n");
	e820__print_table(who);
}

__init void e820__memblock_setup(void)
{
	u32 idx;
	u64 end;

#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Memory used by the kernel cannot be hot-removed because Linux
	 * cannot migrate the kernel pages. When memory hotplug is
	 * enabled, we should prevent memblock from allocating memory
	 * for the kernel.
	 *
	 * ACPI SRAT records all hotpluggable memory ranges. But before
	 * SRAT is parsed, we don't know about it.
	 *
	 * The kernel image is loaded into memory at a very early time. We
	 * cannot prevent this anyway. So on NUMA systems, we set any
	 * node the kernel resides in as un-hotpluggable.
	 *
	 * Since on modern servers one node could have double-digit
	 * gigabytes of memory, we can assume the memory around the kernel
	 * image is also un-hotpluggable. So before SRAT is parsed, just
	 * allocate memory near the kernel image to try our best to keep
	 * the kernel away from hotpluggable memory.
	 */
	if (movable_node_is_enabled())
		memblock_set_bottom_up(true);
#endif

	/*
	 * At this point only the first megabyte is mapped for sure, the
	 * rest of the memory cannot be used for memblock resizing
	 */
	memblock_set_current_limit(ISA_END_ADDRESS);

	/*
	 * The bootstrap memblock region count maximum is 128 entries
	 * (INIT_MEMBLOCK_REGIONS), but EFI might pass us more E820 entries
	 * than that - so allow memblock resizing.
	 *
	 * This is safe, because this call happens pretty late during x86 setup,
	 * so we know about reserved memory regions already. (This is important
	 * so that memblock resizing does not stomp over reserved areas.)
	 */
	memblock_allow_resize();

	for (idx = 0; idx < e820_table->nr_entries; idx++) {
		struct e820_entry *entry = &e820_table->entries[idx];

		/* Skip entries whose end does not fit into resource_size_t: */
		end = entry->addr + entry->size;
		if (end != (resource_size_t)end)
			continue;

		if (entry->type == E820_TYPE_SOFT_RESERVED)
			memblock_reserve(entry->addr, entry->size);

		if (entry->type != E820_TYPE_RAM)
			continue;

		memblock_add(entry->addr, entry->size);
	}

	/*
	 * At this point memblock is only allowed to allocate from memory
	 * below 1M (aka ISA_END_ADDRESS) up until the direct map is completely
	 * set up in init_mem_mapping().
	 *
	 * KHO kernels are special and use only scratch memory for memblock
	 * allocations, but memory below 1M is ignored by the kernel after
	 * early boot and cannot be naturally marked as scratch.
	 *
	 * To allow allocation of the real-mode trampoline and a few (if any)
	 * other very early allocations from below 1M, forcibly mark the
	 * memory below 1M as scratch.
	 *
	 * After the real mode trampoline is allocated, we clear that scratch
	 * marking.
	 */
	memblock_mark_kho_scratch(0, SZ_1M);

	/*
	 * 32-bit systems are limited to 4GB of memory even with HIGHMEM and
	 * to even less without it.
	 * Discard memory after max_pfn - the actual limit detected at runtime.
	 */
	if (IS_ENABLED(CONFIG_X86_32))
		memblock_remove(PFN_PHYS(max_pfn), -1);

	/* Throw away partial pages: */
	memblock_trim_memory(PAGE_SIZE);

	memblock_dump_all();
}