Lines matching the full-text search terms "long", "ram" and "code" (matches from arch/x86/kernel/e820.c)

1 // SPDX-License-Identifier: GPL-2.0-only
10 * allocation code routines via a platform independent interface (memblock, etc.).
16 #include <linux/firmware-map.h>
26 * - 'e820_table_firmware': the original firmware version passed to us by the
27 * bootloader - not modified by the kernel. It is composed of two parts:
31 * - inform the user about the firmware's notion of memory layout
34 * - the hibernation code uses it to generate a kernel-independent CRC32
37 * - 'e820_table_kexec': a slightly modified (by the kernel) firmware version
38 * passed to us by the bootloader - the major difference between
45 * - kexec, which is a bootloader in disguise, uses the original E820
46 * layout to pass to the kexec-ed kernel. This way the original kernel
47 * can have a restricted E820 map while the kexec()-ed kexec-kernel
48 * can have access to full memory - etc.
50 * - 'e820_table': this is the main E820 table that is massaged by the
51 * low level x86 platform code, or modified by boot parameters, before
55 * information its role stops - modifying it has no effect and does not get
56 * re-propagated. So its main role is a temporary bootstrap storage of firmware
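
The three flavors described above ('e820_table_firmware', 'e820_table_kexec' and 'e820_table') share one layout: a count plus a fixed-size array of (addr, size, type) entries. As a reading aid for the fragments below, here is a minimal user-space mock of that layout; the field names mirror the kernel's struct e820_entry / struct e820_table, but the entry limit, the enum values and the sample ranges are illustrative only.

/* Minimal user-space mock of the E820 table layout (illustrative, not kernel code). */
#include <stdint.h>
#include <stdio.h>

enum e820_type {                        /* subset of the real enum; values illustrative */
        E820_TYPE_RAM      = 1,
        E820_TYPE_RESERVED = 2,
        E820_TYPE_ACPI     = 3,
        E820_TYPE_NVS      = 4,
};

struct e820_entry {
        uint64_t addr;                  /* start of the physical range */
        uint64_t size;                  /* length in bytes             */
        enum e820_type type;
};

#define E820_MAX_ENTRIES 128            /* illustrative bound; the kernel's limit is config-dependent */

struct e820_table {
        uint32_t nr_entries;
        struct e820_entry entries[E820_MAX_ENTRIES];
};

int main(void)
{
        struct e820_table t = {
                .nr_entries = 2,
                .entries = {
                        { 0x0000000000000000ULL, 0x000000000009fc00ULL, E820_TYPE_RAM },
                        { 0x0000000000100000ULL, 0x000000003ff00000ULL, E820_TYPE_RAM },
                },
        };

        /* Same "[mem 0x...-0x...]" style as the print loop shown further below: */
        for (uint32_t i = 0; i < t.nr_entries; i++)
                printf("[mem 0x%016llx-0x%016llx] type %d\n",
                       (unsigned long long)t.entries[i].addr,
                       (unsigned long long)(t.entries[i].addr + t.entries[i].size - 1),
                       t.entries[i].type);
        return 0;
}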
67 /* For PCI or other memory-mapped resources */
68 unsigned long pci_mem_start = 0xaeedbabe;
82 for (i = 0; i < table->nr_entries; i++) {
83 struct e820_entry *entry = &table->entries[i];
85 if (type && entry->type != type)
87 if (entry->addr >= end || entry->addr + entry->size <= start)
110 * not-overlapping (at least for the range specified), which is the case normally.
117 for (i = 0; i < e820_table->nr_entries; i++) {
118 struct e820_entry *entry = &e820_table->entries[i];
120 if (type && entry->type != type)
124 if (entry->addr >= end || entry->addr + entry->size <= start)
131 if (entry->addr <= start)
132 start = entry->addr + entry->size;
160 return entry ? entry->type : -EINVAL;
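
Two lookup helpers are visible above: the first loop (lines 82-87) only asks whether any entry of the requested type overlaps [start, end), while the second (lines 117-132) implements the "fully mapped" check the comment at line 110 describes: it relies on a sorted, non-overlapping table and keeps pushing 'start' past every entry that covers it, declaring success once 'start' reaches 'end'. A stand-alone sketch of that walk, with mock types and made-up ranges:

/*
 * User-space sketch of the "is [start, end) fully mapped as 'type'?" walk.
 * It assumes, like the comment at line 110, that the entries are sorted and
 * non-overlapping. Types and ranges are mocked, not the kernel's.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct entry { uint64_t addr, size; int type; };

static bool range_fully_mapped(const struct entry *e, int n,
                               uint64_t start, uint64_t end, int type)
{
        for (int i = 0; i < n; i++) {
                if (type && e[i].type != type)
                        continue;
                /* No overlap with [start, end) at all? */
                if (e[i].addr >= end || e[i].addr + e[i].size <= start)
                        continue;
                /* Entry covers the current 'start': everything up to its end is fine. */
                if (e[i].addr <= start)
                        start = e[i].addr + e[i].size;
                /* Once 'start' has been pushed to or past 'end', the range is covered. */
                if (start >= end)
                        return true;
        }
        return false;
}

int main(void)
{
        const struct entry map[] = {
                { 0x00000000, 0x0009fc00, 1 },  /* low RAM  */
                { 0x00100000, 0x3ff00000, 1 },  /* high RAM */
        };

        printf("%d\n", range_fully_mapped(map, 2, 0x00100000, 0x00200000, 1)); /* 1 */
        printf("%d\n", range_fully_mapped(map, 2, 0x0009f000, 0x00101000, 1)); /* 0: hole below 1 MiB */
        return 0;
}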
168 int x = table->nr_entries;
170 if (x >= ARRAY_SIZE(table->entries)) {
171 pr_err("too many entries; ignoring [mem %#010llx-%#010llx]\n",
172 start, start + size - 1);
176 table->entries[x].addr = start;
177 table->entries[x].size = size;
178 table->entries[x].type = type;
179 table->nr_entries++;
207 for (i = 0; i < e820_table->nr_entries; i++) {
208 pr_info("%s: [mem %#018Lx-%#018Lx] ",
210 e820_table->entries[i].addr,
211 e820_table->entries[i].addr + e820_table->entries[i].size - 1);
213 e820_print_type(e820_table->entries[i].type);
238 * successfully 'sanitized' the map entries passed in, and is -1
283 unsigned long long addr;
302 if (ap->addr != bp->addr)
303 return ap->addr > bp->addr ? 1 : -1;
305 return (ap->addr != ap->entry->addr) - (bp->addr != bp->entry->addr);
324 struct e820_entry *entries = table->entries;
325 u32 max_nr_entries = ARRAY_SIZE(table->entries);
327 unsigned long long last_addr;
332 if (table->nr_entries < 2)
333 return -1;
335 BUG_ON(table->nr_entries > max_nr_entries);
338 for (i = 0; i < table->nr_entries; i++) {
340 return -1;
343 /* Create pointers for initial change-point information (for sorting): */
344 for (i = 0; i < 2 * table->nr_entries; i++)
348 * Record all known change-points (starting and ending addresses),
352 for (i = 0; i < table->nr_entries; i++) {
354 change_point[chg_idx]->addr = entries[i].addr;
355 change_point[chg_idx++]->entry = &entries[i];
356 change_point[chg_idx]->addr = entries[i].addr + entries[i].size;
357 change_point[chg_idx++]->entry = &entries[i];
362 /* Sort change-point list by memory addresses (low -> high): */
371 /* Loop through change-points, determining effect on the new map: */
374 if (change_point[chg_idx]->addr == change_point[chg_idx]->entry->addr) {
376 overlap_list[overlap_entries++] = change_point[chg_idx]->entry;
380 if (overlap_list[i] == change_point[chg_idx]->entry)
381 overlap_list[i] = overlap_list[overlap_entries-1];
383 overlap_entries--;
387 * "type" to use (larger value takes precedence --
392 if (overlap_list[i]->type > current_type)
393 current_type = overlap_list[i]->type;
399 new_entries[new_nr_entries].size = change_point[chg_idx]->addr - last_addr;
400 /* Move forward only if the new size was non-zero: */
407 new_entries[new_nr_entries].addr = change_point[chg_idx]->addr;
409 last_addr = change_point[chg_idx]->addr;
417 table->nr_entries = new_nr_entries;
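
The fragments from lines 283-417 are the table sanitizer: every entry contributes two "change points" (its start and its end), the points are sorted by address with starts ordered before ends at equal addresses (per the comparator shown at lines 302-305), and a single sweep keeps a list of currently-overlapping entries, emitting a new output entry whenever the effective type (the largest type in the overlap list) changes. The toy below reimplements that sweep in user space so the mechanism can be run and inspected; it uses small fixed arrays and qsort() instead of the kernel's sort(), skips the output-overflow handling, and is not the kernel code.

/*
 * Toy user-space version of the change-point sweep described above.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct entry { uint64_t addr, size; int type; };
struct change_point { uint64_t addr; const struct entry *entry; };

static int cp_cmp(const void *a, const void *b)
{
        const struct change_point *ap = a, *bp = b;

        if (ap->addr != bp->addr)
                return ap->addr > bp->addr ? 1 : -1;
        /* At equal addresses, "start" points sort before "end" points: */
        return (ap->addr != ap->entry->addr) - (bp->addr != bp->entry->addr);
}

static int sanitize(const struct entry *in, int n, struct entry *out)
{
        struct change_point cp[2 * 16];
        const struct entry *overlap[16];
        int nr_cp = 0, nr_overlap = 0, nr_out = 0;
        int cur_type, last_type = 0;
        uint64_t last_addr = 0;

        for (int i = 0; i < n; i++) {
                cp[nr_cp++] = (struct change_point){ in[i].addr, &in[i] };
                cp[nr_cp++] = (struct change_point){ in[i].addr + in[i].size, &in[i] };
        }
        qsort(cp, nr_cp, sizeof(cp[0]), cp_cmp);

        for (int i = 0; i < nr_cp; i++) {
                if (cp[i].addr == cp[i].entry->addr) {
                        overlap[nr_overlap++] = cp[i].entry;      /* a range starts here */
                } else {
                        for (int j = 0; j < nr_overlap; j++)      /* a range ends here   */
                                if (overlap[j] == cp[i].entry)
                                        overlap[j] = overlap[--nr_overlap];
                }

                cur_type = 0;                                     /* largest type wins   */
                for (int j = 0; j < nr_overlap; j++)
                        if (overlap[j]->type > cur_type)
                                cur_type = overlap[j]->type;

                if (cur_type != last_type) {
                        if (last_type) {
                                out[nr_out].size = cp[i].addr - last_addr;
                                if (out[nr_out].size)             /* skip zero-sized pieces */
                                        nr_out++;
                        }
                        if (cur_type) {
                                out[nr_out].addr = cp[i].addr;
                                out[nr_out].type = cur_type;
                                last_addr = cp[i].addr;
                        }
                        last_type = cur_type;
                }
        }
        return nr_out;
}

int main(void)
{
        /* A RAM entry (type 1) partly shadowed by a reserved entry (type 2): */
        const struct entry in[] = {
                { 0x0000, 0x10000, 1 },
                { 0x8000, 0x04000, 2 },
        };
        struct entry out[8];
        int n = sanitize(in, 2, out);

        /* Prints three pieces: 0x0-0x7fff (1), 0x8000-0xbfff (2), 0xc000-0xffff (1). */
        for (int i = 0; i < n; i++)
                printf("[0x%llx-0x%llx] type %d\n",
                       (unsigned long long)out[i].addr,
                       (unsigned long long)(out[i].addr + out[i].size - 1),
                       out[i].type);
        return 0;
}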
427 u64 start = entry->addr;
428 u64 size = entry->size;
429 u64 end = start + size - 1;
430 u32 type = entry->type;
432 /* Ignore the entry on 64-bit overflow: */
434 return -1;
439 nr_entries--;
447 * Sanity-check it while we're at it..
449 * If we're lucky and live on a modern system, the setup code
457 return -1;
471 if (size > (ULLONG_MAX - start))
472 size = ULLONG_MAX - start;
475 printk(KERN_DEBUG "e820: update [mem %#010Lx-%#010Lx] ", start, end - 1);
481 for (i = 0; i < table->nr_entries; i++) {
482 struct e820_entry *entry = &table->entries[i];
486 if (entry->type != old_type)
489 entry_end = entry->addr + entry->size;
492 if (entry->addr >= start && entry_end <= end) {
493 entry->type = new_type;
494 real_updated_size += entry->size;
499 if (entry->addr < start && entry_end > end) {
501 __e820__range_add(table, end, entry_end - end, entry->type);
502 entry->size = start - entry->addr;
508 final_start = max(start, entry->addr);
513 __e820__range_add(table, final_start, final_end - final_start, new_type);
515 real_updated_size += final_end - final_start;
521 entry->size -= final_end - final_start;
522 if (entry->addr < final_start)
525 entry->addr = final_end;
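
The update path above distinguishes three overlap cases: an entry entirely inside [start, end) is simply retyped (lines 492-494); an entry that entirely covers the range is split, with a new tail entry keeping the old type and the original shrunk to the head (lines 499-502); any partial overlap is clipped to the common slice, which is added with the new type while the original entry is trimmed (lines 508-525). The clipping itself is just max()/min() over the two ranges; a small stand-alone illustration with made-up addresses:

/*
 * The overlap clipping used by the update/remove paths above: the affected
 * slice of an entry is [max(start, entry_start), min(end, entry_end)).
 * Plain C with made-up addresses, for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }
static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
        uint64_t start = 0x000a0000, end = 0x00100000;          /* range being updated      */
        uint64_t entry_start = 0x00090000, entry_size = 0x20000;
        uint64_t entry_end = entry_start + entry_size;          /* entry partially overlaps */

        uint64_t final_start = max_u64(start, entry_start);
        uint64_t final_end   = min_u64(end, entry_end);

        if (final_start < final_end)
                printf("retype [0x%llx-0x%llx], 0x%llx bytes; the original entry shrinks by the same amount\n",
                       (unsigned long long)final_start,
                       (unsigned long long)(final_end - 1),
                       (unsigned long long)(final_end - final_start));
        return 0;
}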
548 if (size > (ULLONG_MAX - start))
549 size = ULLONG_MAX - start;
552 printk(KERN_DEBUG "e820: remove [mem %#010Lx-%#010Lx] ", start, end - 1);
557 for (i = 0; i < e820_table->nr_entries; i++) {
558 struct e820_entry *entry = &e820_table->entries[i];
562 if (check_type && entry->type != old_type)
565 entry_end = entry->addr + entry->size;
568 if (entry->addr >= start && entry_end <= end) {
569 real_removed_size += entry->size;
575 if (entry->addr < start && entry_end > end) {
576 e820__range_add(end, entry_end - end, entry->type);
577 entry->size = start - entry->addr;
583 final_start = max(start, entry->addr);
588 real_removed_size += final_end - final_start;
594 entry->size -= final_end - final_start;
595 if (entry->addr < final_start)
598 entry->addr = final_end;
608 pr_info("modified physical RAM map:\n");
622 static int __init e820_search_gap(unsigned long *gapstart, unsigned long *gapsize)
624 unsigned long long last = MAX_GAP_END;
625 int i = e820_table->nr_entries;
628 while (--i >= 0) {
629 unsigned long long start = e820_table->entries[i].addr;
630 unsigned long long end = start + e820_table->entries[i].size;
637 unsigned long gap = last - end;
661 unsigned long gapstart, gapsize;
670 pr_err("Cannot find an available gap in the 32-bit address range\n");
671 pr_err("PCI devices with unassigned 32-bit BARs may not work!\n");
678 * e820__reserve_resources_late() protects stolen RAM already:
682 pr_info("[mem %#010lx-%#010lx] available for PCI devices\n",
683 gapstart, gapstart + gapsize - 1);
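
The gap search above walks the (sorted, non-overlapping) table from the highest entry down, tracking the lowest start seen so far in 'last' (initialised to MAX_GAP_END, the 4 GiB limit) and remembering the largest hole between one entry's end and the region above it; the winning hole becomes the window advertised for 32-bit PCI BARs at lines 682-683. A toy user-space version of that scan, seeded with a minimum useful gap size as the caller appears to do; the entries are illustrative, not real firmware data.

/*
 * Toy version of the "largest gap below 4 GiB" scan: entries are assumed
 * sorted and non-overlapping, and the search is clipped at 4 GiB like
 * MAX_GAP_END above.
 */
#include <stdint.h>
#include <stdio.h>

struct entry { uint64_t addr, size; };

static int search_gap(const struct entry *e, int n,
                      uint64_t *gapstart, uint64_t *gapsize)
{
        uint64_t last = 1ULL << 32;     /* only consider space below 4 GiB */
        int found = 0;

        for (int i = n - 1; i >= 0; i--) {
                uint64_t start = e[i].addr;
                uint64_t end = start + e[i].size;

                if (last > end && last - end >= *gapsize) {
                        *gapsize = last - end;
                        *gapstart = end;
                        found = 1;
                }
                if (start < last)
                        last = start;
        }
        return found;
}

int main(void)
{
        const struct entry map[] = {
                { 0x00000000, 0x0009fc00 },     /* low RAM              */
                { 0x00100000, 0xbff00000 },     /* RAM up to 3 GiB      */
                { 0xfec00000, 0x00140000 },     /* high MMIO / firmware */
        };
        uint64_t gapstart = 0, gapsize = 0x400000;      /* minimum useful gap */

        if (search_gap(map, 3, &gapstart, &gapsize))
                printf("gap at 0x%llx, size 0x%llx\n",
                       (unsigned long long)gapstart, (unsigned long long)gapsize);
        return 0;
}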
703 size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table->nr_entries;
708 size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table_kexec->nr_entries;
713 size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table_firmware->nr_entries;
732 entries = sdata->len / sizeof(*extmap);
733 extmap = (struct boot_e820_entry *)(sdata->data);
742 pr_info("extended physical RAM map:\n");
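
Lines 703, 708 and 713 compute how many bytes of each table are actually live: the header up to the 'entries' member plus only the populated entries, rather than sizeof(struct e820_table), which would always include the maximal fixed-size array. A stand-alone illustration of that offsetof() idiom with a mock table type:

/*
 * The sizing idiom from lines 703-713: copy only the header plus the
 * populated part of the trailing fixed-size array. Mock types, illustrative.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry { uint64_t addr, size; uint32_t type; };
struct table { uint32_t nr_entries; struct entry entries[128]; };

int main(void)
{
        struct table src = { .nr_entries = 3 };
        size_t bytes = offsetof(struct table, entries) +
                       sizeof(struct entry) * src.nr_entries;
        struct table *dst = malloc(sizeof(*dst));

        if (!dst)
                return 1;
        memcpy(dst, &src, bytes);       /* far less than sizeof(src) for a short table */
        printf("copied %zu of %zu bytes\n", bytes, sizeof(src));
        free(dst);
        return 0;
}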
748 * E820 RAM areas and register the corresponding pages as 'nosave' for
749 * hibernation (32-bit) or software suspend and suspend to RAM (64-bit).
754 void __init e820__register_nosave_regions(unsigned long limit_pfn)
757 unsigned long pfn = 0;
759 for (i = 0; i < e820_table->nr_entries; i++) {
760 struct e820_entry *entry = &e820_table->entries[i];
762 if (pfn < PFN_UP(entry->addr))
763 register_nosave_region(pfn, PFN_UP(entry->addr));
765 pfn = PFN_DOWN(entry->addr + entry->size);
767 if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN)
768 register_nosave_region(PFN_UP(entry->addr), pfn);
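
The nosave registration above marks two kinds of page ranges as not-to-be-saved for hibernation: the holes between consecutive entries (lines 762-763) and any entry that is not usable RAM (lines 767-768). The page-frame rounding is what keeps the decision whole-page based: PFN_UP() rounds a start address up to the next page boundary and PFN_DOWN() rounds an end address down. A user-space restatement of that rounding, assuming 4 KiB pages:

/*
 * The page-frame rounding used above, assuming 4 KiB pages. PFN_UP() and
 * PFN_DOWN() exist in the kernel (include/linux/pfn.h); these are user-space
 * restatements for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PFN_UP(x)       (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)     ((x) >> PAGE_SHIFT)

int main(void)
{
        uint64_t addr = 0x9fc00, size = 0x400;  /* a sub-page region near the top of low RAM */

        printf("PFN_UP(0x%llx)   = %llu\n", (unsigned long long)addr,
               (unsigned long long)PFN_UP(addr));
        printf("PFN_DOWN(0x%llx) = %llu\n", (unsigned long long)(addr + size),
               (unsigned long long)PFN_DOWN(addr + size));
        return 0;
}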
784 for (i = 0; i < e820_table->nr_entries; i++) {
785 struct e820_entry *entry = &e820_table->entries[i];
787 if (entry->type == E820_TYPE_NVS)
788 acpi_nvs_register(entry->addr, entry->size);
820 # define MAX_ARCH_PFN (1ULL<<(36-PAGE_SHIFT))
822 # define MAX_ARCH_PFN (1ULL<<(32-PAGE_SHIFT))
831 static unsigned long __init e820__end_ram_pfn(unsigned long limit_pfn)
834 unsigned long last_pfn = 0;
835 unsigned long max_arch_pfn = MAX_ARCH_PFN;
837 for (i = 0; i < e820_table->nr_entries; i++) {
838 struct e820_entry *entry = &e820_table->entries[i];
839 unsigned long start_pfn;
840 unsigned long end_pfn;
842 if (entry->type != E820_TYPE_RAM &&
843 entry->type != E820_TYPE_ACPI)
846 start_pfn = entry->addr >> PAGE_SHIFT;
847 end_pfn = (entry->addr + entry->size) >> PAGE_SHIFT;
867 unsigned long __init e820__end_of_ram_pfn(void)
872 unsigned long __init e820__end_of_low_ram_pfn(void)
874 return e820__end_ram_pfn(1UL << (32 - PAGE_SHIFT));
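
The end-of-RAM helper above converts each usable entry to page-frame numbers and keeps the highest end, clamped both by the caller's limit_pfn (line 874 shows the "low RAM" variant passing the pfn of 4 GiB) and by MAX_ARCH_PFN. A compact user-space sketch of that clamping; the entry, the limits and the 'usable' flag are all illustrative:

/*
 * Sketch of the end-of-RAM pfn computation: take the highest end pfn of the
 * usable entries, clamped by limit_pfn and by an architectural maximum.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct entry { uint64_t addr, size; int usable; };

static uint64_t end_ram_pfn(const struct entry *e, int n,
                            uint64_t limit_pfn, uint64_t max_arch_pfn)
{
        uint64_t last_pfn = 0;

        for (int i = 0; i < n; i++) {
                if (!e[i].usable)
                        continue;

                uint64_t start_pfn = e[i].addr >> PAGE_SHIFT;
                uint64_t end_pfn = (e[i].addr + e[i].size) >> PAGE_SHIFT;

                if (start_pfn >= limit_pfn)
                        continue;
                if (end_pfn > limit_pfn)
                        end_pfn = limit_pfn;
                if (end_pfn > last_pfn)
                        last_pfn = end_pfn;
        }
        if (last_pfn > max_arch_pfn)
                last_pfn = max_arch_pfn;
        return last_pfn;
}

int main(void)
{
        const struct entry map[] = {
                { 0x00100000ULL, 0x13ff00000ULL, 1 },   /* RAM from 1 MiB up to 5 GiB */
        };

        /* Full scan vs. the "low RAM" variant limited to pfns below 4 GiB: */
        printf("end of RAM pfn:     0x%llx\n",
               (unsigned long long)end_ram_pfn(map, 1, UINT64_MAX, 1ULL << 40));
        printf("end of low RAM pfn: 0x%llx\n",
               (unsigned long long)end_ram_pfn(map, 1, 1ULL << (32 - PAGE_SHIFT), 1ULL << 40));
        return 0;
}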
885 /* The "mem=nopentium" boot option disables 4MB page tables on 32-bit kernels: */
891 return -EINVAL;
899 return -EINVAL;
908 return -EINVAL;
910 e820__range_remove(mem_size, ULLONG_MAX - mem_size, E820_TYPE_RAM, 1);
926 return -EINVAL;
929 e820_table->nr_entries = 0;
937 return -EINVAL;
956 if (*p == '-')
961 return -EINVAL;
971 e820__range_remove(mem_size, ULLONG_MAX - mem_size, E820_TYPE_RAM, 1);
974 return *p == '\0' ? 0 : -EINVAL;
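
The mem= handler parses its size argument (via memparse(), which accepts K/M/G-style suffixes) and then removes the E820_TYPE_RAM type from that boundary to the top of the address space, as the e820__range_remove() calls at lines 910 and 971 show; a plain 'memmap=nn' with no address or type qualifier behaves the same way. A toy version of the suffix parsing and the resulting call, simplified and illustrative (only K/M/G handled here):

/*
 * Toy "mem=" handling: parse a size with a K/M/G suffix (a simplified
 * stand-in for the kernel's memparse()) and show the range that would be
 * trimmed off the RAM type.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint64_t parse_size(const char *s)
{
        char *end;
        uint64_t v = strtoull(s, &end, 0);

        switch (*end) {
        case 'G': case 'g': v <<= 30; break;
        case 'M': case 'm': v <<= 20; break;
        case 'K': case 'k': v <<= 10; break;
        }
        return v;
}

int main(void)
{
        uint64_t mem_size = parse_size("512M");

        /* Mirrors the calls at lines 910 and 971 above: */
        printf("mem=512M -> e820__range_remove(0x%llx, ULLONG_MAX - 0x%llx, E820_TYPE_RAM, 1)\n",
               (unsigned long long)mem_size, (unsigned long long)mem_size);
        return 0;
}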
1017 pa_next = data->next;
1019 e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
1021 if (data->type == SETUP_INDIRECT) {
1022 len += data->len;
1030 indirect = (struct setup_indirect *)data->data;
1032 if (indirect->type != SETUP_INDIRECT)
1033 e820__range_update(indirect->addr, indirect->len,
1043 pr_info("extended physical RAM map:\n");
1058 pr_info("user-defined physical RAM map:\n");
1065 switch (entry->type) {
1066 case E820_TYPE_RESERVED_KERN: /* Fall-through: */
1067 case E820_TYPE_RAM: return "System RAM";
1069 case E820_TYPE_NVS: return "ACPI Non-volatile Storage";
1079 static unsigned long __init e820_type_to_iomem_type(struct e820_entry *entry)
1081 switch (entry->type) {
1082 case E820_TYPE_RESERVED_KERN: /* Fall-through: */
1084 case E820_TYPE_ACPI: /* Fall-through: */
1085 case E820_TYPE_NVS: /* Fall-through: */
1086 case E820_TYPE_UNUSABLE: /* Fall-through: */
1087 case E820_TYPE_PRAM: /* Fall-through: */
1088 case E820_TYPE_PMEM: /* Fall-through: */
1089 case E820_TYPE_RESERVED: /* Fall-through: */
1090 case E820_TYPE_SOFT_RESERVED: /* Fall-through: */
1095 static unsigned long __init e820_type_to_iores_desc(struct e820_entry *entry)
1097 switch (entry->type) {
1104 case E820_TYPE_RESERVED_KERN: /* Fall-through: */
1105 case E820_TYPE_RAM: /* Fall-through: */
1106 case E820_TYPE_UNUSABLE: /* Fall-through: */
1113 /* this is the legacy bios/dos rom-shadow + mmio region */
1114 if (res->start < (1ULL<<20))
1149 res = memblock_alloc_or_panic(sizeof(*res) * e820_table->nr_entries,
1153 for (i = 0; i < e820_table->nr_entries; i++) {
1154 struct e820_entry *entry = e820_table->entries + i;
1156 end = entry->addr + entry->size - 1;
1161 res->start = entry->addr;
1162 res->end = end;
1163 res->name = e820_type_to_string(entry);
1164 res->flags = e820_type_to_iomem_type(entry);
1165 res->desc = e820_type_to_iores_desc(entry);
1172 if (do_mark_busy(entry->type, res)) {
1173 res->flags |= IORESOURCE_BUSY;
1179 /* Expose the bootloader-provided memory layout to the sysfs. */
1180 for (i = 0; i < e820_table_firmware->nr_entries; i++) {
1181 struct e820_entry *entry = e820_table_firmware->entries + i;
1183 firmware_map_add_early(entry->addr, entry->addr + entry->size, e820_type_to_string(entry));
1188 * How much should we pad the end of RAM, depending on where it is?
1190 static unsigned long __init ram_alignment(resource_size_t pos)
1192 unsigned long mb = pos >> 20;
1206 #define MAX_RESOURCE_SIZE ((resource_size_t)-1)
1214 for (i = 0; i < e820_table->nr_entries; i++) {
1215 if (!res->parent && res->end)
1221 * Try to bump up RAM regions to reasonable boundaries, to
1222 * avoid stolen RAM:
1224 for (i = 0; i < e820_table->nr_entries; i++) {
1225 struct e820_entry *entry = &e820_table->entries[i];
1228 if (entry->type != E820_TYPE_RAM)
1231 start = entry->addr + entry->size;
1232 end = round_up(start, ram_alignment(start)) - 1;
1238 printk(KERN_DEBUG "e820: reserve RAM buffer [mem %#010llx-%#010llx]\n", start, end);
1239 reserve_region_with_split(&iomem_resource, start, end, "RAM buffer");
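
The loop at lines 1224-1239 pads the end of every RAM entry out to a position-dependent alignment and reserves the slack as a "RAM buffer", so that memory stolen by firmware just past the reported end of RAM cannot be handed out to devices. The sketch below mirrors that computation in user space; the alignment steps (64 KiB within the first MiB, 1 MiB up to 16 MiB, 64 MiB beyond) are an assumption based on the ram_alignment() helper, so treat them as illustrative rather than a specification:

/*
 * Sketch of the "RAM buffer" padding: round the end of a RAM entry up to a
 * position-dependent alignment and reserve the slack. round_up() is restated
 * for user space and requires a power-of-two alignment.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t ram_alignment(uint64_t pos)
{
        uint64_t mb = pos >> 20;

        if (!mb)
                return 64 * 1024;               /* 64 KiB steps in the first MiB */
        if (mb < 16)
                return 1024 * 1024;             /* 1 MiB steps up to 16 MiB      */
        return 64 * 1024 * 1024;                /* 64 MiB steps above that       */
}

static uint64_t round_up_pow2(uint64_t x, uint64_t align)
{
        return (x + align - 1) & ~(align - 1);
}

int main(void)
{
        uint64_t ram_end = 0x7fe00000;          /* RAM entry ends just short of 2 GiB */
        uint64_t buf_end = round_up_pow2(ram_end, ram_alignment(ram_end)) - 1;

        printf("reserve RAM buffer [mem 0x%08llx-0x%08llx]\n",
               (unsigned long long)ram_end, (unsigned long long)buf_end);
        return 0;
}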
1248 char *who = "BIOS-e820";
1251 * Try to copy the BIOS-supplied E820-map.
1253 * Otherwise fake a memory map; one section from 0k->640k,
1254 * the next section from 1mb->appropriate_mem_k
1259 /* Compare results from other methods and take the one that gives more RAM: */
1262 who = "BIOS-88";
1265 who = "BIOS-e801";
1268 e820_table->nr_entries = 0;
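
When no usable BIOS-e820 table was passed in, the code above falls back to the legacy BIOS-88 / BIOS-e801 size reports, takes whichever reports more RAM (line 1259), and fakes a two-entry map: conventional memory from 0 to 640 KiB and extended memory from 1 MiB upward, exactly as the comment at lines 1253-1254 says. A user-space sketch of building that fallback map with made-up report values:

/*
 * Sketch of the legacy fallback map: one entry for conventional memory below
 * 640 KiB and one for extended memory from 1 MiB upward, sized from the
 * larger of the two legacy BIOS reports (both in KiB). Mock struct and
 * made-up report values.
 */
#include <stdint.h>
#include <stdio.h>

struct entry { uint64_t addr, size; };

int main(void)
{
        uint64_t ext_mem_k = 65535;     /* BIOS-88 style report, capped around 64 MiB */
        uint64_t alt_mem_k = 261120;    /* BIOS-e801 style report: 255 MiB            */
        /* Compare the two methods and take the one that gives more RAM: */
        uint64_t mem_k = alt_mem_k > ext_mem_k ? alt_mem_k : ext_mem_k;

        const struct entry fake[2] = {
                { 0x00000000, 640 * 1024 },     /* 0 .. 640 KiB               */
                { 0x00100000, mem_k << 10 },    /* 1 MiB .. 1 MiB + mem_k KiB */
        };

        for (int i = 0; i < 2; i++)
                printf("[mem 0x%08llx-0x%08llx] usable\n",
                       (unsigned long long)fake[i].addr,
                       (unsigned long long)(fake[i].addr + fake[i].size - 1));
        return 0;
}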
1281 * E820 map - with an optional platform quirk available for virtual platforms
1288 /* This is a firmware interface ABI - make sure we don't break it: */
1296 pr_info("BIOS-provided physical RAM map:\n");
1308 * than that - so allow memblock resizing.
1316 for (i = 0; i < e820_table->nr_entries; i++) {
1317 struct e820_entry *entry = &e820_table->entries[i];
1319 end = entry->addr + entry->size;
1323 if (entry->type == E820_TYPE_SOFT_RESERVED)
1324 memblock_reserve(entry->addr, entry->size);
1326 if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN)
1329 memblock_add(entry->addr, entry->size);