Lines Matching +full:memory +full:- +full:map
1 // SPDX-License-Identifier: GPL-2.0
32 #include <xen/interface/memory.h>
35 #include <xen/hvc-console.h>
36 #include "xen-ops.h"
40 /* Memory map would allow PCI passthrough. */
43 /* E820 map used during setting up memory. */
46 /* Number of initially usable memory pages. */
54 #define REMAP_SIZE (P2M_PER_PAGE - 3)
70 arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit"); in xen_parse_512gb()
74 arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit="); in xen_parse_512gb()
97 xen_extra_mem[i].n_pfns -= n_pfns; in xen_del_extra_mem()
103 xen_extra_mem[i].n_pfns -= n_pfns; in xen_del_extra_mem()
109 xen_extra_mem[i].n_pfns = start_pfn - start_r; in xen_del_extra_mem()
111 xen_add_extra_mem(start_pfn + n_pfns, start_r + size_r - in xen_del_extra_mem()
156 * Finds the next RAM pfn available in the E820 map after min_pfn.
170 if (entry->type != E820_TYPE_RAM) in xen_find_pfn_range()
173 e_pfn = PFN_DOWN(entry->addr + entry->size); in xen_find_pfn_range()
179 s_pfn = PFN_UP(entry->addr); in xen_find_pfn_range()
185 done = e_pfn - *min_pfn; in xen_find_pfn_range()
187 done = e_pfn - s_pfn; in xen_find_pfn_range()
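
The matches above only show fragments of xen_find_pfn_range(); as a rough standalone model of what those fragments do (simplified types, made-up sample table, nothing here is the kernel's actual code), the scan looks like this:

/*
 * Illustrative userspace model of the E820 scan hinted at above: walk a
 * sorted e820-style table and report the first run of RAM pages at or
 * after *min_pfn. All names and the sample table are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PFN_DOWN(x) ((unsigned long)((x) >> PAGE_SHIFT))
#define PFN_UP(x)   ((unsigned long)(((x) + (1ULL << PAGE_SHIFT) - 1) >> PAGE_SHIFT))

struct entry { uint64_t addr, size; int is_ram; };

static unsigned long find_pfn_range(const struct entry *tbl, int n,
				    unsigned long *min_pfn)
{
	for (int i = 0; i < n; i++) {
		unsigned long s_pfn, e_pfn;

		if (!tbl[i].is_ram)
			continue;

		e_pfn = PFN_DOWN(tbl[i].addr + tbl[i].size);
		if (e_pfn <= *min_pfn)		/* region ends below the cursor */
			continue;

		s_pfn = PFN_UP(tbl[i].addr);
		if (s_pfn <= *min_pfn)		/* cursor lies inside this region */
			return e_pfn - *min_pfn;

		*min_pfn = s_pfn;		/* region starts above the cursor */
		return e_pfn - s_pfn;
	}
	return 0;				/* no RAM left above *min_pfn */
}

int main(void)
{
	const struct entry tbl[] = {
		{ 0x00000000, 0x0009f000, 1 },	/* low RAM   */
		{ 0x000f0000, 0x00010000, 0 },	/* reserved  */
		{ 0x00100000, 0x3ff00000, 1 },	/* main RAM  */
	};
	unsigned long pfn = 0x200;
	unsigned long pages = find_pfn_range(tbl, 3, &pfn);

	printf("next RAM range starts at pfn %#lx, %lu pages\n", pfn, pages);
	return 0;
}
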
211 * This releases a chunk of memory and then does the identity map. It's used
278 * This function updates the p2m and m2p tables with an identity map from
281 * saved in the memory itself to avoid the need for allocating buffers. The
285 * remapping at a time when the memory management is capable of allocating
286 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
308 /* Map first pfn to xen_remap_buf */ in xen_do_set_identity_and_remap_chunk()
322 /* Set identity map */ in xen_do_set_identity_and_remap_chunk()
325 left -= chunk; in xen_do_set_identity_and_remap_chunk()
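
The comment matched above explains why REMAP_SIZE (line 54) is P2M_PER_PAGE minus three: the remap information is written into the very pages being handed back, with three slots of metadata ahead of the MFN array. A sketch of a record with that shape, compiling as a standalone translation unit and using field names modelled on (but not copied from) the kernel's xen_remap_buf:

/*
 * Sketch (not a verbatim copy) of a page-sized remap record matching the
 * REMAP_SIZE definition above: three unsigned longs of metadata followed
 * by P2M_PER_PAGE - 3 machine frame numbers, so one record exactly fills
 * the page whose contents are about to be released.
 */
#include <assert.h>

#define PAGE_SIZE	4096UL
#define P2M_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))
#define REMAP_SIZE	(P2M_PER_PAGE - 3)

struct remap_record {
	unsigned long next_area_mfn;	/* MFN of the next record in the list */
	unsigned long target_pfn;	/* first PFN the stored MFNs belong to */
	unsigned long size;		/* number of valid entries in mfns[]   */
	unsigned long mfns[REMAP_SIZE];	/* machine frames to remap later       */
};

/* One record per page is the whole point of the "- 3". */
static_assert(sizeof(struct remap_record) == PAGE_SIZE,
	      "one record fills exactly one page");

The list of such records is what xen_remap_memory() later walks; see the matches around line 447 below.
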
339 * The goal is to not allocate additional memory but to remap the existing
340 * pages. In the case of an error the underlying memory is simply released back
348 unsigned long n = end_pfn - start_pfn; in xen_set_identity_and_remap_chunk()
355 unsigned long left = n - i; in xen_set_identity_and_remap_chunk()
361 /* Identity map remaining pages */ in xen_set_identity_and_remap_chunk()
366 size = ini_nr_pages - cur_pfn; in xen_set_identity_and_remap_chunk()
405 return remap_pages + min(end_pfn, ini_nr_pages) - start_pfn; in xen_count_remap_pages()
418 * Combine non-RAM regions and gaps until a RAM region (or the in xen_foreach_remap_area()
419 * end of the map) is reached, then call the provided function in xen_foreach_remap_area()
420 * to perform its duty on the non-RAM region. in xen_foreach_remap_area()
422 * The combined non-RAM regions are rounded to a whole number in xen_foreach_remap_area()
426 * a non-page boundary. in xen_foreach_remap_area()
429 phys_addr_t end = entry->addr + entry->size; in xen_foreach_remap_area()
430 if (entry->type == E820_TYPE_RAM || i == xen_e820_table.nr_entries - 1) { in xen_foreach_remap_area()
434 if (entry->type == E820_TYPE_RAM) in xen_foreach_remap_area()
435 end_pfn = PFN_UP(entry->addr); in xen_foreach_remap_area()
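
A self-contained model of the walk these matches describe, with a hypothetical table and callback (again illustrative only, not the kernel's code):

/*
 * Userspace model: non-RAM entries and the gaps between them are merged,
 * and each merged span, rounded out to whole pages, is passed to a
 * callback once a RAM entry or the end of the table is reached.
 */
#include <stdint.h>
#include <stdio.h>

#define PFN_DOWN(x) ((unsigned long)((x) >> 12))
#define PFN_UP(x)   ((unsigned long)(((x) + 0xfffULL) >> 12))

struct entry { uint64_t addr, size; int is_ram; };

static void foreach_nonram_span(const struct entry *tbl, int n,
				void (*fn)(unsigned long, unsigned long))
{
	unsigned long start_pfn = 0;

	for (int i = 0; i < n; i++) {
		uint64_t end = tbl[i].addr + tbl[i].size;

		if (tbl[i].is_ram || i == n - 1) {
			/* the span ends where RAM begins, or at the map's end */
			unsigned long end_pfn = tbl[i].is_ram ?
						PFN_UP(tbl[i].addr) : PFN_UP(end);

			if (start_pfn < end_pfn)
				fn(start_pfn, end_pfn);
			start_pfn = PFN_DOWN(end);	/* restart after this entry */
		}
	}
}

static void show(unsigned long s_pfn, unsigned long e_pfn)
{
	printf("non-RAM span: pfn %#lx - %#lx\n", s_pfn, e_pfn);
}

int main(void)
{
	const struct entry tbl[] = {
		{ 0x00000000, 0x0009f000, 1 },
		{ 0x000a0000, 0x00060000, 0 },	/* legacy hole, reserved */
		{ 0x00100000, 0x3ff00000, 1 },
	};
	foreach_nonram_span(tbl, 3, show);
	return 0;
}
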
447 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
449 * to be remapped memory itself in a linked list anchored at xen_remap_mfn.
465 /* Map the remap information */ in xen_remap_memory()
525 * case the e820 map provided to us will cover the static in xen_get_max_pages()
545 end &= ~((phys_addr_t)PAGE_SIZE - 1); in xen_align_and_add_e820_region()
548 * Don't allow adding memory not in E820 map while booting the in xen_align_and_add_e820_region()
556 e820__range_add(start, end - start, type); in xen_align_and_add_e820_region()
565 if (entry->type == E820_TYPE_UNUSABLE) in xen_ignore_unusable()
566 entry->type = E820_TYPE_RAM; in xen_ignore_unusable()
583 if (entry->type == E820_TYPE_RAM && entry->addr <= start && in xen_is_e820_reserved()
584 (entry->addr + entry->size) >= end) in xen_is_e820_reserved()
594 * Find a free area in physical memory not yet reserved and compliant with
595 * E820 map.
596 * Used to relocate pre-allocated areas like initrd or p2m list which are in
597 * conflict with the to be used E820 map.
608 if (entry->type != E820_TYPE_RAM || entry->size < size) in xen_find_free_area()
610 start = entry->addr; in xen_find_free_area()
615 if (start + size > entry->addr + entry->size) in xen_find_free_area()
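
A minimal model of that search, substituting a hypothetical is_reserved() predicate for the kernel's memblock lookup (all values invented):

/*
 * Within each sufficiently large RAM entry, slide a window of the
 * requested size forward past pages the predicate rejects, and return
 * the first window that fits.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE 4096ULL

struct entry { uint64_t addr, size; int is_ram; };

/* stand-in for the kernel's memblock check: pretend 1 MiB - 2 MiB is taken */
static bool is_reserved(uint64_t addr)
{
	return addr >= 0x100000 && addr < 0x200000;
}

static uint64_t find_free_area(const struct entry *tbl, int n, uint64_t size)
{
	for (int i = 0; i < n; i++) {
		uint64_t start, addr;

		if (!tbl[i].is_ram || tbl[i].size < size)
			continue;

		start = tbl[i].addr;
		for (addr = start; addr < start + size; addr += PAGE_SIZE) {
			if (!is_reserved(addr))
				continue;
			start = addr + PAGE_SIZE;	/* restart after the blocker */
			if (start + size > tbl[i].addr + tbl[i].size)
				break;			/* window no longer fits */
		}
		if (addr >= start + size)
			return start;			/* found a free window */
	}
	return 0;
}

int main(void)
{
	const struct entry tbl[] = { { 0x100000, 0x40000000, 1 } };

	printf("free area at %#llx\n",
	       (unsigned long long)find_free_area(tbl, 1, 0x800000));
	return 0;
}
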
628 * Swap a non-RAM E820 map entry with RAM above ini_nr_pages.
629 * Note that the E820 map is modified accordingly, but the P2M map isn't yet.
639 swap_addr = PAGE_ALIGN_DOWN(swap_entry->addr); in xen_e820_swap_entry_with_ram()
640 swap_size = PAGE_ALIGN(swap_entry->addr - swap_addr + swap_entry->size); in xen_e820_swap_entry_with_ram()
644 entry_end = entry->addr + entry->size; in xen_e820_swap_entry_with_ram()
645 if (entry->type == E820_TYPE_RAM && entry->size >= swap_size && in xen_e820_swap_entry_with_ram()
646 entry_end - swap_size >= mem_end) { in xen_e820_swap_entry_with_ram()
648 entry->size -= swap_size; in xen_e820_swap_entry_with_ram()
650 /* Add new entry at the end of E820 map. */ in xen_e820_swap_entry_with_ram()
656 entry->type = swap_entry->type; in xen_e820_swap_entry_with_ram()
657 entry->addr = entry_end - swap_size + in xen_e820_swap_entry_with_ram()
658 swap_addr - swap_entry->addr; in xen_e820_swap_entry_with_ram()
659 entry->size = swap_entry->size; in xen_e820_swap_entry_with_ram()
662 swap_entry->type = E820_TYPE_RAM; in xen_e820_swap_entry_with_ram()
663 swap_entry->addr = swap_addr; in xen_e820_swap_entry_with_ram()
664 swap_entry->size = swap_size; in xen_e820_swap_entry_with_ram()
666 /* Remember PFN<->MFN relation for P2M update. */ in xen_e820_swap_entry_with_ram()
667 xen_add_remap_nonram(swap_addr, entry_end - swap_size, in xen_e820_swap_entry_with_ram()
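
The address arithmetic in the matches above keeps the moved entry's sub-page offset within its page-aligned window. A small worked example with invented numbers (this is only the arithmetic, not the kernel function):

#include <stdint.h>
#include <stdio.h>

#define PAGE_ALIGN_DOWN(x)	((x) & ~0xfffULL)
#define PAGE_ALIGN(x)		(((x) + 0xfffULL) & ~0xfffULL)

int main(void)
{
	/* entry to move away (e.g. ACPI NVS), starting 0x800 into its page */
	uint64_t nvs_addr = 0x000dc800, nvs_size = 0x3000;
	/* RAM entry donating space at its top end */
	uint64_t ram_addr = 0x00100000, ram_size = 0x3ff00000;

	uint64_t swap_addr = PAGE_ALIGN_DOWN(nvs_addr);
	uint64_t swap_size = PAGE_ALIGN(nvs_addr - swap_addr + nvs_size);
	uint64_t ram_end   = ram_addr + ram_size;

	/* the RAM entry shrinks by the page-aligned window ... */
	ram_size -= swap_size;
	/* ... and the moved entry lands there, keeping its page offset. */
	uint64_t new_nvs_addr = ram_end - swap_size + (nvs_addr - swap_addr);

	printf("NVS moves %#llx -> %#llx (window %#llx bytes)\n",
	       (unsigned long long)nvs_addr,
	       (unsigned long long)new_nvs_addr,
	       (unsigned long long)swap_size);
	printf("window %#llx-%#llx becomes RAM; donor entry now %#llx bytes\n",
	       (unsigned long long)swap_addr,
	       (unsigned long long)(swap_addr + swap_size),
	       (unsigned long long)ram_size);
	return 0;
}

The PFN<->MFN relation recorded at the end of the matched lines is what later lets the P2M be fixed up to match the rewritten E820 map.
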
684 * Look for non-RAM memory types in a specific guest physical area and move
701 if (entry->addr >= end) in xen_e820_resolve_conflicts()
704 if (entry->addr + entry->size > start && in xen_e820_resolve_conflicts()
705 entry->type == E820_TYPE_NVS) in xen_e820_resolve_conflicts()

713 * Check for an area in physical memory to be usable for non-movable purposes.
714 * An area is considered usable if the used E820 map lists it to be RAM or
728 xen_raw_console_write(" memory conflicts with E820 map\n"); in xen_chk_is_e820_usable()
745 if (dest_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off) in xen_phys_memcpy()
746 dest_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off; in xen_phys_memcpy()
748 if (src_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off) in xen_phys_memcpy()
749 src_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off; in xen_phys_memcpy()
751 to = early_memremap(dest - dest_off, dest_len + dest_off); in xen_phys_memcpy()
752 from = early_memremap(src - src_off, src_len + src_off); in xen_phys_memcpy()
756 n -= len; in xen_phys_memcpy()
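
The clamping visible in these matches exists because each side of the copy can only be mapped NR_FIX_BTMAPS pages at a time, and an unaligned start eats into that window. A standalone model of just the chunking arithmetic (the real function maps each window with early_memremap() before copying; the budget value below is invented):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)
#define NR_FIX_BTMAPS	4ULL		/* illustrative budget, in pages */

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	uint64_t dest = 0x12345800, src = 0x9876000, n = 0x9000;

	while (n) {
		uint64_t dest_off = dest & (PAGE_SIZE - 1);
		uint64_t src_off  = src  & (PAGE_SIZE - 1);
		uint64_t dest_len = min_u64(n, (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off);
		uint64_t src_len  = min_u64(n, (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off);
		uint64_t len      = min_u64(dest_len, src_len);

		/* here the kernel would map both windows and memcpy() len bytes */
		printf("copy %#llx bytes: %#llx -> %#llx\n",
		       (unsigned long long)len,
		       (unsigned long long)src,
		       (unsigned long long)dest);

		n -= len;
		dest += len;
		src += len;
	}
	return 0;
}
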
769 if (xen_start_info->mfn_list >= __START_KERNEL_map) { in xen_reserve_xen_mfnlist()
770 start = __pa(xen_start_info->mfn_list); in xen_reserve_xen_mfnlist()
771 size = PFN_ALIGN(xen_start_info->nr_pages * in xen_reserve_xen_mfnlist()
774 start = PFN_PHYS(xen_start_info->first_p2m_pfn); in xen_reserve_xen_mfnlist()
775 size = PFN_PHYS(xen_start_info->nr_p2m_frames); in xen_reserve_xen_mfnlist()
787 * xen_memory_setup - Hook for machine specific memory setup.
803 ini_nr_pages = min(xen_get_pages_limit(), xen_start_info->nr_pages); in xen_memory_setup()
817 if (rc == -ENOSYS) { in xen_memory_setup()
834 * regions, so if we're using the machine memory map leave the in xen_memory_setup()
835 * region as RAM as it is in the pseudo-physical map. in xen_memory_setup()
845 xen_e820_table.entries[xen_e820_table.nr_entries].size = IBFT_END - IBFT_START; in xen_memory_setup()
851 /* Make sure the Xen-supplied memory map is well-ordered. */ in xen_memory_setup()
855 * Check whether the kernel itself conflicts with the target E820 map. in xen_memory_setup()
860 __pa_symbol(_end) - __pa_symbol(_text), in xen_memory_setup()
864 * Check for a conflict of the xen_start_info memory with the target in xen_memory_setup()
865 * E820 map. in xen_memory_setup()
872 * the target E820 map. in xen_memory_setup()
882 extra_pages += max_pages - ini_nr_pages; in xen_memory_setup()
885 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO in xen_memory_setup()
888 * Make sure we have no memory above max_pages, as this area in xen_memory_setup()
892 extra_pages = min3(maxmem_pages, extra_pages, max_pages - ini_nr_pages); in xen_memory_setup()
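
A quick worked example of this clamp, with made-up numbers and assuming EXTRA_MEM_RATIO is a plain factor of 10 (and ignoring the remapped non-RAM pages that are also counted into extra_pages): a domain booted with 1 GiB (ini_nr_pages = 262144 four-KiB pages) but allowed to grow to 4 GiB (max_pages = 1048576) starts with extra_pages = 1048576 - 262144 = 786432, while maxmem_pages = 10 * 262144 = 2621440, so min3() keeps 786432; here the max_pages bound, not the ratio, is the limiting term.
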
907 chunk_size = min(size, mem_end - addr); in xen_memory_setup()
911 n_pfns = PFN_DOWN(addr + chunk_size) - pfn_s; in xen_memory_setup()
912 extra_pages -= n_pfns; in xen_memory_setup()
923 size -= chunk_size; in xen_memory_setup()
940 * In domU, the ISA region is normal, usable memory, but we in xen_memory_setup()
941 * reserve ISA memory anyway because too many things poke in xen_memory_setup()
944 e820__range_add(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_TYPE_RESERVED); in xen_memory_setup()
950 /* Check for a conflict of the initrd with the target E820 map. */ in xen_memory_setup()
957 xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n"); in xen_memory_setup()
964 pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n", in xen_memory_setup()
972 * Set identity map on non-RAM pages and prepare remapping the in xen_memory_setup()
1007 /* Pretty fatal; 64-bit userspace has no other in xen_enable_syscall()
1036 if (!(xen_start_info->flags & SIF_INITDOMAIN)) { in xen_arch_setup()
1042 memcpy(boot_command_line, xen_start_info->cmd_line, in xen_arch_setup()