Lines Matching full:memblock

18 #include <linux/memblock.h>
43 * DOC: memblock overview
45 * Memblock is a method of managing memory regions during the early
49 * Memblock views the system memory as collections of contiguous
66 * wrapped with struct memblock. This structure is statically
76 * The early architecture setup should tell memblock what the physical
84 * Once memblock is setup the memory can be allocated using one of the
101 * memblock data structures (except "physmem") will be discarded after the
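
The overview excerpts above describe a two-phase protocol: early architecture code first tells memblock which physical memory exists and which parts of it are already occupied, and then uses memblock as the boot-time allocator until the page allocator takes over. A minimal, purely illustrative sketch, assuming <linux/memblock.h> and invented addresses and sizes (the real calls live in each architecture's setup code):

    #include <linux/init.h>
    #include <linux/memblock.h>
    #include <linux/sizes.h>

    /* Illustrative only: the function and all numbers below are invented. */
    static void __init example_early_setup(void)
    {
            void *buf;

            /* Phase 1: describe the RAM the platform reported ... */
            memblock_add(0x80000000, SZ_512M);

            /* ... and the parts of it that are already in use. */
            memblock_reserve(0x80000000, SZ_16M);   /* e.g. kernel image */

            /* Phase 2: early allocations come from "memory" minus "reserved". */
            buf = memblock_alloc(SZ_4K, SMP_CACHE_BYTES);
            if (!buf)
                    pr_warn("early allocation failed\n");
    }
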
128 struct memblock memblock __initdata_memblock = {
150 * keep a pointer to &memblock.memory in the text section to use it in
152 * For architectures that do not keep memblock data after init, this
155 static __refdata struct memblock_type *memblock_memory = &memblock.memory;
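
For orientation, the statically initialized object at line 128 bundles the "memory" and "reserved" (and, on some architectures, "physmem") region arrays. A simplified view of the data structures, paraphrased from include/linux/memblock.h (exact fields vary between kernel versions):

    struct memblock_region {
            phys_addr_t base;
            phys_addr_t size;
            enum memblock_flags flags;
            /* plus an int nid with CONFIG_NUMA */
    };

    struct memblock_type {
            unsigned long cnt;              /* regions currently in use */
            unsigned long max;              /* capacity of the regions array */
            phys_addr_t total_size;
            struct memblock_region *regions;
            char *name;
    };

    struct memblock {
            bool bottom_up;                 /* allocate from low addresses first? */
            phys_addr_t current_limit;
            struct memblock_type memory;
            struct memblock_type reserved;
    };
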
315 end = memblock.current_limit; in memblock_find_in_range_node()
388 if (memblock.reserved.regions != memblock_reserved_init_regions) { in memblock_discard()
389 addr = __pa(memblock.reserved.regions); in memblock_discard()
391 memblock.reserved.max); in memblock_discard()
393 kfree(memblock.reserved.regions); in memblock_discard()
398 if (memblock.memory.regions != memblock_memory_init_regions) { in memblock_discard()
399 addr = __pa(memblock.memory.regions); in memblock_discard()
401 memblock.memory.max); in memblock_discard()
403 kfree(memblock.memory.regions); in memblock_discard()
413 * memblock_double_array - double the size of the memblock regions array
414 * @type: memblock type of the regions array being doubled
418 * Double the size of the @type regions array. If memblock is being used to
441 panic("memblock: cannot resize %s array\n", type->name); in memblock_double_array()
454 if (type == &memblock.memory) in memblock_double_array()
465 if (type != &memblock.reserved) in memblock_double_array()
469 memblock.current_limit, in memblock_double_array()
473 min(new_area_start, memblock.current_limit), in memblock_double_array()
486 pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n", in memblock_double_array()
492 memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]", in memblock_double_array()
514 * Reserve the new array if that comes from the memblock. Otherwise, we in memblock_double_array()
528 * @type: memblock type to scan
563 * memblock_insert_region - insert new memblock region
564 * @type: memblock type to insert into
571 * Insert new memblock region [@base, @base + @size) into @type at @idx.
593 * memblock_add_range - add new memblock region
594 * @type: memblock type to add new region into
600 * Add new memblock region [@base, @base + @size) into @type. The new region
715 * memblock_add_node - add new memblock region within a NUMA node
721 * Add new memblock region [@base, @base + @size) to the "memory"
735 return memblock_add_range(&memblock.memory, base, size, nid, flags); in memblock_add_node()
739 * memblock_add - add new memblock region
743 * Add new memblock region [@base, @base + @size) to the "memory"
756 return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0); in memblock_add()
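
memblock_add() registers RAM with no NUMA information (the node stays MAX_NUMNODES until memblock_set_node() runs), while memblock_add_node() attaches a node id and flags up front. A hedged usage sketch with invented addresses and node numbers:

    #include <linux/init.h>
    #include <linux/memblock.h>
    #include <linux/sizes.h>

    static void __init example_register_ram(void)
    {
            /* RAM whose node affinity is not known yet. */
            memblock_add(0x100000000ULL, SZ_1G);

            /* RAM known to belong to NUMA node 1. */
            memblock_add_node(0x200000000ULL, SZ_1G, 1, MEMBLOCK_NONE);
    }
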
795 * @type: memblock type to isolate range for
892 return memblock_remove_range(&memblock.memory, base, size); in memblock_remove()
925 return memblock_remove_range(&memblock.reserved, base, size); in memblock_phys_free()
936 return memblock_add_range(&memblock.reserved, base, size, nid, flags); in __memblock_reserve()
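
memblock_remove() deletes a range from the "memory" type altogether, while memblock_phys_free() drops a range from "reserved" that was claimed earlier with memblock_reserve() (which feeds __memblock_reserve() above). A sketch of the common reserve/parse/free pattern; the helper and its parameters are hypothetical and would normally be driven by a firmware table:

    #include <linux/init.h>
    #include <linux/memblock.h>

    static void __init example_consume_fw_table(phys_addr_t pa, phys_addr_t size)
    {
            /* Keep the table safe from early allocations while it is parsed ... */
            memblock_reserve(pa, size);

            /* ... then return the range once its contents have been copied. */
            memblock_phys_free(pa, size);
    }
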
977 __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, in memmap_init_kho_scratch_pages()
987 * @type: memblock type to set/clear flag for
1028 return memblock_setclr_flag(&memblock.memory, base, size, 1, MEMBLOCK_HOTPLUG); in memblock_mark_hotplug()
1040 return memblock_setclr_flag(&memblock.memory, base, size, 0, MEMBLOCK_HOTPLUG); in memblock_clear_hotplug()
1057 return memblock_setclr_flag(&memblock.memory, base, size, 1, MEMBLOCK_MIRROR); in memblock_mark_mirror()
1071 * memblock, the caller must inform kmemleak to ignore that memory
1077 return memblock_setclr_flag(&memblock.memory, base, size, 1, MEMBLOCK_NOMAP); in memblock_mark_nomap()
1089 return memblock_setclr_flag(&memblock.memory, base, size, 0, MEMBLOCK_NOMAP); in memblock_clear_nomap()
1113 return memblock_setclr_flag(&memblock.reserved, base, size, 1, in memblock_reserved_mark_noinit()
1129 return memblock_setclr_flag(&memblock.memory, base, size, 1, in memblock_mark_kho_scratch()
1143 return memblock_setclr_flag(&memblock.memory, base, size, 0, in memblock_clear_kho_scratch()
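
The memblock_setclr_flag() wrappers above attach attributes to existing regions rather than adding new ones: MEMBLOCK_HOTPLUG, MEMBLOCK_MIRROR, MEMBLOCK_NOMAP and the KHO scratch flag act on "memory", MEMBLOCK_RSRV_NOINIT on "reserved". An illustrative sketch with invented ranges (the real callers are ACPI/devicetree/EFI parsers):

    #include <linux/init.h>
    #include <linux/memblock.h>
    #include <linux/sizes.h>

    static void __init example_mark_attributes(void)
    {
            /* Hot-removable memory: keep kernel data structures out of it. */
            memblock_mark_hotplug(0x200000000ULL, SZ_1G);

            /* Firmware-private range: present, but must never be mapped. */
            memblock_mark_nomap(0x80000000ULL, SZ_2M);

            /* Mirrored (more reliable) memory, preferred for kernel allocations. */
            memblock_mark_mirror(0x100000000ULL, SZ_256M);
    }
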
1153 /* we never skip regions when iterating memblock.reserved or physmem */ in should_skip_region()
1390 struct memblock_type *type = &memblock.memory; in __next_mem_pfn_range()
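
should_skip_region() and __next_mem_pfn_range() back the for_each_* iterator macros, which are the intended way to walk memblock from outside this file. A small illustrative walk over the registered memory ranges (the function name is invented):

    #include <linux/init.h>
    #include <linux/memblock.h>

    static phys_addr_t __init example_count_memory(void)
    {
            phys_addr_t start, end, total = 0;
            u64 i;

            for_each_mem_range(i, &start, &end)
                    total += end - start;

            return total;
    }
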
1417 * memblock_set_node - set node ID on memblock regions
1420 * @type: memblock type to set node ID for
1423 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
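
memblock_set_node() stamps a node id onto the regions of a given type once the NUMA topology is known (SRAT, devicetree, ...). A hedged sketch, assuming base/size/nid arrive from such a table:

    #include <linux/init.h>
    #include <linux/memblock.h>

    static int __init example_assign_node(phys_addr_t base, phys_addr_t size, int nid)
    {
            return memblock_set_node(base, size, &memblock.memory, nid);
    }
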
1458 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
1483 * this moment memblock may be deinitialized already and its in memblock_alloc_range_nid()
1528 * Memblock allocated blocks are never reported as in memblock_alloc_range_nid()
1618 if (max_addr > memblock.current_limit) in memblock_alloc_internal()
1619 max_addr = memblock.current_limit; in memblock_alloc_internal()
1644 * allocate only from memory limited by memblock.current_limit value
1675 * allocate only from memory limited by memblock.current_limit value
1706 * allocate only from memory limited by memblock.current_limit value
1758 * This is only useful when the memblock allocator has already been torn
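
Lines 1458-1706 are the allocation paths: unless a caller passes an explicit range, requests are clamped to memblock.current_limit (MEMBLOCK_ALLOC_ACCESSIBLE), while memblock_free_late() at line 1758 hands memory straight to the page allocator once memblock itself is done. A hedged sketch of two commonly used front ends, with invented sizes and bounds:

    #include <linux/init.h>
    #include <linux/memblock.h>
    #include <linux/sizes.h>

    static void __init example_early_allocs(void)
    {
            void *virt;
            phys_addr_t phys;

            /* Zeroed, already-mapped memory below memblock.current_limit. */
            virt = memblock_alloc(SZ_64K, SMP_CACHE_BYTES);

            /* A physical range constrained to the low 4G (e.g. for a DMA mask). */
            phys = memblock_phys_alloc_range(SZ_1M, SZ_1M, 0, SZ_4G);

            if (!virt || !phys)
                    pr_warn("early allocations failed\n");
    }
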
1785 return memblock.memory.total_size; in memblock_phys_mem_size()
1790 return memblock.reserved.total_size; in memblock_reserved_size()
1817 * from memblock point of view
1825 * An estimated number of free pages from memblock point of view.
1835 return memblock.memory.regions[0].base; in memblock_start_of_DRAM()
1840 int idx = memblock.memory.cnt - 1; in memblock_end_of_DRAM()
1842 return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size); in memblock_end_of_DRAM()
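
memblock_phys_mem_size(), memblock_reserved_size(), memblock_start_of_DRAM() and memblock_end_of_DRAM() summarize the current region lists; note that memblock_end_of_DRAM() returns the exclusive end of the last "memory" region. An illustrative reporting helper (the function itself is invented):

    #include <linux/init.h>
    #include <linux/memblock.h>

    static void __init example_report(void)
    {
            phys_addr_t start = memblock_start_of_DRAM();
            phys_addr_t end = memblock_end_of_DRAM();

            pr_info("RAM %pa..%pa, %llu bytes total, %llu bytes reserved\n",
                    &start, &end,
                    (unsigned long long)memblock_phys_mem_size(),
                    (unsigned long long)memblock_reserved_size());
    }
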
1852 * the memory memblock regions, if the @limit exceeds the total size in __find_max_addr()
1880 memblock_remove_range(&memblock.memory, max_addr, in memblock_enforce_memory_limit()
1882 memblock_remove_range(&memblock.reserved, max_addr, in memblock_enforce_memory_limit()
1899 ret = memblock_isolate_range(&memblock.memory, base, size, in memblock_cap_memory_range()
1905 for (i = memblock.memory.cnt - 1; i >= end_rgn; i--) in memblock_cap_memory_range()
1906 if (!memblock_is_nomap(&memblock.memory.regions[i])) in memblock_cap_memory_range()
1907 memblock_remove_region(&memblock.memory, i); in memblock_cap_memory_range()
1910 if (!memblock_is_nomap(&memblock.memory.regions[i])) in memblock_cap_memory_range()
1911 memblock_remove_region(&memblock.memory, i); in memblock_cap_memory_range()
1914 memblock_remove_range(&memblock.reserved, 0, base); in memblock_cap_memory_range()
1915 memblock_remove_range(&memblock.reserved, in memblock_cap_memory_range()
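
memblock_enforce_memory_limit() truncates everything above a given amount of memory, and memblock_cap_memory_range() keeps only a single window, removing "memory" regions outside it unless they are marked nomap; architectures use these for mem= style restrictions and crash-kernel setups. A minimal, hypothetical sketch:

    #include <linux/init.h>
    #include <linux/memblock.h>

    static void __init example_apply_mem_limit(phys_addr_t limit)
    {
            if (limit)
                    memblock_enforce_memory_limit(limit);

            /*
             * Alternatively, to keep just one usable window of RAM:
             *   memblock_cap_memory_range(window_base, window_size);
             */
    }
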
1955 return memblock_search(&memblock.reserved, addr) != -1; in memblock_is_reserved()
1960 return memblock_search(&memblock.memory, addr) != -1; in memblock_is_memory()
1965 int i = memblock_search(&memblock.memory, addr); in memblock_is_map_memory()
1969 return !memblock_is_nomap(&memblock.memory.regions[i]); in memblock_is_map_memory()
1975 struct memblock_type *type = &memblock.memory; in memblock_search_pfn_nid()
1999 int idx = memblock_search(&memblock.memory, base); in memblock_is_region_memory()
2004 return (memblock.memory.regions[idx].base + in memblock_is_region_memory()
2005 memblock.memory.regions[idx].size) >= end; in memblock_is_region_memory()
2021 return memblock_overlaps_region(&memblock.reserved, base, size); in memblock_is_region_reserved()
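
memblock_is_memory() and memblock_is_reserved() test a single address, memblock_is_map_memory() additionally rejects nomap regions, and the _region_ variants check whole ranges (fully inside "memory", respectively overlapping "reserved"). An illustrative predicate built on top of them:

    #include <linux/init.h>
    #include <linux/memblock.h>

    /* True if [base, base + size) is registered RAM and not already reserved. */
    static bool __init example_range_usable(phys_addr_t base, phys_addr_t size)
    {
            return memblock_is_region_memory(base, size) &&
                   !memblock_is_region_reserved(base, size);
    }
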
2042 memblock_remove_region(&memblock.memory, in memblock_trim_memory()
2043 r - memblock.memory.regions); in memblock_trim_memory()
2051 memblock.current_limit = limit; in memblock_set_current_limit()
2056 return memblock.current_limit; in memblock_get_current_limit()
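
memblock_set_current_limit() and memblock_get_current_limit() bound the default allocation range; architectures typically keep the limit low until the full linear mapping exists. A short hypothetical example:

    #include <linux/init.h>
    #include <linux/memblock.h>
    #include <linux/sizes.h>

    static void __init example_cap_early_allocations(void)
    {
            /* Hypothetical: only the low 1G is mapped this early in boot. */
            memblock_set_current_limit(SZ_1G);
    }
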
2087 pr_info("MEMBLOCK configuration:\n"); in __memblock_dump_all()
2089 &memblock.memory.total_size, in __memblock_dump_all()
2090 &memblock.reserved.total_size); in __memblock_dump_all()
2092 memblock_dump(&memblock.memory); in __memblock_dump_all()
2093 memblock_dump(&memblock.reserved); in __memblock_dump_all()
2116 early_param("memblock", early_memblock);
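
The early_param() hook at line 2116 implements the "memblock=debug" kernel command-line option, which enables the memblock_dbg() traces seen above and lets memblock_dump_all() (the caller-facing wrapper around __memblock_dump_all()) print the region lists. An illustrative call site in early setup code:

    /*
     * Prints the "memory" and "reserved" lists when the kernel was booted
     * with "memblock=debug"; otherwise this is a no-op.
     */
    memblock_dump_all();
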
2253 max_reserved = memblock.reserved.max; in memmap_init_reserved_pages()
2262 memblock_set_node(start, region->size, &memblock.reserved, nid); in memmap_init_reserved_pages()
2265 * 'max' is changed means memblock.reserved has been doubled its in memmap_init_reserved_pages()
2269 if (max_reserved != memblock.reserved.max) in memmap_init_reserved_pages()
2444 #define MEMBLOCK_KHO_FDT "memblock"
2445 #define MEMBLOCK_KHO_NODE_COMPATIBLE "memblock-v1"
2514 pr_err("failed to prepare memblock FDT for KHO: %d\n", err); in prepare_kho_fdt()
2747 struct dentry *root = debugfs_create_dir("memblock", NULL); in memblock_init_debugfs()
2750 &memblock.memory, &memblock_debug_fops); in memblock_init_debugfs()
2752 &memblock.reserved, &memblock_debug_fops); in memblock_init_debugfs()
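
When CONFIG_DEBUG_FS is enabled and memblock data is kept after boot (CONFIG_ARCH_KEEP_MEMBLOCK), the debugfs directory created here exposes the same region lists at runtime, typically as read-only "memory" and "reserved" files under <debugfs>/memblock/ (plus "physmem" where that type exists), which is convenient for inspecting the early memory map after the fact.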