// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/hugetlb.h>
#include <linux/acpi_iort.h>
#include <linux/kmemleak.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_host.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>
#include <asm/xen/swiotlb-xen.h>

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);

/*
 * If the corresponding config options are enabled, we create both ZONE_DMA
 * and ZONE_DMA32. By default ZONE_DMA covers the 32-bit addressable memory
 * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
 * In that case, ZONE_DMA32 covers the rest of the 32-bit addressable memory;
 * otherwise it is empty.
 *
 * Memory reservation for the crash kernel is done either early or deferred,
 * depending on whether a DMA zone (ZONE_DMA/ZONE_DMA32) is configured:
 *
 * In the absence of any DMA zone config, arm64_dma_phys_limit is initialized
 * here instead of in max_zone_phys(). This allows early reservation of the
 * crash kernel memory, which depends on arm64_dma_phys_limit. Reserving the
 * crash kernel memory early permits block mappings (larger than
 * page-granularity) to be created for all memory bank ranges, which
 * results in a comparatively quicker boot.
 *
 * If a DMA zone config is defined, the crash kernel memory reservation is
 * deferred until the DMA zone memory ranges have been initialized in
 * zone_sizes_init(). The deferral is necessary to steer clear of the DMA
 * zone ranges and avoid overlapping allocations. As a consequence, the
 * crash kernel boundaries are not yet known when all the memory bank
 * ranges are mapped, so the crash kernel range cannot be excluded from
 * block mappings; page-granularity mappings are therefore created for the
 * entire memory range, and a slightly slower boot is observed.
 *
 * Note: page-granularity mappings are necessary for the crash kernel memory
 * range so that its size can later be shrunk via the
 * /sys/kernel/kexec_crash_size interface.
 */
#if IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32)
phys_addr_t __ro_after_init arm64_dma_phys_limit;
#else
phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1;
#endif

/*
 * reserve_crashkernel() - reserve memory for the crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by the dump-capture
 * kernel when the primary kernel crashes.
 */
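/*
 * Illustrative usage: booting with "crashkernel=512M" requests a 512 MiB
 * reservation at an allocator-chosen base, while "crashkernel=512M@2G"
 * additionally pins the base address; both forms are handled by the
 * parse_crashkernel() call below.
 */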
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size;
	unsigned long long crash_max = arm64_dma_phys_limit;
	int ret;

	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
		return;

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	/* no crashkernel= or invalid value specified */
	if (ret || !crash_size)
		return;

	crash_size = PAGE_ALIGN(crash_size);

	/* User specifies base address explicitly. */
	if (crash_base)
		crash_max = crash_base + crash_size;

	/* Current arm64 boot protocol requires 2MB alignment */
	crash_base = memblock_phys_alloc_range(crash_size, SZ_2M,
					       crash_base, crash_max);
	if (!crash_base) {
		pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
			crash_size);
		return;
	}

	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	/*
	 * The crashkernel memory will be removed from the kernel linear
	 * map. Inform kmemleak so that it won't try to access it.
	 */
	kmemleak_ignore_phys(crash_base);
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}

/*
 * Return the maximum physical address for a zone accessible within the
 * given address-bits limit. If DRAM starts above the 32-bit boundary,
 * expand the zone to the maximum available memory; otherwise cap it at
 * 32 bits.
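 *
 * Worked example (illustrative numbers only): with zone_bits == 30 the
 * mask is 0x3fffffff. If DRAM happens to start at 0x80000000 (above the
 * mask but still 32-bit addressable), the zone is capped at 4 GiB; if
 * DRAM starts above 4 GiB, the zone is expanded to cover all of memory.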
 */
static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
{
	phys_addr_t zone_mask = DMA_BIT_MASK(zone_bits);
	phys_addr_t phys_start = memblock_start_of_DRAM();

	if (phys_start > U32_MAX)
		zone_mask = PHYS_ADDR_MAX;
	else if (phys_start > zone_mask)
		zone_mask = U32_MAX;

	return min(zone_mask, memblock_end_of_DRAM() - 1) + 1;
}

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES]  = {0};
	unsigned int __maybe_unused acpi_zone_dma_bits;
	unsigned int __maybe_unused dt_zone_dma_bits;
	phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32);

#ifdef CONFIG_ZONE_DMA
	acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
	dt_zone_dma_bits = fls64(of_dma_get_max_cpu_address(NULL));
	zone_dma_bits = min3(32U, dt_zone_dma_bits, acpi_zone_dma_bits);
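	/*
	 * Example: on a Raspberry Pi 4 the DT reports a 30-bit DMA-capable
	 * range, so zone_dma_bits resolves to 30 and ZONE_DMA covers only
	 * the first 1 GiB of RAM.
	 */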
	arm64_dma_phys_limit = max_zone_phys(zone_dma_bits);
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
	if (!arm64_dma_phys_limit)
		arm64_dma_phys_limit = dma32_phys_limit;
#endif
	max_zone_pfns[ZONE_NORMAL] = max;

	free_area_init(max_zone_pfns);
}

int pfn_is_map_memory(unsigned long pfn)
{
	phys_addr_t addr = PFN_PHYS(pfn);

	/* avoid false positives for bogus PFNs, see comment in pfn_valid() */
	if (PHYS_PFN(addr) != pfn)
		return 0;

	return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_is_map_memory);

static phys_addr_t memory_limit __ro_after_init = PHYS_ADDR_MAX;

/*
 * Limit the memory size that was specified via FDT.
 */
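/*
 * For example, passing "mem=2G" on the command line caps the usable memory
 * at 2 GiB; memparse() accepts the usual K/M/G (and larger) suffixes.
 */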
static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);

void __init arm64_memblock_init(void)
{
	s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);

	/*
	 * Corner case: 52-bit VA capable systems running KVM in nVHE mode may
	 * be limited in their ability to support a linear map that exceeds 51
	 * bits of VA space, depending on the placement of the ID map. Given
	 * that the placement of the ID map may be randomized, let's simply
	 * limit the kernel's linear map to 51 bits as well if we detect this
	 * configuration.
	 */
	if (IS_ENABLED(CONFIG_KVM) && vabits_actual == 52 &&
	    is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
		pr_info("Capping linear region to 51 bits for KVM in nVHE mode on LVA capable hardware.\n");
		linear_region_size = min_t(u64, linear_region_size, BIT(51));
	}

	/* Remove memory above our supported physical address size */
	memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);

	/*
	 * Select a suitable value for the base of physical memory.
	 */
	memstart_addr = round_down(memblock_start_of_DRAM(),
				   ARM64_MEMSTART_ALIGN);

	if ((memblock_end_of_DRAM() - memstart_addr) > linear_region_size)
		pr_warn("Memory doesn't fit in the linear mapping, VA_BITS too small\n");

	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel which may be
	 * high in memory.
	 */
	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
			__pa_symbol(_end)), ULLONG_MAX);
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
		/* ensure that memstart_addr remains sufficiently aligned */
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
					 ARM64_MEMSTART_ALIGN);
		memblock_remove(0, memstart_addr);
	}

	/*
	 * If we are running with a 52-bit kernel VA config on a system that
	 * does not support it, we have to place the available physical
	 * memory in the 48-bit addressable part of the linear region, i.e.,
	 * we have to move it upward. Since memstart_addr represents the
	 * physical address of PAGE_OFFSET, we have to *subtract* from it.
	 */
	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
		memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);

	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
	 * via the linear mapping.
	 */
	if (memory_limit != PHYS_ADDR_MAX) {
		memblock_mem_limit_remove_map(memory_limit);
		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/*
		 * Add back the memory we just removed if removing it would
		 * leave the initrd inaccessible via the linear mapping.
		 * Otherwise, this is a no-op.
		 */
		u64 base = phys_initrd_start & PAGE_MASK;
		u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;

		/*
		 * We can only add back the initrd memory if we don't end up
		 * with more memory than we can address via the linear mapping.
		 * It is up to the bootloader to position the kernel and the
		 * initrd reasonably close to each other (i.e., within 32 GB of
		 * each other) so that all granule/#levels combinations can
		 * always access both.
		 */
		if (WARN(base < memblock_start_of_DRAM() ||
			 base + size > memblock_start_of_DRAM() +
				       linear_region_size,
			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
			phys_initrd_size = 0;
		} else {
			memblock_remove(base, size); /* clear MEMBLOCK_ flags */
			memblock_add(base, size);
			memblock_reserve(base, size);
		}
	}

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
		u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
		int parange = cpuid_feature_extract_unsigned_field(
					mmfr0, ID_AA64MMFR0_PARANGE_SHIFT);
		s64 range = linear_region_size -
			    BIT(id_aa64mmfr0_parange_to_phys_shift(parange));

		/*
		 * If the size of the linear region exceeds, by a sufficient
		 * margin, the size of the region that the physical memory can
		 * span, randomize the linear region as well.
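		 *
		 * The offset applied below is (range * seed / 65536) multiples
		 * of ARM64_MEMSTART_ALIGN, so the 16-bit seed selects one of a
		 * set of uniformly spaced slots within the spare range.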
		 */
		if (memstart_offset_seed > 0 && range >= (s64)ARM64_MEMSTART_ALIGN) {
			range /= ARM64_MEMSTART_ALIGN;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
		}
	}

	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa_symbol(_stext), _end - _stext);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/* the generic initrd code expects virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}

	early_init_fdt_scan_reserved_mem();

	if (!IS_ENABLED(CONFIG_ZONE_DMA) && !IS_ENABLED(CONFIG_ZONE_DMA32))
		reserve_crashkernel();

	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
}

void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

	max_pfn = max_low_pfn = max;
	min_low_pfn = min;

	arch_numa_init();

	/*
	 * arm64_hugetlb_cma_reserve() must run after arch_numa_init(),
	 * which calls numa_init() to initialize node_online_map; that map
	 * is used by hugetlb_cma_reserve() when distributing the required
	 * CMA size across the online nodes.
	 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
	arm64_hugetlb_cma_reserve();
#endif

	dma_pernuma_cma_reserve();

	kvm_hyp_reserve();

	/*
	 * sparse_init() tries to allocate memory from memblock, so must be
	 * done after the fixed reservations
	 */
	sparse_init();
	zone_sizes_init(min, max);

	/*
	 * Reserve the CMA area after arm64_dma_phys_limit has been
	 * initialised.
	 */
	dma_contiguous_reserve(arm64_dma_phys_limit);

	/*
	 * request_standard_resources() depends on crashkernel's memory being
	 * reserved, so do it here.
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32))
		reserve_crashkernel();

	memblock_dump_all();
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free.  This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
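	/*
	 * The first argument tells swiotlb_init() whether any RAM lies
	 * beyond what arm64_dma_phys_limit can address, i.e. whether DMA
	 * bounce buffering may be needed at all.
	 */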
	swiotlb_init(max_pfn > PFN_DOWN(arm64_dma_phys_limit), SWIOTLB_VERBOSE);

	/* this will put all unused low memory onto the freelists */
	memblock_free_all();

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
#endif

	/*
	 * Selected page table levels should match when derived from
	 * scratch using the virtual address range and page size.
	 */
	BUILD_BUG_ON(ARM64_HW_PGTABLE_LEVELS(CONFIG_ARM64_VA_BITS) !=
		     CONFIG_PGTABLE_LEVELS);

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
	free_reserved_area(lm_alias(__init_begin),
			   lm_alias(__init_end),
			   POISON_FREE_INITMEM, "unused kernel");
	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	vunmap_range((u64)__init_begin, (u64)__init_end);
}

void dump_mem_limit(void)
{
	if (memory_limit != PHYS_ADDR_MAX) {
		pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
	} else {
		pr_emerg("Memory Limit: none\n");
	}
}