// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/suspend.h>
#include <linux/dma-direct.h>
#include <linux/execmem.h>
#include <linux/vmalloc.h>

#include <asm/swiotlb.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/kasan.h>
#include <asm/svm.h>
#include <asm/mmzone.h>
#include <asm/ftrace.h>
#include <asm/text-patching.h>
#include <asm/setup.h>
#include <asm/fixmap.h>

#include <asm/fadump.h>
#include <asm/kexec.h>
#include <asm/kvm_ppc.h>

#include <mm/mmu_decl.h>

unsigned long long memory_limit __initdata;

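/*
 * Decide the page protection to use when userspace maps a physical
 * range (e.g. via /dev/mem): defer to the platform's
 * phys_mem_access_prot() hook when one is registered, otherwise map
 * anything that is not system RAM as non-cacheable.
 */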
pgprot_t __phys_mem_access_prot(unsigned long pfn, unsigned long size,
				pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(__phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG
static DEFINE_MUTEX(linear_mapping_mutex);

#ifdef CONFIG_NUMA
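/*
 * Resolve the NUMA node for a hot-added physical address using the
 * platform's hot_add_scn_to_nid() lookup.
 */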
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

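/*
 * Weak default implementations: platforms that cannot map or unmap
 * sections of the linear mapping (the strong versions live in
 * MMU-specific code, e.g. for 64-bit Book3S) report -ENODEV.
 */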
int __weak create_section_mapping(unsigned long start, unsigned long end,
				  int nid, pgprot_t prot)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

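/*
 * Add a hot-plugged memory range to the kernel linear mapping. Updates
 * are serialized with linear_mapping_mutex; failure is reported to the
 * caller as -EFAULT so the hotplug operation can be backed out.
 */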
int __ref arch_create_linear_mapping(int nid, u64 start, u64 size,
				     struct mhp_params *params)
{
	int rc;

	start = (unsigned long)__va(start);
	mutex_lock(&linear_mapping_mutex);
	rc = create_section_mapping(start, start + size, nid,
				    params->pgprot);
	mutex_unlock(&linear_mapping_mutex);
	if (rc) {
		pr_warn("Unable to create linear mapping for 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}
	return 0;
}

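/* Tear down the linear mapping of a memory range that is being hot-removed. */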
void __ref arch_remove_linear_mapping(u64 start, u64 size)
{
	int ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);

	mutex_lock(&linear_mapping_mutex);
	ret = remove_section_mapping(start, start + size);
	mutex_unlock(&linear_mapping_mutex);
	if (ret)
		pr_warn("Unable to remove linear mapping for 0x%llx..0x%llx: %d\n",
			start, start + size, ret);

	/*
	 * Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();
}

/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	if (end_pfn > max_pfn) {
		max_pfn = end_pfn;
		max_low_pfn = end_pfn;
		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	}
}

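/*
 * Wrapper around the generic __add_pages() that also pushes max_pfn,
 * max_low_pfn and high_memory out when the new range extends past the
 * current end of memory.
 */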
int __ref add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		    struct mhp_params *params)
{
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, params);
	if (ret)
		return ret;

	/* update max_pfn, max_low_pfn and high_memory */
	update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
				  nr_pages << PAGE_SHIFT);

	return ret;
}

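/*
 * Hot-add a memory range: create the linear mapping first, then hand
 * the pages to the core mm. If the latter fails, the linear mapping is
 * torn down again so no stale translations are left behind.
 */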
int __ref arch_add_memory(int nid, u64 start, u64 size,
			  struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	rc = arch_create_linear_mapping(nid, start, size, params);
	if (rc)
		return rc;
	rc = add_pages(nid, start_pfn, nr_pages, params);
	if (rc)
		arch_remove_linear_mapping(start, size);
	return rc;
}

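/* Hot-remove a range: release the pages, then drop the linear mapping. */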
void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	arch_remove_linear_mapping(start, size);
}
#endif

#ifndef CONFIG_NUMA
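/*
 * Non-NUMA memory topology: derive the pfn limits straight from
 * memblock and treat all of memory as a single node (node 0).
 */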
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/*
	 * Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

/*
 * Mark the holes between memblock ranges as nosave so hibernation does
 * not try to save or restore pages that do not exist.
 */
static int __init mark_nonram_nosave(void)
{
	unsigned long spfn, epfn, prev = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &spfn, &epfn, NULL) {
		if (prev && prev < spfn)
			register_nosave_region(prev, spfn);

		prev = epfn;
	}

	return 0;
}
#else /* CONFIG_NUMA */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

/*
 * Zones usage:
 *
 * We set up ZONE_DMA to be 31 bits on all platforms and ZONE_NORMAL to be
 * everything else. GFP_DMA32 page allocations automatically fall back to
 * ZONE_DMA.
 *
 * By using 31 bits unconditionally, we can exploit zone_dma_limit to inform
 * the generic DMA mapping code. 32-bit-only devices (if not handled by an
 * IOMMU anyway) will take a first dip into ZONE_NORMAL and get otherwise
 * served by ZONE_DMA.
 */
void __init arch_zone_limits_init(unsigned long *max_zone_pfns)
{
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = min((zone_dma_limit >> PAGE_SHIFT) + 1, max_low_pfn);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();
	int zone_dma_bits;

#ifdef CONFIG_HIGHMEM
	unsigned long v = __fix_to_virt(FIX_KMAP_END);
	unsigned long end = __fix_to_virt(FIX_KMAP_BEGIN);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, __pgprot(0));	/* XXX gross */

	map_kernel_page(PKMAP_BASE, 0, __pgprot(0));	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

	/*
	 * Allow 30-bit DMA for very limited Broadcom wifi chips on many
	 * powerbooks.
	 */
	if (IS_ENABLED(CONFIG_PPC32))
		zone_dma_bits = 30;
	else
		zone_dma_bits = 31;

	zone_dma_limit = DMA_BIT_MASK(zone_dma_bits);

	mark_nonram_nosave();
}

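/*
 * Early MM initialization: reserve the large CMA regions and, where
 * needed, set up swiotlb while memblock allocations are still possible.
 */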
void __init arch_mm_preinit(void)
{
	/*
	 * Reserve large chunks of memory for use by CMA for kdump, fadump, KVM
	 * and hugetlb. These must be called after pageblock_order is
	 * initialised.
	 */
	fadump_cma_init();
	kdump_cma_reserve();
	kvm_cma_reserve();

	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	/*
	 * Some platforms (e.g. 85xx) limit DMA-able memory way below
	 * 4G. We force memblock to bottom-up mode to ensure that the
	 * memory allocated in swiotlb_init() is DMA-able.
	 * As it's the last memblock allocation, no need to reset it
	 * back to top-down.
	 */
	memblock_set_bottom_up(true);
	swiotlb_init(ppc_swiotlb_enable, ppc_swiotlb_flags);
#endif

	kasan_late_init();

#if defined(CONFIG_PPC_E500) && !defined(CONFIG_SMP)
	/*
	 * If SMP is enabled, next_tlbcam_idx is initialized in the CPU up
	 * functions... do it here for the non-SMP case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

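/*
 * Free the memory occupied by the __init sections. The region is made
 * non-executable first, then released and poisoned; any boot-time
 * ftrace trampolines that lived in init text are dropped as well.
 */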
void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	free_initmem_default(POISON_FREE_INITMEM);
	ftrace_free_init_tramp();
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (e.g. kdump).
 */
static int __init add_system_ram_resources(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range(i, &start, &end) {
		struct resource *res;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = start;
			/*
			 * In memblock, end points to the first byte after
			 * the range while in resources, end points to the
			 * last byte in the range.
			 */
			res->end = end - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(insert_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well; these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */

/*
 * This is defined in kernel/resource.c but only powerpc needs to export it,
 * for the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is
 * removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);

#ifdef CONFIG_EXECMEM
static struct execmem_info execmem_info __ro_after_init;

#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_BOOK3S_603)
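/*
 * Preallocate the PTE tables covering the modules area so that later
 * execmem allocations never need to create page tables on the fly
 * (8xx and 603 walk the kernel page tables from their software
 * TLB-miss handlers).
 */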
static void prealloc_execmem_pgtable(void)
{
	unsigned long va;

	for (va = ALIGN_DOWN(MODULES_VADDR, PGDIR_SIZE); va < MODULES_END; va += PGDIR_SIZE)
		pte_alloc_kernel(pmd_off_k(va), va);
}
#else
static void prealloc_execmem_pgtable(void) { }
#endif

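/*
 * Describe the executable-memory ranges (module text, kprobes
 * trampolines, module data) to the generic execmem allocator,
 * including the preferred and fallback ranges for module text.
 */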
struct execmem_info __init *execmem_arch_setup(void)
{
	pgprot_t kprobes_prot = strict_module_rwx_enabled() ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
	pgprot_t prot = strict_module_rwx_enabled() ? PAGE_KERNEL : PAGE_KERNEL_EXEC;
	unsigned long fallback_start = 0, fallback_end = 0;
	unsigned long start, end;

	/*
	 * BOOK3S_32 and 8xx define MODULES_VADDR for text allocations and
	 * allow allocating data in the entire vmalloc space
	 */
#ifdef MODULES_VADDR
	unsigned long limit = (unsigned long)_etext - SZ_32M;

	/*
	 * Try within 32M of _etext first: relative branches on powerpc
	 * reach +/-32M, so module text placed there can call the kernel
	 * without branch trampolines.
	 */
	if (MODULES_VADDR < PAGE_OFFSET && MODULES_END > limit) {
		start = limit;
		fallback_start = MODULES_VADDR;
		fallback_end = MODULES_END;
	} else {
		start = MODULES_VADDR;
	}

	end = MODULES_END;
#else
	start = VMALLOC_START;
	end = VMALLOC_END;
#endif

	prealloc_execmem_pgtable();

	execmem_info = (struct execmem_info){
		.ranges = {
			[EXECMEM_DEFAULT] = {
				.start = start,
				.end = end,
				.pgprot = prot,
				.alignment = 1,
				.fallback_start = fallback_start,
				.fallback_end = fallback_end,
			},
			[EXECMEM_KPROBES] = {
				.start = VMALLOC_START,
				.end = VMALLOC_END,
				.pgprot = kprobes_prot,
				.alignment = 1,
			},
			[EXECMEM_MODULE_DATA] = {
				.start = VMALLOC_START,
				.end = VMALLOC_END,
				.pgprot = PAGE_KERNEL,
				.alignment = 1,
			},
		},
	};

	return &execmem_info;
}
#endif /* CONFIG_EXECMEM */