// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <linux/mmzone.h>
#include <linux/execmem.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_page(vto, vfrom);
	/* kmap_local mappings nest, so unmap in reverse order of mapping */
	kunmap_local(vto);
	kunmap_local(vfrom);
	/* Make sure this page is copied on other CPUs too before using it */
	smp_wmb();
}

int __ref page_is_ram(unsigned long pfn)
{
	unsigned long addr = PFN_PHYS(pfn);

	return memblock_is_memory(addr) && !memblock_is_reserved(addr);
}

#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfns);
}

void __init mem_init(void)
{
	max_mapnr = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	memblock_free_all();
}
#endif /* !CONFIG_NUMA */

void __ref free_initmem(void)
{
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, params);

	if (ret)
		pr_warn("%s: Problem encountered in __add_pages() as ret=%d\n",
				__func__, ret);

	return ret;
}

void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page = pfn_to_page(start_pfn);

	/* With altmap the first mapped page is offset from @start */
	if (altmap)
		page += vmem_altmap_offset(altmap);
	__remove_pages(start_pfn, nr_pages, altmap);
}

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return pa_to_nid(start);
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
			       unsigned long addr, unsigned long next)
{
	pmd_t entry;

	entry = pfn_pmd(virt_to_pfn(p), PAGE_KERNEL);
	pmd_val(entry) |= _PAGE_HUGE | _PAGE_HGLOBAL;
	set_pmd_at(&init_mm, addr, pmd, entry);
}

int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
				unsigned long addr, unsigned long next)
{
	int huge = pmd_val(pmdp_get(pmd)) & _PAGE_HUGE;

	if (huge)
		vmemmap_verify((pte_t *)pmd, node, addr, next);

	return huge;
}

int __meminit vmemmap_populate(unsigned long start, unsigned long end,
			       int node, struct vmem_altmap *altmap)
{
#if CONFIG_PGTABLE_LEVELS == 2
	return vmemmap_populate_basepages(start, end, node, NULL);
#else
	return vmemmap_populate_hugepages(start, end, node, NULL);
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap)
{
}
#endif
#endif

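/*
 * Walk the kernel page tables for @addr, allocating any missing
 * intermediate levels (PUD/PMD/PTE pages) from memblock, and return
 * the PTE slot for @addr. Boot-time only: allocation failure panics.
 */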
pte_t * __init populate_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (p4d_none(p4dp_get(p4d))) {
		pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pud)
			panic("%s: Failed to allocate memory\n", __func__);
		p4d_populate(&init_mm, p4d, pud);
#ifndef __PAGETABLE_PUD_FOLDED
		pud_init(pud);
#endif
	}

	pud = pud_offset(p4d, addr);
	if (pud_none(pudp_get(pud))) {
		pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pmd)
			panic("%s: Failed to allocate memory\n", __func__);
		pud_populate(&init_mm, pud, pmd);
#ifndef __PAGETABLE_PMD_FOLDED
		pmd_init(pmd);
#endif
	}

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(pmdp_get(pmd))) {
		pte_t *pte;

		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate memory\n", __func__);
		pmd_populate_kernel(&init_mm, pmd, pte);
	}

	return pte_offset_kernel(pmd, addr);
}

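/*
 * Install a boot-time fixmap entry: map slot @idx to @phys with @flags,
 * or tear the mapping down (and flush the TLB) when @flags is empty.
 * Backing page tables are allocated on demand via populate_kernel_pte().
 */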
void __init __set_fixmap(enum fixed_addresses idx,
			 phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = populate_kernel_pte(addr);
	if (!pte_none(ptep_get(ptep))) {
		pte_ERROR(*ptep);
		return;
	}

	if (pgprot_val(flags))
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
	else {
		pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}

/*
 * Align swapper_pg_dir to 64K, which allows its address to be loaded
 * with a single LUI instruction in the TLB handlers. If we used
 * __aligned(64K), its size would get rounded up to the alignment
 * size, and waste space. So we place it in its own section and align
 * it in the linker script.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(".bss..swapper_pg_dir");

pgd_t invalid_pg_dir[_PTRS_PER_PGD] __page_aligned_bss;
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pud_table);
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);

#ifdef CONFIG_EXECMEM
static struct execmem_info execmem_info __ro_after_init;

struct execmem_info __init *execmem_arch_setup(void)
{
	execmem_info = (struct execmem_info){
		.ranges = {
			[EXECMEM_DEFAULT] = {
				.start = MODULES_VADDR,
				.end = MODULES_END,
				.pgprot = PAGE_KERNEL,
				.alignment = 1,
			},
		},
	};

	return &execmem_info;
}
#endif /* CONFIG_EXECMEM */