// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <linux/mmzone.h>
#include <linux/execmem.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

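/*
 * A pfn counts as RAM when memblock knows its physical address as
 * memory and that address has not been reserved (e.g. by firmware or
 * for the kernel image).
 */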
int __ref page_is_ram(unsigned long pfn)
{
	unsigned long addr = PFN_PHYS(pfn);

	return memblock_is_memory(addr) && !memblock_is_reserved(addr);
}

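/*
 * Report the highest pfn of each zone: DMA32 (physical memory below
 * 4GiB) when configured, NORMAL up to max_low_pfn, and HIGHMEM up to
 * max_pfn.
 */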
void __init arch_zone_limits_init(unsigned long *max_zone_pfns)
{
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif
}

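/* Poison and release all .init memory back to the page allocator. */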
void __ref free_initmem(void)
{
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_HIGHMEM

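/*
 * Pre-allocate the page tables covering [start, end) below pgd_base:
 * any PMD entry that is still empty gets a fresh PTE page from
 * memblock (initialized via kernel_pte_init()), so later fixmap and
 * highmem mappings never have to allocate.
 */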
void __init fixrange_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	int ptrs_per_pgd;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pud_index(vaddr);
	k = pmd_index(vaddr);
	pgd = pgd_base + i;
	ptrs_per_pgd = min((1 << (BITS_PER_LONG - PGDIR_SHIFT)), PTRS_PER_PGD);

	for ( ; (i < ptrs_per_pgd) && (vaddr < end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
					if (!pte)
						panic("%s: Failed to allocate %lu bytes align=%lx\n",
						      __func__, PAGE_SIZE, PAGE_SIZE);

					kernel_pte_init(pte);
					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}

#endif

#ifdef CONFIG_MEMORY_HOTPLUG
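/*
 * Hot-add @size bytes of memory starting at @start to node @nid by
 * creating its struct page array; the pages are onlined separately.
 */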
int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, params);

	if (ret)
		pr_warn("%s: Problem encountered in __add_pages() as ret=%d\n",
			__func__, ret);

	return ret;
}

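/* Undo arch_add_memory(): tear down the struct pages for the range. */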
void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page = pfn_to_page(start_pfn);

	/* With altmap the first mapped page is offset from @start */
	if (altmap)
		page += vmem_altmap_offset(altmap);
	__remove_pages(start_pfn, nr_pages, altmap);
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
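/*
 * Map one PMD-sized stretch of the vmemmap with a huge page, marking
 * the entry huge and global so it is handled as a single PMD-level
 * translation.
 */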
void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
			       unsigned long addr, unsigned long next)
{
	pmd_t entry;

	entry = pfn_pmd(virt_to_pfn(p), PAGE_KERNEL);
	pmd_val(entry) |= _PAGE_HUGE | _PAGE_HGLOBAL;
	set_pmd_at(&init_mm, addr, pmd, entry);
}

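/*
 * Tell the generic vmemmap code whether this PMD already carries a
 * huge-page mapping; if it does, verify the existing mapping instead
 * of populating it again.
 */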
int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
				unsigned long addr, unsigned long next)
{
	int huge = pmd_val(pmdp_get(pmd)) & _PAGE_HUGE;

	if (huge)
		vmemmap_verify((pte_t *)pmd, node, addr, next);

	return huge;
}

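/*
 * With only two page-table levels there is no PMD level to place huge
 * pages at, so fall back to base-page vmemmap mappings in that case.
 */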
int __meminit vmemmap_populate(unsigned long start, unsigned long end,
			       int node, struct vmem_altmap *altmap)
{
#if CONFIG_PGTABLE_LEVELS == 2
	return vmemmap_populate_basepages(start, end, node, NULL);
#else
	return vmemmap_populate_hugepages(start, end, node, NULL);
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap)
{
}
#endif
#endif

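/*
 * Walk the kernel page tables down to the PTE level for @addr,
 * allocating any missing intermediate tables from memblock on the way,
 * and return a pointer to the PTE slot.
 */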
pte_t * __init populate_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (p4d_none(p4dp_get(p4d))) {
		pud = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
		p4d_populate(&init_mm, p4d, pud);
#ifndef __PAGETABLE_PUD_FOLDED
		pud_init(pud);
#endif
	}

	pud = pud_offset(p4d, addr);
	if (pud_none(pudp_get(pud))) {
		pmd = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
#ifndef __PAGETABLE_PMD_FOLDED
		pmd_init(pmd);
#endif
	}

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(pmdp_get(pmd))) {
		pte_t *pte;

		pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		kernel_pte_init(pte);
	}

	return pte_offset_kernel(pmd, addr);
}

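/*
 * Establish (for a non-empty pgprot) or tear down the mapping of a
 * single fixmap slot. A slot that is already mapped is reported via
 * pte_ERROR() and left untouched.
 */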
void __init __set_fixmap(enum fixed_addresses idx,
			 phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = populate_kernel_pte(addr);
	if (!pte_none(ptep_get(ptep))) {
		pte_ERROR(*ptep);
		return;
	}

	if (pgprot_val(flags))
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
	else {
		pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}

/*
 * Align swapper_pg_dir to 64K, which allows its address to be loaded
 * with a single LUI instruction in the TLB handlers. If we used
 * __aligned(64K), its size would get rounded up to the alignment
 * size and waste space. So we place it in its own section and align
 * it in the linker script.
 */
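/*
 * A minimal sketch of what that linker-script placement can look like
 * (illustrative only; the real fragment lives in the arch
 * vmlinux.lds.S):
 *
 *	. = ALIGN(0x10000);
 *	.bss..swapper_pg_dir : { *(.bss..swapper_pg_dir) }
 */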
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(".bss..swapper_pg_dir");

pgd_t invalid_pg_dir[_PTRS_PER_PGD] __page_aligned_bss;
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pud_table);
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);

#if defined(CONFIG_EXECMEM) && defined(MODULES_VADDR)
static struct execmem_info execmem_info __ro_after_init;

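/*
 * Describe where the generic execmem allocator may place executable
 * allocations (e.g. modules): the module VA window, mapped with normal
 * kernel page protections and no extra alignment.
 */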
struct execmem_info __init *execmem_arch_setup(void)
{
	execmem_info = (struct execmem_info){
		.ranges = {
			[EXECMEM_DEFAULT] = {
				.start = MODULES_VADDR,
				.end = MODULES_END,
				.pgprot = PAGE_KERNEL,
				.alignment = 1,
			},
		},
	};

	return &execmem_info;
}
#endif /* CONFIG_EXECMEM && MODULES_VADDR */