xref: /linux/arch/loongarch/mm/pgtable.c (revision cdd30ebb1b9f36159d66f088b61aee264e649d7a)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

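/*
 * Convert a DMW (Direct Mapped Window) kernel virtual address to its
 * struct page. DMW addresses translate to physical addresses by simple
 * arithmetic, so __pa() is sufficient.
 */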
struct page *dmw_virt_to_page(unsigned long kaddr)
{
	return phys_to_page(__pa(kaddr));
}
EXPORT_SYMBOL(dmw_virt_to_page);

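/*
 * Convert a TLB-mapped kernel virtual address (e.g. vmalloc space) to its
 * struct page by looking up its PTE in the kernel page tables.
 */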
struct page *tlb_virt_to_page(unsigned long kaddr)
{
	return phys_to_page(pfn_to_phys(pte_pfn(*virt_to_kpte(kaddr))));
}
EXPORT_SYMBOL(tlb_virt_to_page);

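/*
 * Allocate a PGD for a new mm: fill every entry with the invalid
 * lower-level table via pgd_init(), then copy the kernel entries
 * (above USER_PTRS_PER_PGD) from init_mm so all processes share the
 * kernel address space.
 */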
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *init, *ret = NULL;
	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);

	if (ptdesc) {
		ret = (pgd_t *)ptdesc_address(ptdesc);
		init = pgd_offset(&init_mm, 0UL);
		pgd_init(ret);
		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}

	return ret;
}
EXPORT_SYMBOL_GPL(pgd_alloc);

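/*
 * Point every PGD entry at the invalid table of the next level (PUD,
 * PMD or PTE, depending on which levels are folded). The loop is
 * unrolled to write eight entries per iteration.
 */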
void pgd_init(void *addr)
{
	unsigned long *p, *end;
	unsigned long entry;

#if !defined(__PAGETABLE_PUD_FOLDED)
	entry = (unsigned long)invalid_pud_table;
#elif !defined(__PAGETABLE_PMD_FOLDED)
	entry = (unsigned long)invalid_pmd_table;
#else
	entry = (unsigned long)invalid_pte_table;
#endif

	p = (unsigned long *)addr;
	end = p + PTRS_PER_PGD;

	do {
		p[0] = entry;
		p[1] = entry;
		p[2] = entry;
		p[3] = entry;
		p[4] = entry;
		p += 8;
		p[-3] = entry;
		p[-2] = entry;
		p[-1] = entry;
	} while (p != end);
}
EXPORT_SYMBOL_GPL(pgd_init);

#ifndef __PAGETABLE_PMD_FOLDED
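/* Point every PMD entry at the invalid PTE table; same unrolled loop as pgd_init(). */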
void pmd_init(void *addr)
{
	unsigned long *p, *end;
	unsigned long pagetable = (unsigned long)invalid_pte_table;

	p = (unsigned long *)addr;
	end = p + PTRS_PER_PMD;

	do {
		p[0] = pagetable;
		p[1] = pagetable;
		p[2] = pagetable;
		p[3] = pagetable;
		p[4] = pagetable;
		p += 8;
		p[-3] = pagetable;
		p[-2] = pagetable;
		p[-1] = pagetable;
	} while (p != end);
}
EXPORT_SYMBOL_GPL(pmd_init);
#endif

#ifndef __PAGETABLE_PUD_FOLDED
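/* Point every PUD entry at the invalid PMD table; same unrolled loop as pgd_init(). */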
void pud_init(void *addr)
{
	unsigned long *p, *end;
	unsigned long pagetable = (unsigned long)invalid_pmd_table;

	p = (unsigned long *)addr;
	end = p + PTRS_PER_PUD;

	do {
		p[0] = pagetable;
		p[1] = pagetable;
		p[2] = pagetable;
		p[3] = pagetable;
		p[4] = pagetable;
		p += 8;
		p[-3] = pagetable;
		p[-2] = pagetable;
		p[-1] = pagetable;
	} while (p != end);
}
EXPORT_SYMBOL_GPL(pud_init);
#endif

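/*
 * Initialize a kernel PTE table with _PAGE_GLOBAL rather than zero, so
 * that even not-yet-populated kernel PTEs carry the global bit.
 */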
void kernel_pte_init(void *addr)
{
	unsigned long *p, *end;

	p = (unsigned long *)addr;
	end = p + PTRS_PER_PTE;

	do {
		p[0] = _PAGE_GLOBAL;
		p[1] = _PAGE_GLOBAL;
		p[2] = _PAGE_GLOBAL;
		p[3] = _PAGE_GLOBAL;
		p[4] = _PAGE_GLOBAL;
		p += 8;
		p[-3] = _PAGE_GLOBAL;
		p[-2] = _PAGE_GLOBAL;
		p[-1] = _PAGE_GLOBAL;
	} while (p != end);
}

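/* Construct a PMD entry mapping @page with protection bits @prot. */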
pmd_t mk_pmd(struct page *page, pgprot_t prot)
{
	pmd_t pmd;

	pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot);

	return pmd;
}

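/* Install @pmd at @pmdp and flush the TLB to drop any stale translation. */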
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	WRITE_ONCE(*pmdp, pmd);
	flush_tlb_all();
}

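/*
 * Boot-time setup: initialize the kernel PGDs (swapper_pg_dir and
 * invalid_pg_dir) and the shared invalid mid-level tables.
 */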
void __init pagetable_init(void)
{
	/* Initialize the entire pgd.  */
	pgd_init(swapper_pg_dir);
	pgd_init(invalid_pg_dir);
#ifndef __PAGETABLE_PUD_FOLDED
	pud_init(invalid_pud_table);
#endif
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_init(invalid_pmd_table);
#endif
}
167