xref: /linux/arch/loongarch/mm/pageattr.c (revision ab68d7eb7b1a64f3f4710da46cc5f93c6c154942)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2024 Loongson Technology Corporation Limited
 */

#include <linux/memblock.h>
#include <linux/pagewalk.h>
#include <linux/pgtable.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

struct pageattr_masks {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

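/*
 * Apply the caller's protection masks to a raw page-table entry value:
 * bits in clear_mask are dropped first, then bits in set_mask are added.
 * The masks arrive through walk->private as a struct pageattr_masks.
 */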
static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
{
	unsigned long new_val = val;
	struct pageattr_masks *masks = walk->private;

	new_val &= ~(pgprot_val(masks->clear_mask));
	new_val |= (pgprot_val(masks->set_mask));

	return new_val;
}

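/*
 * Per-level walk callbacks: if the entry at this level is a leaf (a huge
 * mapping), rewrite it in place with the new masks applied. Non-leaf
 * entries are left alone so the walker descends to the next level. The
 * p4d/pud/pmd handlers below follow the same pattern.
 */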
static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pgd_t val = pgdp_get(pgd);

	if (pgd_leaf(val)) {
		val = __pgd(set_pageattr_masks(pgd_val(val), walk));
		set_pgd(pgd, val);
	}

	return 0;
}

static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	p4d_t val = p4dp_get(p4d);

	if (p4d_leaf(val)) {
		val = __p4d(set_pageattr_masks(p4d_val(val), walk));
		set_p4d(p4d, val);
	}

	return 0;
}

static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pud_t val = pudp_get(pud);

	if (pud_leaf(val)) {
		val = __pud(set_pageattr_masks(pud_val(val), walk));
		set_pud(pud, val);
	}

	return 0;
}

static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pmd_t val = pmdp_get(pmd);

	if (pmd_leaf(val)) {
		val = __pmd(set_pageattr_masks(pmd_val(val), walk));
		set_pmd(pmd, val);
	}

	return 0;
}

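/*
 * PTEs are always leaf entries, so they are rewritten unconditionally.
 */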
static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pte_t val = ptep_get(pte);

	val = __pte(set_pageattr_masks(pte_val(val), walk));
	set_pte(pte, val);

	return 0;
}

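/*
 * Unpopulated ranges are not an error for a permission change; just skip
 * them and keep walking.
 */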
static int pageattr_pte_hole(unsigned long addr, unsigned long next,
			     int depth, struct mm_walk *walk)
{
	return 0;
}

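/*
 * One walker for all levels: leaf entries are rewritten wherever they
 * appear and holes are skipped, so a single walk handles any mix of
 * huge and base-page mappings.
 */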
static const struct mm_walk_ops pageattr_ops = {
	.pgd_entry = pageattr_pgd_entry,
	.p4d_entry = pageattr_p4d_entry,
	.pud_entry = pageattr_pud_entry,
	.pmd_entry = pageattr_pmd_entry,
	.pte_entry = pageattr_pte_entry,
	.pte_hole = pageattr_pte_hole,
	.walk_lock = PGWALK_RDLOCK,
};

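/*
 * Apply set_mask/clear_mask to every mapping in [addr, addr + numpages *
 * PAGE_SIZE) and flush the TLB for the range. Walking init_mm without
 * VMAs requires its mmap lock to be held for writing.
 */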
static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask, pgprot_t clear_mask)
{
	int ret;
	unsigned long start = addr;
	unsigned long end = start + PAGE_SIZE * numpages;
	struct pageattr_masks masks = {
		.set_mask = set_mask,
		.clear_mask = clear_mask
	};

	if (!numpages)
		return 0;

	mmap_write_lock(&init_mm);
	ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL, &masks);
	mmap_write_unlock(&init_mm);

	flush_tlb_kernel_range(start, end);

	return ret;
}

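/*
 * The set_memory_*() helpers below only act on mappings at or above
 * vm_map_base; addresses below it live in the always-present direct map
 * and are left untouched, with the call reported as success.
 *
 * A minimal usage sketch (hypothetical buffer 'buf', assuming it came
 * from vmalloc() and spans 'nr' whole pages):
 *
 *	set_memory_ro((unsigned long)buf, nr);	// write-protect
 *	...
 *	set_memory_rw((unsigned long)buf, nr);	// make writable again
 */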
int set_memory_x(unsigned long addr, int numpages)
{
	if (addr < vm_map_base)
		return 0;

	return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_NO_EXEC));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	if (addr < vm_map_base)
		return 0;

	return __set_memory(addr, numpages, __pgprot(_PAGE_NO_EXEC), __pgprot(0));
}

int set_memory_ro(unsigned long addr, int numpages)
{
	if (addr < vm_map_base)
		return 0;

	return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_WRITE | _PAGE_DIRTY));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	if (addr < vm_map_base)
		return 0;

	return __set_memory(addr, numpages, __pgprot(_PAGE_WRITE | _PAGE_DIRTY), __pgprot(0));
}

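/*
 * Report whether @page currently has a usable kernel mapping. Addresses
 * below vm_map_base are direct-mapped, so presence only depends on the
 * page being actual memory; everything else is resolved by walking the
 * page tables, stopping early at huge-page leaves.
 */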
bool kernel_page_present(struct page *page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr = (unsigned long)page_address(page);

	if (addr < vm_map_base)
		return memblock_is_memory(__pa(addr));

	pgd = pgd_offset_k(addr);
	if (pgd_none(pgdp_get(pgd)))
		return false;
	if (pgd_leaf(pgdp_get(pgd)))
		return true;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(p4dp_get(p4d)))
		return false;
	if (p4d_leaf(p4dp_get(p4d)))
		return true;

	pud = pud_offset(p4d, addr);
	if (pud_none(pudp_get(pud)))
		return false;
	if (pud_leaf(pudp_get(pud)))
		return true;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(pmdp_get(pmd)))
		return false;
	if (pmd_leaf(pmdp_get(pmd)))
		return true;

	pte = pte_offset_kernel(pmd, addr);
	return pte_present(ptep_get(pte));
}

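/*
 * Restore the default PAGE_KERNEL protections for @page's linear mapping.
 */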
int set_direct_map_default_noflush(struct page *page)
{
	unsigned long addr = (unsigned long)page_address(page);

	if (addr < vm_map_base)
		return 0;

	return __set_memory(addr, 1, PAGE_KERNEL, __pgprot(0));
}

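/*
 * Make @page's linear mapping inaccessible by clearing the present and
 * valid bits; any subsequent access will fault.
 */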
int set_direct_map_invalid_noflush(struct page *page)
{
	unsigned long addr = (unsigned long)page_address(page);

	if (addr < vm_map_base)
		return 0;

	return __set_memory(addr, 1, __pgprot(0), __pgprot(_PAGE_PRESENT | _PAGE_VALID));
}

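/*
 * Set or clear the validity of @nr pages starting at @page in the linear
 * map: valid restores PAGE_KERNEL, invalid clears present/valid as above.
 */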
int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgprot_t set, clear;

	if (addr < vm_map_base)
		return 0;

	if (valid) {
		set = PAGE_KERNEL;
		clear = __pgprot(0);
	} else {
		set = __pgprot(0);
		clear = __pgprot(_PAGE_PRESENT | _PAGE_VALID);
	}

	return __set_memory(addr, nr, set, clear);
}
239