xref: /linux/fs/proc/page.c (revision f898c16a0624e7f2dcb0b1cda6916c9be6489197)
// SPDX-License-Identifier: GPL-2.0
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/huge_mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/kernel-page-flags.h>
#include <linux/uaccess.h>
#include "internal.h"

/*
 * Each entry in these files is KPMSIZE (8) bytes wide; a file offset of
 * pfn * KPMSIZE selects the record for that page frame.
 */
#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)
#define KPMBITS (KPMSIZE * BITS_PER_BYTE)

static inline unsigned long get_max_dump_pfn(void)
{
#ifdef CONFIG_SPARSEMEM
	/*
	 * The memmap of early sections is completely populated and marked
	 * online even if max_pfn does not fall on a section boundary -
	 * pfn_to_online_page() will succeed on all pages. Allow inspecting
	 * these memmaps.
	 */
	return round_up(max_pfn, PAGES_PER_SECTION);
#else
	return max_pfn;
#endif
}
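
/*
 * Worked example (illustrative numbers, not from this file): with 4 KiB
 * pages and 128 MiB sections, PAGES_PER_SECTION is 32768 (0x8000). A
 * max_pfn of 0x23456 then rounds up to 0x28000, so the tail of the last,
 * partially used early section remains within the dumpable range.
 */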

/* /proc/kpagecount - an array exposing page counts
 *
 * Each entry is a u64 representing the corresponding
 * physical page's map count.
 *
 * (See the usage sketch after kpagecount_proc_ops below.)
 */
static ssize_t kpagecount_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 pcount;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires identifying
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (!ppage)
			pcount = 0;
		else
			pcount = page_mapcount(ppage);

		if (put_user(pcount, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpagecount_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpagecount_read,
};
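
/*
 * Usage sketch (hypothetical userspace reader, not part of this file):
 * seek to pfn * 8 and read one u64 per page frame. Assumes root, since
 * the file is created with mode S_IRUSR; the pfn value is made up.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		uint64_t count;
 *		unsigned long pfn = 0x1000;	// hypothetical page frame
 *		int fd = open("/proc/kpagecount", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (pread(fd, &count, sizeof(count), pfn * sizeof(count)) ==
 *		    sizeof(count))
 *			printf("pfn %lx mapped %llu times\n", pfn,
 *			       (unsigned long long)count);
 *		close(fd);
 *		return 0;
 *	}
 */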

/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 */

/*
 * Copy bit "kbit" of the kernel page flags word into bit "ubit" of the
 * stable user-visible value.
 */
static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
{
	return ((kflags >> kbit) & 1) << ubit;
}

u64 stable_page_flags(const struct page *page)
{
	const struct folio *folio;
	unsigned long k;
	unsigned long mapping;
	bool is_anon;
	u64 u = 0;

	/*
	 * pseudo flag: KPF_NOPAGE
	 * it differentiates a memory hole from a page with no flags
	 */
	if (!page)
		return 1 << KPF_NOPAGE;
	folio = page_folio(page);

	k = folio->flags;
	mapping = (unsigned long)folio->mapping;
	is_anon = mapping & PAGE_MAPPING_ANON;

	/*
	 * pseudo flags for the well known (anonymous) memory mapped pages
	 */
	if (page_mapped(page))
		u |= 1 << KPF_MMAP;
	if (is_anon) {
		u |= 1 << KPF_ANON;
		if (mapping & PAGE_MAPPING_KSM)
			u |= 1 << KPF_KSM;
	}

	/*
	 * compound pages: export both head/tail info
	 * they together define a compound page's start/end pos and order
	 */
	if (page == &folio->page)
		u |= kpf_copy_bit(k, KPF_COMPOUND_HEAD, PG_head);
	else
		u |= 1 << KPF_COMPOUND_TAIL;
	if (folio_test_hugetlb(folio))
		u |= 1 << KPF_HUGE;
	/*
	 * We need to check PageLRU/PageAnon to make sure a given page is a
	 * THP, not a non-huge compound page.
	 */
	else if (folio_test_large(folio)) {
		if ((k & (1 << PG_lru)) || is_anon)
			u |= 1 << KPF_THP;
		else if (is_huge_zero_folio(folio)) {
			u |= 1 << KPF_ZERO_PAGE;
			u |= 1 << KPF_THP;
		}
	} else if (is_zero_pfn(page_to_pfn(page)))
		u |= 1 << KPF_ZERO_PAGE;

	/*
	 * Caveats on high order pages: PG_buddy and PG_slab will only be set
	 * on the head page.
	 */
	if (PageBuddy(page))
		u |= 1 << KPF_BUDDY;
	else if (page_count(page) == 0 && is_free_buddy_page(page))
		u |= 1 << KPF_BUDDY;

	if (PageOffline(page))
		u |= 1 << KPF_OFFLINE;
	if (PageTable(page))
		u |= 1 << KPF_PGTABLE;
	if (folio_test_slab(folio))
		u |= 1 << KPF_SLAB;

#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
	u |= kpf_copy_bit(k, KPF_IDLE,          PG_idle);
#else
	if (folio_test_idle(folio))
		u |= 1 << KPF_IDLE;
#endif

	u |= kpf_copy_bit(k, KPF_LOCKED,	PG_locked);
	u |= kpf_copy_bit(k, KPF_ERROR,		PG_error);
	u |= kpf_copy_bit(k, KPF_DIRTY,		PG_dirty);
	u |= kpf_copy_bit(k, KPF_UPTODATE,	PG_uptodate);
	u |= kpf_copy_bit(k, KPF_WRITEBACK,	PG_writeback);

	u |= kpf_copy_bit(k, KPF_LRU,		PG_lru);
	u |= kpf_copy_bit(k, KPF_REFERENCED,	PG_referenced);
	u |= kpf_copy_bit(k, KPF_ACTIVE,	PG_active);
	u |= kpf_copy_bit(k, KPF_RECLAIM,	PG_reclaim);

#define SWAPCACHE ((1 << PG_swapbacked) | (1 << PG_swapcache))
	if ((k & SWAPCACHE) == SWAPCACHE)
		u |= 1 << KPF_SWAPCACHE;
	u |= kpf_copy_bit(k, KPF_SWAPBACKED,	PG_swapbacked);

	u |= kpf_copy_bit(k, KPF_UNEVICTABLE,	PG_unevictable);
	u |= kpf_copy_bit(k, KPF_MLOCKED,	PG_mlocked);

#ifdef CONFIG_MEMORY_FAILURE
	if (u & (1 << KPF_HUGE))
		u |= kpf_copy_bit(k, KPF_HWPOISON,	PG_hwpoison);
	else
		u |= kpf_copy_bit(page->flags, KPF_HWPOISON,	PG_hwpoison);
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	u |= kpf_copy_bit(k, KPF_UNCACHED,	PG_uncached);
#endif

	u |= kpf_copy_bit(k, KPF_RESERVED,	PG_reserved);
	u |= kpf_copy_bit(k, KPF_MAPPEDTODISK,	PG_mappedtodisk);
	u |= kpf_copy_bit(k, KPF_PRIVATE,	PG_private);
	u |= kpf_copy_bit(k, KPF_PRIVATE_2,	PG_private_2);
	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE,	PG_owner_priv_1);
	u |= kpf_copy_bit(k, KPF_ARCH,		PG_arch_1);
#ifdef CONFIG_ARCH_USES_PG_ARCH_X
	u |= kpf_copy_bit(k, KPF_ARCH_2,	PG_arch_2);
	u |= kpf_copy_bit(k, KPF_ARCH_3,	PG_arch_3);
#endif

	return u;
}

static ssize_t kpageflags_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires identifying
		 * memmaps that were actually initialized.
		 */
		struct page *page = pfn_to_online_page(pfn);

		if (put_user(stable_page_flags(page), out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpageflags_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpageflags_read,
};
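
/*
 * Decoding sketch (hypothetical userspace, not part of this file): read
 * the u64 for a pfn and test the KPF_* bit numbers from the uapi header
 * <linux/kernel-page-flags.h>. The pfn value is made up.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <linux/kernel-page-flags.h>
 *
 *	int main(void)
 *	{
 *		uint64_t flags;
 *		unsigned long pfn = 0x1000;	// hypothetical page frame
 *		int fd = open("/proc/kpageflags", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (pread(fd, &flags, sizeof(flags), pfn * sizeof(flags)) ==
 *		    sizeof(flags)) {
 *			if (flags & (1ULL << KPF_NOPAGE))
 *				printf("pfn %lx: memory hole\n", pfn);
 *			else if (flags & (1ULL << KPF_THP))
 *				printf("pfn %lx: part of a THP\n", pfn);
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */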

#ifdef CONFIG_MEMCG
static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 ino;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires identifying
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (ppage)
			ino = page_cgroup_ino(ppage);
		else
			ino = 0;

		if (put_user(ino, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpagecgroup_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpagecgroup_read,
};
#endif /* CONFIG_MEMCG */
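
/*
 * Interpretation sketch (illustrative, not from this file): each entry is
 * the inode number of the memory cgroup the page is charged to, or 0 when
 * the page is uncharged or the pfn is not online. A userspace reader could
 * match the value against st_ino from stat(2) on a cgroup directory (the
 * path below is hypothetical; assumes <sys/stat.h>):
 *
 *	struct stat st;
 *	if (stat("/sys/fs/cgroup/mygroup", &st) == 0 && st.st_ino == ino)
 *		;	// page is charged to "mygroup"
 */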

static int __init proc_page_init(void)
{
	proc_create("kpagecount", S_IRUSR, NULL, &kpagecount_proc_ops);
	proc_create("kpageflags", S_IRUSR, NULL, &kpageflags_proc_ops);
#ifdef CONFIG_MEMCG
	proc_create("kpagecgroup", S_IRUSR, NULL, &kpagecgroup_proc_ops);
#endif
	return 0;
}
fs_initcall(proc_page_init);