// SPDX-License-Identifier: GPL-2.0
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/huge_mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/kernel-page-flags.h>
#include <linux/uaccess.h>
#include "internal.h"

#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)
#define KPMBITS (KPMSIZE * BITS_PER_BYTE)
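/*
 * Each /proc/kpage* file below is a flat array of u64 entries, one per
 * page frame: the entry for pfn N starts at file offset N * KPMSIZE.
 */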

static inline unsigned long get_max_dump_pfn(void)
{
#ifdef CONFIG_SPARSEMEM
	/*
	 * The memmap of early sections is completely populated and marked
	 * online even if max_pfn does not fall on a section boundary -
	 * pfn_to_online_page() will succeed on all pages. Allow inspecting
	 * these memmaps.
	 */
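	/* e.g. x86_64 with 4 KiB pages: PAGES_PER_SECTION == 32768 (128 MiB) */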
	return round_up(max_pfn, PAGES_PER_SECTION);
#else
	return max_pfn;
#endif
}

/* /proc/kpagecount - an array exposing page mapcounts
 *
 * Each entry is a u64 representing the corresponding
 * physical page mapcount.
 */
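/*
 * Illustrative userspace sketch (an assumption for documentation, not
 * part of this file): the mapcount of page frame 'pfn' is read with
 *
 *	uint64_t cnt;
 *	int fd = open("/proc/kpagecount", O_RDONLY);
 *	pread(fd, &cnt, sizeof(cnt), (off_t)pfn * sizeof(uint64_t));
 */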
static ssize_t kpagecount_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		struct page *page;
		u64 mapcount = 0;

		/*
		 * TODO: ZONE_DEVICE support requires identifying
		 * memmaps that were actually initialized.
		 */
		page = pfn_to_online_page(pfn);
		if (page)
			mapcount = folio_precise_page_mapcount(page_folio(page),
							       page);

		if (put_user(mapcount, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpagecount_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpagecount_read,
};

/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 */

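/*
 * The KPF_* bit positions are user ABI, defined in
 * include/uapi/linux/kernel-page-flags.h; a reader tests one with,
 * e.g., (entry >> KPF_THP) & 1.
 */

/* Move bit 'kbit' of the kernel flags word into bit 'ubit' of the user value. */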
static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
{
	return ((kflags >> kbit) & 1) << ubit;
}

u64 stable_page_flags(const struct page *page)
{
	const struct folio *folio;
	unsigned long k;
	unsigned long mapping;
	bool is_anon;
	u64 u = 0;

	/*
	 * pseudo flag: KPF_NOPAGE
	 * it differentiates a memory hole from a page with no flags
	 */
	if (!page)
		return 1 << KPF_NOPAGE;
	folio = page_folio(page);

	k = folio->flags;
	mapping = (unsigned long)folio->mapping;
	is_anon = mapping & PAGE_MAPPING_ANON;

	/*
	 * pseudo flags for the well known (anonymous) memory mapped pages
	 */
	if (page_mapped(page))
		u |= 1 << KPF_MMAP;
	if (is_anon) {
		u |= 1 << KPF_ANON;
		if (mapping & PAGE_MAPPING_KSM)
			u |= 1 << KPF_KSM;
	}

	/*
	 * compound pages: export both head/tail info
	 * they together define a compound page's start/end pos and order
	 */
	if (page == &folio->page)
		u |= kpf_copy_bit(k, KPF_COMPOUND_HEAD, PG_head);
	else
		u |= 1 << KPF_COMPOUND_TAIL;
	if (folio_test_hugetlb(folio))
		u |= 1 << KPF_HUGE;
	else if (folio_test_large(folio) &&
	         folio_test_large_rmappable(folio)) {
		/* Note: we indicate any THPs here, not just PMD-sized ones */
		u |= 1 << KPF_THP;
	} else if (is_huge_zero_folio(folio)) {
		u |= 1 << KPF_ZERO_PAGE;
		u |= 1 << KPF_THP;
	} else if (is_zero_folio(folio)) {
		u |= 1 << KPF_ZERO_PAGE;
	}

	/*
	 * Caveats on high order pages: PG_buddy and PG_slab will only be set
	 * on the head page.
	 */
	if (PageBuddy(page))
		u |= 1 << KPF_BUDDY;
	else if (page_count(page) == 0 && is_free_buddy_page(page))
		u |= 1 << KPF_BUDDY;

	if (PageOffline(page))
		u |= 1 << KPF_OFFLINE;
	if (PageTable(page))
		u |= 1 << KPF_PGTABLE;
	if (folio_test_slab(folio))
		u |= 1 << KPF_SLAB;

#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
	u |= kpf_copy_bit(k, KPF_IDLE,          PG_idle);
#else
	if (folio_test_idle(folio))
		u |= 1 << KPF_IDLE;
#endif

	u |= kpf_copy_bit(k, KPF_LOCKED,	PG_locked);
	u |= kpf_copy_bit(k, KPF_DIRTY,		PG_dirty);
	u |= kpf_copy_bit(k, KPF_UPTODATE,	PG_uptodate);
	u |= kpf_copy_bit(k, KPF_WRITEBACK,	PG_writeback);

	u |= kpf_copy_bit(k, KPF_LRU,		PG_lru);
	u |= kpf_copy_bit(k, KPF_REFERENCED,	PG_referenced);
	u |= kpf_copy_bit(k, KPF_ACTIVE,	PG_active);
	u |= kpf_copy_bit(k, KPF_RECLAIM,	PG_reclaim);

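	/*
	 * PG_swapcache shares its bit with PG_owner_priv_1, so the page is
	 * only reported as swap cache when PG_swapbacked is also set.
	 */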
#define SWAPCACHE ((1 << PG_swapbacked) | (1 << PG_swapcache))
	if ((k & SWAPCACHE) == SWAPCACHE)
		u |= 1 << KPF_SWAPCACHE;
	u |= kpf_copy_bit(k, KPF_SWAPBACKED,	PG_swapbacked);

	u |= kpf_copy_bit(k, KPF_UNEVICTABLE,	PG_unevictable);
	u |= kpf_copy_bit(k, KPF_MLOCKED,	PG_mlocked);

#ifdef CONFIG_MEMORY_FAILURE
	if (u & (1 << KPF_HUGE))
		u |= kpf_copy_bit(k, KPF_HWPOISON,	PG_hwpoison);
	else
		u |= kpf_copy_bit(page->flags, KPF_HWPOISON,	PG_hwpoison);
#endif

	u |= kpf_copy_bit(k, KPF_RESERVED,	PG_reserved);
	u |= kpf_copy_bit(k, KPF_OWNER_2,	PG_owner_2);
	u |= kpf_copy_bit(k, KPF_PRIVATE,	PG_private);
	u |= kpf_copy_bit(k, KPF_PRIVATE_2,	PG_private_2);
	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE,	PG_owner_priv_1);
	u |= kpf_copy_bit(k, KPF_ARCH,		PG_arch_1);
#ifdef CONFIG_ARCH_USES_PG_ARCH_2
	u |= kpf_copy_bit(k, KPF_ARCH_2,	PG_arch_2);
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_3
	u |= kpf_copy_bit(k, KPF_ARCH_3,	PG_arch_3);
#endif

	return u;
}
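
/*
 * Illustrative userspace sketch (an assumption for documentation, not
 * part of this file): the flags for a virtual address can be found by
 * reading its /proc/<pid>/pagemap entry (bits 0-54 hold the pfn when
 * bit 63, "page present", is set) and indexing /proc/kpageflags:
 *
 *	uint64_t ent, flags;
 *	pread(pagemap_fd, &ent, 8, (vaddr / page_size) * 8);
 *	if (ent & (1ULL << 63)) {
 *		uint64_t pfn = ent & ((1ULL << 55) - 1);
 *		pread(kpageflags_fd, &flags, 8, (off_t)pfn * 8);
 *	}
 */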

static ssize_t kpageflags_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires identifying
		 * memmaps that were actually initialized.
		 */
		struct page *page = pfn_to_online_page(pfn);

		if (put_user(stable_page_flags(page), out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpageflags_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpageflags_read,
};

#ifdef CONFIG_MEMCG
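/* /proc/kpagecgroup - an array exposing page cgroup inode numbers
 *
 * Each entry is a u64 containing the inode number of the memory cgroup
 * the corresponding physical page is charged to, or 0 for holes and
 * uncharged pages.
 */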
static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 ino;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires identifying
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (ppage)
			ino = page_cgroup_ino(ppage);
		else
			ino = 0;

		if (put_user(ino, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpagecgroup_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpagecgroup_read,
};
#endif /* CONFIG_MEMCG */

static int __init proc_page_init(void)
{
	proc_create("kpagecount", S_IRUSR, NULL, &kpagecount_proc_ops);
	proc_create("kpageflags", S_IRUSR, NULL, &kpageflags_proc_ops);
#ifdef CONFIG_MEMCG
	proc_create("kpagecgroup", S_IRUSR, NULL, &kpagecgroup_proc_ops);
#endif
	return 0;
}
fs_initcall(proc_page_init);