xref: /linux/fs/proc/page.c (revision 913df4453f85f1fe79b35ecf3c9a0c0b707d22a2)
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
#include <asm/uaccess.h>
#include "internal.h"

#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)

/* /proc/kpagecount - an array exposing page counts
 *
 * Each entry is a u64 representing the corresponding
 * physical page count.
 */
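/*
 * Illustrative userspace sketch (an assumption for documentation, not part
 * of the kernel build): entries are KPMSIZE (8) bytes each and indexed by
 * page frame number, so the map count of one pfn can be fetched with a
 * single pread() at offset pfn * 8.  The pfn below is only a placeholder.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		uint64_t pfn = 0x1000;	// example pfn, purely illustrative
 *		uint64_t count;
 *		int fd = open("/proc/kpagecount", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (pread(fd, &count, sizeof(count),
 *			  (off_t)(pfn * sizeof(count))) == sizeof(count))
 *			printf("pfn %llu: mapcount %llu\n",
 *			       (unsigned long long)pfn,
 *			       (unsigned long long)count);
 *		close(fd);
 *		return 0;
 *	}
 */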
static ssize_t kpagecount_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 pcount;

	pfn = src / KPMSIZE;
	count = min_t(size_t, count, (max_pfn * KPMSIZE) - src);
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;

	while (count > 0) {
		if (pfn_valid(pfn))
			ppage = pfn_to_page(pfn);
		else
			ppage = NULL;
		if (!ppage)
			pcount = 0;
		else
			pcount = page_mapcount(ppage);

		if (put_user(pcount, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct file_operations proc_kpagecount_operations = {
	.llseek = mem_lseek,
	.read = kpagecount_read,
};

/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 */

/* These macros are used to decouple internal flags from exported ones */

#define KPF_LOCKED		0
#define KPF_ERROR		1
#define KPF_REFERENCED		2
#define KPF_UPTODATE		3
#define KPF_DIRTY		4
#define KPF_LRU			5
#define KPF_ACTIVE		6
#define KPF_SLAB		7
#define KPF_WRITEBACK		8
#define KPF_RECLAIM		9
#define KPF_BUDDY		10

/* 11-20: new additions in 2.6.31 */
#define KPF_MMAP		11
#define KPF_ANON		12
#define KPF_SWAPCACHE		13
#define KPF_SWAPBACKED		14
#define KPF_COMPOUND_HEAD	15
#define KPF_COMPOUND_TAIL	16
#define KPF_HUGE		17
#define KPF_UNEVICTABLE		18
#define KPF_HWPOISON		19
#define KPF_NOPAGE		20

#define KPF_KSM			21

/* kernel hacking assistances
 * WARNING: subject to change, never rely on them!
 */
#define KPF_RESERVED		32
#define KPF_MLOCKED		33
#define KPF_MAPPEDTODISK	34
#define KPF_PRIVATE		35
#define KPF_PRIVATE_2		36
#define KPF_OWNER_PRIVATE	37
#define KPF_ARCH		38
#define KPF_UNCACHED		39

/* copy a single kernel page flag bit into its exported bit position */
static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
{
	return ((kflags >> kbit) & 1) << ubit;
}
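/*
 * Worked example (illustrative only): with KPF_DIRTY == 4,
 * kpf_copy_bit(page->flags, KPF_DIRTY, PG_dirty) isolates the PG_dirty
 * bit of page->flags and re-emits it at exported bit position 4,
 * regardless of where PG_dirty happens to sit in the internal layout.
 */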

static u64 get_uflags(struct page *page)
{
	u64 k;
	u64 u;

	/*
	 * pseudo flag: KPF_NOPAGE
	 * it differentiates a memory hole from a page with no flags
	 */
	if (!page)
		return 1 << KPF_NOPAGE;

	k = page->flags;
	u = 0;

	/*
	 * pseudo flags for the well known (anonymous) memory mapped pages
	 *
	 * Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the
	 * simple test in page_mapped() is not enough.
	 */
	if (!PageSlab(page) && page_mapped(page))
		u |= 1 << KPF_MMAP;
	if (PageAnon(page))
		u |= 1 << KPF_ANON;
	if (PageKsm(page))
		u |= 1 << KPF_KSM;

	/*
	 * compound pages: export both head/tail info
	 * they together define a compound page's start/end pos and order
	 */
	if (PageHead(page))
		u |= 1 << KPF_COMPOUND_HEAD;
	if (PageTail(page))
		u |= 1 << KPF_COMPOUND_TAIL;
	if (PageHuge(page))
		u |= 1 << KPF_HUGE;

	u |= kpf_copy_bit(k, KPF_LOCKED,	PG_locked);

	/*
	 * Caveats on high order pages:
	 * PG_buddy will only be set on the head page; SLUB/SLQB do the same
	 * for PG_slab; SLOB won't set PG_slab at all on compound pages.
	 */
	u |= kpf_copy_bit(k, KPF_SLAB,		PG_slab);
	u |= kpf_copy_bit(k, KPF_BUDDY,		PG_buddy);

	u |= kpf_copy_bit(k, KPF_ERROR,		PG_error);
	u |= kpf_copy_bit(k, KPF_DIRTY,		PG_dirty);
	u |= kpf_copy_bit(k, KPF_UPTODATE,	PG_uptodate);
	u |= kpf_copy_bit(k, KPF_WRITEBACK,	PG_writeback);

	u |= kpf_copy_bit(k, KPF_LRU,		PG_lru);
	u |= kpf_copy_bit(k, KPF_REFERENCED,	PG_referenced);
	u |= kpf_copy_bit(k, KPF_ACTIVE,	PG_active);
	u |= kpf_copy_bit(k, KPF_RECLAIM,	PG_reclaim);

	u |= kpf_copy_bit(k, KPF_SWAPCACHE,	PG_swapcache);
	u |= kpf_copy_bit(k, KPF_SWAPBACKED,	PG_swapbacked);

	u |= kpf_copy_bit(k, KPF_UNEVICTABLE,	PG_unevictable);
	u |= kpf_copy_bit(k, KPF_MLOCKED,	PG_mlocked);

#ifdef CONFIG_MEMORY_FAILURE
	u |= kpf_copy_bit(k, KPF_HWPOISON,	PG_hwpoison);
#endif

#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
	u |= kpf_copy_bit(k, KPF_UNCACHED,	PG_uncached);
#endif

	u |= kpf_copy_bit(k, KPF_RESERVED,	PG_reserved);
	u |= kpf_copy_bit(k, KPF_MAPPEDTODISK,	PG_mappedtodisk);
	u |= kpf_copy_bit(k, KPF_PRIVATE,	PG_private);
	u |= kpf_copy_bit(k, KPF_PRIVATE_2,	PG_private_2);
	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE,	PG_owner_priv_1);
	u |= kpf_copy_bit(k, KPF_ARCH,		PG_arch_1);

	return u;
}
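
/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * a consumer tests individual bits of the u64 value built above using the
 * exported KPF_* positions, which stay stable even if the kernel's
 * internal PG_* layout changes.
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	// Bit positions mirror the exported KPF_* values defined above.
 *	static void describe(uint64_t pfn, uint64_t flags)
 *	{
 *		if (flags & (1ULL << 20))		// KPF_NOPAGE
 *			printf("pfn %llu: hole\n", (unsigned long long)pfn);
 *		else if (flags & (1ULL << 12))		// KPF_ANON
 *			printf("pfn %llu: anon\n", (unsigned long long)pfn);
 *		else if (flags & (1ULL << 7))		// KPF_SLAB
 *			printf("pfn %llu: slab\n", (unsigned long long)pfn);
 *	}
 */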

static ssize_t kpageflags_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
	count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;

	while (count > 0) {
		if (pfn_valid(pfn))
			ppage = pfn_to_page(pfn);
		else
			ppage = NULL;

		if (put_user(get_uflags(ppage), out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct file_operations proc_kpageflags_operations = {
	.llseek = mem_lseek,
	.read = kpageflags_read,
};
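
/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * reads must be 8-byte aligned and are bounded by max_pfn, so a scanner
 * typically walks the file in fixed-size chunks from offset 0 until
 * read() returns 0, e.g. to count free (buddy) pages.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		uint64_t buf[1024], buddy = 0;
 *		ssize_t n;
 *		int fd = open("/proc/kpageflags", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		while ((n = read(fd, buf, sizeof(buf))) > 0) {
 *			for (ssize_t i = 0; i < n / 8; i++)
 *				if (buf[i] & (1ULL << 10))	// KPF_BUDDY
 *					buddy++;
 *		}
 *		close(fd);
 *		printf("%llu buddy pages\n", (unsigned long long)buddy);
 *		return 0;
 *	}
 */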

static int __init proc_page_init(void)
{
	proc_create("kpagecount", S_IRUSR, NULL, &proc_kpagecount_operations);
	proc_create("kpageflags", S_IRUSR, NULL, &proc_kpageflags_operations);
	return 0;
}
module_init(proc_page_init);