#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
#include <linux/kernel-page-flags.h>
#include <asm/uaccess.h>
#include "internal.h"

#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)

/* /proc/kpagecount - an array exposing page counts
 *
 * Each entry is a u64 representing the corresponding
 * physical page count.
 */
static ssize_t kpagecount_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 pcount;

	pfn = src / KPMSIZE;
	count = min_t(size_t, count, (max_pfn * KPMSIZE) - src);
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;

	while (count > 0) {
		if (pfn_valid(pfn))
			ppage = pfn_to_page(pfn);
		else
			ppage = NULL;
		if (!ppage || PageSlab(ppage))
			pcount = 0;
		else
			pcount = page_mapcount(ppage);

		if (put_user(pcount, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct file_operations proc_kpagecount_operations = {
	.llseek = mem_lseek,
	.read = kpagecount_read,
};
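/*
 * Usage sketch -- a separate userspace program, not part of this file.
 * /proc/kpagecount is a flat array of u64 indexed by page frame number,
 * so the mapcount for PFN N lives at byte offset N * sizeof(u64).  This
 * assumes root privileges (the file is created S_IRUSR) and takes the
 * PFN from the command line; it is an illustration, not a kernel API.
 */
#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	unsigned long pfn = argc > 1 ? strtoul(argv[1], NULL, 0) : 0;
	uint64_t count;
	int fd = open("/proc/kpagecount", O_RDONLY);

	/* One 8-byte entry per PFN, mirroring KPMSIZE above. */
	if (fd < 0 ||
	    pread(fd, &count, sizeof(count), pfn * sizeof(count)) !=
	    sizeof(count)) {
		perror("/proc/kpagecount");
		return 1;
	}
	printf("pfn %lu mapped %" PRIu64 " time(s)\n", pfn, count);
	close(fd);
	return 0;
}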
/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 */

static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
{
	return ((kflags >> kbit) & 1) << ubit;
}

u64 stable_page_flags(struct page *page)
{
	u64 k;
	u64 u;

	/*
	 * pseudo flag: KPF_NOPAGE
	 * it differentiates a memory hole from a page with no flags
	 */
	if (!page)
		return 1 << KPF_NOPAGE;

	k = page->flags;
	u = 0;

	/*
	 * pseudo flags for the well known (anonymous) memory mapped pages
	 *
	 * Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the
	 * simple test in page_mapped() is not enough.
	 */
	if (!PageSlab(page) && page_mapped(page))
		u |= 1 << KPF_MMAP;
	if (PageAnon(page))
		u |= 1 << KPF_ANON;
	if (PageKsm(page))
		u |= 1 << KPF_KSM;

	/*
	 * compound pages: export both head/tail info
	 * they together define a compound page's start/end pos and order
	 */
	if (PageHead(page))
		u |= 1 << KPF_COMPOUND_HEAD;
	if (PageTail(page))
		u |= 1 << KPF_COMPOUND_TAIL;
	if (PageHuge(page))
		u |= 1 << KPF_HUGE;
	/*
	 * PageTransCompound can be true for non-huge compound pages (slab
	 * pages or pages allocated by drivers with __GFP_COMP) because it
	 * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
	 * to make sure a given page is a thp, not a non-huge compound page.
	 */
	else if (PageTransCompound(page) && (PageLRU(compound_head(page)) ||
					     PageAnon(compound_head(page))))
		u |= 1 << KPF_THP;

	/*
	 * Caveats on high order pages: page->_count will only be set
	 * -1 on the head page; SLUB/SLQB do the same for PG_slab;
	 * SLOB won't set PG_slab at all on compound pages.
	 */
	if (PageBuddy(page))
		u |= 1 << KPF_BUDDY;

	if (PageBalloon(page))
		u |= 1 << KPF_BALLOON;

	u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked);

	u |= kpf_copy_bit(k, KPF_SLAB, PG_slab);

	u |= kpf_copy_bit(k, KPF_ERROR, PG_error);
	u |= kpf_copy_bit(k, KPF_DIRTY, PG_dirty);
	u |= kpf_copy_bit(k, KPF_UPTODATE, PG_uptodate);
	u |= kpf_copy_bit(k, KPF_WRITEBACK, PG_writeback);

	u |= kpf_copy_bit(k, KPF_LRU, PG_lru);
	u |= kpf_copy_bit(k, KPF_REFERENCED, PG_referenced);
	u |= kpf_copy_bit(k, KPF_ACTIVE, PG_active);
	u |= kpf_copy_bit(k, KPF_RECLAIM, PG_reclaim);

	u |= kpf_copy_bit(k, KPF_SWAPCACHE, PG_swapcache);
	u |= kpf_copy_bit(k, KPF_SWAPBACKED, PG_swapbacked);

	u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable);
	u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked);

#ifdef CONFIG_MEMORY_FAILURE
	u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison);
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached);
#endif

	u |= kpf_copy_bit(k, KPF_RESERVED, PG_reserved);
	u |= kpf_copy_bit(k, KPF_MAPPEDTODISK, PG_mappedtodisk);
	u |= kpf_copy_bit(k, KPF_PRIVATE, PG_private);
	u |= kpf_copy_bit(k, KPF_PRIVATE_2, PG_private_2);
	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE, PG_owner_priv_1);
	u |= kpf_copy_bit(k, KPF_ARCH, PG_arch_1);

	return u;
}

static ssize_t kpageflags_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
	count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;

	while (count > 0) {
		if (pfn_valid(pfn))
			ppage = pfn_to_page(pfn);
		else
			ppage = NULL;

		if (put_user(stable_page_flags(ppage), out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct file_operations proc_kpageflags_operations = {
	.llseek = mem_lseek,
	.read = kpageflags_read,
};

static int __init proc_page_init(void)
{
	proc_create("kpagecount", S_IRUSR, NULL, &proc_kpagecount_operations);
	proc_create("kpageflags", S_IRUSR, NULL, &proc_kpageflags_operations);
	return 0;
}
fs_initcall(proc_page_init);
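/*
 * Usage sketch -- a separate userspace program, not part of this file.
 * /proc/kpageflags uses the same u64-per-PFN indexing, and the bit
 * positions in each entry match the KPF_* values exported through the
 * UAPI header <linux/kernel-page-flags.h>.  The name table below is
 * deliberately partial, just enough to decode a few common bits.
 */
#include <fcntl.h>
#include <linux/kernel-page-flags.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static const struct { int bit; const char *name; } kpf_names[] = {
	{ KPF_LOCKED, "locked" }, { KPF_DIRTY, "dirty" },
	{ KPF_LRU, "lru" }, { KPF_ACTIVE, "active" },
	{ KPF_SLAB, "slab" }, { KPF_BUDDY, "buddy" },
	{ KPF_NOPAGE, "nopage" },
};

int main(int argc, char **argv)
{
	unsigned long pfn = argc > 1 ? strtoul(argv[1], NULL, 0) : 0;
	uint64_t flags;
	unsigned int i;
	int fd = open("/proc/kpageflags", O_RDONLY);

	/* Entry for PFN N sits at byte offset N * sizeof(u64). */
	if (fd < 0 ||
	    pread(fd, &flags, sizeof(flags), pfn * sizeof(flags)) !=
	    sizeof(flags)) {
		perror("/proc/kpageflags");
		return 1;
	}
	printf("pfn %lu:", pfn);
	for (i = 0; i < sizeof(kpf_names) / sizeof(kpf_names[0]); i++)
		if (flags & (1ULL << kpf_names[i].bit))
			printf(" %s", kpf_names[i].name);
	printf("\n");
	close(fd);
	return 0;
}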