// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/pagewalk.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>

#include <linux/uaccess.h>
#include "swap.h"
#include "internal.h"

static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned char present;
	unsigned char *vec = walk->private;

	/*
	 * Hugepages under a user process are always resident in RAM and
	 * never swapped out, but in principle this still needs to be checked.
	 */
	present = pte && !huge_pte_none_mostly(huge_ptep_get(walk->mm, addr, pte));
	for (; addr != end; vec++, addr += PAGE_SIZE)
		*vec = present;
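	/*
	 * Advance the shared output cursor so the next walk callback
	 * continues writing after the entries just filled in.
	 */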
	walk->private = vec;
#else
	BUG();
#endif
	return 0;
}

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t index)
{
	unsigned char present = 0;
	struct folio *folio;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (i.e. marked !present and faulted in with
	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
	 */
	folio = filemap_get_incore_folio(mapping, index);
	if (!IS_ERR(folio)) {
		present = folio_test_uptodate(folio);
		folio_put(folio);
	}

	return present;
}

static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
				struct vm_area_struct *vma, unsigned char *vec)
{
	unsigned long nr = (end - addr) >> PAGE_SHIFT;
	int i;

	if (vma->vm_file) {
		pgoff_t pgoff;

		pgoff = linear_page_index(vma, addr);
		for (i = 0; i < nr; i++, pgoff++)
			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
	} else {
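		/*
		 * No backing file: an unmapped anonymous range has nothing
		 * in core.
		 */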
		for (i = 0; i < nr; i++)
			vec[i] = 0;
	}
	return nr;
}

static int mincore_unmapped_range(unsigned long addr, unsigned long end,
				   __always_unused int depth,
				   struct mm_walk *walk)
{
	walk->private += __mincore_unmapped_range(addr, end,
						  walk->vma, walk->private);
	return 0;
}

static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			struct mm_walk *walk)
{
	spinlock_t *ptl;
	struct vm_area_struct *vma = walk->vma;
	pte_t *ptep;
	unsigned char *vec = walk->private;
	int nr = (end - addr) >> PAGE_SHIFT;
	int step, i;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
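		/* The range is covered by a huge PMD: report all of its pages resident. */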
		memset(vec, 1, nr);
		spin_unlock(ptl);
		goto out;
	}

	ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!ptep) {
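		/*
		 * The PTE table could not be mapped, e.g. because it changed
		 * under us; ask the walker to retry this range.
		 */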
		walk->action = ACTION_AGAIN;
		return 0;
	}
	for (; addr != end; ptep += step, addr += step * PAGE_SIZE) {
		pte_t pte = ptep_get(ptep);

		step = 1;
		/* We need to do the cache lookup for pte markers too */
		if (pte_none_mostly(pte))
			__mincore_unmapped_range(addr, addr + PAGE_SIZE,
						 vma, vec);
		else if (pte_present(pte)) {
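			/*
			 * pte_batch_hint() may report a run of consecutive
			 * present PTEs (e.g. arm64 contiguous PTEs) that can
			 * all be marked resident in one step.
			 */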
			unsigned int batch = pte_batch_hint(ptep, pte);

			if (batch > 1) {
				unsigned int max_nr = (end - addr) >> PAGE_SHIFT;

				step = min_t(unsigned int, batch, max_nr);
			}

			for (i = 0; i < step; i++)
				vec[i] = 1;
		} else { /* pte is a swap entry */
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (non_swap_entry(entry)) {
				/*
				 * migration or hwpoison entries are always
				 * uptodate
				 */
				*vec = 1;
			} else {
#ifdef CONFIG_SWAP
				*vec = mincore_page(swap_address_space(entry),
						    swap_cache_index(entry));
#else
				WARN_ON(1);
				*vec = 1;
#endif
			}
		}
		vec += step;
	}
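	/*
	 * The loop advanced ptep one past the last entry; step back so
	 * pte_unmap_unlock() is handed a pointer within the mapped table.
	 */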
	pte_unmap_unlock(ptep - 1, ptl);
out:
	walk->private += nr;
	cond_resched();
	return 0;
}

static inline bool can_do_mincore(struct vm_area_struct *vma)
{
	if (vma_is_anonymous(vma))
		return true;
	if (!vma->vm_file)
		return false;
	/*
	 * Reveal pagecache information only for non-anonymous mappings that
	 * correspond to the files the calling process could (if tried) open
	 * for writing; otherwise we'd be including shared non-exclusive
	 * mappings, which opens a side channel.
	 */
	return inode_owner_or_capable(&nop_mnt_idmap,
				      file_inode(vma->vm_file)) ||
	       file_permission(vma->vm_file, MAY_WRITE) == 0;
}

static const struct mm_walk_ops mincore_walk_ops = {
	.pmd_entry		= mincore_pte_range,
	.pte_hole		= mincore_unmapped_range,
	.hugetlb_entry		= mincore_hugetlb,
	.walk_lock		= PGWALK_RDLOCK,
};


/*
 * Do a chunk of "sys_mincore()". We've already checked
 * all the arguments, we hold the mmap read lock: we should
 * just return the amount of info we're asked for.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int err;

	vma = vma_lookup(current->mm, addr);
	if (!vma)
		return -ENOMEM;
	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
	if (!can_do_mincore(vma)) {
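		/*
		 * The caller is not allowed to learn the residency of this
		 * mapping; claim everything is resident rather than fail,
		 * so no pagecache state leaks.
		 */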
		unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);
		memset(vec, 1, pages);
		return pages;
	}
	err = walk_page_range(vma->vm_mm, addr, end, &mincore_walk_ops, vec);
	if (err < 0)
		return err;
	return (end - addr) >> PAGE_SHIFT;
}

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len) are
 *		invalid for the address space of this process, or
 *		specify one or more pages which are not currently
 *		mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;

	start = untagged_addr(start);

	/* Check the start address: needs to be page-aligned.. */
	if (unlikely(start & ~PAGE_MASK))
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok((void __user *) start, len))
		return -ENOMEM;

	/* This also avoids any overflows on PAGE_ALIGN */
	pages = len >> PAGE_SHIFT;
	pages += (offset_in_page(len)) != 0;

	if (!access_ok(vec, pages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
		mmap_read_lock(current->mm);
		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
		mmap_read_unlock(current->mm);

		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	free_page((unsigned long) tmp);
	return retval;
}
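
/*
 * Example (userspace, not part of this file): a minimal sketch of how an
 * application might call mincore(2) to count the resident pages of an
 * anonymous mapping.  The mapping size and the half-touched pattern are
 * illustrative assumptions, not anything this file prescribes.
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		long page = sysconf(_SC_PAGESIZE);
 *		size_t len = 16 * page;		// arbitrary example size
 *		size_t i, resident = 0;
 *		unsigned char *vec;
 *		char *buf;
 *
 *		buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		if (buf == MAP_FAILED)
 *			return 1;
 *		memset(buf, 0, len / 2);	// fault in the first half
 *
 *		vec = malloc(len / page);
 *		if (!vec || mincore(buf, len, vec))
 *			return 1;
 *		for (i = 0; i < len / page; i++)
 *			resident += vec[i] & 1;	// only the LSB is defined
 *		printf("%zu of %zu pages resident\n", resident, len / page);
 *		return 0;
 *	}
 */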