// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/pagewalk.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/leafops.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>

#include <linux/uaccess.h>
#include "swap.h"
#include "internal.h"

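/*
 * Fill the mincore vector for a hugetlb range: all pages covered by the
 * huge PTE share a single residency state, so compute it once and fan it
 * out across the vector.
 */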
static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned char present;
	unsigned char *vec = walk->private;
	spinlock_t *ptl;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);

	/*
	 * Hugetlb pages mapped by a user process are always resident in RAM
	 * and never swapped out, but in theory the PTE still needs checking.
	 */
	if (!pte) {
		present = 0;
	} else {
		const pte_t ptep = huge_ptep_get(walk->mm, addr, pte);

		if (huge_pte_none(ptep) || pte_is_marker(ptep))
			present = 0;
		else
			present = 1;
	}

	for (; addr != end; vec++, addr += PAGE_SIZE)
		*vec = present;
	walk->private = vec;
	spin_unlock(ptl);
#else
	BUG();
#endif
	return 0;
}

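/*
 * Resolve the residency of a swap entry: non-swap software leaves (e.g.
 * migration or hwpoison entries) count as resident in page tables, while
 * a true swap entry is resident only if an uptodate folio is still (or
 * already) sitting in the swap cache.
 */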
static unsigned char mincore_swap(swp_entry_t entry, bool shmem)
{
	struct swap_info_struct *si;
	struct folio *folio = NULL;
	unsigned char present = 0;

	if (!IS_ENABLED(CONFIG_SWAP)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * A shmem mapping may contain swapin error entries, which are
	 * absent.  A page table may contain migration or hwpoison
	 * entries, which are always uptodate.
	 */
	if (!softleaf_is_swap(entry))
		return !shmem;

	/*
	 * The shmem mapping lookup is lockless, so we need to pin the swap
	 * device.  The mincore page table walk holds the PTL, under which
	 * the swap device is stable, so avoid touching the si for better
	 * performance.
	 */
	if (shmem) {
		si = get_swap_device(entry);
		if (!si)
			return 0;
	}
	folio = swap_cache_get_folio(entry);
	if (shmem)
		put_swap_device(si);
	/* The swap cache space contains either a folio, a shadow entry or NULL */
	if (folio && !xa_is_value(folio)) {
		present = folio_test_uptodate(folio);
		folio_put(folio);
	}

	return present;
}

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t index)
{
	unsigned char present = 0;
	struct folio *folio;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (ie. marked !present and faulted in with
	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
	 */
	folio = filemap_get_entry(mapping, index);
	if (folio) {
		if (xa_is_value(folio)) {
			if (shmem_mapping(mapping))
				return mincore_swap(radix_to_swp_entry(folio),
						    true);
			else
				return 0;
		}
		present = folio_test_uptodate(folio);
		folio_put(folio);
	}

	return present;
}

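/*
 * Report residency for a range with no page table entries: file-backed
 * pages may still sit in the page cache, whereas anonymous pages cannot
 * be resident without a PTE.
 */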
static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
				struct vm_area_struct *vma, unsigned char *vec)
{
	unsigned long nr = (end - addr) >> PAGE_SHIFT;
	int i;

	if (vma->vm_file) {
		pgoff_t pgoff;

		pgoff = linear_page_index(vma, addr);
		for (i = 0; i < nr; i++, pgoff++)
			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
	} else {
		for (i = 0; i < nr; i++)
			vec[i] = 0;
	}
	return nr;
}

static int mincore_unmapped_range(unsigned long addr, unsigned long end,
				   __always_unused int depth,
				   struct mm_walk *walk)
{
	walk->private += __mincore_unmapped_range(addr, end,
						  walk->vma, walk->private);
	return 0;
}

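/*
 * Walk the PTEs covered by one PMD.  A PMD-mapped huge page is resident
 * as a whole; otherwise inspect each PTE, using the batch hint to fill
 * runs of present entries in one step.
 */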
static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			struct mm_walk *walk)
{
	spinlock_t *ptl;
	struct vm_area_struct *vma = walk->vma;
	pte_t *ptep;
	unsigned char *vec = walk->private;
	int nr = (end - addr) >> PAGE_SHIFT;
	int step, i;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		memset(vec, 1, nr);
		spin_unlock(ptl);
		goto out;
	}

	ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!ptep) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	for (; addr != end; ptep += step, addr += step * PAGE_SIZE) {
		pte_t pte = ptep_get(ptep);

		step = 1;
		/* We need to do a cache lookup too for markers */
		if (pte_none(pte) || pte_is_marker(pte))
			__mincore_unmapped_range(addr, addr + PAGE_SIZE,
						 vma, vec);
		else if (pte_present(pte)) {
			unsigned int batch = pte_batch_hint(ptep, pte);

			if (batch > 1) {
				unsigned int max_nr = (end - addr) >> PAGE_SHIFT;

				step = min_t(unsigned int, batch, max_nr);
			}

			for (i = 0; i < step; i++)
				vec[i] = 1;
		} else { /* pte is a swap entry */
			const softleaf_t entry = softleaf_from_pte(pte);

			*vec = mincore_swap(entry, false);
		}
		vec += step;
	}
	pte_unmap_unlock(ptep - 1, ptl);
out:
	walk->private += nr;
	cond_resched();
	return 0;
}

static inline bool can_do_mincore(struct vm_area_struct *vma)
{
	if (vma_is_anonymous(vma))
		return true;
	if (!vma->vm_file)
		return false;
	/*
	 * Reveal pagecache information only for non-anonymous mappings that
	 * correspond to files the calling process could (if it tried) open
	 * for writing; otherwise we'd be including shared non-exclusive
	 * mappings, which opens a side channel.
	 */
	return inode_owner_or_capable(&nop_mnt_idmap,
				      file_inode(vma->vm_file)) ||
	       file_permission(vma->vm_file, MAY_WRITE) == 0;
}

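/* Callbacks handed to the generic page table walker. */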
static const struct mm_walk_ops mincore_walk_ops = {
	.pmd_entry		= mincore_pte_range,
	.pte_hole		= mincore_unmapped_range,
	.hugetlb_entry		= mincore_hugetlb,
	.walk_lock		= PGWALK_RDLOCK,
};

/*
 * Do a chunk of "sys_mincore()".  We've already checked all the
 * arguments and we hold the mmap semaphore; just return the amount
 * of info we're asked for.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int err;

	vma = vma_lookup(current->mm, addr);
	if (!vma)
		return -ENOMEM;
	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
	if (!can_do_mincore(vma)) {
		unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);
		memset(vec, 1, pages);
		return pages;
	}
	err = walk_page_range(vma->vm_mm, addr, end, &mincore_walk_ops, vec);
	if (err < 0)
		return err;
	return (end - addr) >> PAGE_SHIFT;
}

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len] are
 *		invalid for the address space of this process, or
 *		specify one or more pages which are not currently
 *		mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
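/*
 * Illustrative userspace usage (a minimal sketch, not part of this file;
 * page_size is assumed to come from sysconf(_SC_PAGESIZE)):
 *
 *	long page_size = sysconf(_SC_PAGESIZE);
 *	unsigned char vec[4];
 *	void *buf = mmap(NULL, 4 * page_size, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	int err = mincore(buf, 4 * page_size, vec);
 *
 * On success, vec[i] & 1 tells whether page i of buf was resident when
 * the call sampled it.
 */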
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;

	start = untagged_addr(start);

	/* Check the start address: needs to be page-aligned.. */
	if (unlikely(start & ~PAGE_MASK))
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok((void __user *) start, len))
		return -ENOMEM;

	/* This also avoids any overflows on PAGE_ALIGN */
	pages = len >> PAGE_SHIFT;
	pages += (offset_in_page(len)) != 0;

	if (!access_ok(vec, pages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
		mmap_read_lock(current->mm);
		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
		mmap_read_unlock(current->mm);

		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	free_page((unsigned long) tmp);
	return retval;
}
344