// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/pagewalk.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>

#include <linux/uaccess.h>
#include "swap.h"
#include "internal.h"

static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned char present;
	unsigned char *vec = walk->private;
	spinlock_t *ptl;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	/*
	 * Hugepages mapped into a user process are currently always
	 * resident in RAM and never swapped out, but check the PTE
	 * anyway in case that ever changes.
	 */
	present = pte && !huge_pte_none_mostly(huge_ptep_get(walk->mm, addr, pte));
	for (; addr != end; vec++, addr += PAGE_SIZE)
		*vec = present;
	walk->private = vec;
	spin_unlock(ptl);
#else
	BUG();
#endif
	return 0;
}

static unsigned char mincore_swap(swp_entry_t entry, bool shmem)
{
	struct swap_info_struct *si;
	struct folio *folio = NULL;
	unsigned char present = 0;

	if (!IS_ENABLED(CONFIG_SWAP)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * A shmem mapping may contain swapin error entries, which count
	 * as absent. A page table may contain migration or hwpoison
	 * entries, which always count as uptodate.
	 */
	if (non_swap_entry(entry))
		return !shmem;

	/*
	 * The shmem mapping lookup is lockless, so we need to pin the
	 * swap device. The mincore page table walk holds the PTL, which
	 * keeps the swap device stable, so avoid touching the si there
	 * for better performance.
	 */
	if (shmem) {
		si = get_swap_device(entry);
		if (!si)
			return 0;
	}
	folio = filemap_get_entry(swap_address_space(entry),
				  swap_cache_index(entry));
	if (shmem)
		put_swap_device(si);
	/* The swap cache space contains either a folio, a shadow or NULL */
	if (folio && !xa_is_value(folio)) {
		present = folio_test_uptodate(folio);
		folio_put(folio);
	}

	return present;
}

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t index)
{
	unsigned char present = 0;
	struct folio *folio;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (i.e. marked !present and faulted in with
	 * tmpfs's .fault). So swapped-out tmpfs mappings are tested here.
	 */
	folio = filemap_get_entry(mapping, index);
	if (folio) {
		if (xa_is_value(folio)) {
			if (shmem_mapping(mapping))
				return mincore_swap(radix_to_swp_entry(folio),
						    true);
			else
				return 0;
		}
		present = folio_test_uptodate(folio);
		folio_put(folio);
	}

	return present;
}

static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
				struct vm_area_struct *vma, unsigned char *vec)
{
	unsigned long nr = (end - addr) >> PAGE_SHIFT;
	int i;

	if (vma->vm_file) {
		pgoff_t pgoff;

		pgoff = linear_page_index(vma, addr);
		for (i = 0; i < nr; i++, pgoff++)
			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
	} else {
		for (i = 0; i < nr; i++)
			vec[i] = 0;
	}
	return nr;
}

static int mincore_unmapped_range(unsigned long addr, unsigned long end,
				   __always_unused int depth,
				   struct mm_walk *walk)
{
	walk->private += __mincore_unmapped_range(addr, end,
						  walk->vma, walk->private);
	return 0;
}

static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			struct mm_walk *walk)
{
	spinlock_t *ptl;
	struct vm_area_struct *vma = walk->vma;
	pte_t *ptep;
	unsigned char *vec = walk->private;
	int nr = (end - addr) >> PAGE_SHIFT;
	int step, i;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		memset(vec, 1, nr);
		spin_unlock(ptl);
		goto out;
	}

	ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!ptep) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	for (; addr != end; ptep += step, addr += step * PAGE_SIZE) {
		pte_t pte = ptep_get(ptep);

		step = 1;
		/* We need to do cache lookup too for pte markers */
		if (pte_none_mostly(pte))
			__mincore_unmapped_range(addr, addr + PAGE_SIZE,
						 vma, vec);
		else if (pte_present(pte)) {
			/*
			 * The arch may hint that a block of contiguous
			 * PTEs (e.g. arm64 contpte) maps consecutive
			 * pages of the same folio; all of those pages
			 * are present, so fill their vector entries in
			 * one step.
			 */
			unsigned int batch = pte_batch_hint(ptep, pte);

			if (batch > 1) {
				unsigned int max_nr = (end - addr) >> PAGE_SHIFT;

				step = min_t(unsigned int, batch, max_nr);
			}

			for (i = 0; i < step; i++)
				vec[i] = 1;
		} else { /* pte is a swap entry */
			*vec = mincore_swap(pte_to_swp_entry(pte), false);
		}
		vec += step;
	}
	pte_unmap_unlock(ptep - 1, ptl);
out:
	walk->private += nr;
	cond_resched();
	return 0;
}

static inline bool can_do_mincore(struct vm_area_struct *vma)
{
	if (vma_is_anonymous(vma))
		return true;
	if (!vma->vm_file)
		return false;
	/*
	 * Reveal pagecache information only for non-anonymous mappings that
	 * correspond to the files the calling process could (if tried) open
	 * for writing; otherwise we'd be including shared non-exclusive
	 * mappings, which opens a side channel.
	 */
	return inode_owner_or_capable(&nop_mnt_idmap,
				      file_inode(vma->vm_file)) ||
	       file_permission(vma->vm_file, MAY_WRITE) == 0;
}

static const struct mm_walk_ops mincore_walk_ops = {
	.pmd_entry		= mincore_pte_range,
	.pte_hole		= mincore_unmapped_range,
	.hugetlb_entry		= mincore_hugetlb,
	.walk_lock		= PGWALK_RDLOCK,
};
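
/*
 * Note: the generic page table walker invokes .pmd_entry for each
 * populated PMD, .pte_hole for ranges with nothing mapped, and
 * .hugetlb_entry for hugetlb VMAs; PGWALK_RDLOCK tells it the walk
 * runs under the mmap read lock, which the mincore() syscall takes
 * around each do_mincore() chunk below.
 */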

/*
 * Do a chunk of "sys_mincore()". We've already checked all the
 * arguments and we hold the mmap lock: we should just return the
 * amount of info we're asked for. Note that we clamp the range to the
 * first VMA found, so the caller loops until the whole range is done.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int err;

	vma = vma_lookup(current->mm, addr);
	if (!vma)
		return -ENOMEM;
	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
	if (!can_do_mincore(vma)) {
		unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);
		memset(vec, 1, pages);
		return pages;
	}
	err = walk_page_range(vma->vm_mm, addr, end, &mincore_walk_ops, vec);
	if (err < 0)
		return err;
	return (end - addr) >> PAGE_SHIFT;
}

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len] are
 *		invalid for the address space of this process, or
 *		specify one or more pages which are not currently
 *		mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;

	start = untagged_addr(start);

	/* Check the start address: needs to be page-aligned.. */
	if (unlikely(start & ~PAGE_MASK))
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok((void __user *) start, len))
		return -ENOMEM;

	/* This also avoids any overflows on PAGE_ALIGN */
	pages = len >> PAGE_SHIFT;
	pages += (offset_in_page(len)) != 0;

	if (!access_ok(vec, pages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
		mmap_read_lock(current->mm);
		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
		mmap_read_unlock(current->mm);

		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	free_page((unsigned long) tmp);
	return retval;
}
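
/*
 * Illustrative userspace usage (a sketch, not part of this file; assumes
 * the libc mincore() wrapper plus <sys/mman.h>, <unistd.h> and <stdio.h>):
 *
 *	long page = sysconf(_SC_PAGESIZE);
 *	size_t len = 4 * page;
 *	unsigned char vec[4];
 *	void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	if (addr != MAP_FAILED && mincore(addr, len, vec) == 0)
 *		for (int i = 0; i < 4; i++)
 *			printf("page %d: %s\n", i,
 *			       (vec[i] & 1) ? "resident" : "not resident");
 *
 * Only the least significant bit of each vector byte is specified, so
 * mask with "& 1" as above; the remaining bits are currently undefined.
 */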