// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/pagewalk.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>

#include <linux/uaccess.h>
#include "swap.h"
#include "internal.h"

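/*
 * Fill the mincore vector for a hugetlb range: each PAGE_SIZE-sized
 * page of a huge page shares the residency status of the huge page
 * itself, so one lookup covers the whole range.
 */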
static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned char present;
	unsigned char *vec = walk->private;
	spinlock_t *ptl;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	/*
	 * Hugepages mapped by a user process are always in RAM and never
	 * swapped out, but in theory this still needs to be checked.
	 */
	present = pte && !huge_pte_none_mostly(huge_ptep_get(walk->mm, addr, pte));
	for (; addr != end; vec++, addr += PAGE_SIZE)
		*vec = present;
	walk->private = vec;
	spin_unlock(ptl);
#else
	BUG();
#endif
	return 0;
}

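/*
 * Check whether the page backing a swap entry is resident and uptodate.
 * @shmem: true if @entry came from a lockless shmem mapping lookup
 * rather than from a page table walk done under the PTL.
 */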
static unsigned char mincore_swap(swp_entry_t entry, bool shmem)
{
	struct swap_info_struct *si;
	struct folio *folio = NULL;
	unsigned char present = 0;

	if (!IS_ENABLED(CONFIG_SWAP)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * A shmem mapping may contain swapin error entries, which are
	 * absent. A page table may contain migration or hwpoison
	 * entries, which are always uptodate.
	 */
	if (non_swap_entry(entry))
		return !shmem;

	/*
	 * Shmem mapping lookup is lockless, so we need to pin the swap
	 * device. The mincore page table walk holds the PTL, under which
	 * the swap device is stable, so avoid touching the si there for
	 * better performance.
	 */
	if (shmem) {
		si = get_swap_device(entry);
		if (!si)
			return 0;
	}
	folio = swap_cache_get_folio(entry);
	if (shmem)
		put_swap_device(si);
	/* The swap cache space contains either a folio, a shadow or NULL */
	if (folio && !xa_is_value(folio)) {
		present = folio_test_uptodate(folio);
		folio_put(folio);
	}

	return present;
}

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t index)
{
	unsigned char present = 0;
	struct folio *folio;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (ie. marked !present and faulted in with
	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
	 */
	folio = filemap_get_entry(mapping, index);
	if (folio) {
		if (xa_is_value(folio)) {
			if (shmem_mapping(mapping))
				return mincore_swap(radix_to_swp_entry(folio),
						    true);
			else
				return 0;
		}
		present = folio_test_uptodate(folio);
		folio_put(folio);
	}

	return present;
}

static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
				struct vm_area_struct *vma, unsigned char *vec)
{
	unsigned long nr = (end - addr) >> PAGE_SHIFT;
	int i;

	if (vma->vm_file) {
		pgoff_t pgoff;

		pgoff = linear_page_index(vma, addr);
		for (i = 0; i < nr; i++, pgoff++)
			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
	} else {
		for (i = 0; i < nr; i++)
			vec[i] = 0;
	}
	return nr;
}

static int mincore_unmapped_range(unsigned long addr, unsigned long end,
				   __always_unused int depth,
				   struct mm_walk *walk)
{
	walk->private += __mincore_unmapped_range(addr, end,
						  walk->vma, walk->private);
	return 0;
}

static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			struct mm_walk *walk)
{
	spinlock_t *ptl;
	struct vm_area_struct *vma = walk->vma;
	pte_t *ptep;
	unsigned char *vec = walk->private;
	int nr = (end - addr) >> PAGE_SHIFT;
	int step, i;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		memset(vec, 1, nr);
		spin_unlock(ptl);
		goto out;
	}

	ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!ptep) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	for (; addr != end; ptep += step, addr += step * PAGE_SIZE) {
		pte_t pte = ptep_get(ptep);

		step = 1;
		/* We need to do cache lookup too for pte markers */
		if (pte_none_mostly(pte))
			__mincore_unmapped_range(addr, addr + PAGE_SIZE,
						 vma, vec);
		else if (pte_present(pte)) {
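			/*
			 * Present PTEs may map a contiguous batch of pages
			 * (e.g. arm64 contpte mappings). Use the batch hint
			 * to mark them all resident in one step rather than
			 * one PTE at a time.
			 */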
			unsigned int batch = pte_batch_hint(ptep, pte);

			if (batch > 1) {
				unsigned int max_nr = (end - addr) >> PAGE_SHIFT;

				step = min_t(unsigned int, batch, max_nr);
			}

			for (i = 0; i < step; i++)
				vec[i] = 1;
		} else { /* pte is a swap entry */
			*vec = mincore_swap(pte_to_swp_entry(pte), false);
		}
		vec += step;
	}
	pte_unmap_unlock(ptep - 1, ptl);
out:
	walk->private += nr;
	cond_resched();
	return 0;
}

static inline bool can_do_mincore(struct vm_area_struct *vma)
{
	if (vma_is_anonymous(vma))
		return true;
	if (!vma->vm_file)
		return false;
	/*
	 * Reveal pagecache information only for non-anonymous mappings that
	 * correspond to the files the calling process could (if tried) open
	 * for writing; otherwise we'd be including shared non-exclusive
	 * mappings, which opens a side channel.
	 */
	return inode_owner_or_capable(&nop_mnt_idmap,
				      file_inode(vma->vm_file)) ||
	       file_permission(vma->vm_file, MAY_WRITE) == 0;
}

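/*
 * Page table walk callbacks for walk_page_range(); the walk runs with
 * the mmap lock held for read (PGWALK_RDLOCK), see do_mincore().
 */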
static const struct mm_walk_ops mincore_walk_ops = {
	.pmd_entry		= mincore_pte_range,
	.pte_hole		= mincore_unmapped_range,
	.hugetlb_entry		= mincore_hugetlb,
	.walk_lock		= PGWALK_RDLOCK,
};

/*
 * Do a chunk of "sys_mincore()". We've already checked
 * all the arguments, we hold the mmap semaphore: we should
 * just return the amount of info we're asked for.  Note that
 * a chunk never crosses a VMA boundary; the caller iterates.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int err;

	vma = vma_lookup(current->mm, addr);
	if (!vma)
		return -ENOMEM;
	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
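	/*
	 * For mappings the caller is not allowed to probe (see
	 * can_do_mincore()), report every page as resident so that no
	 * pagecache state leaks through the side channel.
	 */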
	if (!can_do_mincore(vma)) {
		unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);
		memset(vec, 1, pages);
		return pages;
	}
	err = walk_page_range(vma->vm_mm, addr, end, &mincore_walk_ops, vec);
	if (err < 0)
		return err;
	return (end - addr) >> PAGE_SHIFT;
}

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len] are
 *		invalid for the address space of this process, or
 *		specify one or more pages which are not currently
 *		mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;

	start = untagged_addr(start);

	/* Check the start address: needs to be page-aligned.. */
	if (unlikely(start & ~PAGE_MASK))
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok((void __user *) start, len))
		return -ENOMEM;

	/* This also avoids any overflows on PAGE_ALIGN */
	pages = len >> PAGE_SHIFT;
	pages += (offset_in_page(len)) != 0;
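	/* e.g. len == PAGE_SIZE + 1 rounds up to pages == 2 */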

	if (!access_ok(vec, pages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
		mmap_read_lock(current->mm);
		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
		mmap_read_unlock(current->mm);

		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	free_page((unsigned long) tmp);
	return retval;
}
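
/*
 * Example (userspace, not part of the kernel build): a minimal sketch
 * of how an application might call mincore(2).  The mapping below is
 * illustrative; only the least significant bit of each vector byte is
 * meaningful.
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		long psize = sysconf(_SC_PAGESIZE);
 *		size_t len = 16 * psize;
 *		size_t npages = (len + psize - 1) / psize;
 *		void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		unsigned char *vec = malloc(npages);
 *
 *		((char *)addr)[0] = 1;		// fault in the first page
 *		if (mincore(addr, len, vec) == 0)
 *			for (size_t i = 0; i < npages; i++)
 *				printf("page %zu: %s\n", i,
 *				       (vec[i] & 1) ? "resident" : "not resident");
 *		return 0;
 *	}
 */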