xref: /linux/fs/proc/task_mmu.c (revision 7203ca412fc8e8a0588e9adc0f777d3163f8dff3)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/pagewalk.h>
3 #include <linux/mm_inline.h>
4 #include <linux/hugetlb.h>
5 #include <linux/huge_mm.h>
6 #include <linux/mount.h>
7 #include <linux/ksm.h>
8 #include <linux/seq_file.h>
9 #include <linux/highmem.h>
10 #include <linux/ptrace.h>
11 #include <linux/slab.h>
12 #include <linux/pagemap.h>
13 #include <linux/mempolicy.h>
14 #include <linux/rmap.h>
15 #include <linux/swap.h>
16 #include <linux/sched/mm.h>
17 #include <linux/leafops.h>
18 #include <linux/mmu_notifier.h>
19 #include <linux/page_idle.h>
20 #include <linux/shmem_fs.h>
21 #include <linux/uaccess.h>
22 #include <linux/pkeys.h>
23 #include <linux/minmax.h>
24 #include <linux/overflow.h>
25 #include <linux/buildid.h>
26 
27 #include <asm/elf.h>
28 #include <asm/tlb.h>
29 #include <asm/tlbflush.h>
30 #include "internal.h"
31 
32 #define SENTINEL_VMA_END	-1
33 #define SENTINEL_VMA_GATE	-2
34 
35 #define SEQ_PUT_DEC(str, val) \
36 		seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)
37 void task_mem(struct seq_file *m, struct mm_struct *mm)
38 {
39 	unsigned long text, lib, swap, anon, file, shmem;
40 	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
41 
42 	anon = get_mm_counter_sum(mm, MM_ANONPAGES);
43 	file = get_mm_counter_sum(mm, MM_FILEPAGES);
44 	shmem = get_mm_counter_sum(mm, MM_SHMEMPAGES);
45 
46 	/*
47 	 * Note: to minimize their overhead, mm maintains hiwater_vm and
48 	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
49 	 * collector of these hiwater stats must therefore get total_vm
50 	 * and rss too, which will usually be the higher.  Barriers? not
51 	 * worth the effort, such snapshots can always be inconsistent.
52 	 */
53 	hiwater_vm = total_vm = mm->total_vm;
54 	if (hiwater_vm < mm->hiwater_vm)
55 		hiwater_vm = mm->hiwater_vm;
56 	hiwater_rss = total_rss = anon + file + shmem;
57 	if (hiwater_rss < mm->hiwater_rss)
58 		hiwater_rss = mm->hiwater_rss;
59 
60 	/* split executable areas between text and lib */
61 	text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK);
62 	text = min(text, mm->exec_vm << PAGE_SHIFT);
63 	lib = (mm->exec_vm << PAGE_SHIFT) - text;
64 
65 	swap = get_mm_counter_sum(mm, MM_SWAPENTS);
66 	SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
67 	SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
68 	SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
69 	SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm));
70 	SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss);
71 	SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss);
72 	SEQ_PUT_DEC(" kB\nRssAnon:\t", anon);
73 	SEQ_PUT_DEC(" kB\nRssFile:\t", file);
74 	SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem);
75 	SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm);
76 	SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm);
77 	seq_put_decimal_ull_width(m,
78 		    " kB\nVmExe:\t", text >> 10, 8);
79 	seq_put_decimal_ull_width(m,
80 		    " kB\nVmLib:\t", lib >> 10, 8);
81 	seq_put_decimal_ull_width(m,
82 		    " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8);
83 	SEQ_PUT_DEC(" kB\nVmSwap:\t", swap);
84 	seq_puts(m, " kB\n");
85 	hugetlb_report_usage(m, mm);
86 }
87 #undef SEQ_PUT_DEC
88 
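/*
 * Illustrative only (not part of the kernel source): task_mem() emits the
 * Vm* block of /proc/<pid>/status. A sample of the output it produces,
 * with made-up values:
 *
 *	VmPeak:	    8940 kB
 *	VmSize:	    8904 kB
 *	VmLck:	       0 kB
 *	VmHWM:	    1336 kB
 *	VmRSS:	    1336 kB
 *	VmSwap:	       0 kB
 */
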
89 unsigned long task_vsize(struct mm_struct *mm)
90 {
91 	return PAGE_SIZE * mm->total_vm;
92 }
93 
94 unsigned long task_statm(struct mm_struct *mm,
95 			 unsigned long *shared, unsigned long *text,
96 			 unsigned long *data, unsigned long *resident)
97 {
98 	*shared = get_mm_counter_sum(mm, MM_FILEPAGES) +
99 			get_mm_counter_sum(mm, MM_SHMEMPAGES);
100 	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
101 								>> PAGE_SHIFT;
102 	*data = mm->data_vm + mm->stack_vm;
103 	*resident = *shared + get_mm_counter_sum(mm, MM_ANONPAGES);
104 	return mm->total_vm;
105 }
106 
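/*
 * Illustrative only: the values filled in above back /proc/<pid>/statm,
 * which userspace reads as seven page counts, e.g. (made-up values):
 *
 *	$ cat /proc/self/statm
 *	2226 334 301 6 0 124 0
 *
 * in the order size resident shared text lib data dt (lib and dt are
 * always 0).
 */
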
107 #ifdef CONFIG_NUMA
108 /*
109  * Save get_task_policy() for show_numa_map().
110  */
111 static void hold_task_mempolicy(struct proc_maps_private *priv)
112 {
113 	struct task_struct *task = priv->task;
114 
115 	task_lock(task);
116 	priv->task_mempolicy = get_task_policy(task);
117 	mpol_get(priv->task_mempolicy);
118 	task_unlock(task);
119 }
120 static void release_task_mempolicy(struct proc_maps_private *priv)
121 {
122 	mpol_put(priv->task_mempolicy);
123 }
124 #else
125 static void hold_task_mempolicy(struct proc_maps_private *priv)
126 {
127 }
128 static void release_task_mempolicy(struct proc_maps_private *priv)
129 {
130 }
131 #endif
132 
133 #ifdef CONFIG_PER_VMA_LOCK
134 
135 static void reset_lock_ctx(struct proc_maps_locking_ctx *lock_ctx)
136 {
137 	lock_ctx->locked_vma = NULL;
138 	lock_ctx->mmap_locked = false;
139 }
140 
141 static void unlock_ctx_vma(struct proc_maps_locking_ctx *lock_ctx)
142 {
143 	if (lock_ctx->locked_vma) {
144 		vma_end_read(lock_ctx->locked_vma);
145 		lock_ctx->locked_vma = NULL;
146 	}
147 }
148 
149 static const struct seq_operations proc_pid_maps_op;
150 
151 static inline bool lock_vma_range(struct seq_file *m,
152 				  struct proc_maps_locking_ctx *lock_ctx)
153 {
154 	/*
155 	 * smaps and numa_maps perform a page table walk and therefore
156 	 * require mmap_lock, but maps can be read by locking just the
157 	 * vma and walking the vma tree under rcu read protection.
158 	 */
159 	if (m->op != &proc_pid_maps_op) {
160 		if (mmap_read_lock_killable(lock_ctx->mm))
161 			return false;
162 
163 		lock_ctx->mmap_locked = true;
164 	} else {
165 		rcu_read_lock();
166 		reset_lock_ctx(lock_ctx);
167 	}
168 
169 	return true;
170 }
171 
172 static inline void unlock_vma_range(struct proc_maps_locking_ctx *lock_ctx)
173 {
174 	if (lock_ctx->mmap_locked) {
175 		mmap_read_unlock(lock_ctx->mm);
176 	} else {
177 		unlock_ctx_vma(lock_ctx);
178 		rcu_read_unlock();
179 	}
180 }
181 
182 static struct vm_area_struct *get_next_vma(struct proc_maps_private *priv,
183 					   loff_t last_pos)
184 {
185 	struct proc_maps_locking_ctx *lock_ctx = &priv->lock_ctx;
186 	struct vm_area_struct *vma;
187 
188 	if (lock_ctx->mmap_locked)
189 		return vma_next(&priv->iter);
190 
191 	unlock_ctx_vma(lock_ctx);
192 	vma = lock_next_vma(lock_ctx->mm, &priv->iter, last_pos);
193 	if (!IS_ERR_OR_NULL(vma))
194 		lock_ctx->locked_vma = vma;
195 
196 	return vma;
197 }
198 
199 static inline bool fallback_to_mmap_lock(struct proc_maps_private *priv,
200 					 loff_t pos)
201 {
202 	struct proc_maps_locking_ctx *lock_ctx = &priv->lock_ctx;
203 
204 	if (lock_ctx->mmap_locked)
205 		return false;
206 
207 	rcu_read_unlock();
208 	mmap_read_lock(lock_ctx->mm);
209 	/* Reinitialize the iterator after taking mmap_lock */
210 	vma_iter_set(&priv->iter, pos);
211 	lock_ctx->mmap_locked = true;
212 
213 	return true;
214 }
215 
216 #else /* CONFIG_PER_VMA_LOCK */
217 
218 static inline bool lock_vma_range(struct seq_file *m,
219 				  struct proc_maps_locking_ctx *lock_ctx)
220 {
221 	return mmap_read_lock_killable(lock_ctx->mm) == 0;
222 }
223 
224 static inline void unlock_vma_range(struct proc_maps_locking_ctx *lock_ctx)
225 {
226 	mmap_read_unlock(lock_ctx->mm);
227 }
228 
229 static struct vm_area_struct *get_next_vma(struct proc_maps_private *priv,
230 					   loff_t last_pos)
231 {
232 	return vma_next(&priv->iter);
233 }
234 
235 static inline bool fallback_to_mmap_lock(struct proc_maps_private *priv,
236 					 loff_t pos)
237 {
238 	return false;
239 }
240 
241 #endif /* CONFIG_PER_VMA_LOCK */
242 
243 static struct vm_area_struct *proc_get_vma(struct seq_file *m, loff_t *ppos)
244 {
245 	struct proc_maps_private *priv = m->private;
246 	struct vm_area_struct *vma;
247 
248 retry:
249 	vma = get_next_vma(priv, *ppos);
250 	/* EINTR or EAGAIN is possible */
251 	if (IS_ERR(vma)) {
252 		if (PTR_ERR(vma) == -EAGAIN && fallback_to_mmap_lock(priv, *ppos))
253 			goto retry;
254 
255 		return vma;
256 	}
257 
258 	/* Store previous position to be able to restart if needed */
259 	priv->last_pos = *ppos;
260 	if (vma) {
261 		/*
262 		 * Track the end of the reported vma to ensure position changes
263 		 * even if previous vma was merged with the next vma and we
264 		 * found the extended vma with the same vm_start.
265 		 */
266 		*ppos = vma->vm_end;
267 	} else {
268 		*ppos = SENTINEL_VMA_GATE;
269 		vma = get_gate_vma(priv->lock_ctx.mm);
270 	}
271 
272 	return vma;
273 }
274 
275 static void *m_start(struct seq_file *m, loff_t *ppos)
276 {
277 	struct proc_maps_private *priv = m->private;
278 	struct proc_maps_locking_ctx *lock_ctx;
279 	loff_t last_addr = *ppos;
280 	struct mm_struct *mm;
281 
282 	/* See m_next(). Zero at the start or after lseek. */
283 	if (last_addr == SENTINEL_VMA_END)
284 		return NULL;
285 
286 	priv->task = get_proc_task(priv->inode);
287 	if (!priv->task)
288 		return ERR_PTR(-ESRCH);
289 
290 	lock_ctx = &priv->lock_ctx;
291 	mm = lock_ctx->mm;
292 	if (!mm || !mmget_not_zero(mm)) {
293 		put_task_struct(priv->task);
294 		priv->task = NULL;
295 		return NULL;
296 	}
297 
298 	if (!lock_vma_range(m, lock_ctx)) {
299 		mmput(mm);
300 		put_task_struct(priv->task);
301 		priv->task = NULL;
302 		return ERR_PTR(-EINTR);
303 	}
304 
305 	/*
306 	 * Reset current position if last_addr was set before
307 	 * and it's not a sentinel.
308 	 */
309 	if (last_addr > 0)
310 		*ppos = last_addr = priv->last_pos;
311 	vma_iter_init(&priv->iter, mm, (unsigned long)last_addr);
312 	hold_task_mempolicy(priv);
313 	if (last_addr == SENTINEL_VMA_GATE)
314 		return get_gate_vma(mm);
315 
316 	return proc_get_vma(m, ppos);
317 }
318 
319 static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
320 {
321 	if (*ppos == SENTINEL_VMA_GATE) {
322 		*ppos = SENTINEL_VMA_END;
323 		return NULL;
324 	}
325 	return proc_get_vma(m, ppos);
326 }
327 
328 static void m_stop(struct seq_file *m, void *v)
329 {
330 	struct proc_maps_private *priv = m->private;
331 	struct mm_struct *mm = priv->lock_ctx.mm;
332 
333 	if (!priv->task)
334 		return;
335 
336 	release_task_mempolicy(priv);
337 	unlock_vma_range(&priv->lock_ctx);
338 	mmput(mm);
339 	put_task_struct(priv->task);
340 	priv->task = NULL;
341 }
342 
343 static int proc_maps_open(struct inode *inode, struct file *file,
344 			const struct seq_operations *ops, int psize)
345 {
346 	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);
347 
348 	if (!priv)
349 		return -ENOMEM;
350 
351 	priv->inode = inode;
352 	priv->lock_ctx.mm = proc_mem_open(inode, PTRACE_MODE_READ);
353 	if (IS_ERR(priv->lock_ctx.mm)) {
354 		int err = PTR_ERR(priv->lock_ctx.mm);
355 
356 		seq_release_private(inode, file);
357 		return err;
358 	}
359 
360 	return 0;
361 }
362 
363 static int proc_map_release(struct inode *inode, struct file *file)
364 {
365 	struct seq_file *seq = file->private_data;
366 	struct proc_maps_private *priv = seq->private;
367 
368 	if (priv->lock_ctx.mm)
369 		mmdrop(priv->lock_ctx.mm);
370 
371 	return seq_release_private(inode, file);
372 }
373 
374 static int do_maps_open(struct inode *inode, struct file *file,
375 			const struct seq_operations *ops)
376 {
377 	return proc_maps_open(inode, file, ops,
378 				sizeof(struct proc_maps_private));
379 }
380 
381 static void get_vma_name(struct vm_area_struct *vma,
382 			 const struct path **path,
383 			 const char **name,
384 			 const char **name_fmt)
385 {
386 	struct anon_vma_name *anon_name = vma->vm_mm ? anon_vma_name(vma) : NULL;
387 
388 	*name = NULL;
389 	*path = NULL;
390 	*name_fmt = NULL;
391 
392 	/*
393 	 * Print the dentry name for named mappings, and a
394 	 * special [heap] marker for the heap:
395 	 */
396 	if (vma->vm_file) {
397 		/*
398 		 * If the user named this anon shared memory via
399 		 * prctl(PR_SET_VMA, ...), use the provided name.
400 		 */
401 		if (anon_name) {
402 			*name_fmt = "[anon_shmem:%s]";
403 			*name = anon_name->name;
404 		} else {
405 			*path = file_user_path(vma->vm_file);
406 		}
407 		return;
408 	}
409 
410 	if (vma->vm_ops && vma->vm_ops->name) {
411 		*name = vma->vm_ops->name(vma);
412 		if (*name)
413 			return;
414 	}
415 
416 	*name = arch_vma_name(vma);
417 	if (*name)
418 		return;
419 
420 	if (!vma->vm_mm) {
421 		*name = "[vdso]";
422 		return;
423 	}
424 
425 	if (vma_is_initial_heap(vma)) {
426 		*name = "[heap]";
427 		return;
428 	}
429 
430 	if (vma_is_initial_stack(vma)) {
431 		*name = "[stack]";
432 		return;
433 	}
434 
435 	if (anon_name) {
436 		*name_fmt = "[anon:%s]";
437 		*name = anon_name->name;
438 		return;
439 	}
440 }
441 
442 static void show_vma_header_prefix(struct seq_file *m,
443 				   unsigned long start, unsigned long end,
444 				   vm_flags_t flags, unsigned long long pgoff,
445 				   dev_t dev, unsigned long ino)
446 {
447 	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
448 	seq_put_hex_ll(m, NULL, start, 8);
449 	seq_put_hex_ll(m, "-", end, 8);
450 	seq_putc(m, ' ');
451 	seq_putc(m, flags & VM_READ ? 'r' : '-');
452 	seq_putc(m, flags & VM_WRITE ? 'w' : '-');
453 	seq_putc(m, flags & VM_EXEC ? 'x' : '-');
454 	seq_putc(m, flags & VM_MAYSHARE ? 's' : 'p');
455 	seq_put_hex_ll(m, " ", pgoff, 8);
456 	seq_put_hex_ll(m, " ", MAJOR(dev), 2);
457 	seq_put_hex_ll(m, ":", MINOR(dev), 2);
458 	seq_put_decimal_ull(m, " ", ino);
459 	seq_putc(m, ' ');
460 }
461 
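/*
 * Illustrative only: show_vma_header_prefix() emits the fixed-width
 * prefix of a /proc/<pid>/maps line; show_map_vma() below appends the
 * pathname or special name, yielding e.g. (made-up values):
 *
 *	7f3c8a200000-7f3c8a3c5000 r-xp 00000000 08:01 1048602   /usr/lib/libc.so.6
 *	^start       ^end         perms offset  dev   inode     pathname
 */
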
462 static void
463 show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
464 {
465 	const struct path *path;
466 	const char *name_fmt, *name;
467 	vm_flags_t flags = vma->vm_flags;
468 	unsigned long ino = 0;
469 	unsigned long long pgoff = 0;
470 	unsigned long start, end;
471 	dev_t dev = 0;
472 
473 	if (vma->vm_file) {
474 		const struct inode *inode = file_user_inode(vma->vm_file);
475 
476 		dev = inode->i_sb->s_dev;
477 		ino = inode->i_ino;
478 		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
479 	}
480 
481 	start = vma->vm_start;
482 	end = vma->vm_end;
483 	show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);
484 
485 	get_vma_name(vma, &path, &name, &name_fmt);
486 	if (path) {
487 		seq_pad(m, ' ');
488 		seq_path(m, path, "\n");
489 	} else if (name_fmt) {
490 		seq_pad(m, ' ');
491 		seq_printf(m, name_fmt, name);
492 	} else if (name) {
493 		seq_pad(m, ' ');
494 		seq_puts(m, name);
495 	}
496 	seq_putc(m, '\n');
497 }
498 
499 static int show_map(struct seq_file *m, void *v)
500 {
501 	show_map_vma(m, v);
502 	return 0;
503 }
504 
505 static const struct seq_operations proc_pid_maps_op = {
506 	.start	= m_start,
507 	.next	= m_next,
508 	.stop	= m_stop,
509 	.show	= show_map
510 };
511 
512 static int pid_maps_open(struct inode *inode, struct file *file)
513 {
514 	return do_maps_open(inode, file, &proc_pid_maps_op);
515 }
516 
517 #define PROCMAP_QUERY_VMA_FLAGS (				\
518 		PROCMAP_QUERY_VMA_READABLE |			\
519 		PROCMAP_QUERY_VMA_WRITABLE |			\
520 		PROCMAP_QUERY_VMA_EXECUTABLE |			\
521 		PROCMAP_QUERY_VMA_SHARED			\
522 )
523 
524 #define PROCMAP_QUERY_VALID_FLAGS_MASK (			\
525 		PROCMAP_QUERY_COVERING_OR_NEXT_VMA |		\
526 		PROCMAP_QUERY_FILE_BACKED_VMA |			\
527 		PROCMAP_QUERY_VMA_FLAGS				\
528 )
529 
530 #ifdef CONFIG_PER_VMA_LOCK
531 
532 static int query_vma_setup(struct proc_maps_locking_ctx *lock_ctx)
533 {
534 	reset_lock_ctx(lock_ctx);
535 
536 	return 0;
537 }
538 
539 static void query_vma_teardown(struct proc_maps_locking_ctx *lock_ctx)
540 {
541 	if (lock_ctx->mmap_locked) {
542 		mmap_read_unlock(lock_ctx->mm);
543 		lock_ctx->mmap_locked = false;
544 	} else {
545 		unlock_ctx_vma(lock_ctx);
546 	}
547 }
548 
549 static struct vm_area_struct *query_vma_find_by_addr(struct proc_maps_locking_ctx *lock_ctx,
550 						     unsigned long addr)
551 {
552 	struct mm_struct *mm = lock_ctx->mm;
553 	struct vm_area_struct *vma;
554 	struct vma_iterator vmi;
555 
556 	if (lock_ctx->mmap_locked)
557 		return find_vma(mm, addr);
558 
559 	/* Unlock previously locked VMA and find the next one under RCU */
560 	unlock_ctx_vma(lock_ctx);
561 	rcu_read_lock();
562 	vma_iter_init(&vmi, mm, addr);
563 	vma = lock_next_vma(mm, &vmi, addr);
564 	rcu_read_unlock();
565 
566 	if (!vma)
567 		return NULL;
568 
569 	if (!IS_ERR(vma)) {
570 		lock_ctx->locked_vma = vma;
571 		return vma;
572 	}
573 
574 	if (PTR_ERR(vma) == -EAGAIN) {
575 		/* Fallback to mmap_lock on vma->vm_refcnt overflow */
576 		mmap_read_lock(mm);
577 		vma = find_vma(mm, addr);
578 		lock_ctx->mmap_locked = true;
579 	}
580 
581 	return vma;
582 }
583 
584 #else /* CONFIG_PER_VMA_LOCK */
585 
586 static int query_vma_setup(struct proc_maps_locking_ctx *lock_ctx)
587 {
588 	return mmap_read_lock_killable(lock_ctx->mm);
589 }
590 
591 static void query_vma_teardown(struct proc_maps_locking_ctx *lock_ctx)
592 {
593 	mmap_read_unlock(lock_ctx->mm);
594 }
595 
596 static struct vm_area_struct *query_vma_find_by_addr(struct proc_maps_locking_ctx *lock_ctx,
597 						     unsigned long addr)
598 {
599 	return find_vma(lock_ctx->mm, addr);
600 }
601 
602 #endif  /* CONFIG_PER_VMA_LOCK */
603 
604 static struct vm_area_struct *query_matching_vma(struct proc_maps_locking_ctx *lock_ctx,
605 						 unsigned long addr, u32 flags)
606 {
607 	struct vm_area_struct *vma;
608 
609 next_vma:
610 	vma = query_vma_find_by_addr(lock_ctx, addr);
611 	if (IS_ERR(vma))
612 		return vma;
613 
614 	if (!vma)
615 		goto no_vma;
616 
617 	/* user requested only file-backed VMAs, keep iterating */
618 	if ((flags & PROCMAP_QUERY_FILE_BACKED_VMA) && !vma->vm_file)
619 		goto skip_vma;
620 
621 	/* VMA permissions should satisfy query flags */
622 	if (flags & PROCMAP_QUERY_VMA_FLAGS) {
623 		u32 perm = 0;
624 
625 		if (flags & PROCMAP_QUERY_VMA_READABLE)
626 			perm |= VM_READ;
627 		if (flags & PROCMAP_QUERY_VMA_WRITABLE)
628 			perm |= VM_WRITE;
629 		if (flags & PROCMAP_QUERY_VMA_EXECUTABLE)
630 			perm |= VM_EXEC;
631 		if (flags & PROCMAP_QUERY_VMA_SHARED)
632 			perm |= VM_MAYSHARE;
633 
634 		if ((vma->vm_flags & perm) != perm)
635 			goto skip_vma;
636 	}
637 
638 	/* found covering VMA or user is OK with the matching next VMA */
639 	if ((flags & PROCMAP_QUERY_COVERING_OR_NEXT_VMA) || vma->vm_start <= addr)
640 		return vma;
641 
642 skip_vma:
643 	/*
644 	 * If the user needs the closest matching VMA, keep iterating.
645 	 */
646 	addr = vma->vm_end;
647 	if (flags & PROCMAP_QUERY_COVERING_OR_NEXT_VMA)
648 		goto next_vma;
649 
650 no_vma:
651 	return ERR_PTR(-ENOENT);
652 }
653 
654 static int do_procmap_query(struct mm_struct *mm, void __user *uarg)
655 {
656 	struct proc_maps_locking_ctx lock_ctx = { .mm = mm };
657 	struct procmap_query karg;
658 	struct vm_area_struct *vma;
659 	const char *name = NULL;
660 	char build_id_buf[BUILD_ID_SIZE_MAX], *name_buf = NULL;
661 	__u64 usize;
662 	int err;
663 
664 	if (copy_from_user(&usize, (void __user *)uarg, sizeof(usize)))
665 		return -EFAULT;
666 	/* argument struct can never be that large, reject abuse */
667 	if (usize > PAGE_SIZE)
668 		return -E2BIG;
669 	/* argument struct should have at least query_flags and query_addr fields */
670 	if (usize < offsetofend(struct procmap_query, query_addr))
671 		return -EINVAL;
672 	err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
673 	if (err)
674 		return err;
675 
676 	/* reject unknown flags */
677 	if (karg.query_flags & ~PROCMAP_QUERY_VALID_FLAGS_MASK)
678 		return -EINVAL;
679 	/* either both buffer address and size are set, or both should be zero */
680 	if (!!karg.vma_name_size != !!karg.vma_name_addr)
681 		return -EINVAL;
682 	if (!!karg.build_id_size != !!karg.build_id_addr)
683 		return -EINVAL;
684 
685 	if (!mm || !mmget_not_zero(mm))
686 		return -ESRCH;
687 
688 	err = query_vma_setup(&lock_ctx);
689 	if (err) {
690 		mmput(mm);
691 		return err;
692 	}
693 
694 	vma = query_matching_vma(&lock_ctx, karg.query_addr, karg.query_flags);
695 	if (IS_ERR(vma)) {
696 		err = PTR_ERR(vma);
697 		vma = NULL;
698 		goto out;
699 	}
700 
701 	karg.vma_start = vma->vm_start;
702 	karg.vma_end = vma->vm_end;
703 
704 	karg.vma_flags = 0;
705 	if (vma->vm_flags & VM_READ)
706 		karg.vma_flags |= PROCMAP_QUERY_VMA_READABLE;
707 	if (vma->vm_flags & VM_WRITE)
708 		karg.vma_flags |= PROCMAP_QUERY_VMA_WRITABLE;
709 	if (vma->vm_flags & VM_EXEC)
710 		karg.vma_flags |= PROCMAP_QUERY_VMA_EXECUTABLE;
711 	if (vma->vm_flags & VM_MAYSHARE)
712 		karg.vma_flags |= PROCMAP_QUERY_VMA_SHARED;
713 
714 	karg.vma_page_size = vma_kernel_pagesize(vma);
715 
716 	if (vma->vm_file) {
717 		const struct inode *inode = file_user_inode(vma->vm_file);
718 
719 		karg.vma_offset = ((__u64)vma->vm_pgoff) << PAGE_SHIFT;
720 		karg.dev_major = MAJOR(inode->i_sb->s_dev);
721 		karg.dev_minor = MINOR(inode->i_sb->s_dev);
722 		karg.inode = inode->i_ino;
723 	} else {
724 		karg.vma_offset = 0;
725 		karg.dev_major = 0;
726 		karg.dev_minor = 0;
727 		karg.inode = 0;
728 	}
729 
730 	if (karg.build_id_size) {
731 		__u32 build_id_sz;
732 
733 		err = build_id_parse(vma, build_id_buf, &build_id_sz);
734 		if (err) {
735 			karg.build_id_size = 0;
736 		} else {
737 			if (karg.build_id_size < build_id_sz) {
738 				err = -ENAMETOOLONG;
739 				goto out;
740 			}
741 			karg.build_id_size = build_id_sz;
742 		}
743 	}
744 
745 	if (karg.vma_name_size) {
746 		size_t name_buf_sz = min_t(size_t, PATH_MAX, karg.vma_name_size);
747 		const struct path *path;
748 		const char *name_fmt;
749 		size_t name_sz = 0;
750 
751 		get_vma_name(vma, &path, &name, &name_fmt);
752 
753 		if (path || name_fmt || name) {
754 			name_buf = kmalloc(name_buf_sz, GFP_KERNEL);
755 			if (!name_buf) {
756 				err = -ENOMEM;
757 				goto out;
758 			}
759 		}
760 		if (path) {
761 			name = d_path(path, name_buf, name_buf_sz);
762 			if (IS_ERR(name)) {
763 				err = PTR_ERR(name);
764 				goto out;
765 			}
766 			name_sz = name_buf + name_buf_sz - name;
767 		} else if (name || name_fmt) {
768 			name_sz = 1 + snprintf(name_buf, name_buf_sz, name_fmt ?: "%s", name);
769 			name = name_buf;
770 		}
771 		if (name_sz > name_buf_sz) {
772 			err = -ENAMETOOLONG;
773 			goto out;
774 		}
775 		karg.vma_name_size = name_sz;
776 	}
777 
778 	/* unlock vma or mmap_lock, and put mm_struct before copying data to user */
779 	query_vma_teardown(&lock_ctx);
780 	mmput(mm);
781 
782 	if (karg.vma_name_size && copy_to_user(u64_to_user_ptr(karg.vma_name_addr),
783 					       name, karg.vma_name_size)) {
784 		kfree(name_buf);
785 		return -EFAULT;
786 	}
787 	kfree(name_buf);
788 
789 	if (karg.build_id_size && copy_to_user(u64_to_user_ptr(karg.build_id_addr),
790 					       build_id_buf, karg.build_id_size))
791 		return -EFAULT;
792 
793 	if (copy_to_user(uarg, &karg, min_t(size_t, sizeof(karg), usize)))
794 		return -EFAULT;
795 
796 	return 0;
797 
798 out:
799 	query_vma_teardown(&lock_ctx);
800 	mmput(mm);
801 	kfree(name_buf);
802 	return err;
803 }
804 
805 static long procfs_procmap_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
806 {
807 	struct seq_file *seq = file->private_data;
808 	struct proc_maps_private *priv = seq->private;
809 
810 	switch (cmd) {
811 	case PROCMAP_QUERY:
812 		/* priv->lock_ctx.mm is set during file open operation */
813 		return do_procmap_query(priv->lock_ctx.mm, (void __user *)arg);
814 	default:
815 		return -ENOIOCTLCMD;
816 	}
817 }
818 
819 const struct file_operations proc_pid_maps_operations = {
820 	.open		= pid_maps_open,
821 	.read		= seq_read,
822 	.llseek		= seq_lseek,
823 	.release	= proc_map_release,
824 	.unlocked_ioctl = procfs_procmap_ioctl,
825 	.compat_ioctl	= compat_ptr_ioctl,
826 };
827 
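/*
 * Illustrative userspace sketch (not part of this file, hence the #if 0):
 * querying the VMA covering an address via ioctl(PROCMAP_QUERY) on an open
 * /proc/<pid>/maps fd, as serviced by do_procmap_query() above. Assumes
 * struct procmap_query from the <linux/fs.h> uapi; error handling trimmed.
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

static int print_vma_covering(int maps_fd, uint64_t addr)
{
	char name[256];
	struct procmap_query q;

	memset(&q, 0, sizeof(q));
	q.size = sizeof(q);		/* lets old and new ABIs interoperate */
	q.query_flags = PROCMAP_QUERY_COVERING_OR_NEXT_VMA;
	q.query_addr = addr;
	q.vma_name_addr = (uint64_t)(uintptr_t)name;	/* optional name buffer */
	q.vma_name_size = sizeof(name);

	if (ioctl(maps_fd, PROCMAP_QUERY, &q))
		return -1;	/* e.g. ENOENT: no covering or next VMA matched */

	printf("%llx-%llx %s\n", (unsigned long long)q.vma_start,
	       (unsigned long long)q.vma_end, q.vma_name_size ? name : "");
	return 0;
}
#endif
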
828 /*
829  * Proportional Set Size (PSS): my share of RSS.
830  *
831  * PSS of a process is the count of pages it has in memory, where each
832  * page is divided by the number of processes sharing it.  So if a
833  * process has 1000 pages all to itself, and 1000 shared with one other
834  * process, its PSS will be 1500.
835  *
836  * To keep accumulated division errors low, we adopt a 64bit
837  * fixed-point pss counter, so (pss >> PSS_SHIFT) gives the real
838  * byte count.
839  *
840  * A shift of 12 before division means (assuming 4K page size):
841  * 	- 1M 3-user-pages add up to 8KB errors;
842  * 	- supports mapcount up to 2^24, or 16M;
843  * 	- supports PSS up to 2^52 bytes, or 4PB.
844  */
845 #define PSS_SHIFT 12
846 
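/*
 * Worked example (illustrative, 4K pages): an exclusively mapped page
 * contributes PAGE_SIZE << PSS_SHIFT to pss, a page shared by two
 * processes contributes (PAGE_SIZE << PSS_SHIFT) / 2. For the 1000
 * private + 1000 two-way shared pages above:
 *
 *	pss = 1000 * (4096 << 12) + 1000 * (4096 << 12) / 2
 *	pss >> PSS_SHIFT = 1500 * 4096 bytes = 6000 kB
 */
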
847 #ifdef CONFIG_PROC_PAGE_MONITOR
848 struct mem_size_stats {
849 	unsigned long resident;
850 	unsigned long shared_clean;
851 	unsigned long shared_dirty;
852 	unsigned long private_clean;
853 	unsigned long private_dirty;
854 	unsigned long referenced;
855 	unsigned long anonymous;
856 	unsigned long lazyfree;
857 	unsigned long anonymous_thp;
858 	unsigned long shmem_thp;
859 	unsigned long file_thp;
860 	unsigned long swap;
861 	unsigned long shared_hugetlb;
862 	unsigned long private_hugetlb;
863 	unsigned long ksm;
864 	u64 pss;
865 	u64 pss_anon;
866 	u64 pss_file;
867 	u64 pss_shmem;
868 	u64 pss_dirty;
869 	u64 pss_locked;
870 	u64 swap_pss;
871 };
872 
873 static void smaps_page_accumulate(struct mem_size_stats *mss,
874 		struct folio *folio, unsigned long size, unsigned long pss,
875 		bool dirty, bool locked, bool private)
876 {
877 	mss->pss += pss;
878 
879 	if (folio_test_anon(folio))
880 		mss->pss_anon += pss;
881 	else if (folio_test_swapbacked(folio))
882 		mss->pss_shmem += pss;
883 	else
884 		mss->pss_file += pss;
885 
886 	if (locked)
887 		mss->pss_locked += pss;
888 
889 	if (dirty || folio_test_dirty(folio)) {
890 		mss->pss_dirty += pss;
891 		if (private)
892 			mss->private_dirty += size;
893 		else
894 			mss->shared_dirty += size;
895 	} else {
896 		if (private)
897 			mss->private_clean += size;
898 		else
899 			mss->shared_clean += size;
900 	}
901 }
902 
903 static void smaps_account(struct mem_size_stats *mss, struct page *page,
904 		bool compound, bool young, bool dirty, bool locked,
905 		bool present)
906 {
907 	struct folio *folio = page_folio(page);
908 	int i, nr = compound ? compound_nr(page) : 1;
909 	unsigned long size = nr * PAGE_SIZE;
910 	bool exclusive;
911 	int mapcount;
912 
913 	/*
914 	 * First accumulate quantities that depend only on |size| and the type
915 	 * of the compound page.
916 	 */
917 	if (folio_test_anon(folio)) {
918 		mss->anonymous += size;
919 		if (!folio_test_swapbacked(folio) && !dirty &&
920 		    !folio_test_dirty(folio))
921 			mss->lazyfree += size;
922 	}
923 
924 	if (folio_test_ksm(folio))
925 		mss->ksm += size;
926 
927 	mss->resident += size;
928 	/* Accumulate the size in pages that have been accessed. */
929 	if (young || folio_test_young(folio) || folio_test_referenced(folio))
930 		mss->referenced += size;
931 
932 	/*
933 	 * Then accumulate quantities that may depend on sharing, or that may
934 	 * differ page-by-page.
935 	 *
936 	 * refcount == 1 for present entries guarantees that the folio is mapped
937 	 * exactly once. For large folios this implies that exactly one
938 	 * PTE/PMD/... maps (a part of) this folio.
939 	 *
940 	 * Treat all non-present entries (where relying on the mapcount and
941 	 * refcount doesn't make sense) as "maybe shared, but not sure how
942 	 * often". We treat device private entries as being fake-present.
943 	 *
944 	 * Note that it would not be safe to read the mapcount especially for
945 	 * pages referenced by migration entries, even with the PTL held.
946 	 */
947 	if (folio_ref_count(folio) == 1 || !present) {
948 		smaps_page_accumulate(mss, folio, size, size << PSS_SHIFT,
949 				      dirty, locked, present);
950 		return;
951 	}
952 
953 	if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
954 		mapcount = folio_average_page_mapcount(folio);
955 		exclusive = !folio_maybe_mapped_shared(folio);
956 	}
957 
958 	/*
959 	 * We obtain a snapshot of the mapcount. Without holding the folio lock
960 	 * this snapshot can be slightly wrong as we cannot always read the
961 	 * mapcount atomically.
962 	 */
963 	for (i = 0; i < nr; i++, page++) {
964 		unsigned long pss = PAGE_SIZE << PSS_SHIFT;
965 
966 		if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) {
967 			mapcount = folio_precise_page_mapcount(folio, page);
968 			exclusive = mapcount < 2;
969 		}
970 
971 		if (mapcount >= 2)
972 			pss /= mapcount;
973 		smaps_page_accumulate(mss, folio, PAGE_SIZE, pss,
974 				dirty, locked, exclusive);
975 	}
976 }
977 
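/*
 * Illustrative only: for a PMD-mapped 2M THP (512 4K pages) whose pages
 * are all also mapped by one other process, smaps_account() above adds
 * 2M to resident, but each page contributes only half weight to pss, so
 * the folio adds 1M (after >> PSS_SHIFT) to the Pss figures.
 */
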
978 #ifdef CONFIG_SHMEM
979 static int smaps_pte_hole(unsigned long addr, unsigned long end,
980 			  __always_unused int depth, struct mm_walk *walk)
981 {
982 	struct mem_size_stats *mss = walk->private;
983 	struct vm_area_struct *vma = walk->vma;
984 
985 	mss->swap += shmem_partial_swap_usage(walk->vma->vm_file->f_mapping,
986 					      linear_page_index(vma, addr),
987 					      linear_page_index(vma, end));
988 
989 	return 0;
990 }
991 #else
992 #define smaps_pte_hole		NULL
993 #endif /* CONFIG_SHMEM */
994 
995 static void smaps_pte_hole_lookup(unsigned long addr, struct mm_walk *walk)
996 {
997 #ifdef CONFIG_SHMEM
998 	if (walk->ops->pte_hole) {
999 		/* depth is not used */
1000 		smaps_pte_hole(addr, addr + PAGE_SIZE, 0, walk);
1001 	}
1002 #endif
1003 }
1004 
1005 static void smaps_pte_entry(pte_t *pte, unsigned long addr,
1006 		struct mm_walk *walk)
1007 {
1008 	struct mem_size_stats *mss = walk->private;
1009 	struct vm_area_struct *vma = walk->vma;
1010 	bool locked = !!(vma->vm_flags & VM_LOCKED);
1011 	struct page *page = NULL;
1012 	bool present = false, young = false, dirty = false;
1013 	pte_t ptent = ptep_get(pte);
1014 
1015 	if (pte_present(ptent)) {
1016 		page = vm_normal_page(vma, addr, ptent);
1017 		young = pte_young(ptent);
1018 		dirty = pte_dirty(ptent);
1019 		present = true;
1020 	} else if (pte_none(ptent)) {
1021 		smaps_pte_hole_lookup(addr, walk);
1022 	} else {
1023 		const softleaf_t entry = softleaf_from_pte(ptent);
1024 
1025 		if (softleaf_is_swap(entry)) {
1026 			int mapcount;
1027 
1028 			mss->swap += PAGE_SIZE;
1029 			mapcount = swp_swapcount(entry);
1030 			if (mapcount >= 2) {
1031 				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;
1032 
1033 				do_div(pss_delta, mapcount);
1034 				mss->swap_pss += pss_delta;
1035 			} else {
1036 				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
1037 			}
1038 		} else if (softleaf_has_pfn(entry)) {
1039 			if (softleaf_is_device_private(entry))
1040 				present = true;
1041 			page = softleaf_to_page(entry);
1042 		}
1043 	}
1044 
1045 	if (!page)
1046 		return;
1047 
1048 	smaps_account(mss, page, false, young, dirty, locked, present);
1049 }
1050 
1051 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1052 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
1053 		struct mm_walk *walk)
1054 {
1055 	struct mem_size_stats *mss = walk->private;
1056 	struct vm_area_struct *vma = walk->vma;
1057 	bool locked = !!(vma->vm_flags & VM_LOCKED);
1058 	struct page *page = NULL;
1059 	bool present = false;
1060 	struct folio *folio;
1061 
1062 	if (pmd_none(*pmd))
1063 		return;
1064 	if (pmd_present(*pmd)) {
1065 		page = vm_normal_page_pmd(vma, addr, *pmd);
1066 		present = true;
1067 	} else if (unlikely(thp_migration_supported())) {
1068 		const softleaf_t entry = softleaf_from_pmd(*pmd);
1069 
1070 		if (softleaf_has_pfn(entry))
1071 			page = softleaf_to_page(entry);
1072 	}
1073 	if (IS_ERR_OR_NULL(page))
1074 		return;
1075 	folio = page_folio(page);
1076 	if (folio_test_anon(folio))
1077 		mss->anonymous_thp += HPAGE_PMD_SIZE;
1078 	else if (folio_test_swapbacked(folio))
1079 		mss->shmem_thp += HPAGE_PMD_SIZE;
1080 	else if (folio_is_zone_device(folio))
1081 		/* pass */;
1082 	else
1083 		mss->file_thp += HPAGE_PMD_SIZE;
1084 
1085 	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
1086 		      locked, present);
1087 }
1088 #else
1089 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
1090 		struct mm_walk *walk)
1091 {
1092 }
1093 #endif
1094 
1095 static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
1096 			   struct mm_walk *walk)
1097 {
1098 	struct vm_area_struct *vma = walk->vma;
1099 	pte_t *pte;
1100 	spinlock_t *ptl;
1101 
1102 	ptl = pmd_trans_huge_lock(pmd, vma);
1103 	if (ptl) {
1104 		smaps_pmd_entry(pmd, addr, walk);
1105 		spin_unlock(ptl);
1106 		goto out;
1107 	}
1108 
1109 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1110 	if (!pte) {
1111 		walk->action = ACTION_AGAIN;
1112 		return 0;
1113 	}
1114 	for (; addr != end; pte++, addr += PAGE_SIZE)
1115 		smaps_pte_entry(pte, addr, walk);
1116 	pte_unmap_unlock(pte - 1, ptl);
1117 out:
1118 	cond_resched();
1119 	return 0;
1120 }
1121 
1122 static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
1123 {
1124 	/*
1125 	 * Don't forget to update Documentation/ on changes.
1126 	 *
1127 	 * The second dimension of mnemonics[] needs to be 3
1128 	 * rather than the previous 2
1129 	 * (i.e. [BITS_PER_LONG][3] instead of [BITS_PER_LONG][2])
1130 	 * to avoid a spurious
1131 	 * -Werror=unterminated-string-initialization warning
1132 	 * with GCC 15.
1133 	 */
1134 	static const char mnemonics[BITS_PER_LONG][3] = {
1135 		/*
1136 		 * In case we meet a flag we don't know about.
1137 		 */
1138 		[0 ... (BITS_PER_LONG-1)] = "??",
1139 
1140 		[ilog2(VM_READ)]	= "rd",
1141 		[ilog2(VM_WRITE)]	= "wr",
1142 		[ilog2(VM_EXEC)]	= "ex",
1143 		[ilog2(VM_SHARED)]	= "sh",
1144 		[ilog2(VM_MAYREAD)]	= "mr",
1145 		[ilog2(VM_MAYWRITE)]	= "mw",
1146 		[ilog2(VM_MAYEXEC)]	= "me",
1147 		[ilog2(VM_MAYSHARE)]	= "ms",
1148 		[ilog2(VM_GROWSDOWN)]	= "gd",
1149 		[ilog2(VM_PFNMAP)]	= "pf",
1150 		[ilog2(VM_MAYBE_GUARD)]	= "gu",
1151 		[ilog2(VM_LOCKED)]	= "lo",
1152 		[ilog2(VM_IO)]		= "io",
1153 		[ilog2(VM_SEQ_READ)]	= "sr",
1154 		[ilog2(VM_RAND_READ)]	= "rr",
1155 		[ilog2(VM_DONTCOPY)]	= "dc",
1156 		[ilog2(VM_DONTEXPAND)]	= "de",
1157 		[ilog2(VM_LOCKONFAULT)]	= "lf",
1158 		[ilog2(VM_ACCOUNT)]	= "ac",
1159 		[ilog2(VM_NORESERVE)]	= "nr",
1160 		[ilog2(VM_HUGETLB)]	= "ht",
1161 		[ilog2(VM_SYNC)]	= "sf",
1162 		[ilog2(VM_ARCH_1)]	= "ar",
1163 		[ilog2(VM_WIPEONFORK)]	= "wf",
1164 		[ilog2(VM_DONTDUMP)]	= "dd",
1165 #ifdef CONFIG_ARM64_BTI
1166 		[ilog2(VM_ARM64_BTI)]	= "bt",
1167 #endif
1168 #ifdef CONFIG_MEM_SOFT_DIRTY
1169 		[ilog2(VM_SOFTDIRTY)]	= "sd",
1170 #endif
1171 		[ilog2(VM_MIXEDMAP)]	= "mm",
1172 		[ilog2(VM_HUGEPAGE)]	= "hg",
1173 		[ilog2(VM_NOHUGEPAGE)]	= "nh",
1174 		[ilog2(VM_MERGEABLE)]	= "mg",
1175 		[ilog2(VM_UFFD_MISSING)]= "um",
1176 		[ilog2(VM_UFFD_WP)]	= "uw",
1177 #ifdef CONFIG_ARM64_MTE
1178 		[ilog2(VM_MTE)]		= "mt",
1179 		[ilog2(VM_MTE_ALLOWED)]	= "",
1180 #endif
1181 #ifdef CONFIG_ARCH_HAS_PKEYS
1182 		/* These come out via ProtectionKey: */
1183 		[ilog2(VM_PKEY_BIT0)]	= "",
1184 		[ilog2(VM_PKEY_BIT1)]	= "",
1185 		[ilog2(VM_PKEY_BIT2)]	= "",
1186 #if CONFIG_ARCH_PKEY_BITS > 3
1187 		[ilog2(VM_PKEY_BIT3)]	= "",
1188 #endif
1189 #if CONFIG_ARCH_PKEY_BITS > 4
1190 		[ilog2(VM_PKEY_BIT4)]	= "",
1191 #endif
1192 #endif /* CONFIG_ARCH_HAS_PKEYS */
1193 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
1194 		[ilog2(VM_UFFD_MINOR)]	= "ui",
1195 #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
1196 #ifdef CONFIG_ARCH_HAS_USER_SHADOW_STACK
1197 		[ilog2(VM_SHADOW_STACK)] = "ss",
1198 #endif
1199 #if defined(CONFIG_64BIT) || defined(CONFIG_PPC32)
1200 		[ilog2(VM_DROPPABLE)] = "dp",
1201 #endif
1202 #ifdef CONFIG_64BIT
1203 		[ilog2(VM_SEALED)] = "sl",
1204 #endif
1205 	};
1206 	size_t i;
1207 
1208 	seq_puts(m, "VmFlags: ");
1209 	for (i = 0; i < BITS_PER_LONG; i++) {
1210 		if (!mnemonics[i][0])
1211 			continue;
1212 		if (vma->vm_flags & (1UL << i))
1213 			seq_printf(m, "%s ", mnemonics[i]);
1214 	}
1215 	seq_putc(m, '\n');
1216 }
1217 
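/*
 * Illustrative only: a VmFlags line as produced above, e.g. for a
 * private, readable and writable anonymous mapping (made-up example):
 *
 *	VmFlags: rd wr mr mw me ac
 */
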
1218 #ifdef CONFIG_HUGETLB_PAGE
1219 static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
1220 				 unsigned long addr, unsigned long end,
1221 				 struct mm_walk *walk)
1222 {
1223 	struct mem_size_stats *mss = walk->private;
1224 	struct vm_area_struct *vma = walk->vma;
1225 	struct folio *folio = NULL;
1226 	bool present = false;
1227 	spinlock_t *ptl;
1228 	pte_t ptent;
1229 
1230 	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
1231 	ptent = huge_ptep_get(walk->mm, addr, pte);
1232 	if (pte_present(ptent)) {
1233 		folio = page_folio(pte_page(ptent));
1234 		present = true;
1235 	} else {
1236 		const softleaf_t entry = softleaf_from_pte(ptent);
1237 
1238 		if (softleaf_has_pfn(entry))
1239 			folio = softleaf_to_folio(entry);
1240 	}
1241 
1242 	if (folio) {
1243 		/* We treat non-present entries as "maybe shared". */
1244 		if (!present || folio_maybe_mapped_shared(folio) ||
1245 		    hugetlb_pmd_shared(pte))
1246 			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
1247 		else
1248 			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
1249 	}
1250 	spin_unlock(ptl);
1251 	return 0;
1252 }
1253 #else
1254 #define smaps_hugetlb_range	NULL
1255 #endif /* CONFIG_HUGETLB_PAGE */
1256 
1257 static const struct mm_walk_ops smaps_walk_ops = {
1258 	.pmd_entry		= smaps_pte_range,
1259 	.hugetlb_entry		= smaps_hugetlb_range,
1260 	.walk_lock		= PGWALK_RDLOCK,
1261 };
1262 
1263 static const struct mm_walk_ops smaps_shmem_walk_ops = {
1264 	.pmd_entry		= smaps_pte_range,
1265 	.hugetlb_entry		= smaps_hugetlb_range,
1266 	.pte_hole		= smaps_pte_hole,
1267 	.walk_lock		= PGWALK_RDLOCK,
1268 };
1269 
1270 /*
1271  * Gather mem stats from @vma with the indicated beginning
1272  * address @start, and keep them in @mss.
1273  *
1274  * Use vm_start of @vma as the beginning address if @start is 0.
1275  */
1276 static void smap_gather_stats(struct vm_area_struct *vma,
1277 		struct mem_size_stats *mss, unsigned long start)
1278 {
1279 	const struct mm_walk_ops *ops = &smaps_walk_ops;
1280 
1281 	/* Invalid start */
1282 	if (start >= vma->vm_end)
1283 		return;
1284 
1285 	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
1286 		/*
1287 		 * For shared or readonly shmem mappings we know that all
1288 		 * swapped out pages belong to the shmem object, and we can
1289 		 * obtain the swap value much more efficiently. For private
1290 		 * writable mappings, we might have COW pages that are
1291 		 * not affected by the parent swapped out pages of the shmem
1292 		 * object, so we have to distinguish them during the page walk.
1293 		 * Unless we know that the shmem object (or the part mapped by
1294 		 * our VMA) has no swapped out pages at all.
1295 		 */
1296 		unsigned long shmem_swapped = shmem_swap_usage(vma);
1297 
1298 		if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
1299 					!(vma->vm_flags & VM_WRITE))) {
1300 			mss->swap += shmem_swapped;
1301 		} else {
1302 			ops = &smaps_shmem_walk_ops;
1303 		}
1304 	}
1305 
1306 	/* mmap_lock is held in m_start */
1307 	if (!start)
1308 		walk_page_vma(vma, ops, mss);
1309 	else
1310 		walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss);
1311 }
1312 
1313 #define SEQ_PUT_DEC(str, val) \
1314 		seq_put_decimal_ull_width(m, str, (val) >> 10, 8)
1315 
1316 /* Show the contents common for smaps and smaps_rollup */
1317 static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
1318 	bool rollup_mode)
1319 {
1320 	SEQ_PUT_DEC("Rss:            ", mss->resident);
1321 	SEQ_PUT_DEC(" kB\nPss:            ", mss->pss >> PSS_SHIFT);
1322 	SEQ_PUT_DEC(" kB\nPss_Dirty:      ", mss->pss_dirty >> PSS_SHIFT);
1323 	if (rollup_mode) {
1324 		/*
1325 		 * These are meaningful only for smaps_rollup, otherwise two of
1326 		 * them are zero, and the other one is the same as Pss.
1327 		 */
1328 		SEQ_PUT_DEC(" kB\nPss_Anon:       ",
1329 			mss->pss_anon >> PSS_SHIFT);
1330 		SEQ_PUT_DEC(" kB\nPss_File:       ",
1331 			mss->pss_file >> PSS_SHIFT);
1332 		SEQ_PUT_DEC(" kB\nPss_Shmem:      ",
1333 			mss->pss_shmem >> PSS_SHIFT);
1334 	}
1335 	SEQ_PUT_DEC(" kB\nShared_Clean:   ", mss->shared_clean);
1336 	SEQ_PUT_DEC(" kB\nShared_Dirty:   ", mss->shared_dirty);
1337 	SEQ_PUT_DEC(" kB\nPrivate_Clean:  ", mss->private_clean);
1338 	SEQ_PUT_DEC(" kB\nPrivate_Dirty:  ", mss->private_dirty);
1339 	SEQ_PUT_DEC(" kB\nReferenced:     ", mss->referenced);
1340 	SEQ_PUT_DEC(" kB\nAnonymous:      ", mss->anonymous);
1341 	SEQ_PUT_DEC(" kB\nKSM:            ", mss->ksm);
1342 	SEQ_PUT_DEC(" kB\nLazyFree:       ", mss->lazyfree);
1343 	SEQ_PUT_DEC(" kB\nAnonHugePages:  ", mss->anonymous_thp);
1344 	SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
1345 	SEQ_PUT_DEC(" kB\nFilePmdMapped:  ", mss->file_thp);
1346 	SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
1347 	seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
1348 				  mss->private_hugetlb >> 10, 7);
1349 	SEQ_PUT_DEC(" kB\nSwap:           ", mss->swap);
1350 	SEQ_PUT_DEC(" kB\nSwapPss:        ",
1351 					mss->swap_pss >> PSS_SHIFT);
1352 	SEQ_PUT_DEC(" kB\nLocked:         ",
1353 					mss->pss_locked >> PSS_SHIFT);
1354 	seq_puts(m, " kB\n");
1355 }
1356 
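/*
 * Illustrative only: the common block emitted by __show_smap(), with
 * made-up values (abridged):
 *
 *	Rss:                1336 kB
 *	Pss:                 512 kB
 *	Pss_Dirty:           128 kB
 *	...
 *	Swap:                  0 kB
 *	Locked:                0 kB
 */
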
1357 static int show_smap(struct seq_file *m, void *v)
1358 {
1359 	struct vm_area_struct *vma = v;
1360 	struct mem_size_stats mss = {};
1361 
1362 	smap_gather_stats(vma, &mss, 0);
1363 
1364 	show_map_vma(m, vma);
1365 
1366 	SEQ_PUT_DEC("Size:           ", vma->vm_end - vma->vm_start);
1367 	SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
1368 	SEQ_PUT_DEC(" kB\nMMUPageSize:    ", vma_mmu_pagesize(vma));
1369 	seq_puts(m, " kB\n");
1370 
1371 	__show_smap(m, &mss, false);
1372 
1373 	seq_printf(m, "THPeligible:    %8u\n",
1374 		   !!thp_vma_allowable_orders(vma, vma->vm_flags, TVA_SMAPS,
1375 					      THP_ORDERS_ALL));
1376 
1377 	if (arch_pkeys_enabled())
1378 		seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma));
1379 	show_smap_vma_flags(m, vma);
1380 
1381 	return 0;
1382 }
1383 
1384 static int show_smaps_rollup(struct seq_file *m, void *v)
1385 {
1386 	struct proc_maps_private *priv = m->private;
1387 	struct mem_size_stats mss = {};
1388 	struct mm_struct *mm = priv->lock_ctx.mm;
1389 	struct vm_area_struct *vma;
1390 	unsigned long vma_start = 0, last_vma_end = 0;
1391 	int ret = 0;
1392 	VMA_ITERATOR(vmi, mm, 0);
1393 
1394 	priv->task = get_proc_task(priv->inode);
1395 	if (!priv->task)
1396 		return -ESRCH;
1397 
1398 	if (!mm || !mmget_not_zero(mm)) {
1399 		ret = -ESRCH;
1400 		goto out_put_task;
1401 	}
1402 
1403 	ret = mmap_read_lock_killable(mm);
1404 	if (ret)
1405 		goto out_put_mm;
1406 
1407 	hold_task_mempolicy(priv);
1408 	vma = vma_next(&vmi);
1409 
1410 	if (unlikely(!vma))
1411 		goto empty_set;
1412 
1413 	vma_start = vma->vm_start;
1414 	do {
1415 		smap_gather_stats(vma, &mss, 0);
1416 		last_vma_end = vma->vm_end;
1417 
1418 		/*
1419 		 * Release mmap_lock temporarily if someone wants to
1420 		 * access it for a write request.
1421 		 */
1422 		if (mmap_lock_is_contended(mm)) {
1423 			vma_iter_invalidate(&vmi);
1424 			mmap_read_unlock(mm);
1425 			ret = mmap_read_lock_killable(mm);
1426 			if (ret) {
1427 				release_task_mempolicy(priv);
1428 				goto out_put_mm;
1429 			}
1430 
1431 			/*
1432 			 * After dropping the lock, there are four cases to
1433 			 * consider. See the following example for explanation.
1434 			 *
1435 			 *   +------+------+-----------+
1436 			 *   | VMA1 | VMA2 | VMA3      |
1437 			 *   +------+------+-----------+
1438 			 *   |      |      |           |
1439 			 *  4k     8k     16k         400k
1440 			 *
1441 			 * Suppose we drop the lock after reading VMA2 due to
1442 			 * contention, then we get:
1443 			 *
1444 			 *	last_vma_end = 16k
1445 			 *
1446 			 * 1) VMA2 is freed, but VMA3 exists:
1447 			 *
1448 			 *    vma_next(vmi) will return VMA3.
1449 			 *    In this case, just continue from VMA3.
1450 			 *
1451 			 * 2) VMA2 still exists:
1452 			 *
1453 			 *    vma_next(vmi) will return VMA3.
1454 			 *    In this case, just continue from VMA3.
1455 			 *
1456 			 * 3) No more VMAs can be found:
1457 			 *
1458 			 *    vma_next(vmi) will return NULL.
1459 			 *    No more things to do, just break.
1460 			 *
1461 			 * 4) (last_vma_end - 1) is the middle of a vma (VMA'):
1462 			 *
1463 			 *    vma_next(vmi) will return VMA' whose range
1464 			 *    contains last_vma_end.
1465 			 *    Iterate VMA' from last_vma_end.
1466 			 */
1467 			vma = vma_next(&vmi);
1468 			/* Case 3 above */
1469 			if (!vma)
1470 				break;
1471 
1472 			/* Case 1 and 2 above */
1473 			if (vma->vm_start >= last_vma_end) {
1474 				smap_gather_stats(vma, &mss, 0);
1475 				last_vma_end = vma->vm_end;
1476 				continue;
1477 			}
1478 
1479 			/* Case 4 above */
1480 			if (vma->vm_end > last_vma_end) {
1481 				smap_gather_stats(vma, &mss, last_vma_end);
1482 				last_vma_end = vma->vm_end;
1483 			}
1484 		}
1485 	} for_each_vma(vmi, vma);
1486 
1487 empty_set:
1488 	show_vma_header_prefix(m, vma_start, last_vma_end, 0, 0, 0, 0);
1489 	seq_pad(m, ' ');
1490 	seq_puts(m, "[rollup]\n");
1491 
1492 	__show_smap(m, &mss, true);
1493 
1494 	release_task_mempolicy(priv);
1495 	mmap_read_unlock(mm);
1496 
1497 out_put_mm:
1498 	mmput(mm);
1499 out_put_task:
1500 	put_task_struct(priv->task);
1501 	priv->task = NULL;
1502 
1503 	return ret;
1504 }
1505 #undef SEQ_PUT_DEC
1506 
1507 static const struct seq_operations proc_pid_smaps_op = {
1508 	.start	= m_start,
1509 	.next	= m_next,
1510 	.stop	= m_stop,
1511 	.show	= show_smap
1512 };
1513 
1514 static int pid_smaps_open(struct inode *inode, struct file *file)
1515 {
1516 	return do_maps_open(inode, file, &proc_pid_smaps_op);
1517 }
1518 
1519 static int smaps_rollup_open(struct inode *inode, struct file *file)
1520 {
1521 	int ret;
1522 	struct proc_maps_private *priv;
1523 
1524 	priv = kzalloc(sizeof(*priv), GFP_KERNEL_ACCOUNT);
1525 	if (!priv)
1526 		return -ENOMEM;
1527 
1528 	ret = single_open(file, show_smaps_rollup, priv);
1529 	if (ret)
1530 		goto out_free;
1531 
1532 	priv->inode = inode;
1533 	priv->lock_ctx.mm = proc_mem_open(inode, PTRACE_MODE_READ);
1534 	if (IS_ERR_OR_NULL(priv->lock_ctx.mm)) {
1535 		ret = priv->lock_ctx.mm ? PTR_ERR(priv->lock_ctx.mm) : -ESRCH;
1536 
1537 		single_release(inode, file);
1538 		goto out_free;
1539 	}
1540 
1541 	return 0;
1542 
1543 out_free:
1544 	kfree(priv);
1545 	return ret;
1546 }
1547 
1548 static int smaps_rollup_release(struct inode *inode, struct file *file)
1549 {
1550 	struct seq_file *seq = file->private_data;
1551 	struct proc_maps_private *priv = seq->private;
1552 
1553 	if (priv->lock_ctx.mm)
1554 		mmdrop(priv->lock_ctx.mm);
1555 
1556 	kfree(priv);
1557 	return single_release(inode, file);
1558 }
1559 
1560 const struct file_operations proc_pid_smaps_operations = {
1561 	.open		= pid_smaps_open,
1562 	.read		= seq_read,
1563 	.llseek		= seq_lseek,
1564 	.release	= proc_map_release,
1565 };
1566 
1567 const struct file_operations proc_pid_smaps_rollup_operations = {
1568 	.open		= smaps_rollup_open,
1569 	.read		= seq_read,
1570 	.llseek		= seq_lseek,
1571 	.release	= smaps_rollup_release,
1572 };
1573 
1574 enum clear_refs_types {
1575 	CLEAR_REFS_ALL = 1,
1576 	CLEAR_REFS_ANON,
1577 	CLEAR_REFS_MAPPED,
1578 	CLEAR_REFS_SOFT_DIRTY,
1579 	CLEAR_REFS_MM_HIWATER_RSS,
1580 	CLEAR_REFS_LAST,
1581 };
1582 
1583 struct clear_refs_private {
1584 	enum clear_refs_types type;
1585 };
1586 
1587 static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1588 {
1589 	struct folio *folio;
1590 
1591 	if (!pte_write(pte))
1592 		return false;
1593 	if (!is_cow_mapping(vma->vm_flags))
1594 		return false;
1595 	if (likely(!mm_flags_test(MMF_HAS_PINNED, vma->vm_mm)))
1596 		return false;
1597 	folio = vm_normal_folio(vma, addr, pte);
1598 	if (!folio)
1599 		return false;
1600 	return folio_maybe_dma_pinned(folio);
1601 }
1602 
1603 static inline void clear_soft_dirty(struct vm_area_struct *vma,
1604 		unsigned long addr, pte_t *pte)
1605 {
1606 	if (!pgtable_supports_soft_dirty())
1607 		return;
1608 	/*
1609 	 * The soft-dirty tracker uses #PF-s to catch writes
1610 	 * to pages, so write-protect the pte as well. See
1611 	 * Documentation/admin-guide/mm/soft-dirty.rst for a full description
1612 	 * of how soft-dirty works.
1613 	 */
1614 	pte_t ptent = ptep_get(pte);
1615 
1616 	if (pte_none(ptent))
1617 		return;
1618 
1619 	if (pte_present(ptent)) {
1620 		pte_t old_pte;
1621 
1622 		if (pte_is_pinned(vma, addr, ptent))
1623 			return;
1624 		old_pte = ptep_modify_prot_start(vma, addr, pte);
1625 		ptent = pte_wrprotect(old_pte);
1626 		ptent = pte_clear_soft_dirty(ptent);
1627 		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
1628 	} else {
1629 		ptent = pte_swp_clear_soft_dirty(ptent);
1630 		set_pte_at(vma->vm_mm, addr, pte, ptent);
1631 	}
1632 }
1633 
1634 #if defined(CONFIG_TRANSPARENT_HUGEPAGE)
1635 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
1636 		unsigned long addr, pmd_t *pmdp)
1637 {
1638 	pmd_t old, pmd = *pmdp;
1639 
1640 	if (!pgtable_supports_soft_dirty())
1641 		return;
1642 
1643 	if (pmd_present(pmd)) {
1644 		/* See comment in change_huge_pmd() */
1645 		old = pmdp_invalidate(vma, addr, pmdp);
1646 		if (pmd_dirty(old))
1647 			pmd = pmd_mkdirty(pmd);
1648 		if (pmd_young(old))
1649 			pmd = pmd_mkyoung(pmd);
1650 
1651 		pmd = pmd_wrprotect(pmd);
1652 		pmd = pmd_clear_soft_dirty(pmd);
1653 
1654 		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1655 	} else if (pmd_is_migration_entry(pmd)) {
1656 		pmd = pmd_swp_clear_soft_dirty(pmd);
1657 		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1658 	}
1659 }
1660 #else
1661 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
1662 		unsigned long addr, pmd_t *pmdp)
1663 {
1664 }
1665 #endif
1666 
1667 static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
1668 				unsigned long end, struct mm_walk *walk)
1669 {
1670 	struct clear_refs_private *cp = walk->private;
1671 	struct vm_area_struct *vma = walk->vma;
1672 	pte_t *pte, ptent;
1673 	spinlock_t *ptl;
1674 	struct folio *folio;
1675 
1676 	ptl = pmd_trans_huge_lock(pmd, vma);
1677 	if (ptl) {
1678 		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1679 			clear_soft_dirty_pmd(vma, addr, pmd);
1680 			goto out;
1681 		}
1682 
1683 		if (!pmd_present(*pmd))
1684 			goto out;
1685 
1686 		folio = pmd_folio(*pmd);
1687 
1688 		/* Clear accessed and referenced bits. */
1689 		pmdp_test_and_clear_young(vma, addr, pmd);
1690 		folio_test_clear_young(folio);
1691 		folio_clear_referenced(folio);
1692 out:
1693 		spin_unlock(ptl);
1694 		return 0;
1695 	}
1696 
1697 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1698 	if (!pte) {
1699 		walk->action = ACTION_AGAIN;
1700 		return 0;
1701 	}
1702 	for (; addr != end; pte++, addr += PAGE_SIZE) {
1703 		ptent = ptep_get(pte);
1704 
1705 		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1706 			clear_soft_dirty(vma, addr, pte);
1707 			continue;
1708 		}
1709 
1710 		if (!pte_present(ptent))
1711 			continue;
1712 
1713 		folio = vm_normal_folio(vma, addr, ptent);
1714 		if (!folio)
1715 			continue;
1716 
1717 		/* Clear accessed and referenced bits. */
1718 		ptep_test_and_clear_young(vma, addr, pte);
1719 		folio_test_clear_young(folio);
1720 		folio_clear_referenced(folio);
1721 	}
1722 	pte_unmap_unlock(pte - 1, ptl);
1723 	cond_resched();
1724 	return 0;
1725 }
1726 
1727 static int clear_refs_test_walk(unsigned long start, unsigned long end,
1728 				struct mm_walk *walk)
1729 {
1730 	struct clear_refs_private *cp = walk->private;
1731 	struct vm_area_struct *vma = walk->vma;
1732 
1733 	if (vma->vm_flags & VM_PFNMAP)
1734 		return 1;
1735 
1736 	/*
1737 	 * Writing 1 to /proc/pid/clear_refs affects all pages.
1738 	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
1739 	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
1740 	 * Writing 4 to /proc/pid/clear_refs affects all pages.
1741 	 */
1742 	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
1743 		return 1;
1744 	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
1745 		return 1;
1746 	return 0;
1747 }
1748 
1749 static const struct mm_walk_ops clear_refs_walk_ops = {
1750 	.pmd_entry		= clear_refs_pte_range,
1751 	.test_walk		= clear_refs_test_walk,
1752 	.walk_lock		= PGWALK_WRLOCK,
1753 };
1754 
1755 static ssize_t clear_refs_write(struct file *file, const char __user *buf,
1756 				size_t count, loff_t *ppos)
1757 {
1758 	struct task_struct *task;
1759 	char buffer[PROC_NUMBUF] = {};
1760 	struct mm_struct *mm;
1761 	struct vm_area_struct *vma;
1762 	enum clear_refs_types type;
1763 	int itype;
1764 	int rv;
1765 
1766 	if (count > sizeof(buffer) - 1)
1767 		count = sizeof(buffer) - 1;
1768 	if (copy_from_user(buffer, buf, count))
1769 		return -EFAULT;
1770 	rv = kstrtoint(strstrip(buffer), 10, &itype);
1771 	if (rv < 0)
1772 		return rv;
1773 	type = (enum clear_refs_types)itype;
1774 	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
1775 		return -EINVAL;
1776 
1777 	task = get_proc_task(file_inode(file));
1778 	if (!task)
1779 		return -ESRCH;
1780 	mm = get_task_mm(task);
1781 	if (mm) {
1782 		VMA_ITERATOR(vmi, mm, 0);
1783 		struct mmu_notifier_range range;
1784 		struct clear_refs_private cp = {
1785 			.type = type,
1786 		};
1787 
1788 		if (mmap_write_lock_killable(mm)) {
1789 			count = -EINTR;
1790 			goto out_mm;
1791 		}
1792 		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
1793 			/*
1794 			 * Writing 5 to /proc/pid/clear_refs resets the peak
1795 			 * resident set size to this mm's current rss value.
1796 			 */
1797 			reset_mm_hiwater_rss(mm);
1798 			goto out_unlock;
1799 		}
1800 
1801 		if (type == CLEAR_REFS_SOFT_DIRTY) {
1802 			for_each_vma(vmi, vma) {
1803 				if (!(vma->vm_flags & VM_SOFTDIRTY))
1804 					continue;
1805 				vm_flags_clear(vma, VM_SOFTDIRTY);
1806 				vma_set_page_prot(vma);
1807 			}
1808 
1809 			inc_tlb_flush_pending(mm);
1810 			mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
1811 						0, mm, 0, -1UL);
1812 			mmu_notifier_invalidate_range_start(&range);
1813 		}
1814 		walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp);
1815 		if (type == CLEAR_REFS_SOFT_DIRTY) {
1816 			mmu_notifier_invalidate_range_end(&range);
1817 			flush_tlb_mm(mm);
1818 			dec_tlb_flush_pending(mm);
1819 		}
1820 out_unlock:
1821 		mmap_write_unlock(mm);
1822 out_mm:
1823 		mmput(mm);
1824 	}
1825 	put_task_struct(task);
1826 
1827 	return count;
1828 }
1829 
1830 const struct file_operations proc_clear_refs_operations = {
1831 	.write		= clear_refs_write,
1832 	.llseek		= noop_llseek,
1833 };
1834 
1835 typedef struct {
1836 	u64 pme;
1837 } pagemap_entry_t;
1838 
1839 struct pagemapread {
1840 	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
1841 	pagemap_entry_t *buffer;
1842 	bool show_pfn;
1843 };
1844 
1845 #define PAGEMAP_WALK_SIZE	(PMD_SIZE)
1846 #define PAGEMAP_WALK_MASK	(PMD_MASK)
1847 
1848 #define PM_ENTRY_BYTES		sizeof(pagemap_entry_t)
1849 #define PM_PFRAME_BITS		55
1850 #define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
1851 #define PM_SOFT_DIRTY		BIT_ULL(55)
1852 #define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
1853 #define PM_UFFD_WP		BIT_ULL(57)
1854 #define PM_GUARD_REGION		BIT_ULL(58)
1855 #define PM_FILE			BIT_ULL(61)
1856 #define PM_SWAP			BIT_ULL(62)
1857 #define PM_PRESENT		BIT_ULL(63)
1858 
1859 #define PM_END_OF_BUFFER    1
1860 
1861 static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
1862 {
1863 	return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
1864 }
1865 
1866 static int add_to_pagemap(pagemap_entry_t *pme, struct pagemapread *pm)
1867 {
1868 	pm->buffer[pm->pos++] = *pme;
1869 	if (pm->pos >= pm->len)
1870 		return PM_END_OF_BUFFER;
1871 	return 0;
1872 }
1873 
1874 static bool __folio_page_mapped_exclusively(struct folio *folio, struct page *page)
1875 {
1876 	if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
1877 		return folio_precise_page_mapcount(folio, page) == 1;
1878 	return !folio_maybe_mapped_shared(folio);
1879 }
1880 
1881 static int pagemap_pte_hole(unsigned long start, unsigned long end,
1882 			    __always_unused int depth, struct mm_walk *walk)
1883 {
1884 	struct pagemapread *pm = walk->private;
1885 	unsigned long addr = start;
1886 	int err = 0;
1887 
1888 	while (addr < end) {
1889 		struct vm_area_struct *vma = find_vma(walk->mm, addr);
1890 		pagemap_entry_t pme = make_pme(0, 0);
1891 		/* End of address space hole, which we mark as non-present. */
1892 		unsigned long hole_end;
1893 
1894 		if (vma)
1895 			hole_end = min(end, vma->vm_start);
1896 		else
1897 			hole_end = end;
1898 
1899 		for (; addr < hole_end; addr += PAGE_SIZE) {
1900 			err = add_to_pagemap(&pme, pm);
1901 			if (err)
1902 				goto out;
1903 		}
1904 
1905 		if (!vma)
1906 			break;
1907 
1908 		/* Addresses in the VMA. */
1909 		if (vma->vm_flags & VM_SOFTDIRTY)
1910 			pme = make_pme(0, PM_SOFT_DIRTY);
1911 		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
1912 			err = add_to_pagemap(&pme, pm);
1913 			if (err)
1914 				goto out;
1915 		}
1916 	}
1917 out:
1918 	return err;
1919 }
1920 
1921 static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
1922 		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1923 {
1924 	u64 frame = 0, flags = 0;
1925 	struct page *page = NULL;
1926 	struct folio *folio;
1927 
1928 	if (pte_none(pte))
1929 		goto out;
1930 
1931 	if (pte_present(pte)) {
1932 		if (pm->show_pfn)
1933 			frame = pte_pfn(pte);
1934 		flags |= PM_PRESENT;
1935 		page = vm_normal_page(vma, addr, pte);
1936 		if (pte_soft_dirty(pte))
1937 			flags |= PM_SOFT_DIRTY;
1938 		if (pte_uffd_wp(pte))
1939 			flags |= PM_UFFD_WP;
1940 	} else {
1941 		softleaf_t entry;
1942 
1943 		if (pte_swp_soft_dirty(pte))
1944 			flags |= PM_SOFT_DIRTY;
1945 		if (pte_swp_uffd_wp(pte))
1946 			flags |= PM_UFFD_WP;
1947 		entry = softleaf_from_pte(pte);
1948 		if (pm->show_pfn) {
1949 			pgoff_t offset;
1950 
1951 			/*
1952 			 * For PFN swap entries, keep the offset field a bare
1953 			 * PFN, for compatibility with the old pagemap format.
1954 			 */
1955 			if (softleaf_has_pfn(entry))
1956 				offset = softleaf_to_pfn(entry);
1957 			else
1958 				offset = swp_offset(entry);
1959 			frame = swp_type(entry) |
1960 			    (offset << MAX_SWAPFILES_SHIFT);
1961 		}
1962 		flags |= PM_SWAP;
1963 		if (softleaf_has_pfn(entry))
1964 			page = softleaf_to_page(entry);
1965 		if (softleaf_is_uffd_wp_marker(entry))
1966 			flags |= PM_UFFD_WP;
1967 		if (softleaf_is_guard_marker(entry))
1968 			flags |= PM_GUARD_REGION;
1969 	}
1970 
1971 	if (page) {
1972 		folio = page_folio(page);
1973 		if (!folio_test_anon(folio))
1974 			flags |= PM_FILE;
1975 		if ((flags & PM_PRESENT) &&
1976 		    __folio_page_mapped_exclusively(folio, page))
1977 			flags |= PM_MMAP_EXCLUSIVE;
1978 	}
1979 
1980 out:
1981 	if (vma->vm_flags & VM_SOFTDIRTY)
1982 		flags |= PM_SOFT_DIRTY;
1983 
1984 	return make_pme(frame, flags);
1985 }
1986 
1987 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1988 static int pagemap_pmd_range_thp(pmd_t *pmdp, unsigned long addr,
1989 		unsigned long end, struct vm_area_struct *vma,
1990 		struct pagemapread *pm)
1991 {
1992 	unsigned int idx = (addr & ~PMD_MASK) >> PAGE_SHIFT;
1993 	u64 flags = 0, frame = 0;
1994 	pmd_t pmd = *pmdp;
1995 	struct page *page = NULL;
1996 	struct folio *folio = NULL;
1997 	int err = 0;
1998 
1999 	if (vma->vm_flags & VM_SOFTDIRTY)
2000 		flags |= PM_SOFT_DIRTY;
2001 
2002 	if (pmd_none(pmd))
2003 		goto populate_pagemap;
2004 
2005 	if (pmd_present(pmd)) {
2006 		page = pmd_page(pmd);
2007 
2008 		flags |= PM_PRESENT;
2009 		if (pmd_soft_dirty(pmd))
2010 			flags |= PM_SOFT_DIRTY;
2011 		if (pmd_uffd_wp(pmd))
2012 			flags |= PM_UFFD_WP;
2013 		if (pm->show_pfn)
2014 			frame = pmd_pfn(pmd) + idx;
2015 	} else if (thp_migration_supported()) {
2016 		const softleaf_t entry = softleaf_from_pmd(pmd);
2017 		unsigned long offset;
2018 
2019 		if (pm->show_pfn) {
2020 			if (softleaf_has_pfn(entry))
2021 				offset = softleaf_to_pfn(entry) + idx;
2022 			else
2023 				offset = swp_offset(entry) + idx;
2024 			frame = swp_type(entry) |
2025 				(offset << MAX_SWAPFILES_SHIFT);
2026 		}
2027 		flags |= PM_SWAP;
2028 		if (pmd_swp_soft_dirty(pmd))
2029 			flags |= PM_SOFT_DIRTY;
2030 		if (pmd_swp_uffd_wp(pmd))
2031 			flags |= PM_UFFD_WP;
2032 		VM_WARN_ON_ONCE(!pmd_is_migration_entry(pmd));
2033 		page = softleaf_to_page(entry);
2034 	}
2035 
2036 	if (page) {
2037 		folio = page_folio(page);
2038 		if (!folio_test_anon(folio))
2039 			flags |= PM_FILE;
2040 	}
2041 
2042 populate_pagemap:
2043 	for (; addr != end; addr += PAGE_SIZE, idx++) {
2044 		u64 cur_flags = flags;
2045 		pagemap_entry_t pme;
2046 
2047 		if (folio && (flags & PM_PRESENT) &&
2048 		    __folio_page_mapped_exclusively(folio, page))
2049 			cur_flags |= PM_MMAP_EXCLUSIVE;
2050 
2051 		pme = make_pme(frame, cur_flags);
2052 		err = add_to_pagemap(&pme, pm);
2053 		if (err)
2054 			break;
2055 		if (pm->show_pfn) {
2056 			if (flags & PM_PRESENT)
2057 				frame++;
2058 			else if (flags & PM_SWAP)
2059 				frame += (1 << MAX_SWAPFILES_SHIFT);
2060 		}
2061 	}
2062 	return err;
2063 }
2064 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2065 
2066 static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
2067 			     struct mm_walk *walk)
2068 {
2069 	struct vm_area_struct *vma = walk->vma;
2070 	struct pagemapread *pm = walk->private;
2071 	spinlock_t *ptl;
2072 	pte_t *pte, *orig_pte;
2073 	int err = 0;
2074 
2075 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2076 	ptl = pmd_trans_huge_lock(pmdp, vma);
2077 	if (ptl) {
2078 		err = pagemap_pmd_range_thp(pmdp, addr, end, vma, pm);
2079 		spin_unlock(ptl);
2080 		return err;
2081 	}
2082 #endif
2083 
2084 	/*
2085 	 * We can assume that @vma always points to a valid VMA and @end never
2086 	 * goes beyond vma->vm_end.
2087 	 */
2088 	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
2089 	if (!pte) {
2090 		walk->action = ACTION_AGAIN;
2091 		return err;
2092 	}
2093 	for (; addr < end; pte++, addr += PAGE_SIZE) {
2094 		pagemap_entry_t pme;
2095 
2096 		pme = pte_to_pagemap_entry(pm, vma, addr, ptep_get(pte));
2097 		err = add_to_pagemap(&pme, pm);
2098 		if (err)
2099 			break;
2100 	}
2101 	pte_unmap_unlock(orig_pte, ptl);
2102 
2103 	cond_resched();
2104 
2105 	return err;
2106 }
2107 
2108 #ifdef CONFIG_HUGETLB_PAGE
2109 /* This function walks within one hugetlb entry in a single call */
2110 static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
2111 				 unsigned long addr, unsigned long end,
2112 				 struct mm_walk *walk)
2113 {
2114 	struct pagemapread *pm = walk->private;
2115 	struct vm_area_struct *vma = walk->vma;
2116 	u64 flags = 0, frame = 0;
2117 	spinlock_t *ptl;
2118 	int err = 0;
2119 	pte_t pte;
2120 
2121 	if (vma->vm_flags & VM_SOFTDIRTY)
2122 		flags |= PM_SOFT_DIRTY;
2123 
2124 	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, ptep);
2125 	pte = huge_ptep_get(walk->mm, addr, ptep);
2126 	if (pte_present(pte)) {
2127 		struct folio *folio = page_folio(pte_page(pte));
2128 
2129 		if (!folio_test_anon(folio))
2130 			flags |= PM_FILE;
2131 
2132 		if (!folio_maybe_mapped_shared(folio) &&
2133 		    !hugetlb_pmd_shared(ptep))
2134 			flags |= PM_MMAP_EXCLUSIVE;
2135 
2136 		if (huge_pte_uffd_wp(pte))
2137 			flags |= PM_UFFD_WP;
2138 
2139 		flags |= PM_PRESENT;
2140 		if (pm->show_pfn)
2141 			frame = pte_pfn(pte) +
2142 				((addr & ~hmask) >> PAGE_SHIFT);
2143 	} else if (pte_swp_uffd_wp_any(pte)) {
2144 		flags |= PM_UFFD_WP;
2145 	}
2146 
2147 	for (; addr != end; addr += PAGE_SIZE) {
2148 		pagemap_entry_t pme = make_pme(frame, flags);
2149 
2150 		err = add_to_pagemap(&pme, pm);
2151 		if (err)
2152 			break;
2153 		if (pm->show_pfn && (flags & PM_PRESENT))
2154 			frame++;
2155 	}
2156 
2157 	spin_unlock(ptl);
2158 	cond_resched();
2159 
2160 	return err;
2161 }
2162 #else
2163 #define pagemap_hugetlb_range	NULL
2164 #endif /* CONFIG_HUGETLB_PAGE */
2165 
2166 static const struct mm_walk_ops pagemap_ops = {
2167 	.pmd_entry	= pagemap_pmd_range,
2168 	.pte_hole	= pagemap_pte_hole,
2169 	.hugetlb_entry	= pagemap_hugetlb_range,
2170 	.walk_lock	= PGWALK_RDLOCK,
2171 };
2172 
2173 /*
2174  * /proc/pid/pagemap - an array mapping virtual pages to pfns
2175  *
2176  * For each page in the address space, this file contains one 64-bit entry
2177  * consisting of the following:
2178  *
2179  * Bits 0-54  page frame number (PFN) if present
2180  * Bits 0-4   swap type if swapped
2181  * Bits 5-54  swap offset if swapped
2182  * Bit  55    pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
2183  * Bit  56    page exclusively mapped
2184  * Bit  57    pte is uffd-wp write-protected
2185  * Bit  58    pte is a guard region
2186  * Bits 59-60 zero
2187  * Bit  61    page is file-page or shared-anon
2188  * Bit  62    page swapped
2189  * Bit  63    page present
2190  *
2191  * If the page is not present but in swap, then the PFN contains an
2192  * encoding of the swap file number and the page's offset into the
2193  * swap. Unmapped pages return a null PFN. This allows determining
2194  * precisely which pages are mapped (or in swap) and comparing mapped
2195  * pages between processes.
2196  *
2197  * Efficient users of this interface will use /proc/pid/maps to
2198  * determine which areas of memory are actually mapped and llseek to
2199  * skip over unmapped regions.
2200  */
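/*
 * A minimal userspace sketch (illustrative, not part of this file) that
 * fetches the entry for one virtual address of the calling process;
 * error handling is elided and the helper name is arbitrary:
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	static uint64_t pagemap_entry(uintptr_t vaddr)
 *	{
 *		int fd = open("/proc/self/pagemap", O_RDONLY);
 *		uint64_t ent = 0;
 *		// one 8-byte entry per virtual page
 *		off_t off = (off_t)(vaddr / sysconf(_SC_PAGESIZE)) * sizeof(ent);
 *
 *		pread(fd, &ent, sizeof(ent), off);
 *		close(fd);
 *		return ent;
 *	}
 *
 * Bit 63 of the result reports presence, bit 62 swap, and (given
 * CAP_SYS_ADMIN) bits 0-54 hold the PFN of a present page.
 */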
2201 static ssize_t pagemap_read(struct file *file, char __user *buf,
2202 			    size_t count, loff_t *ppos)
2203 {
2204 	struct mm_struct *mm = file->private_data;
2205 	struct pagemapread pm;
2206 	unsigned long src;
2207 	unsigned long svpfn;
2208 	unsigned long start_vaddr;
2209 	unsigned long end_vaddr;
2210 	int ret = 0, copied = 0;
2211 
2212 	if (!mm || !mmget_not_zero(mm))
2213 		goto out;
2214 
2215 	ret = -EINVAL;
2216 	/* file position must be aligned */
2217 	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
2218 		goto out_mm;
2219 
2220 	ret = 0;
2221 	if (!count)
2222 		goto out_mm;
2223 
2224 	/* do not disclose physical addresses: attack vector */
2225 	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);
2226 
2227 	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
2228 	pm.buffer = kmalloc_array(pm.len, PM_ENTRY_BYTES, GFP_KERNEL);
2229 	ret = -ENOMEM;
2230 	if (!pm.buffer)
2231 		goto out_mm;
2232 
2233 	src = *ppos;
2234 	svpfn = src / PM_ENTRY_BYTES;
2235 	end_vaddr = mm->task_size;
2236 
2237 	/* watch out for wraparound */
2238 	start_vaddr = end_vaddr;
2239 	if (svpfn <= (ULONG_MAX >> PAGE_SHIFT)) {
2240 		unsigned long end;
2241 
2242 		ret = mmap_read_lock_killable(mm);
2243 		if (ret)
2244 			goto out_free;
2245 		start_vaddr = untagged_addr_remote(mm, svpfn << PAGE_SHIFT);
2246 		mmap_read_unlock(mm);
2247 
2248 		end = start_vaddr + ((count / PM_ENTRY_BYTES) << PAGE_SHIFT);
2249 		if (end >= start_vaddr && end < mm->task_size)
2250 			end_vaddr = end;
2251 	}
2252 
2253 	/* Ensure the address is inside the task */
2254 	if (start_vaddr > mm->task_size)
2255 		start_vaddr = end_vaddr;
2256 
2257 	ret = 0;
2258 	while (count && (start_vaddr < end_vaddr)) {
2259 		int len;
2260 		unsigned long end;
2261 
2262 		pm.pos = 0;
2263 		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
2264 		/* overflow ? */
2265 		if (end < start_vaddr || end > end_vaddr)
2266 			end = end_vaddr;
2267 		ret = mmap_read_lock_killable(mm);
2268 		if (ret)
2269 			goto out_free;
2270 		ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm);
2271 		mmap_read_unlock(mm);
2272 		start_vaddr = end;
2273 
2274 		len = min(count, PM_ENTRY_BYTES * pm.pos);
2275 		if (copy_to_user(buf, pm.buffer, len)) {
2276 			ret = -EFAULT;
2277 			goto out_free;
2278 		}
2279 		copied += len;
2280 		buf += len;
2281 		count -= len;
2282 	}
2283 	*ppos += copied;
2284 	if (!ret || ret == PM_END_OF_BUFFER)
2285 		ret = copied;
2286 
2287 out_free:
2288 	kfree(pm.buffer);
2289 out_mm:
2290 	mmput(mm);
2291 out:
2292 	return ret;
2293 }
2294 
2295 static int pagemap_open(struct inode *inode, struct file *file)
2296 {
2297 	struct mm_struct *mm;
2298 
2299 	mm = proc_mem_open(inode, PTRACE_MODE_READ);
2300 	if (IS_ERR_OR_NULL(mm))
2301 		return mm ? PTR_ERR(mm) : -ESRCH;
2302 	file->private_data = mm;
2303 	return 0;
2304 }
2305 
2306 static int pagemap_release(struct inode *inode, struct file *file)
2307 {
2308 	struct mm_struct *mm = file->private_data;
2309 
2310 	if (mm)
2311 		mmdrop(mm);
2312 	return 0;
2313 }
2314 
2315 #define PM_SCAN_CATEGORIES	(PAGE_IS_WPALLOWED | PAGE_IS_WRITTEN |	\
2316 				 PAGE_IS_FILE |	PAGE_IS_PRESENT |	\
2317 				 PAGE_IS_SWAPPED | PAGE_IS_PFNZERO |	\
2318 				 PAGE_IS_HUGE | PAGE_IS_SOFT_DIRTY |	\
2319 				 PAGE_IS_GUARD)
2320 #define PM_SCAN_FLAGS		(PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC)
2321 
2322 struct pagemap_scan_private {
2323 	struct pm_scan_arg arg;
2324 	unsigned long masks_of_interest, cur_vma_category;
2325 	struct page_region *vec_buf;
2326 	unsigned long vec_buf_len, vec_buf_index, found_pages;
2327 	struct page_region __user *vec_out;
2328 };
2329 
2330 static unsigned long pagemap_page_category(struct pagemap_scan_private *p,
2331 					   struct vm_area_struct *vma,
2332 					   unsigned long addr, pte_t pte)
2333 {
2334 	unsigned long categories;
2335 
2336 	if (pte_none(pte))
2337 		return 0;
2338 
2339 	if (pte_present(pte)) {
2340 		struct page *page;
2341 
2342 		categories = PAGE_IS_PRESENT;
2343 
2344 		if (!pte_uffd_wp(pte))
2345 			categories |= PAGE_IS_WRITTEN;
2346 
2347 		if (p->masks_of_interest & PAGE_IS_FILE) {
2348 			page = vm_normal_page(vma, addr, pte);
2349 			if (page && !PageAnon(page))
2350 				categories |= PAGE_IS_FILE;
2351 		}
2352 
2353 		if (is_zero_pfn(pte_pfn(pte)))
2354 			categories |= PAGE_IS_PFNZERO;
2355 		if (pte_soft_dirty(pte))
2356 			categories |= PAGE_IS_SOFT_DIRTY;
2357 	} else {
2358 		softleaf_t entry;
2359 
2360 		categories = PAGE_IS_SWAPPED;
2361 
2362 		if (!pte_swp_uffd_wp_any(pte))
2363 			categories |= PAGE_IS_WRITTEN;
2364 
2365 		entry = softleaf_from_pte(pte);
2366 		if (softleaf_is_guard_marker(entry))
2367 			categories |= PAGE_IS_GUARD;
2368 		else if ((p->masks_of_interest & PAGE_IS_FILE) &&
2369 			 softleaf_has_pfn(entry) &&
2370 			 !folio_test_anon(softleaf_to_folio(entry)))
2371 			categories |= PAGE_IS_FILE;
2372 
2373 		if (pte_swp_soft_dirty(pte))
2374 			categories |= PAGE_IS_SOFT_DIRTY;
2375 	}
2376 
2377 	return categories;
2378 }
2379 
2380 static void make_uffd_wp_pte(struct vm_area_struct *vma,
2381 			     unsigned long addr, pte_t *pte, pte_t ptent)
2382 {
2383 	if (pte_present(ptent)) {
2384 		pte_t old_pte;
2385 
2386 		old_pte = ptep_modify_prot_start(vma, addr, pte);
2387 		ptent = pte_mkuffd_wp(old_pte);
2388 		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
2389 	} else if (pte_none(ptent)) {
2390 		set_pte_at(vma->vm_mm, addr, pte,
2391 			   make_pte_marker(PTE_MARKER_UFFD_WP));
2392 	} else {
2393 		ptent = pte_swp_mkuffd_wp(ptent);
2394 		set_pte_at(vma->vm_mm, addr, pte, ptent);
2395 	}
2396 }
2397 
2398 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2399 static unsigned long pagemap_thp_category(struct pagemap_scan_private *p,
2400 					  struct vm_area_struct *vma,
2401 					  unsigned long addr, pmd_t pmd)
2402 {
2403 	unsigned long categories = PAGE_IS_HUGE;
2404 
2405 	if (pmd_none(pmd))
2406 		return categories;
2407 
2408 	if (pmd_present(pmd)) {
2409 		struct page *page;
2410 
2411 		categories |= PAGE_IS_PRESENT;
2412 		if (!pmd_uffd_wp(pmd))
2413 			categories |= PAGE_IS_WRITTEN;
2414 
2415 		if (p->masks_of_interest & PAGE_IS_FILE) {
2416 			page = vm_normal_page_pmd(vma, addr, pmd);
2417 			if (page && !PageAnon(page))
2418 				categories |= PAGE_IS_FILE;
2419 		}
2420 
2421 		if (is_huge_zero_pmd(pmd))
2422 			categories |= PAGE_IS_PFNZERO;
2423 		if (pmd_soft_dirty(pmd))
2424 			categories |= PAGE_IS_SOFT_DIRTY;
2425 	} else {
2426 		categories |= PAGE_IS_SWAPPED;
2427 		if (!pmd_swp_uffd_wp(pmd))
2428 			categories |= PAGE_IS_WRITTEN;
2429 		if (pmd_swp_soft_dirty(pmd))
2430 			categories |= PAGE_IS_SOFT_DIRTY;
2431 
2432 		if (p->masks_of_interest & PAGE_IS_FILE) {
2433 			const softleaf_t entry = softleaf_from_pmd(pmd);
2434 
2435 			if (softleaf_has_pfn(entry) &&
2436 			    !folio_test_anon(softleaf_to_folio(entry)))
2437 				categories |= PAGE_IS_FILE;
2438 		}
2439 	}
2440 
2441 	return categories;
2442 }
2443 
2444 static void make_uffd_wp_pmd(struct vm_area_struct *vma,
2445 			     unsigned long addr, pmd_t *pmdp)
2446 {
2447 	pmd_t old, pmd = *pmdp;
2448 
2449 	if (pmd_present(pmd)) {
2450 		old = pmdp_invalidate_ad(vma, addr, pmdp);
2451 		pmd = pmd_mkuffd_wp(old);
2452 		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
2453 	} else if (pmd_is_migration_entry(pmd)) {
2454 		pmd = pmd_swp_mkuffd_wp(pmd);
2455 		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
2456 	}
2457 }
2458 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2459 
2460 #ifdef CONFIG_HUGETLB_PAGE
2461 static unsigned long pagemap_hugetlb_category(pte_t pte)
2462 {
2463 	unsigned long categories = PAGE_IS_HUGE;
2464 
2465 	if (pte_none(pte))
2466 		return categories;
2467 
2468 	/*
2469 	 * According to pagemap_hugetlb_range(), file-backed HugeTLB
2470 	 * pages cannot be swapped, so PAGE_IS_FILE is not checked for
2471 	 * swapped pages.
2472 	 */
2473 	if (pte_present(pte)) {
2474 		categories |= PAGE_IS_PRESENT;
2475 
2476 		if (!huge_pte_uffd_wp(pte))
2477 			categories |= PAGE_IS_WRITTEN;
2478 		if (!PageAnon(pte_page(pte)))
2479 			categories |= PAGE_IS_FILE;
2480 		if (is_zero_pfn(pte_pfn(pte)))
2481 			categories |= PAGE_IS_PFNZERO;
2482 		if (pte_soft_dirty(pte))
2483 			categories |= PAGE_IS_SOFT_DIRTY;
2484 	} else {
2485 		categories |= PAGE_IS_SWAPPED;
2486 
2487 		if (!pte_swp_uffd_wp_any(pte))
2488 			categories |= PAGE_IS_WRITTEN;
2489 		if (pte_swp_soft_dirty(pte))
2490 			categories |= PAGE_IS_SOFT_DIRTY;
2491 	}
2492 
2493 	return categories;
2494 }
2495 
2496 static void make_uffd_wp_huge_pte(struct vm_area_struct *vma,
2497 				  unsigned long addr, pte_t *ptep,
2498 				  pte_t ptent)
2499 {
2500 	const unsigned long psize = huge_page_size(hstate_vma(vma));
2501 	softleaf_t entry;
2502 
2503 	if (huge_pte_none(ptent)) {
2504 		set_huge_pte_at(vma->vm_mm, addr, ptep,
2505 				make_pte_marker(PTE_MARKER_UFFD_WP), psize);
2506 		return;
2507 	}
2508 
2509 	entry = softleaf_from_pte(ptent);
2510 	if (softleaf_is_hwpoison(entry) || softleaf_is_marker(entry))
2511 		return;
2512 
2513 	if (softleaf_is_migration(entry))
2514 		set_huge_pte_at(vma->vm_mm, addr, ptep,
2515 				pte_swp_mkuffd_wp(ptent), psize);
2516 	else
2517 		huge_ptep_modify_prot_commit(vma, addr, ptep, ptent,
2518 					     huge_pte_mkuffd_wp(ptent));
2519 }
2520 #endif /* CONFIG_HUGETLB_PAGE */
2521 
2522 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
2523 static void pagemap_scan_backout_range(struct pagemap_scan_private *p,
2524 				       unsigned long addr, unsigned long end)
2525 {
2526 	struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index];
2527 
2528 	if (!p->vec_buf)
2529 		return;
2530 
2531 	if (cur_buf->start != addr)
2532 		cur_buf->end = addr;
2533 	else
2534 		cur_buf->start = cur_buf->end = 0;
2535 
2536 	p->found_pages -= (end - addr) / PAGE_SIZE;
2537 }
2538 #endif
2539 
2540 static bool pagemap_scan_is_interesting_page(unsigned long categories,
2541 					     const struct pagemap_scan_private *p)
2542 {
2543 	categories ^= p->arg.category_inverted;
2544 	if ((categories & p->arg.category_mask) != p->arg.category_mask)
2545 		return false;
2546 	if (p->arg.category_anyof_mask && !(categories & p->arg.category_anyof_mask))
2547 		return false;
2548 
2549 	return true;
2550 }
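/*
 * Worked example (illustrative): with category_mask = PAGE_IS_WRITTEN
 * and category_inverted = PAGE_IS_WRITTEN, the XOR above flips the
 * WRITTEN bit before the mask test, so the scan selects exactly the
 * pages that have *not* been written since the last write-protect pass.
 */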
2551 
2552 static bool pagemap_scan_is_interesting_vma(unsigned long categories,
2553 					    const struct pagemap_scan_private *p)
2554 {
2555 	unsigned long required = p->arg.category_mask & PAGE_IS_WPALLOWED;
2556 
2557 	categories ^= p->arg.category_inverted;
2558 	if ((categories & required) != required)
2559 		return false;
2560 
2561 	return true;
2562 }
2563 
2564 static int pagemap_scan_test_walk(unsigned long start, unsigned long end,
2565 				  struct mm_walk *walk)
2566 {
2567 	struct pagemap_scan_private *p = walk->private;
2568 	struct vm_area_struct *vma = walk->vma;
2569 	unsigned long vma_category = 0;
2570 	bool wp_allowed = userfaultfd_wp_async(vma) &&
2571 	    userfaultfd_wp_use_markers(vma);
2572 
2573 	if (!wp_allowed) {
2574 		/* User requested an explicit failure if wp-async is unsupported */
2575 		if (p->arg.flags & PM_SCAN_CHECK_WPASYNC)
2576 			return -EPERM;
2577 		/*
2578 		 * The user requires wr-protect but allows unsupported
2579 		 * vmas to be skipped silently.
2580 		 */
2581 		if (p->arg.flags & PM_SCAN_WP_MATCHING)
2582 			return 1;
2583 		/*
2584 		 * Otherwise the request doesn't involve wr-protects at all;
2585 		 * fall through to the remaining checks and allow the vma walk.
2586 		 */
2587 	}
2588 
2589 	if (vma->vm_flags & VM_PFNMAP)
2590 		return 1;
2591 
2592 	if (wp_allowed)
2593 		vma_category |= PAGE_IS_WPALLOWED;
2594 
2595 	if (vma->vm_flags & VM_SOFTDIRTY)
2596 		vma_category |= PAGE_IS_SOFT_DIRTY;
2597 
2598 	if (!pagemap_scan_is_interesting_vma(vma_category, p))
2599 		return 1;
2600 
2601 	p->cur_vma_category = vma_category;
2602 
2603 	return 0;
2604 }
2605 
2606 static bool pagemap_scan_push_range(unsigned long categories,
2607 				    struct pagemap_scan_private *p,
2608 				    unsigned long addr, unsigned long end)
2609 {
2610 	struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index];
2611 
2612 	/*
2613 	 * When no output buffer is provided at all, the sentinel values
2614 	 * won't match here: `cur_buf->end` can only be non-zero when the
2615 	 * current region is non-empty.
2616 	 */
2617 	if (addr == cur_buf->end && categories == cur_buf->categories) {
2618 		cur_buf->end = end;
2619 		return true;
2620 	}
2621 
2622 	if (cur_buf->end) {
2623 		if (p->vec_buf_index >= p->vec_buf_len - 1)
2624 			return false;
2625 
2626 		cur_buf = &p->vec_buf[++p->vec_buf_index];
2627 	}
2628 
2629 	cur_buf->start = addr;
2630 	cur_buf->end = end;
2631 	cur_buf->categories = categories;
2632 
2633 	return true;
2634 }
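/*
 * Illustration: two consecutive calls covering [a, b) and [b, c) with
 * identical categories coalesce into a single page_region {a, c}; a
 * category change at b starts a new slot in the output vector instead.
 */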
2635 
2636 static int pagemap_scan_output(unsigned long categories,
2637 			       struct pagemap_scan_private *p,
2638 			       unsigned long addr, unsigned long *end)
2639 {
2640 	unsigned long n_pages, total_pages;
2641 	int ret = 0;
2642 
2643 	if (!p->vec_buf)
2644 		return 0;
2645 
2646 	categories &= p->arg.return_mask;
2647 
2648 	n_pages = (*end - addr) / PAGE_SIZE;
2649 	if (check_add_overflow(p->found_pages, n_pages, &total_pages) ||
2650 	    total_pages > p->arg.max_pages) {
2651 		size_t n_too_much = total_pages - p->arg.max_pages;
2652 		*end -= n_too_much * PAGE_SIZE;
2653 		n_pages -= n_too_much;
2654 		ret = -ENOSPC;
2655 	}
2656 
2657 	if (!pagemap_scan_push_range(categories, p, addr, *end)) {
2658 		*end = addr;
2659 		n_pages = 0;
2660 		ret = -ENOSPC;
2661 	}
2662 
2663 	p->found_pages += n_pages;
2664 	if (ret)
2665 		p->arg.walk_end = *end;
2666 
2667 	return ret;
2668 }
2669 
2670 static int pagemap_scan_thp_entry(pmd_t *pmd, unsigned long start,
2671 				  unsigned long end, struct mm_walk *walk)
2672 {
2673 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2674 	struct pagemap_scan_private *p = walk->private;
2675 	struct vm_area_struct *vma = walk->vma;
2676 	unsigned long categories;
2677 	spinlock_t *ptl;
2678 	int ret = 0;
2679 
2680 	ptl = pmd_trans_huge_lock(pmd, vma);
2681 	if (!ptl)
2682 		return -ENOENT;
2683 
2684 	categories = p->cur_vma_category |
2685 		     pagemap_thp_category(p, vma, start, *pmd);
2686 
2687 	if (!pagemap_scan_is_interesting_page(categories, p))
2688 		goto out_unlock;
2689 
2690 	ret = pagemap_scan_output(categories, p, start, &end);
2691 	if (start == end)
2692 		goto out_unlock;
2693 
2694 	if (~p->arg.flags & PM_SCAN_WP_MATCHING)
2695 		goto out_unlock;
2696 	if (~categories & PAGE_IS_WRITTEN)
2697 		goto out_unlock;
2698 
2699 	/*
2700 	 * Break huge page into small pages if the WP operation
2701 	 * needs to be performed on a portion of the huge page.
2702 	 */
2703 	if (end != start + HPAGE_SIZE) {
2704 		spin_unlock(ptl);
2705 		split_huge_pmd(vma, pmd, start);
2706 		pagemap_scan_backout_range(p, start, end);
2707 		/* Report as if there was no THP */
2708 		return -ENOENT;
2709 	}
2710 
2711 	make_uffd_wp_pmd(vma, start, pmd);
2712 	flush_tlb_range(vma, start, end);
2713 out_unlock:
2714 	spin_unlock(ptl);
2715 	return ret;
2716 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
2717 	return -ENOENT;
2718 #endif
2719 }
2720 
2721 static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
2722 				  unsigned long end, struct mm_walk *walk)
2723 {
2724 	struct pagemap_scan_private *p = walk->private;
2725 	struct vm_area_struct *vma = walk->vma;
2726 	unsigned long addr, flush_end = 0;
2727 	pte_t *pte, *start_pte;
2728 	spinlock_t *ptl;
2729 	int ret;
2730 
2731 	ret = pagemap_scan_thp_entry(pmd, start, end, walk);
2732 	if (ret != -ENOENT)
2733 		return ret;
2734 
2735 	ret = 0;
2736 	start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
2737 	if (!pte) {
2738 		walk->action = ACTION_AGAIN;
2739 		return 0;
2740 	}
2741 
2742 	arch_enter_lazy_mmu_mode();
2743 
2744 	if ((p->arg.flags & PM_SCAN_WP_MATCHING) && !p->vec_out) {
2745 		/* Fast path for performing exclusive WP */
2746 		for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
2747 			pte_t ptent = ptep_get(pte);
2748 
2749 			if ((pte_present(ptent) && pte_uffd_wp(ptent)) ||
2750 			    pte_swp_uffd_wp_any(ptent))
2751 				continue;
2752 			make_uffd_wp_pte(vma, addr, pte, ptent);
2753 			if (!flush_end)
2754 				start = addr;
2755 			flush_end = addr + PAGE_SIZE;
2756 		}
2757 		goto flush_and_return;
2758 	}
2759 
2760 	if (!p->arg.category_anyof_mask && !p->arg.category_inverted &&
2761 	    p->arg.category_mask == PAGE_IS_WRITTEN &&
2762 	    p->arg.return_mask == PAGE_IS_WRITTEN) {
2763 		for (addr = start; addr < end; pte++, addr += PAGE_SIZE) {
2764 			unsigned long next = addr + PAGE_SIZE;
2765 			pte_t ptent = ptep_get(pte);
2766 
2767 			if ((pte_present(ptent) && pte_uffd_wp(ptent)) ||
2768 			    pte_swp_uffd_wp_any(ptent))
2769 				continue;
2770 			ret = pagemap_scan_output(p->cur_vma_category | PAGE_IS_WRITTEN,
2771 						  p, addr, &next);
2772 			if (next == addr)
2773 				break;
2774 			if (~p->arg.flags & PM_SCAN_WP_MATCHING)
2775 				continue;
2776 			make_uffd_wp_pte(vma, addr, pte, ptent);
2777 			if (!flush_end)
2778 				start = addr;
2779 			flush_end = next;
2780 		}
2781 		goto flush_and_return;
2782 	}
2783 
2784 	for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
2785 		pte_t ptent = ptep_get(pte);
2786 		unsigned long categories = p->cur_vma_category |
2787 					   pagemap_page_category(p, vma, addr, ptent);
2788 		unsigned long next = addr + PAGE_SIZE;
2789 
2790 		if (!pagemap_scan_is_interesting_page(categories, p))
2791 			continue;
2792 
2793 		ret = pagemap_scan_output(categories, p, addr, &next);
2794 		if (next == addr)
2795 			break;
2796 
2797 		if (~p->arg.flags & PM_SCAN_WP_MATCHING)
2798 			continue;
2799 		if (~categories & PAGE_IS_WRITTEN)
2800 			continue;
2801 
2802 		make_uffd_wp_pte(vma, addr, pte, ptent);
2803 		if (!flush_end)
2804 			start = addr;
2805 		flush_end = next;
2806 	}
2807 
2808 flush_and_return:
2809 	if (flush_end)
2810 		flush_tlb_range(vma, start, addr);
2811 
2812 	arch_leave_lazy_mmu_mode();
2813 	pte_unmap_unlock(start_pte, ptl);
2814 
2815 	cond_resched();
2816 	return ret;
2817 }
2818 
2819 #ifdef CONFIG_HUGETLB_PAGE
2820 static int pagemap_scan_hugetlb_entry(pte_t *ptep, unsigned long hmask,
2821 				      unsigned long start, unsigned long end,
2822 				      struct mm_walk *walk)
2823 {
2824 	struct pagemap_scan_private *p = walk->private;
2825 	struct vm_area_struct *vma = walk->vma;
2826 	unsigned long categories;
2827 	spinlock_t *ptl;
2828 	int ret = 0;
2829 	pte_t pte;
2830 
2831 	if (~p->arg.flags & PM_SCAN_WP_MATCHING) {
2832 		/* Go the short route when not write-protecting pages. */
2833 
2834 		pte = huge_ptep_get(walk->mm, start, ptep);
2835 		categories = p->cur_vma_category | pagemap_hugetlb_category(pte);
2836 
2837 		if (!pagemap_scan_is_interesting_page(categories, p))
2838 			return 0;
2839 
2840 		return pagemap_scan_output(categories, p, start, &end);
2841 	}
2842 
2843 	i_mmap_lock_write(vma->vm_file->f_mapping);
2844 	ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, ptep);
2845 
2846 	pte = huge_ptep_get(walk->mm, start, ptep);
2847 	categories = p->cur_vma_category | pagemap_hugetlb_category(pte);
2848 
2849 	if (!pagemap_scan_is_interesting_page(categories, p))
2850 		goto out_unlock;
2851 
2852 	ret = pagemap_scan_output(categories, p, start, &end);
2853 	if (start == end)
2854 		goto out_unlock;
2855 
2856 	if (~categories & PAGE_IS_WRITTEN)
2857 		goto out_unlock;
2858 
2859 	if (end != start + HPAGE_SIZE) {
2860 		/* Partial HugeTLB page WP isn't possible. */
2861 		pagemap_scan_backout_range(p, start, end);
2862 		p->arg.walk_end = start;
2863 		ret = 0;
2864 		goto out_unlock;
2865 	}
2866 
2867 	make_uffd_wp_huge_pte(vma, start, ptep, pte);
2868 	flush_hugetlb_tlb_range(vma, start, end);
2869 
2870 out_unlock:
2871 	spin_unlock(ptl);
2872 	i_mmap_unlock_write(vma->vm_file->f_mapping);
2873 
2874 	return ret;
2875 }
2876 #else
2877 #define pagemap_scan_hugetlb_entry NULL
2878 #endif
2879 
2880 static int pagemap_scan_pte_hole(unsigned long addr, unsigned long end,
2881 				 int depth, struct mm_walk *walk)
2882 {
2883 	struct pagemap_scan_private *p = walk->private;
2884 	struct vm_area_struct *vma = walk->vma;
2885 	int ret, err;
2886 
2887 	if (!vma || !pagemap_scan_is_interesting_page(p->cur_vma_category, p))
2888 		return 0;
2889 
2890 	ret = pagemap_scan_output(p->cur_vma_category, p, addr, &end);
2891 	if (addr == end)
2892 		return ret;
2893 
2894 	if (~p->arg.flags & PM_SCAN_WP_MATCHING)
2895 		return ret;
2896 
2897 	err = uffd_wp_range(vma, addr, end - addr, true);
2898 	if (err < 0)
2899 		ret = err;
2900 
2901 	return ret;
2902 }
2903 
2904 static const struct mm_walk_ops pagemap_scan_ops = {
2905 	.test_walk = pagemap_scan_test_walk,
2906 	.pmd_entry = pagemap_scan_pmd_entry,
2907 	.pte_hole = pagemap_scan_pte_hole,
2908 	.hugetlb_entry = pagemap_scan_hugetlb_entry,
2909 };
2910 
2911 static int pagemap_scan_get_args(struct pm_scan_arg *arg,
2912 				 unsigned long uarg)
2913 {
2914 	if (copy_from_user(arg, (void __user *)uarg, sizeof(*arg)))
2915 		return -EFAULT;
2916 
2917 	if (arg->size != sizeof(struct pm_scan_arg))
2918 		return -EINVAL;
2919 
2920 	/* Validate requested features */
2921 	if (arg->flags & ~PM_SCAN_FLAGS)
2922 		return -EINVAL;
2923 	if ((arg->category_inverted | arg->category_mask |
2924 	     arg->category_anyof_mask | arg->return_mask) & ~PM_SCAN_CATEGORIES)
2925 		return -EINVAL;
2926 
2927 	arg->start = untagged_addr((unsigned long)arg->start);
2928 	arg->end = untagged_addr((unsigned long)arg->end);
2929 	arg->vec = untagged_addr((unsigned long)arg->vec);
2930 
2931 	/* Validate memory pointers */
2932 	if (!IS_ALIGNED(arg->start, PAGE_SIZE))
2933 		return -EINVAL;
2934 	if (!access_ok((void __user *)(long)arg->start, arg->end - arg->start))
2935 		return -EFAULT;
2936 	if (!arg->vec && arg->vec_len)
2937 		return -EINVAL;
2938 	if (UINT_MAX == SIZE_MAX && arg->vec_len > SIZE_MAX)
2939 		return -EINVAL;
2940 	if (arg->vec && !access_ok((void __user *)(long)arg->vec,
2941 				   size_mul(arg->vec_len, sizeof(struct page_region))))
2942 		return -EFAULT;
2943 
2944 	/* Fixup default values */
2945 	arg->end = ALIGN(arg->end, PAGE_SIZE);
2946 	arg->walk_end = 0;
2947 	if (!arg->max_pages)
2948 		arg->max_pages = ULONG_MAX;
2949 
2950 	return 0;
2951 }
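/*
 * Caller-side sketch (illustrative; assumes struct pm_scan_arg, the
 * PAGE_IS_* categories and the PAGEMAP_SCAN ioctl number from the uapi
 * headers; start_vaddr/end_vaddr stand for a page-aligned range of
 * interest). Report ranges written since the last write-protect pass:
 *
 *	struct page_region regions[32];
 *	struct pm_scan_arg arg = {
 *		.size = sizeof(arg),
 *		.start = start_vaddr,
 *		.end = end_vaddr,
 *		.vec = (uintptr_t)regions,
 *		.vec_len = 32,
 *		.category_mask = PAGE_IS_WRITTEN,
 *		.return_mask = PAGE_IS_WRITTEN,
 *	};
 *	long n = ioctl(pagemap_fd, PAGEMAP_SCAN, &arg);
 *
 * On success n is the number of regions[] filled in, and arg.walk_end
 * records where the walk stopped.
 */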
2952 
2953 static int pagemap_scan_writeback_args(struct pm_scan_arg *arg,
2954 				       unsigned long uargl)
2955 {
2956 	struct pm_scan_arg __user *uarg = (void __user *)uargl;
2957 
2958 	if (copy_to_user(&uarg->walk_end, &arg->walk_end, sizeof(arg->walk_end)))
2959 		return -EFAULT;
2960 
2961 	return 0;
2962 }
2963 
2964 static int pagemap_scan_init_bounce_buffer(struct pagemap_scan_private *p)
2965 {
2966 	if (!p->arg.vec_len)
2967 		return 0;
2968 
2969 	p->vec_buf_len = min_t(size_t, PAGEMAP_WALK_SIZE >> PAGE_SHIFT,
2970 			       p->arg.vec_len);
2971 	p->vec_buf = kmalloc_array(p->vec_buf_len, sizeof(*p->vec_buf),
2972 				   GFP_KERNEL);
2973 	if (!p->vec_buf)
2974 		return -ENOMEM;
2975 
2976 	p->vec_buf->start = p->vec_buf->end = 0;
2977 	p->vec_out = (struct page_region __user *)(long)p->arg.vec;
2978 
2979 	return 0;
2980 }
2981 
2982 static long pagemap_scan_flush_buffer(struct pagemap_scan_private *p)
2983 {
2984 	const struct page_region *buf = p->vec_buf;
2985 	long n = p->vec_buf_index;
2986 
2987 	if (!p->vec_buf)
2988 		return 0;
2989 
2990 	if (buf[n].end != buf[n].start)
2991 		n++;
2992 
2993 	if (!n)
2994 		return 0;
2995 
2996 	if (copy_to_user(p->vec_out, buf, n * sizeof(*buf)))
2997 		return -EFAULT;
2998 
2999 	p->arg.vec_len -= n;
3000 	p->vec_out += n;
3001 
3002 	p->vec_buf_index = 0;
3003 	p->vec_buf_len = min_t(size_t, p->vec_buf_len, p->arg.vec_len);
3004 	p->vec_buf->start = p->vec_buf->end = 0;
3005 
3006 	return n;
3007 }
3008 
3009 static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
3010 {
3011 	struct pagemap_scan_private p = {0};
3012 	unsigned long walk_start;
3013 	size_t n_ranges_out = 0;
3014 	int ret;
3015 
3016 	ret = pagemap_scan_get_args(&p.arg, uarg);
3017 	if (ret)
3018 		return ret;
3019 
3020 	p.masks_of_interest = p.arg.category_mask | p.arg.category_anyof_mask |
3021 			      p.arg.return_mask;
3022 	ret = pagemap_scan_init_bounce_buffer(&p);
3023 	if (ret)
3024 		return ret;
3025 
3026 	for (walk_start = p.arg.start; walk_start < p.arg.end;
3027 			walk_start = p.arg.walk_end) {
3028 		struct mmu_notifier_range range;
3029 		long n_out;
3030 
3031 		if (fatal_signal_pending(current)) {
3032 			ret = -EINTR;
3033 			break;
3034 		}
3035 
3036 		ret = mmap_read_lock_killable(mm);
3037 		if (ret)
3038 			break;
3039 
3040 		/* Protection change for the range is going to happen. */
3041 		if (p.arg.flags & PM_SCAN_WP_MATCHING) {
3042 			mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
3043 						mm, walk_start, p.arg.end);
3044 			mmu_notifier_invalidate_range_start(&range);
3045 		}
3046 
3047 		ret = walk_page_range(mm, walk_start, p.arg.end,
3048 				      &pagemap_scan_ops, &p);
3049 
3050 		if (p.arg.flags & PM_SCAN_WP_MATCHING)
3051 			mmu_notifier_invalidate_range_end(&range);
3052 
3053 		mmap_read_unlock(mm);
3054 
3055 		n_out = pagemap_scan_flush_buffer(&p);
3056 		if (n_out < 0)
3057 			ret = n_out;
3058 		else
3059 			n_ranges_out += n_out;
3060 
3061 		if (ret != -ENOSPC)
3062 			break;
3063 
3064 		if (p.arg.vec_len == 0 || p.found_pages == p.arg.max_pages)
3065 			break;
3066 	}
3067 
3068 	/* ENOSPC signifies early stop (buffer full) from the walk. */
3069 	if (!ret || ret == -ENOSPC)
3070 		ret = n_ranges_out;
3071 
3072 	/* The walk_end isn't set when ret is zero */
3073 	if (!p.arg.walk_end)
3074 		p.arg.walk_end = p.arg.end;
3075 	if (pagemap_scan_writeback_args(&p.arg, uarg))
3076 		ret = -EFAULT;
3077 
3078 	kfree(p.vec_buf);
3079 	return ret;
3080 }
3081 
3082 static long do_pagemap_cmd(struct file *file, unsigned int cmd,
3083 			   unsigned long arg)
3084 {
3085 	struct mm_struct *mm = file->private_data;
3086 
3087 	switch (cmd) {
3088 	case PAGEMAP_SCAN:
3089 		return do_pagemap_scan(mm, arg);
3090 
3091 	default:
3092 		return -EINVAL;
3093 	}
3094 }
3095 
3096 const struct file_operations proc_pagemap_operations = {
3097 	.llseek		= mem_lseek, /* borrow this */
3098 	.read		= pagemap_read,
3099 	.open		= pagemap_open,
3100 	.release	= pagemap_release,
3101 	.unlocked_ioctl = do_pagemap_cmd,
3102 	.compat_ioctl	= do_pagemap_cmd,
3103 };
3104 #endif /* CONFIG_PROC_PAGE_MONITOR */
3105 
3106 #ifdef CONFIG_NUMA
3107 
3108 struct numa_maps {
3109 	unsigned long pages;
3110 	unsigned long anon;
3111 	unsigned long active;
3112 	unsigned long writeback;
3113 	unsigned long mapcount_max;
3114 	unsigned long dirty;
3115 	unsigned long swapcache;
3116 	unsigned long node[MAX_NUMNODES];
3117 };
3118 
3119 struct numa_maps_private {
3120 	struct proc_maps_private proc_maps;
3121 	struct numa_maps md;
3122 };
3123 
3124 static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
3125 			unsigned long nr_pages)
3126 {
3127 	struct folio *folio = page_folio(page);
3128 	int count;
3129 
3130 	if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
3131 		count = folio_precise_page_mapcount(folio, page);
3132 	else
3133 		count = folio_average_page_mapcount(folio);
3134 
3135 	md->pages += nr_pages;
3136 	if (pte_dirty || folio_test_dirty(folio))
3137 		md->dirty += nr_pages;
3138 
3139 	if (folio_test_swapcache(folio))
3140 		md->swapcache += nr_pages;
3141 
3142 	if (folio_test_active(folio) || folio_test_unevictable(folio))
3143 		md->active += nr_pages;
3144 
3145 	if (folio_test_writeback(folio))
3146 		md->writeback += nr_pages;
3147 
3148 	if (folio_test_anon(folio))
3149 		md->anon += nr_pages;
3150 
3151 	if (count > md->mapcount_max)
3152 		md->mapcount_max = count;
3153 
3154 	md->node[folio_nid(folio)] += nr_pages;
3155 }
3156 
3157 static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
3158 		unsigned long addr)
3159 {
3160 	struct page *page;
3161 	int nid;
3162 
3163 	if (!pte_present(pte))
3164 		return NULL;
3165 
3166 	page = vm_normal_page(vma, addr, pte);
3167 	if (!page || is_zone_device_page(page))
3168 		return NULL;
3169 
3170 	if (PageReserved(page))
3171 		return NULL;
3172 
3173 	nid = page_to_nid(page);
3174 	if (!node_isset(nid, node_states[N_MEMORY]))
3175 		return NULL;
3176 
3177 	return page;
3178 }
3179 
3180 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3181 static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
3182 					      struct vm_area_struct *vma,
3183 					      unsigned long addr)
3184 {
3185 	struct page *page;
3186 	int nid;
3187 
3188 	if (!pmd_present(pmd))
3189 		return NULL;
3190 
3191 	page = vm_normal_page_pmd(vma, addr, pmd);
3192 	if (!page)
3193 		return NULL;
3194 
3195 	if (PageReserved(page))
3196 		return NULL;
3197 
3198 	nid = page_to_nid(page);
3199 	if (!node_isset(nid, node_states[N_MEMORY]))
3200 		return NULL;
3201 
3202 	return page;
3203 }
3204 #endif
3205 
3206 static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
3207 		unsigned long end, struct mm_walk *walk)
3208 {
3209 	struct numa_maps *md = walk->private;
3210 	struct vm_area_struct *vma = walk->vma;
3211 	spinlock_t *ptl;
3212 	pte_t *orig_pte;
3213 	pte_t *pte;
3214 
3215 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3216 	ptl = pmd_trans_huge_lock(pmd, vma);
3217 	if (ptl) {
3218 		struct page *page;
3219 
3220 		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
3221 		if (page)
3222 			gather_stats(page, md, pmd_dirty(*pmd),
3223 				     HPAGE_PMD_SIZE/PAGE_SIZE);
3224 		spin_unlock(ptl);
3225 		return 0;
3226 	}
3227 #endif
3228 	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
3229 	if (!pte) {
3230 		walk->action = ACTION_AGAIN;
3231 		return 0;
3232 	}
3233 	do {
3234 		pte_t ptent = ptep_get(pte);
3235 		struct page *page = can_gather_numa_stats(ptent, vma, addr);
3236 		if (!page)
3237 			continue;
3238 		gather_stats(page, md, pte_dirty(ptent), 1);
3239 
3240 	} while (pte++, addr += PAGE_SIZE, addr != end);
3241 	pte_unmap_unlock(orig_pte, ptl);
3242 	cond_resched();
3243 	return 0;
3244 }
3245 #ifdef CONFIG_HUGETLB_PAGE
3246 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
3247 		unsigned long addr, unsigned long end, struct mm_walk *walk)
3248 {
3249 	pte_t huge_pte;
3250 	struct numa_maps *md;
3251 	struct page *page;
3252 	spinlock_t *ptl;
3253 
3254 	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
3255 	huge_pte = huge_ptep_get(walk->mm, addr, pte);
3256 	if (!pte_present(huge_pte))
3257 		goto out;
3258 
3259 	page = pte_page(huge_pte);
3260 
3261 	md = walk->private;
3262 	gather_stats(page, md, pte_dirty(huge_pte), 1);
3263 out:
3264 	spin_unlock(ptl);
3265 	return 0;
3266 }
3267 
3268 #else
3269 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
3270 		unsigned long addr, unsigned long end, struct mm_walk *walk)
3271 {
3272 	return 0;
3273 }
3274 #endif
3275 
3276 static const struct mm_walk_ops show_numa_ops = {
3277 	.hugetlb_entry = gather_hugetlb_stats,
3278 	.pmd_entry = gather_pte_stats,
3279 	.walk_lock = PGWALK_RDLOCK,
3280 };
3281 
3282 /*
3283  * Display pages allocated per node and memory policy via /proc.
3284  */
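/*
 * Example output line (illustrative values only; fields are omitted
 * when zero, see the seq_printf() calls below):
 *
 *	7f1c2a600000 default file=/usr/lib/libc.so.6 mapped=27 mapmax=12
 *	active=20 N0=27 kernelpagesize_kB=4
 */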
3285 static int show_numa_map(struct seq_file *m, void *v)
3286 {
3287 	struct numa_maps_private *numa_priv = m->private;
3288 	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
3289 	struct vm_area_struct *vma = v;
3290 	struct numa_maps *md = &numa_priv->md;
3291 	struct file *file = vma->vm_file;
3292 	struct mm_struct *mm = vma->vm_mm;
3293 	char buffer[64];
3294 	struct mempolicy *pol;
3295 	pgoff_t ilx;
3296 	int nid;
3297 
3298 	if (!mm)
3299 		return 0;
3300 
3301 	/* Ensure we start with an empty set of numa_maps statistics. */
3302 	memset(md, 0, sizeof(*md));
3303 
3304 	pol = __get_vma_policy(vma, vma->vm_start, &ilx);
3305 	if (pol) {
3306 		mpol_to_str(buffer, sizeof(buffer), pol);
3307 		mpol_cond_put(pol);
3308 	} else {
3309 		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
3310 	}
3311 
3312 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
3313 
3314 	if (file) {
3315 		seq_puts(m, " file=");
3316 		seq_path(m, file_user_path(file), "\n\t= ");
3317 	} else if (vma_is_initial_heap(vma)) {
3318 		seq_puts(m, " heap");
3319 	} else if (vma_is_initial_stack(vma)) {
3320 		seq_puts(m, " stack");
3321 	}
3322 
3323 	if (is_vm_hugetlb_page(vma))
3324 		seq_puts(m, " huge");
3325 
3326 	/* mmap_lock is held by m_start */
3327 	walk_page_vma(vma, &show_numa_ops, md);
3328 
3329 	if (!md->pages)
3330 		goto out;
3331 
3332 	if (md->anon)
3333 		seq_printf(m, " anon=%lu", md->anon);
3334 
3335 	if (md->dirty)
3336 		seq_printf(m, " dirty=%lu", md->dirty);
3337 
3338 	if (md->pages != md->anon && md->pages != md->dirty)
3339 		seq_printf(m, " mapped=%lu", md->pages);
3340 
3341 	if (md->mapcount_max > 1)
3342 		seq_printf(m, " mapmax=%lu", md->mapcount_max);
3343 
3344 	if (md->swapcache)
3345 		seq_printf(m, " swapcache=%lu", md->swapcache);
3346 
3347 	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
3348 		seq_printf(m, " active=%lu", md->active);
3349 
3350 	if (md->writeback)
3351 		seq_printf(m, " writeback=%lu", md->writeback);
3352 
3353 	for_each_node_state(nid, N_MEMORY)
3354 		if (md->node[nid])
3355 			seq_printf(m, " N%d=%lu", nid, md->node[nid]);
3356 
3357 	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
3358 out:
3359 	seq_putc(m, '\n');
3360 	return 0;
3361 }
3362 
3363 static const struct seq_operations proc_pid_numa_maps_op = {
3364 	.start  = m_start,
3365 	.next   = m_next,
3366 	.stop   = m_stop,
3367 	.show   = show_numa_map,
3368 };
3369 
3370 static int pid_numa_maps_open(struct inode *inode, struct file *file)
3371 {
3372 	return proc_maps_open(inode, file, &proc_pid_numa_maps_op,
3373 				sizeof(struct numa_maps_private));
3374 }
3375 
3376 const struct file_operations proc_pid_numa_maps_operations = {
3377 	.open		= pid_numa_maps_open,
3378 	.read		= seq_read,
3379 	.llseek		= seq_lseek,
3380 	.release	= proc_map_release,
3381 };
3382 
3383 #endif /* CONFIG_NUMA */
3384