task_mmu.c (fbc90c042cd1dc7258ebfebe6d226017e5b5ac8c) vs. task_mmu.c (9651fcedf7b92d3f7f1ab179e8ab55b85ee10fc1)
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/pagewalk.h>
3#include <linux/mm_inline.h>
4#include <linux/hugetlb.h>
5#include <linux/huge_mm.h>
6#include <linux/mount.h>
7#include <linux/ksm.h>
8#include <linux/seq_file.h>

--- 8 unchanged lines hidden ---

17#include <linux/swapops.h>
18#include <linux/mmu_notifier.h>
19#include <linux/page_idle.h>
20#include <linux/shmem_fs.h>
21#include <linux/uaccess.h>
22#include <linux/pkeys.h>
23#include <linux/minmax.h>
24#include <linux/overflow.h>
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/pagewalk.h>
3#include <linux/mm_inline.h>
4#include <linux/hugetlb.h>
5#include <linux/huge_mm.h>
6#include <linux/mount.h>
7#include <linux/ksm.h>
8#include <linux/seq_file.h>

--- 8 unchanged lines hidden ---

17#include <linux/swapops.h>
18#include <linux/mmu_notifier.h>
19#include <linux/page_idle.h>
20#include <linux/shmem_fs.h>
21#include <linux/uaccess.h>
22#include <linux/pkeys.h>
23#include <linux/minmax.h>
24#include <linux/overflow.h>
25#include <linux/buildid.h>
26
27#include <asm/elf.h>
28#include <asm/tlb.h>
29#include <asm/tlbflush.h>
30#include "internal.h"
31
32#define SEQ_PUT_DEC(str, val) \
33 seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)
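The left shift by PAGE_SHIFT - 10 in SEQ_PUT_DEC turns a count of pages into KiB. A standalone sketch of that arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12); plain userspace C, not kernel code:

	/* Demonstrates the SEQ_PUT_DEC shift: pages << (PAGE_SHIFT - 10) == KiB,
	 * assuming 4 KiB pages. Illustration only, not kernel code. */
	#include <stdio.h>

	#define PAGE_SHIFT 12

	int main(void)
	{
		unsigned long long pages = 300;		/* e.g. a counter kept in pages */
		unsigned long long kib = pages << (PAGE_SHIFT - 10);

		printf("%llu pages = %llu kB\n", pages, kib);	/* 300 pages = 1200 kB */
		return 0;
	}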

--- 201 unchanged lines hidden ---

235
236static int do_maps_open(struct inode *inode, struct file *file,
237 const struct seq_operations *ops)
238{
239 return proc_maps_open(inode, file, ops,
240 sizeof(struct proc_maps_private));
241}
242
25
26#include <asm/elf.h>
27#include <asm/tlb.h>
28#include <asm/tlbflush.h>
29#include "internal.h"
30
31#define SEQ_PUT_DEC(str, val) \
32 seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)

--- 201 unchanged lines hidden ---

234
235static int do_maps_open(struct inode *inode, struct file *file,
236 const struct seq_operations *ops)
237{
238 return proc_maps_open(inode, file, ops,
239 sizeof(struct proc_maps_private));
240}
241
243static void get_vma_name(struct vm_area_struct *vma,
244 const struct path **path,
245 const char **name,
246 const char **name_fmt)
247{
248 struct anon_vma_name *anon_name = vma->vm_mm ? anon_vma_name(vma) : NULL;
249
250 *name = NULL;
251 *path = NULL;
252 *name_fmt = NULL;
253
254 /*
255 * Print the dentry name for named mappings, and a
256 * special [heap] marker for the heap:
257 */
258 if (vma->vm_file) {
259 /*
260 * If user named this anon shared memory via
261 * prctl(PR_SET_VMA ..., use the provided name.
262 */
263 if (anon_name) {
264 *name_fmt = "[anon_shmem:%s]";
265 *name = anon_name->name;
266 } else {
267 *path = file_user_path(vma->vm_file);
268 }
269 return;
270 }
271
272 if (vma->vm_ops && vma->vm_ops->name) {
273 *name = vma->vm_ops->name(vma);
274 if (*name)
275 return;
276 }
277
278 *name = arch_vma_name(vma);
279 if (*name)
280 return;
281
282 if (!vma->vm_mm) {
283 *name = "[vdso]";
284 return;
285 }
286
287 if (vma_is_initial_heap(vma)) {
288 *name = "[heap]";
289 return;
290 }
291
292 if (vma_is_initial_stack(vma)) {
293 *name = "[stack]";
294 return;
295 }
296
297 if (anon_name) {
298 *name_fmt = "[anon:%s]";
299 *name = anon_name->name;
300 return;
301 }
302}
303
304static void show_vma_header_prefix(struct seq_file *m,
305 unsigned long start, unsigned long end,
306 vm_flags_t flags, unsigned long long pgoff,
307 dev_t dev, unsigned long ino)
308{
309 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
310 seq_put_hex_ll(m, NULL, start, 8);
311 seq_put_hex_ll(m, "-", end, 8);

--- 7 unchanged lines hidden ---

319 seq_put_hex_ll(m, ":", MINOR(dev), 2);
320 seq_put_decimal_ull(m, " ", ino);
321 seq_putc(m, ' ');
322}
323
324static void
325show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
326{
242static void show_vma_header_prefix(struct seq_file *m,
243 unsigned long start, unsigned long end,
244 vm_flags_t flags, unsigned long long pgoff,
245 dev_t dev, unsigned long ino)
246{
247 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
248 seq_put_hex_ll(m, NULL, start, 8);
249 seq_put_hex_ll(m, "-", end, 8);

--- 7 unchanged lines hidden ---

257 seq_put_hex_ll(m, ":", MINOR(dev), 2);
258 seq_put_decimal_ull(m, " ", ino);
259 seq_putc(m, ' ');
260}
261
262static void
263show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
264{
327 const struct path *path;
328 const char *name_fmt, *name;
265 struct anon_vma_name *anon_name = NULL;
266 struct mm_struct *mm = vma->vm_mm;
267 struct file *file = vma->vm_file;
329 vm_flags_t flags = vma->vm_flags;
330 unsigned long ino = 0;
331 unsigned long long pgoff = 0;
332 unsigned long start, end;
333 dev_t dev = 0;
268 vm_flags_t flags = vma->vm_flags;
269 unsigned long ino = 0;
270 unsigned long long pgoff = 0;
271 unsigned long start, end;
272 dev_t dev = 0;
273 const char *name = NULL;
334
274
335 if (vma->vm_file) {
275 if (file) {
336 const struct inode *inode = file_user_inode(vma->vm_file);
337
338 dev = inode->i_sb->s_dev;
339 ino = inode->i_ino;
340 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
341 }
342
343 start = vma->vm_start;
344 end = vma->vm_end;
345 show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);
276 const struct inode *inode = file_user_inode(vma->vm_file);
277
278 dev = inode->i_sb->s_dev;
279 ino = inode->i_ino;
280 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
281 }
282
283 start = vma->vm_start;
284 end = vma->vm_end;
285 show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);
286 if (mm)
287 anon_name = anon_vma_name(vma);
346
288
347 get_vma_name(vma, &path, &name, &name_fmt);
348 if (path) {
289 /*
290 * Print the dentry name for named mappings, and a
291 * special [heap] marker for the heap:
292 */
293 if (file) {
349 seq_pad(m, ' ');
294 seq_pad(m, ' ');
350 seq_path(m, path, "\n");
351 } else if (name_fmt) {
295 /*
296 * If user named this anon shared memory via
297 * prctl(PR_SET_VMA ..., use the provided name.
298 */
299 if (anon_name)
300 seq_printf(m, "[anon_shmem:%s]", anon_name->name);
301 else
302 seq_path(m, file_user_path(file), "\n");
303 goto done;
304 }
305
306 if (vma->vm_ops && vma->vm_ops->name) {
307 name = vma->vm_ops->name(vma);
308 if (name)
309 goto done;
310 }
311
312 name = arch_vma_name(vma);
313 if (!name) {
314 if (!mm) {
315 name = "[vdso]";
316 goto done;
317 }
318
319 if (vma_is_initial_heap(vma)) {
320 name = "[heap]";
321 goto done;
322 }
323
324 if (vma_is_initial_stack(vma)) {
325 name = "[stack]";
326 goto done;
327 }
328
329 if (anon_name) {
330 seq_pad(m, ' ');
331 seq_printf(m, "[anon:%s]", anon_name->name);
332 }
333 }
334
335done:
336 if (name) {
352 seq_pad(m, ' ');
337 seq_pad(m, ' ');
353 seq_printf(m, name_fmt, name);
354 } else if (name) {
355 seq_pad(m, ' ');
356 seq_puts(m, name);
357 }
358 seq_putc(m, '\n');
359}
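In both versions, show_vma_header_prefix() and show_map_vma() emit one /proc/<pid>/maps line per VMA: address range, permissions, file offset, device, inode, then the chosen name (the newer side simply moves the naming precedence into get_vma_name()). Illustrative output only, with invented addresses and inode numbers, not captured from a real system:

	7f1c2a400000-7f1c2a5b0000 r-xp 00028000 08:01 1048602      /usr/lib/x86_64-linux-gnu/libc.so.6
	7f1c2a7e0000-7f1c2a7e2000 rw-p 00000000 00:00 0            [anon:libc_malloc]
	7ffd4b3df000-7ffd4b400000 rw-p 00000000 00:00 0            [stack]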
360
361static int show_map(struct seq_file *m, void *v)
362{
363 show_map_vma(m, v);

--- 7 unchanged lines hidden ---

371 .show = show_map
372};
373
374static int pid_maps_open(struct inode *inode, struct file *file)
375{
376 return do_maps_open(inode, file, &proc_pid_maps_op);
377}
378
338 seq_puts(m, name);
339 }
340 seq_putc(m, '\n');
341}
342
343static int show_map(struct seq_file *m, void *v)
344{
345 show_map_vma(m, v);

--- 7 unchanged lines hidden ---

353 .show = show_map
354};
355
356static int pid_maps_open(struct inode *inode, struct file *file)
357{
358 return do_maps_open(inode, file, &proc_pid_maps_op);
359}
360
379#define PROCMAP_QUERY_VMA_FLAGS ( \
380 PROCMAP_QUERY_VMA_READABLE | \
381 PROCMAP_QUERY_VMA_WRITABLE | \
382 PROCMAP_QUERY_VMA_EXECUTABLE | \
383 PROCMAP_QUERY_VMA_SHARED \
384)
385
386#define PROCMAP_QUERY_VALID_FLAGS_MASK ( \
387 PROCMAP_QUERY_COVERING_OR_NEXT_VMA | \
388 PROCMAP_QUERY_FILE_BACKED_VMA | \
389 PROCMAP_QUERY_VMA_FLAGS \
390)
391
392static int query_vma_setup(struct mm_struct *mm)
393{
394 return mmap_read_lock_killable(mm);
395}
396
397static void query_vma_teardown(struct mm_struct *mm, struct vm_area_struct *vma)
398{
399 mmap_read_unlock(mm);
400}
401
402static struct vm_area_struct *query_vma_find_by_addr(struct mm_struct *mm, unsigned long addr)
403{
404 return find_vma(mm, addr);
405}
406
407static struct vm_area_struct *query_matching_vma(struct mm_struct *mm,
408 unsigned long addr, u32 flags)
409{
410 struct vm_area_struct *vma;
411
412next_vma:
413 vma = query_vma_find_by_addr(mm, addr);
414 if (!vma)
415 goto no_vma;
416
417 /* user requested only file-backed VMA, keep iterating */
418 if ((flags & PROCMAP_QUERY_FILE_BACKED_VMA) && !vma->vm_file)
419 goto skip_vma;
420
421 /* VMA permissions should satisfy query flags */
422 if (flags & PROCMAP_QUERY_VMA_FLAGS) {
423 u32 perm = 0;
424
425 if (flags & PROCMAP_QUERY_VMA_READABLE)
426 perm |= VM_READ;
427 if (flags & PROCMAP_QUERY_VMA_WRITABLE)
428 perm |= VM_WRITE;
429 if (flags & PROCMAP_QUERY_VMA_EXECUTABLE)
430 perm |= VM_EXEC;
431 if (flags & PROCMAP_QUERY_VMA_SHARED)
432 perm |= VM_MAYSHARE;
433
434 if ((vma->vm_flags & perm) != perm)
435 goto skip_vma;
436 }
437
438 /* found covering VMA or user is OK with the matching next VMA */
439 if ((flags & PROCMAP_QUERY_COVERING_OR_NEXT_VMA) || vma->vm_start <= addr)
440 return vma;
441
442skip_vma:
443 /*
444 * If the user needs closest matching VMA, keep iterating.
445 */
446 addr = vma->vm_end;
447 if (flags & PROCMAP_QUERY_COVERING_OR_NEXT_VMA)
448 goto next_vma;
449
450no_vma:
451 return ERR_PTR(-ENOENT);
452}
453
454static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
455{
456 struct procmap_query karg;
457 struct vm_area_struct *vma;
458 struct mm_struct *mm;
459 const char *name = NULL;
460 char build_id_buf[BUILD_ID_SIZE_MAX], *name_buf = NULL;
461 __u64 usize;
462 int err;
463
464 if (copy_from_user(&usize, (void __user *)uarg, sizeof(usize)))
465 return -EFAULT;
466 /* argument struct can never be that large, reject abuse */
467 if (usize > PAGE_SIZE)
468 return -E2BIG;
469 /* argument struct should have at least query_flags and query_addr fields */
470 if (usize < offsetofend(struct procmap_query, query_addr))
471 return -EINVAL;
472 err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
473 if (err)
474 return err;
475
476 /* reject unknown flags */
477 if (karg.query_flags & ~PROCMAP_QUERY_VALID_FLAGS_MASK)
478 return -EINVAL;
479 /* either both buffer address and size are set, or both should be zero */
480 if (!!karg.vma_name_size != !!karg.vma_name_addr)
481 return -EINVAL;
482 if (!!karg.build_id_size != !!karg.build_id_addr)
483 return -EINVAL;
484
485 mm = priv->mm;
486 if (!mm || !mmget_not_zero(mm))
487 return -ESRCH;
488
489 err = query_vma_setup(mm);
490 if (err) {
491 mmput(mm);
492 return err;
493 }
494
495 vma = query_matching_vma(mm, karg.query_addr, karg.query_flags);
496 if (IS_ERR(vma)) {
497 err = PTR_ERR(vma);
498 vma = NULL;
499 goto out;
500 }
501
502 karg.vma_start = vma->vm_start;
503 karg.vma_end = vma->vm_end;
504
505 karg.vma_flags = 0;
506 if (vma->vm_flags & VM_READ)
507 karg.vma_flags |= PROCMAP_QUERY_VMA_READABLE;
508 if (vma->vm_flags & VM_WRITE)
509 karg.vma_flags |= PROCMAP_QUERY_VMA_WRITABLE;
510 if (vma->vm_flags & VM_EXEC)
511 karg.vma_flags |= PROCMAP_QUERY_VMA_EXECUTABLE;
512 if (vma->vm_flags & VM_MAYSHARE)
513 karg.vma_flags |= PROCMAP_QUERY_VMA_SHARED;
514
515 karg.vma_page_size = vma_kernel_pagesize(vma);
516
517 if (vma->vm_file) {
518 const struct inode *inode = file_user_inode(vma->vm_file);
519
520 karg.vma_offset = ((__u64)vma->vm_pgoff) << PAGE_SHIFT;
521 karg.dev_major = MAJOR(inode->i_sb->s_dev);
522 karg.dev_minor = MINOR(inode->i_sb->s_dev);
523 karg.inode = inode->i_ino;
524 } else {
525 karg.vma_offset = 0;
526 karg.dev_major = 0;
527 karg.dev_minor = 0;
528 karg.inode = 0;
529 }
530
531 if (karg.build_id_size) {
532 __u32 build_id_sz;
533
534 err = build_id_parse(vma, build_id_buf, &build_id_sz);
535 if (err) {
536 karg.build_id_size = 0;
537 } else {
538 if (karg.build_id_size < build_id_sz) {
539 err = -ENAMETOOLONG;
540 goto out;
541 }
542 karg.build_id_size = build_id_sz;
543 }
544 }
545
546 if (karg.build_id_size) {
547 __u32 build_id_sz;
548
549 err = build_id_parse(vma, build_id_buf, &build_id_sz);
550 if (err) {
551 karg.build_id_size = 0;
552 } else {
553 if (karg.build_id_size < build_id_sz) {
554 err = -ENAMETOOLONG;
555 goto out;
556 }
557 karg.build_id_size = build_id_sz;
558 }
559 }
560
561 if (karg.vma_name_size) {
562 size_t name_buf_sz = min_t(size_t, PATH_MAX, karg.vma_name_size);
563 const struct path *path;
564 const char *name_fmt;
565 size_t name_sz = 0;
566
567 get_vma_name(vma, &path, &name, &name_fmt);
568
569 if (path || name_fmt || name) {
570 name_buf = kmalloc(name_buf_sz, GFP_KERNEL);
571 if (!name_buf) {
572 err = -ENOMEM;
573 goto out;
574 }
575 }
576 if (path) {
577 name = d_path(path, name_buf, name_buf_sz);
578 if (IS_ERR(name)) {
579 err = PTR_ERR(name);
580 goto out;
581 }
582 name_sz = name_buf + name_buf_sz - name;
583 } else if (name || name_fmt) {
584 name_sz = 1 + snprintf(name_buf, name_buf_sz, name_fmt ?: "%s", name);
585 name = name_buf;
586 }
587 if (name_sz > name_buf_sz) {
588 err = -ENAMETOOLONG;
589 goto out;
590 }
591 karg.vma_name_size = name_sz;
592 }
593
594 /* unlock vma or mmap_lock, and put mm_struct before copying data to user */
595 query_vma_teardown(mm, vma);
596 mmput(mm);
597
598 if (karg.vma_name_size && copy_to_user(u64_to_user_ptr(karg.vma_name_addr),
599 name, karg.vma_name_size)) {
600 kfree(name_buf);
601 return -EFAULT;
602 }
603 kfree(name_buf);
604
605 if (karg.build_id_size && copy_to_user(u64_to_user_ptr(karg.build_id_addr),
606 build_id_buf, karg.build_id_size))
607 return -EFAULT;
608
609 if (copy_to_user(uarg, &karg, min_t(size_t, sizeof(karg), usize)))
610 return -EFAULT;
611
612 return 0;
613
614out:
615 query_vma_teardown(mm, vma);
616 mmput(mm);
617 kfree(name_buf);
618 return err;
619}
620
621static long procfs_procmap_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
622{
623 struct seq_file *seq = file->private_data;
624 struct proc_maps_private *priv = seq->private;
625
626 switch (cmd) {
627 case PROCMAP_QUERY:
628 return do_procmap_query(priv, (void __user *)arg);
629 default:
630 return -ENOIOCTLCMD;
631 }
632}
633
634const struct file_operations proc_pid_maps_operations = {
635 .open = pid_maps_open,
636 .read = seq_read,
637 .llseek = seq_lseek,
638 .release = proc_map_release,
361const struct file_operations proc_pid_maps_operations = {
362 .open = pid_maps_open,
363 .read = seq_read,
364 .llseek = seq_lseek,
365 .release = proc_map_release,
639 .unlocked_ioctl = procfs_procmap_ioctl,
640 .compat_ioctl = compat_ptr_ioctl,
641};
642
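The PROCMAP_QUERY ioctl wired up through the new .unlocked_ioctl hook above lets a reader look up one VMA by address instead of parsing the whole text file. Below is a hedged userspace sketch, assuming the struct procmap_query and PROCMAP_QUERY definitions in <linux/fs.h> that accompany this change; flag and field names follow the kernel code above, and the leading size member is an assumption about the UAPI layout. It walks every VMA of the process whose /proc/<pid>/maps is open as maps_fd, relying on PROCMAP_QUERY_COVERING_OR_NEXT_VMA to advance:

	/* Sketch only: iterate a process's VMAs with the PROCMAP_QUERY ioctl.
	 * Assumes <linux/fs.h> provides struct procmap_query and PROCMAP_QUERY. */
	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>

	static int dump_vmas(int maps_fd)
	{
		char name[4096];
		struct procmap_query q;
		unsigned long long addr = 0;

		for (;;) {
			memset(&q, 0, sizeof(q));
			q.size = sizeof(q);	/* size-prefixed, extensible struct (assumed field name) */
			q.query_addr = addr;
			/* return the covering VMA, or the next one after addr */
			q.query_flags = PROCMAP_QUERY_COVERING_OR_NEXT_VMA;
			/* name buffer: address and size must both be set, or both zero */
			q.vma_name_addr = (uintptr_t)name;
			q.vma_name_size = sizeof(name);

			if (ioctl(maps_fd, PROCMAP_QUERY, &q)) {
				if (errno == ENOENT)	/* walked past the last VMA */
					return 0;
				return -errno;
			}

			printf("%llx-%llx %c%c%c%c %s\n",
			       (unsigned long long)q.vma_start,
			       (unsigned long long)q.vma_end,
			       q.vma_flags & PROCMAP_QUERY_VMA_READABLE   ? 'r' : '-',
			       q.vma_flags & PROCMAP_QUERY_VMA_WRITABLE   ? 'w' : '-',
			       q.vma_flags & PROCMAP_QUERY_VMA_EXECUTABLE ? 'x' : '-',
			       q.vma_flags & PROCMAP_QUERY_VMA_SHARED     ? 's' : 'p',
			       q.vma_name_size ? name : "");

			addr = q.vma_end;	/* continue after this VMA */
		}
	}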
643/*
644 * Proportional Set Size(PSS): my share of RSS.
645 *
646 * PSS of a process is the count of pages it has in memory, where each
647 * page is divided by the number of processes sharing it. So if a
648 * process has 1000 pages all to itself, and 1000 shared with one other

--- 63 unchanged lines hidden ---

712 mss->private_clean += size;
713 else
714 mss->shared_clean += size;
715 }
716}
717
718static void smaps_account(struct mem_size_stats *mss, struct page *page,
719 bool compound, bool young, bool dirty, bool locked,
366};
367
368/*
369 * Proportional Set Size(PSS): my share of RSS.
370 *
371 * PSS of a process is the count of pages it has in memory, where each
372 * page is divided by the number of processes sharing it. So if a
373 * process has 1000 pages all to itself, and 1000 shared with one other

--- 63 unchanged lines hidden ---

437 mss->private_clean += size;
438 else
439 mss->shared_clean += size;
440 }
441}
442
443static void smaps_account(struct mem_size_stats *mss, struct page *page,
444 bool compound, bool young, bool dirty, bool locked,
720 bool present)
445 bool migration)
721{
722 struct folio *folio = page_folio(page);
723 int i, nr = compound ? compound_nr(page) : 1;
724 unsigned long size = nr * PAGE_SIZE;
725
726 /*
727 * First accumulate quantities that depend only on |size| and the type
728 * of the compound page.

--- 12 unchanged lines hidden ---

741 /* Accumulate the size in pages that have been accessed. */
742 if (young || folio_test_young(folio) || folio_test_referenced(folio))
743 mss->referenced += size;
744
745 /*
746 * Then accumulate quantities that may depend on sharing, or that may
747 * differ page-by-page.
748 *
446{
447 struct folio *folio = page_folio(page);
448 int i, nr = compound ? compound_nr(page) : 1;
449 unsigned long size = nr * PAGE_SIZE;
450
451 /*
452 * First accumulate quantities that depend only on |size| and the type
453 * of the compound page.

--- 12 unchanged lines hidden ---

466 /* Accumulate the size in pages that have been accessed. */
467 if (young || folio_test_young(folio) || folio_test_referenced(folio))
468 mss->referenced += size;
469
470 /*
471 * Then accumulate quantities that may depend on sharing, or that may
472 * differ page-by-page.
473 *
749 * refcount == 1 for present entries guarantees that the folio is mapped
750 * exactly once. For large folios this implies that exactly one
751 * PTE/PMD/... maps (a part of) this folio.
474 * refcount == 1 guarantees the page is mapped exactly once.
475 * If any subpage of the compound page mapped with PTE it would elevate
476 * the refcount.
752 *
477 *
753 * Treat all non-present entries (where relying on the mapcount and
754 * refcount doesn't make sense) as "maybe shared, but not sure how
755 * often". We treat device private entries as being fake-present.
756 *
757 * Note that it would not be safe to read the mapcount especially for
758 * pages referenced by migration entries, even with the PTL held.
478 * The page_mapcount() is called to get a snapshot of the mapcount.
479 * Without holding the page lock this snapshot can be slightly wrong as
480 * we cannot always read the mapcount atomically. It is not safe to
481 * call page_mapcount() even with PTL held if the page is not mapped,
482 * especially for migration entries. Treat regular migration entries
483 * as mapcount == 1.
759 */
484 */
760 if (folio_ref_count(folio) == 1 || !present) {
485 if ((folio_ref_count(folio) == 1) || migration) {
761 smaps_page_accumulate(mss, folio, size, size << PSS_SHIFT,
486 smaps_page_accumulate(mss, folio, size, size << PSS_SHIFT,
762 dirty, locked, present);
487 dirty, locked, true);
763 return;
764 }
488 return;
489 }
765 /*
766 * We obtain a snapshot of the mapcount. Without holding the folio lock
767 * this snapshot can be slightly wrong as we cannot always read the
768 * mapcount atomically.
769 */
770 for (i = 0; i < nr; i++, page++) {
490 for (i = 0; i < nr; i++, page++) {
771 int mapcount = folio_precise_page_mapcount(folio, page);
491 int mapcount = page_mapcount(page);
772 unsigned long pss = PAGE_SIZE << PSS_SHIFT;
773 if (mapcount >= 2)
774 pss /= mapcount;
775 smaps_page_accumulate(mss, folio, PAGE_SIZE, pss,
776 dirty, locked, mapcount < 2);
777 }
778}
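In both versions smaps_account() keeps PSS in a fixed-point accumulator: a page mapped N times contributes (PAGE_SIZE << PSS_SHIFT) / N, and the sum is shifted back down by PSS_SHIFT when reported, so the 1/N shares are accumulated with sub-byte precision. A standalone illustration of that arithmetic (plain C, not kernel code; PSS_SHIFT is 12 in this file):

	/* Illustration of the PSS fixed-point accumulation in smaps_account();
	 * not kernel code. */
	#include <stdio.h>

	#define PAGE_SIZE 4096ULL
	#define PSS_SHIFT 12

	int main(void)
	{
		unsigned long long pss = 0;

		pss += PAGE_SIZE << PSS_SHIFT;		/* private page: full cost   */
		pss += (PAGE_SIZE << PSS_SHIFT) / 3;	/* page shared by 3 mappers  */
		pss += (PAGE_SIZE << PSS_SHIFT) / 3;	/* another 3-way shared page */

		/* reported value: shift the accumulator back down to bytes */
		printf("PSS = %llu bytes\n", pss >> PSS_SHIFT);	/* 4096 + 2*1365 = 6826 */
		return 0;
	}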
779

--- 26 unchanged lines hidden ---

806
807static void smaps_pte_entry(pte_t *pte, unsigned long addr,
808 struct mm_walk *walk)
809{
810 struct mem_size_stats *mss = walk->private;
811 struct vm_area_struct *vma = walk->vma;
812 bool locked = !!(vma->vm_flags & VM_LOCKED);
813 struct page *page = NULL;
492 unsigned long pss = PAGE_SIZE << PSS_SHIFT;
493 if (mapcount >= 2)
494 pss /= mapcount;
495 smaps_page_accumulate(mss, folio, PAGE_SIZE, pss,
496 dirty, locked, mapcount < 2);
497 }
498}
499

--- 26 unchanged lines hidden ---

526
527static void smaps_pte_entry(pte_t *pte, unsigned long addr,
528 struct mm_walk *walk)
529{
530 struct mem_size_stats *mss = walk->private;
531 struct vm_area_struct *vma = walk->vma;
532 bool locked = !!(vma->vm_flags & VM_LOCKED);
533 struct page *page = NULL;
814 bool present = false, young = false, dirty = false;
534 bool migration = false, young = false, dirty = false;
815 pte_t ptent = ptep_get(pte);
816
817 if (pte_present(ptent)) {
818 page = vm_normal_page(vma, addr, ptent);
819 young = pte_young(ptent);
820 dirty = pte_dirty(ptent);
535 pte_t ptent = ptep_get(pte);
536
537 if (pte_present(ptent)) {
538 page = vm_normal_page(vma, addr, ptent);
539 young = pte_young(ptent);
540 dirty = pte_dirty(ptent);
821 present = true;
822 } else if (is_swap_pte(ptent)) {
823 swp_entry_t swpent = pte_to_swp_entry(ptent);
824
825 if (!non_swap_entry(swpent)) {
826 int mapcount;
827
828 mss->swap += PAGE_SIZE;
829 mapcount = swp_swapcount(swpent);
830 if (mapcount >= 2) {
831 u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;
832
833 do_div(pss_delta, mapcount);
834 mss->swap_pss += pss_delta;
835 } else {
836 mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
837 }
838 } else if (is_pfn_swap_entry(swpent)) {
541 } else if (is_swap_pte(ptent)) {
542 swp_entry_t swpent = pte_to_swp_entry(ptent);
543
544 if (!non_swap_entry(swpent)) {
545 int mapcount;
546
547 mss->swap += PAGE_SIZE;
548 mapcount = swp_swapcount(swpent);
549 if (mapcount >= 2) {
550 u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;
551
552 do_div(pss_delta, mapcount);
553 mss->swap_pss += pss_delta;
554 } else {
555 mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
556 }
557 } else if (is_pfn_swap_entry(swpent)) {
839 if (is_device_private_entry(swpent))
840 present = true;
558 if (is_migration_entry(swpent))
559 migration = true;
841 page = pfn_swap_entry_to_page(swpent);
842 }
843 } else {
844 smaps_pte_hole_lookup(addr, walk);
845 return;
846 }
847
848 if (!page)
849 return;
850
560 page = pfn_swap_entry_to_page(swpent);
561 }
562 } else {
563 smaps_pte_hole_lookup(addr, walk);
564 return;
565 }
566
567 if (!page)
568 return;
569
851 smaps_account(mss, page, false, young, dirty, locked, present);
570 smaps_account(mss, page, false, young, dirty, locked, migration);
852}
853
854#ifdef CONFIG_TRANSPARENT_HUGEPAGE
855static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
856 struct mm_walk *walk)
857{
858 struct mem_size_stats *mss = walk->private;
859 struct vm_area_struct *vma = walk->vma;
860 bool locked = !!(vma->vm_flags & VM_LOCKED);
861 struct page *page = NULL;
571}
572
573#ifdef CONFIG_TRANSPARENT_HUGEPAGE
574static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
575 struct mm_walk *walk)
576{
577 struct mem_size_stats *mss = walk->private;
578 struct vm_area_struct *vma = walk->vma;
579 bool locked = !!(vma->vm_flags & VM_LOCKED);
580 struct page *page = NULL;
862 bool present = false;
863 struct folio *folio;
581 struct folio *folio;
582 bool migration = false;
864
865 if (pmd_present(*pmd)) {
866 page = vm_normal_page_pmd(vma, addr, *pmd);
583
584 if (pmd_present(*pmd)) {
585 page = vm_normal_page_pmd(vma, addr, *pmd);
867 present = true;
868 } else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
869 swp_entry_t entry = pmd_to_swp_entry(*pmd);
870
586 } else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
587 swp_entry_t entry = pmd_to_swp_entry(*pmd);
588
871 if (is_pfn_swap_entry(entry))
589 if (is_migration_entry(entry)) {
590 migration = true;
872 page = pfn_swap_entry_to_page(entry);
591 page = pfn_swap_entry_to_page(entry);
592 }
873 }
874 if (IS_ERR_OR_NULL(page))
875 return;
876 folio = page_folio(page);
877 if (folio_test_anon(folio))
878 mss->anonymous_thp += HPAGE_PMD_SIZE;
879 else if (folio_test_swapbacked(folio))
880 mss->shmem_thp += HPAGE_PMD_SIZE;
881 else if (folio_is_zone_device(folio))
882 /* pass */;
883 else
884 mss->file_thp += HPAGE_PMD_SIZE;
885
886 smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
593 }
594 if (IS_ERR_OR_NULL(page))
595 return;
596 folio = page_folio(page);
597 if (folio_test_anon(folio))
598 mss->anonymous_thp += HPAGE_PMD_SIZE;
599 else if (folio_test_swapbacked(folio))
600 mss->shmem_thp += HPAGE_PMD_SIZE;
601 else if (folio_is_zone_device(folio))
602 /* pass */;
603 else
604 mss->file_thp += HPAGE_PMD_SIZE;
605
606 smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
887 locked, present);
607 locked, migration);
888}
889#else
890static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
891 struct mm_walk *walk)
892{
893}
894#endif
895

--- 87 unchanged lines hidden ---

983#endif /* CONFIG_ARCH_HAS_PKEYS */
984#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
985 [ilog2(VM_UFFD_MINOR)] = "ui",
986#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
987#ifdef CONFIG_X86_USER_SHADOW_STACK
988 [ilog2(VM_SHADOW_STACK)] = "ss",
989#endif
990#ifdef CONFIG_64BIT
608}
609#else
610static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
611 struct mm_walk *walk)
612{
613}
614#endif
615

--- 87 unchanged lines hidden ---

703#endif /* CONFIG_ARCH_HAS_PKEYS */
704#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
705 [ilog2(VM_UFFD_MINOR)] = "ui",
706#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
707#ifdef CONFIG_X86_USER_SHADOW_STACK
708 [ilog2(VM_SHADOW_STACK)] = "ss",
709#endif
710#ifdef CONFIG_64BIT
711 [ilog2(VM_DROPPABLE)] = "dp",
991 [ilog2(VM_SEALED)] = "sl",
992#endif
993 };
994 size_t i;
995
996 seq_puts(m, "VmFlags: ");
997 for (i = 0; i < BITS_PER_LONG; i++) {
998 if (!mnemonics[i][0])

--- 9 unchanged lines hidden ---

1008
1009#ifdef CONFIG_HUGETLB_PAGE
1010static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
1011 unsigned long addr, unsigned long end,
1012 struct mm_walk *walk)
1013{
1014 struct mem_size_stats *mss = walk->private;
1015 struct vm_area_struct *vma = walk->vma;
712 [ilog2(VM_SEALED)] = "sl",
713#endif
714 };
715 size_t i;
716
717 seq_puts(m, "VmFlags: ");
718 for (i = 0; i < BITS_PER_LONG; i++) {
719 if (!mnemonics[i][0])

--- 9 unchanged lines hidden ---

729
730#ifdef CONFIG_HUGETLB_PAGE
731static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
732 unsigned long addr, unsigned long end,
733 struct mm_walk *walk)
734{
735 struct mem_size_stats *mss = walk->private;
736 struct vm_area_struct *vma = walk->vma;
1016 pte_t ptent = huge_ptep_get(walk->mm, addr, pte);
737 pte_t ptent = huge_ptep_get(pte);
1017 struct folio *folio = NULL;
738 struct folio *folio = NULL;
1018 bool present = false;
1019
1020 if (pte_present(ptent)) {
1021 folio = page_folio(pte_page(ptent));
739
740 if (pte_present(ptent)) {
741 folio = page_folio(pte_page(ptent));
1022 present = true;
1023 } else if (is_swap_pte(ptent)) {
1024 swp_entry_t swpent = pte_to_swp_entry(ptent);
1025
1026 if (is_pfn_swap_entry(swpent))
1027 folio = pfn_swap_entry_folio(swpent);
1028 }
742 } else if (is_swap_pte(ptent)) {
743 swp_entry_t swpent = pte_to_swp_entry(ptent);
744
745 if (is_pfn_swap_entry(swpent))
746 folio = pfn_swap_entry_folio(swpent);
747 }
1029
1030 if (folio) {
748 if (folio) {
1031 /* We treat non-present entries as "maybe shared". */
1032 if (!present || folio_likely_mapped_shared(folio) ||
749 if (folio_likely_mapped_shared(folio) ||
1033 hugetlb_pmd_shared(pte))
1034 mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
1035 else
1036 mss->private_hugetlb += huge_page_size(hstate_vma(vma));
1037 }
1038 return 0;
1039}
1040#else

--- 329 unchanged lines hidden ---

1370struct clear_refs_private {
1371 enum clear_refs_types type;
1372};
1373
1374#ifdef CONFIG_MEM_SOFT_DIRTY
1375
1376static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1377{
750 hugetlb_pmd_shared(pte))
751 mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
752 else
753 mss->private_hugetlb += huge_page_size(hstate_vma(vma));
754 }
755 return 0;
756}
757#else

--- 329 unchanged lines hidden ---

1087struct clear_refs_private {
1088 enum clear_refs_types type;
1089};
1090
1091#ifdef CONFIG_MEM_SOFT_DIRTY
1092
1093static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1094{
1378 struct folio *folio;
1095 struct page *page;
1379
1380 if (!pte_write(pte))
1381 return false;
1382 if (!is_cow_mapping(vma->vm_flags))
1383 return false;
1384 if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)))
1385 return false;
1096
1097 if (!pte_write(pte))
1098 return false;
1099 if (!is_cow_mapping(vma->vm_flags))
1100 return false;
1101 if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)))
1102 return false;
1386 folio = vm_normal_folio(vma, addr, pte);
1387 if (!folio)
1103 page = vm_normal_page(vma, addr, pte);
1104 if (!page)
1388 return false;
1105 return false;
1389 return folio_maybe_dma_pinned(folio);
1106 return page_maybe_dma_pinned(page);
1390}
1391
1392static inline void clear_soft_dirty(struct vm_area_struct *vma,
1393 unsigned long addr, pte_t *pte)
1394{
1395 /*
1396 * The soft-dirty tracker uses #PF-s to catch writes
1397 * to pages, so write-protect the pte as well. See the

--- 299 unchanged lines hidden (view full) ---

1697 return err;
1698}
1699
1700static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
1701 struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1702{
1703 u64 frame = 0, flags = 0;
1704 struct page *page = NULL;
1107}
1108
1109static inline void clear_soft_dirty(struct vm_area_struct *vma,
1110 unsigned long addr, pte_t *pte)
1111{
1112 /*
1113 * The soft-dirty tracker uses #PF-s to catch writes
1114 * to pages, so write-protect the pte as well. See the

--- 299 unchanged lines hidden (view full) ---

1414 return err;
1415}
1416
1417static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
1418 struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1419{
1420 u64 frame = 0, flags = 0;
1421 struct page *page = NULL;
1705 struct folio *folio;
1422 bool migration = false;
1706
1707 if (pte_present(pte)) {
1708 if (pm->show_pfn)
1709 frame = pte_pfn(pte);
1710 flags |= PM_PRESENT;
1711 page = vm_normal_page(vma, addr, pte);
1712 if (pte_soft_dirty(pte))
1713 flags |= PM_SOFT_DIRTY;

--- 15 unchanged lines hidden (view full) ---

1729 if (is_pfn_swap_entry(entry))
1730 offset = swp_offset_pfn(entry);
1731 else
1732 offset = swp_offset(entry);
1733 frame = swp_type(entry) |
1734 (offset << MAX_SWAPFILES_SHIFT);
1735 }
1736 flags |= PM_SWAP;
1423
1424 if (pte_present(pte)) {
1425 if (pm->show_pfn)
1426 frame = pte_pfn(pte);
1427 flags |= PM_PRESENT;
1428 page = vm_normal_page(vma, addr, pte);
1429 if (pte_soft_dirty(pte))
1430 flags |= PM_SOFT_DIRTY;

--- 15 unchanged lines hidden ---

1446 if (is_pfn_swap_entry(entry))
1447 offset = swp_offset_pfn(entry);
1448 else
1449 offset = swp_offset(entry);
1450 frame = swp_type(entry) |
1451 (offset << MAX_SWAPFILES_SHIFT);
1452 }
1453 flags |= PM_SWAP;
1454 migration = is_migration_entry(entry);
1737 if (is_pfn_swap_entry(entry))
1738 page = pfn_swap_entry_to_page(entry);
1739 if (pte_marker_entry_uffd_wp(entry))
1740 flags |= PM_UFFD_WP;
1741 }
1742
1455 if (is_pfn_swap_entry(entry))
1456 page = pfn_swap_entry_to_page(entry);
1457 if (pte_marker_entry_uffd_wp(entry))
1458 flags |= PM_UFFD_WP;
1459 }
1460
1743 if (page) {
1744 folio = page_folio(page);
1745 if (!folio_test_anon(folio))
1746 flags |= PM_FILE;
1747 if ((flags & PM_PRESENT) &&
1748 folio_precise_page_mapcount(folio, page) == 1)
1749 flags |= PM_MMAP_EXCLUSIVE;
1750 }
1461 if (page && !PageAnon(page))
1462 flags |= PM_FILE;
1463 if (page && !migration && page_mapcount(page) == 1)
1464 flags |= PM_MMAP_EXCLUSIVE;
1751 if (vma->vm_flags & VM_SOFTDIRTY)
1752 flags |= PM_SOFT_DIRTY;
1753
1754 return make_pme(frame, flags);
1755}
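pte_to_pagemap_entry() packs each /proc/<pid>/pagemap entry into one 64-bit word: PFN (or swap type/offset) in the low bits plus the PM_* flag bits set above. A hedged userspace sketch that decodes one entry, following the bit layout documented in Documentation/admin-guide/mm/pagemap.rst (bit 63 present, bit 62 swapped, bit 61 file/shared-anon, bit 56 exclusively mapped, bit 55 soft-dirty, bits 0-54 PFN); note the PFN field is reported as zero to unprivileged readers, matching pm->show_pfn in the code:

	/* Sketch only: look up the pagemap entry for one virtual address of the
	 * current process. Bit positions per pagemap.rst, not taken verbatim
	 * from this kernel source. */
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int show_pagemap_entry(unsigned long vaddr)
	{
		long page_size = sysconf(_SC_PAGESIZE);
		uint64_t entry;
		int fd;

		fd = open("/proc/self/pagemap", O_RDONLY);
		if (fd < 0)
			return -1;

		/* one 8-byte entry per virtual page, indexed by virtual page number */
		if (pread(fd, &entry, sizeof(entry),
			  (off_t)(vaddr / page_size) * sizeof(entry)) != sizeof(entry)) {
			close(fd);
			return -1;
		}
		close(fd);

		printf("present=%d swapped=%d file/shared=%d exclusive=%d soft-dirty=%d pfn=0x%llx\n",
		       (int)(entry >> 63 & 1), (int)(entry >> 62 & 1),
		       (int)(entry >> 61 & 1), (int)(entry >> 56 & 1),
		       (int)(entry >> 55 & 1),
		       (unsigned long long)(entry & ((1ULL << 55) - 1)));
		return 0;
	}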
1756
1757static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
1758 struct mm_walk *walk)
1759{
1760 struct vm_area_struct *vma = walk->vma;
1761 struct pagemapread *pm = walk->private;
1762 spinlock_t *ptl;
1763 pte_t *pte, *orig_pte;
1764 int err = 0;
1765#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1465 if (vma->vm_flags & VM_SOFTDIRTY)
1466 flags |= PM_SOFT_DIRTY;
1467
1468 return make_pme(frame, flags);
1469}
1470
1471static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
1472 struct mm_walk *walk)
1473{
1474 struct vm_area_struct *vma = walk->vma;
1475 struct pagemapread *pm = walk->private;
1476 spinlock_t *ptl;
1477 pte_t *pte, *orig_pte;
1478 int err = 0;
1479#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1480 bool migration = false;
1766
1767 ptl = pmd_trans_huge_lock(pmdp, vma);
1768 if (ptl) {
1481
1482 ptl = pmd_trans_huge_lock(pmdp, vma);
1483 if (ptl) {
1769 unsigned int idx = (addr & ~PMD_MASK) >> PAGE_SHIFT;
1770 u64 flags = 0, frame = 0;
1771 pmd_t pmd = *pmdp;
1772 struct page *page = NULL;
1484 u64 flags = 0, frame = 0;
1485 pmd_t pmd = *pmdp;
1486 struct page *page = NULL;
1773 struct folio *folio = NULL;
1774
1775 if (vma->vm_flags & VM_SOFTDIRTY)
1776 flags |= PM_SOFT_DIRTY;
1777
1778 if (pmd_present(pmd)) {
1779 page = pmd_page(pmd);
1780
1781 flags |= PM_PRESENT;
1782 if (pmd_soft_dirty(pmd))
1783 flags |= PM_SOFT_DIRTY;
1784 if (pmd_uffd_wp(pmd))
1785 flags |= PM_UFFD_WP;
1786 if (pm->show_pfn)
1487
1488 if (vma->vm_flags & VM_SOFTDIRTY)
1489 flags |= PM_SOFT_DIRTY;
1490
1491 if (pmd_present(pmd)) {
1492 page = pmd_page(pmd);
1493
1494 flags |= PM_PRESENT;
1495 if (pmd_soft_dirty(pmd))
1496 flags |= PM_SOFT_DIRTY;
1497 if (pmd_uffd_wp(pmd))
1498 flags |= PM_UFFD_WP;
1499 if (pm->show_pfn)
1787 frame = pmd_pfn(pmd) + idx;
1500 frame = pmd_pfn(pmd) +
1501 ((addr & ~PMD_MASK) >> PAGE_SHIFT);
1788 }
1789#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1790 else if (is_swap_pmd(pmd)) {
1791 swp_entry_t entry = pmd_to_swp_entry(pmd);
1792 unsigned long offset;
1793
1794 if (pm->show_pfn) {
1795 if (is_pfn_swap_entry(entry))
1502 }
1503#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1504 else if (is_swap_pmd(pmd)) {
1505 swp_entry_t entry = pmd_to_swp_entry(pmd);
1506 unsigned long offset;
1507
1508 if (pm->show_pfn) {
1509 if (is_pfn_swap_entry(entry))
1796 offset = swp_offset_pfn(entry) + idx;
1510 offset = swp_offset_pfn(entry);
1797 else
1511 else
1798 offset = swp_offset(entry) + idx;
1512 offset = swp_offset(entry);
1513 offset = offset +
1514 ((addr & ~PMD_MASK) >> PAGE_SHIFT);
1799 frame = swp_type(entry) |
1800 (offset << MAX_SWAPFILES_SHIFT);
1801 }
1802 flags |= PM_SWAP;
1803 if (pmd_swp_soft_dirty(pmd))
1804 flags |= PM_SOFT_DIRTY;
1805 if (pmd_swp_uffd_wp(pmd))
1806 flags |= PM_UFFD_WP;
1807 VM_BUG_ON(!is_pmd_migration_entry(pmd));
1515 frame = swp_type(entry) |
1516 (offset << MAX_SWAPFILES_SHIFT);
1517 }
1518 flags |= PM_SWAP;
1519 if (pmd_swp_soft_dirty(pmd))
1520 flags |= PM_SOFT_DIRTY;
1521 if (pmd_swp_uffd_wp(pmd))
1522 flags |= PM_UFFD_WP;
1523 VM_BUG_ON(!is_pmd_migration_entry(pmd));
1524 migration = is_migration_entry(entry);
1808 page = pfn_swap_entry_to_page(entry);
1809 }
1810#endif
1811
1525 page = pfn_swap_entry_to_page(entry);
1526 }
1527#endif
1528
1812 if (page) {
1813 folio = page_folio(page);
1814 if (!folio_test_anon(folio))
1815 flags |= PM_FILE;
1816 }
1529 if (page && !migration && page_mapcount(page) == 1)
1530 flags |= PM_MMAP_EXCLUSIVE;
1817
1531
1818 for (; addr != end; addr += PAGE_SIZE, idx++) {
1819 unsigned long cur_flags = flags;
1820 pagemap_entry_t pme;
1532 for (; addr != end; addr += PAGE_SIZE) {
1533 pagemap_entry_t pme = make_pme(frame, flags);
1821
1534
1822 if (folio && (flags & PM_PRESENT) &&
1823 folio_precise_page_mapcount(folio, page + idx) == 1)
1824 cur_flags |= PM_MMAP_EXCLUSIVE;
1825
1826 pme = make_pme(frame, cur_flags);
1827 err = add_to_pagemap(&pme, pm);
1828 if (err)
1829 break;
1830 if (pm->show_pfn) {
1831 if (flags & PM_PRESENT)
1832 frame++;
1833 else if (flags & PM_SWAP)
1834 frame += (1 << MAX_SWAPFILES_SHIFT);

--- 38 unchanged lines hidden ---

1873 struct vm_area_struct *vma = walk->vma;
1874 u64 flags = 0, frame = 0;
1875 int err = 0;
1876 pte_t pte;
1877
1878 if (vma->vm_flags & VM_SOFTDIRTY)
1879 flags |= PM_SOFT_DIRTY;
1880
1535 err = add_to_pagemap(&pme, pm);
1536 if (err)
1537 break;
1538 if (pm->show_pfn) {
1539 if (flags & PM_PRESENT)
1540 frame++;
1541 else if (flags & PM_SWAP)
1542 frame += (1 << MAX_SWAPFILES_SHIFT);

--- 38 unchanged lines hidden ---

1581 struct vm_area_struct *vma = walk->vma;
1582 u64 flags = 0, frame = 0;
1583 int err = 0;
1584 pte_t pte;
1585
1586 if (vma->vm_flags & VM_SOFTDIRTY)
1587 flags |= PM_SOFT_DIRTY;
1588
1881 pte = huge_ptep_get(walk->mm, addr, ptep);
1589 pte = huge_ptep_get(ptep);
1882 if (pte_present(pte)) {
1883 struct folio *folio = page_folio(pte_page(pte));
1884
1885 if (!folio_test_anon(folio))
1886 flags |= PM_FILE;
1887
1888 if (!folio_likely_mapped_shared(folio) &&
1889 !hugetlb_pmd_shared(ptep))

--- 672 unchanged lines hidden ---

2562 unsigned long categories;
2563 spinlock_t *ptl;
2564 int ret = 0;
2565 pte_t pte;
2566
2567 if (~p->arg.flags & PM_SCAN_WP_MATCHING) {
2568 /* Go the short route when not write-protecting pages. */
2569
1590 if (pte_present(pte)) {
1591 struct folio *folio = page_folio(pte_page(pte));
1592
1593 if (!folio_test_anon(folio))
1594 flags |= PM_FILE;
1595
1596 if (!folio_likely_mapped_shared(folio) &&
1597 !hugetlb_pmd_shared(ptep))

--- 672 unchanged lines hidden ---

2270 unsigned long categories;
2271 spinlock_t *ptl;
2272 int ret = 0;
2273 pte_t pte;
2274
2275 if (~p->arg.flags & PM_SCAN_WP_MATCHING) {
2276 /* Go the short route when not write-protecting pages. */
2277
2570 pte = huge_ptep_get(walk->mm, start, ptep);
2278 pte = huge_ptep_get(ptep);
2571 categories = p->cur_vma_category | pagemap_hugetlb_category(pte);
2572
2573 if (!pagemap_scan_is_interesting_page(categories, p))
2574 return 0;
2575
2576 return pagemap_scan_output(categories, p, start, &end);
2577 }
2578
2579 i_mmap_lock_write(vma->vm_file->f_mapping);
2580 ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, ptep);
2581
2279 categories = p->cur_vma_category | pagemap_hugetlb_category(pte);
2280
2281 if (!pagemap_scan_is_interesting_page(categories, p))
2282 return 0;
2283
2284 return pagemap_scan_output(categories, p, start, &end);
2285 }
2286
2287 i_mmap_lock_write(vma->vm_file->f_mapping);
2288 ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, ptep);
2289
2582 pte = huge_ptep_get(walk->mm, start, ptep);
2290 pte = huge_ptep_get(ptep);
2583 categories = p->cur_vma_category | pagemap_hugetlb_category(pte);
2584
2585 if (!pagemap_scan_is_interesting_page(categories, p))
2586 goto out_unlock;
2587
2588 ret = pagemap_scan_output(categories, p, start, &end);
2589 if (start == end)
2590 goto out_unlock;

--- 263 unchanged lines hidden ---

2854 struct proc_maps_private proc_maps;
2855 struct numa_maps md;
2856};
2857
2858static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
2859 unsigned long nr_pages)
2860{
2861 struct folio *folio = page_folio(page);
2291 categories = p->cur_vma_category | pagemap_hugetlb_category(pte);
2292
2293 if (!pagemap_scan_is_interesting_page(categories, p))
2294 goto out_unlock;
2295
2296 ret = pagemap_scan_output(categories, p, start, &end);
2297 if (start == end)
2298 goto out_unlock;

--- 263 unchanged lines hidden ---

2562 struct proc_maps_private proc_maps;
2563 struct numa_maps md;
2564};
2565
2566static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
2567 unsigned long nr_pages)
2568{
2569 struct folio *folio = page_folio(page);
2862 int count = folio_precise_page_mapcount(folio, page);
2570 int count = page_mapcount(page);
2863
2864 md->pages += nr_pages;
2865 if (pte_dirty || folio_test_dirty(folio))
2866 md->dirty += nr_pages;
2867
2868 if (folio_test_swapcache(folio))
2869 md->swapcache += nr_pages;
2870

--- 99 unchanged lines hidden ---

2970 pte_unmap_unlock(orig_pte, ptl);
2971 cond_resched();
2972 return 0;
2973}
2974#ifdef CONFIG_HUGETLB_PAGE
2975static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
2976 unsigned long addr, unsigned long end, struct mm_walk *walk)
2977{
2571
2572 md->pages += nr_pages;
2573 if (pte_dirty || folio_test_dirty(folio))
2574 md->dirty += nr_pages;
2575
2576 if (folio_test_swapcache(folio))
2577 md->swapcache += nr_pages;
2578

--- 99 unchanged lines hidden ---

2678 pte_unmap_unlock(orig_pte, ptl);
2679 cond_resched();
2680 return 0;
2681}
2682#ifdef CONFIG_HUGETLB_PAGE
2683static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
2684 unsigned long addr, unsigned long end, struct mm_walk *walk)
2685{
2978 pte_t huge_pte = huge_ptep_get(walk->mm, addr, pte);
2686 pte_t huge_pte = huge_ptep_get(pte);
2979 struct numa_maps *md;
2980 struct page *page;
2981
2982 if (!pte_present(huge_pte))
2983 return 0;
2984
2985 page = pte_page(huge_pte);
2986

--- 121 unchanged lines hidden ---
2687 struct numa_maps *md;
2688 struct page *page;
2689
2690 if (!pte_present(huge_pte))
2691 return 0;
2692
2693 page = pte_page(huge_pte);
2694

--- 121 unchanged lines hidden ---