// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/mm_inline.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/ksm.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/uaccess.h>
#include <linux/pkeys.h>
#include <linux/minmax.h>
#include <linux/overflow.h>

#include <asm/elf.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include "internal.h"

#define SEQ_PUT_DEC(str, val) \
		seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long text, lib, swap, anon, file, shmem;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	anon = get_mm_counter(mm, MM_ANONPAGES);
	file = get_mm_counter(mm, MM_FILEPAGES);
	shmem = get_mm_counter(mm, MM_SHMEMPAGES);

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = anon + file + shmem;
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	/* split executable areas between text and lib */
	text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK);
	text = min(text, mm->exec_vm << PAGE_SHIFT);
	lib = (mm->exec_vm << PAGE_SHIFT) - text;

	swap = get_mm_counter(mm, MM_SWAPENTS);
	SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
	SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
	SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
	SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm));
	SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss);
	SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss);
	SEQ_PUT_DEC(" kB\nRssAnon:\t", anon);
	SEQ_PUT_DEC(" kB\nRssFile:\t", file);
	SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem);
	SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm);
	SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm);
	seq_put_decimal_ull_width(m,
		    " kB\nVmExe:\t", text >> 10, 8);
	seq_put_decimal_ull_width(m,
		    " kB\nVmLib:\t", lib >> 10, 8);
	seq_put_decimal_ull_width(m,
		    " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8);
	SEQ_PUT_DEC(" kB\nVmSwap:\t", swap);
	seq_puts(m, " kB\n");
	hugetlb_report_usage(m, mm);
}
#undef SEQ_PUT_DEC

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES) +
			get_mm_counter(mm, MM_SHMEMPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->data_vm + mm->stack_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}
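/*
 * Illustrative note (not part of the original source): the counters above
 * are kept in pages.  task_mem() converts them to KiB by shifting left by
 * (PAGE_SHIFT - 10), so with 4 KiB pages a value of 25 pages is printed as
 * 100 kB.  task_statm() feeds /proc/<pid>/statm, whose columns (size,
 * resident, shared, text, lib, data, dt) stay in pages and must be scaled
 * by the page size in userspace, e.g.:
 *
 *	long page_kb = sysconf(_SC_PAGESIZE) / 1024;
 *	// VmSize in kB is roughly size_in_pages * page_kb
 */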
#ifdef CONFIG_NUMA
/*
 * Save get_task_policy() for show_numa_map().
 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
	struct task_struct *task = priv->task;

	task_lock(task);
	priv->task_mempolicy = get_task_policy(task);
	mpol_get(priv->task_mempolicy);
	task_unlock(task);
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
	mpol_put(priv->task_mempolicy);
}
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
#endif

static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv,
						loff_t *ppos)
{
	struct vm_area_struct *vma = vma_next(&priv->iter);

	if (vma) {
		*ppos = vma->vm_start;
	} else {
		*ppos = -2UL;
		vma = get_gate_vma(priv->mm);
	}

	return vma;
}

static void *m_start(struct seq_file *m, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = *ppos;
	struct mm_struct *mm;

	/* See m_next(). Zero at the start or after lseek. */
	if (last_addr == -1UL)
		return NULL;

	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !mmget_not_zero(mm)) {
		put_task_struct(priv->task);
		priv->task = NULL;
		return NULL;
	}

	if (mmap_read_lock_killable(mm)) {
		mmput(mm);
		put_task_struct(priv->task);
		priv->task = NULL;
		return ERR_PTR(-EINTR);
	}

	vma_iter_init(&priv->iter, mm, last_addr);
	hold_task_mempolicy(priv);
	if (last_addr == -2UL)
		return get_gate_vma(mm);

	return proc_get_vma(priv, ppos);
}

static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
{
	if (*ppos == -2UL) {
		*ppos = -1UL;
		return NULL;
	}
	return proc_get_vma(m->private, ppos);
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct mm_struct *mm = priv->mm;

	if (!priv->task)
		return;

	release_task_mempolicy(priv);
	mmap_read_unlock(mm);
	mmput(mm);
	put_task_struct(priv->task);
	priv->task = NULL;
}

static int proc_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops, int psize)
{
	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);

	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		int err = PTR_ERR(priv->mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}

static int proc_map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	return seq_release_private(inode, file);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	return proc_maps_open(inode, file, ops,
				sizeof(struct proc_maps_private));
}

static void show_vma_header_prefix(struct seq_file *m,
				   unsigned long start, unsigned long end,
				   vm_flags_t flags, unsigned long long pgoff,
				   dev_t dev, unsigned long ino)
{
	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_put_hex_ll(m, NULL, start, 8);
	seq_put_hex_ll(m, "-", end, 8);
	seq_putc(m, ' ');
	seq_putc(m, flags & VM_READ ? 'r' : '-');
	seq_putc(m, flags & VM_WRITE ? 'w' : '-');
	seq_putc(m, flags & VM_EXEC ? 'x' : '-');
	seq_putc(m, flags & VM_MAYSHARE ? 's' : 'p');
	seq_put_hex_ll(m, " ", pgoff, 8);
	seq_put_hex_ll(m, " ", MAJOR(dev), 2);
	seq_put_hex_ll(m, ":", MINOR(dev), 2);
	seq_put_decimal_ull(m, " ", ino);
	seq_putc(m, ' ');
}

static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
{
	struct anon_vma_name *anon_name = NULL;
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	const char *name = NULL;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	start = vma->vm_start;
	end = vma->vm_end;
	show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);
	if (mm)
		anon_name = anon_vma_name(vma);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		seq_pad(m, ' ');
		/*
		 * If user named this anon shared memory via
		 * prctl(PR_SET_VMA ..., use the provided name.
		 */
		if (anon_name)
			seq_printf(m, "[anon_shmem:%s]", anon_name->name);
		else
			seq_path(m, file_user_path(file), "\n");
		goto done;
	}

	if (vma->vm_ops && vma->vm_ops->name) {
		name = vma->vm_ops->name(vma);
		if (name)
			goto done;
	}

	name = arch_vma_name(vma);
	if (!name) {
		if (!mm) {
			name = "[vdso]";
			goto done;
		}

		if (vma_is_initial_heap(vma)) {
			name = "[heap]";
			goto done;
		}

		if (vma_is_initial_stack(vma)) {
			name = "[stack]";
			goto done;
		}

		if (anon_name) {
			seq_pad(m, ' ');
			seq_printf(m, "[anon:%s]", anon_name->name);
		}
	}

done:
	if (name) {
		seq_pad(m, ' ');
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}

static int show_map(struct seq_file *m, void *v)
{
	show_map_vma(m, v);
	return 0;
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

/*
 * Proportional Set Size(PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter to minimize division errors. So (pss >>
 * PSS_SHIFT) would be the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 * 	- 1M 3-user-pages add up to 8KB errors;
 * 	- supports mapcount up to 2^24, or 16M;
 * 	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
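/*
 * Worked example (illustrative only): with PAGE_SIZE == 4096 and
 * PSS_SHIFT == 12, a page shared by three processes contributes
 * (4096 << 12) / 3 = 5592405 to the fixed-point counter, which reads
 * back as 5592405 >> 12 = 1365 bytes - very close to the exact
 * 4096 / 3 = 1365.33 bytes.  Dividing in the scaled domain and only
 * shifting down when reporting is what keeps the accumulated rounding
 * error small.
 */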

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long lazyfree;
	unsigned long anonymous_thp;
	unsigned long shmem_thp;
	unsigned long file_thp;
	unsigned long swap;
	unsigned long shared_hugetlb;
	unsigned long private_hugetlb;
	unsigned long ksm;
	u64 pss;
	u64 pss_anon;
	u64 pss_file;
	u64 pss_shmem;
	u64 pss_dirty;
	u64 pss_locked;
	u64 swap_pss;
};

static void smaps_page_accumulate(struct mem_size_stats *mss,
		struct page *page, unsigned long size, unsigned long pss,
		bool dirty, bool locked, bool private)
{
	mss->pss += pss;

	if (PageAnon(page))
		mss->pss_anon += pss;
	else if (PageSwapBacked(page))
		mss->pss_shmem += pss;
	else
		mss->pss_file += pss;

	if (locked)
		mss->pss_locked += pss;

	if (dirty || PageDirty(page)) {
		mss->pss_dirty += pss;
		if (private)
			mss->private_dirty += size;
		else
			mss->shared_dirty += size;
	} else {
		if (private)
			mss->private_clean += size;
		else
			mss->shared_clean += size;
	}
}

static void smaps_account(struct mem_size_stats *mss, struct page *page,
		bool compound, bool young, bool dirty, bool locked,
		bool migration)
{
	int i, nr = compound ? compound_nr(page) : 1;
	unsigned long size = nr * PAGE_SIZE;

	/*
	 * First accumulate quantities that depend only on |size| and the type
	 * of the compound page.
	 */
	if (PageAnon(page)) {
		mss->anonymous += size;
		if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
			mss->lazyfree += size;
	}

	if (PageKsm(page))
		mss->ksm += size;

	mss->resident += size;
	/* Accumulate the size in pages that have been accessed. */
	if (young || page_is_young(page) || PageReferenced(page))
		mss->referenced += size;

	/*
	 * Then accumulate quantities that may depend on sharing, or that may
	 * differ page-by-page.
	 *
	 * page_count(page) == 1 guarantees the page is mapped exactly once.
	 * If any subpage of the compound page mapped with PTE it would elevate
	 * page_count().
	 *
	 * The page_mapcount() is called to get a snapshot of the mapcount.
	 * Without holding the page lock this snapshot can be slightly wrong as
	 * we cannot always read the mapcount atomically.  It is not safe to
	 * call page_mapcount() even with PTL held if the page is not mapped,
	 * especially for migration entries.  Treat regular migration entries
	 * as mapcount == 1.
	 */
	if ((page_count(page) == 1) || migration) {
		smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
			locked, true);
		return;
	}
	for (i = 0; i < nr; i++, page++) {
		int mapcount = page_mapcount(page);
		unsigned long pss = PAGE_SIZE << PSS_SHIFT;
		if (mapcount >= 2)
			pss /= mapcount;
		smaps_page_accumulate(mss, page, PAGE_SIZE, pss, dirty, locked,
				      mapcount < 2);
	}
}

#ifdef CONFIG_SHMEM
static int smaps_pte_hole(unsigned long addr, unsigned long end,
			  __always_unused int depth, struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;

	mss->swap += shmem_partial_swap_usage(walk->vma->vm_file->f_mapping,
					      linear_page_index(vma, addr),
					      linear_page_index(vma, end));

	return 0;
}
#else
#define smaps_pte_hole		NULL
#endif /* CONFIG_SHMEM */

static void smaps_pte_hole_lookup(unsigned long addr, struct mm_walk *walk)
{
#ifdef CONFIG_SHMEM
	if (walk->ops->pte_hole) {
		/* depth is not used */
		smaps_pte_hole(addr, addr + PAGE_SIZE, 0, walk);
	}
#endif
}

static void smaps_pte_entry(pte_t *pte, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	bool locked = !!(vma->vm_flags & VM_LOCKED);
	struct page *page = NULL;
	bool migration = false, young = false, dirty = false;
	pte_t ptent = ptep_get(pte);

	if (pte_present(ptent)) {
		page = vm_normal_page(vma, addr, ptent);
		young = pte_young(ptent);
		dirty = pte_dirty(ptent);
	} else if (is_swap_pte(ptent)) {
		swp_entry_t swpent = pte_to_swp_entry(ptent);

		if (!non_swap_entry(swpent)) {
			int mapcount;

			mss->swap += PAGE_SIZE;
			mapcount = swp_swapcount(swpent);
			if (mapcount >= 2) {
				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;

				do_div(pss_delta, mapcount);
				mss->swap_pss += pss_delta;
			} else {
				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
			}
		} else if (is_pfn_swap_entry(swpent)) {
			if (is_migration_entry(swpent))
				migration = true;
			page = pfn_swap_entry_to_page(swpent);
		}
	} else {
		smaps_pte_hole_lookup(addr, walk);
		return;
	}

	if (!page)
		return;

	smaps_account(mss, page, false, young, dirty, locked, migration);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	bool locked = !!(vma->vm_flags & VM_LOCKED);
	struct page *page = NULL;
	bool migration = false;

	if (pmd_present(*pmd)) {
		page = vm_normal_page_pmd(vma, addr, *pmd);
	} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
		swp_entry_t entry = pmd_to_swp_entry(*pmd);

		if (is_migration_entry(entry)) {
			migration = true;
			page = pfn_swap_entry_to_page(entry);
		}
	}
	if (IS_ERR_OR_NULL(page))
		return;
	if (PageAnon(page))
		mss->anonymous_thp += HPAGE_PMD_SIZE;
	else if (PageSwapBacked(page))
		mss->shmem_thp += HPAGE_PMD_SIZE;
	else if (is_zone_device_page(page))
		/* pass */;
	else
		mss->file_thp += HPAGE_PMD_SIZE;

	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
		      locked, migration);
}
#else
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
}
#endif

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		smaps_pmd_entry(pmd, addr, walk);
		spin_unlock(ptl);
		goto out;
	}

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (!pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(pte, addr, walk);
	pte_unmap_unlock(pte - 1, ptl);
out:
	cond_resched();
	return 0;
}

static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
	/*
	 * Don't forget to update Documentation/ on changes.
	 */
	static const char mnemonics[BITS_PER_LONG][2] = {
		/*
		 * In case if we meet a flag we don't know about.
		 */
		[0 ... (BITS_PER_LONG-1)] = "??",

		[ilog2(VM_READ)]	= "rd",
		[ilog2(VM_WRITE)]	= "wr",
		[ilog2(VM_EXEC)]	= "ex",
		[ilog2(VM_SHARED)]	= "sh",
		[ilog2(VM_MAYREAD)]	= "mr",
		[ilog2(VM_MAYWRITE)]	= "mw",
		[ilog2(VM_MAYEXEC)]	= "me",
		[ilog2(VM_MAYSHARE)]	= "ms",
		[ilog2(VM_GROWSDOWN)]	= "gd",
		[ilog2(VM_PFNMAP)]	= "pf",
		[ilog2(VM_LOCKED)]	= "lo",
		[ilog2(VM_IO)]		= "io",
		[ilog2(VM_SEQ_READ)]	= "sr",
		[ilog2(VM_RAND_READ)]	= "rr",
		[ilog2(VM_DONTCOPY)]	= "dc",
		[ilog2(VM_DONTEXPAND)]	= "de",
		[ilog2(VM_LOCKONFAULT)]	= "lf",
		[ilog2(VM_ACCOUNT)]	= "ac",
		[ilog2(VM_NORESERVE)]	= "nr",
		[ilog2(VM_HUGETLB)]	= "ht",
		[ilog2(VM_SYNC)]	= "sf",
		[ilog2(VM_ARCH_1)]	= "ar",
		[ilog2(VM_WIPEONFORK)]	= "wf",
		[ilog2(VM_DONTDUMP)]	= "dd",
#ifdef CONFIG_ARM64_BTI
		[ilog2(VM_ARM64_BTI)]	= "bt",
#endif
#ifdef CONFIG_MEM_SOFT_DIRTY
		[ilog2(VM_SOFTDIRTY)]	= "sd",
#endif
		[ilog2(VM_MIXEDMAP)]	= "mm",
		[ilog2(VM_HUGEPAGE)]	= "hg",
		[ilog2(VM_NOHUGEPAGE)]	= "nh",
		[ilog2(VM_MERGEABLE)]	= "mg",
		[ilog2(VM_UFFD_MISSING)]= "um",
		[ilog2(VM_UFFD_WP)]	= "uw",
#ifdef CONFIG_ARM64_MTE
		[ilog2(VM_MTE)]		= "mt",
		[ilog2(VM_MTE_ALLOWED)] = "",
#endif
#ifdef CONFIG_ARCH_HAS_PKEYS
		/* These come out via ProtectionKey: */
		[ilog2(VM_PKEY_BIT0)]	= "",
		[ilog2(VM_PKEY_BIT1)]	= "",
		[ilog2(VM_PKEY_BIT2)]	= "",
		[ilog2(VM_PKEY_BIT3)]	= "",
#if VM_PKEY_BIT4
		[ilog2(VM_PKEY_BIT4)]	= "",
#endif
#endif /* CONFIG_ARCH_HAS_PKEYS */
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
		[ilog2(VM_UFFD_MINOR)]	= "ui",
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
#ifdef CONFIG_X86_USER_SHADOW_STACK
		[ilog2(VM_SHADOW_STACK)] = "ss",
#endif
	};
	size_t i;

	seq_puts(m, "VmFlags: ");
	for (i = 0; i < BITS_PER_LONG; i++) {
		if (!mnemonics[i][0])
			continue;
		if (vma->vm_flags & (1UL << i)) {
			seq_putc(m, mnemonics[i][0]);
			seq_putc(m, mnemonics[i][1]);
			seq_putc(m, ' ');
		}
	}
	seq_putc(m, '\n');
}

#ifdef CONFIG_HUGETLB_PAGE
static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct page *page = NULL;
	pte_t ptent = ptep_get(pte);

	if (pte_present(ptent)) {
		page = vm_normal_page(vma, addr, ptent);
	} else if (is_swap_pte(ptent)) {
		swp_entry_t swpent = pte_to_swp_entry(ptent);

		if (is_pfn_swap_entry(swpent))
			page = pfn_swap_entry_to_page(swpent);
	}
	if (page) {
		if (page_mapcount(page) >= 2 || hugetlb_pmd_shared(pte))
			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
		else
			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
	}
	return 0;
}
#else
#define smaps_hugetlb_range	NULL
#endif /* HUGETLB_PAGE */

static const struct mm_walk_ops smaps_walk_ops = {
	.pmd_entry		= smaps_pte_range,
	.hugetlb_entry		= smaps_hugetlb_range,
	.walk_lock		= PGWALK_RDLOCK,
};

static const struct mm_walk_ops smaps_shmem_walk_ops = {
	.pmd_entry		= smaps_pte_range,
	.hugetlb_entry		= smaps_hugetlb_range,
	.pte_hole		= smaps_pte_hole,
	.walk_lock		= PGWALK_RDLOCK,
};

/*
 * Gather mem stats from @vma with the indicated beginning
 * address @start, and keep them in @mss.
 *
 * Use vm_start of @vma as the beginning address if @start is 0.
 */
static void smap_gather_stats(struct vm_area_struct *vma,
		struct mem_size_stats *mss, unsigned long start)
{
	const struct mm_walk_ops *ops = &smaps_walk_ops;

	/* Invalid start */
	if (start >= vma->vm_end)
		return;

	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
		/*
		 * For shared or readonly shmem mappings we know that all
		 * swapped out pages belong to the shmem object, and we can
		 * obtain the swap value much more efficiently. For private
		 * writable mappings, we might have COW pages that are
		 * not affected by the parent swapped out pages of the shmem
		 * object, so we have to distinguish them during the page walk.
		 * Unless we know that the shmem object (or the part mapped by
		 * our VMA) has no swapped out pages at all.
		 */
		unsigned long shmem_swapped = shmem_swap_usage(vma);

		if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
					!(vma->vm_flags & VM_WRITE))) {
			mss->swap += shmem_swapped;
		} else {
			ops = &smaps_shmem_walk_ops;
		}
	}

	/* mmap_lock is held in m_start */
	if (!start)
		walk_page_vma(vma, ops, mss);
	else
		walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss);
}

#define SEQ_PUT_DEC(str, val) \
		seq_put_decimal_ull_width(m, str, (val) >> 10, 8)

/* Show the contents common for smaps and smaps_rollup */
static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
	bool rollup_mode)
{
	SEQ_PUT_DEC("Rss: ", mss->resident);
	SEQ_PUT_DEC(" kB\nPss: ", mss->pss >> PSS_SHIFT);
	SEQ_PUT_DEC(" kB\nPss_Dirty: ", mss->pss_dirty >> PSS_SHIFT);
	if (rollup_mode) {
		/*
		 * These are meaningful only for smaps_rollup, otherwise two of
		 * them are zero, and the other one is the same as Pss.
		 */
		SEQ_PUT_DEC(" kB\nPss_Anon: ",
			mss->pss_anon >> PSS_SHIFT);
		SEQ_PUT_DEC(" kB\nPss_File: ",
			mss->pss_file >> PSS_SHIFT);
		SEQ_PUT_DEC(" kB\nPss_Shmem: ",
			mss->pss_shmem >> PSS_SHIFT);
	}
	SEQ_PUT_DEC(" kB\nShared_Clean: ", mss->shared_clean);
	SEQ_PUT_DEC(" kB\nShared_Dirty: ", mss->shared_dirty);
	SEQ_PUT_DEC(" kB\nPrivate_Clean: ", mss->private_clean);
	SEQ_PUT_DEC(" kB\nPrivate_Dirty: ", mss->private_dirty);
	SEQ_PUT_DEC(" kB\nReferenced: ", mss->referenced);
	SEQ_PUT_DEC(" kB\nAnonymous: ", mss->anonymous);
	SEQ_PUT_DEC(" kB\nKSM: ", mss->ksm);
	SEQ_PUT_DEC(" kB\nLazyFree: ", mss->lazyfree);
	SEQ_PUT_DEC(" kB\nAnonHugePages: ", mss->anonymous_thp);
	SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
	SEQ_PUT_DEC(" kB\nFilePmdMapped: ", mss->file_thp);
	SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
	seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
				  mss->private_hugetlb >> 10, 7);
	SEQ_PUT_DEC(" kB\nSwap: ", mss->swap);
	SEQ_PUT_DEC(" kB\nSwapPss: ",
		    mss->swap_pss >> PSS_SHIFT);
	SEQ_PUT_DEC(" kB\nLocked: ",
		    mss->pss_locked >> PSS_SHIFT);
	seq_puts(m, " kB\n");
}

static int show_smap(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss = {};

	smap_gather_stats(vma, &mss, 0);

	show_map_vma(m, vma);

	SEQ_PUT_DEC("Size: ", vma->vm_end - vma->vm_start);
	SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
	SEQ_PUT_DEC(" kB\nMMUPageSize: ", vma_mmu_pagesize(vma));
	seq_puts(m, " kB\n");

	__show_smap(m, &mss, false);

	seq_printf(m, "THPeligible: %8u\n",
		   hugepage_vma_check(vma, vma->vm_flags, true, false, true));

	if (arch_pkeys_enabled())
		seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
	show_smap_vma_flags(m, vma);

	return 0;
}

static int show_smaps_rollup(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct mem_size_stats mss = {};
	struct mm_struct *mm = priv->mm;
	struct vm_area_struct *vma;
	unsigned long vma_start = 0, last_vma_end = 0;
	int ret = 0;
	VMA_ITERATOR(vmi, mm, 0);

	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return -ESRCH;

	if (!mm || !mmget_not_zero(mm)) {
		ret = -ESRCH;
		goto out_put_task;
	}

	ret = mmap_read_lock_killable(mm);
	if (ret)
		goto out_put_mm;

	hold_task_mempolicy(priv);
	vma = vma_next(&vmi);

	if (unlikely(!vma))
		goto empty_set;

	vma_start = vma->vm_start;
	do {
		smap_gather_stats(vma, &mss, 0);
		last_vma_end = vma->vm_end;

		/*
		 * Release mmap_lock temporarily if someone wants to
		 * access it for write request.
		 */
		if (mmap_lock_is_contended(mm)) {
			vma_iter_invalidate(&vmi);
			mmap_read_unlock(mm);
			ret = mmap_read_lock_killable(mm);
			if (ret) {
				release_task_mempolicy(priv);
				goto out_put_mm;
			}

			/*
			 * After dropping the lock, there are four cases to
			 * consider. See the following example for explanation.
			 *
			 *   +------+------+-----------+
			 *   | VMA1 | VMA2 | VMA3      |
			 *   +------+------+-----------+
			 *   |      |      |           |
			 *  4k     8k     16k         400k
			 *
			 * Suppose we drop the lock after reading VMA2 due to
			 * contention, then we get:
			 *
			 *	last_vma_end = 16k
			 *
			 * 1) VMA2 is freed, but VMA3 exists:
			 *
			 *    vma_next(vmi) will return VMA3.
			 *    In this case, just continue from VMA3.
			 *
			 * 2) VMA2 still exists:
			 *
			 *    vma_next(vmi) will return VMA3.
			 *    In this case, just continue from VMA3.
			 *
			 * 3) No more VMAs can be found:
			 *
			 *    vma_next(vmi) will return NULL.
			 *    No more things to do, just break.
			 *
			 * 4) (last_vma_end - 1) is the middle of a vma (VMA'):
			 *
			 *    vma_next(vmi) will return VMA' whose range
			 *    contains last_vma_end.
			 *    Iterate VMA' from last_vma_end.
			 */
			vma = vma_next(&vmi);
			/* Case 3 above */
			if (!vma)
				break;

			/* Case 1 and 2 above */
			if (vma->vm_start >= last_vma_end)
				continue;

			/* Case 4 above */
			if (vma->vm_end > last_vma_end)
				smap_gather_stats(vma, &mss, last_vma_end);
		}
	} for_each_vma(vmi, vma);

empty_set:
	show_vma_header_prefix(m, vma_start, last_vma_end, 0, 0, 0, 0);
	seq_pad(m, ' ');
	seq_puts(m, "[rollup]\n");

	__show_smap(m, &mss, true);

	release_task_mempolicy(priv);
	mmap_read_unlock(mm);

out_put_mm:
	mmput(mm);
out_put_task:
	put_task_struct(priv->task);
	priv->task = NULL;

	return ret;
}
#undef SEQ_PUT_DEC

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int smaps_rollup_open(struct inode *inode, struct file *file)
{
	int ret;
	struct proc_maps_private *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL_ACCOUNT);
	if (!priv)
		return -ENOMEM;

	ret = single_open(file, show_smaps_rollup, priv);
	if (ret)
		goto out_free;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		ret = PTR_ERR(priv->mm);

		single_release(inode, file);
		goto out_free;
	}

	return 0;

out_free:
	kfree(priv);
	return ret;
}

static int smaps_rollup_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	kfree(priv);
	return single_release(inode, file);
}

const struct file_operations proc_pid_smaps_operations = {
	.open		= pid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_pid_smaps_rollup_operations = {
	.open		= smaps_rollup_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= smaps_rollup_release,
};

enum clear_refs_types {
	CLEAR_REFS_ALL = 1,
	CLEAR_REFS_ANON,
	CLEAR_REFS_MAPPED,
	CLEAR_REFS_SOFT_DIRTY,
	CLEAR_REFS_MM_HIWATER_RSS,
	CLEAR_REFS_LAST,
};

struct clear_refs_private {
	enum clear_refs_types type;
};

#ifdef CONFIG_MEM_SOFT_DIRTY

static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	struct page *page;

	if (!pte_write(pte))
		return false;
	if (!is_cow_mapping(vma->vm_flags))
		return false;
	if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)))
		return false;
	page = vm_normal_page(vma, addr, pte);
	if (!page)
		return false;
	return page_maybe_dma_pinned(page);
}

static inline void clear_soft_dirty(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
	/*
	 * The soft-dirty tracker uses #PF-s to catch writes
	 * to pages, so write-protect the pte as well. See the
	 * Documentation/admin-guide/mm/soft-dirty.rst for full description
	 * of how soft-dirty works.
	 */
	pte_t ptent = ptep_get(pte);

	if (pte_present(ptent)) {
		pte_t old_pte;

		if (pte_is_pinned(vma, addr, ptent))
			return;
		old_pte = ptep_modify_prot_start(vma, addr, pte);
		ptent = pte_wrprotect(old_pte);
		ptent = pte_clear_soft_dirty(ptent);
		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
	} else if (is_swap_pte(ptent)) {
		ptent = pte_swp_clear_soft_dirty(ptent);
		set_pte_at(vma->vm_mm, addr, pte, ptent);
	}
}
#else
static inline void clear_soft_dirty(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
}
#endif

#if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmdp)
{
	pmd_t old, pmd = *pmdp;

	if (pmd_present(pmd)) {
		/* See comment in change_huge_pmd() */
		old = pmdp_invalidate(vma, addr, pmdp);
		if (pmd_dirty(old))
			pmd = pmd_mkdirty(pmd);
		if (pmd_young(old))
			pmd = pmd_mkyoung(pmd);

		pmd = pmd_wrprotect(pmd);
		pmd = pmd_clear_soft_dirty(pmd);

		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
		pmd = pmd_swp_clear_soft_dirty(pmd);
		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
	}
}
#else
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmdp)
{
}
#endif

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty_pmd(vma, addr, pmd);
			goto out;
		}

		if (!pmd_present(*pmd))
			goto out;

		page = pmd_page(*pmd);

		/* Clear accessed and referenced bits. */
		pmdp_test_and_clear_young(vma, addr, pmd);
		test_and_clear_page_young(page);
		ClearPageReferenced(page);
out:
		spin_unlock(ptl);
		return 0;
	}

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (!pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = ptep_get(pte);

		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty(vma, addr, pte);
			continue;
		}

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		test_and_clear_page_young(page);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int clear_refs_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = walk->vma;

	if (vma->vm_flags & VM_PFNMAP)
		return 1;

	/*
	 * Writing 1 to /proc/pid/clear_refs affects all pages.
	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
	 * Writing 4 to /proc/pid/clear_refs affects all pages.
	 */
	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
		return 1;
	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
		return 1;
	return 0;
}

static const struct mm_walk_ops clear_refs_walk_ops = {
	.pmd_entry		= clear_refs_pte_range,
	.test_walk		= clear_refs_test_walk,
	.walk_lock		= PGWALK_WRLOCK,
};

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF] = {};
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum clear_refs_types type;
	int itype;
	int rv;

	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &itype);
	if (rv < 0)
		return rv;
	type = (enum clear_refs_types)itype;
	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
		return -EINVAL;

	task = get_proc_task(file_inode(file));
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		VMA_ITERATOR(vmi, mm, 0);
		struct mmu_notifier_range range;
		struct clear_refs_private cp = {
			.type = type,
		};

		if (mmap_write_lock_killable(mm)) {
			count = -EINTR;
			goto out_mm;
		}
		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
			/*
			 * Writing 5 to /proc/pid/clear_refs resets the peak
			 * resident set size to this mm's current rss value.
			 */
			reset_mm_hiwater_rss(mm);
			goto out_unlock;
		}

		if (type == CLEAR_REFS_SOFT_DIRTY) {
			for_each_vma(vmi, vma) {
				if (!(vma->vm_flags & VM_SOFTDIRTY))
					continue;
				vm_flags_clear(vma, VM_SOFTDIRTY);
				vma_set_page_prot(vma);
			}

			inc_tlb_flush_pending(mm);
			mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
						0, mm, 0, -1UL);
			mmu_notifier_invalidate_range_start(&range);
		}
		walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp);
		if (type == CLEAR_REFS_SOFT_DIRTY) {
			mmu_notifier_invalidate_range_end(&range);
			flush_tlb_mm(mm);
			dec_tlb_flush_pending(mm);
		}
out_unlock:
		mmap_write_unlock(mm);
out_mm:
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};
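/*
 * Illustrative usage sketch (not part of the kernel sources): soft-dirty
 * tracking from userspace typically writes "4" (CLEAR_REFS_SOFT_DIRTY) to
 * clear_refs and then re-checks bit 55 of the pagemap entries, e.g.:
 *
 *	int fd = open("/proc/self/clear_refs", O_WRONLY);
 *	write(fd, "4", 1);	// clear soft-dirty bits for all pages
 *	close(fd);
 *	// ...touch some memory, then re-read /proc/self/pagemap and test
 *	// PM_SOFT_DIRTY (bit 55) to see which pages were written since.
 */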

typedef struct {
	u64 pme;
} pagemap_entry_t;

struct pagemapread {
	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
	pagemap_entry_t *buffer;
	bool show_pfn;
};

#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
#define PAGEMAP_WALK_MASK	(PMD_MASK)

#define PM_ENTRY_BYTES		sizeof(pagemap_entry_t)
#define PM_PFRAME_BITS		55
#define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
#define PM_SOFT_DIRTY		BIT_ULL(55)
#define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
#define PM_UFFD_WP		BIT_ULL(57)
#define PM_FILE			BIT_ULL(61)
#define PM_SWAP			BIT_ULL(62)
#define PM_PRESENT		BIT_ULL(63)

#define PM_END_OF_BUFFER	1

static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
{
	return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
}

static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = *pme;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
			    __always_unused int depth, struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr = start;
	int err = 0;

	while (addr < end) {
		struct vm_area_struct *vma = find_vma(walk->mm, addr);
		pagemap_entry_t pme = make_pme(0, 0);
		/* End of address space hole, which we mark as non-present. */
		unsigned long hole_end;

		if (vma)
			hole_end = min(end, vma->vm_start);
		else
			hole_end = end;

		for (; addr < hole_end; addr += PAGE_SIZE) {
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				goto out;
		}

		if (!vma)
			break;

		/* Addresses in the VMA. */
		if (vma->vm_flags & VM_SOFTDIRTY)
			pme = make_pme(0, PM_SOFT_DIRTY);
		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				goto out;
		}
	}
out:
	return err;
}

static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	u64 frame = 0, flags = 0;
	struct page *page = NULL;
	bool migration = false;

	if (pte_present(pte)) {
		if (pm->show_pfn)
			frame = pte_pfn(pte);
		flags |= PM_PRESENT;
		page = vm_normal_page(vma, addr, pte);
		if (pte_soft_dirty(pte))
			flags |= PM_SOFT_DIRTY;
		if (pte_uffd_wp(pte))
			flags |= PM_UFFD_WP;
	} else if (is_swap_pte(pte)) {
		swp_entry_t entry;
		if (pte_swp_soft_dirty(pte))
			flags |= PM_SOFT_DIRTY;
		if (pte_swp_uffd_wp(pte))
			flags |= PM_UFFD_WP;
		entry = pte_to_swp_entry(pte);
		if (pm->show_pfn) {
			pgoff_t offset;
			/*
			 * For PFN swap offsets, keeping the offset field
			 * to be PFN only to be compatible with old smaps.
			 */
			if (is_pfn_swap_entry(entry))
				offset = swp_offset_pfn(entry);
			else
				offset = swp_offset(entry);
			frame = swp_type(entry) |
				(offset << MAX_SWAPFILES_SHIFT);
		}
		flags |= PM_SWAP;
		migration = is_migration_entry(entry);
		if (is_pfn_swap_entry(entry))
			page = pfn_swap_entry_to_page(entry);
		if (pte_marker_entry_uffd_wp(entry))
			flags |= PM_UFFD_WP;
	}

	if (page && !PageAnon(page))
		flags |= PM_FILE;
	if (page && !migration && page_mapcount(page) == 1)
		flags |= PM_MMAP_EXCLUSIVE;
	if (vma->vm_flags & VM_SOFTDIRTY)
		flags |= PM_SOFT_DIRTY;

	return make_pme(frame, flags);
}

static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct pagemapread *pm = walk->private;
	spinlock_t *ptl;
	pte_t *pte, *orig_pte;
	int err = 0;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	bool migration = false;

	ptl = pmd_trans_huge_lock(pmdp, vma);
	if (ptl) {
		u64 flags = 0, frame = 0;
		pmd_t pmd = *pmdp;
		struct page *page = NULL;

		if (vma->vm_flags & VM_SOFTDIRTY)
			flags |= PM_SOFT_DIRTY;

		if (pmd_present(pmd)) {
			page = pmd_page(pmd);

			flags |= PM_PRESENT;
			if (pmd_soft_dirty(pmd))
				flags |= PM_SOFT_DIRTY;
			if (pmd_uffd_wp(pmd))
				flags |= PM_UFFD_WP;
			if (pm->show_pfn)
				frame = pmd_pfn(pmd) +
					((addr & ~PMD_MASK) >> PAGE_SHIFT);
		}
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		else if (is_swap_pmd(pmd)) {
			swp_entry_t entry = pmd_to_swp_entry(pmd);
			unsigned long offset;

			if (pm->show_pfn) {
				if (is_pfn_swap_entry(entry))
					offset = swp_offset_pfn(entry);
				else
					offset = swp_offset(entry);
				offset = offset +
					((addr & ~PMD_MASK) >> PAGE_SHIFT);
				frame = swp_type(entry) |
					(offset << MAX_SWAPFILES_SHIFT);
			}
			flags |= PM_SWAP;
			if (pmd_swp_soft_dirty(pmd))
				flags |= PM_SOFT_DIRTY;
			if (pmd_swp_uffd_wp(pmd))
				flags |= PM_UFFD_WP;
			VM_BUG_ON(!is_pmd_migration_entry(pmd));
			migration = is_migration_entry(entry);
			page = pfn_swap_entry_to_page(entry);
		}
#endif

		if (page && !migration && page_mapcount(page) == 1)
			flags |= PM_MMAP_EXCLUSIVE;

		for (; addr != end; addr += PAGE_SIZE) {
			pagemap_entry_t pme = make_pme(frame, flags);

			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				break;
			if (pm->show_pfn) {
				if (flags & PM_PRESENT)
					frame++;
				else if (flags & PM_SWAP)
					frame += (1 << MAX_SWAPFILES_SHIFT);
			}
		}
		spin_unlock(ptl);
		return err;
	}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

	/*
	 * We can assume that @vma always points to a valid one and @end never
	 * goes beyond vma->vm_end.
	 */
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
	if (!pte) {
		walk->action = ACTION_AGAIN;
		return err;
	}
	for (; addr < end; pte++, addr += PAGE_SIZE) {
		pagemap_entry_t pme;

		pme = pte_to_pagemap_entry(pm, vma, addr, ptep_get(pte));
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			break;
	}
	pte_unmap_unlock(orig_pte, ptl);

	cond_resched();

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
/* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	struct vm_area_struct *vma = walk->vma;
	u64 flags = 0, frame = 0;
	int err = 0;
	pte_t pte;

	if (vma->vm_flags & VM_SOFTDIRTY)
		flags |= PM_SOFT_DIRTY;

	pte = huge_ptep_get(ptep);
	if (pte_present(pte)) {
		struct page *page = pte_page(pte);

		if (!PageAnon(page))
			flags |= PM_FILE;

		if (page_mapcount(page) == 1)
			flags |= PM_MMAP_EXCLUSIVE;

		if (huge_pte_uffd_wp(pte))
			flags |= PM_UFFD_WP;

		flags |= PM_PRESENT;
		if (pm->show_pfn)
			frame = pte_pfn(pte) +
				((addr & ~hmask) >> PAGE_SHIFT);
	} else if (pte_swp_uffd_wp_any(pte)) {
		flags |= PM_UFFD_WP;
	}

	for (; addr != end; addr += PAGE_SIZE) {
		pagemap_entry_t pme = make_pme(frame, flags);

		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
		if (pm->show_pfn && (flags & PM_PRESENT))
			frame++;
	}

	cond_resched();

	return err;
}
#else
#define pagemap_hugetlb_range	NULL
#endif /* HUGETLB_PAGE */

static const struct mm_walk_ops pagemap_ops = {
	.pmd_entry	= pagemap_pmd_range,
	.pte_hole	= pagemap_pte_hole,
	.hugetlb_entry	= pagemap_hugetlb_range,
	.walk_lock	= PGWALK_RDLOCK,
};

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bit  55    pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
 * Bit  56    page exclusively mapped
 * Bit  57    pte is uffd-wp write-protected
 * Bits 58-60 zero
 * Bit  61    page is file-page or shared-anon
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
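/*
 * Illustrative sketch (not part of the kernel build): a userspace reader
 * seeks to (vaddr / PAGE_SIZE) * 8 and decodes one 64-bit entry along the
 * lines documented above, e.g.:
 *
 *	uint64_t ent;
 *	long psize = sysconf(_SC_PAGESIZE);
 *	int fd = open("/proc/self/pagemap", O_RDONLY);
 *
 *	pread(fd, &ent, sizeof(ent), (vaddr / psize) * sizeof(ent));
 *	if (ent & (1ULL << 63))			// PM_PRESENT
 *		pfn = ent & ((1ULL << 55) - 1);	// bits 0-54, zero w/o CAP_SYS_ADMIN
 *	soft_dirty = !!(ent & (1ULL << 55));	// PM_SOFT_DIRTY
 */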
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct mm_struct *mm = file->private_data;
	struct pagemapread pm;
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int ret = 0, copied = 0;

	if (!mm || !mmget_not_zero(mm))
		goto out;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_mm;

	ret = 0;
	if (!count)
		goto out_mm;

	/* do not disclose physical addresses: attack vector */
	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);

	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc_array(pm.len, PM_ENTRY_BYTES, GFP_KERNEL);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_mm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	end_vaddr = mm->task_size;

	/* watch out for wraparound */
	start_vaddr = end_vaddr;
	if (svpfn <= (ULONG_MAX >> PAGE_SHIFT)) {
		unsigned long end;

		ret = mmap_read_lock_killable(mm);
		if (ret)
			goto out_free;
		start_vaddr = untagged_addr_remote(mm, svpfn << PAGE_SHIFT);
		mmap_read_unlock(mm);

		end = start_vaddr + ((count / PM_ENTRY_BYTES) << PAGE_SHIFT);
		if (end >= start_vaddr && end < mm->task_size)
			end_vaddr = end;
	}

	/* Ensure the address is inside the task */
	if (start_vaddr > mm->task_size)
		start_vaddr = end_vaddr;

	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
		/* overflow ? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		ret = mmap_read_lock_killable(mm);
		if (ret)
			goto out_free;
		ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm);
		mmap_read_unlock(mm);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_free;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_free:
	kfree(pm.buffer);
out_mm:
	mmput(mm);
out:
	return ret;
}

static int pagemap_open(struct inode *inode, struct file *file)
{
	struct mm_struct *mm;

	mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(mm))
		return PTR_ERR(mm);
	file->private_data = mm;
	return 0;
}

static int pagemap_release(struct inode *inode, struct file *file)
{
	struct mm_struct *mm = file->private_data;

	if (mm)
		mmdrop(mm);
	return 0;
}

#define PM_SCAN_CATEGORIES	(PAGE_IS_WPALLOWED | PAGE_IS_WRITTEN |	\
				 PAGE_IS_FILE | PAGE_IS_PRESENT |	\
				 PAGE_IS_SWAPPED | PAGE_IS_PFNZERO |	\
				 PAGE_IS_HUGE)
#define PM_SCAN_FLAGS	(PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC)

struct pagemap_scan_private {
	struct pm_scan_arg arg;
	unsigned long masks_of_interest, cur_vma_category;
	struct page_region *vec_buf;
	unsigned long vec_buf_len, vec_buf_index, found_pages;
	struct page_region __user *vec_out;
};

static unsigned long pagemap_page_category(struct pagemap_scan_private *p,
					   struct vm_area_struct *vma,
					   unsigned long addr, pte_t pte)
{
	unsigned long categories = 0;

	if (pte_present(pte)) {
		struct page *page;

		categories |= PAGE_IS_PRESENT;
		if (!pte_uffd_wp(pte))
			categories |= PAGE_IS_WRITTEN;

		if (p->masks_of_interest & PAGE_IS_FILE) {
			page = vm_normal_page(vma, addr, pte);
			if (page && !PageAnon(page))
				categories |= PAGE_IS_FILE;
		}

		if (is_zero_pfn(pte_pfn(pte)))
			categories |= PAGE_IS_PFNZERO;
	} else if (is_swap_pte(pte)) {
		swp_entry_t swp;

		categories |= PAGE_IS_SWAPPED;
		if (!pte_swp_uffd_wp_any(pte))
			categories |= PAGE_IS_WRITTEN;

		if (p->masks_of_interest & PAGE_IS_FILE) {
			swp = pte_to_swp_entry(pte);
			if (is_pfn_swap_entry(swp) &&
			    !PageAnon(pfn_swap_entry_to_page(swp)))
				categories |= PAGE_IS_FILE;
		}
	}

	return categories;
}

static void make_uffd_wp_pte(struct vm_area_struct *vma,
			     unsigned long addr, pte_t *pte)
{
	pte_t ptent = ptep_get(pte);

	if (pte_present(ptent)) {
		pte_t old_pte;

		old_pte = ptep_modify_prot_start(vma, addr, pte);
		ptent = pte_mkuffd_wp(ptent);
		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
	} else if (is_swap_pte(ptent)) {
		ptent = pte_swp_mkuffd_wp(ptent);
		set_pte_at(vma->vm_mm, addr, pte, ptent);
	} else {
		set_pte_at(vma->vm_mm, addr, pte,
			   make_pte_marker(PTE_MARKER_UFFD_WP));
	}
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static unsigned long pagemap_thp_category(struct pagemap_scan_private *p,
					  struct vm_area_struct *vma,
					  unsigned long addr, pmd_t pmd)
{
	unsigned long categories = PAGE_IS_HUGE;

	if (pmd_present(pmd)) {
		struct page *page;

		categories |= PAGE_IS_PRESENT;
		if (!pmd_uffd_wp(pmd))
			categories |= PAGE_IS_WRITTEN;

		if (p->masks_of_interest & PAGE_IS_FILE) {
			page = vm_normal_page_pmd(vma, addr, pmd);
			if (page && !PageAnon(page))
				categories |= PAGE_IS_FILE;
		}

		if (is_zero_pfn(pmd_pfn(pmd)))
			categories |= PAGE_IS_PFNZERO;
	} else if (is_swap_pmd(pmd)) {
		swp_entry_t swp;

		categories |= PAGE_IS_SWAPPED;
		if (!pmd_swp_uffd_wp(pmd))
			categories |= PAGE_IS_WRITTEN;

		if (p->masks_of_interest & PAGE_IS_FILE) {
			swp = pmd_to_swp_entry(pmd);
			if (is_pfn_swap_entry(swp) &&
			    !PageAnon(pfn_swap_entry_to_page(swp)))
				categories |= PAGE_IS_FILE;
		}
	}

	return categories;
}

static void make_uffd_wp_pmd(struct vm_area_struct *vma,
			     unsigned long addr, pmd_t *pmdp)
{
	pmd_t old, pmd = *pmdp;

	if (pmd_present(pmd)) {
		old = pmdp_invalidate_ad(vma, addr, pmdp);
		pmd = pmd_mkuffd_wp(old);
		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
		pmd = pmd_swp_mkuffd_wp(pmd);
		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
	}
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long pagemap_hugetlb_category(pte_t pte)
{
	unsigned long categories = PAGE_IS_HUGE;

	/*
	 * According to pagemap_hugetlb_range(), file-backed HugeTLB
	 * page cannot be swapped. So PAGE_IS_FILE is not checked for
	 * swapped pages.
	 */
	if (pte_present(pte)) {
		categories |= PAGE_IS_PRESENT;
		if (!huge_pte_uffd_wp(pte))
			categories |= PAGE_IS_WRITTEN;
		if (!PageAnon(pte_page(pte)))
			categories |= PAGE_IS_FILE;
		if (is_zero_pfn(pte_pfn(pte)))
			categories |= PAGE_IS_PFNZERO;
	} else if (is_swap_pte(pte)) {
		categories |= PAGE_IS_SWAPPED;
		if (!pte_swp_uffd_wp_any(pte))
			categories |= PAGE_IS_WRITTEN;
	}

	return categories;
}

static void make_uffd_wp_huge_pte(struct vm_area_struct *vma,
				  unsigned long addr, pte_t *ptep,
				  pte_t ptent)
{
	unsigned long psize;

	if (is_hugetlb_entry_hwpoisoned(ptent) || is_pte_marker(ptent))
		return;

	psize = huge_page_size(hstate_vma(vma));

	if (is_hugetlb_entry_migration(ptent))
		set_huge_pte_at(vma->vm_mm, addr, ptep,
				pte_swp_mkuffd_wp(ptent), psize);
	else if (!huge_pte_none(ptent))
		huge_ptep_modify_prot_commit(vma, addr, ptep, ptent,
					     huge_pte_mkuffd_wp(ptent));
	else
		set_huge_pte_at(vma->vm_mm, addr, ptep,
				make_pte_marker(PTE_MARKER_UFFD_WP), psize);
}
#endif /* CONFIG_HUGETLB_PAGE */

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static void pagemap_scan_backout_range(struct pagemap_scan_private *p,
				       unsigned long addr, unsigned long end)
{
	struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index];

	if (cur_buf->start != addr)
		cur_buf->end = addr;
	else
		cur_buf->start = cur_buf->end = 0;

	p->found_pages -= (end - addr) / PAGE_SIZE;
}
#endif

static bool pagemap_scan_is_interesting_page(unsigned long categories,
					     const struct pagemap_scan_private *p)
{
	categories ^= p->arg.category_inverted;
	if ((categories & p->arg.category_mask) != p->arg.category_mask)
		return false;
	if (p->arg.category_anyof_mask && !(categories & p->arg.category_anyof_mask))
		return false;

	return true;
}
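/*
 * Worked example (illustrative only), derived from the checks above: with
 * category_mask == PAGE_IS_WRITTEN and category_inverted == PAGE_IS_WRITTEN,
 * a page's categories are XORed with the inverted mask first, so only pages
 * that do *not* have PAGE_IS_WRITTEN set survive the mask test - i.e. the
 * filter selects clean (not-yet-written) pages.  category_anyof_mask, when
 * non-zero, additionally requires at least one of its bits to be set after
 * the same inversion step.
 */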

static bool pagemap_scan_is_interesting_vma(unsigned long categories,
					    const struct pagemap_scan_private *p)
{
	unsigned long required = p->arg.category_mask & PAGE_IS_WPALLOWED;

	categories ^= p->arg.category_inverted;
	if ((categories & required) != required)
		return false;

	return true;
}

static int pagemap_scan_test_walk(unsigned long start, unsigned long end,
				  struct mm_walk *walk)
{
	struct pagemap_scan_private *p = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned long vma_category = 0;

	if (userfaultfd_wp_async(vma) && userfaultfd_wp_use_markers(vma))
		vma_category |= PAGE_IS_WPALLOWED;
	else if (p->arg.flags & PM_SCAN_CHECK_WPASYNC)
		return -EPERM;

	if (vma->vm_flags & VM_PFNMAP)
		return 1;

	if (!pagemap_scan_is_interesting_vma(vma_category, p))
		return 1;

	p->cur_vma_category = vma_category;

	return 0;
}

static bool pagemap_scan_push_range(unsigned long categories,
				    struct pagemap_scan_private *p,
				    unsigned long addr, unsigned long end)
{
	struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index];

	/*
	 * When there is no output buffer provided at all, the sentinel values
	 * won't match here. There is no other way for `cur_buf->end` to be
	 * non-zero other than it being non-empty.
	 */
	if (addr == cur_buf->end && categories == cur_buf->categories) {
		cur_buf->end = end;
		return true;
	}

	if (cur_buf->end) {
		if (p->vec_buf_index >= p->vec_buf_len - 1)
			return false;

		cur_buf = &p->vec_buf[++p->vec_buf_index];
	}

	cur_buf->start = addr;
	cur_buf->end = end;
	cur_buf->categories = categories;

	return true;
}

static int pagemap_scan_output(unsigned long categories,
			       struct pagemap_scan_private *p,
			       unsigned long addr, unsigned long *end)
{
	unsigned long n_pages, total_pages;
	int ret = 0;

	if (!p->vec_buf)
		return 0;

	categories &= p->arg.return_mask;

	n_pages = (*end - addr) / PAGE_SIZE;
	if (check_add_overflow(p->found_pages, n_pages, &total_pages) ||
	    total_pages > p->arg.max_pages) {
		size_t n_too_much = total_pages - p->arg.max_pages;
		*end -= n_too_much * PAGE_SIZE;
		n_pages -= n_too_much;
		ret = -ENOSPC;
	}

	if (!pagemap_scan_push_range(categories, p, addr, *end)) {
		*end = addr;
		n_pages = 0;
		ret = -ENOSPC;
	}

	p->found_pages += n_pages;
	if (ret)
		p->arg.walk_end = *end;

	return ret;
}

static int pagemap_scan_thp_entry(pmd_t *pmd, unsigned long start,
				  unsigned long end, struct mm_walk *walk)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct pagemap_scan_private *p = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned long categories;
	spinlock_t *ptl;
	int ret = 0;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		return -ENOENT;

	categories = p->cur_vma_category |
		     pagemap_thp_category(p, vma, start, *pmd);

	if (!pagemap_scan_is_interesting_page(categories, p))
		goto out_unlock;

	ret = pagemap_scan_output(categories, p, start, &end);
	if (start == end)
		goto out_unlock;

	if (~p->arg.flags & PM_SCAN_WP_MATCHING)
		goto out_unlock;
static int pagemap_scan_thp_entry(pmd_t *pmd, unsigned long start,
				  unsigned long end, struct mm_walk *walk)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct pagemap_scan_private *p = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned long categories;
	spinlock_t *ptl;
	int ret = 0;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		return -ENOENT;

	categories = p->cur_vma_category |
		     pagemap_thp_category(p, vma, start, *pmd);

	if (!pagemap_scan_is_interesting_page(categories, p))
		goto out_unlock;

	ret = pagemap_scan_output(categories, p, start, &end);
	if (start == end)
		goto out_unlock;

	if (~p->arg.flags & PM_SCAN_WP_MATCHING)
		goto out_unlock;
	if (~categories & PAGE_IS_WRITTEN)
		goto out_unlock;

	/*
	 * Break huge page into small pages if the WP operation
	 * needs to be performed on a portion of the huge page.
	 */
	if (end != start + HPAGE_SIZE) {
		spin_unlock(ptl);
		split_huge_pmd(vma, pmd, start);
		pagemap_scan_backout_range(p, start, end);
		/* Report as if there was no THP */
		return -ENOENT;
	}

	make_uffd_wp_pmd(vma, start, pmd);
	flush_tlb_range(vma, start, end);
out_unlock:
	spin_unlock(ptl);
	return ret;
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
	return -ENOENT;
#endif
}

static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
				  unsigned long end, struct mm_walk *walk)
{
	struct pagemap_scan_private *p = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned long addr, flush_end = 0;
	pte_t *pte, *start_pte;
	spinlock_t *ptl;
	int ret;

	arch_enter_lazy_mmu_mode();

	ret = pagemap_scan_thp_entry(pmd, start, end, walk);
	if (ret != -ENOENT) {
		arch_leave_lazy_mmu_mode();
		return ret;
	}

	ret = 0;
	start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
	if (!pte) {
		arch_leave_lazy_mmu_mode();
		walk->action = ACTION_AGAIN;
		return 0;
	}

	if (!p->vec_out) {
		/* Fast path for performing exclusive WP */
		for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
			if (pte_uffd_wp(ptep_get(pte)))
				continue;
			make_uffd_wp_pte(vma, addr, pte);
			if (!flush_end)
				start = addr;
			flush_end = addr + PAGE_SIZE;
		}
		goto flush_and_return;
	}

	if (!p->arg.category_anyof_mask && !p->arg.category_inverted &&
	    p->arg.category_mask == PAGE_IS_WRITTEN &&
	    p->arg.return_mask == PAGE_IS_WRITTEN) {
		for (addr = start; addr < end; pte++, addr += PAGE_SIZE) {
			unsigned long next = addr + PAGE_SIZE;

			if (pte_uffd_wp(ptep_get(pte)))
				continue;
			ret = pagemap_scan_output(p->cur_vma_category | PAGE_IS_WRITTEN,
						  p, addr, &next);
			if (next == addr)
				break;
			if (~p->arg.flags & PM_SCAN_WP_MATCHING)
				continue;
			make_uffd_wp_pte(vma, addr, pte);
			if (!flush_end)
				start = addr;
			flush_end = next;
		}
		goto flush_and_return;
	}

	for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
		unsigned long categories = p->cur_vma_category |
					   pagemap_page_category(p, vma, addr, ptep_get(pte));
		unsigned long next = addr + PAGE_SIZE;

		if (!pagemap_scan_is_interesting_page(categories, p))
			continue;

		ret = pagemap_scan_output(categories, p, addr, &next);
		if (next == addr)
			break;

		if (~p->arg.flags & PM_SCAN_WP_MATCHING)
			continue;
		if (~categories & PAGE_IS_WRITTEN)
			continue;

		make_uffd_wp_pte(vma, addr, pte);
		if (!flush_end)
			start = addr;
		flush_end = next;
	}

flush_and_return:
	if (flush_end)
		flush_tlb_range(vma, start, addr);

	pte_unmap_unlock(start_pte, ptl);
	arch_leave_lazy_mmu_mode();

	cond_resched();
	return ret;
}

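/*
 * HugeTLB mappings are scanned one huge page at a time.  Write-protecting
 * only part of a huge page is not possible, so a partial match backs out
 * the range already reported for it and rewinds walk_end to the page's
 * start instead of write-protecting it.
 */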
#ifdef CONFIG_HUGETLB_PAGE
static int pagemap_scan_hugetlb_entry(pte_t *ptep, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	struct pagemap_scan_private *p = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned long categories;
	spinlock_t *ptl;
	int ret = 0;
	pte_t pte;

	if (~p->arg.flags & PM_SCAN_WP_MATCHING) {
		/* Go the short route when not write-protecting pages. */

		pte = huge_ptep_get(ptep);
		categories = p->cur_vma_category | pagemap_hugetlb_category(pte);

		if (!pagemap_scan_is_interesting_page(categories, p))
			return 0;

		return pagemap_scan_output(categories, p, start, &end);
	}

	i_mmap_lock_write(vma->vm_file->f_mapping);
	ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, ptep);

	pte = huge_ptep_get(ptep);
	categories = p->cur_vma_category | pagemap_hugetlb_category(pte);

	if (!pagemap_scan_is_interesting_page(categories, p))
		goto out_unlock;

	ret = pagemap_scan_output(categories, p, start, &end);
	if (start == end)
		goto out_unlock;

	if (~categories & PAGE_IS_WRITTEN)
		goto out_unlock;

	if (end != start + HPAGE_SIZE) {
		/* Partial HugeTLB page WP isn't possible. */
		pagemap_scan_backout_range(p, start, end);
		p->arg.walk_end = start;
		ret = 0;
		goto out_unlock;
	}

	make_uffd_wp_huge_pte(vma, start, ptep, pte);
	flush_hugetlb_tlb_range(vma, start, end);

out_unlock:
	spin_unlock(ptl);
	i_mmap_unlock_write(vma->vm_file->f_mapping);

	return ret;
}
#else
#define pagemap_scan_hugetlb_entry NULL
#endif

static int pagemap_scan_pte_hole(unsigned long addr, unsigned long end,
				 int depth, struct mm_walk *walk)
{
	struct pagemap_scan_private *p = walk->private;
	struct vm_area_struct *vma = walk->vma;
	int ret, err;

	if (!vma || !pagemap_scan_is_interesting_page(p->cur_vma_category, p))
		return 0;

	ret = pagemap_scan_output(p->cur_vma_category, p, addr, &end);
	if (addr == end)
		return ret;

	if (~p->arg.flags & PM_SCAN_WP_MATCHING)
		return ret;

	err = uffd_wp_range(vma, addr, end - addr, true);
	if (err < 0)
		ret = err;

	return ret;
}

static const struct mm_walk_ops pagemap_scan_ops = {
	.test_walk = pagemap_scan_test_walk,
	.pmd_entry = pagemap_scan_pmd_entry,
	.pte_hole = pagemap_scan_pte_hole,
	.hugetlb_entry = pagemap_scan_hugetlb_entry,
};

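/*
 * Copy in and sanity-check the ioctl argument: the struct size must match,
 * only known flags and categories may be set, the range must start
 * page-aligned (its end is rounded up), and max_pages == 0 means
 * "no limit".
 */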
static int pagemap_scan_get_args(struct pm_scan_arg *arg,
				 unsigned long uarg)
{
	if (copy_from_user(arg, (void __user *)uarg, sizeof(*arg)))
		return -EFAULT;

	if (arg->size != sizeof(struct pm_scan_arg))
		return -EINVAL;

	/* Validate requested features */
	if (arg->flags & ~PM_SCAN_FLAGS)
		return -EINVAL;
	if ((arg->category_inverted | arg->category_mask |
	     arg->category_anyof_mask | arg->return_mask) & ~PM_SCAN_CATEGORIES)
		return -EINVAL;

	arg->start = untagged_addr((unsigned long)arg->start);
	arg->end = untagged_addr((unsigned long)arg->end);
	arg->vec = untagged_addr((unsigned long)arg->vec);

	/* Validate memory pointers */
	if (!IS_ALIGNED(arg->start, PAGE_SIZE))
		return -EINVAL;
	if (!access_ok((void __user *)(long)arg->start, arg->end - arg->start))
		return -EFAULT;
	if (!arg->vec && arg->vec_len)
		return -EINVAL;
	if (arg->vec && !access_ok((void __user *)(long)arg->vec,
				   arg->vec_len * sizeof(struct page_region)))
		return -EFAULT;

	/* Fixup default values */
	arg->end = ALIGN(arg->end, PAGE_SIZE);
	arg->walk_end = 0;
	if (!arg->max_pages)
		arg->max_pages = ULONG_MAX;

	return 0;
}

static int pagemap_scan_writeback_args(struct pm_scan_arg *arg,
				       unsigned long uargl)
{
	struct pm_scan_arg __user *uarg = (void __user *)uargl;

	if (copy_to_user(&uarg->walk_end, &arg->walk_end, sizeof(arg->walk_end)))
		return -EFAULT;

	return 0;
}

static int pagemap_scan_init_bounce_buffer(struct pagemap_scan_private *p)
{
	if (!p->arg.vec_len)
		return 0;

	p->vec_buf_len = min_t(size_t, PAGEMAP_WALK_SIZE >> PAGE_SHIFT,
			       p->arg.vec_len);
	p->vec_buf = kmalloc_array(p->vec_buf_len, sizeof(*p->vec_buf),
				   GFP_KERNEL);
	if (!p->vec_buf)
		return -ENOMEM;

	p->vec_buf->start = p->vec_buf->end = 0;
	p->vec_out = (struct page_region __user *)(long)p->arg.vec;

	return 0;
}

static long pagemap_scan_flush_buffer(struct pagemap_scan_private *p)
{
	const struct page_region *buf = p->vec_buf;
	long n = p->vec_buf_index;

	if (!p->vec_buf)
		return 0;

	if (buf[n].end != buf[n].start)
		n++;

	if (!n)
		return 0;

	if (copy_to_user(p->vec_out, buf, n * sizeof(*buf)))
		return -EFAULT;

	p->arg.vec_len -= n;
	p->vec_out += n;

	p->vec_buf_index = 0;
	p->vec_buf_len = min_t(size_t, p->vec_buf_len, p->arg.vec_len);
	p->vec_buf->start = p->vec_buf->end = 0;

	return n;
}

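/*
 * Top-level PAGEMAP_SCAN handler: walk the requested range in chunks under
 * the mmap read lock, flush the bounce buffer to userspace after each
 * chunk, and keep going for as long as the walk stops early with -ENOSPC
 * while output slots and the page budget remain.
 *
 * Illustrative userspace sketch (not part of this file; it assumes the
 * UAPI definitions of struct pm_scan_arg, struct page_region, PAGE_IS_*
 * and PAGEMAP_SCAN from <linux/fs.h>, and pagemap_fd opened on
 * /proc/<pid>/pagemap).  It reports the ranges of buf whose pages carry
 * the PAGE_IS_WRITTEN category:
 *
 *	struct page_region regions[32];
 *	struct pm_scan_arg arg = {
 *		.size = sizeof(arg),
 *		.start = (__u64)(uintptr_t)buf,
 *		.end = (__u64)(uintptr_t)buf + len,
 *		.vec = (__u64)(uintptr_t)regions,
 *		.vec_len = 32,
 *		.category_mask = PAGE_IS_WRITTEN,
 *		.return_mask = PAGE_IS_WRITTEN,
 *	};
 *	int n = ioctl(pagemap_fd, PAGEMAP_SCAN, &arg);
 *	// n < 0 is an error; otherwise regions[0..n-1] were filled in.
 */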
static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
{
	struct mmu_notifier_range range;
	struct pagemap_scan_private p = {0};
	unsigned long walk_start;
	size_t n_ranges_out = 0;
	int ret;

	ret = pagemap_scan_get_args(&p.arg, uarg);
	if (ret)
		return ret;

	p.masks_of_interest = p.arg.category_mask | p.arg.category_anyof_mask |
			      p.arg.return_mask;
	ret = pagemap_scan_init_bounce_buffer(&p);
	if (ret)
		return ret;

	/* Protection change for the range is going to happen. */
	if (p.arg.flags & PM_SCAN_WP_MATCHING) {
		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
					mm, p.arg.start, p.arg.end);
		mmu_notifier_invalidate_range_start(&range);
	}

	for (walk_start = p.arg.start; walk_start < p.arg.end;
	     walk_start = p.arg.walk_end) {
		long n_out;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = mmap_read_lock_killable(mm);
		if (ret)
			break;
		ret = walk_page_range(mm, walk_start, p.arg.end,
				      &pagemap_scan_ops, &p);
		mmap_read_unlock(mm);

		n_out = pagemap_scan_flush_buffer(&p);
		if (n_out < 0)
			ret = n_out;
		else
			n_ranges_out += n_out;

		if (ret != -ENOSPC)
			break;

		if (p.arg.vec_len == 0 || p.found_pages == p.arg.max_pages)
			break;
	}

	/* ENOSPC signifies early stop (buffer full) from the walk. */
	if (!ret || ret == -ENOSPC)
		ret = n_ranges_out;

	/* The walk_end isn't set when ret is zero */
	if (!p.arg.walk_end)
		p.arg.walk_end = p.arg.end;
	if (pagemap_scan_writeback_args(&p.arg, uarg))
		ret = -EFAULT;

	if (p.arg.flags & PM_SCAN_WP_MATCHING)
		mmu_notifier_invalidate_range_end(&range);

	kfree(p.vec_buf);
	return ret;
}

static long do_pagemap_cmd(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	struct mm_struct *mm = file->private_data;

	switch (cmd) {
	case PAGEMAP_SCAN:
		return do_pagemap_scan(mm, arg);

	default:
		return -EINVAL;
	}
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
	.open		= pagemap_open,
	.release	= pagemap_release,
	.unlocked_ioctl = do_pagemap_cmd,
	.compat_ioctl	= do_pagemap_cmd,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA

struct numa_maps {
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

struct numa_maps_private {
	struct proc_maps_private proc_maps;
	struct numa_maps md;
};

static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
			unsigned long nr_pages)
{
	int count = page_mapcount(page);

	md->pages += nr_pages;
	if (pte_dirty || PageDirty(page))
		md->dirty += nr_pages;

	if (PageSwapCache(page))
		md->swapcache += nr_pages;

	if (PageActive(page) || PageUnevictable(page))
		md->active += nr_pages;

	if (PageWriteback(page))
		md->writeback += nr_pages;

	if (PageAnon(page))
		md->anon += nr_pages;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)] += nr_pages;
}

static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pte_present(pte))
		return NULL;

	page = vm_normal_page(vma, addr, pte);
	if (!page || is_zone_device_page(page))
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_MEMORY]))
		return NULL;

	return page;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
					      struct vm_area_struct *vma,
					      unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pmd_present(pmd))
		return NULL;

	page = vm_normal_page_pmd(vma, addr, pmd);
	if (!page)
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_MEMORY]))
		return NULL;

	return page;
}
#endif

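/*
 * Accumulate per-node statistics for one PMD range: a mapped THP is counted
 * in one go (HPAGE_PMD_SIZE / PAGE_SIZE pages), otherwise each present,
 * normal page contributes a single page to its node's counter.
 */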
static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
		unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md = walk->private;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte;
	pte_t *pte;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		struct page *page;

		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
		if (page)
			gather_stats(page, md, pmd_dirty(*pmd),
				     HPAGE_PMD_SIZE/PAGE_SIZE);
		spin_unlock(ptl);
		return 0;
	}
#endif
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	do {
		pte_t ptent = ptep_get(pte);
		struct page *page = can_gather_numa_stats(ptent, vma, addr);
		if (!page)
			continue;
		gather_stats(page, md, pte_dirty(ptent), 1);

	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
	return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	pte_t huge_pte = huge_ptep_get(pte);
	struct numa_maps *md;
	struct page *page;

	if (!pte_present(huge_pte))
		return 0;

	page = pte_page(huge_pte);

	md = walk->private;
	gather_stats(page, md, pte_dirty(huge_pte), 1);
	return 0;
}

#else
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	return 0;
}
#endif

static const struct mm_walk_ops show_numa_ops = {
	.hugetlb_entry = gather_hugetlb_stats,
	.pmd_entry = gather_pte_stats,
	.walk_lock = PGWALK_RDLOCK,
};

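/*
 * Example of a single /proc/<pid>/numa_maps record emitted below
 * (illustrative values; the fields after the policy string appear only
 * when their counters are non-zero, and the record is wrapped here only
 * for width):
 *
 *	7f1e8c000000 default file=/usr/lib/libc.so.6 mapped=42 mapmax=3
 *	N0=30 N1=12 kernelpagesize_kB=4
 */
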
/*
 * Display pages allocated per node and memory policy via /proc.
 */
static int show_numa_map(struct seq_file *m, void *v)
{
	struct numa_maps_private *numa_priv = m->private;
	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
	struct vm_area_struct *vma = v;
	struct numa_maps *md = &numa_priv->md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	char buffer[64];
	struct mempolicy *pol;
	pgoff_t ilx;
	int nid;

	if (!mm)
		return 0;

	/* Ensure we start with an empty set of numa_maps statistics. */
	memset(md, 0, sizeof(*md));

	pol = __get_vma_policy(vma, vma->vm_start, &ilx);
	if (pol) {
		mpol_to_str(buffer, sizeof(buffer), pol);
		mpol_cond_put(pol);
	} else {
		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
	}

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_puts(m, " file=");
		seq_path(m, file_user_path(file), "\n\t= ");
	} else if (vma_is_initial_heap(vma)) {
		seq_puts(m, " heap");
	} else if (vma_is_initial_stack(vma)) {
		seq_puts(m, " stack");
	}

	if (is_vm_hugetlb_page(vma))
		seq_puts(m, " huge");

	/* mmap_lock is held by m_start */
	walk_page_vma(vma, &show_numa_ops, md);

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(nid, N_MEMORY)
		if (md->node[nid])
			seq_printf(m, " N%d=%lu", nid, md->node[nid]);

	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
out:
	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations proc_pid_numa_maps_op = {
	.start = m_start,
	.next = m_next,
	.stop = m_stop,
	.show = show_numa_map,
};

static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
	return proc_maps_open(inode, file, &proc_pid_numa_maps_op,
			sizeof(struct numa_maps_private));
}

const struct file_operations proc_pid_numa_maps_operations = {
	.open		= pid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

#endif /* CONFIG_NUMA */