1 // SPDX-License-Identifier: GPL-2.0 2 #include <linux/pagewalk.h> 3 #include <linux/mm_inline.h> 4 #include <linux/hugetlb.h> 5 #include <linux/huge_mm.h> 6 #include <linux/mount.h> 7 #include <linux/ksm.h> 8 #include <linux/seq_file.h> 9 #include <linux/highmem.h> 10 #include <linux/ptrace.h> 11 #include <linux/slab.h> 12 #include <linux/pagemap.h> 13 #include <linux/mempolicy.h> 14 #include <linux/rmap.h> 15 #include <linux/swap.h> 16 #include <linux/sched/mm.h> 17 #include <linux/swapops.h> 18 #include <linux/mmu_notifier.h> 19 #include <linux/page_idle.h> 20 #include <linux/shmem_fs.h> 21 #include <linux/uaccess.h> 22 #include <linux/pkeys.h> 23 #include <linux/minmax.h> 24 #include <linux/overflow.h> 25 #include <linux/buildid.h> 26 27 #include <asm/elf.h> 28 #include <asm/tlb.h> 29 #include <asm/tlbflush.h> 30 #include "internal.h" 31 32 #define SEQ_PUT_DEC(str, val) \ 33 seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8) 34 void task_mem(struct seq_file *m, struct mm_struct *mm) 35 { 36 unsigned long text, lib, swap, anon, file, shmem; 37 unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss; 38 39 anon = get_mm_counter(mm, MM_ANONPAGES); 40 file = get_mm_counter(mm, MM_FILEPAGES); 41 shmem = get_mm_counter(mm, MM_SHMEMPAGES); 42 43 /* 44 * Note: to minimize their overhead, mm maintains hiwater_vm and 45 * hiwater_rss only when about to *lower* total_vm or rss. Any 46 * collector of these hiwater stats must therefore get total_vm 47 * and rss too, which will usually be the higher. Barriers? not 48 * worth the effort, such snapshots can always be inconsistent. 49 */ 50 hiwater_vm = total_vm = mm->total_vm; 51 if (hiwater_vm < mm->hiwater_vm) 52 hiwater_vm = mm->hiwater_vm; 53 hiwater_rss = total_rss = anon + file + shmem; 54 if (hiwater_rss < mm->hiwater_rss) 55 hiwater_rss = mm->hiwater_rss; 56 57 /* split executable areas between text and lib */ 58 text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK); 59 text = min(text, mm->exec_vm << PAGE_SHIFT); 60 lib = (mm->exec_vm << PAGE_SHIFT) - text; 61 62 swap = get_mm_counter(mm, MM_SWAPENTS); 63 SEQ_PUT_DEC("VmPeak:\t", hiwater_vm); 64 SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm); 65 SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm); 66 SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm)); 67 SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss); 68 SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss); 69 SEQ_PUT_DEC(" kB\nRssAnon:\t", anon); 70 SEQ_PUT_DEC(" kB\nRssFile:\t", file); 71 SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem); 72 SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm); 73 SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm); 74 seq_put_decimal_ull_width(m, 75 " kB\nVmExe:\t", text >> 10, 8); 76 seq_put_decimal_ull_width(m, 77 " kB\nVmLib:\t", lib >> 10, 8); 78 seq_put_decimal_ull_width(m, 79 " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8); 80 SEQ_PUT_DEC(" kB\nVmSwap:\t", swap); 81 seq_puts(m, " kB\n"); 82 hugetlb_report_usage(m, mm); 83 } 84 #undef SEQ_PUT_DEC 85 86 unsigned long task_vsize(struct mm_struct *mm) 87 { 88 return PAGE_SIZE * mm->total_vm; 89 } 90 91 unsigned long task_statm(struct mm_struct *mm, 92 unsigned long *shared, unsigned long *text, 93 unsigned long *data, unsigned long *resident) 94 { 95 *shared = get_mm_counter(mm, MM_FILEPAGES) + 96 get_mm_counter(mm, MM_SHMEMPAGES); 97 *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) 98 >> PAGE_SHIFT; 99 *data = mm->data_vm + mm->stack_vm; 100 *resident = *shared + get_mm_counter(mm, MM_ANONPAGES); 101 return mm->total_vm; 102 } 103 104 #ifdef CONFIG_NUMA 105 /* 106 * Save get_task_policy() for show_numa_map(). 107 */ 108 static void hold_task_mempolicy(struct proc_maps_private *priv) 109 { 110 struct task_struct *task = priv->task; 111 112 task_lock(task); 113 priv->task_mempolicy = get_task_policy(task); 114 mpol_get(priv->task_mempolicy); 115 task_unlock(task); 116 } 117 static void release_task_mempolicy(struct proc_maps_private *priv) 118 { 119 mpol_put(priv->task_mempolicy); 120 } 121 #else 122 static void hold_task_mempolicy(struct proc_maps_private *priv) 123 { 124 } 125 static void release_task_mempolicy(struct proc_maps_private *priv) 126 { 127 } 128 #endif 129 130 static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv, 131 loff_t *ppos) 132 { 133 struct vm_area_struct *vma = vma_next(&priv->iter); 134 135 if (vma) { 136 *ppos = vma->vm_start; 137 } else { 138 *ppos = -2UL; 139 vma = get_gate_vma(priv->mm); 140 } 141 142 return vma; 143 } 144 145 static void *m_start(struct seq_file *m, loff_t *ppos) 146 { 147 struct proc_maps_private *priv = m->private; 148 unsigned long last_addr = *ppos; 149 struct mm_struct *mm; 150 151 /* See m_next(). Zero at the start or after lseek. */ 152 if (last_addr == -1UL) 153 return NULL; 154 155 priv->task = get_proc_task(priv->inode); 156 if (!priv->task) 157 return ERR_PTR(-ESRCH); 158 159 mm = priv->mm; 160 if (!mm || !mmget_not_zero(mm)) { 161 put_task_struct(priv->task); 162 priv->task = NULL; 163 return NULL; 164 } 165 166 if (mmap_read_lock_killable(mm)) { 167 mmput(mm); 168 put_task_struct(priv->task); 169 priv->task = NULL; 170 return ERR_PTR(-EINTR); 171 } 172 173 vma_iter_init(&priv->iter, mm, last_addr); 174 hold_task_mempolicy(priv); 175 if (last_addr == -2UL) 176 return get_gate_vma(mm); 177 178 return proc_get_vma(priv, ppos); 179 } 180 181 static void *m_next(struct seq_file *m, void *v, loff_t *ppos) 182 { 183 if (*ppos == -2UL) { 184 *ppos = -1UL; 185 return NULL; 186 } 187 return proc_get_vma(m->private, ppos); 188 } 189 190 static void m_stop(struct seq_file *m, void *v) 191 { 192 struct proc_maps_private *priv = m->private; 193 struct mm_struct *mm = priv->mm; 194 195 if (!priv->task) 196 return; 197 198 release_task_mempolicy(priv); 199 mmap_read_unlock(mm); 200 mmput(mm); 201 put_task_struct(priv->task); 202 priv->task = NULL; 203 } 204 205 static int proc_maps_open(struct inode *inode, struct file *file, 206 const struct seq_operations *ops, int psize) 207 { 208 struct proc_maps_private *priv = __seq_open_private(file, ops, psize); 209 210 if (!priv) 211 return -ENOMEM; 212 213 priv->inode = inode; 214 priv->mm = proc_mem_open(inode, PTRACE_MODE_READ); 215 if (IS_ERR(priv->mm)) { 216 int err = PTR_ERR(priv->mm); 217 218 seq_release_private(inode, file); 219 return err; 220 } 221 222 return 0; 223 } 224 225 static int proc_map_release(struct inode *inode, struct file *file) 226 { 227 struct seq_file *seq = file->private_data; 228 struct proc_maps_private *priv = seq->private; 229 230 if (priv->mm) 231 mmdrop(priv->mm); 232 233 return seq_release_private(inode, file); 234 } 235 236 static int do_maps_open(struct inode *inode, struct file *file, 237 const struct seq_operations *ops) 238 { 239 return proc_maps_open(inode, file, ops, 240 sizeof(struct proc_maps_private)); 241 } 242 243 static void get_vma_name(struct vm_area_struct *vma, 244 const struct path **path, 245 const char **name, 246 const char **name_fmt) 247 { 248 struct anon_vma_name *anon_name = vma->vm_mm ? anon_vma_name(vma) : NULL; 249 250 *name = NULL; 251 *path = NULL; 252 *name_fmt = NULL; 253 254 /* 255 * Print the dentry name for named mappings, and a 256 * special [heap] marker for the heap: 257 */ 258 if (vma->vm_file) { 259 /* 260 * If user named this anon shared memory via 261 * prctl(PR_SET_VMA ..., use the provided name. 262 */ 263 if (anon_name) { 264 *name_fmt = "[anon_shmem:%s]"; 265 *name = anon_name->name; 266 } else { 267 *path = file_user_path(vma->vm_file); 268 } 269 return; 270 } 271 272 if (vma->vm_ops && vma->vm_ops->name) { 273 *name = vma->vm_ops->name(vma); 274 if (*name) 275 return; 276 } 277 278 *name = arch_vma_name(vma); 279 if (*name) 280 return; 281 282 if (!vma->vm_mm) { 283 *name = "[vdso]"; 284 return; 285 } 286 287 if (vma_is_initial_heap(vma)) { 288 *name = "[heap]"; 289 return; 290 } 291 292 if (vma_is_initial_stack(vma)) { 293 *name = "[stack]"; 294 return; 295 } 296 297 if (anon_name) { 298 *name_fmt = "[anon:%s]"; 299 *name = anon_name->name; 300 return; 301 } 302 } 303 304 static void show_vma_header_prefix(struct seq_file *m, 305 unsigned long start, unsigned long end, 306 vm_flags_t flags, unsigned long long pgoff, 307 dev_t dev, unsigned long ino) 308 { 309 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1); 310 seq_put_hex_ll(m, NULL, start, 8); 311 seq_put_hex_ll(m, "-", end, 8); 312 seq_putc(m, ' '); 313 seq_putc(m, flags & VM_READ ? 'r' : '-'); 314 seq_putc(m, flags & VM_WRITE ? 'w' : '-'); 315 seq_putc(m, flags & VM_EXEC ? 'x' : '-'); 316 seq_putc(m, flags & VM_MAYSHARE ? 's' : 'p'); 317 seq_put_hex_ll(m, " ", pgoff, 8); 318 seq_put_hex_ll(m, " ", MAJOR(dev), 2); 319 seq_put_hex_ll(m, ":", MINOR(dev), 2); 320 seq_put_decimal_ull(m, " ", ino); 321 seq_putc(m, ' '); 322 } 323 324 static void 325 show_map_vma(struct seq_file *m, struct vm_area_struct *vma) 326 { 327 const struct path *path; 328 const char *name_fmt, *name; 329 vm_flags_t flags = vma->vm_flags; 330 unsigned long ino = 0; 331 unsigned long long pgoff = 0; 332 unsigned long start, end; 333 dev_t dev = 0; 334 335 if (vma->vm_file) { 336 const struct inode *inode = file_user_inode(vma->vm_file); 337 338 dev = inode->i_sb->s_dev; 339 ino = inode->i_ino; 340 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; 341 } 342 343 start = vma->vm_start; 344 end = vma->vm_end; 345 show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino); 346 347 get_vma_name(vma, &path, &name, &name_fmt); 348 if (path) { 349 seq_pad(m, ' '); 350 seq_path(m, path, "\n"); 351 } else if (name_fmt) { 352 seq_pad(m, ' '); 353 seq_printf(m, name_fmt, name); 354 } else if (name) { 355 seq_pad(m, ' '); 356 seq_puts(m, name); 357 } 358 seq_putc(m, '\n'); 359 } 360 361 static int show_map(struct seq_file *m, void *v) 362 { 363 show_map_vma(m, v); 364 return 0; 365 } 366 367 static const struct seq_operations proc_pid_maps_op = { 368 .start = m_start, 369 .next = m_next, 370 .stop = m_stop, 371 .show = show_map 372 }; 373 374 static int pid_maps_open(struct inode *inode, struct file *file) 375 { 376 return do_maps_open(inode, file, &proc_pid_maps_op); 377 } 378 379 #define PROCMAP_QUERY_VMA_FLAGS ( \ 380 PROCMAP_QUERY_VMA_READABLE | \ 381 PROCMAP_QUERY_VMA_WRITABLE | \ 382 PROCMAP_QUERY_VMA_EXECUTABLE | \ 383 PROCMAP_QUERY_VMA_SHARED \ 384 ) 385 386 #define PROCMAP_QUERY_VALID_FLAGS_MASK ( \ 387 PROCMAP_QUERY_COVERING_OR_NEXT_VMA | \ 388 PROCMAP_QUERY_FILE_BACKED_VMA | \ 389 PROCMAP_QUERY_VMA_FLAGS \ 390 ) 391 392 static int query_vma_setup(struct mm_struct *mm) 393 { 394 return mmap_read_lock_killable(mm); 395 } 396 397 static void query_vma_teardown(struct mm_struct *mm, struct vm_area_struct *vma) 398 { 399 mmap_read_unlock(mm); 400 } 401 402 static struct vm_area_struct *query_vma_find_by_addr(struct mm_struct *mm, unsigned long addr) 403 { 404 return find_vma(mm, addr); 405 } 406 407 static struct vm_area_struct *query_matching_vma(struct mm_struct *mm, 408 unsigned long addr, u32 flags) 409 { 410 struct vm_area_struct *vma; 411 412 next_vma: 413 vma = query_vma_find_by_addr(mm, addr); 414 if (!vma) 415 goto no_vma; 416 417 /* user requested only file-backed VMA, keep iterating */ 418 if ((flags & PROCMAP_QUERY_FILE_BACKED_VMA) && !vma->vm_file) 419 goto skip_vma; 420 421 /* VMA permissions should satisfy query flags */ 422 if (flags & PROCMAP_QUERY_VMA_FLAGS) { 423 u32 perm = 0; 424 425 if (flags & PROCMAP_QUERY_VMA_READABLE) 426 perm |= VM_READ; 427 if (flags & PROCMAP_QUERY_VMA_WRITABLE) 428 perm |= VM_WRITE; 429 if (flags & PROCMAP_QUERY_VMA_EXECUTABLE) 430 perm |= VM_EXEC; 431 if (flags & PROCMAP_QUERY_VMA_SHARED) 432 perm |= VM_MAYSHARE; 433 434 if ((vma->vm_flags & perm) != perm) 435 goto skip_vma; 436 } 437 438 /* found covering VMA or user is OK with the matching next VMA */ 439 if ((flags & PROCMAP_QUERY_COVERING_OR_NEXT_VMA) || vma->vm_start <= addr) 440 return vma; 441 442 skip_vma: 443 /* 444 * If the user needs closest matching VMA, keep iterating. 445 */ 446 addr = vma->vm_end; 447 if (flags & PROCMAP_QUERY_COVERING_OR_NEXT_VMA) 448 goto next_vma; 449 450 no_vma: 451 return ERR_PTR(-ENOENT); 452 } 453 454 static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg) 455 { 456 struct procmap_query karg; 457 struct vm_area_struct *vma; 458 struct mm_struct *mm; 459 const char *name = NULL; 460 char build_id_buf[BUILD_ID_SIZE_MAX], *name_buf = NULL; 461 __u64 usize; 462 int err; 463 464 if (copy_from_user(&usize, (void __user *)uarg, sizeof(usize))) 465 return -EFAULT; 466 /* argument struct can never be that large, reject abuse */ 467 if (usize > PAGE_SIZE) 468 return -E2BIG; 469 /* argument struct should have at least query_flags and query_addr fields */ 470 if (usize < offsetofend(struct procmap_query, query_addr)) 471 return -EINVAL; 472 err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize); 473 if (err) 474 return err; 475 476 /* reject unknown flags */ 477 if (karg.query_flags & ~PROCMAP_QUERY_VALID_FLAGS_MASK) 478 return -EINVAL; 479 /* either both buffer address and size are set, or both should be zero */ 480 if (!!karg.vma_name_size != !!karg.vma_name_addr) 481 return -EINVAL; 482 if (!!karg.build_id_size != !!karg.build_id_addr) 483 return -EINVAL; 484 485 mm = priv->mm; 486 if (!mm || !mmget_not_zero(mm)) 487 return -ESRCH; 488 489 err = query_vma_setup(mm); 490 if (err) { 491 mmput(mm); 492 return err; 493 } 494 495 vma = query_matching_vma(mm, karg.query_addr, karg.query_flags); 496 if (IS_ERR(vma)) { 497 err = PTR_ERR(vma); 498 vma = NULL; 499 goto out; 500 } 501 502 karg.vma_start = vma->vm_start; 503 karg.vma_end = vma->vm_end; 504 505 karg.vma_flags = 0; 506 if (vma->vm_flags & VM_READ) 507 karg.vma_flags |= PROCMAP_QUERY_VMA_READABLE; 508 if (vma->vm_flags & VM_WRITE) 509 karg.vma_flags |= PROCMAP_QUERY_VMA_WRITABLE; 510 if (vma->vm_flags & VM_EXEC) 511 karg.vma_flags |= PROCMAP_QUERY_VMA_EXECUTABLE; 512 if (vma->vm_flags & VM_MAYSHARE) 513 karg.vma_flags |= PROCMAP_QUERY_VMA_SHARED; 514 515 karg.vma_page_size = vma_kernel_pagesize(vma); 516 517 if (vma->vm_file) { 518 const struct inode *inode = file_user_inode(vma->vm_file); 519 520 karg.vma_offset = ((__u64)vma->vm_pgoff) << PAGE_SHIFT; 521 karg.dev_major = MAJOR(inode->i_sb->s_dev); 522 karg.dev_minor = MINOR(inode->i_sb->s_dev); 523 karg.inode = inode->i_ino; 524 } else { 525 karg.vma_offset = 0; 526 karg.dev_major = 0; 527 karg.dev_minor = 0; 528 karg.inode = 0; 529 } 530 531 if (karg.build_id_size) { 532 __u32 build_id_sz; 533 534 err = build_id_parse(vma, build_id_buf, &build_id_sz); 535 if (err) { 536 karg.build_id_size = 0; 537 } else { 538 if (karg.build_id_size < build_id_sz) { 539 err = -ENAMETOOLONG; 540 goto out; 541 } 542 karg.build_id_size = build_id_sz; 543 } 544 } 545 546 if (karg.vma_name_size) { 547 size_t name_buf_sz = min_t(size_t, PATH_MAX, karg.vma_name_size); 548 const struct path *path; 549 const char *name_fmt; 550 size_t name_sz = 0; 551 552 get_vma_name(vma, &path, &name, &name_fmt); 553 554 if (path || name_fmt || name) { 555 name_buf = kmalloc(name_buf_sz, GFP_KERNEL); 556 if (!name_buf) { 557 err = -ENOMEM; 558 goto out; 559 } 560 } 561 if (path) { 562 name = d_path(path, name_buf, name_buf_sz); 563 if (IS_ERR(name)) { 564 err = PTR_ERR(name); 565 goto out; 566 } 567 name_sz = name_buf + name_buf_sz - name; 568 } else if (name || name_fmt) { 569 name_sz = 1 + snprintf(name_buf, name_buf_sz, name_fmt ?: "%s", name); 570 name = name_buf; 571 } 572 if (name_sz > name_buf_sz) { 573 err = -ENAMETOOLONG; 574 goto out; 575 } 576 karg.vma_name_size = name_sz; 577 } 578 579 /* unlock vma or mmap_lock, and put mm_struct before copying data to user */ 580 query_vma_teardown(mm, vma); 581 mmput(mm); 582 583 if (karg.vma_name_size && copy_to_user(u64_to_user_ptr(karg.vma_name_addr), 584 name, karg.vma_name_size)) { 585 kfree(name_buf); 586 return -EFAULT; 587 } 588 kfree(name_buf); 589 590 if (karg.build_id_size && copy_to_user(u64_to_user_ptr(karg.build_id_addr), 591 build_id_buf, karg.build_id_size)) 592 return -EFAULT; 593 594 if (copy_to_user(uarg, &karg, min_t(size_t, sizeof(karg), usize))) 595 return -EFAULT; 596 597 return 0; 598 599 out: 600 query_vma_teardown(mm, vma); 601 mmput(mm); 602 kfree(name_buf); 603 return err; 604 } 605 606 static long procfs_procmap_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 607 { 608 struct seq_file *seq = file->private_data; 609 struct proc_maps_private *priv = seq->private; 610 611 switch (cmd) { 612 case PROCMAP_QUERY: 613 return do_procmap_query(priv, (void __user *)arg); 614 default: 615 return -ENOIOCTLCMD; 616 } 617 } 618 619 const struct file_operations proc_pid_maps_operations = { 620 .open = pid_maps_open, 621 .read = seq_read, 622 .llseek = seq_lseek, 623 .release = proc_map_release, 624 .unlocked_ioctl = procfs_procmap_ioctl, 625 .compat_ioctl = compat_ptr_ioctl, 626 }; 627 628 /* 629 * Proportional Set Size(PSS): my share of RSS. 630 * 631 * PSS of a process is the count of pages it has in memory, where each 632 * page is divided by the number of processes sharing it. So if a 633 * process has 1000 pages all to itself, and 1000 shared with one other 634 * process, its PSS will be 1500. 635 * 636 * To keep (accumulated) division errors low, we adopt a 64bit 637 * fixed-point pss counter to minimize division errors. So (pss >> 638 * PSS_SHIFT) would be the real byte count. 639 * 640 * A shift of 12 before division means (assuming 4K page size): 641 * - 1M 3-user-pages add up to 8KB errors; 642 * - supports mapcount up to 2^24, or 16M; 643 * - supports PSS up to 2^52 bytes, or 4PB. 644 */ 645 #define PSS_SHIFT 12 646 647 #ifdef CONFIG_PROC_PAGE_MONITOR 648 struct mem_size_stats { 649 unsigned long resident; 650 unsigned long shared_clean; 651 unsigned long shared_dirty; 652 unsigned long private_clean; 653 unsigned long private_dirty; 654 unsigned long referenced; 655 unsigned long anonymous; 656 unsigned long lazyfree; 657 unsigned long anonymous_thp; 658 unsigned long shmem_thp; 659 unsigned long file_thp; 660 unsigned long swap; 661 unsigned long shared_hugetlb; 662 unsigned long private_hugetlb; 663 unsigned long ksm; 664 u64 pss; 665 u64 pss_anon; 666 u64 pss_file; 667 u64 pss_shmem; 668 u64 pss_dirty; 669 u64 pss_locked; 670 u64 swap_pss; 671 }; 672 673 static void smaps_page_accumulate(struct mem_size_stats *mss, 674 struct folio *folio, unsigned long size, unsigned long pss, 675 bool dirty, bool locked, bool private) 676 { 677 mss->pss += pss; 678 679 if (folio_test_anon(folio)) 680 mss->pss_anon += pss; 681 else if (folio_test_swapbacked(folio)) 682 mss->pss_shmem += pss; 683 else 684 mss->pss_file += pss; 685 686 if (locked) 687 mss->pss_locked += pss; 688 689 if (dirty || folio_test_dirty(folio)) { 690 mss->pss_dirty += pss; 691 if (private) 692 mss->private_dirty += size; 693 else 694 mss->shared_dirty += size; 695 } else { 696 if (private) 697 mss->private_clean += size; 698 else 699 mss->shared_clean += size; 700 } 701 } 702 703 static void smaps_account(struct mem_size_stats *mss, struct page *page, 704 bool compound, bool young, bool dirty, bool locked, 705 bool present) 706 { 707 struct folio *folio = page_folio(page); 708 int i, nr = compound ? compound_nr(page) : 1; 709 unsigned long size = nr * PAGE_SIZE; 710 711 /* 712 * First accumulate quantities that depend only on |size| and the type 713 * of the compound page. 714 */ 715 if (folio_test_anon(folio)) { 716 mss->anonymous += size; 717 if (!folio_test_swapbacked(folio) && !dirty && 718 !folio_test_dirty(folio)) 719 mss->lazyfree += size; 720 } 721 722 if (folio_test_ksm(folio)) 723 mss->ksm += size; 724 725 mss->resident += size; 726 /* Accumulate the size in pages that have been accessed. */ 727 if (young || folio_test_young(folio) || folio_test_referenced(folio)) 728 mss->referenced += size; 729 730 /* 731 * Then accumulate quantities that may depend on sharing, or that may 732 * differ page-by-page. 733 * 734 * refcount == 1 for present entries guarantees that the folio is mapped 735 * exactly once. For large folios this implies that exactly one 736 * PTE/PMD/... maps (a part of) this folio. 737 * 738 * Treat all non-present entries (where relying on the mapcount and 739 * refcount doesn't make sense) as "maybe shared, but not sure how 740 * often". We treat device private entries as being fake-present. 741 * 742 * Note that it would not be safe to read the mapcount especially for 743 * pages referenced by migration entries, even with the PTL held. 744 */ 745 if (folio_ref_count(folio) == 1 || !present) { 746 smaps_page_accumulate(mss, folio, size, size << PSS_SHIFT, 747 dirty, locked, present); 748 return; 749 } 750 /* 751 * We obtain a snapshot of the mapcount. Without holding the folio lock 752 * this snapshot can be slightly wrong as we cannot always read the 753 * mapcount atomically. 754 */ 755 for (i = 0; i < nr; i++, page++) { 756 int mapcount = folio_precise_page_mapcount(folio, page); 757 unsigned long pss = PAGE_SIZE << PSS_SHIFT; 758 if (mapcount >= 2) 759 pss /= mapcount; 760 smaps_page_accumulate(mss, folio, PAGE_SIZE, pss, 761 dirty, locked, mapcount < 2); 762 } 763 } 764 765 #ifdef CONFIG_SHMEM 766 static int smaps_pte_hole(unsigned long addr, unsigned long end, 767 __always_unused int depth, struct mm_walk *walk) 768 { 769 struct mem_size_stats *mss = walk->private; 770 struct vm_area_struct *vma = walk->vma; 771 772 mss->swap += shmem_partial_swap_usage(walk->vma->vm_file->f_mapping, 773 linear_page_index(vma, addr), 774 linear_page_index(vma, end)); 775 776 return 0; 777 } 778 #else 779 #define smaps_pte_hole NULL 780 #endif /* CONFIG_SHMEM */ 781 782 static void smaps_pte_hole_lookup(unsigned long addr, struct mm_walk *walk) 783 { 784 #ifdef CONFIG_SHMEM 785 if (walk->ops->pte_hole) { 786 /* depth is not used */ 787 smaps_pte_hole(addr, addr + PAGE_SIZE, 0, walk); 788 } 789 #endif 790 } 791 792 static void smaps_pte_entry(pte_t *pte, unsigned long addr, 793 struct mm_walk *walk) 794 { 795 struct mem_size_stats *mss = walk->private; 796 struct vm_area_struct *vma = walk->vma; 797 bool locked = !!(vma->vm_flags & VM_LOCKED); 798 struct page *page = NULL; 799 bool present = false, young = false, dirty = false; 800 pte_t ptent = ptep_get(pte); 801 802 if (pte_present(ptent)) { 803 page = vm_normal_page(vma, addr, ptent); 804 young = pte_young(ptent); 805 dirty = pte_dirty(ptent); 806 present = true; 807 } else if (is_swap_pte(ptent)) { 808 swp_entry_t swpent = pte_to_swp_entry(ptent); 809 810 if (!non_swap_entry(swpent)) { 811 int mapcount; 812 813 mss->swap += PAGE_SIZE; 814 mapcount = swp_swapcount(swpent); 815 if (mapcount >= 2) { 816 u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT; 817 818 do_div(pss_delta, mapcount); 819 mss->swap_pss += pss_delta; 820 } else { 821 mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT; 822 } 823 } else if (is_pfn_swap_entry(swpent)) { 824 if (is_device_private_entry(swpent)) 825 present = true; 826 page = pfn_swap_entry_to_page(swpent); 827 } 828 } else { 829 smaps_pte_hole_lookup(addr, walk); 830 return; 831 } 832 833 if (!page) 834 return; 835 836 smaps_account(mss, page, false, young, dirty, locked, present); 837 } 838 839 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 840 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, 841 struct mm_walk *walk) 842 { 843 struct mem_size_stats *mss = walk->private; 844 struct vm_area_struct *vma = walk->vma; 845 bool locked = !!(vma->vm_flags & VM_LOCKED); 846 struct page *page = NULL; 847 bool present = false; 848 struct folio *folio; 849 850 if (pmd_present(*pmd)) { 851 page = vm_normal_page_pmd(vma, addr, *pmd); 852 present = true; 853 } else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) { 854 swp_entry_t entry = pmd_to_swp_entry(*pmd); 855 856 if (is_pfn_swap_entry(entry)) 857 page = pfn_swap_entry_to_page(entry); 858 } 859 if (IS_ERR_OR_NULL(page)) 860 return; 861 folio = page_folio(page); 862 if (folio_test_anon(folio)) 863 mss->anonymous_thp += HPAGE_PMD_SIZE; 864 else if (folio_test_swapbacked(folio)) 865 mss->shmem_thp += HPAGE_PMD_SIZE; 866 else if (folio_is_zone_device(folio)) 867 /* pass */; 868 else 869 mss->file_thp += HPAGE_PMD_SIZE; 870 871 smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), 872 locked, present); 873 } 874 #else 875 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, 876 struct mm_walk *walk) 877 { 878 } 879 #endif 880 881 static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, 882 struct mm_walk *walk) 883 { 884 struct vm_area_struct *vma = walk->vma; 885 pte_t *pte; 886 spinlock_t *ptl; 887 888 ptl = pmd_trans_huge_lock(pmd, vma); 889 if (ptl) { 890 smaps_pmd_entry(pmd, addr, walk); 891 spin_unlock(ptl); 892 goto out; 893 } 894 895 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 896 if (!pte) { 897 walk->action = ACTION_AGAIN; 898 return 0; 899 } 900 for (; addr != end; pte++, addr += PAGE_SIZE) 901 smaps_pte_entry(pte, addr, walk); 902 pte_unmap_unlock(pte - 1, ptl); 903 out: 904 cond_resched(); 905 return 0; 906 } 907 908 static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma) 909 { 910 /* 911 * Don't forget to update Documentation/ on changes. 912 * 913 * The length of the second argument of mnemonics[] 914 * needs to be 3 instead of previously set 2 915 * (i.e. from [BITS_PER_LONG][2] to [BITS_PER_LONG][3]) 916 * to avoid spurious 917 * -Werror=unterminated-string-initialization warning 918 * with GCC 15 919 */ 920 static const char mnemonics[BITS_PER_LONG][3] = { 921 /* 922 * In case if we meet a flag we don't know about. 923 */ 924 [0 ... (BITS_PER_LONG-1)] = "??", 925 926 [ilog2(VM_READ)] = "rd", 927 [ilog2(VM_WRITE)] = "wr", 928 [ilog2(VM_EXEC)] = "ex", 929 [ilog2(VM_SHARED)] = "sh", 930 [ilog2(VM_MAYREAD)] = "mr", 931 [ilog2(VM_MAYWRITE)] = "mw", 932 [ilog2(VM_MAYEXEC)] = "me", 933 [ilog2(VM_MAYSHARE)] = "ms", 934 [ilog2(VM_GROWSDOWN)] = "gd", 935 [ilog2(VM_PFNMAP)] = "pf", 936 [ilog2(VM_LOCKED)] = "lo", 937 [ilog2(VM_IO)] = "io", 938 [ilog2(VM_SEQ_READ)] = "sr", 939 [ilog2(VM_RAND_READ)] = "rr", 940 [ilog2(VM_DONTCOPY)] = "dc", 941 [ilog2(VM_DONTEXPAND)] = "de", 942 [ilog2(VM_LOCKONFAULT)] = "lf", 943 [ilog2(VM_ACCOUNT)] = "ac", 944 [ilog2(VM_NORESERVE)] = "nr", 945 [ilog2(VM_HUGETLB)] = "ht", 946 [ilog2(VM_SYNC)] = "sf", 947 [ilog2(VM_ARCH_1)] = "ar", 948 [ilog2(VM_WIPEONFORK)] = "wf", 949 [ilog2(VM_DONTDUMP)] = "dd", 950 #ifdef CONFIG_ARM64_BTI 951 [ilog2(VM_ARM64_BTI)] = "bt", 952 #endif 953 #ifdef CONFIG_MEM_SOFT_DIRTY 954 [ilog2(VM_SOFTDIRTY)] = "sd", 955 #endif 956 [ilog2(VM_MIXEDMAP)] = "mm", 957 [ilog2(VM_HUGEPAGE)] = "hg", 958 [ilog2(VM_NOHUGEPAGE)] = "nh", 959 [ilog2(VM_MERGEABLE)] = "mg", 960 [ilog2(VM_UFFD_MISSING)]= "um", 961 [ilog2(VM_UFFD_WP)] = "uw", 962 #ifdef CONFIG_ARM64_MTE 963 [ilog2(VM_MTE)] = "mt", 964 [ilog2(VM_MTE_ALLOWED)] = "", 965 #endif 966 #ifdef CONFIG_ARCH_HAS_PKEYS 967 /* These come out via ProtectionKey: */ 968 [ilog2(VM_PKEY_BIT0)] = "", 969 [ilog2(VM_PKEY_BIT1)] = "", 970 [ilog2(VM_PKEY_BIT2)] = "", 971 #if VM_PKEY_BIT3 972 [ilog2(VM_PKEY_BIT3)] = "", 973 #endif 974 #if VM_PKEY_BIT4 975 [ilog2(VM_PKEY_BIT4)] = "", 976 #endif 977 #endif /* CONFIG_ARCH_HAS_PKEYS */ 978 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR 979 [ilog2(VM_UFFD_MINOR)] = "ui", 980 #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */ 981 #ifdef CONFIG_ARCH_HAS_USER_SHADOW_STACK 982 [ilog2(VM_SHADOW_STACK)] = "ss", 983 #endif 984 #if defined(CONFIG_64BIT) || defined(CONFIG_PPC32) 985 [ilog2(VM_DROPPABLE)] = "dp", 986 #endif 987 #ifdef CONFIG_64BIT 988 [ilog2(VM_SEALED)] = "sl", 989 #endif 990 }; 991 size_t i; 992 993 seq_puts(m, "VmFlags: "); 994 for (i = 0; i < BITS_PER_LONG; i++) { 995 if (!mnemonics[i][0]) 996 continue; 997 if (vma->vm_flags & (1UL << i)) 998 seq_printf(m, "%s ", mnemonics[i]); 999 } 1000 seq_putc(m, '\n'); 1001 } 1002 1003 #ifdef CONFIG_HUGETLB_PAGE 1004 static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask, 1005 unsigned long addr, unsigned long end, 1006 struct mm_walk *walk) 1007 { 1008 struct mem_size_stats *mss = walk->private; 1009 struct vm_area_struct *vma = walk->vma; 1010 pte_t ptent = huge_ptep_get(walk->mm, addr, pte); 1011 struct folio *folio = NULL; 1012 bool present = false; 1013 1014 if (pte_present(ptent)) { 1015 folio = page_folio(pte_page(ptent)); 1016 present = true; 1017 } else if (is_swap_pte(ptent)) { 1018 swp_entry_t swpent = pte_to_swp_entry(ptent); 1019 1020 if (is_pfn_swap_entry(swpent)) 1021 folio = pfn_swap_entry_folio(swpent); 1022 } 1023 1024 if (folio) { 1025 /* We treat non-present entries as "maybe shared". */ 1026 if (!present || folio_likely_mapped_shared(folio) || 1027 hugetlb_pmd_shared(pte)) 1028 mss->shared_hugetlb += huge_page_size(hstate_vma(vma)); 1029 else 1030 mss->private_hugetlb += huge_page_size(hstate_vma(vma)); 1031 } 1032 return 0; 1033 } 1034 #else 1035 #define smaps_hugetlb_range NULL 1036 #endif /* HUGETLB_PAGE */ 1037 1038 static const struct mm_walk_ops smaps_walk_ops = { 1039 .pmd_entry = smaps_pte_range, 1040 .hugetlb_entry = smaps_hugetlb_range, 1041 .walk_lock = PGWALK_RDLOCK, 1042 }; 1043 1044 static const struct mm_walk_ops smaps_shmem_walk_ops = { 1045 .pmd_entry = smaps_pte_range, 1046 .hugetlb_entry = smaps_hugetlb_range, 1047 .pte_hole = smaps_pte_hole, 1048 .walk_lock = PGWALK_RDLOCK, 1049 }; 1050 1051 /* 1052 * Gather mem stats from @vma with the indicated beginning 1053 * address @start, and keep them in @mss. 1054 * 1055 * Use vm_start of @vma as the beginning address if @start is 0. 1056 */ 1057 static void smap_gather_stats(struct vm_area_struct *vma, 1058 struct mem_size_stats *mss, unsigned long start) 1059 { 1060 const struct mm_walk_ops *ops = &smaps_walk_ops; 1061 1062 /* Invalid start */ 1063 if (start >= vma->vm_end) 1064 return; 1065 1066 if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) { 1067 /* 1068 * For shared or readonly shmem mappings we know that all 1069 * swapped out pages belong to the shmem object, and we can 1070 * obtain the swap value much more efficiently. For private 1071 * writable mappings, we might have COW pages that are 1072 * not affected by the parent swapped out pages of the shmem 1073 * object, so we have to distinguish them during the page walk. 1074 * Unless we know that the shmem object (or the part mapped by 1075 * our VMA) has no swapped out pages at all. 1076 */ 1077 unsigned long shmem_swapped = shmem_swap_usage(vma); 1078 1079 if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) || 1080 !(vma->vm_flags & VM_WRITE))) { 1081 mss->swap += shmem_swapped; 1082 } else { 1083 ops = &smaps_shmem_walk_ops; 1084 } 1085 } 1086 1087 /* mmap_lock is held in m_start */ 1088 if (!start) 1089 walk_page_vma(vma, ops, mss); 1090 else 1091 walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss); 1092 } 1093 1094 #define SEQ_PUT_DEC(str, val) \ 1095 seq_put_decimal_ull_width(m, str, (val) >> 10, 8) 1096 1097 /* Show the contents common for smaps and smaps_rollup */ 1098 static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss, 1099 bool rollup_mode) 1100 { 1101 SEQ_PUT_DEC("Rss: ", mss->resident); 1102 SEQ_PUT_DEC(" kB\nPss: ", mss->pss >> PSS_SHIFT); 1103 SEQ_PUT_DEC(" kB\nPss_Dirty: ", mss->pss_dirty >> PSS_SHIFT); 1104 if (rollup_mode) { 1105 /* 1106 * These are meaningful only for smaps_rollup, otherwise two of 1107 * them are zero, and the other one is the same as Pss. 1108 */ 1109 SEQ_PUT_DEC(" kB\nPss_Anon: ", 1110 mss->pss_anon >> PSS_SHIFT); 1111 SEQ_PUT_DEC(" kB\nPss_File: ", 1112 mss->pss_file >> PSS_SHIFT); 1113 SEQ_PUT_DEC(" kB\nPss_Shmem: ", 1114 mss->pss_shmem >> PSS_SHIFT); 1115 } 1116 SEQ_PUT_DEC(" kB\nShared_Clean: ", mss->shared_clean); 1117 SEQ_PUT_DEC(" kB\nShared_Dirty: ", mss->shared_dirty); 1118 SEQ_PUT_DEC(" kB\nPrivate_Clean: ", mss->private_clean); 1119 SEQ_PUT_DEC(" kB\nPrivate_Dirty: ", mss->private_dirty); 1120 SEQ_PUT_DEC(" kB\nReferenced: ", mss->referenced); 1121 SEQ_PUT_DEC(" kB\nAnonymous: ", mss->anonymous); 1122 SEQ_PUT_DEC(" kB\nKSM: ", mss->ksm); 1123 SEQ_PUT_DEC(" kB\nLazyFree: ", mss->lazyfree); 1124 SEQ_PUT_DEC(" kB\nAnonHugePages: ", mss->anonymous_thp); 1125 SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp); 1126 SEQ_PUT_DEC(" kB\nFilePmdMapped: ", mss->file_thp); 1127 SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb); 1128 seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ", 1129 mss->private_hugetlb >> 10, 7); 1130 SEQ_PUT_DEC(" kB\nSwap: ", mss->swap); 1131 SEQ_PUT_DEC(" kB\nSwapPss: ", 1132 mss->swap_pss >> PSS_SHIFT); 1133 SEQ_PUT_DEC(" kB\nLocked: ", 1134 mss->pss_locked >> PSS_SHIFT); 1135 seq_puts(m, " kB\n"); 1136 } 1137 1138 static int show_smap(struct seq_file *m, void *v) 1139 { 1140 struct vm_area_struct *vma = v; 1141 struct mem_size_stats mss = {}; 1142 1143 smap_gather_stats(vma, &mss, 0); 1144 1145 show_map_vma(m, vma); 1146 1147 SEQ_PUT_DEC("Size: ", vma->vm_end - vma->vm_start); 1148 SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma)); 1149 SEQ_PUT_DEC(" kB\nMMUPageSize: ", vma_mmu_pagesize(vma)); 1150 seq_puts(m, " kB\n"); 1151 1152 __show_smap(m, &mss, false); 1153 1154 seq_printf(m, "THPeligible: %8u\n", 1155 !!thp_vma_allowable_orders(vma, vma->vm_flags, 1156 TVA_SMAPS | TVA_ENFORCE_SYSFS, THP_ORDERS_ALL)); 1157 1158 if (arch_pkeys_enabled()) 1159 seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma)); 1160 show_smap_vma_flags(m, vma); 1161 1162 return 0; 1163 } 1164 1165 static int show_smaps_rollup(struct seq_file *m, void *v) 1166 { 1167 struct proc_maps_private *priv = m->private; 1168 struct mem_size_stats mss = {}; 1169 struct mm_struct *mm = priv->mm; 1170 struct vm_area_struct *vma; 1171 unsigned long vma_start = 0, last_vma_end = 0; 1172 int ret = 0; 1173 VMA_ITERATOR(vmi, mm, 0); 1174 1175 priv->task = get_proc_task(priv->inode); 1176 if (!priv->task) 1177 return -ESRCH; 1178 1179 if (!mm || !mmget_not_zero(mm)) { 1180 ret = -ESRCH; 1181 goto out_put_task; 1182 } 1183 1184 ret = mmap_read_lock_killable(mm); 1185 if (ret) 1186 goto out_put_mm; 1187 1188 hold_task_mempolicy(priv); 1189 vma = vma_next(&vmi); 1190 1191 if (unlikely(!vma)) 1192 goto empty_set; 1193 1194 vma_start = vma->vm_start; 1195 do { 1196 smap_gather_stats(vma, &mss, 0); 1197 last_vma_end = vma->vm_end; 1198 1199 /* 1200 * Release mmap_lock temporarily if someone wants to 1201 * access it for write request. 1202 */ 1203 if (mmap_lock_is_contended(mm)) { 1204 vma_iter_invalidate(&vmi); 1205 mmap_read_unlock(mm); 1206 ret = mmap_read_lock_killable(mm); 1207 if (ret) { 1208 release_task_mempolicy(priv); 1209 goto out_put_mm; 1210 } 1211 1212 /* 1213 * After dropping the lock, there are four cases to 1214 * consider. See the following example for explanation. 1215 * 1216 * +------+------+-----------+ 1217 * | VMA1 | VMA2 | VMA3 | 1218 * +------+------+-----------+ 1219 * | | | | 1220 * 4k 8k 16k 400k 1221 * 1222 * Suppose we drop the lock after reading VMA2 due to 1223 * contention, then we get: 1224 * 1225 * last_vma_end = 16k 1226 * 1227 * 1) VMA2 is freed, but VMA3 exists: 1228 * 1229 * vma_next(vmi) will return VMA3. 1230 * In this case, just continue from VMA3. 1231 * 1232 * 2) VMA2 still exists: 1233 * 1234 * vma_next(vmi) will return VMA3. 1235 * In this case, just continue from VMA3. 1236 * 1237 * 3) No more VMAs can be found: 1238 * 1239 * vma_next(vmi) will return NULL. 1240 * No more things to do, just break. 1241 * 1242 * 4) (last_vma_end - 1) is the middle of a vma (VMA'): 1243 * 1244 * vma_next(vmi) will return VMA' whose range 1245 * contains last_vma_end. 1246 * Iterate VMA' from last_vma_end. 1247 */ 1248 vma = vma_next(&vmi); 1249 /* Case 3 above */ 1250 if (!vma) 1251 break; 1252 1253 /* Case 1 and 2 above */ 1254 if (vma->vm_start >= last_vma_end) { 1255 smap_gather_stats(vma, &mss, 0); 1256 last_vma_end = vma->vm_end; 1257 continue; 1258 } 1259 1260 /* Case 4 above */ 1261 if (vma->vm_end > last_vma_end) { 1262 smap_gather_stats(vma, &mss, last_vma_end); 1263 last_vma_end = vma->vm_end; 1264 } 1265 } 1266 } for_each_vma(vmi, vma); 1267 1268 empty_set: 1269 show_vma_header_prefix(m, vma_start, last_vma_end, 0, 0, 0, 0); 1270 seq_pad(m, ' '); 1271 seq_puts(m, "[rollup]\n"); 1272 1273 __show_smap(m, &mss, true); 1274 1275 release_task_mempolicy(priv); 1276 mmap_read_unlock(mm); 1277 1278 out_put_mm: 1279 mmput(mm); 1280 out_put_task: 1281 put_task_struct(priv->task); 1282 priv->task = NULL; 1283 1284 return ret; 1285 } 1286 #undef SEQ_PUT_DEC 1287 1288 static const struct seq_operations proc_pid_smaps_op = { 1289 .start = m_start, 1290 .next = m_next, 1291 .stop = m_stop, 1292 .show = show_smap 1293 }; 1294 1295 static int pid_smaps_open(struct inode *inode, struct file *file) 1296 { 1297 return do_maps_open(inode, file, &proc_pid_smaps_op); 1298 } 1299 1300 static int smaps_rollup_open(struct inode *inode, struct file *file) 1301 { 1302 int ret; 1303 struct proc_maps_private *priv; 1304 1305 priv = kzalloc(sizeof(*priv), GFP_KERNEL_ACCOUNT); 1306 if (!priv) 1307 return -ENOMEM; 1308 1309 ret = single_open(file, show_smaps_rollup, priv); 1310 if (ret) 1311 goto out_free; 1312 1313 priv->inode = inode; 1314 priv->mm = proc_mem_open(inode, PTRACE_MODE_READ); 1315 if (IS_ERR(priv->mm)) { 1316 ret = PTR_ERR(priv->mm); 1317 1318 single_release(inode, file); 1319 goto out_free; 1320 } 1321 1322 return 0; 1323 1324 out_free: 1325 kfree(priv); 1326 return ret; 1327 } 1328 1329 static int smaps_rollup_release(struct inode *inode, struct file *file) 1330 { 1331 struct seq_file *seq = file->private_data; 1332 struct proc_maps_private *priv = seq->private; 1333 1334 if (priv->mm) 1335 mmdrop(priv->mm); 1336 1337 kfree(priv); 1338 return single_release(inode, file); 1339 } 1340 1341 const struct file_operations proc_pid_smaps_operations = { 1342 .open = pid_smaps_open, 1343 .read = seq_read, 1344 .llseek = seq_lseek, 1345 .release = proc_map_release, 1346 }; 1347 1348 const struct file_operations proc_pid_smaps_rollup_operations = { 1349 .open = smaps_rollup_open, 1350 .read = seq_read, 1351 .llseek = seq_lseek, 1352 .release = smaps_rollup_release, 1353 }; 1354 1355 enum clear_refs_types { 1356 CLEAR_REFS_ALL = 1, 1357 CLEAR_REFS_ANON, 1358 CLEAR_REFS_MAPPED, 1359 CLEAR_REFS_SOFT_DIRTY, 1360 CLEAR_REFS_MM_HIWATER_RSS, 1361 CLEAR_REFS_LAST, 1362 }; 1363 1364 struct clear_refs_private { 1365 enum clear_refs_types type; 1366 }; 1367 1368 #ifdef CONFIG_MEM_SOFT_DIRTY 1369 1370 static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte) 1371 { 1372 struct folio *folio; 1373 1374 if (!pte_write(pte)) 1375 return false; 1376 if (!is_cow_mapping(vma->vm_flags)) 1377 return false; 1378 if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))) 1379 return false; 1380 folio = vm_normal_folio(vma, addr, pte); 1381 if (!folio) 1382 return false; 1383 return folio_maybe_dma_pinned(folio); 1384 } 1385 1386 static inline void clear_soft_dirty(struct vm_area_struct *vma, 1387 unsigned long addr, pte_t *pte) 1388 { 1389 /* 1390 * The soft-dirty tracker uses #PF-s to catch writes 1391 * to pages, so write-protect the pte as well. See the 1392 * Documentation/admin-guide/mm/soft-dirty.rst for full description 1393 * of how soft-dirty works. 1394 */ 1395 pte_t ptent = ptep_get(pte); 1396 1397 if (pte_present(ptent)) { 1398 pte_t old_pte; 1399 1400 if (pte_is_pinned(vma, addr, ptent)) 1401 return; 1402 old_pte = ptep_modify_prot_start(vma, addr, pte); 1403 ptent = pte_wrprotect(old_pte); 1404 ptent = pte_clear_soft_dirty(ptent); 1405 ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent); 1406 } else if (is_swap_pte(ptent)) { 1407 ptent = pte_swp_clear_soft_dirty(ptent); 1408 set_pte_at(vma->vm_mm, addr, pte, ptent); 1409 } 1410 } 1411 #else 1412 static inline void clear_soft_dirty(struct vm_area_struct *vma, 1413 unsigned long addr, pte_t *pte) 1414 { 1415 } 1416 #endif 1417 1418 #if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE) 1419 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, 1420 unsigned long addr, pmd_t *pmdp) 1421 { 1422 pmd_t old, pmd = *pmdp; 1423 1424 if (pmd_present(pmd)) { 1425 /* See comment in change_huge_pmd() */ 1426 old = pmdp_invalidate(vma, addr, pmdp); 1427 if (pmd_dirty(old)) 1428 pmd = pmd_mkdirty(pmd); 1429 if (pmd_young(old)) 1430 pmd = pmd_mkyoung(pmd); 1431 1432 pmd = pmd_wrprotect(pmd); 1433 pmd = pmd_clear_soft_dirty(pmd); 1434 1435 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); 1436 } else if (is_migration_entry(pmd_to_swp_entry(pmd))) { 1437 pmd = pmd_swp_clear_soft_dirty(pmd); 1438 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); 1439 } 1440 } 1441 #else 1442 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, 1443 unsigned long addr, pmd_t *pmdp) 1444 { 1445 } 1446 #endif 1447 1448 static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, 1449 unsigned long end, struct mm_walk *walk) 1450 { 1451 struct clear_refs_private *cp = walk->private; 1452 struct vm_area_struct *vma = walk->vma; 1453 pte_t *pte, ptent; 1454 spinlock_t *ptl; 1455 struct folio *folio; 1456 1457 ptl = pmd_trans_huge_lock(pmd, vma); 1458 if (ptl) { 1459 if (cp->type == CLEAR_REFS_SOFT_DIRTY) { 1460 clear_soft_dirty_pmd(vma, addr, pmd); 1461 goto out; 1462 } 1463 1464 if (!pmd_present(*pmd)) 1465 goto out; 1466 1467 folio = pmd_folio(*pmd); 1468 1469 /* Clear accessed and referenced bits. */ 1470 pmdp_test_and_clear_young(vma, addr, pmd); 1471 folio_test_clear_young(folio); 1472 folio_clear_referenced(folio); 1473 out: 1474 spin_unlock(ptl); 1475 return 0; 1476 } 1477 1478 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 1479 if (!pte) { 1480 walk->action = ACTION_AGAIN; 1481 return 0; 1482 } 1483 for (; addr != end; pte++, addr += PAGE_SIZE) { 1484 ptent = ptep_get(pte); 1485 1486 if (cp->type == CLEAR_REFS_SOFT_DIRTY) { 1487 clear_soft_dirty(vma, addr, pte); 1488 continue; 1489 } 1490 1491 if (!pte_present(ptent)) 1492 continue; 1493 1494 folio = vm_normal_folio(vma, addr, ptent); 1495 if (!folio) 1496 continue; 1497 1498 /* Clear accessed and referenced bits. */ 1499 ptep_test_and_clear_young(vma, addr, pte); 1500 folio_test_clear_young(folio); 1501 folio_clear_referenced(folio); 1502 } 1503 pte_unmap_unlock(pte - 1, ptl); 1504 cond_resched(); 1505 return 0; 1506 } 1507 1508 static int clear_refs_test_walk(unsigned long start, unsigned long end, 1509 struct mm_walk *walk) 1510 { 1511 struct clear_refs_private *cp = walk->private; 1512 struct vm_area_struct *vma = walk->vma; 1513 1514 if (vma->vm_flags & VM_PFNMAP) 1515 return 1; 1516 1517 /* 1518 * Writing 1 to /proc/pid/clear_refs affects all pages. 1519 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages. 1520 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages. 1521 * Writing 4 to /proc/pid/clear_refs affects all pages. 1522 */ 1523 if (cp->type == CLEAR_REFS_ANON && vma->vm_file) 1524 return 1; 1525 if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file) 1526 return 1; 1527 return 0; 1528 } 1529 1530 static const struct mm_walk_ops clear_refs_walk_ops = { 1531 .pmd_entry = clear_refs_pte_range, 1532 .test_walk = clear_refs_test_walk, 1533 .walk_lock = PGWALK_WRLOCK, 1534 }; 1535 1536 static ssize_t clear_refs_write(struct file *file, const char __user *buf, 1537 size_t count, loff_t *ppos) 1538 { 1539 struct task_struct *task; 1540 char buffer[PROC_NUMBUF] = {}; 1541 struct mm_struct *mm; 1542 struct vm_area_struct *vma; 1543 enum clear_refs_types type; 1544 int itype; 1545 int rv; 1546 1547 if (count > sizeof(buffer) - 1) 1548 count = sizeof(buffer) - 1; 1549 if (copy_from_user(buffer, buf, count)) 1550 return -EFAULT; 1551 rv = kstrtoint(strstrip(buffer), 10, &itype); 1552 if (rv < 0) 1553 return rv; 1554 type = (enum clear_refs_types)itype; 1555 if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST) 1556 return -EINVAL; 1557 1558 task = get_proc_task(file_inode(file)); 1559 if (!task) 1560 return -ESRCH; 1561 mm = get_task_mm(task); 1562 if (mm) { 1563 VMA_ITERATOR(vmi, mm, 0); 1564 struct mmu_notifier_range range; 1565 struct clear_refs_private cp = { 1566 .type = type, 1567 }; 1568 1569 if (mmap_write_lock_killable(mm)) { 1570 count = -EINTR; 1571 goto out_mm; 1572 } 1573 if (type == CLEAR_REFS_MM_HIWATER_RSS) { 1574 /* 1575 * Writing 5 to /proc/pid/clear_refs resets the peak 1576 * resident set size to this mm's current rss value. 1577 */ 1578 reset_mm_hiwater_rss(mm); 1579 goto out_unlock; 1580 } 1581 1582 if (type == CLEAR_REFS_SOFT_DIRTY) { 1583 for_each_vma(vmi, vma) { 1584 if (!(vma->vm_flags & VM_SOFTDIRTY)) 1585 continue; 1586 vm_flags_clear(vma, VM_SOFTDIRTY); 1587 vma_set_page_prot(vma); 1588 } 1589 1590 inc_tlb_flush_pending(mm); 1591 mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY, 1592 0, mm, 0, -1UL); 1593 mmu_notifier_invalidate_range_start(&range); 1594 } 1595 walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp); 1596 if (type == CLEAR_REFS_SOFT_DIRTY) { 1597 mmu_notifier_invalidate_range_end(&range); 1598 flush_tlb_mm(mm); 1599 dec_tlb_flush_pending(mm); 1600 } 1601 out_unlock: 1602 mmap_write_unlock(mm); 1603 out_mm: 1604 mmput(mm); 1605 } 1606 put_task_struct(task); 1607 1608 return count; 1609 } 1610 1611 const struct file_operations proc_clear_refs_operations = { 1612 .write = clear_refs_write, 1613 .llseek = noop_llseek, 1614 }; 1615 1616 typedef struct { 1617 u64 pme; 1618 } pagemap_entry_t; 1619 1620 struct pagemapread { 1621 int pos, len; /* units: PM_ENTRY_BYTES, not bytes */ 1622 pagemap_entry_t *buffer; 1623 bool show_pfn; 1624 }; 1625 1626 #define PAGEMAP_WALK_SIZE (PMD_SIZE) 1627 #define PAGEMAP_WALK_MASK (PMD_MASK) 1628 1629 #define PM_ENTRY_BYTES sizeof(pagemap_entry_t) 1630 #define PM_PFRAME_BITS 55 1631 #define PM_PFRAME_MASK GENMASK_ULL(PM_PFRAME_BITS - 1, 0) 1632 #define PM_SOFT_DIRTY BIT_ULL(55) 1633 #define PM_MMAP_EXCLUSIVE BIT_ULL(56) 1634 #define PM_UFFD_WP BIT_ULL(57) 1635 #define PM_GUARD_REGION BIT_ULL(58) 1636 #define PM_FILE BIT_ULL(61) 1637 #define PM_SWAP BIT_ULL(62) 1638 #define PM_PRESENT BIT_ULL(63) 1639 1640 #define PM_END_OF_BUFFER 1 1641 1642 static inline pagemap_entry_t make_pme(u64 frame, u64 flags) 1643 { 1644 return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags }; 1645 } 1646 1647 static int add_to_pagemap(pagemap_entry_t *pme, struct pagemapread *pm) 1648 { 1649 pm->buffer[pm->pos++] = *pme; 1650 if (pm->pos >= pm->len) 1651 return PM_END_OF_BUFFER; 1652 return 0; 1653 } 1654 1655 static int pagemap_pte_hole(unsigned long start, unsigned long end, 1656 __always_unused int depth, struct mm_walk *walk) 1657 { 1658 struct pagemapread *pm = walk->private; 1659 unsigned long addr = start; 1660 int err = 0; 1661 1662 while (addr < end) { 1663 struct vm_area_struct *vma = find_vma(walk->mm, addr); 1664 pagemap_entry_t pme = make_pme(0, 0); 1665 /* End of address space hole, which we mark as non-present. */ 1666 unsigned long hole_end; 1667 1668 if (vma) 1669 hole_end = min(end, vma->vm_start); 1670 else 1671 hole_end = end; 1672 1673 for (; addr < hole_end; addr += PAGE_SIZE) { 1674 err = add_to_pagemap(&pme, pm); 1675 if (err) 1676 goto out; 1677 } 1678 1679 if (!vma) 1680 break; 1681 1682 /* Addresses in the VMA. */ 1683 if (vma->vm_flags & VM_SOFTDIRTY) 1684 pme = make_pme(0, PM_SOFT_DIRTY); 1685 for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) { 1686 err = add_to_pagemap(&pme, pm); 1687 if (err) 1688 goto out; 1689 } 1690 } 1691 out: 1692 return err; 1693 } 1694 1695 static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm, 1696 struct vm_area_struct *vma, unsigned long addr, pte_t pte) 1697 { 1698 u64 frame = 0, flags = 0; 1699 struct page *page = NULL; 1700 struct folio *folio; 1701 1702 if (pte_present(pte)) { 1703 if (pm->show_pfn) 1704 frame = pte_pfn(pte); 1705 flags |= PM_PRESENT; 1706 page = vm_normal_page(vma, addr, pte); 1707 if (pte_soft_dirty(pte)) 1708 flags |= PM_SOFT_DIRTY; 1709 if (pte_uffd_wp(pte)) 1710 flags |= PM_UFFD_WP; 1711 } else if (is_swap_pte(pte)) { 1712 swp_entry_t entry; 1713 if (pte_swp_soft_dirty(pte)) 1714 flags |= PM_SOFT_DIRTY; 1715 if (pte_swp_uffd_wp(pte)) 1716 flags |= PM_UFFD_WP; 1717 entry = pte_to_swp_entry(pte); 1718 if (pm->show_pfn) { 1719 pgoff_t offset; 1720 /* 1721 * For PFN swap offsets, keeping the offset field 1722 * to be PFN only to be compatible with old smaps. 1723 */ 1724 if (is_pfn_swap_entry(entry)) 1725 offset = swp_offset_pfn(entry); 1726 else 1727 offset = swp_offset(entry); 1728 frame = swp_type(entry) | 1729 (offset << MAX_SWAPFILES_SHIFT); 1730 } 1731 flags |= PM_SWAP; 1732 if (is_pfn_swap_entry(entry)) 1733 page = pfn_swap_entry_to_page(entry); 1734 if (pte_marker_entry_uffd_wp(entry)) 1735 flags |= PM_UFFD_WP; 1736 if (is_guard_swp_entry(entry)) 1737 flags |= PM_GUARD_REGION; 1738 } 1739 1740 if (page) { 1741 folio = page_folio(page); 1742 if (!folio_test_anon(folio)) 1743 flags |= PM_FILE; 1744 if ((flags & PM_PRESENT) && 1745 folio_precise_page_mapcount(folio, page) == 1) 1746 flags |= PM_MMAP_EXCLUSIVE; 1747 } 1748 if (vma->vm_flags & VM_SOFTDIRTY) 1749 flags |= PM_SOFT_DIRTY; 1750 1751 return make_pme(frame, flags); 1752 } 1753 1754 static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, 1755 struct mm_walk *walk) 1756 { 1757 struct vm_area_struct *vma = walk->vma; 1758 struct pagemapread *pm = walk->private; 1759 spinlock_t *ptl; 1760 pte_t *pte, *orig_pte; 1761 int err = 0; 1762 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1763 1764 ptl = pmd_trans_huge_lock(pmdp, vma); 1765 if (ptl) { 1766 unsigned int idx = (addr & ~PMD_MASK) >> PAGE_SHIFT; 1767 u64 flags = 0, frame = 0; 1768 pmd_t pmd = *pmdp; 1769 struct page *page = NULL; 1770 struct folio *folio = NULL; 1771 1772 if (vma->vm_flags & VM_SOFTDIRTY) 1773 flags |= PM_SOFT_DIRTY; 1774 1775 if (pmd_present(pmd)) { 1776 page = pmd_page(pmd); 1777 1778 flags |= PM_PRESENT; 1779 if (pmd_soft_dirty(pmd)) 1780 flags |= PM_SOFT_DIRTY; 1781 if (pmd_uffd_wp(pmd)) 1782 flags |= PM_UFFD_WP; 1783 if (pm->show_pfn) 1784 frame = pmd_pfn(pmd) + idx; 1785 } 1786 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 1787 else if (is_swap_pmd(pmd)) { 1788 swp_entry_t entry = pmd_to_swp_entry(pmd); 1789 unsigned long offset; 1790 1791 if (pm->show_pfn) { 1792 if (is_pfn_swap_entry(entry)) 1793 offset = swp_offset_pfn(entry) + idx; 1794 else 1795 offset = swp_offset(entry) + idx; 1796 frame = swp_type(entry) | 1797 (offset << MAX_SWAPFILES_SHIFT); 1798 } 1799 flags |= PM_SWAP; 1800 if (pmd_swp_soft_dirty(pmd)) 1801 flags |= PM_SOFT_DIRTY; 1802 if (pmd_swp_uffd_wp(pmd)) 1803 flags |= PM_UFFD_WP; 1804 VM_BUG_ON(!is_pmd_migration_entry(pmd)); 1805 page = pfn_swap_entry_to_page(entry); 1806 } 1807 #endif 1808 1809 if (page) { 1810 folio = page_folio(page); 1811 if (!folio_test_anon(folio)) 1812 flags |= PM_FILE; 1813 } 1814 1815 for (; addr != end; addr += PAGE_SIZE, idx++) { 1816 u64 cur_flags = flags; 1817 pagemap_entry_t pme; 1818 1819 if (folio && (flags & PM_PRESENT) && 1820 folio_precise_page_mapcount(folio, page + idx) == 1) 1821 cur_flags |= PM_MMAP_EXCLUSIVE; 1822 1823 pme = make_pme(frame, cur_flags); 1824 err = add_to_pagemap(&pme, pm); 1825 if (err) 1826 break; 1827 if (pm->show_pfn) { 1828 if (flags & PM_PRESENT) 1829 frame++; 1830 else if (flags & PM_SWAP) 1831 frame += (1 << MAX_SWAPFILES_SHIFT); 1832 } 1833 } 1834 spin_unlock(ptl); 1835 return err; 1836 } 1837 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 1838 1839 /* 1840 * We can assume that @vma always points to a valid one and @end never 1841 * goes beyond vma->vm_end. 1842 */ 1843 orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl); 1844 if (!pte) { 1845 walk->action = ACTION_AGAIN; 1846 return err; 1847 } 1848 for (; addr < end; pte++, addr += PAGE_SIZE) { 1849 pagemap_entry_t pme; 1850 1851 pme = pte_to_pagemap_entry(pm, vma, addr, ptep_get(pte)); 1852 err = add_to_pagemap(&pme, pm); 1853 if (err) 1854 break; 1855 } 1856 pte_unmap_unlock(orig_pte, ptl); 1857 1858 cond_resched(); 1859 1860 return err; 1861 } 1862 1863 #ifdef CONFIG_HUGETLB_PAGE 1864 /* This function walks within one hugetlb entry in the single call */ 1865 static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask, 1866 unsigned long addr, unsigned long end, 1867 struct mm_walk *walk) 1868 { 1869 struct pagemapread *pm = walk->private; 1870 struct vm_area_struct *vma = walk->vma; 1871 u64 flags = 0, frame = 0; 1872 int err = 0; 1873 pte_t pte; 1874 1875 if (vma->vm_flags & VM_SOFTDIRTY) 1876 flags |= PM_SOFT_DIRTY; 1877 1878 pte = huge_ptep_get(walk->mm, addr, ptep); 1879 if (pte_present(pte)) { 1880 struct folio *folio = page_folio(pte_page(pte)); 1881 1882 if (!folio_test_anon(folio)) 1883 flags |= PM_FILE; 1884 1885 if (!folio_likely_mapped_shared(folio) && 1886 !hugetlb_pmd_shared(ptep)) 1887 flags |= PM_MMAP_EXCLUSIVE; 1888 1889 if (huge_pte_uffd_wp(pte)) 1890 flags |= PM_UFFD_WP; 1891 1892 flags |= PM_PRESENT; 1893 if (pm->show_pfn) 1894 frame = pte_pfn(pte) + 1895 ((addr & ~hmask) >> PAGE_SHIFT); 1896 } else if (pte_swp_uffd_wp_any(pte)) { 1897 flags |= PM_UFFD_WP; 1898 } 1899 1900 for (; addr != end; addr += PAGE_SIZE) { 1901 pagemap_entry_t pme = make_pme(frame, flags); 1902 1903 err = add_to_pagemap(&pme, pm); 1904 if (err) 1905 return err; 1906 if (pm->show_pfn && (flags & PM_PRESENT)) 1907 frame++; 1908 } 1909 1910 cond_resched(); 1911 1912 return err; 1913 } 1914 #else 1915 #define pagemap_hugetlb_range NULL 1916 #endif /* HUGETLB_PAGE */ 1917 1918 static const struct mm_walk_ops pagemap_ops = { 1919 .pmd_entry = pagemap_pmd_range, 1920 .pte_hole = pagemap_pte_hole, 1921 .hugetlb_entry = pagemap_hugetlb_range, 1922 .walk_lock = PGWALK_RDLOCK, 1923 }; 1924 1925 /* 1926 * /proc/pid/pagemap - an array mapping virtual pages to pfns 1927 * 1928 * For each page in the address space, this file contains one 64-bit entry 1929 * consisting of the following: 1930 * 1931 * Bits 0-54 page frame number (PFN) if present 1932 * Bits 0-4 swap type if swapped 1933 * Bits 5-54 swap offset if swapped 1934 * Bit 55 pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst) 1935 * Bit 56 page exclusively mapped 1936 * Bit 57 pte is uffd-wp write-protected 1937 * Bit 58 pte is a guard region 1938 * Bits 59-60 zero 1939 * Bit 61 page is file-page or shared-anon 1940 * Bit 62 page swapped 1941 * Bit 63 page present 1942 * 1943 * If the page is not present but in swap, then the PFN contains an 1944 * encoding of the swap file number and the page's offset into the 1945 * swap. Unmapped pages return a null PFN. This allows determining 1946 * precisely which pages are mapped (or in swap) and comparing mapped 1947 * pages between processes. 1948 * 1949 * Efficient users of this interface will use /proc/pid/maps to 1950 * determine which areas of memory are actually mapped and llseek to 1951 * skip over unmapped regions. 1952 */ 1953 static ssize_t pagemap_read(struct file *file, char __user *buf, 1954 size_t count, loff_t *ppos) 1955 { 1956 struct mm_struct *mm = file->private_data; 1957 struct pagemapread pm; 1958 unsigned long src; 1959 unsigned long svpfn; 1960 unsigned long start_vaddr; 1961 unsigned long end_vaddr; 1962 int ret = 0, copied = 0; 1963 1964 if (!mm || !mmget_not_zero(mm)) 1965 goto out; 1966 1967 ret = -EINVAL; 1968 /* file position must be aligned */ 1969 if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES)) 1970 goto out_mm; 1971 1972 ret = 0; 1973 if (!count) 1974 goto out_mm; 1975 1976 /* do not disclose physical addresses: attack vector */ 1977 pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN); 1978 1979 pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT); 1980 pm.buffer = kmalloc_array(pm.len, PM_ENTRY_BYTES, GFP_KERNEL); 1981 ret = -ENOMEM; 1982 if (!pm.buffer) 1983 goto out_mm; 1984 1985 src = *ppos; 1986 svpfn = src / PM_ENTRY_BYTES; 1987 end_vaddr = mm->task_size; 1988 1989 /* watch out for wraparound */ 1990 start_vaddr = end_vaddr; 1991 if (svpfn <= (ULONG_MAX >> PAGE_SHIFT)) { 1992 unsigned long end; 1993 1994 ret = mmap_read_lock_killable(mm); 1995 if (ret) 1996 goto out_free; 1997 start_vaddr = untagged_addr_remote(mm, svpfn << PAGE_SHIFT); 1998 mmap_read_unlock(mm); 1999 2000 end = start_vaddr + ((count / PM_ENTRY_BYTES) << PAGE_SHIFT); 2001 if (end >= start_vaddr && end < mm->task_size) 2002 end_vaddr = end; 2003 } 2004 2005 /* Ensure the address is inside the task */ 2006 if (start_vaddr > mm->task_size) 2007 start_vaddr = end_vaddr; 2008 2009 ret = 0; 2010 while (count && (start_vaddr < end_vaddr)) { 2011 int len; 2012 unsigned long end; 2013 2014 pm.pos = 0; 2015 end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK; 2016 /* overflow ? */ 2017 if (end < start_vaddr || end > end_vaddr) 2018 end = end_vaddr; 2019 ret = mmap_read_lock_killable(mm); 2020 if (ret) 2021 goto out_free; 2022 ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm); 2023 mmap_read_unlock(mm); 2024 start_vaddr = end; 2025 2026 len = min(count, PM_ENTRY_BYTES * pm.pos); 2027 if (copy_to_user(buf, pm.buffer, len)) { 2028 ret = -EFAULT; 2029 goto out_free; 2030 } 2031 copied += len; 2032 buf += len; 2033 count -= len; 2034 } 2035 *ppos += copied; 2036 if (!ret || ret == PM_END_OF_BUFFER) 2037 ret = copied; 2038 2039 out_free: 2040 kfree(pm.buffer); 2041 out_mm: 2042 mmput(mm); 2043 out: 2044 return ret; 2045 } 2046 2047 static int pagemap_open(struct inode *inode, struct file *file) 2048 { 2049 struct mm_struct *mm; 2050 2051 mm = proc_mem_open(inode, PTRACE_MODE_READ); 2052 if (IS_ERR(mm)) 2053 return PTR_ERR(mm); 2054 file->private_data = mm; 2055 return 0; 2056 } 2057 2058 static int pagemap_release(struct inode *inode, struct file *file) 2059 { 2060 struct mm_struct *mm = file->private_data; 2061 2062 if (mm) 2063 mmdrop(mm); 2064 return 0; 2065 } 2066 2067 #define PM_SCAN_CATEGORIES (PAGE_IS_WPALLOWED | PAGE_IS_WRITTEN | \ 2068 PAGE_IS_FILE | PAGE_IS_PRESENT | \ 2069 PAGE_IS_SWAPPED | PAGE_IS_PFNZERO | \ 2070 PAGE_IS_HUGE | PAGE_IS_SOFT_DIRTY) 2071 #define PM_SCAN_FLAGS (PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC) 2072 2073 struct pagemap_scan_private { 2074 struct pm_scan_arg arg; 2075 unsigned long masks_of_interest, cur_vma_category; 2076 struct page_region *vec_buf; 2077 unsigned long vec_buf_len, vec_buf_index, found_pages; 2078 struct page_region __user *vec_out; 2079 }; 2080 2081 static unsigned long pagemap_page_category(struct pagemap_scan_private *p, 2082 struct vm_area_struct *vma, 2083 unsigned long addr, pte_t pte) 2084 { 2085 unsigned long categories = 0; 2086 2087 if (pte_present(pte)) { 2088 struct page *page; 2089 2090 categories |= PAGE_IS_PRESENT; 2091 if (!pte_uffd_wp(pte)) 2092 categories |= PAGE_IS_WRITTEN; 2093 2094 if (p->masks_of_interest & PAGE_IS_FILE) { 2095 page = vm_normal_page(vma, addr, pte); 2096 if (page && !PageAnon(page)) 2097 categories |= PAGE_IS_FILE; 2098 } 2099 2100 if (is_zero_pfn(pte_pfn(pte))) 2101 categories |= PAGE_IS_PFNZERO; 2102 if (pte_soft_dirty(pte)) 2103 categories |= PAGE_IS_SOFT_DIRTY; 2104 } else if (is_swap_pte(pte)) { 2105 swp_entry_t swp; 2106 2107 categories |= PAGE_IS_SWAPPED; 2108 if (!pte_swp_uffd_wp_any(pte)) 2109 categories |= PAGE_IS_WRITTEN; 2110 2111 if (p->masks_of_interest & PAGE_IS_FILE) { 2112 swp = pte_to_swp_entry(pte); 2113 if (is_pfn_swap_entry(swp) && 2114 !folio_test_anon(pfn_swap_entry_folio(swp))) 2115 categories |= PAGE_IS_FILE; 2116 } 2117 if (pte_swp_soft_dirty(pte)) 2118 categories |= PAGE_IS_SOFT_DIRTY; 2119 } 2120 2121 return categories; 2122 } 2123 2124 static void make_uffd_wp_pte(struct vm_area_struct *vma, 2125 unsigned long addr, pte_t *pte, pte_t ptent) 2126 { 2127 if (pte_present(ptent)) { 2128 pte_t old_pte; 2129 2130 old_pte = ptep_modify_prot_start(vma, addr, pte); 2131 ptent = pte_mkuffd_wp(old_pte); 2132 ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent); 2133 } else if (is_swap_pte(ptent)) { 2134 ptent = pte_swp_mkuffd_wp(ptent); 2135 set_pte_at(vma->vm_mm, addr, pte, ptent); 2136 } else { 2137 set_pte_at(vma->vm_mm, addr, pte, 2138 make_pte_marker(PTE_MARKER_UFFD_WP)); 2139 } 2140 } 2141 2142 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 2143 static unsigned long pagemap_thp_category(struct pagemap_scan_private *p, 2144 struct vm_area_struct *vma, 2145 unsigned long addr, pmd_t pmd) 2146 { 2147 unsigned long categories = PAGE_IS_HUGE; 2148 2149 if (pmd_present(pmd)) { 2150 struct page *page; 2151 2152 categories |= PAGE_IS_PRESENT; 2153 if (!pmd_uffd_wp(pmd)) 2154 categories |= PAGE_IS_WRITTEN; 2155 2156 if (p->masks_of_interest & PAGE_IS_FILE) { 2157 page = vm_normal_page_pmd(vma, addr, pmd); 2158 if (page && !PageAnon(page)) 2159 categories |= PAGE_IS_FILE; 2160 } 2161 2162 if (is_zero_pfn(pmd_pfn(pmd))) 2163 categories |= PAGE_IS_PFNZERO; 2164 if (pmd_soft_dirty(pmd)) 2165 categories |= PAGE_IS_SOFT_DIRTY; 2166 } else if (is_swap_pmd(pmd)) { 2167 swp_entry_t swp; 2168 2169 categories |= PAGE_IS_SWAPPED; 2170 if (!pmd_swp_uffd_wp(pmd)) 2171 categories |= PAGE_IS_WRITTEN; 2172 if (pmd_swp_soft_dirty(pmd)) 2173 categories |= PAGE_IS_SOFT_DIRTY; 2174 2175 if (p->masks_of_interest & PAGE_IS_FILE) { 2176 swp = pmd_to_swp_entry(pmd); 2177 if (is_pfn_swap_entry(swp) && 2178 !folio_test_anon(pfn_swap_entry_folio(swp))) 2179 categories |= PAGE_IS_FILE; 2180 } 2181 } 2182 2183 return categories; 2184 } 2185 2186 static void make_uffd_wp_pmd(struct vm_area_struct *vma, 2187 unsigned long addr, pmd_t *pmdp) 2188 { 2189 pmd_t old, pmd = *pmdp; 2190 2191 if (pmd_present(pmd)) { 2192 old = pmdp_invalidate_ad(vma, addr, pmdp); 2193 pmd = pmd_mkuffd_wp(old); 2194 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); 2195 } else if (is_migration_entry(pmd_to_swp_entry(pmd))) { 2196 pmd = pmd_swp_mkuffd_wp(pmd); 2197 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); 2198 } 2199 } 2200 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 2201 2202 #ifdef CONFIG_HUGETLB_PAGE 2203 static unsigned long pagemap_hugetlb_category(pte_t pte) 2204 { 2205 unsigned long categories = PAGE_IS_HUGE; 2206 2207 /* 2208 * According to pagemap_hugetlb_range(), file-backed HugeTLB 2209 * page cannot be swapped. So PAGE_IS_FILE is not checked for 2210 * swapped pages. 2211 */ 2212 if (pte_present(pte)) { 2213 categories |= PAGE_IS_PRESENT; 2214 if (!huge_pte_uffd_wp(pte)) 2215 categories |= PAGE_IS_WRITTEN; 2216 if (!PageAnon(pte_page(pte))) 2217 categories |= PAGE_IS_FILE; 2218 if (is_zero_pfn(pte_pfn(pte))) 2219 categories |= PAGE_IS_PFNZERO; 2220 if (pte_soft_dirty(pte)) 2221 categories |= PAGE_IS_SOFT_DIRTY; 2222 } else if (is_swap_pte(pte)) { 2223 categories |= PAGE_IS_SWAPPED; 2224 if (!pte_swp_uffd_wp_any(pte)) 2225 categories |= PAGE_IS_WRITTEN; 2226 if (pte_swp_soft_dirty(pte)) 2227 categories |= PAGE_IS_SOFT_DIRTY; 2228 } 2229 2230 return categories; 2231 } 2232 2233 static void make_uffd_wp_huge_pte(struct vm_area_struct *vma, 2234 unsigned long addr, pte_t *ptep, 2235 pte_t ptent) 2236 { 2237 unsigned long psize; 2238 2239 if (is_hugetlb_entry_hwpoisoned(ptent) || is_pte_marker(ptent)) 2240 return; 2241 2242 psize = huge_page_size(hstate_vma(vma)); 2243 2244 if (is_hugetlb_entry_migration(ptent)) 2245 set_huge_pte_at(vma->vm_mm, addr, ptep, 2246 pte_swp_mkuffd_wp(ptent), psize); 2247 else if (!huge_pte_none(ptent)) 2248 huge_ptep_modify_prot_commit(vma, addr, ptep, ptent, 2249 huge_pte_mkuffd_wp(ptent)); 2250 else 2251 set_huge_pte_at(vma->vm_mm, addr, ptep, 2252 make_pte_marker(PTE_MARKER_UFFD_WP), psize); 2253 } 2254 #endif /* CONFIG_HUGETLB_PAGE */ 2255 2256 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE) 2257 static void pagemap_scan_backout_range(struct pagemap_scan_private *p, 2258 unsigned long addr, unsigned long end) 2259 { 2260 struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index]; 2261 2262 if (cur_buf->start != addr) 2263 cur_buf->end = addr; 2264 else 2265 cur_buf->start = cur_buf->end = 0; 2266 2267 p->found_pages -= (end - addr) / PAGE_SIZE; 2268 } 2269 #endif 2270 2271 static bool pagemap_scan_is_interesting_page(unsigned long categories, 2272 const struct pagemap_scan_private *p) 2273 { 2274 categories ^= p->arg.category_inverted; 2275 if ((categories & p->arg.category_mask) != p->arg.category_mask) 2276 return false; 2277 if (p->arg.category_anyof_mask && !(categories & p->arg.category_anyof_mask)) 2278 return false; 2279 2280 return true; 2281 } 2282 2283 static bool pagemap_scan_is_interesting_vma(unsigned long categories, 2284 const struct pagemap_scan_private *p) 2285 { 2286 unsigned long required = p->arg.category_mask & PAGE_IS_WPALLOWED; 2287 2288 categories ^= p->arg.category_inverted; 2289 if ((categories & required) != required) 2290 return false; 2291 2292 return true; 2293 } 2294 2295 static int pagemap_scan_test_walk(unsigned long start, unsigned long end, 2296 struct mm_walk *walk) 2297 { 2298 struct pagemap_scan_private *p = walk->private; 2299 struct vm_area_struct *vma = walk->vma; 2300 unsigned long vma_category = 0; 2301 bool wp_allowed = userfaultfd_wp_async(vma) && 2302 userfaultfd_wp_use_markers(vma); 2303 2304 if (!wp_allowed) { 2305 /* User requested explicit failure over wp-async capability */ 2306 if (p->arg.flags & PM_SCAN_CHECK_WPASYNC) 2307 return -EPERM; 2308 /* 2309 * User requires wr-protect, and allows silently skipping 2310 * unsupported vmas. 2311 */ 2312 if (p->arg.flags & PM_SCAN_WP_MATCHING) 2313 return 1; 2314 /* 2315 * Then the request doesn't involve wr-protects at all, 2316 * fall through to the rest checks, and allow vma walk. 2317 */ 2318 } 2319 2320 if (vma->vm_flags & VM_PFNMAP) 2321 return 1; 2322 2323 if (wp_allowed) 2324 vma_category |= PAGE_IS_WPALLOWED; 2325 2326 if (vma->vm_flags & VM_SOFTDIRTY) 2327 vma_category |= PAGE_IS_SOFT_DIRTY; 2328 2329 if (!pagemap_scan_is_interesting_vma(vma_category, p)) 2330 return 1; 2331 2332 p->cur_vma_category = vma_category; 2333 2334 return 0; 2335 } 2336 2337 static bool pagemap_scan_push_range(unsigned long categories, 2338 struct pagemap_scan_private *p, 2339 unsigned long addr, unsigned long end) 2340 { 2341 struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index]; 2342 2343 /* 2344 * When there is no output buffer provided at all, the sentinel values 2345 * won't match here. There is no other way for `cur_buf->end` to be 2346 * non-zero other than it being non-empty. 2347 */ 2348 if (addr == cur_buf->end && categories == cur_buf->categories) { 2349 cur_buf->end = end; 2350 return true; 2351 } 2352 2353 if (cur_buf->end) { 2354 if (p->vec_buf_index >= p->vec_buf_len - 1) 2355 return false; 2356 2357 cur_buf = &p->vec_buf[++p->vec_buf_index]; 2358 } 2359 2360 cur_buf->start = addr; 2361 cur_buf->end = end; 2362 cur_buf->categories = categories; 2363 2364 return true; 2365 } 2366 2367 static int pagemap_scan_output(unsigned long categories, 2368 struct pagemap_scan_private *p, 2369 unsigned long addr, unsigned long *end) 2370 { 2371 unsigned long n_pages, total_pages; 2372 int ret = 0; 2373 2374 if (!p->vec_buf) 2375 return 0; 2376 2377 categories &= p->arg.return_mask; 2378 2379 n_pages = (*end - addr) / PAGE_SIZE; 2380 if (check_add_overflow(p->found_pages, n_pages, &total_pages) || 2381 total_pages > p->arg.max_pages) { 2382 size_t n_too_much = total_pages - p->arg.max_pages; 2383 *end -= n_too_much * PAGE_SIZE; 2384 n_pages -= n_too_much; 2385 ret = -ENOSPC; 2386 } 2387 2388 if (!pagemap_scan_push_range(categories, p, addr, *end)) { 2389 *end = addr; 2390 n_pages = 0; 2391 ret = -ENOSPC; 2392 } 2393 2394 p->found_pages += n_pages; 2395 if (ret) 2396 p->arg.walk_end = *end; 2397 2398 return ret; 2399 } 2400 2401 static int pagemap_scan_thp_entry(pmd_t *pmd, unsigned long start, 2402 unsigned long end, struct mm_walk *walk) 2403 { 2404 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 2405 struct pagemap_scan_private *p = walk->private; 2406 struct vm_area_struct *vma = walk->vma; 2407 unsigned long categories; 2408 spinlock_t *ptl; 2409 int ret = 0; 2410 2411 ptl = pmd_trans_huge_lock(pmd, vma); 2412 if (!ptl) 2413 return -ENOENT; 2414 2415 categories = p->cur_vma_category | 2416 pagemap_thp_category(p, vma, start, *pmd); 2417 2418 if (!pagemap_scan_is_interesting_page(categories, p)) 2419 goto out_unlock; 2420 2421 ret = pagemap_scan_output(categories, p, start, &end); 2422 if (start == end) 2423 goto out_unlock; 2424 2425 if (~p->arg.flags & PM_SCAN_WP_MATCHING) 2426 goto out_unlock; 2427 if (~categories & PAGE_IS_WRITTEN) 2428 goto out_unlock; 2429 2430 /* 2431 * Break huge page into small pages if the WP operation 2432 * needs to be performed on a portion of the huge page. 2433 */ 2434 if (end != start + HPAGE_SIZE) { 2435 spin_unlock(ptl); 2436 split_huge_pmd(vma, pmd, start); 2437 pagemap_scan_backout_range(p, start, end); 2438 /* Report as if there was no THP */ 2439 return -ENOENT; 2440 } 2441 2442 make_uffd_wp_pmd(vma, start, pmd); 2443 flush_tlb_range(vma, start, end); 2444 out_unlock: 2445 spin_unlock(ptl); 2446 return ret; 2447 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */ 2448 return -ENOENT; 2449 #endif 2450 } 2451 2452 static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start, 2453 unsigned long end, struct mm_walk *walk) 2454 { 2455 struct pagemap_scan_private *p = walk->private; 2456 struct vm_area_struct *vma = walk->vma; 2457 unsigned long addr, flush_end = 0; 2458 pte_t *pte, *start_pte; 2459 spinlock_t *ptl; 2460 int ret; 2461 2462 arch_enter_lazy_mmu_mode(); 2463 2464 ret = pagemap_scan_thp_entry(pmd, start, end, walk); 2465 if (ret != -ENOENT) { 2466 arch_leave_lazy_mmu_mode(); 2467 return ret; 2468 } 2469 2470 ret = 0; 2471 start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); 2472 if (!pte) { 2473 arch_leave_lazy_mmu_mode(); 2474 walk->action = ACTION_AGAIN; 2475 return 0; 2476 } 2477 2478 if ((p->arg.flags & PM_SCAN_WP_MATCHING) && !p->vec_out) { 2479 /* Fast path for performing exclusive WP */ 2480 for (addr = start; addr != end; pte++, addr += PAGE_SIZE) { 2481 pte_t ptent = ptep_get(pte); 2482 2483 if ((pte_present(ptent) && pte_uffd_wp(ptent)) || 2484 pte_swp_uffd_wp_any(ptent)) 2485 continue; 2486 make_uffd_wp_pte(vma, addr, pte, ptent); 2487 if (!flush_end) 2488 start = addr; 2489 flush_end = addr + PAGE_SIZE; 2490 } 2491 goto flush_and_return; 2492 } 2493 2494 if (!p->arg.category_anyof_mask && !p->arg.category_inverted && 2495 p->arg.category_mask == PAGE_IS_WRITTEN && 2496 p->arg.return_mask == PAGE_IS_WRITTEN) { 2497 for (addr = start; addr < end; pte++, addr += PAGE_SIZE) { 2498 unsigned long next = addr + PAGE_SIZE; 2499 pte_t ptent = ptep_get(pte); 2500 2501 if ((pte_present(ptent) && pte_uffd_wp(ptent)) || 2502 pte_swp_uffd_wp_any(ptent)) 2503 continue; 2504 ret = pagemap_scan_output(p->cur_vma_category | PAGE_IS_WRITTEN, 2505 p, addr, &next); 2506 if (next == addr) 2507 break; 2508 if (~p->arg.flags & PM_SCAN_WP_MATCHING) 2509 continue; 2510 make_uffd_wp_pte(vma, addr, pte, ptent); 2511 if (!flush_end) 2512 start = addr; 2513 flush_end = next; 2514 } 2515 goto flush_and_return; 2516 } 2517 2518 for (addr = start; addr != end; pte++, addr += PAGE_SIZE) { 2519 pte_t ptent = ptep_get(pte); 2520 unsigned long categories = p->cur_vma_category | 2521 pagemap_page_category(p, vma, addr, ptent); 2522 unsigned long next = addr + PAGE_SIZE; 2523 2524 if (!pagemap_scan_is_interesting_page(categories, p)) 2525 continue; 2526 2527 ret = pagemap_scan_output(categories, p, addr, &next); 2528 if (next == addr) 2529 break; 2530 2531 if (~p->arg.flags & PM_SCAN_WP_MATCHING) 2532 continue; 2533 if (~categories & PAGE_IS_WRITTEN) 2534 continue; 2535 2536 make_uffd_wp_pte(vma, addr, pte, ptent); 2537 if (!flush_end) 2538 start = addr; 2539 flush_end = next; 2540 } 2541 2542 flush_and_return: 2543 if (flush_end) 2544 flush_tlb_range(vma, start, addr); 2545 2546 pte_unmap_unlock(start_pte, ptl); 2547 arch_leave_lazy_mmu_mode(); 2548 2549 cond_resched(); 2550 return ret; 2551 } 2552 2553 #ifdef CONFIG_HUGETLB_PAGE 2554 static int pagemap_scan_hugetlb_entry(pte_t *ptep, unsigned long hmask, 2555 unsigned long start, unsigned long end, 2556 struct mm_walk *walk) 2557 { 2558 struct pagemap_scan_private *p = walk->private; 2559 struct vm_area_struct *vma = walk->vma; 2560 unsigned long categories; 2561 spinlock_t *ptl; 2562 int ret = 0; 2563 pte_t pte; 2564 2565 if (~p->arg.flags & PM_SCAN_WP_MATCHING) { 2566 /* Go the short route when not write-protecting pages. */ 2567 2568 pte = huge_ptep_get(walk->mm, start, ptep); 2569 categories = p->cur_vma_category | pagemap_hugetlb_category(pte); 2570 2571 if (!pagemap_scan_is_interesting_page(categories, p)) 2572 return 0; 2573 2574 return pagemap_scan_output(categories, p, start, &end); 2575 } 2576 2577 i_mmap_lock_write(vma->vm_file->f_mapping); 2578 ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, ptep); 2579 2580 pte = huge_ptep_get(walk->mm, start, ptep); 2581 categories = p->cur_vma_category | pagemap_hugetlb_category(pte); 2582 2583 if (!pagemap_scan_is_interesting_page(categories, p)) 2584 goto out_unlock; 2585 2586 ret = pagemap_scan_output(categories, p, start, &end); 2587 if (start == end) 2588 goto out_unlock; 2589 2590 if (~categories & PAGE_IS_WRITTEN) 2591 goto out_unlock; 2592 2593 if (end != start + HPAGE_SIZE) { 2594 /* Partial HugeTLB page WP isn't possible. */ 2595 pagemap_scan_backout_range(p, start, end); 2596 p->arg.walk_end = start; 2597 ret = 0; 2598 goto out_unlock; 2599 } 2600 2601 make_uffd_wp_huge_pte(vma, start, ptep, pte); 2602 flush_hugetlb_tlb_range(vma, start, end); 2603 2604 out_unlock: 2605 spin_unlock(ptl); 2606 i_mmap_unlock_write(vma->vm_file->f_mapping); 2607 2608 return ret; 2609 } 2610 #else 2611 #define pagemap_scan_hugetlb_entry NULL 2612 #endif 2613 2614 static int pagemap_scan_pte_hole(unsigned long addr, unsigned long end, 2615 int depth, struct mm_walk *walk) 2616 { 2617 struct pagemap_scan_private *p = walk->private; 2618 struct vm_area_struct *vma = walk->vma; 2619 int ret, err; 2620 2621 if (!vma || !pagemap_scan_is_interesting_page(p->cur_vma_category, p)) 2622 return 0; 2623 2624 ret = pagemap_scan_output(p->cur_vma_category, p, addr, &end); 2625 if (addr == end) 2626 return ret; 2627 2628 if (~p->arg.flags & PM_SCAN_WP_MATCHING) 2629 return ret; 2630 2631 err = uffd_wp_range(vma, addr, end - addr, true); 2632 if (err < 0) 2633 ret = err; 2634 2635 return ret; 2636 } 2637 2638 static const struct mm_walk_ops pagemap_scan_ops = { 2639 .test_walk = pagemap_scan_test_walk, 2640 .pmd_entry = pagemap_scan_pmd_entry, 2641 .pte_hole = pagemap_scan_pte_hole, 2642 .hugetlb_entry = pagemap_scan_hugetlb_entry, 2643 }; 2644 2645 static int pagemap_scan_get_args(struct pm_scan_arg *arg, 2646 unsigned long uarg) 2647 { 2648 if (copy_from_user(arg, (void __user *)uarg, sizeof(*arg))) 2649 return -EFAULT; 2650 2651 if (arg->size != sizeof(struct pm_scan_arg)) 2652 return -EINVAL; 2653 2654 /* Validate requested features */ 2655 if (arg->flags & ~PM_SCAN_FLAGS) 2656 return -EINVAL; 2657 if ((arg->category_inverted | arg->category_mask | 2658 arg->category_anyof_mask | arg->return_mask) & ~PM_SCAN_CATEGORIES) 2659 return -EINVAL; 2660 2661 arg->start = untagged_addr((unsigned long)arg->start); 2662 arg->end = untagged_addr((unsigned long)arg->end); 2663 arg->vec = untagged_addr((unsigned long)arg->vec); 2664 2665 /* Validate memory pointers */ 2666 if (!IS_ALIGNED(arg->start, PAGE_SIZE)) 2667 return -EINVAL; 2668 if (!access_ok((void __user *)(long)arg->start, arg->end - arg->start)) 2669 return -EFAULT; 2670 if (!arg->vec && arg->vec_len) 2671 return -EINVAL; 2672 if (UINT_MAX == SIZE_MAX && arg->vec_len > SIZE_MAX) 2673 return -EINVAL; 2674 if (arg->vec && !access_ok((void __user *)(long)arg->vec, 2675 size_mul(arg->vec_len, sizeof(struct page_region)))) 2676 return -EFAULT; 2677 2678 /* Fixup default values */ 2679 arg->end = ALIGN(arg->end, PAGE_SIZE); 2680 arg->walk_end = 0; 2681 if (!arg->max_pages) 2682 arg->max_pages = ULONG_MAX; 2683 2684 return 0; 2685 } 2686 2687 static int pagemap_scan_writeback_args(struct pm_scan_arg *arg, 2688 unsigned long uargl) 2689 { 2690 struct pm_scan_arg __user *uarg = (void __user *)uargl; 2691 2692 if (copy_to_user(&uarg->walk_end, &arg->walk_end, sizeof(arg->walk_end))) 2693 return -EFAULT; 2694 2695 return 0; 2696 } 2697 2698 static int pagemap_scan_init_bounce_buffer(struct pagemap_scan_private *p) 2699 { 2700 if (!p->arg.vec_len) 2701 return 0; 2702 2703 p->vec_buf_len = min_t(size_t, PAGEMAP_WALK_SIZE >> PAGE_SHIFT, 2704 p->arg.vec_len); 2705 p->vec_buf = kmalloc_array(p->vec_buf_len, sizeof(*p->vec_buf), 2706 GFP_KERNEL); 2707 if (!p->vec_buf) 2708 return -ENOMEM; 2709 2710 p->vec_buf->start = p->vec_buf->end = 0; 2711 p->vec_out = (struct page_region __user *)(long)p->arg.vec; 2712 2713 return 0; 2714 } 2715 2716 static long pagemap_scan_flush_buffer(struct pagemap_scan_private *p) 2717 { 2718 const struct page_region *buf = p->vec_buf; 2719 long n = p->vec_buf_index; 2720 2721 if (!p->vec_buf) 2722 return 0; 2723 2724 if (buf[n].end != buf[n].start) 2725 n++; 2726 2727 if (!n) 2728 return 0; 2729 2730 if (copy_to_user(p->vec_out, buf, n * sizeof(*buf))) 2731 return -EFAULT; 2732 2733 p->arg.vec_len -= n; 2734 p->vec_out += n; 2735 2736 p->vec_buf_index = 0; 2737 p->vec_buf_len = min_t(size_t, p->vec_buf_len, p->arg.vec_len); 2738 p->vec_buf->start = p->vec_buf->end = 0; 2739 2740 return n; 2741 } 2742 2743 static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg) 2744 { 2745 struct pagemap_scan_private p = {0}; 2746 unsigned long walk_start; 2747 size_t n_ranges_out = 0; 2748 int ret; 2749 2750 ret = pagemap_scan_get_args(&p.arg, uarg); 2751 if (ret) 2752 return ret; 2753 2754 p.masks_of_interest = p.arg.category_mask | p.arg.category_anyof_mask | 2755 p.arg.return_mask; 2756 ret = pagemap_scan_init_bounce_buffer(&p); 2757 if (ret) 2758 return ret; 2759 2760 for (walk_start = p.arg.start; walk_start < p.arg.end; 2761 walk_start = p.arg.walk_end) { 2762 struct mmu_notifier_range range; 2763 long n_out; 2764 2765 if (fatal_signal_pending(current)) { 2766 ret = -EINTR; 2767 break; 2768 } 2769 2770 ret = mmap_read_lock_killable(mm); 2771 if (ret) 2772 break; 2773 2774 /* Protection change for the range is going to happen. */ 2775 if (p.arg.flags & PM_SCAN_WP_MATCHING) { 2776 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0, 2777 mm, walk_start, p.arg.end); 2778 mmu_notifier_invalidate_range_start(&range); 2779 } 2780 2781 ret = walk_page_range(mm, walk_start, p.arg.end, 2782 &pagemap_scan_ops, &p); 2783 2784 if (p.arg.flags & PM_SCAN_WP_MATCHING) 2785 mmu_notifier_invalidate_range_end(&range); 2786 2787 mmap_read_unlock(mm); 2788 2789 n_out = pagemap_scan_flush_buffer(&p); 2790 if (n_out < 0) 2791 ret = n_out; 2792 else 2793 n_ranges_out += n_out; 2794 2795 if (ret != -ENOSPC) 2796 break; 2797 2798 if (p.arg.vec_len == 0 || p.found_pages == p.arg.max_pages) 2799 break; 2800 } 2801 2802 /* ENOSPC signifies early stop (buffer full) from the walk. */ 2803 if (!ret || ret == -ENOSPC) 2804 ret = n_ranges_out; 2805 2806 /* The walk_end isn't set when ret is zero */ 2807 if (!p.arg.walk_end) 2808 p.arg.walk_end = p.arg.end; 2809 if (pagemap_scan_writeback_args(&p.arg, uarg)) 2810 ret = -EFAULT; 2811 2812 kfree(p.vec_buf); 2813 return ret; 2814 } 2815 2816 static long do_pagemap_cmd(struct file *file, unsigned int cmd, 2817 unsigned long arg) 2818 { 2819 struct mm_struct *mm = file->private_data; 2820 2821 switch (cmd) { 2822 case PAGEMAP_SCAN: 2823 return do_pagemap_scan(mm, arg); 2824 2825 default: 2826 return -EINVAL; 2827 } 2828 } 2829 2830 const struct file_operations proc_pagemap_operations = { 2831 .llseek = mem_lseek, /* borrow this */ 2832 .read = pagemap_read, 2833 .open = pagemap_open, 2834 .release = pagemap_release, 2835 .unlocked_ioctl = do_pagemap_cmd, 2836 .compat_ioctl = do_pagemap_cmd, 2837 }; 2838 #endif /* CONFIG_PROC_PAGE_MONITOR */ 2839 2840 #ifdef CONFIG_NUMA 2841 2842 struct numa_maps { 2843 unsigned long pages; 2844 unsigned long anon; 2845 unsigned long active; 2846 unsigned long writeback; 2847 unsigned long mapcount_max; 2848 unsigned long dirty; 2849 unsigned long swapcache; 2850 unsigned long node[MAX_NUMNODES]; 2851 }; 2852 2853 struct numa_maps_private { 2854 struct proc_maps_private proc_maps; 2855 struct numa_maps md; 2856 }; 2857 2858 static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty, 2859 unsigned long nr_pages) 2860 { 2861 struct folio *folio = page_folio(page); 2862 int count = folio_precise_page_mapcount(folio, page); 2863 2864 md->pages += nr_pages; 2865 if (pte_dirty || folio_test_dirty(folio)) 2866 md->dirty += nr_pages; 2867 2868 if (folio_test_swapcache(folio)) 2869 md->swapcache += nr_pages; 2870 2871 if (folio_test_active(folio) || folio_test_unevictable(folio)) 2872 md->active += nr_pages; 2873 2874 if (folio_test_writeback(folio)) 2875 md->writeback += nr_pages; 2876 2877 if (folio_test_anon(folio)) 2878 md->anon += nr_pages; 2879 2880 if (count > md->mapcount_max) 2881 md->mapcount_max = count; 2882 2883 md->node[folio_nid(folio)] += nr_pages; 2884 } 2885 2886 static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma, 2887 unsigned long addr) 2888 { 2889 struct page *page; 2890 int nid; 2891 2892 if (!pte_present(pte)) 2893 return NULL; 2894 2895 page = vm_normal_page(vma, addr, pte); 2896 if (!page || is_zone_device_page(page)) 2897 return NULL; 2898 2899 if (PageReserved(page)) 2900 return NULL; 2901 2902 nid = page_to_nid(page); 2903 if (!node_isset(nid, node_states[N_MEMORY])) 2904 return NULL; 2905 2906 return page; 2907 } 2908 2909 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 2910 static struct page *can_gather_numa_stats_pmd(pmd_t pmd, 2911 struct vm_area_struct *vma, 2912 unsigned long addr) 2913 { 2914 struct page *page; 2915 int nid; 2916 2917 if (!pmd_present(pmd)) 2918 return NULL; 2919 2920 page = vm_normal_page_pmd(vma, addr, pmd); 2921 if (!page) 2922 return NULL; 2923 2924 if (PageReserved(page)) 2925 return NULL; 2926 2927 nid = page_to_nid(page); 2928 if (!node_isset(nid, node_states[N_MEMORY])) 2929 return NULL; 2930 2931 return page; 2932 } 2933 #endif 2934 2935 static int gather_pte_stats(pmd_t *pmd, unsigned long addr, 2936 unsigned long end, struct mm_walk *walk) 2937 { 2938 struct numa_maps *md = walk->private; 2939 struct vm_area_struct *vma = walk->vma; 2940 spinlock_t *ptl; 2941 pte_t *orig_pte; 2942 pte_t *pte; 2943 2944 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 2945 ptl = pmd_trans_huge_lock(pmd, vma); 2946 if (ptl) { 2947 struct page *page; 2948 2949 page = can_gather_numa_stats_pmd(*pmd, vma, addr); 2950 if (page) 2951 gather_stats(page, md, pmd_dirty(*pmd), 2952 HPAGE_PMD_SIZE/PAGE_SIZE); 2953 spin_unlock(ptl); 2954 return 0; 2955 } 2956 #endif 2957 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); 2958 if (!pte) { 2959 walk->action = ACTION_AGAIN; 2960 return 0; 2961 } 2962 do { 2963 pte_t ptent = ptep_get(pte); 2964 struct page *page = can_gather_numa_stats(ptent, vma, addr); 2965 if (!page) 2966 continue; 2967 gather_stats(page, md, pte_dirty(ptent), 1); 2968 2969 } while (pte++, addr += PAGE_SIZE, addr != end); 2970 pte_unmap_unlock(orig_pte, ptl); 2971 cond_resched(); 2972 return 0; 2973 } 2974 #ifdef CONFIG_HUGETLB_PAGE 2975 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask, 2976 unsigned long addr, unsigned long end, struct mm_walk *walk) 2977 { 2978 pte_t huge_pte = huge_ptep_get(walk->mm, addr, pte); 2979 struct numa_maps *md; 2980 struct page *page; 2981 2982 if (!pte_present(huge_pte)) 2983 return 0; 2984 2985 page = pte_page(huge_pte); 2986 2987 md = walk->private; 2988 gather_stats(page, md, pte_dirty(huge_pte), 1); 2989 return 0; 2990 } 2991 2992 #else 2993 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask, 2994 unsigned long addr, unsigned long end, struct mm_walk *walk) 2995 { 2996 return 0; 2997 } 2998 #endif 2999 3000 static const struct mm_walk_ops show_numa_ops = { 3001 .hugetlb_entry = gather_hugetlb_stats, 3002 .pmd_entry = gather_pte_stats, 3003 .walk_lock = PGWALK_RDLOCK, 3004 }; 3005 3006 /* 3007 * Display pages allocated per node and memory policy via /proc. 3008 */ 3009 static int show_numa_map(struct seq_file *m, void *v) 3010 { 3011 struct numa_maps_private *numa_priv = m->private; 3012 struct proc_maps_private *proc_priv = &numa_priv->proc_maps; 3013 struct vm_area_struct *vma = v; 3014 struct numa_maps *md = &numa_priv->md; 3015 struct file *file = vma->vm_file; 3016 struct mm_struct *mm = vma->vm_mm; 3017 char buffer[64]; 3018 struct mempolicy *pol; 3019 pgoff_t ilx; 3020 int nid; 3021 3022 if (!mm) 3023 return 0; 3024 3025 /* Ensure we start with an empty set of numa_maps statistics. */ 3026 memset(md, 0, sizeof(*md)); 3027 3028 pol = __get_vma_policy(vma, vma->vm_start, &ilx); 3029 if (pol) { 3030 mpol_to_str(buffer, sizeof(buffer), pol); 3031 mpol_cond_put(pol); 3032 } else { 3033 mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy); 3034 } 3035 3036 seq_printf(m, "%08lx %s", vma->vm_start, buffer); 3037 3038 if (file) { 3039 seq_puts(m, " file="); 3040 seq_path(m, file_user_path(file), "\n\t= "); 3041 } else if (vma_is_initial_heap(vma)) { 3042 seq_puts(m, " heap"); 3043 } else if (vma_is_initial_stack(vma)) { 3044 seq_puts(m, " stack"); 3045 } 3046 3047 if (is_vm_hugetlb_page(vma)) 3048 seq_puts(m, " huge"); 3049 3050 /* mmap_lock is held by m_start */ 3051 walk_page_vma(vma, &show_numa_ops, md); 3052 3053 if (!md->pages) 3054 goto out; 3055 3056 if (md->anon) 3057 seq_printf(m, " anon=%lu", md->anon); 3058 3059 if (md->dirty) 3060 seq_printf(m, " dirty=%lu", md->dirty); 3061 3062 if (md->pages != md->anon && md->pages != md->dirty) 3063 seq_printf(m, " mapped=%lu", md->pages); 3064 3065 if (md->mapcount_max > 1) 3066 seq_printf(m, " mapmax=%lu", md->mapcount_max); 3067 3068 if (md->swapcache) 3069 seq_printf(m, " swapcache=%lu", md->swapcache); 3070 3071 if (md->active < md->pages && !is_vm_hugetlb_page(vma)) 3072 seq_printf(m, " active=%lu", md->active); 3073 3074 if (md->writeback) 3075 seq_printf(m, " writeback=%lu", md->writeback); 3076 3077 for_each_node_state(nid, N_MEMORY) 3078 if (md->node[nid]) 3079 seq_printf(m, " N%d=%lu", nid, md->node[nid]); 3080 3081 seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10); 3082 out: 3083 seq_putc(m, '\n'); 3084 return 0; 3085 } 3086 3087 static const struct seq_operations proc_pid_numa_maps_op = { 3088 .start = m_start, 3089 .next = m_next, 3090 .stop = m_stop, 3091 .show = show_numa_map, 3092 }; 3093 3094 static int pid_numa_maps_open(struct inode *inode, struct file *file) 3095 { 3096 return proc_maps_open(inode, file, &proc_pid_numa_maps_op, 3097 sizeof(struct numa_maps_private)); 3098 } 3099 3100 const struct file_operations proc_pid_numa_maps_operations = { 3101 .open = pid_numa_maps_open, 3102 .read = seq_read, 3103 .llseek = seq_lseek, 3104 .release = proc_map_release, 3105 }; 3106 3107 #endif /* CONFIG_NUMA */ 3108