1 #include <linux/mm.h> 2 #include <linux/vmacache.h> 3 #include <linux/hugetlb.h> 4 #include <linux/huge_mm.h> 5 #include <linux/mount.h> 6 #include <linux/seq_file.h> 7 #include <linux/highmem.h> 8 #include <linux/ptrace.h> 9 #include <linux/slab.h> 10 #include <linux/pagemap.h> 11 #include <linux/mempolicy.h> 12 #include <linux/rmap.h> 13 #include <linux/swap.h> 14 #include <linux/swapops.h> 15 #include <linux/mmu_notifier.h> 16 17 #include <asm/elf.h> 18 #include <asm/uaccess.h> 19 #include <asm/tlbflush.h> 20 #include "internal.h" 21 22 void task_mem(struct seq_file *m, struct mm_struct *mm) 23 { 24 unsigned long data, text, lib, swap; 25 unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss; 26 27 /* 28 * Note: to minimize their overhead, mm maintains hiwater_vm and 29 * hiwater_rss only when about to *lower* total_vm or rss. Any 30 * collector of these hiwater stats must therefore get total_vm 31 * and rss too, which will usually be the higher. Barriers? not 32 * worth the effort, such snapshots can always be inconsistent. 33 */ 34 hiwater_vm = total_vm = mm->total_vm; 35 if (hiwater_vm < mm->hiwater_vm) 36 hiwater_vm = mm->hiwater_vm; 37 hiwater_rss = total_rss = get_mm_rss(mm); 38 if (hiwater_rss < mm->hiwater_rss) 39 hiwater_rss = mm->hiwater_rss; 40 41 data = mm->total_vm - mm->shared_vm - mm->stack_vm; 42 text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10; 43 lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text; 44 swap = get_mm_counter(mm, MM_SWAPENTS); 45 seq_printf(m, 46 "VmPeak:\t%8lu kB\n" 47 "VmSize:\t%8lu kB\n" 48 "VmLck:\t%8lu kB\n" 49 "VmPin:\t%8lu kB\n" 50 "VmHWM:\t%8lu kB\n" 51 "VmRSS:\t%8lu kB\n" 52 "VmData:\t%8lu kB\n" 53 "VmStk:\t%8lu kB\n" 54 "VmExe:\t%8lu kB\n" 55 "VmLib:\t%8lu kB\n" 56 "VmPTE:\t%8lu kB\n" 57 "VmSwap:\t%8lu kB\n", 58 hiwater_vm << (PAGE_SHIFT-10), 59 total_vm << (PAGE_SHIFT-10), 60 mm->locked_vm << (PAGE_SHIFT-10), 61 mm->pinned_vm << (PAGE_SHIFT-10), 62 hiwater_rss << (PAGE_SHIFT-10), 63 total_rss << (PAGE_SHIFT-10), 64 data << (PAGE_SHIFT-10), 65 mm->stack_vm << (PAGE_SHIFT-10), text, lib, 66 (PTRS_PER_PTE * sizeof(pte_t) * 67 atomic_long_read(&mm->nr_ptes)) >> 10, 68 swap << (PAGE_SHIFT-10)); 69 } 70 71 unsigned long task_vsize(struct mm_struct *mm) 72 { 73 return PAGE_SIZE * mm->total_vm; 74 } 75 76 unsigned long task_statm(struct mm_struct *mm, 77 unsigned long *shared, unsigned long *text, 78 unsigned long *data, unsigned long *resident) 79 { 80 *shared = get_mm_counter(mm, MM_FILEPAGES); 81 *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) 82 >> PAGE_SHIFT; 83 *data = mm->total_vm - mm->shared_vm; 84 *resident = *shared + get_mm_counter(mm, MM_ANONPAGES); 85 return mm->total_vm; 86 } 87 88 #ifdef CONFIG_NUMA 89 /* 90 * These functions are for numa_maps but called in generic **maps seq_file 91 * ->start(), ->stop() ops. 92 * 93 * numa_maps scans all vmas under mmap_sem and checks their mempolicy. 94 * Each mempolicy object is controlled by reference counting. The problem here 95 * is how to avoid accessing dead mempolicy object. 96 * 97 * Because we're holding mmap_sem while reading seq_file, it's safe to access 98 * each vma's mempolicy, no vma objects will never drop refs to mempolicy. 99 * 100 * A task's mempolicy (task->mempolicy) has different behavior. task->mempolicy 101 * is set and replaced under mmap_sem but unrefed and cleared under task_lock(). 102 * So, without task_lock(), we cannot trust get_vma_policy() because we cannot 103 * gurantee the task never exits under us. But taking task_lock() around 104 * get_vma_plicy() causes lock order problem. 105 * 106 * To access task->mempolicy without lock, we hold a reference count of an 107 * object pointed by task->mempolicy and remember it. This will guarantee 108 * that task->mempolicy points to an alive object or NULL in numa_maps accesses. 109 */ 110 static void hold_task_mempolicy(struct proc_maps_private *priv) 111 { 112 struct task_struct *task = priv->task; 113 114 task_lock(task); 115 priv->task_mempolicy = task->mempolicy; 116 mpol_get(priv->task_mempolicy); 117 task_unlock(task); 118 } 119 static void release_task_mempolicy(struct proc_maps_private *priv) 120 { 121 mpol_put(priv->task_mempolicy); 122 } 123 #else 124 static void hold_task_mempolicy(struct proc_maps_private *priv) 125 { 126 } 127 static void release_task_mempolicy(struct proc_maps_private *priv) 128 { 129 } 130 #endif 131 132 static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma) 133 { 134 if (vma && vma != priv->tail_vma) { 135 struct mm_struct *mm = vma->vm_mm; 136 release_task_mempolicy(priv); 137 up_read(&mm->mmap_sem); 138 mmput(mm); 139 } 140 } 141 142 static void *m_start(struct seq_file *m, loff_t *pos) 143 { 144 struct proc_maps_private *priv = m->private; 145 unsigned long last_addr = m->version; 146 struct mm_struct *mm; 147 struct vm_area_struct *vma, *tail_vma = NULL; 148 loff_t l = *pos; 149 150 /* Clear the per syscall fields in priv */ 151 priv->task = NULL; 152 priv->tail_vma = NULL; 153 154 /* 155 * We remember last_addr rather than next_addr to hit with 156 * vmacache most of the time. We have zero last_addr at 157 * the beginning and also after lseek. We will have -1 last_addr 158 * after the end of the vmas. 159 */ 160 161 if (last_addr == -1UL) 162 return NULL; 163 164 priv->task = get_pid_task(priv->pid, PIDTYPE_PID); 165 if (!priv->task) 166 return ERR_PTR(-ESRCH); 167 168 mm = mm_access(priv->task, PTRACE_MODE_READ); 169 if (!mm || IS_ERR(mm)) 170 return mm; 171 down_read(&mm->mmap_sem); 172 173 tail_vma = get_gate_vma(priv->task->mm); 174 priv->tail_vma = tail_vma; 175 hold_task_mempolicy(priv); 176 /* Start with last addr hint */ 177 vma = find_vma(mm, last_addr); 178 if (last_addr && vma) { 179 vma = vma->vm_next; 180 goto out; 181 } 182 183 /* 184 * Check the vma index is within the range and do 185 * sequential scan until m_index. 186 */ 187 vma = NULL; 188 if ((unsigned long)l < mm->map_count) { 189 vma = mm->mmap; 190 while (l-- && vma) 191 vma = vma->vm_next; 192 goto out; 193 } 194 195 if (l != mm->map_count) 196 tail_vma = NULL; /* After gate vma */ 197 198 out: 199 if (vma) 200 return vma; 201 202 release_task_mempolicy(priv); 203 /* End of vmas has been reached */ 204 m->version = (tail_vma != NULL)? 0: -1UL; 205 up_read(&mm->mmap_sem); 206 mmput(mm); 207 return tail_vma; 208 } 209 210 static void *m_next(struct seq_file *m, void *v, loff_t *pos) 211 { 212 struct proc_maps_private *priv = m->private; 213 struct vm_area_struct *vma = v; 214 struct vm_area_struct *tail_vma = priv->tail_vma; 215 216 (*pos)++; 217 if (vma && (vma != tail_vma) && vma->vm_next) 218 return vma->vm_next; 219 vma_stop(priv, vma); 220 return (vma != tail_vma)? tail_vma: NULL; 221 } 222 223 static void m_stop(struct seq_file *m, void *v) 224 { 225 struct proc_maps_private *priv = m->private; 226 struct vm_area_struct *vma = v; 227 228 if (!IS_ERR(vma)) 229 vma_stop(priv, vma); 230 if (priv->task) 231 put_task_struct(priv->task); 232 } 233 234 static int do_maps_open(struct inode *inode, struct file *file, 235 const struct seq_operations *ops) 236 { 237 struct proc_maps_private *priv; 238 int ret = -ENOMEM; 239 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 240 if (priv) { 241 priv->pid = proc_pid(inode); 242 ret = seq_open(file, ops); 243 if (!ret) { 244 struct seq_file *m = file->private_data; 245 m->private = priv; 246 } else { 247 kfree(priv); 248 } 249 } 250 return ret; 251 } 252 253 static void 254 show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) 255 { 256 struct mm_struct *mm = vma->vm_mm; 257 struct file *file = vma->vm_file; 258 struct proc_maps_private *priv = m->private; 259 struct task_struct *task = priv->task; 260 vm_flags_t flags = vma->vm_flags; 261 unsigned long ino = 0; 262 unsigned long long pgoff = 0; 263 unsigned long start, end; 264 dev_t dev = 0; 265 const char *name = NULL; 266 267 if (file) { 268 struct inode *inode = file_inode(vma->vm_file); 269 dev = inode->i_sb->s_dev; 270 ino = inode->i_ino; 271 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; 272 } 273 274 /* We don't show the stack guard page in /proc/maps */ 275 start = vma->vm_start; 276 if (stack_guard_page_start(vma, start)) 277 start += PAGE_SIZE; 278 end = vma->vm_end; 279 if (stack_guard_page_end(vma, end)) 280 end -= PAGE_SIZE; 281 282 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1); 283 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ", 284 start, 285 end, 286 flags & VM_READ ? 'r' : '-', 287 flags & VM_WRITE ? 'w' : '-', 288 flags & VM_EXEC ? 'x' : '-', 289 flags & VM_MAYSHARE ? 's' : 'p', 290 pgoff, 291 MAJOR(dev), MINOR(dev), ino); 292 293 /* 294 * Print the dentry name for named mappings, and a 295 * special [heap] marker for the heap: 296 */ 297 if (file) { 298 seq_pad(m, ' '); 299 seq_path(m, &file->f_path, "\n"); 300 goto done; 301 } 302 303 if (vma->vm_ops && vma->vm_ops->name) { 304 name = vma->vm_ops->name(vma); 305 if (name) 306 goto done; 307 } 308 309 name = arch_vma_name(vma); 310 if (!name) { 311 pid_t tid; 312 313 if (!mm) { 314 name = "[vdso]"; 315 goto done; 316 } 317 318 if (vma->vm_start <= mm->brk && 319 vma->vm_end >= mm->start_brk) { 320 name = "[heap]"; 321 goto done; 322 } 323 324 tid = vm_is_stack(task, vma, is_pid); 325 326 if (tid != 0) { 327 /* 328 * Thread stack in /proc/PID/task/TID/maps or 329 * the main process stack. 330 */ 331 if (!is_pid || (vma->vm_start <= mm->start_stack && 332 vma->vm_end >= mm->start_stack)) { 333 name = "[stack]"; 334 } else { 335 /* Thread stack in /proc/PID/maps */ 336 seq_pad(m, ' '); 337 seq_printf(m, "[stack:%d]", tid); 338 } 339 } 340 } 341 342 done: 343 if (name) { 344 seq_pad(m, ' '); 345 seq_puts(m, name); 346 } 347 seq_putc(m, '\n'); 348 } 349 350 static int show_map(struct seq_file *m, void *v, int is_pid) 351 { 352 struct vm_area_struct *vma = v; 353 struct proc_maps_private *priv = m->private; 354 struct task_struct *task = priv->task; 355 356 show_map_vma(m, vma, is_pid); 357 358 if (m->count < m->size) /* vma is copied successfully */ 359 m->version = (vma != get_gate_vma(task->mm)) 360 ? vma->vm_start : 0; 361 return 0; 362 } 363 364 static int show_pid_map(struct seq_file *m, void *v) 365 { 366 return show_map(m, v, 1); 367 } 368 369 static int show_tid_map(struct seq_file *m, void *v) 370 { 371 return show_map(m, v, 0); 372 } 373 374 static const struct seq_operations proc_pid_maps_op = { 375 .start = m_start, 376 .next = m_next, 377 .stop = m_stop, 378 .show = show_pid_map 379 }; 380 381 static const struct seq_operations proc_tid_maps_op = { 382 .start = m_start, 383 .next = m_next, 384 .stop = m_stop, 385 .show = show_tid_map 386 }; 387 388 static int pid_maps_open(struct inode *inode, struct file *file) 389 { 390 return do_maps_open(inode, file, &proc_pid_maps_op); 391 } 392 393 static int tid_maps_open(struct inode *inode, struct file *file) 394 { 395 return do_maps_open(inode, file, &proc_tid_maps_op); 396 } 397 398 const struct file_operations proc_pid_maps_operations = { 399 .open = pid_maps_open, 400 .read = seq_read, 401 .llseek = seq_lseek, 402 .release = seq_release_private, 403 }; 404 405 const struct file_operations proc_tid_maps_operations = { 406 .open = tid_maps_open, 407 .read = seq_read, 408 .llseek = seq_lseek, 409 .release = seq_release_private, 410 }; 411 412 /* 413 * Proportional Set Size(PSS): my share of RSS. 414 * 415 * PSS of a process is the count of pages it has in memory, where each 416 * page is divided by the number of processes sharing it. So if a 417 * process has 1000 pages all to itself, and 1000 shared with one other 418 * process, its PSS will be 1500. 419 * 420 * To keep (accumulated) division errors low, we adopt a 64bit 421 * fixed-point pss counter to minimize division errors. So (pss >> 422 * PSS_SHIFT) would be the real byte count. 423 * 424 * A shift of 12 before division means (assuming 4K page size): 425 * - 1M 3-user-pages add up to 8KB errors; 426 * - supports mapcount up to 2^24, or 16M; 427 * - supports PSS up to 2^52 bytes, or 4PB. 428 */ 429 #define PSS_SHIFT 12 430 431 #ifdef CONFIG_PROC_PAGE_MONITOR 432 struct mem_size_stats { 433 struct vm_area_struct *vma; 434 unsigned long resident; 435 unsigned long shared_clean; 436 unsigned long shared_dirty; 437 unsigned long private_clean; 438 unsigned long private_dirty; 439 unsigned long referenced; 440 unsigned long anonymous; 441 unsigned long anonymous_thp; 442 unsigned long swap; 443 unsigned long nonlinear; 444 u64 pss; 445 }; 446 447 448 static void smaps_pte_entry(pte_t ptent, unsigned long addr, 449 unsigned long ptent_size, struct mm_walk *walk) 450 { 451 struct mem_size_stats *mss = walk->private; 452 struct vm_area_struct *vma = mss->vma; 453 pgoff_t pgoff = linear_page_index(vma, addr); 454 struct page *page = NULL; 455 int mapcount; 456 457 if (pte_present(ptent)) { 458 page = vm_normal_page(vma, addr, ptent); 459 } else if (is_swap_pte(ptent)) { 460 swp_entry_t swpent = pte_to_swp_entry(ptent); 461 462 if (!non_swap_entry(swpent)) 463 mss->swap += ptent_size; 464 else if (is_migration_entry(swpent)) 465 page = migration_entry_to_page(swpent); 466 } else if (pte_file(ptent)) { 467 if (pte_to_pgoff(ptent) != pgoff) 468 mss->nonlinear += ptent_size; 469 } 470 471 if (!page) 472 return; 473 474 if (PageAnon(page)) 475 mss->anonymous += ptent_size; 476 477 if (page->index != pgoff) 478 mss->nonlinear += ptent_size; 479 480 mss->resident += ptent_size; 481 /* Accumulate the size in pages that have been accessed. */ 482 if (pte_young(ptent) || PageReferenced(page)) 483 mss->referenced += ptent_size; 484 mapcount = page_mapcount(page); 485 if (mapcount >= 2) { 486 if (pte_dirty(ptent) || PageDirty(page)) 487 mss->shared_dirty += ptent_size; 488 else 489 mss->shared_clean += ptent_size; 490 mss->pss += (ptent_size << PSS_SHIFT) / mapcount; 491 } else { 492 if (pte_dirty(ptent) || PageDirty(page)) 493 mss->private_dirty += ptent_size; 494 else 495 mss->private_clean += ptent_size; 496 mss->pss += (ptent_size << PSS_SHIFT); 497 } 498 } 499 500 static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, 501 struct mm_walk *walk) 502 { 503 struct mem_size_stats *mss = walk->private; 504 struct vm_area_struct *vma = mss->vma; 505 pte_t *pte; 506 spinlock_t *ptl; 507 508 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { 509 smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk); 510 spin_unlock(ptl); 511 mss->anonymous_thp += HPAGE_PMD_SIZE; 512 return 0; 513 } 514 515 if (pmd_trans_unstable(pmd)) 516 return 0; 517 /* 518 * The mmap_sem held all the way back in m_start() is what 519 * keeps khugepaged out of here and from collapsing things 520 * in here. 521 */ 522 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 523 for (; addr != end; pte++, addr += PAGE_SIZE) 524 smaps_pte_entry(*pte, addr, PAGE_SIZE, walk); 525 pte_unmap_unlock(pte - 1, ptl); 526 cond_resched(); 527 return 0; 528 } 529 530 static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma) 531 { 532 /* 533 * Don't forget to update Documentation/ on changes. 534 */ 535 static const char mnemonics[BITS_PER_LONG][2] = { 536 /* 537 * In case if we meet a flag we don't know about. 538 */ 539 [0 ... (BITS_PER_LONG-1)] = "??", 540 541 [ilog2(VM_READ)] = "rd", 542 [ilog2(VM_WRITE)] = "wr", 543 [ilog2(VM_EXEC)] = "ex", 544 [ilog2(VM_SHARED)] = "sh", 545 [ilog2(VM_MAYREAD)] = "mr", 546 [ilog2(VM_MAYWRITE)] = "mw", 547 [ilog2(VM_MAYEXEC)] = "me", 548 [ilog2(VM_MAYSHARE)] = "ms", 549 [ilog2(VM_GROWSDOWN)] = "gd", 550 [ilog2(VM_PFNMAP)] = "pf", 551 [ilog2(VM_DENYWRITE)] = "dw", 552 [ilog2(VM_LOCKED)] = "lo", 553 [ilog2(VM_IO)] = "io", 554 [ilog2(VM_SEQ_READ)] = "sr", 555 [ilog2(VM_RAND_READ)] = "rr", 556 [ilog2(VM_DONTCOPY)] = "dc", 557 [ilog2(VM_DONTEXPAND)] = "de", 558 [ilog2(VM_ACCOUNT)] = "ac", 559 [ilog2(VM_NORESERVE)] = "nr", 560 [ilog2(VM_HUGETLB)] = "ht", 561 [ilog2(VM_NONLINEAR)] = "nl", 562 [ilog2(VM_ARCH_1)] = "ar", 563 [ilog2(VM_DONTDUMP)] = "dd", 564 #ifdef CONFIG_MEM_SOFT_DIRTY 565 [ilog2(VM_SOFTDIRTY)] = "sd", 566 #endif 567 [ilog2(VM_MIXEDMAP)] = "mm", 568 [ilog2(VM_HUGEPAGE)] = "hg", 569 [ilog2(VM_NOHUGEPAGE)] = "nh", 570 [ilog2(VM_MERGEABLE)] = "mg", 571 }; 572 size_t i; 573 574 seq_puts(m, "VmFlags: "); 575 for (i = 0; i < BITS_PER_LONG; i++) { 576 if (vma->vm_flags & (1UL << i)) { 577 seq_printf(m, "%c%c ", 578 mnemonics[i][0], mnemonics[i][1]); 579 } 580 } 581 seq_putc(m, '\n'); 582 } 583 584 static int show_smap(struct seq_file *m, void *v, int is_pid) 585 { 586 struct proc_maps_private *priv = m->private; 587 struct task_struct *task = priv->task; 588 struct vm_area_struct *vma = v; 589 struct mem_size_stats mss; 590 struct mm_walk smaps_walk = { 591 .pmd_entry = smaps_pte_range, 592 .mm = vma->vm_mm, 593 .private = &mss, 594 }; 595 596 memset(&mss, 0, sizeof mss); 597 mss.vma = vma; 598 /* mmap_sem is held in m_start */ 599 if (vma->vm_mm && !is_vm_hugetlb_page(vma)) 600 walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk); 601 602 show_map_vma(m, vma, is_pid); 603 604 seq_printf(m, 605 "Size: %8lu kB\n" 606 "Rss: %8lu kB\n" 607 "Pss: %8lu kB\n" 608 "Shared_Clean: %8lu kB\n" 609 "Shared_Dirty: %8lu kB\n" 610 "Private_Clean: %8lu kB\n" 611 "Private_Dirty: %8lu kB\n" 612 "Referenced: %8lu kB\n" 613 "Anonymous: %8lu kB\n" 614 "AnonHugePages: %8lu kB\n" 615 "Swap: %8lu kB\n" 616 "KernelPageSize: %8lu kB\n" 617 "MMUPageSize: %8lu kB\n" 618 "Locked: %8lu kB\n", 619 (vma->vm_end - vma->vm_start) >> 10, 620 mss.resident >> 10, 621 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)), 622 mss.shared_clean >> 10, 623 mss.shared_dirty >> 10, 624 mss.private_clean >> 10, 625 mss.private_dirty >> 10, 626 mss.referenced >> 10, 627 mss.anonymous >> 10, 628 mss.anonymous_thp >> 10, 629 mss.swap >> 10, 630 vma_kernel_pagesize(vma) >> 10, 631 vma_mmu_pagesize(vma) >> 10, 632 (vma->vm_flags & VM_LOCKED) ? 633 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0); 634 635 if (vma->vm_flags & VM_NONLINEAR) 636 seq_printf(m, "Nonlinear: %8lu kB\n", 637 mss.nonlinear >> 10); 638 639 show_smap_vma_flags(m, vma); 640 641 if (m->count < m->size) /* vma is copied successfully */ 642 m->version = (vma != get_gate_vma(task->mm)) 643 ? vma->vm_start : 0; 644 return 0; 645 } 646 647 static int show_pid_smap(struct seq_file *m, void *v) 648 { 649 return show_smap(m, v, 1); 650 } 651 652 static int show_tid_smap(struct seq_file *m, void *v) 653 { 654 return show_smap(m, v, 0); 655 } 656 657 static const struct seq_operations proc_pid_smaps_op = { 658 .start = m_start, 659 .next = m_next, 660 .stop = m_stop, 661 .show = show_pid_smap 662 }; 663 664 static const struct seq_operations proc_tid_smaps_op = { 665 .start = m_start, 666 .next = m_next, 667 .stop = m_stop, 668 .show = show_tid_smap 669 }; 670 671 static int pid_smaps_open(struct inode *inode, struct file *file) 672 { 673 return do_maps_open(inode, file, &proc_pid_smaps_op); 674 } 675 676 static int tid_smaps_open(struct inode *inode, struct file *file) 677 { 678 return do_maps_open(inode, file, &proc_tid_smaps_op); 679 } 680 681 const struct file_operations proc_pid_smaps_operations = { 682 .open = pid_smaps_open, 683 .read = seq_read, 684 .llseek = seq_lseek, 685 .release = seq_release_private, 686 }; 687 688 const struct file_operations proc_tid_smaps_operations = { 689 .open = tid_smaps_open, 690 .read = seq_read, 691 .llseek = seq_lseek, 692 .release = seq_release_private, 693 }; 694 695 /* 696 * We do not want to have constant page-shift bits sitting in 697 * pagemap entries and are about to reuse them some time soon. 698 * 699 * Here's the "migration strategy": 700 * 1. when the system boots these bits remain what they are, 701 * but a warning about future change is printed in log; 702 * 2. once anyone clears soft-dirty bits via clear_refs file, 703 * these flag is set to denote, that user is aware of the 704 * new API and those page-shift bits change their meaning. 705 * The respective warning is printed in dmesg; 706 * 3. In a couple of releases we will remove all the mentions 707 * of page-shift in pagemap entries. 708 */ 709 710 static bool soft_dirty_cleared __read_mostly; 711 712 enum clear_refs_types { 713 CLEAR_REFS_ALL = 1, 714 CLEAR_REFS_ANON, 715 CLEAR_REFS_MAPPED, 716 CLEAR_REFS_SOFT_DIRTY, 717 CLEAR_REFS_LAST, 718 }; 719 720 struct clear_refs_private { 721 struct vm_area_struct *vma; 722 enum clear_refs_types type; 723 }; 724 725 static inline void clear_soft_dirty(struct vm_area_struct *vma, 726 unsigned long addr, pte_t *pte) 727 { 728 #ifdef CONFIG_MEM_SOFT_DIRTY 729 /* 730 * The soft-dirty tracker uses #PF-s to catch writes 731 * to pages, so write-protect the pte as well. See the 732 * Documentation/vm/soft-dirty.txt for full description 733 * of how soft-dirty works. 734 */ 735 pte_t ptent = *pte; 736 737 if (pte_present(ptent)) { 738 ptent = pte_wrprotect(ptent); 739 ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY); 740 } else if (is_swap_pte(ptent)) { 741 ptent = pte_swp_clear_soft_dirty(ptent); 742 } else if (pte_file(ptent)) { 743 ptent = pte_file_clear_soft_dirty(ptent); 744 } 745 746 set_pte_at(vma->vm_mm, addr, pte, ptent); 747 #endif 748 } 749 750 static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, 751 unsigned long end, struct mm_walk *walk) 752 { 753 struct clear_refs_private *cp = walk->private; 754 struct vm_area_struct *vma = cp->vma; 755 pte_t *pte, ptent; 756 spinlock_t *ptl; 757 struct page *page; 758 759 split_huge_page_pmd(vma, addr, pmd); 760 if (pmd_trans_unstable(pmd)) 761 return 0; 762 763 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 764 for (; addr != end; pte++, addr += PAGE_SIZE) { 765 ptent = *pte; 766 767 if (cp->type == CLEAR_REFS_SOFT_DIRTY) { 768 clear_soft_dirty(vma, addr, pte); 769 continue; 770 } 771 772 if (!pte_present(ptent)) 773 continue; 774 775 page = vm_normal_page(vma, addr, ptent); 776 if (!page) 777 continue; 778 779 /* Clear accessed and referenced bits. */ 780 ptep_test_and_clear_young(vma, addr, pte); 781 ClearPageReferenced(page); 782 } 783 pte_unmap_unlock(pte - 1, ptl); 784 cond_resched(); 785 return 0; 786 } 787 788 static ssize_t clear_refs_write(struct file *file, const char __user *buf, 789 size_t count, loff_t *ppos) 790 { 791 struct task_struct *task; 792 char buffer[PROC_NUMBUF]; 793 struct mm_struct *mm; 794 struct vm_area_struct *vma; 795 enum clear_refs_types type; 796 int itype; 797 int rv; 798 799 memset(buffer, 0, sizeof(buffer)); 800 if (count > sizeof(buffer) - 1) 801 count = sizeof(buffer) - 1; 802 if (copy_from_user(buffer, buf, count)) 803 return -EFAULT; 804 rv = kstrtoint(strstrip(buffer), 10, &itype); 805 if (rv < 0) 806 return rv; 807 type = (enum clear_refs_types)itype; 808 if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST) 809 return -EINVAL; 810 811 if (type == CLEAR_REFS_SOFT_DIRTY) { 812 soft_dirty_cleared = true; 813 pr_warn_once("The pagemap bits 55-60 has changed their meaning!" 814 " See the linux/Documentation/vm/pagemap.txt for " 815 "details.\n"); 816 } 817 818 task = get_proc_task(file_inode(file)); 819 if (!task) 820 return -ESRCH; 821 mm = get_task_mm(task); 822 if (mm) { 823 struct clear_refs_private cp = { 824 .type = type, 825 }; 826 struct mm_walk clear_refs_walk = { 827 .pmd_entry = clear_refs_pte_range, 828 .mm = mm, 829 .private = &cp, 830 }; 831 down_read(&mm->mmap_sem); 832 if (type == CLEAR_REFS_SOFT_DIRTY) 833 mmu_notifier_invalidate_range_start(mm, 0, -1); 834 for (vma = mm->mmap; vma; vma = vma->vm_next) { 835 cp.vma = vma; 836 if (is_vm_hugetlb_page(vma)) 837 continue; 838 /* 839 * Writing 1 to /proc/pid/clear_refs affects all pages. 840 * 841 * Writing 2 to /proc/pid/clear_refs only affects 842 * Anonymous pages. 843 * 844 * Writing 3 to /proc/pid/clear_refs only affects file 845 * mapped pages. 846 * 847 * Writing 4 to /proc/pid/clear_refs affects all pages. 848 */ 849 if (type == CLEAR_REFS_ANON && vma->vm_file) 850 continue; 851 if (type == CLEAR_REFS_MAPPED && !vma->vm_file) 852 continue; 853 if (type == CLEAR_REFS_SOFT_DIRTY) { 854 if (vma->vm_flags & VM_SOFTDIRTY) 855 vma->vm_flags &= ~VM_SOFTDIRTY; 856 } 857 walk_page_range(vma->vm_start, vma->vm_end, 858 &clear_refs_walk); 859 } 860 if (type == CLEAR_REFS_SOFT_DIRTY) 861 mmu_notifier_invalidate_range_end(mm, 0, -1); 862 flush_tlb_mm(mm); 863 up_read(&mm->mmap_sem); 864 mmput(mm); 865 } 866 put_task_struct(task); 867 868 return count; 869 } 870 871 const struct file_operations proc_clear_refs_operations = { 872 .write = clear_refs_write, 873 .llseek = noop_llseek, 874 }; 875 876 typedef struct { 877 u64 pme; 878 } pagemap_entry_t; 879 880 struct pagemapread { 881 int pos, len; /* units: PM_ENTRY_BYTES, not bytes */ 882 pagemap_entry_t *buffer; 883 bool v2; 884 }; 885 886 #define PAGEMAP_WALK_SIZE (PMD_SIZE) 887 #define PAGEMAP_WALK_MASK (PMD_MASK) 888 889 #define PM_ENTRY_BYTES sizeof(pagemap_entry_t) 890 #define PM_STATUS_BITS 3 891 #define PM_STATUS_OFFSET (64 - PM_STATUS_BITS) 892 #define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET) 893 #define PM_STATUS(nr) (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK) 894 #define PM_PSHIFT_BITS 6 895 #define PM_PSHIFT_OFFSET (PM_STATUS_OFFSET - PM_PSHIFT_BITS) 896 #define PM_PSHIFT_MASK (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET) 897 #define __PM_PSHIFT(x) (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK) 898 #define PM_PFRAME_MASK ((1LL << PM_PSHIFT_OFFSET) - 1) 899 #define PM_PFRAME(x) ((x) & PM_PFRAME_MASK) 900 /* in "new" pagemap pshift bits are occupied with more status bits */ 901 #define PM_STATUS2(v2, x) (__PM_PSHIFT(v2 ? x : PAGE_SHIFT)) 902 903 #define __PM_SOFT_DIRTY (1LL) 904 #define PM_PRESENT PM_STATUS(4LL) 905 #define PM_SWAP PM_STATUS(2LL) 906 #define PM_FILE PM_STATUS(1LL) 907 #define PM_NOT_PRESENT(v2) PM_STATUS2(v2, 0) 908 #define PM_END_OF_BUFFER 1 909 910 static inline pagemap_entry_t make_pme(u64 val) 911 { 912 return (pagemap_entry_t) { .pme = val }; 913 } 914 915 static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme, 916 struct pagemapread *pm) 917 { 918 pm->buffer[pm->pos++] = *pme; 919 if (pm->pos >= pm->len) 920 return PM_END_OF_BUFFER; 921 return 0; 922 } 923 924 static int pagemap_pte_hole(unsigned long start, unsigned long end, 925 struct mm_walk *walk) 926 { 927 struct pagemapread *pm = walk->private; 928 unsigned long addr = start; 929 int err = 0; 930 931 while (addr < end) { 932 struct vm_area_struct *vma = find_vma(walk->mm, addr); 933 pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2)); 934 /* End of address space hole, which we mark as non-present. */ 935 unsigned long hole_end; 936 937 if (vma) 938 hole_end = min(end, vma->vm_start); 939 else 940 hole_end = end; 941 942 for (; addr < hole_end; addr += PAGE_SIZE) { 943 err = add_to_pagemap(addr, &pme, pm); 944 if (err) 945 goto out; 946 } 947 948 if (!vma) 949 break; 950 951 /* Addresses in the VMA. */ 952 if (vma->vm_flags & VM_SOFTDIRTY) 953 pme.pme |= PM_STATUS2(pm->v2, __PM_SOFT_DIRTY); 954 for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) { 955 err = add_to_pagemap(addr, &pme, pm); 956 if (err) 957 goto out; 958 } 959 } 960 out: 961 return err; 962 } 963 964 static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, 965 struct vm_area_struct *vma, unsigned long addr, pte_t pte) 966 { 967 u64 frame, flags; 968 struct page *page = NULL; 969 int flags2 = 0; 970 971 if (pte_present(pte)) { 972 frame = pte_pfn(pte); 973 flags = PM_PRESENT; 974 page = vm_normal_page(vma, addr, pte); 975 if (pte_soft_dirty(pte)) 976 flags2 |= __PM_SOFT_DIRTY; 977 } else if (is_swap_pte(pte)) { 978 swp_entry_t entry; 979 if (pte_swp_soft_dirty(pte)) 980 flags2 |= __PM_SOFT_DIRTY; 981 entry = pte_to_swp_entry(pte); 982 frame = swp_type(entry) | 983 (swp_offset(entry) << MAX_SWAPFILES_SHIFT); 984 flags = PM_SWAP; 985 if (is_migration_entry(entry)) 986 page = migration_entry_to_page(entry); 987 } else { 988 if (vma->vm_flags & VM_SOFTDIRTY) 989 flags2 |= __PM_SOFT_DIRTY; 990 *pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2)); 991 return; 992 } 993 994 if (page && !PageAnon(page)) 995 flags |= PM_FILE; 996 if ((vma->vm_flags & VM_SOFTDIRTY)) 997 flags2 |= __PM_SOFT_DIRTY; 998 999 *pme = make_pme(PM_PFRAME(frame) | PM_STATUS2(pm->v2, flags2) | flags); 1000 } 1001 1002 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1003 static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, 1004 pmd_t pmd, int offset, int pmd_flags2) 1005 { 1006 /* 1007 * Currently pmd for thp is always present because thp can not be 1008 * swapped-out, migrated, or HWPOISONed (split in such cases instead.) 1009 * This if-check is just to prepare for future implementation. 1010 */ 1011 if (pmd_present(pmd)) 1012 *pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset) 1013 | PM_STATUS2(pm->v2, pmd_flags2) | PM_PRESENT); 1014 else 1015 *pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, pmd_flags2)); 1016 } 1017 #else 1018 static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, 1019 pmd_t pmd, int offset, int pmd_flags2) 1020 { 1021 } 1022 #endif 1023 1024 static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, 1025 struct mm_walk *walk) 1026 { 1027 struct vm_area_struct *vma; 1028 struct pagemapread *pm = walk->private; 1029 spinlock_t *ptl; 1030 pte_t *pte; 1031 int err = 0; 1032 pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2)); 1033 1034 /* find the first VMA at or above 'addr' */ 1035 vma = find_vma(walk->mm, addr); 1036 if (vma && pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { 1037 int pmd_flags2; 1038 1039 if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd)) 1040 pmd_flags2 = __PM_SOFT_DIRTY; 1041 else 1042 pmd_flags2 = 0; 1043 1044 for (; addr != end; addr += PAGE_SIZE) { 1045 unsigned long offset; 1046 1047 offset = (addr & ~PAGEMAP_WALK_MASK) >> 1048 PAGE_SHIFT; 1049 thp_pmd_to_pagemap_entry(&pme, pm, *pmd, offset, pmd_flags2); 1050 err = add_to_pagemap(addr, &pme, pm); 1051 if (err) 1052 break; 1053 } 1054 spin_unlock(ptl); 1055 return err; 1056 } 1057 1058 if (pmd_trans_unstable(pmd)) 1059 return 0; 1060 for (; addr != end; addr += PAGE_SIZE) { 1061 int flags2; 1062 1063 /* check to see if we've left 'vma' behind 1064 * and need a new, higher one */ 1065 if (vma && (addr >= vma->vm_end)) { 1066 vma = find_vma(walk->mm, addr); 1067 if (vma && (vma->vm_flags & VM_SOFTDIRTY)) 1068 flags2 = __PM_SOFT_DIRTY; 1069 else 1070 flags2 = 0; 1071 pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2)); 1072 } 1073 1074 /* check that 'vma' actually covers this address, 1075 * and that it isn't a huge page vma */ 1076 if (vma && (vma->vm_start <= addr) && 1077 !is_vm_hugetlb_page(vma)) { 1078 pte = pte_offset_map(pmd, addr); 1079 pte_to_pagemap_entry(&pme, pm, vma, addr, *pte); 1080 /* unmap before userspace copy */ 1081 pte_unmap(pte); 1082 } 1083 err = add_to_pagemap(addr, &pme, pm); 1084 if (err) 1085 return err; 1086 } 1087 1088 cond_resched(); 1089 1090 return err; 1091 } 1092 1093 #ifdef CONFIG_HUGETLB_PAGE 1094 static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, 1095 pte_t pte, int offset, int flags2) 1096 { 1097 if (pte_present(pte)) 1098 *pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset) | 1099 PM_STATUS2(pm->v2, flags2) | 1100 PM_PRESENT); 1101 else 1102 *pme = make_pme(PM_NOT_PRESENT(pm->v2) | 1103 PM_STATUS2(pm->v2, flags2)); 1104 } 1105 1106 /* This function walks within one hugetlb entry in the single call */ 1107 static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask, 1108 unsigned long addr, unsigned long end, 1109 struct mm_walk *walk) 1110 { 1111 struct pagemapread *pm = walk->private; 1112 struct vm_area_struct *vma; 1113 int err = 0; 1114 int flags2; 1115 pagemap_entry_t pme; 1116 1117 vma = find_vma(walk->mm, addr); 1118 WARN_ON_ONCE(!vma); 1119 1120 if (vma && (vma->vm_flags & VM_SOFTDIRTY)) 1121 flags2 = __PM_SOFT_DIRTY; 1122 else 1123 flags2 = 0; 1124 1125 for (; addr != end; addr += PAGE_SIZE) { 1126 int offset = (addr & ~hmask) >> PAGE_SHIFT; 1127 huge_pte_to_pagemap_entry(&pme, pm, *pte, offset, flags2); 1128 err = add_to_pagemap(addr, &pme, pm); 1129 if (err) 1130 return err; 1131 } 1132 1133 cond_resched(); 1134 1135 return err; 1136 } 1137 #endif /* HUGETLB_PAGE */ 1138 1139 /* 1140 * /proc/pid/pagemap - an array mapping virtual pages to pfns 1141 * 1142 * For each page in the address space, this file contains one 64-bit entry 1143 * consisting of the following: 1144 * 1145 * Bits 0-54 page frame number (PFN) if present 1146 * Bits 0-4 swap type if swapped 1147 * Bits 5-54 swap offset if swapped 1148 * Bits 55-60 page shift (page size = 1<<page shift) 1149 * Bit 61 page is file-page or shared-anon 1150 * Bit 62 page swapped 1151 * Bit 63 page present 1152 * 1153 * If the page is not present but in swap, then the PFN contains an 1154 * encoding of the swap file number and the page's offset into the 1155 * swap. Unmapped pages return a null PFN. This allows determining 1156 * precisely which pages are mapped (or in swap) and comparing mapped 1157 * pages between processes. 1158 * 1159 * Efficient users of this interface will use /proc/pid/maps to 1160 * determine which areas of memory are actually mapped and llseek to 1161 * skip over unmapped regions. 1162 */ 1163 static ssize_t pagemap_read(struct file *file, char __user *buf, 1164 size_t count, loff_t *ppos) 1165 { 1166 struct task_struct *task = get_proc_task(file_inode(file)); 1167 struct mm_struct *mm; 1168 struct pagemapread pm; 1169 int ret = -ESRCH; 1170 struct mm_walk pagemap_walk = {}; 1171 unsigned long src; 1172 unsigned long svpfn; 1173 unsigned long start_vaddr; 1174 unsigned long end_vaddr; 1175 int copied = 0; 1176 1177 if (!task) 1178 goto out; 1179 1180 ret = -EINVAL; 1181 /* file position must be aligned */ 1182 if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES)) 1183 goto out_task; 1184 1185 ret = 0; 1186 if (!count) 1187 goto out_task; 1188 1189 pm.v2 = soft_dirty_cleared; 1190 pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT); 1191 pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY); 1192 ret = -ENOMEM; 1193 if (!pm.buffer) 1194 goto out_task; 1195 1196 mm = mm_access(task, PTRACE_MODE_READ); 1197 ret = PTR_ERR(mm); 1198 if (!mm || IS_ERR(mm)) 1199 goto out_free; 1200 1201 pagemap_walk.pmd_entry = pagemap_pte_range; 1202 pagemap_walk.pte_hole = pagemap_pte_hole; 1203 #ifdef CONFIG_HUGETLB_PAGE 1204 pagemap_walk.hugetlb_entry = pagemap_hugetlb_range; 1205 #endif 1206 pagemap_walk.mm = mm; 1207 pagemap_walk.private = ± 1208 1209 src = *ppos; 1210 svpfn = src / PM_ENTRY_BYTES; 1211 start_vaddr = svpfn << PAGE_SHIFT; 1212 end_vaddr = TASK_SIZE_OF(task); 1213 1214 /* watch out for wraparound */ 1215 if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT) 1216 start_vaddr = end_vaddr; 1217 1218 /* 1219 * The odds are that this will stop walking way 1220 * before end_vaddr, because the length of the 1221 * user buffer is tracked in "pm", and the walk 1222 * will stop when we hit the end of the buffer. 1223 */ 1224 ret = 0; 1225 while (count && (start_vaddr < end_vaddr)) { 1226 int len; 1227 unsigned long end; 1228 1229 pm.pos = 0; 1230 end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK; 1231 /* overflow ? */ 1232 if (end < start_vaddr || end > end_vaddr) 1233 end = end_vaddr; 1234 down_read(&mm->mmap_sem); 1235 ret = walk_page_range(start_vaddr, end, &pagemap_walk); 1236 up_read(&mm->mmap_sem); 1237 start_vaddr = end; 1238 1239 len = min(count, PM_ENTRY_BYTES * pm.pos); 1240 if (copy_to_user(buf, pm.buffer, len)) { 1241 ret = -EFAULT; 1242 goto out_mm; 1243 } 1244 copied += len; 1245 buf += len; 1246 count -= len; 1247 } 1248 *ppos += copied; 1249 if (!ret || ret == PM_END_OF_BUFFER) 1250 ret = copied; 1251 1252 out_mm: 1253 mmput(mm); 1254 out_free: 1255 kfree(pm.buffer); 1256 out_task: 1257 put_task_struct(task); 1258 out: 1259 return ret; 1260 } 1261 1262 static int pagemap_open(struct inode *inode, struct file *file) 1263 { 1264 pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about " 1265 "to stop being page-shift some time soon. See the " 1266 "linux/Documentation/vm/pagemap.txt for details.\n"); 1267 return 0; 1268 } 1269 1270 const struct file_operations proc_pagemap_operations = { 1271 .llseek = mem_lseek, /* borrow this */ 1272 .read = pagemap_read, 1273 .open = pagemap_open, 1274 }; 1275 #endif /* CONFIG_PROC_PAGE_MONITOR */ 1276 1277 #ifdef CONFIG_NUMA 1278 1279 struct numa_maps { 1280 struct vm_area_struct *vma; 1281 unsigned long pages; 1282 unsigned long anon; 1283 unsigned long active; 1284 unsigned long writeback; 1285 unsigned long mapcount_max; 1286 unsigned long dirty; 1287 unsigned long swapcache; 1288 unsigned long node[MAX_NUMNODES]; 1289 }; 1290 1291 struct numa_maps_private { 1292 struct proc_maps_private proc_maps; 1293 struct numa_maps md; 1294 }; 1295 1296 static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty, 1297 unsigned long nr_pages) 1298 { 1299 int count = page_mapcount(page); 1300 1301 md->pages += nr_pages; 1302 if (pte_dirty || PageDirty(page)) 1303 md->dirty += nr_pages; 1304 1305 if (PageSwapCache(page)) 1306 md->swapcache += nr_pages; 1307 1308 if (PageActive(page) || PageUnevictable(page)) 1309 md->active += nr_pages; 1310 1311 if (PageWriteback(page)) 1312 md->writeback += nr_pages; 1313 1314 if (PageAnon(page)) 1315 md->anon += nr_pages; 1316 1317 if (count > md->mapcount_max) 1318 md->mapcount_max = count; 1319 1320 md->node[page_to_nid(page)] += nr_pages; 1321 } 1322 1323 static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma, 1324 unsigned long addr) 1325 { 1326 struct page *page; 1327 int nid; 1328 1329 if (!pte_present(pte)) 1330 return NULL; 1331 1332 page = vm_normal_page(vma, addr, pte); 1333 if (!page) 1334 return NULL; 1335 1336 if (PageReserved(page)) 1337 return NULL; 1338 1339 nid = page_to_nid(page); 1340 if (!node_isset(nid, node_states[N_MEMORY])) 1341 return NULL; 1342 1343 return page; 1344 } 1345 1346 static int gather_pte_stats(pmd_t *pmd, unsigned long addr, 1347 unsigned long end, struct mm_walk *walk) 1348 { 1349 struct numa_maps *md; 1350 spinlock_t *ptl; 1351 pte_t *orig_pte; 1352 pte_t *pte; 1353 1354 md = walk->private; 1355 1356 if (pmd_trans_huge_lock(pmd, md->vma, &ptl) == 1) { 1357 pte_t huge_pte = *(pte_t *)pmd; 1358 struct page *page; 1359 1360 page = can_gather_numa_stats(huge_pte, md->vma, addr); 1361 if (page) 1362 gather_stats(page, md, pte_dirty(huge_pte), 1363 HPAGE_PMD_SIZE/PAGE_SIZE); 1364 spin_unlock(ptl); 1365 return 0; 1366 } 1367 1368 if (pmd_trans_unstable(pmd)) 1369 return 0; 1370 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); 1371 do { 1372 struct page *page = can_gather_numa_stats(*pte, md->vma, addr); 1373 if (!page) 1374 continue; 1375 gather_stats(page, md, pte_dirty(*pte), 1); 1376 1377 } while (pte++, addr += PAGE_SIZE, addr != end); 1378 pte_unmap_unlock(orig_pte, ptl); 1379 return 0; 1380 } 1381 #ifdef CONFIG_HUGETLB_PAGE 1382 static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask, 1383 unsigned long addr, unsigned long end, struct mm_walk *walk) 1384 { 1385 struct numa_maps *md; 1386 struct page *page; 1387 1388 if (!pte_present(*pte)) 1389 return 0; 1390 1391 page = pte_page(*pte); 1392 if (!page) 1393 return 0; 1394 1395 md = walk->private; 1396 gather_stats(page, md, pte_dirty(*pte), 1); 1397 return 0; 1398 } 1399 1400 #else 1401 static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask, 1402 unsigned long addr, unsigned long end, struct mm_walk *walk) 1403 { 1404 return 0; 1405 } 1406 #endif 1407 1408 /* 1409 * Display pages allocated per node and memory policy via /proc. 1410 */ 1411 static int show_numa_map(struct seq_file *m, void *v, int is_pid) 1412 { 1413 struct numa_maps_private *numa_priv = m->private; 1414 struct proc_maps_private *proc_priv = &numa_priv->proc_maps; 1415 struct vm_area_struct *vma = v; 1416 struct numa_maps *md = &numa_priv->md; 1417 struct file *file = vma->vm_file; 1418 struct task_struct *task = proc_priv->task; 1419 struct mm_struct *mm = vma->vm_mm; 1420 struct mm_walk walk = {}; 1421 struct mempolicy *pol; 1422 char buffer[64]; 1423 int nid; 1424 1425 if (!mm) 1426 return 0; 1427 1428 /* Ensure we start with an empty set of numa_maps statistics. */ 1429 memset(md, 0, sizeof(*md)); 1430 1431 md->vma = vma; 1432 1433 walk.hugetlb_entry = gather_hugetbl_stats; 1434 walk.pmd_entry = gather_pte_stats; 1435 walk.private = md; 1436 walk.mm = mm; 1437 1438 pol = get_vma_policy(task, vma, vma->vm_start); 1439 mpol_to_str(buffer, sizeof(buffer), pol); 1440 mpol_cond_put(pol); 1441 1442 seq_printf(m, "%08lx %s", vma->vm_start, buffer); 1443 1444 if (file) { 1445 seq_puts(m, " file="); 1446 seq_path(m, &file->f_path, "\n\t= "); 1447 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { 1448 seq_puts(m, " heap"); 1449 } else { 1450 pid_t tid = vm_is_stack(task, vma, is_pid); 1451 if (tid != 0) { 1452 /* 1453 * Thread stack in /proc/PID/task/TID/maps or 1454 * the main process stack. 1455 */ 1456 if (!is_pid || (vma->vm_start <= mm->start_stack && 1457 vma->vm_end >= mm->start_stack)) 1458 seq_puts(m, " stack"); 1459 else 1460 seq_printf(m, " stack:%d", tid); 1461 } 1462 } 1463 1464 if (is_vm_hugetlb_page(vma)) 1465 seq_puts(m, " huge"); 1466 1467 walk_page_range(vma->vm_start, vma->vm_end, &walk); 1468 1469 if (!md->pages) 1470 goto out; 1471 1472 if (md->anon) 1473 seq_printf(m, " anon=%lu", md->anon); 1474 1475 if (md->dirty) 1476 seq_printf(m, " dirty=%lu", md->dirty); 1477 1478 if (md->pages != md->anon && md->pages != md->dirty) 1479 seq_printf(m, " mapped=%lu", md->pages); 1480 1481 if (md->mapcount_max > 1) 1482 seq_printf(m, " mapmax=%lu", md->mapcount_max); 1483 1484 if (md->swapcache) 1485 seq_printf(m, " swapcache=%lu", md->swapcache); 1486 1487 if (md->active < md->pages && !is_vm_hugetlb_page(vma)) 1488 seq_printf(m, " active=%lu", md->active); 1489 1490 if (md->writeback) 1491 seq_printf(m, " writeback=%lu", md->writeback); 1492 1493 for_each_node_state(nid, N_MEMORY) 1494 if (md->node[nid]) 1495 seq_printf(m, " N%d=%lu", nid, md->node[nid]); 1496 out: 1497 seq_putc(m, '\n'); 1498 1499 if (m->count < m->size) 1500 m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0; 1501 return 0; 1502 } 1503 1504 static int show_pid_numa_map(struct seq_file *m, void *v) 1505 { 1506 return show_numa_map(m, v, 1); 1507 } 1508 1509 static int show_tid_numa_map(struct seq_file *m, void *v) 1510 { 1511 return show_numa_map(m, v, 0); 1512 } 1513 1514 static const struct seq_operations proc_pid_numa_maps_op = { 1515 .start = m_start, 1516 .next = m_next, 1517 .stop = m_stop, 1518 .show = show_pid_numa_map, 1519 }; 1520 1521 static const struct seq_operations proc_tid_numa_maps_op = { 1522 .start = m_start, 1523 .next = m_next, 1524 .stop = m_stop, 1525 .show = show_tid_numa_map, 1526 }; 1527 1528 static int numa_maps_open(struct inode *inode, struct file *file, 1529 const struct seq_operations *ops) 1530 { 1531 struct numa_maps_private *priv; 1532 int ret = -ENOMEM; 1533 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 1534 if (priv) { 1535 priv->proc_maps.pid = proc_pid(inode); 1536 ret = seq_open(file, ops); 1537 if (!ret) { 1538 struct seq_file *m = file->private_data; 1539 m->private = priv; 1540 } else { 1541 kfree(priv); 1542 } 1543 } 1544 return ret; 1545 } 1546 1547 static int pid_numa_maps_open(struct inode *inode, struct file *file) 1548 { 1549 return numa_maps_open(inode, file, &proc_pid_numa_maps_op); 1550 } 1551 1552 static int tid_numa_maps_open(struct inode *inode, struct file *file) 1553 { 1554 return numa_maps_open(inode, file, &proc_tid_numa_maps_op); 1555 } 1556 1557 const struct file_operations proc_pid_numa_maps_operations = { 1558 .open = pid_numa_maps_open, 1559 .read = seq_read, 1560 .llseek = seq_lseek, 1561 .release = seq_release_private, 1562 }; 1563 1564 const struct file_operations proc_tid_numa_maps_operations = { 1565 .open = tid_numa_maps_open, 1566 .read = seq_read, 1567 .llseek = seq_lseek, 1568 .release = seq_release_private, 1569 }; 1570 #endif /* CONFIG_NUMA */ 1571