#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib, swap;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
		swap << (PAGE_SHIFT-10));
}
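
/*
 * Units note (editorial sketch): "x << (PAGE_SHIFT-10)" above converts
 * a page count to kB.  Assuming 4K pages (PAGE_SHIFT == 12), the shift
 * is 2, so e.g. 300 pages << 2 == 1200 kB, matching 300 * 4096 / 1024.
 */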

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}

static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}

static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time.  We have zero last_addr at
	 * the beginning and also after lseek.  We will have -1 last_addr
	 * after the end of the vmas.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return NULL;

	mm = mm_for_maps(priv->task);
	if (!mm)
		return NULL;
	down_read(&mm->mmap_sem);

	tail_vma = get_gate_vma(priv->task);
	priv->tail_vma = tail_vma;

	/* Start with last addr hint */
	vma = find_vma(mm, last_addr);
	if (last_addr && vma) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	int flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	dev_t dev = 0;
	int len;

	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
			vma->vm_start,
			vma->vm_end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, &file->f_path, "\n");
	} else {
		const char *name = arch_vma_name(vma);
		if (!name) {
			if (mm) {
				if (vma->vm_start <= mm->start_brk &&
						vma->vm_end >= mm->brk) {
					name = "[heap]";
				} else if (vma->vm_start <= mm->start_stack &&
					   vma->vm_end >= mm->start_stack) {
					name = "[stack]";
				}
			} else {
				name = "[vdso]";
			}
		}
		if (name) {
			pad_len_spaces(m, len);
			seq_puts(m, name);
		}
	}
	seq_putc(m, '\n');
}
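
/*
 * Illustrative sample of a line produced above (field widths depend on
 * sizeof(void *); the values are made up):
 *
 *	08048000-08056000 r-xp 00000000 03:0c 64593	/usr/sbin/gpm
 */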

static int show_map(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	show_map_vma(m, vma);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
	return 0;
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

static int maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

const struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter to minimize division errors.  So (pss >>
 * PSS_SHIFT) would be the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 * 	- 1M 3-user-pages add up to 8KB errors;
 * 	- supports mapcount up to 2^24, or 16M;
 * 	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
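
/*
 * Worked example of the fixed-point scheme (editorial sketch, assuming
 * 4K pages): a page mapped by three processes contributes
 * (4096 << 12) / 3 == 5592405 to pss; (pss >> PSS_SHIFT) then yields
 * 1365 bytes, i.e. 4096/3 with the rounding error kept below one byte
 * per page rather than a whole page.
 */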

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long swap;
	u64 pss;
};

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;
	int mapcount;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (is_swap_pte(ptent)) {
			mss->swap += PAGE_SIZE;
			continue;
		}

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		mss->resident += PAGE_SIZE;
		/* Accumulate the size in pages that have been accessed. */
		if (pte_young(ptent) || PageReferenced(page))
			mss->referenced += PAGE_SIZE;
		mapcount = page_mapcount(page);
		if (mapcount >= 2) {
			if (pte_dirty(ptent))
				mss->shared_dirty += PAGE_SIZE;
			else
				mss->shared_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
		} else {
			if (pte_dirty(ptent))
				mss->private_dirty += PAGE_SIZE;
			else
				mss->private_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT);
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int show_smap(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	/* mmap_sem is held in m_start */
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

	show_map_vma(m, vma);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Swap:           %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.swap >> 10,
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
	return 0;
}
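
/*
 * Illustrative smaps stanza as emitted above (the values are made up;
 * note Rss == Shared_Clean + Shared_Dirty + Private_Clean + Private_Dirty):
 *
 *	08048000-080bc000 r-xp 00000000 03:02 13130	/bin/bash
 *	Size:                464 kB
 *	Rss:                 424 kB
 *	Pss:                 387 kB
 *	Shared_Clean:         44 kB
 *	Shared_Dirty:          0 kB
 *	Private_Clean:       380 kB
 *	Private_Dirty:         0 kB
 *	Referenced:          424 kB
 *	Swap:                  0 kB
 *	KernelPageSize:        4 kB
 *	MMUPageSize:           4 kB
 */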

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};

static int smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

const struct file_operations proc_smaps_operations = {
	.open		= smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

#define CLEAR_REFS_ALL		1
#define CLEAR_REFS_ANON		2
#define CLEAR_REFS_MAPPED	3

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	long type;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	if (strict_strtol(strstrip(buffer), 10, &type))
		return -EINVAL;
	if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
		return -EINVAL;
	task = get_proc_task(file->f_path.dentry->d_inode);
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.mm = mm,
		};
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			clear_refs_walk.private = vma;
			if (is_vm_hugetlb_page(vma))
				continue;
			/*
			 * Writing 1 to /proc/pid/clear_refs affects all pages.
			 *
			 * Writing 2 to /proc/pid/clear_refs only affects
			 * anonymous pages.
			 *
			 * Writing 3 to /proc/pid/clear_refs only affects file
			 * mapped pages.
			 */
			if (type == CLEAR_REFS_ANON && vma->vm_file)
				continue;
			if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
				continue;
			walk_page_range(vma->vm_start, vma->vm_end,
					&clear_refs_walk);
		}
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
};
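
/*
 * Userspace workflow sketch (editorial, not part of this file; the pid
 * 1234 is hypothetical): clear the referenced bits, let the task run,
 * then read Referenced: from /proc/1234/smaps to estimate the working
 * set.
 *
 *	int fd = open("/proc/1234/clear_refs", O_WRONLY);
 *	write(fd, "1", 1);	(1 == CLEAR_REFS_ALL)
 *	close(fd);
 */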

struct pagemapread {
	int pos, len;
	u64 *buffer;
};

#define PM_ENTRY_BYTES      sizeof(u64)
#define PM_STATUS_BITS      3
#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS      6
#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define PM_PSHIFT(x)        (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)

#define PM_PRESENT          PM_STATUS(4LL)
#define PM_SWAP             PM_STATUS(2LL)
#define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER    1

static int add_to_pagemap(unsigned long addr, u64 pfn,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = pfn;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
			    struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr;
	int err = 0;
	for (addr = start; addr < end; addr += PAGE_SIZE) {
		err = add_to_pagemap(addr, PM_NOT_PRESENT, pm);
		if (err)
			break;
	}
	return err;
}

static u64 swap_pte_to_pagemap_entry(pte_t pte)
{
	swp_entry_t e = pte_to_swp_entry(pte);
	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}

static u64 pte_to_pagemap_entry(pte_t pte)
{
	u64 pme = 0;
	if (is_swap_pte(pte))
		pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
	else if (pte_present(pte))
		pme = PM_PFRAME(pte_pfn(pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
	return pme;
}

static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma;
	struct pagemapread *pm = walk->private;
	pte_t *pte;
	int err = 0;

	/* find the first VMA at or above 'addr' */
	vma = find_vma(walk->mm, addr);
	for (; addr != end; addr += PAGE_SIZE) {
		u64 pfn = PM_NOT_PRESENT;

		/* check to see if we've left 'vma' behind
		 * and need a new, higher one */
		if (vma && (addr >= vma->vm_end))
			vma = find_vma(walk->mm, addr);

		/* check that 'vma' actually covers this address,
		 * and that it isn't a huge page vma */
		if (vma && (vma->vm_start <= addr) &&
		    !is_vm_hugetlb_page(vma)) {
			pte = pte_offset_map(pmd, addr);
			pfn = pte_to_pagemap_entry(*pte);
			/* unmap before userspace copy */
			pte_unmap(pte);
		}
		err = add_to_pagemap(addr, pfn, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
{
	u64 pme = 0;
	if (pte_present(pte))
		pme = PM_PFRAME(pte_pfn(pte) + offset)
			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
	return pme;
}

/* This function walks within one hugetlb entry in a single call */
static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	int err = 0;
	u64 pfn;

	for (; addr != end; addr += PAGE_SIZE) {
		int offset = (addr & ~hmask) >> PAGE_SHIFT;
		pfn = huge_pte_to_pagemap_entry(*pte, offset);
		err = add_to_pagemap(addr, pfn, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bits 55-60 page shift (page size = 1<<page shift)
 * Bit  61    reserved for future use
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap.  Unmapped pages return a null PFN.  This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
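/*
 * Userspace decode sketch (editorial, not part of this file; pid 1234
 * and the 4K page size are assumptions).  To inspect the entry for
 * virtual address vaddr:
 *
 *	uint64_t ent;
 *	int fd = open("/proc/1234/pagemap", O_RDONLY);
 *	pread(fd, &ent, 8, (vaddr / 4096) * 8);
 *	if (ent & (1ULL << 63))			    (bit 63: present)
 *		pfn  = ent & ((1ULL << 55) - 1);    (bits 0-54: PFN)
 *	else if (ent & (1ULL << 62))		    (bit 62: swapped)
 *		type = ent & 0x1f;		    (bits 0-4: swap type)
 */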
#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
	struct mm_struct *mm;
	struct pagemapread pm;
	int ret = -ESRCH;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int copied = 0;

	if (!task)
		goto out;

	ret = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto out_task;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_task;

	ret = 0;

	if (!count)
		goto out_task;

	mm = get_task_mm(task);
	if (!mm)
		goto out_task;

	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_mm;

	pagemap_walk.pmd_entry = pagemap_pte_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = TASK_SIZE_OF(task);

	/* watch out for wraparound */
	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = start_vaddr + PAGEMAP_WALK_SIZE;
		/* overflow ? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		down_read(&mm->mmap_sem);
		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
		up_read(&mm->mmap_sem);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_free;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_free:
	kfree(pm.buffer);
out_mm:
	mmput(mm);
out_task:
	put_task_struct(task);
out:
	return ret;
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA
extern int show_numa_map(struct seq_file *m, void *v);

static const struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_numa_maps_op);
}

const struct file_operations proc_numa_maps_operations = {
	.open		= numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif