#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
}

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	*shared = get_mm_counter(mm, file_rss);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, anon_rss);
	return mm->total_vm;
}

static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}
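/*
 * seq_file iterator shared by the map files below: m_start() pins the
 * task, takes a reference on its mm (via mm_for_maps(), which also
 * down_read()s mmap_sem), and vma_stop()/m_stop() drop the lock and
 * the references again once the walk finishes or the seq_file buffer
 * fills up.  The gate vma, if any, is emitted as a final "tail" vma
 * after the mmap list proper.
 */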
static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time.  We have zero last_addr at
	 * the beginning and also after lseek.  We will have -1 last_addr
	 * after the end of the vmas.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return NULL;

	mm = mm_for_maps(priv->task);
	if (!mm)
		return NULL;

	tail_vma = get_gate_vma(priv->task);
	priv->tail_vma = tail_vma;

	/* Start with last addr hint */
	vma = find_vma(mm, last_addr);
	if (last_addr && vma) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}
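/*
 * Format of a single /proc/<pid>/maps line (values illustrative):
 *
 *	08048000-0804c000 r-xp 00000000 03:02 12345      /bin/cat
 *
 * i.e. start-end, rwx permissions plus s(hared)/p(rivate), file
 * offset, device major:minor, inode, and the backing file's path or
 * a [heap]/[stack]/[vdso] marker, as assembled by show_map() below.
 */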
static int show_map(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	int flags = vma->vm_flags;
	unsigned long ino = 0;
	dev_t dev = 0;
	int len;

	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
	}

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
			vma->vm_start,
			vma->vm_end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			((loff_t)vma->vm_pgoff) << PAGE_SHIFT,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, &file->f_path, "\n");
	} else {
		const char *name = arch_vma_name(vma);
		if (!name) {
			if (mm) {
				if (vma->vm_start <= mm->start_brk &&
						vma->vm_end >= mm->brk) {
					name = "[heap]";
				} else if (vma->vm_start <= mm->start_stack &&
					   vma->vm_end >= mm->start_stack) {
					name = "[stack]";
				}
			} else {
				name = "[vdso]";
			}
		}
		if (name) {
			pad_len_spaces(m, len);
			seq_puts(m, name);
		}
	}
	seq_putc(m, '\n');

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
	return 0;
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

static int maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

const struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter: (pss >> PSS_SHIFT) is the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 *	- 1M 3-user-pages add up to 8KB errors;
 *	- supports mapcount up to 2^24, or 16M;
 *	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
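/*
 * Worked example of the fixed-point accounting (4K pages): a page
 * shared by three processes adds (4096 << 12) / 3 = 5592405 to pss,
 * rather than a truncated 4096 / 3 = 1365 to a plain byte counter.
 * The remainder that integer division would discard stays in the low
 * PSS_SHIFT bits and is rounded away only once, when show_smap()
 * finally shifts pss down for printing.
 */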
#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long swap;
	u64 pss;
};

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;
	int mapcount;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (is_swap_pte(ptent)) {
			mss->swap += PAGE_SIZE;
			continue;
		}

		if (!pte_present(ptent))
			continue;

		mss->resident += PAGE_SIZE;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Accumulate the size in pages that have been accessed. */
		if (pte_young(ptent) || PageReferenced(page))
			mss->referenced += PAGE_SIZE;
		mapcount = page_mapcount(page);
		if (mapcount >= 2) {
			if (pte_dirty(ptent))
				mss->shared_dirty += PAGE_SIZE;
			else
				mss->shared_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
		} else {
			if (pte_dirty(ptent))
				mss->private_dirty += PAGE_SIZE;
			else
				mss->private_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT);
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int show_smap(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	int ret;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

	ret = show_map(m, v);
	if (ret)
		return ret;

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Swap:           %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.swap >> 10);

	return ret;
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};

static int smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

const struct file_operations proc_smaps_operations = {
	.open		= smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
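/*
 * /proc/<pid>/clear_refs: writing a non-zero value resets the
 * "accessed" state of every mapped page -- the young bit in each pte
 * and the PG_referenced flag on the page.  A later read of smaps'
 * "Referenced:" field then reports only memory touched since the
 * write, which gives a cheap working-set estimate:
 *
 *	echo 1 > /proc/1234/clear_refs
 *	(let the task run for a while)
 *	grep Referenced /proc/1234/smaps
 */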
static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF], *end;
	struct mm_struct *mm;
	struct vm_area_struct *vma;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	if (!simple_strtol(buffer, &end, 0))
		return -EINVAL;
	if (*end == '\n')
		end++;
	task = get_proc_task(file->f_path.dentry->d_inode);
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.mm = mm,
		};
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			clear_refs_walk.private = vma;
			if (!is_vm_hugetlb_page(vma))
				walk_page_range(vma->vm_start, vma->vm_end,
						&clear_refs_walk);
		}
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	put_task_struct(task);
	if (end - buffer == 0)
		return -EIO;
	return end - buffer;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
};

struct pagemapread {
	u64 __user *out, *end;
};

#define PM_ENTRY_BYTES      sizeof(u64)
#define PM_STATUS_BITS      3
#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS      6
#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define PM_PSHIFT(x)        (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)

#define PM_PRESENT          PM_STATUS(4LL)
#define PM_SWAP             PM_STATUS(2LL)
#define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER    1

static int add_to_pagemap(unsigned long addr, u64 pfn,
			  struct pagemapread *pm)
{
	if (put_user(pfn, pm->out))
		return -EFAULT;
	pm->out++;
	if (pm->out >= pm->end)
		return PM_END_OF_BUFFER;
	return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
			    struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr;
	int err = 0;
	for (addr = start; addr < end; addr += PAGE_SIZE) {
		err = add_to_pagemap(addr, PM_NOT_PRESENT, pm);
		if (err)
			break;
	}
	return err;
}
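/*
 * A swapped-out page is reported through the same PFN field: the swap
 * type lands in the low MAX_SWAPFILES_SHIFT (5) bits and the swap
 * offset in the bits above it, matching the "Bits 0-4" / "Bits 5-54"
 * layout documented below.
 */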
static u64 swap_pte_to_pagemap_entry(pte_t pte)
{
	swp_entry_t e = pte_to_swp_entry(pte);
	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}

static u64 pte_to_pagemap_entry(pte_t pte)
{
	u64 pme = 0;
	if (is_swap_pte(pte))
		pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
	else if (pte_present(pte))
		pme = PM_PFRAME(pte_pfn(pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
	return pme;
}

static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma;
	struct pagemapread *pm = walk->private;
	pte_t *pte;
	int err = 0;

	/* find the first VMA at or above 'addr' */
	vma = find_vma(walk->mm, addr);
	for (; addr != end; addr += PAGE_SIZE) {
		u64 pfn = PM_NOT_PRESENT;

		/* check to see if we've left 'vma' behind
		 * and need a new, higher one */
		if (vma && (addr >= vma->vm_end))
			vma = find_vma(walk->mm, addr);

		/* check that 'vma' actually covers this address,
		 * and that it isn't a huge page vma */
		if (vma && (vma->vm_start <= addr) &&
		    !is_vm_hugetlb_page(vma)) {
			pte = pte_offset_map(pmd, addr);
			pfn = pte_to_pagemap_entry(*pte);
			/* unmap before userspace copy */
			pte_unmap(pte);
		}
		err = add_to_pagemap(addr, pfn, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bits 55-60 page shift (page size = 1<<page shift)
 * Bit  61    reserved for future use
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap.  Unmapped pages return a null PFN.  This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
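/*
 * Example (userspace, illustrative only -- not part of this file):
 * looking up one virtual address means seeking to vpn * 8 and
 * decoding the entry with the layout above:
 *
 *	int fd = open("/proc/1234/pagemap", O_RDONLY);
 *	uint64_t ent, pfn, type;
 *	off_t off = (vaddr / sysconf(_SC_PAGESIZE)) * 8;
 *	if (pread(fd, &ent, 8, off) == 8) {
 *		if (ent & (1ULL << 63))			/* present */
 *			pfn = ent & ((1ULL << 55) - 1);	/* bits 0-54 */
 *		else if (ent & (1ULL << 62))		/* swapped */
 *			type = ent & 0x1f;		/* bits 0-4 */
 *	}
 */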
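/*
 * Note that pagemap_read() below pins the destination buffer with
 * get_user_pages() up front, so that the put_user() copies issued
 * from inside the page-table walk do not fault in the middle of the
 * walk.
 */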
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
	struct page **pages, *page;
	unsigned long uaddr, uend;
	struct mm_struct *mm;
	struct pagemapread pm;
	int pagecount;
	int ret = -ESRCH;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;

	if (!task)
		goto out;

	ret = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto out_task;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_task;

	ret = 0;
	mm = get_task_mm(task);
	if (!mm)
		goto out_task;

	uaddr = (unsigned long)buf & PAGE_MASK;
	uend = (unsigned long)(buf + count);
	pagecount = (PAGE_ALIGN(uend) - uaddr) / PAGE_SIZE;
	ret = 0;
	if (pagecount == 0)
		goto out_mm;
	pages = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
	ret = -ENOMEM;
	if (!pages)
		goto out_mm;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, uaddr, pagecount,
			     1, 0, pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (ret < 0)
		goto out_free;

	if (ret != pagecount) {
		pagecount = ret;
		ret = -EFAULT;
		goto out_pages;
	}

	pm.out = (u64 __user *)buf;
	pm.end = (u64 __user *)(buf + count);

	pagemap_walk.pmd_entry = pagemap_pte_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = TASK_SIZE_OF(task);

	/* watch out for wraparound */
	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = walk_page_range(start_vaddr, end_vaddr, &pagemap_walk);
	if (ret == PM_END_OF_BUFFER)
		ret = 0;
	/* don't need mmap_sem for these, but this looks cleaner */
	*ppos += (char *)pm.out - buf;
	if (!ret)
		ret = (char *)pm.out - buf;

out_pages:
	for (; pagecount; pagecount--) {
		page = pages[pagecount-1];
		if (!PageReserved(page))
			SetPageDirty(page);
		page_cache_release(page);
	}
out_free:
	kfree(pages);
out_mm:
	mmput(mm);
out_task:
	put_task_struct(task);
out:
	return ret;
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA
extern int show_numa_map(struct seq_file *m, void *v);

static const struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_numa_maps_op);
}

const struct file_operations proc_numa_maps_operations = {
	.open		= numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif