1 /* 2 * linux/mm/vmstat.c 3 * 4 * Manages VM statistics 5 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 6 * 7 * zoned VM statistics 8 * Copyright (C) 2006 Silicon Graphics, Inc., 9 * Christoph Lameter <christoph@lameter.com> 10 */ 11 #include <linux/fs.h> 12 #include <linux/mm.h> 13 #include <linux/err.h> 14 #include <linux/module.h> 15 #include <linux/cpu.h> 16 #include <linux/vmstat.h> 17 #include <linux/sched.h> 18 19 #ifdef CONFIG_VM_EVENT_COUNTERS 20 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}}; 21 EXPORT_PER_CPU_SYMBOL(vm_event_states); 22 23 static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask) 24 { 25 int cpu; 26 int i; 27 28 memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long)); 29 30 for_each_cpu(cpu, cpumask) { 31 struct vm_event_state *this = &per_cpu(vm_event_states, cpu); 32 33 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) 34 ret[i] += this->event[i]; 35 } 36 } 37 38 /* 39 * Accumulate the vm event counters across all CPUs. 40 * The result is unavoidably approximate - it can change 41 * during and after execution of this function. 42 */ 43 void all_vm_events(unsigned long *ret) 44 { 45 get_online_cpus(); 46 sum_vm_events(ret, cpu_online_mask); 47 put_online_cpus(); 48 } 49 EXPORT_SYMBOL_GPL(all_vm_events); 50 51 #ifdef CONFIG_HOTPLUG 52 /* 53 * Fold the foreign cpu events into our own. 54 * 55 * This is adding to the events on one processor 56 * but keeps the global counts constant. 57 */ 58 void vm_events_fold_cpu(int cpu) 59 { 60 struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu); 61 int i; 62 63 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) { 64 count_vm_events(i, fold_state->event[i]); 65 fold_state->event[i] = 0; 66 } 67 } 68 #endif /* CONFIG_HOTPLUG */ 69 70 #endif /* CONFIG_VM_EVENT_COUNTERS */ 71 72 /* 73 * Manage combined zone based / global counters 74 * 75 * vm_stat contains the global counters 76 */ 77 atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; 78 EXPORT_SYMBOL(vm_stat); 79 80 #ifdef CONFIG_SMP 81 82 static int calculate_threshold(struct zone *zone) 83 { 84 int threshold; 85 int mem; /* memory in 128 MB units */ 86 87 /* 88 * The threshold scales with the number of processors and the amount 89 * of memory per zone. More memory means that we can defer updates for 90 * longer, more processors could lead to more contention. 91 * fls() is used to have a cheap way of logarithmic scaling. 92 * 93 * Some sample thresholds: 94 * 95 * Threshold Processors (fls) Zonesize fls(mem+1) 96 * ------------------------------------------------------------------ 97 * 8 1 1 0.9-1 GB 4 98 * 16 2 2 0.9-1 GB 4 99 * 20 2 2 1-2 GB 5 100 * 24 2 2 2-4 GB 6 101 * 28 2 2 4-8 GB 7 102 * 32 2 2 8-16 GB 8 103 * 4 2 2 <128M 1 104 * 30 4 3 2-4 GB 5 105 * 48 4 3 8-16 GB 8 106 * 32 8 4 1-2 GB 4 107 * 32 8 4 0.9-1GB 4 108 * 10 16 5 <128M 1 109 * 40 16 5 900M 4 110 * 70 64 7 2-4 GB 5 111 * 84 64 7 4-8 GB 6 112 * 108 512 9 4-8 GB 6 113 * 125 1024 10 8-16 GB 8 114 * 125 1024 10 16-32 GB 9 115 */ 116 117 mem = zone->present_pages >> (27 - PAGE_SHIFT); 118 119 threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem)); 120 121 /* 122 * Maximum threshold is 125 123 */ 124 threshold = min(125, threshold); 125 126 return threshold; 127 } 128 129 /* 130 * Refresh the thresholds for each zone. 131 */ 132 static void refresh_zone_stat_thresholds(void) 133 { 134 struct zone *zone; 135 int cpu; 136 int threshold; 137 138 for_each_populated_zone(zone) { 139 threshold = calculate_threshold(zone); 140 141 for_each_online_cpu(cpu) 142 zone_pcp(zone, cpu)->stat_threshold = threshold; 143 } 144 } 145 146 /* 147 * For use when we know that interrupts are disabled. 148 */ 149 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, 150 int delta) 151 { 152 struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id()); 153 s8 *p = pcp->vm_stat_diff + item; 154 long x; 155 156 x = delta + *p; 157 158 if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) { 159 zone_page_state_add(x, zone, item); 160 x = 0; 161 } 162 *p = x; 163 } 164 EXPORT_SYMBOL(__mod_zone_page_state); 165 166 /* 167 * For an unknown interrupt state 168 */ 169 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item, 170 int delta) 171 { 172 unsigned long flags; 173 174 local_irq_save(flags); 175 __mod_zone_page_state(zone, item, delta); 176 local_irq_restore(flags); 177 } 178 EXPORT_SYMBOL(mod_zone_page_state); 179 180 /* 181 * Optimized increment and decrement functions. 182 * 183 * These are only for a single page and therefore can take a struct page * 184 * argument instead of struct zone *. This allows the inclusion of the code 185 * generated for page_zone(page) into the optimized functions. 186 * 187 * No overflow check is necessary and therefore the differential can be 188 * incremented or decremented in place which may allow the compilers to 189 * generate better code. 190 * The increment or decrement is known and therefore one boundary check can 191 * be omitted. 192 * 193 * NOTE: These functions are very performance sensitive. Change only 194 * with care. 195 * 196 * Some processors have inc/dec instructions that are atomic vs an interrupt. 197 * However, the code must first determine the differential location in a zone 198 * based on the processor number and then inc/dec the counter. There is no 199 * guarantee without disabling preemption that the processor will not change 200 * in between and therefore the atomicity vs. interrupt cannot be exploited 201 * in a useful way here. 202 */ 203 void __inc_zone_state(struct zone *zone, enum zone_stat_item item) 204 { 205 struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id()); 206 s8 *p = pcp->vm_stat_diff + item; 207 208 (*p)++; 209 210 if (unlikely(*p > pcp->stat_threshold)) { 211 int overstep = pcp->stat_threshold / 2; 212 213 zone_page_state_add(*p + overstep, zone, item); 214 *p = -overstep; 215 } 216 } 217 218 void __inc_zone_page_state(struct page *page, enum zone_stat_item item) 219 { 220 __inc_zone_state(page_zone(page), item); 221 } 222 EXPORT_SYMBOL(__inc_zone_page_state); 223 224 void __dec_zone_state(struct zone *zone, enum zone_stat_item item) 225 { 226 struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id()); 227 s8 *p = pcp->vm_stat_diff + item; 228 229 (*p)--; 230 231 if (unlikely(*p < - pcp->stat_threshold)) { 232 int overstep = pcp->stat_threshold / 2; 233 234 zone_page_state_add(*p - overstep, zone, item); 235 *p = overstep; 236 } 237 } 238 239 void __dec_zone_page_state(struct page *page, enum zone_stat_item item) 240 { 241 __dec_zone_state(page_zone(page), item); 242 } 243 EXPORT_SYMBOL(__dec_zone_page_state); 244 245 void inc_zone_state(struct zone *zone, enum zone_stat_item item) 246 { 247 unsigned long flags; 248 249 local_irq_save(flags); 250 __inc_zone_state(zone, item); 251 local_irq_restore(flags); 252 } 253 254 void inc_zone_page_state(struct page *page, enum zone_stat_item item) 255 { 256 unsigned long flags; 257 struct zone *zone; 258 259 zone = page_zone(page); 260 local_irq_save(flags); 261 __inc_zone_state(zone, item); 262 local_irq_restore(flags); 263 } 264 EXPORT_SYMBOL(inc_zone_page_state); 265 266 void dec_zone_page_state(struct page *page, enum zone_stat_item item) 267 { 268 unsigned long flags; 269 270 local_irq_save(flags); 271 __dec_zone_page_state(page, item); 272 local_irq_restore(flags); 273 } 274 EXPORT_SYMBOL(dec_zone_page_state); 275 276 /* 277 * Update the zone counters for one cpu. 278 * 279 * The cpu specified must be either the current cpu or a processor that 280 * is not online. If it is the current cpu then the execution thread must 281 * be pinned to the current cpu. 282 * 283 * Note that refresh_cpu_vm_stats strives to only access 284 * node local memory. The per cpu pagesets on remote zones are placed 285 * in the memory local to the processor using that pageset. So the 286 * loop over all zones will access a series of cachelines local to 287 * the processor. 288 * 289 * The call to zone_page_state_add updates the cachelines with the 290 * statistics in the remote zone struct as well as the global cachelines 291 * with the global counters. These could cause remote node cache line 292 * bouncing and will have to be only done when necessary. 293 */ 294 void refresh_cpu_vm_stats(int cpu) 295 { 296 struct zone *zone; 297 int i; 298 int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, }; 299 300 for_each_populated_zone(zone) { 301 struct per_cpu_pageset *p; 302 303 p = zone_pcp(zone, cpu); 304 305 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) 306 if (p->vm_stat_diff[i]) { 307 unsigned long flags; 308 int v; 309 310 local_irq_save(flags); 311 v = p->vm_stat_diff[i]; 312 p->vm_stat_diff[i] = 0; 313 local_irq_restore(flags); 314 atomic_long_add(v, &zone->vm_stat[i]); 315 global_diff[i] += v; 316 #ifdef CONFIG_NUMA 317 /* 3 seconds idle till flush */ 318 p->expire = 3; 319 #endif 320 } 321 cond_resched(); 322 #ifdef CONFIG_NUMA 323 /* 324 * Deal with draining the remote pageset of this 325 * processor 326 * 327 * Check if there are pages remaining in this pageset 328 * if not then there is nothing to expire. 329 */ 330 if (!p->expire || !p->pcp.count) 331 continue; 332 333 /* 334 * We never drain zones local to this processor. 335 */ 336 if (zone_to_nid(zone) == numa_node_id()) { 337 p->expire = 0; 338 continue; 339 } 340 341 p->expire--; 342 if (p->expire) 343 continue; 344 345 if (p->pcp.count) 346 drain_zone_pages(zone, &p->pcp); 347 #endif 348 } 349 350 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) 351 if (global_diff[i]) 352 atomic_long_add(global_diff[i], &vm_stat[i]); 353 } 354 355 #endif 356 357 #ifdef CONFIG_NUMA 358 /* 359 * zonelist = the list of zones passed to the allocator 360 * z = the zone from which the allocation occurred. 361 * 362 * Must be called with interrupts disabled. 363 */ 364 void zone_statistics(struct zone *preferred_zone, struct zone *z) 365 { 366 if (z->zone_pgdat == preferred_zone->zone_pgdat) { 367 __inc_zone_state(z, NUMA_HIT); 368 } else { 369 __inc_zone_state(z, NUMA_MISS); 370 __inc_zone_state(preferred_zone, NUMA_FOREIGN); 371 } 372 if (z->node == numa_node_id()) 373 __inc_zone_state(z, NUMA_LOCAL); 374 else 375 __inc_zone_state(z, NUMA_OTHER); 376 } 377 #endif 378 379 #ifdef CONFIG_PROC_FS 380 #include <linux/proc_fs.h> 381 #include <linux/seq_file.h> 382 383 static char * const migratetype_names[MIGRATE_TYPES] = { 384 "Unmovable", 385 "Reclaimable", 386 "Movable", 387 "Reserve", 388 "Isolate", 389 }; 390 391 static void *frag_start(struct seq_file *m, loff_t *pos) 392 { 393 pg_data_t *pgdat; 394 loff_t node = *pos; 395 for (pgdat = first_online_pgdat(); 396 pgdat && node; 397 pgdat = next_online_pgdat(pgdat)) 398 --node; 399 400 return pgdat; 401 } 402 403 static void *frag_next(struct seq_file *m, void *arg, loff_t *pos) 404 { 405 pg_data_t *pgdat = (pg_data_t *)arg; 406 407 (*pos)++; 408 return next_online_pgdat(pgdat); 409 } 410 411 static void frag_stop(struct seq_file *m, void *arg) 412 { 413 } 414 415 /* Walk all the zones in a node and print using a callback */ 416 static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat, 417 void (*print)(struct seq_file *m, pg_data_t *, struct zone *)) 418 { 419 struct zone *zone; 420 struct zone *node_zones = pgdat->node_zones; 421 unsigned long flags; 422 423 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) { 424 if (!populated_zone(zone)) 425 continue; 426 427 spin_lock_irqsave(&zone->lock, flags); 428 print(m, pgdat, zone); 429 spin_unlock_irqrestore(&zone->lock, flags); 430 } 431 } 432 433 static void frag_show_print(struct seq_file *m, pg_data_t *pgdat, 434 struct zone *zone) 435 { 436 int order; 437 438 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name); 439 for (order = 0; order < MAX_ORDER; ++order) 440 seq_printf(m, "%6lu ", zone->free_area[order].nr_free); 441 seq_putc(m, '\n'); 442 } 443 444 /* 445 * This walks the free areas for each zone. 446 */ 447 static int frag_show(struct seq_file *m, void *arg) 448 { 449 pg_data_t *pgdat = (pg_data_t *)arg; 450 walk_zones_in_node(m, pgdat, frag_show_print); 451 return 0; 452 } 453 454 static void pagetypeinfo_showfree_print(struct seq_file *m, 455 pg_data_t *pgdat, struct zone *zone) 456 { 457 int order, mtype; 458 459 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) { 460 seq_printf(m, "Node %4d, zone %8s, type %12s ", 461 pgdat->node_id, 462 zone->name, 463 migratetype_names[mtype]); 464 for (order = 0; order < MAX_ORDER; ++order) { 465 unsigned long freecount = 0; 466 struct free_area *area; 467 struct list_head *curr; 468 469 area = &(zone->free_area[order]); 470 471 list_for_each(curr, &area->free_list[mtype]) 472 freecount++; 473 seq_printf(m, "%6lu ", freecount); 474 } 475 seq_putc(m, '\n'); 476 } 477 } 478 479 /* Print out the free pages at each order for each migatetype */ 480 static int pagetypeinfo_showfree(struct seq_file *m, void *arg) 481 { 482 int order; 483 pg_data_t *pgdat = (pg_data_t *)arg; 484 485 /* Print header */ 486 seq_printf(m, "%-43s ", "Free pages count per migrate type at order"); 487 for (order = 0; order < MAX_ORDER; ++order) 488 seq_printf(m, "%6d ", order); 489 seq_putc(m, '\n'); 490 491 walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print); 492 493 return 0; 494 } 495 496 static void pagetypeinfo_showblockcount_print(struct seq_file *m, 497 pg_data_t *pgdat, struct zone *zone) 498 { 499 int mtype; 500 unsigned long pfn; 501 unsigned long start_pfn = zone->zone_start_pfn; 502 unsigned long end_pfn = start_pfn + zone->spanned_pages; 503 unsigned long count[MIGRATE_TYPES] = { 0, }; 504 505 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { 506 struct page *page; 507 508 if (!pfn_valid(pfn)) 509 continue; 510 511 page = pfn_to_page(pfn); 512 513 /* Watch for unexpected holes punched in the memmap */ 514 if (!memmap_valid_within(pfn, page, zone)) 515 continue; 516 517 mtype = get_pageblock_migratetype(page); 518 519 if (mtype < MIGRATE_TYPES) 520 count[mtype]++; 521 } 522 523 /* Print counts */ 524 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name); 525 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) 526 seq_printf(m, "%12lu ", count[mtype]); 527 seq_putc(m, '\n'); 528 } 529 530 /* Print out the free pages at each order for each migratetype */ 531 static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg) 532 { 533 int mtype; 534 pg_data_t *pgdat = (pg_data_t *)arg; 535 536 seq_printf(m, "\n%-23s", "Number of blocks type "); 537 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) 538 seq_printf(m, "%12s ", migratetype_names[mtype]); 539 seq_putc(m, '\n'); 540 walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print); 541 542 return 0; 543 } 544 545 /* 546 * This prints out statistics in relation to grouping pages by mobility. 547 * It is expensive to collect so do not constantly read the file. 548 */ 549 static int pagetypeinfo_show(struct seq_file *m, void *arg) 550 { 551 pg_data_t *pgdat = (pg_data_t *)arg; 552 553 /* check memoryless node */ 554 if (!node_state(pgdat->node_id, N_HIGH_MEMORY)) 555 return 0; 556 557 seq_printf(m, "Page block order: %d\n", pageblock_order); 558 seq_printf(m, "Pages per block: %lu\n", pageblock_nr_pages); 559 seq_putc(m, '\n'); 560 pagetypeinfo_showfree(m, pgdat); 561 pagetypeinfo_showblockcount(m, pgdat); 562 563 return 0; 564 } 565 566 static const struct seq_operations fragmentation_op = { 567 .start = frag_start, 568 .next = frag_next, 569 .stop = frag_stop, 570 .show = frag_show, 571 }; 572 573 static int fragmentation_open(struct inode *inode, struct file *file) 574 { 575 return seq_open(file, &fragmentation_op); 576 } 577 578 static const struct file_operations fragmentation_file_operations = { 579 .open = fragmentation_open, 580 .read = seq_read, 581 .llseek = seq_lseek, 582 .release = seq_release, 583 }; 584 585 static const struct seq_operations pagetypeinfo_op = { 586 .start = frag_start, 587 .next = frag_next, 588 .stop = frag_stop, 589 .show = pagetypeinfo_show, 590 }; 591 592 static int pagetypeinfo_open(struct inode *inode, struct file *file) 593 { 594 return seq_open(file, &pagetypeinfo_op); 595 } 596 597 static const struct file_operations pagetypeinfo_file_ops = { 598 .open = pagetypeinfo_open, 599 .read = seq_read, 600 .llseek = seq_lseek, 601 .release = seq_release, 602 }; 603 604 #ifdef CONFIG_ZONE_DMA 605 #define TEXT_FOR_DMA(xx) xx "_dma", 606 #else 607 #define TEXT_FOR_DMA(xx) 608 #endif 609 610 #ifdef CONFIG_ZONE_DMA32 611 #define TEXT_FOR_DMA32(xx) xx "_dma32", 612 #else 613 #define TEXT_FOR_DMA32(xx) 614 #endif 615 616 #ifdef CONFIG_HIGHMEM 617 #define TEXT_FOR_HIGHMEM(xx) xx "_high", 618 #else 619 #define TEXT_FOR_HIGHMEM(xx) 620 #endif 621 622 #define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \ 623 TEXT_FOR_HIGHMEM(xx) xx "_movable", 624 625 static const char * const vmstat_text[] = { 626 /* Zoned VM counters */ 627 "nr_free_pages", 628 "nr_inactive_anon", 629 "nr_active_anon", 630 "nr_inactive_file", 631 "nr_active_file", 632 "nr_unevictable", 633 "nr_mlock", 634 "nr_anon_pages", 635 "nr_mapped", 636 "nr_file_pages", 637 "nr_dirty", 638 "nr_writeback", 639 "nr_slab_reclaimable", 640 "nr_slab_unreclaimable", 641 "nr_page_table_pages", 642 "nr_kernel_stack", 643 "nr_unstable", 644 "nr_bounce", 645 "nr_vmscan_write", 646 "nr_writeback_temp", 647 "nr_isolated_anon", 648 "nr_isolated_file", 649 "nr_shmem", 650 #ifdef CONFIG_NUMA 651 "numa_hit", 652 "numa_miss", 653 "numa_foreign", 654 "numa_interleave", 655 "numa_local", 656 "numa_other", 657 #endif 658 659 #ifdef CONFIG_VM_EVENT_COUNTERS 660 "pgpgin", 661 "pgpgout", 662 "pswpin", 663 "pswpout", 664 665 TEXTS_FOR_ZONES("pgalloc") 666 667 "pgfree", 668 "pgactivate", 669 "pgdeactivate", 670 671 "pgfault", 672 "pgmajfault", 673 674 TEXTS_FOR_ZONES("pgrefill") 675 TEXTS_FOR_ZONES("pgsteal") 676 TEXTS_FOR_ZONES("pgscan_kswapd") 677 TEXTS_FOR_ZONES("pgscan_direct") 678 679 #ifdef CONFIG_NUMA 680 "zone_reclaim_failed", 681 #endif 682 "pginodesteal", 683 "slabs_scanned", 684 "kswapd_steal", 685 "kswapd_inodesteal", 686 "kswapd_low_wmark_hit_quickly", 687 "kswapd_high_wmark_hit_quickly", 688 "kswapd_skip_congestion_wait", 689 "pageoutrun", 690 "allocstall", 691 692 "pgrotated", 693 #ifdef CONFIG_HUGETLB_PAGE 694 "htlb_buddy_alloc_success", 695 "htlb_buddy_alloc_fail", 696 #endif 697 "unevictable_pgs_culled", 698 "unevictable_pgs_scanned", 699 "unevictable_pgs_rescued", 700 "unevictable_pgs_mlocked", 701 "unevictable_pgs_munlocked", 702 "unevictable_pgs_cleared", 703 "unevictable_pgs_stranded", 704 "unevictable_pgs_mlockfreed", 705 #endif 706 }; 707 708 static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, 709 struct zone *zone) 710 { 711 int i; 712 seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name); 713 seq_printf(m, 714 "\n pages free %lu" 715 "\n min %lu" 716 "\n low %lu" 717 "\n high %lu" 718 "\n scanned %lu" 719 "\n spanned %lu" 720 "\n present %lu", 721 zone_page_state(zone, NR_FREE_PAGES), 722 min_wmark_pages(zone), 723 low_wmark_pages(zone), 724 high_wmark_pages(zone), 725 zone->pages_scanned, 726 zone->spanned_pages, 727 zone->present_pages); 728 729 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) 730 seq_printf(m, "\n %-12s %lu", vmstat_text[i], 731 zone_page_state(zone, i)); 732 733 seq_printf(m, 734 "\n protection: (%lu", 735 zone->lowmem_reserve[0]); 736 for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++) 737 seq_printf(m, ", %lu", zone->lowmem_reserve[i]); 738 seq_printf(m, 739 ")" 740 "\n pagesets"); 741 for_each_online_cpu(i) { 742 struct per_cpu_pageset *pageset; 743 744 pageset = zone_pcp(zone, i); 745 seq_printf(m, 746 "\n cpu: %i" 747 "\n count: %i" 748 "\n high: %i" 749 "\n batch: %i", 750 i, 751 pageset->pcp.count, 752 pageset->pcp.high, 753 pageset->pcp.batch); 754 #ifdef CONFIG_SMP 755 seq_printf(m, "\n vm stats threshold: %d", 756 pageset->stat_threshold); 757 #endif 758 } 759 seq_printf(m, 760 "\n all_unreclaimable: %u" 761 "\n prev_priority: %i" 762 "\n start_pfn: %lu" 763 "\n inactive_ratio: %u", 764 zone_is_all_unreclaimable(zone), 765 zone->prev_priority, 766 zone->zone_start_pfn, 767 zone->inactive_ratio); 768 seq_putc(m, '\n'); 769 } 770 771 /* 772 * Output information about zones in @pgdat. 773 */ 774 static int zoneinfo_show(struct seq_file *m, void *arg) 775 { 776 pg_data_t *pgdat = (pg_data_t *)arg; 777 walk_zones_in_node(m, pgdat, zoneinfo_show_print); 778 return 0; 779 } 780 781 static const struct seq_operations zoneinfo_op = { 782 .start = frag_start, /* iterate over all zones. The same as in 783 * fragmentation. */ 784 .next = frag_next, 785 .stop = frag_stop, 786 .show = zoneinfo_show, 787 }; 788 789 static int zoneinfo_open(struct inode *inode, struct file *file) 790 { 791 return seq_open(file, &zoneinfo_op); 792 } 793 794 static const struct file_operations proc_zoneinfo_file_operations = { 795 .open = zoneinfo_open, 796 .read = seq_read, 797 .llseek = seq_lseek, 798 .release = seq_release, 799 }; 800 801 static void *vmstat_start(struct seq_file *m, loff_t *pos) 802 { 803 unsigned long *v; 804 #ifdef CONFIG_VM_EVENT_COUNTERS 805 unsigned long *e; 806 #endif 807 int i; 808 809 if (*pos >= ARRAY_SIZE(vmstat_text)) 810 return NULL; 811 812 #ifdef CONFIG_VM_EVENT_COUNTERS 813 v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) 814 + sizeof(struct vm_event_state), GFP_KERNEL); 815 #else 816 v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long), 817 GFP_KERNEL); 818 #endif 819 m->private = v; 820 if (!v) 821 return ERR_PTR(-ENOMEM); 822 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) 823 v[i] = global_page_state(i); 824 #ifdef CONFIG_VM_EVENT_COUNTERS 825 e = v + NR_VM_ZONE_STAT_ITEMS; 826 all_vm_events(e); 827 e[PGPGIN] /= 2; /* sectors -> kbytes */ 828 e[PGPGOUT] /= 2; 829 #endif 830 return v + *pos; 831 } 832 833 static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos) 834 { 835 (*pos)++; 836 if (*pos >= ARRAY_SIZE(vmstat_text)) 837 return NULL; 838 return (unsigned long *)m->private + *pos; 839 } 840 841 static int vmstat_show(struct seq_file *m, void *arg) 842 { 843 unsigned long *l = arg; 844 unsigned long off = l - (unsigned long *)m->private; 845 846 seq_printf(m, "%s %lu\n", vmstat_text[off], *l); 847 return 0; 848 } 849 850 static void vmstat_stop(struct seq_file *m, void *arg) 851 { 852 kfree(m->private); 853 m->private = NULL; 854 } 855 856 static const struct seq_operations vmstat_op = { 857 .start = vmstat_start, 858 .next = vmstat_next, 859 .stop = vmstat_stop, 860 .show = vmstat_show, 861 }; 862 863 static int vmstat_open(struct inode *inode, struct file *file) 864 { 865 return seq_open(file, &vmstat_op); 866 } 867 868 static const struct file_operations proc_vmstat_file_operations = { 869 .open = vmstat_open, 870 .read = seq_read, 871 .llseek = seq_lseek, 872 .release = seq_release, 873 }; 874 #endif /* CONFIG_PROC_FS */ 875 876 #ifdef CONFIG_SMP 877 static DEFINE_PER_CPU(struct delayed_work, vmstat_work); 878 int sysctl_stat_interval __read_mostly = HZ; 879 880 static void vmstat_update(struct work_struct *w) 881 { 882 refresh_cpu_vm_stats(smp_processor_id()); 883 schedule_delayed_work(&__get_cpu_var(vmstat_work), 884 round_jiffies_relative(sysctl_stat_interval)); 885 } 886 887 static void __cpuinit start_cpu_timer(int cpu) 888 { 889 struct delayed_work *work = &per_cpu(vmstat_work, cpu); 890 891 INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update); 892 schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu)); 893 } 894 895 /* 896 * Use the cpu notifier to insure that the thresholds are recalculated 897 * when necessary. 898 */ 899 static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb, 900 unsigned long action, 901 void *hcpu) 902 { 903 long cpu = (long)hcpu; 904 905 switch (action) { 906 case CPU_ONLINE: 907 case CPU_ONLINE_FROZEN: 908 start_cpu_timer(cpu); 909 break; 910 case CPU_DOWN_PREPARE: 911 case CPU_DOWN_PREPARE_FROZEN: 912 cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu)); 913 per_cpu(vmstat_work, cpu).work.func = NULL; 914 break; 915 case CPU_DOWN_FAILED: 916 case CPU_DOWN_FAILED_FROZEN: 917 start_cpu_timer(cpu); 918 break; 919 case CPU_DEAD: 920 case CPU_DEAD_FROZEN: 921 refresh_zone_stat_thresholds(); 922 break; 923 default: 924 break; 925 } 926 return NOTIFY_OK; 927 } 928 929 static struct notifier_block __cpuinitdata vmstat_notifier = 930 { &vmstat_cpuup_callback, NULL, 0 }; 931 #endif 932 933 static int __init setup_vmstat(void) 934 { 935 #ifdef CONFIG_SMP 936 int cpu; 937 938 refresh_zone_stat_thresholds(); 939 register_cpu_notifier(&vmstat_notifier); 940 941 for_each_online_cpu(cpu) 942 start_cpu_timer(cpu); 943 #endif 944 #ifdef CONFIG_PROC_FS 945 proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations); 946 proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops); 947 proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations); 948 proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations); 949 #endif 950 return 0; 951 } 952 module_init(setup_vmstat) 953