/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 */

#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
{
	int cpu = 0;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	cpu = first_cpu(*cpumask);
	while (cpu < NR_CPUS) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		cpu = next_cpu(cpu, *cpumask);

		if (cpu < NR_CPUS)
			prefetch(&per_cpu(vm_event_states, cpu));

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	sum_vm_events(ret, &cpu_online_map);
}
EXPORT_SYMBOL_GPL(all_vm_events);

#ifdef CONFIG_HOTPLUG
/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}
#endif /* CONFIG_HOTPLUG */

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
EXPORT_SYMBOL(vm_stat);

#ifdef CONFIG_SMP

static int calculate_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates for
	 * longer, more processors could lead to more contention.
	 * fls() is used to have a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 *
	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
	 * ------------------------------------------------------------------
	 * 8		1		1	0.9-1 GB	4
	 * 16		2		2	0.9-1 GB	4
	 * 20		2		2	1-2 GB		5
	 * 24		2		2	2-4 GB		6
	 * 28		2		2	4-8 GB		7
	 * 32		2		2	8-16 GB		8
	 * 4		2		2	<128M		1
	 * 30		4		3	2-4 GB		5
	 * 48		4		3	8-16 GB		8
	 * 32		8		4	1-2 GB		4
	 * 32		8		4	0.9-1GB		4
	 * 10		16		5	<128M		1
	 * 40		16		5	900M		4
	 * 70		64		7	2-4 GB		5
	 * 84		64		7	4-8 GB		6
	 * 108		512		9	4-8 GB		6
	 * 125		1024		10	8-16 GB		8
	 * 125		1024		10	16-32 GB	9
	 */

	mem = zone->present_pages >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
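/*
 * A worked example of the scaling above, as a standalone userspace sketch
 * (illustrative only, not part of this file or the kernel build): with 2
 * online CPUs and a 1.5 GB zone, mem = 1536 MB / 128 MB = 12, so the
 * threshold is 2 * fls(2) * (1 + fls(12)) = 2 * 2 * 5 = 20, matching the
 * "20 2 2 1-2 GB 5" row of the table. demo_fls() is a portable stand-in
 * for the kernel's fls().
 */
#if 0
#include <stdio.h>

static int demo_fls(unsigned int x)
{
	int r = 0;

	while (x) {		/* position of the highest set bit, 1-based */
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	int cpus = 2;		/* stands in for num_online_cpus() */
	int mem = 12;		/* a 1.5 GB zone in 128 MB units */
	int threshold = 2 * demo_fls(cpus) * (1 + demo_fls(mem));

	printf("threshold = %d\n", threshold);	/* prints 20 */
	return 0;
}
#endif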
/*
 * Refresh the thresholds for each zone.
 */
static void refresh_zone_stat_thresholds(void)
{
	struct zone *zone;
	int cpu;
	int threshold;

	for_each_zone(zone) {

		if (!zone->present_pages)
			continue;

		threshold = calculate_threshold(zone);

		for_each_online_cpu(cpu)
			zone_pcp(zone, cpu)->stat_threshold = threshold;
	}
}

/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;
	long x;

	x = delta + *p;

	if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	*p = x;
}
EXPORT_SYMBOL(__mod_zone_page_state);

/*
 * For an unknown interrupt state
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);
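/*
 * Minimal usage sketch (hypothetical callers, not part of this file): a
 * writeback-style path accounting a batch of dirty pages. The helper names
 * below are made up for illustration; NR_FILE_DIRTY is the zoned counter
 * reported as "nr_dirty".
 */
#if 0
static void account_dirty_pages(struct zone *zone, int nr_pages)
{
	/* interrupt state unknown: take the irq-saving wrapper */
	mod_zone_page_state(zone, NR_FILE_DIRTY, nr_pages);
}

static void account_dirty_pages_irq(struct zone *zone, int nr_pages)
{
	/* caller guarantees interrupts are off: skip the save/restore */
	__mod_zone_page_state(zone, NR_FILE_DIRTY, nr_pages);
}
#endif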
/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;

	(*p)++;

	if (unlikely(*p > pcp->stat_threshold)) {
		int overstep = pcp->stat_threshold / 2;

		zone_page_state_add(*p + overstep, zone, item);
		*p = -overstep;
	}
}

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;

	(*p)--;

	if (unlikely(*p < -pcp->stat_threshold)) {
		int overstep = pcp->stat_threshold / 2;

		zone_page_state_add(*p - overstep, zone, item);
		*p = overstep;
	}
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);
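/*
 * Minimal usage sketch (hypothetical caller): the page variants derive the
 * zone via page_zone(), letting that code fold into the helper as described
 * above. The function below is illustrative and not part of this file;
 * NR_FILE_MAPPED is the zoned counter reported as "nr_mapped".
 */
#if 0
static void account_mapped_page(struct page *page, int mapped)
{
	/* assumes the caller already runs with interrupts disabled */
	if (mapped)
		__inc_zone_page_state(page, NR_FILE_MAPPED);
	else
		__dec_zone_page_state(page, NR_FILE_MAPPED);
}
#endif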
/*
 * Update the zone counters for one cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These could cause remote node cache line
 * bouncing and will have to be only done when necessary.
 */
void refresh_cpu_vm_stats(int cpu)
{
	struct zone *zone;
	int i;
	unsigned long flags;

	for_each_zone(zone) {
		struct per_cpu_pageset *p;

		if (!populated_zone(zone))
			continue;

		p = zone_pcp(zone, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (p->vm_stat_diff[i]) {
				local_irq_save(flags);
				zone_page_state_add(p->vm_stat_diff[i],
					zone, i);
				p->vm_stat_diff[i] = 0;
#ifdef CONFIG_NUMA
				/* 3 seconds idle till flush */
				p->expire = 3;
#endif
				local_irq_restore(flags);
			}
#ifdef CONFIG_NUMA
		/*
		 * Deal with draining the remote pageset of this
		 * processor.
		 *
		 * Check if there are pages remaining in this pageset;
		 * if not then there is nothing to expire.
		 */
		if (!p->expire || (!p->pcp[0].count && !p->pcp[1].count))
			continue;

		/*
		 * We never drain zones local to this processor.
		 */
		if (zone_to_nid(zone) == numa_node_id()) {
			p->expire = 0;
			continue;
		}

		p->expire--;
		if (p->expire)
			continue;

		if (p->pcp[0].count)
			drain_zone_pages(zone, p->pcp + 0);

		if (p->pcp[1].count)
			drain_zone_pages(zone, p->pcp + 1);
#endif
	}
}

static void __refresh_cpu_vm_stats(void *dummy)
{
	refresh_cpu_vm_stats(smp_processor_id());
}

/*
 * Consolidate all counters.
 *
 * Note that the result is more accurate but still approximate
 * if concurrent processes are allowed to run.
 */
void refresh_vm_stats(void)
{
	on_each_cpu(__refresh_cpu_vm_stats, NULL, 0, 1);
}
EXPORT_SYMBOL(refresh_vm_stats);

#endif

#ifdef CONFIG_NUMA
/*
 * zonelist = the list of zones passed to the allocator
 * z	    = the zone from which the allocation occurred.
 *
 * Must be called with interrupts disabled.
 */
void zone_statistics(struct zonelist *zonelist, struct zone *z)
{
	if (z->zone_pgdat == zonelist->zones[0]->zone_pgdat) {
		__inc_zone_state(z, NUMA_HIT);
	} else {
		__inc_zone_state(z, NUMA_MISS);
		__inc_zone_state(zonelist->zones[0], NUMA_FOREIGN);
	}
	if (z->node == numa_node_id())
		__inc_zone_state(z, NUMA_LOCAL);
	else
		__inc_zone_state(z, NUMA_OTHER);
}
#endif
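/*
 * Minimal caller sketch (hypothetical, loosely modeled on the page
 * allocator's fast path in mm/page_alloc.c): once a page has been taken
 * from zone z, the NUMA counters are bumped while interrupts are still
 * disabled. take_page_from() is a made-up placeholder for the actual
 * page removal.
 */
#if 0
static struct page *rmqueue_and_count(struct zonelist *zonelist,
					struct zone *z, int order)
{
	struct page *page;

	page = take_page_from(z, order);	/* hypothetical helper */
	if (page)
		zone_statistics(zonelist, z);	/* irqs are off here */
	return page;
}
#endif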
#ifdef CONFIG_PROC_FS

#include <linux/seq_file.h>

static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;

	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;
	int order;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
		for (order = 0; order < MAX_ORDER; ++order)
			seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
		spin_unlock_irqrestore(&zone->lock, flags);
		seq_putc(m, '\n');
	}
	return 0;
}

const struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};

#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
					TEXT_FOR_HIGHMEM(xx) xx "_movable",

static const char * const vmstat_text[] = {
	/* Zoned VM counters */
	"nr_free_pages",
	"nr_inactive",
	"nr_active",
	"nr_anon_pages",
	"nr_mapped",
	"nr_file_pages",
	"nr_dirty",
	"nr_writeback",
	"nr_slab_reclaimable",
	"nr_slab_unreclaimable",
	"nr_page_table_pages",
	"nr_unstable",
	"nr_bounce",
	"nr_vmscan_write",

#ifdef CONFIG_NUMA
	"numa_hit",
	"numa_miss",
	"numa_foreign",
	"numa_interleave",
	"numa_local",
	"numa_other",
#endif

#ifdef CONFIG_VM_EVENT_COUNTERS
	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	TEXTS_FOR_ZONES("pgalloc")

	"pgfree",
	"pgactivate",
	"pgdeactivate",

	"pgfault",
	"pgmajfault",

	TEXTS_FOR_ZONES("pgrefill")
	TEXTS_FOR_ZONES("pgsteal")
	TEXTS_FOR_ZONES("pgscan_kswapd")
	TEXTS_FOR_ZONES("pgscan_direct")

	"pginodesteal",
	"slabs_scanned",
	"kswapd_steal",
	"kswapd_inodesteal",
	"pageoutrun",
	"allocstall",

	"pgrotated",
#endif
};
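/*
 * For context, a sketch of how a seq_operations table like fragmentation_op
 * above gets exposed as a /proc file. The real wiring of this era lives in
 * fs/proc/proc_misc.c; the names below follow that file but are shown here
 * only as an illustration.
 */
#if 0
static int fragmentation_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &fragmentation_op);
}

static const struct file_operations fragmentation_file_operations = {
	.open		= fragmentation_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/* in proc_misc_init(), roughly: */
/*	entry = create_proc_entry("buddyinfo", S_IRUGO, NULL);	*/
/*	if (entry)						*/
/*		entry->proc_fops = &fragmentation_file_operations; */
#endif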
/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = arg;
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
		int i;

		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
		seq_printf(m,
			   "\n  pages free     %lu"
			   "\n        min      %lu"
			   "\n        low      %lu"
			   "\n        high     %lu"
			   "\n        scanned  %lu (a: %lu i: %lu)"
			   "\n        spanned  %lu"
			   "\n        present  %lu",
			   zone_page_state(zone, NR_FREE_PAGES),
			   zone->pages_min,
			   zone->pages_low,
			   zone->pages_high,
			   zone->pages_scanned,
			   zone->nr_scan_active, zone->nr_scan_inactive,
			   zone->spanned_pages,
			   zone->present_pages);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
					zone_page_state(zone, i));

		seq_printf(m,
			   "\n        protection: (%lu",
			   zone->lowmem_reserve[0]);
		for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
			seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
		seq_printf(m,
			   ")"
			   "\n  pagesets");
		for_each_online_cpu(i) {
			struct per_cpu_pageset *pageset;
			int j;

			pageset = zone_pcp(zone, i);
			for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
				seq_printf(m,
					   "\n    cpu: %i pcp: %i"
					   "\n              count: %i"
					   "\n              high:  %i"
					   "\n              batch: %i",
					   i, j,
					   pageset->pcp[j].count,
					   pageset->pcp[j].high,
					   pageset->pcp[j].batch);
			}
#ifdef CONFIG_SMP
			seq_printf(m, "\n  vm stats threshold: %d",
					pageset->stat_threshold);
#endif
		}
		seq_printf(m,
			   "\n  all_unreclaimable: %u"
			   "\n  prev_priority:     %i"
			   "\n  start_pfn:         %lu",
			   zone->all_unreclaimable,
			   zone->prev_priority,
			   zone->zone_start_pfn);
		spin_unlock_irqrestore(&zone->lock, flags);
		seq_putc(m, '\n');
	}
	return 0;
}

const struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};
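/*
 * For reference, the reader used by vmstat_start() below: a sketch modeled
 * on include/linux/vmstat.h of this era, shown here for context only. The
 * global counter is read in one go; per-cpu diffs still below their
 * thresholds are not included, which is why the value is approximate.
 */
#if 0
static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);

#ifdef CONFIG_SMP
	if (x < 0)	/* diffs in flight can drive the sum negative */
		x = 0;
#endif
	return x;
}
#endif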
static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
#ifdef CONFIG_VM_EVENT_COUNTERS
	unsigned long *e;
#endif
	int i;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;

#ifdef CONFIG_VM_EVENT_COUNTERS
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
			+ sizeof(struct vm_event_state), GFP_KERNEL);
#else
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long),
			GFP_KERNEL);
#endif
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_page_state(i);
#ifdef CONFIG_VM_EVENT_COUNTERS
	e = v + NR_VM_ZONE_STAT_ITEMS;
	all_vm_events(e);
	e[PGPGIN] /= 2;		/* sectors -> kbytes */
	e[PGPGOUT] /= 2;
#endif
	return v + *pos;
}

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
	return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

const struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};

#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;

static void vmstat_update(struct work_struct *w)
{
	refresh_cpu_vm_stats(smp_processor_id());
	schedule_delayed_work(&__get_cpu_var(vmstat_work),
		sysctl_stat_interval);
}

static void __devinit start_cpu_timer(int cpu)
{
	struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);

	INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
	schedule_delayed_work_on(cpu, vmstat_work, HZ + cpu);
}

/*
 * Use the cpu notifier to ensure that the thresholds are recalculated
 * when necessary.
 */
static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
		unsigned long action,
		void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu));
		per_cpu(vmstat_work, cpu).work.func = NULL;
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		refresh_zone_stat_thresholds();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata vmstat_notifier =
	{ &vmstat_cpuup_callback, NULL, 0 };

int __init setup_vmstat(void)
{
	int cpu;

	refresh_zone_stat_thresholds();
	register_cpu_notifier(&vmstat_notifier);

	for_each_online_cpu(cpu)
		start_cpu_timer(cpu);
	return 0;
}
module_init(setup_vmstat)
#endif
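/*
 * Illustrative userspace sketch (not kernel code): consuming the
 * "name value" pairs that vmstat_op above emits through /proc/vmstat.
 * Compiles and runs standalone on a Linux host.
 */
#if 0
#include <stdio.h>

int main(void)
{
	char name[64];
	unsigned long val;
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f)
		return 1;
	while (fscanf(f, "%63s %lu", name, &val) == 2)
		printf("%-24s %lu\n", name, val);
	fclose(f);
	return 0;
}
#endif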