1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/mm/vmstat.c 4 * 5 * Manages VM statistics 6 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 7 * 8 * zoned VM statistics 9 * Copyright (C) 2006 Silicon Graphics, Inc., 10 * Christoph Lameter <christoph@lameter.com> 11 * Copyright (C) 2008-2014 Christoph Lameter 12 */ 13 #include <linux/fs.h> 14 #include <linux/mm.h> 15 #include <linux/err.h> 16 #include <linux/module.h> 17 #include <linux/slab.h> 18 #include <linux/cpu.h> 19 #include <linux/cpumask.h> 20 #include <linux/vmstat.h> 21 #include <linux/proc_fs.h> 22 #include <linux/seq_file.h> 23 #include <linux/debugfs.h> 24 #include <linux/sched.h> 25 #include <linux/math64.h> 26 #include <linux/writeback.h> 27 #include <linux/compaction.h> 28 #include <linux/mm_inline.h> 29 #include <linux/page_ext.h> 30 #include <linux/page_owner.h> 31 32 #include "internal.h" 33 34 #ifdef CONFIG_NUMA 35 int sysctl_vm_numa_stat = ENABLE_NUMA_STAT; 36 37 /* zero numa counters within a zone */ 38 static void zero_zone_numa_counters(struct zone *zone) 39 { 40 int item, cpu; 41 42 for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++) { 43 atomic_long_set(&zone->vm_numa_event[item], 0); 44 for_each_online_cpu(cpu) { 45 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item] 46 = 0; 47 } 48 } 49 } 50 51 /* zero numa counters of all the populated zones */ 52 static void zero_zones_numa_counters(void) 53 { 54 struct zone *zone; 55 56 for_each_populated_zone(zone) 57 zero_zone_numa_counters(zone); 58 } 59 60 /* zero global numa counters */ 61 static void zero_global_numa_counters(void) 62 { 63 int item; 64 65 for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++) 66 atomic_long_set(&vm_numa_event[item], 0); 67 } 68 69 static void invalid_numa_statistics(void) 70 { 71 zero_zones_numa_counters(); 72 zero_global_numa_counters(); 73 } 74 75 static DEFINE_MUTEX(vm_numa_stat_lock); 76 77 int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write, 78 void *buffer, size_t *length, loff_t *ppos) 79 { 80 int ret, oldval; 81 82 mutex_lock(&vm_numa_stat_lock); 83 if (write) 84 oldval = sysctl_vm_numa_stat; 85 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 86 if (ret || !write) 87 goto out; 88 89 if (oldval == sysctl_vm_numa_stat) 90 goto out; 91 else if (sysctl_vm_numa_stat == ENABLE_NUMA_STAT) { 92 static_branch_enable(&vm_numa_stat_key); 93 pr_info("enable numa statistics\n"); 94 } else { 95 static_branch_disable(&vm_numa_stat_key); 96 invalid_numa_statistics(); 97 pr_info("disable numa statistics, and clear numa counters\n"); 98 } 99 100 out: 101 mutex_unlock(&vm_numa_stat_lock); 102 return ret; 103 } 104 #endif 105 106 #ifdef CONFIG_VM_EVENT_COUNTERS 107 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}}; 108 EXPORT_PER_CPU_SYMBOL(vm_event_states); 109 110 static void sum_vm_events(unsigned long *ret) 111 { 112 int cpu; 113 int i; 114 115 memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long)); 116 117 for_each_online_cpu(cpu) { 118 struct vm_event_state *this = &per_cpu(vm_event_states, cpu); 119 120 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) 121 ret[i] += this->event[i]; 122 } 123 } 124 125 /* 126 * Accumulate the vm event counters across all CPUs. 127 * The result is unavoidably approximate - it can change 128 * during and after execution of this function. 129 */ 130 void all_vm_events(unsigned long *ret) 131 { 132 cpus_read_lock(); 133 sum_vm_events(ret); 134 cpus_read_unlock(); 135 } 136 EXPORT_SYMBOL_GPL(all_vm_events); 137 138 /* 139 * Fold the foreign cpu events into our own. 140 * 141 * This is adding to the events on one processor 142 * but keeps the global counts constant. 143 */ 144 void vm_events_fold_cpu(int cpu) 145 { 146 struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu); 147 int i; 148 149 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) { 150 count_vm_events(i, fold_state->event[i]); 151 fold_state->event[i] = 0; 152 } 153 } 154 155 #endif /* CONFIG_VM_EVENT_COUNTERS */ 156 157 /* 158 * Manage combined zone based / global counters 159 * 160 * vm_stat contains the global counters 161 */ 162 atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp; 163 atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp; 164 atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS] __cacheline_aligned_in_smp; 165 EXPORT_SYMBOL(vm_zone_stat); 166 EXPORT_SYMBOL(vm_node_stat); 167 168 #ifdef CONFIG_NUMA 169 static void fold_vm_zone_numa_events(struct zone *zone) 170 { 171 unsigned long zone_numa_events[NR_VM_NUMA_EVENT_ITEMS] = { 0, }; 172 int cpu; 173 enum numa_stat_item item; 174 175 for_each_online_cpu(cpu) { 176 struct per_cpu_zonestat *pzstats; 177 178 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 179 for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++) 180 zone_numa_events[item] += xchg(&pzstats->vm_numa_event[item], 0); 181 } 182 183 for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++) 184 zone_numa_event_add(zone_numa_events[item], zone, item); 185 } 186 187 void fold_vm_numa_events(void) 188 { 189 struct zone *zone; 190 191 for_each_populated_zone(zone) 192 fold_vm_zone_numa_events(zone); 193 } 194 #endif 195 196 #ifdef CONFIG_SMP 197 198 int calculate_pressure_threshold(struct zone *zone) 199 { 200 int threshold; 201 int watermark_distance; 202 203 /* 204 * As vmstats are not up to date, there is drift between the estimated 205 * and real values. For high thresholds and a high number of CPUs, it 206 * is possible for the min watermark to be breached while the estimated 207 * value looks fine. The pressure threshold is a reduced value such 208 * that even the maximum amount of drift will not accidentally breach 209 * the min watermark 210 */ 211 watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone); 212 threshold = max(1, (int)(watermark_distance / num_online_cpus())); 213 214 /* 215 * Maximum threshold is 125 216 */ 217 threshold = min(125, threshold); 218 219 return threshold; 220 } 221 222 int calculate_normal_threshold(struct zone *zone) 223 { 224 int threshold; 225 int mem; /* memory in 128 MB units */ 226 227 /* 228 * The threshold scales with the number of processors and the amount 229 * of memory per zone. More memory means that we can defer updates for 230 * longer, more processors could lead to more contention. 231 * fls() is used to have a cheap way of logarithmic scaling. 232 * 233 * Some sample thresholds: 234 * 235 * Threshold Processors (fls) Zonesize fls(mem)+1 236 * ------------------------------------------------------------------ 237 * 8 1 1 0.9-1 GB 4 238 * 16 2 2 0.9-1 GB 4 239 * 20 2 2 1-2 GB 5 240 * 24 2 2 2-4 GB 6 241 * 28 2 2 4-8 GB 7 242 * 32 2 2 8-16 GB 8 243 * 4 2 2 <128M 1 244 * 30 4 3 2-4 GB 5 245 * 48 4 3 8-16 GB 8 246 * 32 8 4 1-2 GB 4 247 * 32 8 4 0.9-1GB 4 248 * 10 16 5 <128M 1 249 * 40 16 5 900M 4 250 * 70 64 7 2-4 GB 5 251 * 84 64 7 4-8 GB 6 252 * 108 512 9 4-8 GB 6 253 * 125 1024 10 8-16 GB 8 254 * 125 1024 10 16-32 GB 9 255 */ 256 257 mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT); 258 259 threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem)); 260 261 /* 262 * Maximum threshold is 125 263 */ 264 threshold = min(125, threshold); 265 266 return threshold; 267 } 268 269 /* 270 * Refresh the thresholds for each zone. 271 */ 272 void refresh_zone_stat_thresholds(void) 273 { 274 struct pglist_data *pgdat; 275 struct zone *zone; 276 int cpu; 277 int threshold; 278 279 /* Zero current pgdat thresholds */ 280 for_each_online_pgdat(pgdat) { 281 for_each_online_cpu(cpu) { 282 per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0; 283 } 284 } 285 286 for_each_populated_zone(zone) { 287 struct pglist_data *pgdat = zone->zone_pgdat; 288 unsigned long max_drift, tolerate_drift; 289 290 threshold = calculate_normal_threshold(zone); 291 292 for_each_online_cpu(cpu) { 293 int pgdat_threshold; 294 295 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold 296 = threshold; 297 298 /* Base nodestat threshold on the largest populated zone. */ 299 pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold; 300 per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold 301 = max(threshold, pgdat_threshold); 302 } 303 304 /* 305 * Only set percpu_drift_mark if there is a danger that 306 * NR_FREE_PAGES reports the low watermark is ok when in fact 307 * the min watermark could be breached by an allocation 308 */ 309 tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone); 310 max_drift = num_online_cpus() * threshold; 311 if (max_drift > tolerate_drift) 312 zone->percpu_drift_mark = high_wmark_pages(zone) + 313 max_drift; 314 } 315 } 316 317 void set_pgdat_percpu_threshold(pg_data_t *pgdat, 318 int (*calculate_pressure)(struct zone *)) 319 { 320 struct zone *zone; 321 int cpu; 322 int threshold; 323 int i; 324 325 for (i = 0; i < pgdat->nr_zones; i++) { 326 zone = &pgdat->node_zones[i]; 327 if (!zone->percpu_drift_mark) 328 continue; 329 330 threshold = (*calculate_pressure)(zone); 331 for_each_online_cpu(cpu) 332 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold 333 = threshold; 334 } 335 } 336 337 /* 338 * For use when we know that interrupts are disabled, 339 * or when we know that preemption is disabled and that 340 * particular counter cannot be updated from interrupt context. 341 */ 342 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, 343 long delta) 344 { 345 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats; 346 s8 __percpu *p = pcp->vm_stat_diff + item; 347 long x; 348 long t; 349 350 /* 351 * Accurate vmstat updates require a RMW. On !PREEMPT_RT kernels, 352 * atomicity is provided by IRQs being disabled -- either explicitly 353 * or via local_lock_irq. On PREEMPT_RT, local_lock_irq only disables 354 * CPU migrations and preemption potentially corrupts a counter so 355 * disable preemption. 356 */ 357 if (IS_ENABLED(CONFIG_PREEMPT_RT)) 358 preempt_disable(); 359 360 x = delta + __this_cpu_read(*p); 361 362 t = __this_cpu_read(pcp->stat_threshold); 363 364 if (unlikely(abs(x) > t)) { 365 zone_page_state_add(x, zone, item); 366 x = 0; 367 } 368 __this_cpu_write(*p, x); 369 370 if (IS_ENABLED(CONFIG_PREEMPT_RT)) 371 preempt_enable(); 372 } 373 EXPORT_SYMBOL(__mod_zone_page_state); 374 375 void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item, 376 long delta) 377 { 378 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats; 379 s8 __percpu *p = pcp->vm_node_stat_diff + item; 380 long x; 381 long t; 382 383 if (vmstat_item_in_bytes(item)) { 384 /* 385 * Only cgroups use subpage accounting right now; at 386 * the global level, these items still change in 387 * multiples of whole pages. Store them as pages 388 * internally to keep the per-cpu counters compact. 389 */ 390 VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1)); 391 delta >>= PAGE_SHIFT; 392 } 393 394 /* See __mod_node_page_state */ 395 if (IS_ENABLED(CONFIG_PREEMPT_RT)) 396 preempt_disable(); 397 398 x = delta + __this_cpu_read(*p); 399 400 t = __this_cpu_read(pcp->stat_threshold); 401 402 if (unlikely(abs(x) > t)) { 403 node_page_state_add(x, pgdat, item); 404 x = 0; 405 } 406 __this_cpu_write(*p, x); 407 408 if (IS_ENABLED(CONFIG_PREEMPT_RT)) 409 preempt_enable(); 410 } 411 EXPORT_SYMBOL(__mod_node_page_state); 412 413 /* 414 * Optimized increment and decrement functions. 415 * 416 * These are only for a single page and therefore can take a struct page * 417 * argument instead of struct zone *. This allows the inclusion of the code 418 * generated for page_zone(page) into the optimized functions. 419 * 420 * No overflow check is necessary and therefore the differential can be 421 * incremented or decremented in place which may allow the compilers to 422 * generate better code. 423 * The increment or decrement is known and therefore one boundary check can 424 * be omitted. 425 * 426 * NOTE: These functions are very performance sensitive. Change only 427 * with care. 428 * 429 * Some processors have inc/dec instructions that are atomic vs an interrupt. 430 * However, the code must first determine the differential location in a zone 431 * based on the processor number and then inc/dec the counter. There is no 432 * guarantee without disabling preemption that the processor will not change 433 * in between and therefore the atomicity vs. interrupt cannot be exploited 434 * in a useful way here. 435 */ 436 void __inc_zone_state(struct zone *zone, enum zone_stat_item item) 437 { 438 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats; 439 s8 __percpu *p = pcp->vm_stat_diff + item; 440 s8 v, t; 441 442 /* See __mod_node_page_state */ 443 if (IS_ENABLED(CONFIG_PREEMPT_RT)) 444 preempt_disable(); 445 446 v = __this_cpu_inc_return(*p); 447 t = __this_cpu_read(pcp->stat_threshold); 448 if (unlikely(v > t)) { 449 s8 overstep = t >> 1; 450 451 zone_page_state_add(v + overstep, zone, item); 452 __this_cpu_write(*p, -overstep); 453 } 454 455 if (IS_ENABLED(CONFIG_PREEMPT_RT)) 456 preempt_enable(); 457 } 458 459 void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) 460 { 461 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats; 462 s8 __percpu *p = pcp->vm_node_stat_diff + item; 463 s8 v, t; 464 465 VM_WARN_ON_ONCE(vmstat_item_in_bytes(item)); 466 467 /* See __mod_node_page_state */ 468 if (IS_ENABLED(CONFIG_PREEMPT_RT)) 469 preempt_disable(); 470 471 v = __this_cpu_inc_return(*p); 472 t = __this_cpu_read(pcp->stat_threshold); 473 if (unlikely(v > t)) { 474 s8 overstep = t >> 1; 475 476 node_page_state_add(v + overstep, pgdat, item); 477 __this_cpu_write(*p, -overstep); 478 } 479 480 if (IS_ENABLED(CONFIG_PREEMPT_RT)) 481 preempt_enable(); 482 } 483 484 void __inc_zone_page_state(struct page *page, enum zone_stat_item item) 485 { 486 __inc_zone_state(page_zone(page), item); 487 } 488 EXPORT_SYMBOL(__inc_zone_page_state); 489 490 void __inc_node_page_state(struct page *page, enum node_stat_item item) 491 { 492 __inc_node_state(page_pgdat(page), item); 493 } 494 EXPORT_SYMBOL(__inc_node_page_state); 495 496 void __dec_zone_state(struct zone *zone, enum zone_stat_item item) 497 { 498 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats; 499 s8 __percpu *p = pcp->vm_stat_diff + item; 500 s8 v, t; 501 502 /* See __mod_node_page_state */ 503 if (IS_ENABLED(CONFIG_PREEMPT_RT)) 504 preempt_disable(); 505 506 v = __this_cpu_dec_return(*p); 507 t = __this_cpu_read(pcp->stat_threshold); 508 if (unlikely(v < - t)) { 509 s8 overstep = t >> 1; 510 511 zone_page_state_add(v - overstep, zone, item); 512 __this_cpu_write(*p, overstep); 513 } 514 515 if (IS_ENABLED(CONFIG_PREEMPT_RT)) 516 preempt_enable(); 517 } 518 519 void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item) 520 { 521 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats; 522 s8 __percpu *p = pcp->vm_node_stat_diff + item; 523 s8 v, t; 524 525 VM_WARN_ON_ONCE(vmstat_item_in_bytes(item)); 526 527 /* See __mod_node_page_state */ 528 if (IS_ENABLED(CONFIG_PREEMPT_RT)) 529 preempt_disable(); 530 531 v = __this_cpu_dec_return(*p); 532 t = __this_cpu_read(pcp->stat_threshold); 533 if (unlikely(v < - t)) { 534 s8 overstep = t >> 1; 535 536 node_page_state_add(v - overstep, pgdat, item); 537 __this_cpu_write(*p, overstep); 538 } 539 540 if (IS_ENABLED(CONFIG_PREEMPT_RT)) 541 preempt_enable(); 542 } 543 544 void __dec_zone_page_state(struct page *page, enum zone_stat_item item) 545 { 546 __dec_zone_state(page_zone(page), item); 547 } 548 EXPORT_SYMBOL(__dec_zone_page_state); 549 550 void __dec_node_page_state(struct page *page, enum node_stat_item item) 551 { 552 __dec_node_state(page_pgdat(page), item); 553 } 554 EXPORT_SYMBOL(__dec_node_page_state); 555 556 #ifdef CONFIG_HAVE_CMPXCHG_LOCAL 557 /* 558 * If we have cmpxchg_local support then we do not need to incur the overhead 559 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg. 560 * 561 * mod_state() modifies the zone counter state through atomic per cpu 562 * operations. 563 * 564 * Overstep mode specifies how overstep should handled: 565 * 0 No overstepping 566 * 1 Overstepping half of threshold 567 * -1 Overstepping minus half of threshold 568 */ 569 static inline void mod_zone_state(struct zone *zone, 570 enum zone_stat_item item, long delta, int overstep_mode) 571 { 572 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats; 573 s8 __percpu *p = pcp->vm_stat_diff + item; 574 long o, n, t, z; 575 576 do { 577 z = 0; /* overflow to zone counters */ 578 579 /* 580 * The fetching of the stat_threshold is racy. We may apply 581 * a counter threshold to the wrong the cpu if we get 582 * rescheduled while executing here. However, the next 583 * counter update will apply the threshold again and 584 * therefore bring the counter under the threshold again. 585 * 586 * Most of the time the thresholds are the same anyways 587 * for all cpus in a zone. 588 */ 589 t = this_cpu_read(pcp->stat_threshold); 590 591 o = this_cpu_read(*p); 592 n = delta + o; 593 594 if (abs(n) > t) { 595 int os = overstep_mode * (t >> 1) ; 596 597 /* Overflow must be added to zone counters */ 598 z = n + os; 599 n = -os; 600 } 601 } while (this_cpu_cmpxchg(*p, o, n) != o); 602 603 if (z) 604 zone_page_state_add(z, zone, item); 605 } 606 607 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item, 608 long delta) 609 { 610 mod_zone_state(zone, item, delta, 0); 611 } 612 EXPORT_SYMBOL(mod_zone_page_state); 613 614 void inc_zone_page_state(struct page *page, enum zone_stat_item item) 615 { 616 mod_zone_state(page_zone(page), item, 1, 1); 617 } 618 EXPORT_SYMBOL(inc_zone_page_state); 619 620 void dec_zone_page_state(struct page *page, enum zone_stat_item item) 621 { 622 mod_zone_state(page_zone(page), item, -1, -1); 623 } 624 EXPORT_SYMBOL(dec_zone_page_state); 625 626 static inline void mod_node_state(struct pglist_data *pgdat, 627 enum node_stat_item item, int delta, int overstep_mode) 628 { 629 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats; 630 s8 __percpu *p = pcp->vm_node_stat_diff + item; 631 long o, n, t, z; 632 633 if (vmstat_item_in_bytes(item)) { 634 /* 635 * Only cgroups use subpage accounting right now; at 636 * the global level, these items still change in 637 * multiples of whole pages. Store them as pages 638 * internally to keep the per-cpu counters compact. 639 */ 640 VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1)); 641 delta >>= PAGE_SHIFT; 642 } 643 644 do { 645 z = 0; /* overflow to node counters */ 646 647 /* 648 * The fetching of the stat_threshold is racy. We may apply 649 * a counter threshold to the wrong the cpu if we get 650 * rescheduled while executing here. However, the next 651 * counter update will apply the threshold again and 652 * therefore bring the counter under the threshold again. 653 * 654 * Most of the time the thresholds are the same anyways 655 * for all cpus in a node. 656 */ 657 t = this_cpu_read(pcp->stat_threshold); 658 659 o = this_cpu_read(*p); 660 n = delta + o; 661 662 if (abs(n) > t) { 663 int os = overstep_mode * (t >> 1) ; 664 665 /* Overflow must be added to node counters */ 666 z = n + os; 667 n = -os; 668 } 669 } while (this_cpu_cmpxchg(*p, o, n) != o); 670 671 if (z) 672 node_page_state_add(z, pgdat, item); 673 } 674 675 void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item, 676 long delta) 677 { 678 mod_node_state(pgdat, item, delta, 0); 679 } 680 EXPORT_SYMBOL(mod_node_page_state); 681 682 void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) 683 { 684 mod_node_state(pgdat, item, 1, 1); 685 } 686 687 void inc_node_page_state(struct page *page, enum node_stat_item item) 688 { 689 mod_node_state(page_pgdat(page), item, 1, 1); 690 } 691 EXPORT_SYMBOL(inc_node_page_state); 692 693 void dec_node_page_state(struct page *page, enum node_stat_item item) 694 { 695 mod_node_state(page_pgdat(page), item, -1, -1); 696 } 697 EXPORT_SYMBOL(dec_node_page_state); 698 #else 699 /* 700 * Use interrupt disable to serialize counter updates 701 */ 702 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item, 703 long delta) 704 { 705 unsigned long flags; 706 707 local_irq_save(flags); 708 __mod_zone_page_state(zone, item, delta); 709 local_irq_restore(flags); 710 } 711 EXPORT_SYMBOL(mod_zone_page_state); 712 713 void inc_zone_page_state(struct page *page, enum zone_stat_item item) 714 { 715 unsigned long flags; 716 struct zone *zone; 717 718 zone = page_zone(page); 719 local_irq_save(flags); 720 __inc_zone_state(zone, item); 721 local_irq_restore(flags); 722 } 723 EXPORT_SYMBOL(inc_zone_page_state); 724 725 void dec_zone_page_state(struct page *page, enum zone_stat_item item) 726 { 727 unsigned long flags; 728 729 local_irq_save(flags); 730 __dec_zone_page_state(page, item); 731 local_irq_restore(flags); 732 } 733 EXPORT_SYMBOL(dec_zone_page_state); 734 735 void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) 736 { 737 unsigned long flags; 738 739 local_irq_save(flags); 740 __inc_node_state(pgdat, item); 741 local_irq_restore(flags); 742 } 743 EXPORT_SYMBOL(inc_node_state); 744 745 void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item, 746 long delta) 747 { 748 unsigned long flags; 749 750 local_irq_save(flags); 751 __mod_node_page_state(pgdat, item, delta); 752 local_irq_restore(flags); 753 } 754 EXPORT_SYMBOL(mod_node_page_state); 755 756 void inc_node_page_state(struct page *page, enum node_stat_item item) 757 { 758 unsigned long flags; 759 struct pglist_data *pgdat; 760 761 pgdat = page_pgdat(page); 762 local_irq_save(flags); 763 __inc_node_state(pgdat, item); 764 local_irq_restore(flags); 765 } 766 EXPORT_SYMBOL(inc_node_page_state); 767 768 void dec_node_page_state(struct page *page, enum node_stat_item item) 769 { 770 unsigned long flags; 771 772 local_irq_save(flags); 773 __dec_node_page_state(page, item); 774 local_irq_restore(flags); 775 } 776 EXPORT_SYMBOL(dec_node_page_state); 777 #endif 778 779 /* 780 * Fold a differential into the global counters. 781 * Returns the number of counters updated. 782 */ 783 static int fold_diff(int *zone_diff, int *node_diff) 784 { 785 int i; 786 int changes = 0; 787 788 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) 789 if (zone_diff[i]) { 790 atomic_long_add(zone_diff[i], &vm_zone_stat[i]); 791 changes++; 792 } 793 794 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) 795 if (node_diff[i]) { 796 atomic_long_add(node_diff[i], &vm_node_stat[i]); 797 changes++; 798 } 799 return changes; 800 } 801 802 /* 803 * Update the zone counters for the current cpu. 804 * 805 * Note that refresh_cpu_vm_stats strives to only access 806 * node local memory. The per cpu pagesets on remote zones are placed 807 * in the memory local to the processor using that pageset. So the 808 * loop over all zones will access a series of cachelines local to 809 * the processor. 810 * 811 * The call to zone_page_state_add updates the cachelines with the 812 * statistics in the remote zone struct as well as the global cachelines 813 * with the global counters. These could cause remote node cache line 814 * bouncing and will have to be only done when necessary. 815 * 816 * The function returns the number of global counters updated. 817 */ 818 static int refresh_cpu_vm_stats(bool do_pagesets) 819 { 820 struct pglist_data *pgdat; 821 struct zone *zone; 822 int i; 823 int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, }; 824 int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, }; 825 int changes = 0; 826 827 for_each_populated_zone(zone) { 828 struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats; 829 #ifdef CONFIG_NUMA 830 struct per_cpu_pages __percpu *pcp = zone->per_cpu_pageset; 831 #endif 832 833 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) { 834 int v; 835 836 v = this_cpu_xchg(pzstats->vm_stat_diff[i], 0); 837 if (v) { 838 839 atomic_long_add(v, &zone->vm_stat[i]); 840 global_zone_diff[i] += v; 841 #ifdef CONFIG_NUMA 842 /* 3 seconds idle till flush */ 843 __this_cpu_write(pcp->expire, 3); 844 #endif 845 } 846 } 847 #ifdef CONFIG_NUMA 848 849 if (do_pagesets) { 850 cond_resched(); 851 /* 852 * Deal with draining the remote pageset of this 853 * processor 854 * 855 * Check if there are pages remaining in this pageset 856 * if not then there is nothing to expire. 857 */ 858 if (!__this_cpu_read(pcp->expire) || 859 !__this_cpu_read(pcp->count)) 860 continue; 861 862 /* 863 * We never drain zones local to this processor. 864 */ 865 if (zone_to_nid(zone) == numa_node_id()) { 866 __this_cpu_write(pcp->expire, 0); 867 continue; 868 } 869 870 if (__this_cpu_dec_return(pcp->expire)) 871 continue; 872 873 if (__this_cpu_read(pcp->count)) { 874 drain_zone_pages(zone, this_cpu_ptr(pcp)); 875 changes++; 876 } 877 } 878 #endif 879 } 880 881 for_each_online_pgdat(pgdat) { 882 struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats; 883 884 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) { 885 int v; 886 887 v = this_cpu_xchg(p->vm_node_stat_diff[i], 0); 888 if (v) { 889 atomic_long_add(v, &pgdat->vm_stat[i]); 890 global_node_diff[i] += v; 891 } 892 } 893 } 894 895 changes += fold_diff(global_zone_diff, global_node_diff); 896 return changes; 897 } 898 899 /* 900 * Fold the data for an offline cpu into the global array. 901 * There cannot be any access by the offline cpu and therefore 902 * synchronization is simplified. 903 */ 904 void cpu_vm_stats_fold(int cpu) 905 { 906 struct pglist_data *pgdat; 907 struct zone *zone; 908 int i; 909 int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, }; 910 int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, }; 911 912 for_each_populated_zone(zone) { 913 struct per_cpu_zonestat *pzstats; 914 915 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 916 917 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) { 918 if (pzstats->vm_stat_diff[i]) { 919 int v; 920 921 v = pzstats->vm_stat_diff[i]; 922 pzstats->vm_stat_diff[i] = 0; 923 atomic_long_add(v, &zone->vm_stat[i]); 924 global_zone_diff[i] += v; 925 } 926 } 927 #ifdef CONFIG_NUMA 928 for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) { 929 if (pzstats->vm_numa_event[i]) { 930 unsigned long v; 931 932 v = pzstats->vm_numa_event[i]; 933 pzstats->vm_numa_event[i] = 0; 934 zone_numa_event_add(v, zone, i); 935 } 936 } 937 #endif 938 } 939 940 for_each_online_pgdat(pgdat) { 941 struct per_cpu_nodestat *p; 942 943 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu); 944 945 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) 946 if (p->vm_node_stat_diff[i]) { 947 int v; 948 949 v = p->vm_node_stat_diff[i]; 950 p->vm_node_stat_diff[i] = 0; 951 atomic_long_add(v, &pgdat->vm_stat[i]); 952 global_node_diff[i] += v; 953 } 954 } 955 956 fold_diff(global_zone_diff, global_node_diff); 957 } 958 959 /* 960 * this is only called if !populated_zone(zone), which implies no other users of 961 * pset->vm_stat_diff[] exist. 962 */ 963 void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *pzstats) 964 { 965 unsigned long v; 966 int i; 967 968 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) { 969 if (pzstats->vm_stat_diff[i]) { 970 v = pzstats->vm_stat_diff[i]; 971 pzstats->vm_stat_diff[i] = 0; 972 zone_page_state_add(v, zone, i); 973 } 974 } 975 976 #ifdef CONFIG_NUMA 977 for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) { 978 if (pzstats->vm_numa_event[i]) { 979 v = pzstats->vm_numa_event[i]; 980 pzstats->vm_numa_event[i] = 0; 981 zone_numa_event_add(v, zone, i); 982 } 983 } 984 #endif 985 } 986 #endif 987 988 #ifdef CONFIG_NUMA 989 /* 990 * Determine the per node value of a stat item. This function 991 * is called frequently in a NUMA machine, so try to be as 992 * frugal as possible. 993 */ 994 unsigned long sum_zone_node_page_state(int node, 995 enum zone_stat_item item) 996 { 997 struct zone *zones = NODE_DATA(node)->node_zones; 998 int i; 999 unsigned long count = 0; 1000 1001 for (i = 0; i < MAX_NR_ZONES; i++) 1002 count += zone_page_state(zones + i, item); 1003 1004 return count; 1005 } 1006 1007 /* Determine the per node value of a numa stat item. */ 1008 unsigned long sum_zone_numa_event_state(int node, 1009 enum numa_stat_item item) 1010 { 1011 struct zone *zones = NODE_DATA(node)->node_zones; 1012 unsigned long count = 0; 1013 int i; 1014 1015 for (i = 0; i < MAX_NR_ZONES; i++) 1016 count += zone_numa_event_state(zones + i, item); 1017 1018 return count; 1019 } 1020 1021 /* 1022 * Determine the per node value of a stat item. 1023 */ 1024 unsigned long node_page_state_pages(struct pglist_data *pgdat, 1025 enum node_stat_item item) 1026 { 1027 long x = atomic_long_read(&pgdat->vm_stat[item]); 1028 #ifdef CONFIG_SMP 1029 if (x < 0) 1030 x = 0; 1031 #endif 1032 return x; 1033 } 1034 1035 unsigned long node_page_state(struct pglist_data *pgdat, 1036 enum node_stat_item item) 1037 { 1038 VM_WARN_ON_ONCE(vmstat_item_in_bytes(item)); 1039 1040 return node_page_state_pages(pgdat, item); 1041 } 1042 #endif 1043 1044 #ifdef CONFIG_COMPACTION 1045 1046 struct contig_page_info { 1047 unsigned long free_pages; 1048 unsigned long free_blocks_total; 1049 unsigned long free_blocks_suitable; 1050 }; 1051 1052 /* 1053 * Calculate the number of free pages in a zone, how many contiguous 1054 * pages are free and how many are large enough to satisfy an allocation of 1055 * the target size. Note that this function makes no attempt to estimate 1056 * how many suitable free blocks there *might* be if MOVABLE pages were 1057 * migrated. Calculating that is possible, but expensive and can be 1058 * figured out from userspace 1059 */ 1060 static void fill_contig_page_info(struct zone *zone, 1061 unsigned int suitable_order, 1062 struct contig_page_info *info) 1063 { 1064 unsigned int order; 1065 1066 info->free_pages = 0; 1067 info->free_blocks_total = 0; 1068 info->free_blocks_suitable = 0; 1069 1070 for (order = 0; order < MAX_ORDER; order++) { 1071 unsigned long blocks; 1072 1073 /* 1074 * Count number of free blocks. 1075 * 1076 * Access to nr_free is lockless as nr_free is used only for 1077 * diagnostic purposes. Use data_race to avoid KCSAN warning. 1078 */ 1079 blocks = data_race(zone->free_area[order].nr_free); 1080 info->free_blocks_total += blocks; 1081 1082 /* Count free base pages */ 1083 info->free_pages += blocks << order; 1084 1085 /* Count the suitable free blocks */ 1086 if (order >= suitable_order) 1087 info->free_blocks_suitable += blocks << 1088 (order - suitable_order); 1089 } 1090 } 1091 1092 /* 1093 * A fragmentation index only makes sense if an allocation of a requested 1094 * size would fail. If that is true, the fragmentation index indicates 1095 * whether external fragmentation or a lack of memory was the problem. 1096 * The value can be used to determine if page reclaim or compaction 1097 * should be used 1098 */ 1099 static int __fragmentation_index(unsigned int order, struct contig_page_info *info) 1100 { 1101 unsigned long requested = 1UL << order; 1102 1103 if (WARN_ON_ONCE(order >= MAX_ORDER)) 1104 return 0; 1105 1106 if (!info->free_blocks_total) 1107 return 0; 1108 1109 /* Fragmentation index only makes sense when a request would fail */ 1110 if (info->free_blocks_suitable) 1111 return -1000; 1112 1113 /* 1114 * Index is between 0 and 1 so return within 3 decimal places 1115 * 1116 * 0 => allocation would fail due to lack of memory 1117 * 1 => allocation would fail due to fragmentation 1118 */ 1119 return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total); 1120 } 1121 1122 /* 1123 * Calculates external fragmentation within a zone wrt the given order. 1124 * It is defined as the percentage of pages found in blocks of size 1125 * less than 1 << order. It returns values in range [0, 100]. 1126 */ 1127 unsigned int extfrag_for_order(struct zone *zone, unsigned int order) 1128 { 1129 struct contig_page_info info; 1130 1131 fill_contig_page_info(zone, order, &info); 1132 if (info.free_pages == 0) 1133 return 0; 1134 1135 return div_u64((info.free_pages - 1136 (info.free_blocks_suitable << order)) * 100, 1137 info.free_pages); 1138 } 1139 1140 /* Same as __fragmentation index but allocs contig_page_info on stack */ 1141 int fragmentation_index(struct zone *zone, unsigned int order) 1142 { 1143 struct contig_page_info info; 1144 1145 fill_contig_page_info(zone, order, &info); 1146 return __fragmentation_index(order, &info); 1147 } 1148 #endif 1149 1150 #if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || \ 1151 defined(CONFIG_NUMA) || defined(CONFIG_MEMCG) 1152 #ifdef CONFIG_ZONE_DMA 1153 #define TEXT_FOR_DMA(xx) xx "_dma", 1154 #else 1155 #define TEXT_FOR_DMA(xx) 1156 #endif 1157 1158 #ifdef CONFIG_ZONE_DMA32 1159 #define TEXT_FOR_DMA32(xx) xx "_dma32", 1160 #else 1161 #define TEXT_FOR_DMA32(xx) 1162 #endif 1163 1164 #ifdef CONFIG_HIGHMEM 1165 #define TEXT_FOR_HIGHMEM(xx) xx "_high", 1166 #else 1167 #define TEXT_FOR_HIGHMEM(xx) 1168 #endif 1169 1170 #define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \ 1171 TEXT_FOR_HIGHMEM(xx) xx "_movable", 1172 1173 const char * const vmstat_text[] = { 1174 /* enum zone_stat_item counters */ 1175 "nr_free_pages", 1176 "nr_zone_inactive_anon", 1177 "nr_zone_active_anon", 1178 "nr_zone_inactive_file", 1179 "nr_zone_active_file", 1180 "nr_zone_unevictable", 1181 "nr_zone_write_pending", 1182 "nr_mlock", 1183 "nr_bounce", 1184 #if IS_ENABLED(CONFIG_ZSMALLOC) 1185 "nr_zspages", 1186 #endif 1187 "nr_free_cma", 1188 1189 /* enum numa_stat_item counters */ 1190 #ifdef CONFIG_NUMA 1191 "numa_hit", 1192 "numa_miss", 1193 "numa_foreign", 1194 "numa_interleave", 1195 "numa_local", 1196 "numa_other", 1197 #endif 1198 1199 /* enum node_stat_item counters */ 1200 "nr_inactive_anon", 1201 "nr_active_anon", 1202 "nr_inactive_file", 1203 "nr_active_file", 1204 "nr_unevictable", 1205 "nr_slab_reclaimable", 1206 "nr_slab_unreclaimable", 1207 "nr_isolated_anon", 1208 "nr_isolated_file", 1209 "workingset_nodes", 1210 "workingset_refault_anon", 1211 "workingset_refault_file", 1212 "workingset_activate_anon", 1213 "workingset_activate_file", 1214 "workingset_restore_anon", 1215 "workingset_restore_file", 1216 "workingset_nodereclaim", 1217 "nr_anon_pages", 1218 "nr_mapped", 1219 "nr_file_pages", 1220 "nr_dirty", 1221 "nr_writeback", 1222 "nr_writeback_temp", 1223 "nr_shmem", 1224 "nr_shmem_hugepages", 1225 "nr_shmem_pmdmapped", 1226 "nr_file_hugepages", 1227 "nr_file_pmdmapped", 1228 "nr_anon_transparent_hugepages", 1229 "nr_vmscan_write", 1230 "nr_vmscan_immediate_reclaim", 1231 "nr_dirtied", 1232 "nr_written", 1233 "nr_throttled_written", 1234 "nr_kernel_misc_reclaimable", 1235 "nr_foll_pin_acquired", 1236 "nr_foll_pin_released", 1237 "nr_kernel_stack", 1238 #if IS_ENABLED(CONFIG_SHADOW_CALL_STACK) 1239 "nr_shadow_call_stack", 1240 #endif 1241 "nr_page_table_pages", 1242 #ifdef CONFIG_SWAP 1243 "nr_swapcached", 1244 #endif 1245 1246 /* enum writeback_stat_item counters */ 1247 "nr_dirty_threshold", 1248 "nr_dirty_background_threshold", 1249 1250 #if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG) 1251 /* enum vm_event_item counters */ 1252 "pgpgin", 1253 "pgpgout", 1254 "pswpin", 1255 "pswpout", 1256 1257 TEXTS_FOR_ZONES("pgalloc") 1258 TEXTS_FOR_ZONES("allocstall") 1259 TEXTS_FOR_ZONES("pgskip") 1260 1261 "pgfree", 1262 "pgactivate", 1263 "pgdeactivate", 1264 "pglazyfree", 1265 1266 "pgfault", 1267 "pgmajfault", 1268 "pglazyfreed", 1269 1270 "pgrefill", 1271 "pgreuse", 1272 "pgsteal_kswapd", 1273 "pgsteal_direct", 1274 "pgdemote_kswapd", 1275 "pgdemote_direct", 1276 "pgscan_kswapd", 1277 "pgscan_direct", 1278 "pgscan_direct_throttle", 1279 "pgscan_anon", 1280 "pgscan_file", 1281 "pgsteal_anon", 1282 "pgsteal_file", 1283 1284 #ifdef CONFIG_NUMA 1285 "zone_reclaim_failed", 1286 #endif 1287 "pginodesteal", 1288 "slabs_scanned", 1289 "kswapd_inodesteal", 1290 "kswapd_low_wmark_hit_quickly", 1291 "kswapd_high_wmark_hit_quickly", 1292 "pageoutrun", 1293 1294 "pgrotated", 1295 1296 "drop_pagecache", 1297 "drop_slab", 1298 "oom_kill", 1299 1300 #ifdef CONFIG_NUMA_BALANCING 1301 "numa_pte_updates", 1302 "numa_huge_pte_updates", 1303 "numa_hint_faults", 1304 "numa_hint_faults_local", 1305 "numa_pages_migrated", 1306 #endif 1307 #ifdef CONFIG_MIGRATION 1308 "pgmigrate_success", 1309 "pgmigrate_fail", 1310 "thp_migration_success", 1311 "thp_migration_fail", 1312 "thp_migration_split", 1313 #endif 1314 #ifdef CONFIG_COMPACTION 1315 "compact_migrate_scanned", 1316 "compact_free_scanned", 1317 "compact_isolated", 1318 "compact_stall", 1319 "compact_fail", 1320 "compact_success", 1321 "compact_daemon_wake", 1322 "compact_daemon_migrate_scanned", 1323 "compact_daemon_free_scanned", 1324 #endif 1325 1326 #ifdef CONFIG_HUGETLB_PAGE 1327 "htlb_buddy_alloc_success", 1328 "htlb_buddy_alloc_fail", 1329 #endif 1330 #ifdef CONFIG_CMA 1331 "cma_alloc_success", 1332 "cma_alloc_fail", 1333 #endif 1334 "unevictable_pgs_culled", 1335 "unevictable_pgs_scanned", 1336 "unevictable_pgs_rescued", 1337 "unevictable_pgs_mlocked", 1338 "unevictable_pgs_munlocked", 1339 "unevictable_pgs_cleared", 1340 "unevictable_pgs_stranded", 1341 1342 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1343 "thp_fault_alloc", 1344 "thp_fault_fallback", 1345 "thp_fault_fallback_charge", 1346 "thp_collapse_alloc", 1347 "thp_collapse_alloc_failed", 1348 "thp_file_alloc", 1349 "thp_file_fallback", 1350 "thp_file_fallback_charge", 1351 "thp_file_mapped", 1352 "thp_split_page", 1353 "thp_split_page_failed", 1354 "thp_deferred_split_page", 1355 "thp_split_pmd", 1356 "thp_scan_exceed_none_pte", 1357 "thp_scan_exceed_swap_pte", 1358 "thp_scan_exceed_share_pte", 1359 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 1360 "thp_split_pud", 1361 #endif 1362 "thp_zero_page_alloc", 1363 "thp_zero_page_alloc_failed", 1364 "thp_swpout", 1365 "thp_swpout_fallback", 1366 #endif 1367 #ifdef CONFIG_MEMORY_BALLOON 1368 "balloon_inflate", 1369 "balloon_deflate", 1370 #ifdef CONFIG_BALLOON_COMPACTION 1371 "balloon_migrate", 1372 #endif 1373 #endif /* CONFIG_MEMORY_BALLOON */ 1374 #ifdef CONFIG_DEBUG_TLBFLUSH 1375 "nr_tlb_remote_flush", 1376 "nr_tlb_remote_flush_received", 1377 "nr_tlb_local_flush_all", 1378 "nr_tlb_local_flush_one", 1379 #endif /* CONFIG_DEBUG_TLBFLUSH */ 1380 1381 #ifdef CONFIG_DEBUG_VM_VMACACHE 1382 "vmacache_find_calls", 1383 "vmacache_find_hits", 1384 #endif 1385 #ifdef CONFIG_SWAP 1386 "swap_ra", 1387 "swap_ra_hit", 1388 #endif 1389 #ifdef CONFIG_X86 1390 "direct_map_level2_splits", 1391 "direct_map_level3_splits", 1392 #endif 1393 #endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */ 1394 }; 1395 #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */ 1396 1397 #if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \ 1398 defined(CONFIG_PROC_FS) 1399 static void *frag_start(struct seq_file *m, loff_t *pos) 1400 { 1401 pg_data_t *pgdat; 1402 loff_t node = *pos; 1403 1404 for (pgdat = first_online_pgdat(); 1405 pgdat && node; 1406 pgdat = next_online_pgdat(pgdat)) 1407 --node; 1408 1409 return pgdat; 1410 } 1411 1412 static void *frag_next(struct seq_file *m, void *arg, loff_t *pos) 1413 { 1414 pg_data_t *pgdat = (pg_data_t *)arg; 1415 1416 (*pos)++; 1417 return next_online_pgdat(pgdat); 1418 } 1419 1420 static void frag_stop(struct seq_file *m, void *arg) 1421 { 1422 } 1423 1424 /* 1425 * Walk zones in a node and print using a callback. 1426 * If @assert_populated is true, only use callback for zones that are populated. 1427 */ 1428 static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat, 1429 bool assert_populated, bool nolock, 1430 void (*print)(struct seq_file *m, pg_data_t *, struct zone *)) 1431 { 1432 struct zone *zone; 1433 struct zone *node_zones = pgdat->node_zones; 1434 unsigned long flags; 1435 1436 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) { 1437 if (assert_populated && !populated_zone(zone)) 1438 continue; 1439 1440 if (!nolock) 1441 spin_lock_irqsave(&zone->lock, flags); 1442 print(m, pgdat, zone); 1443 if (!nolock) 1444 spin_unlock_irqrestore(&zone->lock, flags); 1445 } 1446 } 1447 #endif 1448 1449 #ifdef CONFIG_PROC_FS 1450 static void frag_show_print(struct seq_file *m, pg_data_t *pgdat, 1451 struct zone *zone) 1452 { 1453 int order; 1454 1455 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name); 1456 for (order = 0; order < MAX_ORDER; ++order) 1457 /* 1458 * Access to nr_free is lockless as nr_free is used only for 1459 * printing purposes. Use data_race to avoid KCSAN warning. 1460 */ 1461 seq_printf(m, "%6lu ", data_race(zone->free_area[order].nr_free)); 1462 seq_putc(m, '\n'); 1463 } 1464 1465 /* 1466 * This walks the free areas for each zone. 1467 */ 1468 static int frag_show(struct seq_file *m, void *arg) 1469 { 1470 pg_data_t *pgdat = (pg_data_t *)arg; 1471 walk_zones_in_node(m, pgdat, true, false, frag_show_print); 1472 return 0; 1473 } 1474 1475 static void pagetypeinfo_showfree_print(struct seq_file *m, 1476 pg_data_t *pgdat, struct zone *zone) 1477 { 1478 int order, mtype; 1479 1480 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) { 1481 seq_printf(m, "Node %4d, zone %8s, type %12s ", 1482 pgdat->node_id, 1483 zone->name, 1484 migratetype_names[mtype]); 1485 for (order = 0; order < MAX_ORDER; ++order) { 1486 unsigned long freecount = 0; 1487 struct free_area *area; 1488 struct list_head *curr; 1489 bool overflow = false; 1490 1491 area = &(zone->free_area[order]); 1492 1493 list_for_each(curr, &area->free_list[mtype]) { 1494 /* 1495 * Cap the free_list iteration because it might 1496 * be really large and we are under a spinlock 1497 * so a long time spent here could trigger a 1498 * hard lockup detector. Anyway this is a 1499 * debugging tool so knowing there is a handful 1500 * of pages of this order should be more than 1501 * sufficient. 1502 */ 1503 if (++freecount >= 100000) { 1504 overflow = true; 1505 break; 1506 } 1507 } 1508 seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount); 1509 spin_unlock_irq(&zone->lock); 1510 cond_resched(); 1511 spin_lock_irq(&zone->lock); 1512 } 1513 seq_putc(m, '\n'); 1514 } 1515 } 1516 1517 /* Print out the free pages at each order for each migatetype */ 1518 static void pagetypeinfo_showfree(struct seq_file *m, void *arg) 1519 { 1520 int order; 1521 pg_data_t *pgdat = (pg_data_t *)arg; 1522 1523 /* Print header */ 1524 seq_printf(m, "%-43s ", "Free pages count per migrate type at order"); 1525 for (order = 0; order < MAX_ORDER; ++order) 1526 seq_printf(m, "%6d ", order); 1527 seq_putc(m, '\n'); 1528 1529 walk_zones_in_node(m, pgdat, true, false, pagetypeinfo_showfree_print); 1530 } 1531 1532 static void pagetypeinfo_showblockcount_print(struct seq_file *m, 1533 pg_data_t *pgdat, struct zone *zone) 1534 { 1535 int mtype; 1536 unsigned long pfn; 1537 unsigned long start_pfn = zone->zone_start_pfn; 1538 unsigned long end_pfn = zone_end_pfn(zone); 1539 unsigned long count[MIGRATE_TYPES] = { 0, }; 1540 1541 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { 1542 struct page *page; 1543 1544 page = pfn_to_online_page(pfn); 1545 if (!page) 1546 continue; 1547 1548 if (page_zone(page) != zone) 1549 continue; 1550 1551 mtype = get_pageblock_migratetype(page); 1552 1553 if (mtype < MIGRATE_TYPES) 1554 count[mtype]++; 1555 } 1556 1557 /* Print counts */ 1558 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name); 1559 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) 1560 seq_printf(m, "%12lu ", count[mtype]); 1561 seq_putc(m, '\n'); 1562 } 1563 1564 /* Print out the number of pageblocks for each migratetype */ 1565 static void pagetypeinfo_showblockcount(struct seq_file *m, void *arg) 1566 { 1567 int mtype; 1568 pg_data_t *pgdat = (pg_data_t *)arg; 1569 1570 seq_printf(m, "\n%-23s", "Number of blocks type "); 1571 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) 1572 seq_printf(m, "%12s ", migratetype_names[mtype]); 1573 seq_putc(m, '\n'); 1574 walk_zones_in_node(m, pgdat, true, false, 1575 pagetypeinfo_showblockcount_print); 1576 } 1577 1578 /* 1579 * Print out the number of pageblocks for each migratetype that contain pages 1580 * of other types. This gives an indication of how well fallbacks are being 1581 * contained by rmqueue_fallback(). It requires information from PAGE_OWNER 1582 * to determine what is going on 1583 */ 1584 static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat) 1585 { 1586 #ifdef CONFIG_PAGE_OWNER 1587 int mtype; 1588 1589 if (!static_branch_unlikely(&page_owner_inited)) 1590 return; 1591 1592 drain_all_pages(NULL); 1593 1594 seq_printf(m, "\n%-23s", "Number of mixed blocks "); 1595 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) 1596 seq_printf(m, "%12s ", migratetype_names[mtype]); 1597 seq_putc(m, '\n'); 1598 1599 walk_zones_in_node(m, pgdat, true, true, 1600 pagetypeinfo_showmixedcount_print); 1601 #endif /* CONFIG_PAGE_OWNER */ 1602 } 1603 1604 /* 1605 * This prints out statistics in relation to grouping pages by mobility. 1606 * It is expensive to collect so do not constantly read the file. 1607 */ 1608 static int pagetypeinfo_show(struct seq_file *m, void *arg) 1609 { 1610 pg_data_t *pgdat = (pg_data_t *)arg; 1611 1612 /* check memoryless node */ 1613 if (!node_state(pgdat->node_id, N_MEMORY)) 1614 return 0; 1615 1616 seq_printf(m, "Page block order: %d\n", pageblock_order); 1617 seq_printf(m, "Pages per block: %lu\n", pageblock_nr_pages); 1618 seq_putc(m, '\n'); 1619 pagetypeinfo_showfree(m, pgdat); 1620 pagetypeinfo_showblockcount(m, pgdat); 1621 pagetypeinfo_showmixedcount(m, pgdat); 1622 1623 return 0; 1624 } 1625 1626 static const struct seq_operations fragmentation_op = { 1627 .start = frag_start, 1628 .next = frag_next, 1629 .stop = frag_stop, 1630 .show = frag_show, 1631 }; 1632 1633 static const struct seq_operations pagetypeinfo_op = { 1634 .start = frag_start, 1635 .next = frag_next, 1636 .stop = frag_stop, 1637 .show = pagetypeinfo_show, 1638 }; 1639 1640 static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone) 1641 { 1642 int zid; 1643 1644 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 1645 struct zone *compare = &pgdat->node_zones[zid]; 1646 1647 if (populated_zone(compare)) 1648 return zone == compare; 1649 } 1650 1651 return false; 1652 } 1653 1654 static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, 1655 struct zone *zone) 1656 { 1657 int i; 1658 seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name); 1659 if (is_zone_first_populated(pgdat, zone)) { 1660 seq_printf(m, "\n per-node stats"); 1661 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) { 1662 unsigned long pages = node_page_state_pages(pgdat, i); 1663 1664 if (vmstat_item_print_in_thp(i)) 1665 pages /= HPAGE_PMD_NR; 1666 seq_printf(m, "\n %-12s %lu", node_stat_name(i), 1667 pages); 1668 } 1669 } 1670 seq_printf(m, 1671 "\n pages free %lu" 1672 "\n boost %lu" 1673 "\n min %lu" 1674 "\n low %lu" 1675 "\n high %lu" 1676 "\n spanned %lu" 1677 "\n present %lu" 1678 "\n managed %lu" 1679 "\n cma %lu", 1680 zone_page_state(zone, NR_FREE_PAGES), 1681 zone->watermark_boost, 1682 min_wmark_pages(zone), 1683 low_wmark_pages(zone), 1684 high_wmark_pages(zone), 1685 zone->spanned_pages, 1686 zone->present_pages, 1687 zone_managed_pages(zone), 1688 zone_cma_pages(zone)); 1689 1690 seq_printf(m, 1691 "\n protection: (%ld", 1692 zone->lowmem_reserve[0]); 1693 for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++) 1694 seq_printf(m, ", %ld", zone->lowmem_reserve[i]); 1695 seq_putc(m, ')'); 1696 1697 /* If unpopulated, no other information is useful */ 1698 if (!populated_zone(zone)) { 1699 seq_putc(m, '\n'); 1700 return; 1701 } 1702 1703 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) 1704 seq_printf(m, "\n %-12s %lu", zone_stat_name(i), 1705 zone_page_state(zone, i)); 1706 1707 #ifdef CONFIG_NUMA 1708 for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) 1709 seq_printf(m, "\n %-12s %lu", numa_stat_name(i), 1710 zone_numa_event_state(zone, i)); 1711 #endif 1712 1713 seq_printf(m, "\n pagesets"); 1714 for_each_online_cpu(i) { 1715 struct per_cpu_pages *pcp; 1716 struct per_cpu_zonestat __maybe_unused *pzstats; 1717 1718 pcp = per_cpu_ptr(zone->per_cpu_pageset, i); 1719 seq_printf(m, 1720 "\n cpu: %i" 1721 "\n count: %i" 1722 "\n high: %i" 1723 "\n batch: %i", 1724 i, 1725 pcp->count, 1726 pcp->high, 1727 pcp->batch); 1728 #ifdef CONFIG_SMP 1729 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, i); 1730 seq_printf(m, "\n vm stats threshold: %d", 1731 pzstats->stat_threshold); 1732 #endif 1733 } 1734 seq_printf(m, 1735 "\n node_unreclaimable: %u" 1736 "\n start_pfn: %lu", 1737 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES, 1738 zone->zone_start_pfn); 1739 seq_putc(m, '\n'); 1740 } 1741 1742 /* 1743 * Output information about zones in @pgdat. All zones are printed regardless 1744 * of whether they are populated or not: lowmem_reserve_ratio operates on the 1745 * set of all zones and userspace would not be aware of such zones if they are 1746 * suppressed here (zoneinfo displays the effect of lowmem_reserve_ratio). 1747 */ 1748 static int zoneinfo_show(struct seq_file *m, void *arg) 1749 { 1750 pg_data_t *pgdat = (pg_data_t *)arg; 1751 walk_zones_in_node(m, pgdat, false, false, zoneinfo_show_print); 1752 return 0; 1753 } 1754 1755 static const struct seq_operations zoneinfo_op = { 1756 .start = frag_start, /* iterate over all zones. The same as in 1757 * fragmentation. */ 1758 .next = frag_next, 1759 .stop = frag_stop, 1760 .show = zoneinfo_show, 1761 }; 1762 1763 #define NR_VMSTAT_ITEMS (NR_VM_ZONE_STAT_ITEMS + \ 1764 NR_VM_NUMA_EVENT_ITEMS + \ 1765 NR_VM_NODE_STAT_ITEMS + \ 1766 NR_VM_WRITEBACK_STAT_ITEMS + \ 1767 (IS_ENABLED(CONFIG_VM_EVENT_COUNTERS) ? \ 1768 NR_VM_EVENT_ITEMS : 0)) 1769 1770 static void *vmstat_start(struct seq_file *m, loff_t *pos) 1771 { 1772 unsigned long *v; 1773 int i; 1774 1775 if (*pos >= NR_VMSTAT_ITEMS) 1776 return NULL; 1777 1778 BUILD_BUG_ON(ARRAY_SIZE(vmstat_text) < NR_VMSTAT_ITEMS); 1779 fold_vm_numa_events(); 1780 v = kmalloc_array(NR_VMSTAT_ITEMS, sizeof(unsigned long), GFP_KERNEL); 1781 m->private = v; 1782 if (!v) 1783 return ERR_PTR(-ENOMEM); 1784 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) 1785 v[i] = global_zone_page_state(i); 1786 v += NR_VM_ZONE_STAT_ITEMS; 1787 1788 #ifdef CONFIG_NUMA 1789 for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) 1790 v[i] = global_numa_event_state(i); 1791 v += NR_VM_NUMA_EVENT_ITEMS; 1792 #endif 1793 1794 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) { 1795 v[i] = global_node_page_state_pages(i); 1796 if (vmstat_item_print_in_thp(i)) 1797 v[i] /= HPAGE_PMD_NR; 1798 } 1799 v += NR_VM_NODE_STAT_ITEMS; 1800 1801 global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD, 1802 v + NR_DIRTY_THRESHOLD); 1803 v += NR_VM_WRITEBACK_STAT_ITEMS; 1804 1805 #ifdef CONFIG_VM_EVENT_COUNTERS 1806 all_vm_events(v); 1807 v[PGPGIN] /= 2; /* sectors -> kbytes */ 1808 v[PGPGOUT] /= 2; 1809 #endif 1810 return (unsigned long *)m->private + *pos; 1811 } 1812 1813 static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos) 1814 { 1815 (*pos)++; 1816 if (*pos >= NR_VMSTAT_ITEMS) 1817 return NULL; 1818 return (unsigned long *)m->private + *pos; 1819 } 1820 1821 static int vmstat_show(struct seq_file *m, void *arg) 1822 { 1823 unsigned long *l = arg; 1824 unsigned long off = l - (unsigned long *)m->private; 1825 1826 seq_puts(m, vmstat_text[off]); 1827 seq_put_decimal_ull(m, " ", *l); 1828 seq_putc(m, '\n'); 1829 1830 if (off == NR_VMSTAT_ITEMS - 1) { 1831 /* 1832 * We've come to the end - add any deprecated counters to avoid 1833 * breaking userspace which might depend on them being present. 1834 */ 1835 seq_puts(m, "nr_unstable 0\n"); 1836 } 1837 return 0; 1838 } 1839 1840 static void vmstat_stop(struct seq_file *m, void *arg) 1841 { 1842 kfree(m->private); 1843 m->private = NULL; 1844 } 1845 1846 static const struct seq_operations vmstat_op = { 1847 .start = vmstat_start, 1848 .next = vmstat_next, 1849 .stop = vmstat_stop, 1850 .show = vmstat_show, 1851 }; 1852 #endif /* CONFIG_PROC_FS */ 1853 1854 #ifdef CONFIG_SMP 1855 static DEFINE_PER_CPU(struct delayed_work, vmstat_work); 1856 int sysctl_stat_interval __read_mostly = HZ; 1857 1858 #ifdef CONFIG_PROC_FS 1859 static void refresh_vm_stats(struct work_struct *work) 1860 { 1861 refresh_cpu_vm_stats(true); 1862 } 1863 1864 int vmstat_refresh(struct ctl_table *table, int write, 1865 void *buffer, size_t *lenp, loff_t *ppos) 1866 { 1867 long val; 1868 int err; 1869 int i; 1870 1871 /* 1872 * The regular update, every sysctl_stat_interval, may come later 1873 * than expected: leaving a significant amount in per_cpu buckets. 1874 * This is particularly misleading when checking a quantity of HUGE 1875 * pages, immediately after running a test. /proc/sys/vm/stat_refresh, 1876 * which can equally be echo'ed to or cat'ted from (by root), 1877 * can be used to update the stats just before reading them. 1878 * 1879 * Oh, and since global_zone_page_state() etc. are so careful to hide 1880 * transiently negative values, report an error here if any of 1881 * the stats is negative, so we know to go looking for imbalance. 1882 */ 1883 err = schedule_on_each_cpu(refresh_vm_stats); 1884 if (err) 1885 return err; 1886 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) { 1887 /* 1888 * Skip checking stats known to go negative occasionally. 1889 */ 1890 switch (i) { 1891 case NR_ZONE_WRITE_PENDING: 1892 case NR_FREE_CMA_PAGES: 1893 continue; 1894 } 1895 val = atomic_long_read(&vm_zone_stat[i]); 1896 if (val < 0) { 1897 pr_warn("%s: %s %ld\n", 1898 __func__, zone_stat_name(i), val); 1899 } 1900 } 1901 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) { 1902 /* 1903 * Skip checking stats known to go negative occasionally. 1904 */ 1905 switch (i) { 1906 case NR_WRITEBACK: 1907 continue; 1908 } 1909 val = atomic_long_read(&vm_node_stat[i]); 1910 if (val < 0) { 1911 pr_warn("%s: %s %ld\n", 1912 __func__, node_stat_name(i), val); 1913 } 1914 } 1915 if (write) 1916 *ppos += *lenp; 1917 else 1918 *lenp = 0; 1919 return 0; 1920 } 1921 #endif /* CONFIG_PROC_FS */ 1922 1923 static void vmstat_update(struct work_struct *w) 1924 { 1925 if (refresh_cpu_vm_stats(true)) { 1926 /* 1927 * Counters were updated so we expect more updates 1928 * to occur in the future. Keep on running the 1929 * update worker thread. 1930 */ 1931 queue_delayed_work_on(smp_processor_id(), mm_percpu_wq, 1932 this_cpu_ptr(&vmstat_work), 1933 round_jiffies_relative(sysctl_stat_interval)); 1934 } 1935 } 1936 1937 /* 1938 * Check if the diffs for a certain cpu indicate that 1939 * an update is needed. 1940 */ 1941 static bool need_update(int cpu) 1942 { 1943 pg_data_t *last_pgdat = NULL; 1944 struct zone *zone; 1945 1946 for_each_populated_zone(zone) { 1947 struct per_cpu_zonestat *pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 1948 struct per_cpu_nodestat *n; 1949 1950 /* 1951 * The fast way of checking if there are any vmstat diffs. 1952 */ 1953 if (memchr_inv(pzstats->vm_stat_diff, 0, sizeof(pzstats->vm_stat_diff))) 1954 return true; 1955 1956 if (last_pgdat == zone->zone_pgdat) 1957 continue; 1958 last_pgdat = zone->zone_pgdat; 1959 n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu); 1960 if (memchr_inv(n->vm_node_stat_diff, 0, sizeof(n->vm_node_stat_diff))) 1961 return true; 1962 } 1963 return false; 1964 } 1965 1966 /* 1967 * Switch off vmstat processing and then fold all the remaining differentials 1968 * until the diffs stay at zero. The function is used by NOHZ and can only be 1969 * invoked when tick processing is not active. 1970 */ 1971 void quiet_vmstat(void) 1972 { 1973 if (system_state != SYSTEM_RUNNING) 1974 return; 1975 1976 if (!delayed_work_pending(this_cpu_ptr(&vmstat_work))) 1977 return; 1978 1979 if (!need_update(smp_processor_id())) 1980 return; 1981 1982 /* 1983 * Just refresh counters and do not care about the pending delayed 1984 * vmstat_update. It doesn't fire that often to matter and canceling 1985 * it would be too expensive from this path. 1986 * vmstat_shepherd will take care about that for us. 1987 */ 1988 refresh_cpu_vm_stats(false); 1989 } 1990 1991 /* 1992 * Shepherd worker thread that checks the 1993 * differentials of processors that have their worker 1994 * threads for vm statistics updates disabled because of 1995 * inactivity. 1996 */ 1997 static void vmstat_shepherd(struct work_struct *w); 1998 1999 static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd); 2000 2001 static void vmstat_shepherd(struct work_struct *w) 2002 { 2003 int cpu; 2004 2005 cpus_read_lock(); 2006 /* Check processors whose vmstat worker threads have been disabled */ 2007 for_each_online_cpu(cpu) { 2008 struct delayed_work *dw = &per_cpu(vmstat_work, cpu); 2009 2010 if (!delayed_work_pending(dw) && need_update(cpu)) 2011 queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0); 2012 2013 cond_resched(); 2014 } 2015 cpus_read_unlock(); 2016 2017 schedule_delayed_work(&shepherd, 2018 round_jiffies_relative(sysctl_stat_interval)); 2019 } 2020 2021 static void __init start_shepherd_timer(void) 2022 { 2023 int cpu; 2024 2025 for_each_possible_cpu(cpu) 2026 INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu), 2027 vmstat_update); 2028 2029 schedule_delayed_work(&shepherd, 2030 round_jiffies_relative(sysctl_stat_interval)); 2031 } 2032 2033 static void __init init_cpu_node_state(void) 2034 { 2035 int node; 2036 2037 for_each_online_node(node) { 2038 if (cpumask_weight(cpumask_of_node(node)) > 0) 2039 node_set_state(node, N_CPU); 2040 } 2041 } 2042 2043 static int vmstat_cpu_online(unsigned int cpu) 2044 { 2045 refresh_zone_stat_thresholds(); 2046 node_set_state(cpu_to_node(cpu), N_CPU); 2047 return 0; 2048 } 2049 2050 static int vmstat_cpu_down_prep(unsigned int cpu) 2051 { 2052 cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu)); 2053 return 0; 2054 } 2055 2056 static int vmstat_cpu_dead(unsigned int cpu) 2057 { 2058 const struct cpumask *node_cpus; 2059 int node; 2060 2061 node = cpu_to_node(cpu); 2062 2063 refresh_zone_stat_thresholds(); 2064 node_cpus = cpumask_of_node(node); 2065 if (cpumask_weight(node_cpus) > 0) 2066 return 0; 2067 2068 node_clear_state(node, N_CPU); 2069 return 0; 2070 } 2071 2072 #endif 2073 2074 struct workqueue_struct *mm_percpu_wq; 2075 2076 void __init init_mm_internals(void) 2077 { 2078 int ret __maybe_unused; 2079 2080 mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0); 2081 2082 #ifdef CONFIG_SMP 2083 ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead", 2084 NULL, vmstat_cpu_dead); 2085 if (ret < 0) 2086 pr_err("vmstat: failed to register 'dead' hotplug state\n"); 2087 2088 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online", 2089 vmstat_cpu_online, 2090 vmstat_cpu_down_prep); 2091 if (ret < 0) 2092 pr_err("vmstat: failed to register 'online' hotplug state\n"); 2093 2094 cpus_read_lock(); 2095 init_cpu_node_state(); 2096 cpus_read_unlock(); 2097 2098 start_shepherd_timer(); 2099 #endif 2100 #ifdef CONFIG_PROC_FS 2101 proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op); 2102 proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op); 2103 proc_create_seq("vmstat", 0444, NULL, &vmstat_op); 2104 proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op); 2105 #endif 2106 } 2107 2108 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION) 2109 2110 /* 2111 * Return an index indicating how much of the available free memory is 2112 * unusable for an allocation of the requested size. 2113 */ 2114 static int unusable_free_index(unsigned int order, 2115 struct contig_page_info *info) 2116 { 2117 /* No free memory is interpreted as all free memory is unusable */ 2118 if (info->free_pages == 0) 2119 return 1000; 2120 2121 /* 2122 * Index should be a value between 0 and 1. Return a value to 3 2123 * decimal places. 2124 * 2125 * 0 => no fragmentation 2126 * 1 => high fragmentation 2127 */ 2128 return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages); 2129 2130 } 2131 2132 static void unusable_show_print(struct seq_file *m, 2133 pg_data_t *pgdat, struct zone *zone) 2134 { 2135 unsigned int order; 2136 int index; 2137 struct contig_page_info info; 2138 2139 seq_printf(m, "Node %d, zone %8s ", 2140 pgdat->node_id, 2141 zone->name); 2142 for (order = 0; order < MAX_ORDER; ++order) { 2143 fill_contig_page_info(zone, order, &info); 2144 index = unusable_free_index(order, &info); 2145 seq_printf(m, "%d.%03d ", index / 1000, index % 1000); 2146 } 2147 2148 seq_putc(m, '\n'); 2149 } 2150 2151 /* 2152 * Display unusable free space index 2153 * 2154 * The unusable free space index measures how much of the available free 2155 * memory cannot be used to satisfy an allocation of a given size and is a 2156 * value between 0 and 1. The higher the value, the more of free memory is 2157 * unusable and by implication, the worse the external fragmentation is. This 2158 * can be expressed as a percentage by multiplying by 100. 2159 */ 2160 static int unusable_show(struct seq_file *m, void *arg) 2161 { 2162 pg_data_t *pgdat = (pg_data_t *)arg; 2163 2164 /* check memoryless node */ 2165 if (!node_state(pgdat->node_id, N_MEMORY)) 2166 return 0; 2167 2168 walk_zones_in_node(m, pgdat, true, false, unusable_show_print); 2169 2170 return 0; 2171 } 2172 2173 static const struct seq_operations unusable_sops = { 2174 .start = frag_start, 2175 .next = frag_next, 2176 .stop = frag_stop, 2177 .show = unusable_show, 2178 }; 2179 2180 DEFINE_SEQ_ATTRIBUTE(unusable); 2181 2182 static void extfrag_show_print(struct seq_file *m, 2183 pg_data_t *pgdat, struct zone *zone) 2184 { 2185 unsigned int order; 2186 int index; 2187 2188 /* Alloc on stack as interrupts are disabled for zone walk */ 2189 struct contig_page_info info; 2190 2191 seq_printf(m, "Node %d, zone %8s ", 2192 pgdat->node_id, 2193 zone->name); 2194 for (order = 0; order < MAX_ORDER; ++order) { 2195 fill_contig_page_info(zone, order, &info); 2196 index = __fragmentation_index(order, &info); 2197 seq_printf(m, "%2d.%03d ", index / 1000, index % 1000); 2198 } 2199 2200 seq_putc(m, '\n'); 2201 } 2202 2203 /* 2204 * Display fragmentation index for orders that allocations would fail for 2205 */ 2206 static int extfrag_show(struct seq_file *m, void *arg) 2207 { 2208 pg_data_t *pgdat = (pg_data_t *)arg; 2209 2210 walk_zones_in_node(m, pgdat, true, false, extfrag_show_print); 2211 2212 return 0; 2213 } 2214 2215 static const struct seq_operations extfrag_sops = { 2216 .start = frag_start, 2217 .next = frag_next, 2218 .stop = frag_stop, 2219 .show = extfrag_show, 2220 }; 2221 2222 DEFINE_SEQ_ATTRIBUTE(extfrag); 2223 2224 static int __init extfrag_debug_init(void) 2225 { 2226 struct dentry *extfrag_debug_root; 2227 2228 extfrag_debug_root = debugfs_create_dir("extfrag", NULL); 2229 2230 debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL, 2231 &unusable_fops); 2232 2233 debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL, 2234 &extfrag_fops); 2235 2236 return 0; 2237 } 2238 2239 module_init(extfrag_debug_init); 2240 #endif 2241