1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/mm/vmstat.c 4 * 5 * Manages VM statistics 6 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 7 * 8 * zoned VM statistics 9 * Copyright (C) 2006 Silicon Graphics, Inc., 10 * Christoph Lameter <christoph@lameter.com> 11 * Copyright (C) 2008-2014 Christoph Lameter 12 */ 13 #include <linux/fs.h> 14 #include <linux/mm.h> 15 #include <linux/err.h> 16 #include <linux/module.h> 17 #include <linux/slab.h> 18 #include <linux/cpu.h> 19 #include <linux/cpumask.h> 20 #include <linux/vmstat.h> 21 #include <linux/proc_fs.h> 22 #include <linux/seq_file.h> 23 #include <linux/debugfs.h> 24 #include <linux/sched.h> 25 #include <linux/math64.h> 26 #include <linux/writeback.h> 27 #include <linux/compaction.h> 28 #include <linux/mm_inline.h> 29 #include <linux/page_ext.h> 30 #include <linux/page_owner.h> 31 #include <linux/migrate.h> 32 33 #include "internal.h" 34 35 #ifdef CONFIG_NUMA 36 int sysctl_vm_numa_stat = ENABLE_NUMA_STAT; 37 38 /* zero numa counters within a zone */ 39 static void zero_zone_numa_counters(struct zone *zone) 40 { 41 int item, cpu; 42 43 for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++) { 44 atomic_long_set(&zone->vm_numa_event[item], 0); 45 for_each_online_cpu(cpu) { 46 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item] 47 = 0; 48 } 49 } 50 } 51 52 /* zero numa counters of all the populated zones */ 53 static void zero_zones_numa_counters(void) 54 { 55 struct zone *zone; 56 57 for_each_populated_zone(zone) 58 zero_zone_numa_counters(zone); 59 } 60 61 /* zero global numa counters */ 62 static void zero_global_numa_counters(void) 63 { 64 int item; 65 66 for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++) 67 atomic_long_set(&vm_numa_event[item], 0); 68 } 69 70 static void invalid_numa_statistics(void) 71 { 72 zero_zones_numa_counters(); 73 zero_global_numa_counters(); 74 } 75 76 static DEFINE_MUTEX(vm_numa_stat_lock); 77 78 int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write, 79 void *buffer, size_t *length, loff_t *ppos) 80 { 81 int ret, oldval; 82 83 mutex_lock(&vm_numa_stat_lock); 84 if (write) 85 oldval = sysctl_vm_numa_stat; 86 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 87 if (ret || !write) 88 goto out; 89 90 if (oldval == sysctl_vm_numa_stat) 91 goto out; 92 else if (sysctl_vm_numa_stat == ENABLE_NUMA_STAT) { 93 static_branch_enable(&vm_numa_stat_key); 94 pr_info("enable numa statistics\n"); 95 } else { 96 static_branch_disable(&vm_numa_stat_key); 97 invalid_numa_statistics(); 98 pr_info("disable numa statistics, and clear numa counters\n"); 99 } 100 101 out: 102 mutex_unlock(&vm_numa_stat_lock); 103 return ret; 104 } 105 #endif 106 107 #ifdef CONFIG_VM_EVENT_COUNTERS 108 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}}; 109 EXPORT_PER_CPU_SYMBOL(vm_event_states); 110 111 static void sum_vm_events(unsigned long *ret) 112 { 113 int cpu; 114 int i; 115 116 memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long)); 117 118 for_each_online_cpu(cpu) { 119 struct vm_event_state *this = &per_cpu(vm_event_states, cpu); 120 121 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) 122 ret[i] += this->event[i]; 123 } 124 } 125 126 /* 127 * Accumulate the vm event counters across all CPUs. 128 * The result is unavoidably approximate - it can change 129 * during and after execution of this function. 
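 *
 * Example usage (an illustrative sketch, not taken from this file):
 *
 *	unsigned long events[NR_VM_EVENT_ITEMS];
 *
 *	all_vm_events(events);
 *	pr_info("page faults so far: %lu\n", events[PGFAULT]);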
130 */ 131 void all_vm_events(unsigned long *ret) 132 { 133 cpus_read_lock(); 134 sum_vm_events(ret); 135 cpus_read_unlock(); 136 } 137 EXPORT_SYMBOL_GPL(all_vm_events); 138 139 /* 140 * Fold the foreign cpu events into our own. 141 * 142 * This is adding to the events on one processor 143 * but keeps the global counts constant. 144 */ 145 void vm_events_fold_cpu(int cpu) 146 { 147 struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu); 148 int i; 149 150 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) { 151 count_vm_events(i, fold_state->event[i]); 152 fold_state->event[i] = 0; 153 } 154 } 155 156 #endif /* CONFIG_VM_EVENT_COUNTERS */ 157 158 /* 159 * Manage combined zone based / global counters 160 * 161 * vm_stat contains the global counters 162 */ 163 atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp; 164 atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp; 165 atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS] __cacheline_aligned_in_smp; 166 EXPORT_SYMBOL(vm_zone_stat); 167 EXPORT_SYMBOL(vm_node_stat); 168 169 #ifdef CONFIG_NUMA 170 static void fold_vm_zone_numa_events(struct zone *zone) 171 { 172 unsigned long zone_numa_events[NR_VM_NUMA_EVENT_ITEMS] = { 0, }; 173 int cpu; 174 enum numa_stat_item item; 175 176 for_each_online_cpu(cpu) { 177 struct per_cpu_zonestat *pzstats; 178 179 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 180 for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++) 181 zone_numa_events[item] += xchg(&pzstats->vm_numa_event[item], 0); 182 } 183 184 for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++) 185 zone_numa_event_add(zone_numa_events[item], zone, item); 186 } 187 188 void fold_vm_numa_events(void) 189 { 190 struct zone *zone; 191 192 for_each_populated_zone(zone) 193 fold_vm_zone_numa_events(zone); 194 } 195 #endif 196 197 #ifdef CONFIG_SMP 198 199 int calculate_pressure_threshold(struct zone *zone) 200 { 201 int threshold; 202 int watermark_distance; 203 204 /* 205 * As vmstats are not up to date, there is drift between the estimated 206 * and real values. For high thresholds and a high number of CPUs, it 207 * is possible for the min watermark to be breached while the estimated 208 * value looks fine. The pressure threshold is a reduced value such 209 * that even the maximum amount of drift will not accidentally breach 210 * the min watermark 211 */ 212 watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone); 213 threshold = max(1, (int)(watermark_distance / num_online_cpus())); 214 215 /* 216 * Maximum threshold is 125 217 */ 218 threshold = min(125, threshold); 219 220 return threshold; 221 } 222 223 int calculate_normal_threshold(struct zone *zone) 224 { 225 int threshold; 226 int mem; /* memory in 128 MB units */ 227 228 /* 229 * The threshold scales with the number of processors and the amount 230 * of memory per zone. More memory means that we can defer updates for 231 * longer, more processors could lead to more contention. 232 * fls() is used to have a cheap way of logarithmic scaling. 
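 *
 * As a worked example (illustrative): a zone managing 1 GB of memory is
 * mem = 8 units of 128 MB, so fls(mem) = 4; with 4 online CPUs, fls(4) = 3,
 * giving threshold = 2 * 3 * (1 + 4) = 30.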
233 * 234 * Some sample thresholds: 235 * 236 * Threshold Processors (fls) Zonesize fls(mem)+1 237 * ------------------------------------------------------------------ 238 * 8 1 1 0.9-1 GB 4 239 * 16 2 2 0.9-1 GB 4 240 * 20 2 2 1-2 GB 5 241 * 24 2 2 2-4 GB 6 242 * 28 2 2 4-8 GB 7 243 * 32 2 2 8-16 GB 8 244 * 4 2 2 <128M 1 245 * 30 4 3 2-4 GB 5 246 * 48 4 3 8-16 GB 8 247 * 32 8 4 1-2 GB 4 248 * 32 8 4 0.9-1GB 4 249 * 10 16 5 <128M 1 250 * 40 16 5 900M 4 251 * 70 64 7 2-4 GB 5 252 * 84 64 7 4-8 GB 6 253 * 108 512 9 4-8 GB 6 254 * 125 1024 10 8-16 GB 8 255 * 125 1024 10 16-32 GB 9 256 */ 257 258 mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT); 259 260 threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem)); 261 262 /* 263 * Maximum threshold is 125 264 */ 265 threshold = min(125, threshold); 266 267 return threshold; 268 } 269 270 /* 271 * Refresh the thresholds for each zone. 272 */ 273 void refresh_zone_stat_thresholds(void) 274 { 275 struct pglist_data *pgdat; 276 struct zone *zone; 277 int cpu; 278 int threshold; 279 280 /* Zero current pgdat thresholds */ 281 for_each_online_pgdat(pgdat) { 282 for_each_online_cpu(cpu) { 283 per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0; 284 } 285 } 286 287 for_each_populated_zone(zone) { 288 struct pglist_data *pgdat = zone->zone_pgdat; 289 unsigned long max_drift, tolerate_drift; 290 291 threshold = calculate_normal_threshold(zone); 292 293 for_each_online_cpu(cpu) { 294 int pgdat_threshold; 295 296 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold 297 = threshold; 298 299 /* Base nodestat threshold on the largest populated zone. */ 300 pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold; 301 per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold 302 = max(threshold, pgdat_threshold); 303 } 304 305 /* 306 * Only set percpu_drift_mark if there is a danger that 307 * NR_FREE_PAGES reports the low watermark is ok when in fact 308 * the min watermark could be breached by an allocation 309 */ 310 tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone); 311 max_drift = num_online_cpus() * threshold; 312 if (max_drift > tolerate_drift) 313 zone->percpu_drift_mark = high_wmark_pages(zone) + 314 max_drift; 315 } 316 } 317 318 void set_pgdat_percpu_threshold(pg_data_t *pgdat, 319 int (*calculate_pressure)(struct zone *)) 320 { 321 struct zone *zone; 322 int cpu; 323 int threshold; 324 int i; 325 326 for (i = 0; i < pgdat->nr_zones; i++) { 327 zone = &pgdat->node_zones[i]; 328 if (!zone->percpu_drift_mark) 329 continue; 330 331 threshold = (*calculate_pressure)(zone); 332 for_each_online_cpu(cpu) 333 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold 334 = threshold; 335 } 336 } 337 338 /* 339 * For use when we know that interrupts are disabled, 340 * or when we know that preemption is disabled and that 341 * particular counter cannot be updated from interrupt context. 342 */ 343 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, 344 long delta) 345 { 346 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats; 347 s8 __percpu *p = pcp->vm_stat_diff + item; 348 long x; 349 long t; 350 351 /* 352 * Accurate vmstat updates require a RMW. On !PREEMPT_RT kernels, 353 * atomicity is provided by IRQs being disabled -- either explicitly 354 * or via local_lock_irq. On PREEMPT_RT, local_lock_irq only disables 355 * CPU migrations and preemption potentially corrupts a counter so 356 * disable preemption. 
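 *
 * For example (an illustrative sketch), the interrupt-serialized wrappers
 * in the !CONFIG_HAVE_CMPXCHG_LOCAL block further down satisfy this by
 * bracketing the call:
 *
 *	local_irq_save(flags);
 *	__mod_zone_page_state(zone, item, delta);
 *	local_irq_restore(flags);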
 */
	preempt_disable_nested();

	x = delta + __this_cpu_read(*p);

	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(abs(x) > t)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	__this_cpu_write(*p, x);

	preempt_enable_nested();
}
EXPORT_SYMBOL(__mod_zone_page_state);

void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
				long delta)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	long x;
	long t;

	if (vmstat_item_in_bytes(item)) {
		/*
		 * Only cgroups use subpage accounting right now; at
		 * the global level, these items still change in
		 * multiples of whole pages. Store them as pages
		 * internally to keep the per-cpu counters compact.
		 */
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	/* See __mod_zone_page_state */
	preempt_disable_nested();

	x = delta + __this_cpu_read(*p);

	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(abs(x) > t)) {
		node_page_state_add(x, pgdat, item);
		x = 0;
	}
	__this_cpu_write(*p, x);

	preempt_enable_nested();
}
EXPORT_SYMBOL(__mod_node_page_state);

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
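 *
 * Worked example of the overstep handling below (illustrative): with a
 * stat_threshold of 32, the increment that takes a per-cpu diff to 33
 * folds 33 + 16 into the zone counter and resets the diff to -16. The
 * zone counter plus the per-cpu diff still equals the 33 increments seen,
 * and the CPU regains about half a threshold of headroom before the next
 * fold is needed.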
432 */ 433 void __inc_zone_state(struct zone *zone, enum zone_stat_item item) 434 { 435 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats; 436 s8 __percpu *p = pcp->vm_stat_diff + item; 437 s8 v, t; 438 439 /* See __mod_node_page_state */ 440 preempt_disable_nested(); 441 442 v = __this_cpu_inc_return(*p); 443 t = __this_cpu_read(pcp->stat_threshold); 444 if (unlikely(v > t)) { 445 s8 overstep = t >> 1; 446 447 zone_page_state_add(v + overstep, zone, item); 448 __this_cpu_write(*p, -overstep); 449 } 450 451 preempt_enable_nested(); 452 } 453 454 void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) 455 { 456 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats; 457 s8 __percpu *p = pcp->vm_node_stat_diff + item; 458 s8 v, t; 459 460 VM_WARN_ON_ONCE(vmstat_item_in_bytes(item)); 461 462 /* See __mod_node_page_state */ 463 preempt_disable_nested(); 464 465 v = __this_cpu_inc_return(*p); 466 t = __this_cpu_read(pcp->stat_threshold); 467 if (unlikely(v > t)) { 468 s8 overstep = t >> 1; 469 470 node_page_state_add(v + overstep, pgdat, item); 471 __this_cpu_write(*p, -overstep); 472 } 473 474 preempt_enable_nested(); 475 } 476 477 void __inc_zone_page_state(struct page *page, enum zone_stat_item item) 478 { 479 __inc_zone_state(page_zone(page), item); 480 } 481 EXPORT_SYMBOL(__inc_zone_page_state); 482 483 void __inc_node_page_state(struct page *page, enum node_stat_item item) 484 { 485 __inc_node_state(page_pgdat(page), item); 486 } 487 EXPORT_SYMBOL(__inc_node_page_state); 488 489 void __dec_zone_state(struct zone *zone, enum zone_stat_item item) 490 { 491 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats; 492 s8 __percpu *p = pcp->vm_stat_diff + item; 493 s8 v, t; 494 495 /* See __mod_node_page_state */ 496 preempt_disable_nested(); 497 498 v = __this_cpu_dec_return(*p); 499 t = __this_cpu_read(pcp->stat_threshold); 500 if (unlikely(v < - t)) { 501 s8 overstep = t >> 1; 502 503 zone_page_state_add(v - overstep, zone, item); 504 __this_cpu_write(*p, overstep); 505 } 506 507 preempt_enable_nested(); 508 } 509 510 void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item) 511 { 512 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats; 513 s8 __percpu *p = pcp->vm_node_stat_diff + item; 514 s8 v, t; 515 516 VM_WARN_ON_ONCE(vmstat_item_in_bytes(item)); 517 518 /* See __mod_node_page_state */ 519 preempt_disable_nested(); 520 521 v = __this_cpu_dec_return(*p); 522 t = __this_cpu_read(pcp->stat_threshold); 523 if (unlikely(v < - t)) { 524 s8 overstep = t >> 1; 525 526 node_page_state_add(v - overstep, pgdat, item); 527 __this_cpu_write(*p, overstep); 528 } 529 530 preempt_enable_nested(); 531 } 532 533 void __dec_zone_page_state(struct page *page, enum zone_stat_item item) 534 { 535 __dec_zone_state(page_zone(page), item); 536 } 537 EXPORT_SYMBOL(__dec_zone_page_state); 538 539 void __dec_node_page_state(struct page *page, enum node_stat_item item) 540 { 541 __dec_node_state(page_pgdat(page), item); 542 } 543 EXPORT_SYMBOL(__dec_node_page_state); 544 545 #ifdef CONFIG_HAVE_CMPXCHG_LOCAL 546 /* 547 * If we have cmpxchg_local support then we do not need to incur the overhead 548 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg. 549 * 550 * mod_state() modifies the zone counter state through atomic per cpu 551 * operations. 
 *
 * Overstep mode specifies how overstep should be handled:
 *        0       No overstepping
 *        1       Overstepping half of threshold
 *        -1      Overstepping minus half of threshold
 */
static inline void mod_zone_state(struct zone *zone,
	enum zone_stat_item item, long delta, int overstep_mode)
{
	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long o, n, t, z;

	do {
		z = 0;  /* overflow to zone counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong cpu if we get
		 * rescheduled while executing here. However, the next
		 * counter update will apply the threshold again and
		 * therefore bring the counter under the threshold again.
		 *
		 * Most of the time the thresholds are the same anyway
		 * for all cpus in a zone.
		 */
		t = this_cpu_read(pcp->stat_threshold);

		o = this_cpu_read(*p);
		n = delta + o;

		if (abs(n) > t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to zone counters */
			z = n + os;
			n = -os;
		}
	} while (this_cpu_cmpxchg(*p, o, n) != o);

	if (z)
		zone_page_state_add(z, zone, item);
}

void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			 long delta)
{
	mod_zone_state(zone, item, delta, 0);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_zone_state(page_zone(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_zone_state(page_zone(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_zone_page_state);

static inline void mod_node_state(struct pglist_data *pgdat,
	enum node_stat_item item, int delta, int overstep_mode)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	long o, n, t, z;

	if (vmstat_item_in_bytes(item)) {
		/*
		 * Only cgroups use subpage accounting right now; at
		 * the global level, these items still change in
		 * multiples of whole pages. Store them as pages
		 * internally to keep the per-cpu counters compact.
		 */
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	do {
		z = 0;	/* overflow to node counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong cpu if we get
		 * rescheduled while executing here. However, the next
		 * counter update will apply the threshold again and
		 * therefore bring the counter under the threshold again.
		 *
		 * Most of the time the thresholds are the same anyway
		 * for all cpus in a node.
645 */ 646 t = this_cpu_read(pcp->stat_threshold); 647 648 o = this_cpu_read(*p); 649 n = delta + o; 650 651 if (abs(n) > t) { 652 int os = overstep_mode * (t >> 1) ; 653 654 /* Overflow must be added to node counters */ 655 z = n + os; 656 n = -os; 657 } 658 } while (this_cpu_cmpxchg(*p, o, n) != o); 659 660 if (z) 661 node_page_state_add(z, pgdat, item); 662 } 663 664 void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item, 665 long delta) 666 { 667 mod_node_state(pgdat, item, delta, 0); 668 } 669 EXPORT_SYMBOL(mod_node_page_state); 670 671 void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) 672 { 673 mod_node_state(pgdat, item, 1, 1); 674 } 675 676 void inc_node_page_state(struct page *page, enum node_stat_item item) 677 { 678 mod_node_state(page_pgdat(page), item, 1, 1); 679 } 680 EXPORT_SYMBOL(inc_node_page_state); 681 682 void dec_node_page_state(struct page *page, enum node_stat_item item) 683 { 684 mod_node_state(page_pgdat(page), item, -1, -1); 685 } 686 EXPORT_SYMBOL(dec_node_page_state); 687 #else 688 /* 689 * Use interrupt disable to serialize counter updates 690 */ 691 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item, 692 long delta) 693 { 694 unsigned long flags; 695 696 local_irq_save(flags); 697 __mod_zone_page_state(zone, item, delta); 698 local_irq_restore(flags); 699 } 700 EXPORT_SYMBOL(mod_zone_page_state); 701 702 void inc_zone_page_state(struct page *page, enum zone_stat_item item) 703 { 704 unsigned long flags; 705 struct zone *zone; 706 707 zone = page_zone(page); 708 local_irq_save(flags); 709 __inc_zone_state(zone, item); 710 local_irq_restore(flags); 711 } 712 EXPORT_SYMBOL(inc_zone_page_state); 713 714 void dec_zone_page_state(struct page *page, enum zone_stat_item item) 715 { 716 unsigned long flags; 717 718 local_irq_save(flags); 719 __dec_zone_page_state(page, item); 720 local_irq_restore(flags); 721 } 722 EXPORT_SYMBOL(dec_zone_page_state); 723 724 void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) 725 { 726 unsigned long flags; 727 728 local_irq_save(flags); 729 __inc_node_state(pgdat, item); 730 local_irq_restore(flags); 731 } 732 EXPORT_SYMBOL(inc_node_state); 733 734 void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item, 735 long delta) 736 { 737 unsigned long flags; 738 739 local_irq_save(flags); 740 __mod_node_page_state(pgdat, item, delta); 741 local_irq_restore(flags); 742 } 743 EXPORT_SYMBOL(mod_node_page_state); 744 745 void inc_node_page_state(struct page *page, enum node_stat_item item) 746 { 747 unsigned long flags; 748 struct pglist_data *pgdat; 749 750 pgdat = page_pgdat(page); 751 local_irq_save(flags); 752 __inc_node_state(pgdat, item); 753 local_irq_restore(flags); 754 } 755 EXPORT_SYMBOL(inc_node_page_state); 756 757 void dec_node_page_state(struct page *page, enum node_stat_item item) 758 { 759 unsigned long flags; 760 761 local_irq_save(flags); 762 __dec_node_page_state(page, item); 763 local_irq_restore(flags); 764 } 765 EXPORT_SYMBOL(dec_node_page_state); 766 #endif 767 768 /* 769 * Fold a differential into the global counters. 770 * Returns the number of counters updated. 
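 *
 * The count of updated counters propagates up through
 * refresh_cpu_vm_stats(), where vmstat_update() uses it to decide whether
 * the per-cpu update work needs to stay scheduled.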
771 */ 772 static int fold_diff(int *zone_diff, int *node_diff) 773 { 774 int i; 775 int changes = 0; 776 777 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) 778 if (zone_diff[i]) { 779 atomic_long_add(zone_diff[i], &vm_zone_stat[i]); 780 changes++; 781 } 782 783 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) 784 if (node_diff[i]) { 785 atomic_long_add(node_diff[i], &vm_node_stat[i]); 786 changes++; 787 } 788 return changes; 789 } 790 791 /* 792 * Update the zone counters for the current cpu. 793 * 794 * Note that refresh_cpu_vm_stats strives to only access 795 * node local memory. The per cpu pagesets on remote zones are placed 796 * in the memory local to the processor using that pageset. So the 797 * loop over all zones will access a series of cachelines local to 798 * the processor. 799 * 800 * The call to zone_page_state_add updates the cachelines with the 801 * statistics in the remote zone struct as well as the global cachelines 802 * with the global counters. These could cause remote node cache line 803 * bouncing and will have to be only done when necessary. 804 * 805 * The function returns the number of global counters updated. 806 */ 807 static int refresh_cpu_vm_stats(bool do_pagesets) 808 { 809 struct pglist_data *pgdat; 810 struct zone *zone; 811 int i; 812 int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, }; 813 int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, }; 814 int changes = 0; 815 816 for_each_populated_zone(zone) { 817 struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats; 818 #ifdef CONFIG_NUMA 819 struct per_cpu_pages __percpu *pcp = zone->per_cpu_pageset; 820 #endif 821 822 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) { 823 int v; 824 825 v = this_cpu_xchg(pzstats->vm_stat_diff[i], 0); 826 if (v) { 827 828 atomic_long_add(v, &zone->vm_stat[i]); 829 global_zone_diff[i] += v; 830 #ifdef CONFIG_NUMA 831 /* 3 seconds idle till flush */ 832 __this_cpu_write(pcp->expire, 3); 833 #endif 834 } 835 } 836 #ifdef CONFIG_NUMA 837 838 if (do_pagesets) { 839 cond_resched(); 840 /* 841 * Deal with draining the remote pageset of this 842 * processor 843 * 844 * Check if there are pages remaining in this pageset 845 * if not then there is nothing to expire. 846 */ 847 if (!__this_cpu_read(pcp->expire) || 848 !__this_cpu_read(pcp->count)) 849 continue; 850 851 /* 852 * We never drain zones local to this processor. 853 */ 854 if (zone_to_nid(zone) == numa_node_id()) { 855 __this_cpu_write(pcp->expire, 0); 856 continue; 857 } 858 859 if (__this_cpu_dec_return(pcp->expire)) 860 continue; 861 862 if (__this_cpu_read(pcp->count)) { 863 drain_zone_pages(zone, this_cpu_ptr(pcp)); 864 changes++; 865 } 866 } 867 #endif 868 } 869 870 for_each_online_pgdat(pgdat) { 871 struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats; 872 873 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) { 874 int v; 875 876 v = this_cpu_xchg(p->vm_node_stat_diff[i], 0); 877 if (v) { 878 atomic_long_add(v, &pgdat->vm_stat[i]); 879 global_node_diff[i] += v; 880 } 881 } 882 } 883 884 changes += fold_diff(global_zone_diff, global_node_diff); 885 return changes; 886 } 887 888 /* 889 * Fold the data for an offline cpu into the global array. 890 * There cannot be any access by the offline cpu and therefore 891 * synchronization is simplified. 
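 *
 * In particular, plain reads and writes of the per-cpu diffs are enough
 * here; the this_cpu_xchg() used by refresh_cpu_vm_stats() for a live CPU
 * is not needed.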
892 */ 893 void cpu_vm_stats_fold(int cpu) 894 { 895 struct pglist_data *pgdat; 896 struct zone *zone; 897 int i; 898 int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, }; 899 int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, }; 900 901 for_each_populated_zone(zone) { 902 struct per_cpu_zonestat *pzstats; 903 904 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 905 906 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) { 907 if (pzstats->vm_stat_diff[i]) { 908 int v; 909 910 v = pzstats->vm_stat_diff[i]; 911 pzstats->vm_stat_diff[i] = 0; 912 atomic_long_add(v, &zone->vm_stat[i]); 913 global_zone_diff[i] += v; 914 } 915 } 916 #ifdef CONFIG_NUMA 917 for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) { 918 if (pzstats->vm_numa_event[i]) { 919 unsigned long v; 920 921 v = pzstats->vm_numa_event[i]; 922 pzstats->vm_numa_event[i] = 0; 923 zone_numa_event_add(v, zone, i); 924 } 925 } 926 #endif 927 } 928 929 for_each_online_pgdat(pgdat) { 930 struct per_cpu_nodestat *p; 931 932 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu); 933 934 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) 935 if (p->vm_node_stat_diff[i]) { 936 int v; 937 938 v = p->vm_node_stat_diff[i]; 939 p->vm_node_stat_diff[i] = 0; 940 atomic_long_add(v, &pgdat->vm_stat[i]); 941 global_node_diff[i] += v; 942 } 943 } 944 945 fold_diff(global_zone_diff, global_node_diff); 946 } 947 948 /* 949 * this is only called if !populated_zone(zone), which implies no other users of 950 * pset->vm_stat_diff[] exist. 951 */ 952 void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *pzstats) 953 { 954 unsigned long v; 955 int i; 956 957 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) { 958 if (pzstats->vm_stat_diff[i]) { 959 v = pzstats->vm_stat_diff[i]; 960 pzstats->vm_stat_diff[i] = 0; 961 zone_page_state_add(v, zone, i); 962 } 963 } 964 965 #ifdef CONFIG_NUMA 966 for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) { 967 if (pzstats->vm_numa_event[i]) { 968 v = pzstats->vm_numa_event[i]; 969 pzstats->vm_numa_event[i] = 0; 970 zone_numa_event_add(v, zone, i); 971 } 972 } 973 #endif 974 } 975 #endif 976 977 #ifdef CONFIG_NUMA 978 /* 979 * Determine the per node value of a stat item. This function 980 * is called frequently in a NUMA machine, so try to be as 981 * frugal as possible. 982 */ 983 unsigned long sum_zone_node_page_state(int node, 984 enum zone_stat_item item) 985 { 986 struct zone *zones = NODE_DATA(node)->node_zones; 987 int i; 988 unsigned long count = 0; 989 990 for (i = 0; i < MAX_NR_ZONES; i++) 991 count += zone_page_state(zones + i, item); 992 993 return count; 994 } 995 996 /* Determine the per node value of a numa stat item. */ 997 unsigned long sum_zone_numa_event_state(int node, 998 enum numa_stat_item item) 999 { 1000 struct zone *zones = NODE_DATA(node)->node_zones; 1001 unsigned long count = 0; 1002 int i; 1003 1004 for (i = 0; i < MAX_NR_ZONES; i++) 1005 count += zone_numa_event_state(zones + i, item); 1006 1007 return count; 1008 } 1009 1010 /* 1011 * Determine the per node value of a stat item. 
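 *
 * On SMP the result is clamped to zero because unflushed per-cpu diffs can
 * make the global sum transiently negative (see the comment in
 * vmstat_refresh()).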
1012 */ 1013 unsigned long node_page_state_pages(struct pglist_data *pgdat, 1014 enum node_stat_item item) 1015 { 1016 long x = atomic_long_read(&pgdat->vm_stat[item]); 1017 #ifdef CONFIG_SMP 1018 if (x < 0) 1019 x = 0; 1020 #endif 1021 return x; 1022 } 1023 1024 unsigned long node_page_state(struct pglist_data *pgdat, 1025 enum node_stat_item item) 1026 { 1027 VM_WARN_ON_ONCE(vmstat_item_in_bytes(item)); 1028 1029 return node_page_state_pages(pgdat, item); 1030 } 1031 #endif 1032 1033 #ifdef CONFIG_COMPACTION 1034 1035 struct contig_page_info { 1036 unsigned long free_pages; 1037 unsigned long free_blocks_total; 1038 unsigned long free_blocks_suitable; 1039 }; 1040 1041 /* 1042 * Calculate the number of free pages in a zone, how many contiguous 1043 * pages are free and how many are large enough to satisfy an allocation of 1044 * the target size. Note that this function makes no attempt to estimate 1045 * how many suitable free blocks there *might* be if MOVABLE pages were 1046 * migrated. Calculating that is possible, but expensive and can be 1047 * figured out from userspace 1048 */ 1049 static void fill_contig_page_info(struct zone *zone, 1050 unsigned int suitable_order, 1051 struct contig_page_info *info) 1052 { 1053 unsigned int order; 1054 1055 info->free_pages = 0; 1056 info->free_blocks_total = 0; 1057 info->free_blocks_suitable = 0; 1058 1059 for (order = 0; order < MAX_ORDER; order++) { 1060 unsigned long blocks; 1061 1062 /* 1063 * Count number of free blocks. 1064 * 1065 * Access to nr_free is lockless as nr_free is used only for 1066 * diagnostic purposes. Use data_race to avoid KCSAN warning. 1067 */ 1068 blocks = data_race(zone->free_area[order].nr_free); 1069 info->free_blocks_total += blocks; 1070 1071 /* Count free base pages */ 1072 info->free_pages += blocks << order; 1073 1074 /* Count the suitable free blocks */ 1075 if (order >= suitable_order) 1076 info->free_blocks_suitable += blocks << 1077 (order - suitable_order); 1078 } 1079 } 1080 1081 /* 1082 * A fragmentation index only makes sense if an allocation of a requested 1083 * size would fail. If that is true, the fragmentation index indicates 1084 * whether external fragmentation or a lack of memory was the problem. 1085 * The value can be used to determine if page reclaim or compaction 1086 * should be used 1087 */ 1088 static int __fragmentation_index(unsigned int order, struct contig_page_info *info) 1089 { 1090 unsigned long requested = 1UL << order; 1091 1092 if (WARN_ON_ONCE(order >= MAX_ORDER)) 1093 return 0; 1094 1095 if (!info->free_blocks_total) 1096 return 0; 1097 1098 /* Fragmentation index only makes sense when a request would fail */ 1099 if (info->free_blocks_suitable) 1100 return -1000; 1101 1102 /* 1103 * Index is between 0 and 1 so return within 3 decimal places 1104 * 1105 * 0 => allocation would fail due to lack of memory 1106 * 1 => allocation would fail due to fragmentation 1107 */ 1108 return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total); 1109 } 1110 1111 /* 1112 * Calculates external fragmentation within a zone wrt the given order. 1113 * It is defined as the percentage of pages found in blocks of size 1114 * less than 1 << order. It returns values in range [0, 100]. 
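 *
 * Worked example (illustrative): if a zone has 1000 free pages and blocks
 * of at least the requested order cover 600 of them
 * (free_blocks_suitable << order == 600), the result is
 * (1000 - 600) * 100 / 1000 = 40, i.e. 40% of the free memory sits in
 * blocks that are too small for the request.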
1115 */ 1116 unsigned int extfrag_for_order(struct zone *zone, unsigned int order) 1117 { 1118 struct contig_page_info info; 1119 1120 fill_contig_page_info(zone, order, &info); 1121 if (info.free_pages == 0) 1122 return 0; 1123 1124 return div_u64((info.free_pages - 1125 (info.free_blocks_suitable << order)) * 100, 1126 info.free_pages); 1127 } 1128 1129 /* Same as __fragmentation index but allocs contig_page_info on stack */ 1130 int fragmentation_index(struct zone *zone, unsigned int order) 1131 { 1132 struct contig_page_info info; 1133 1134 fill_contig_page_info(zone, order, &info); 1135 return __fragmentation_index(order, &info); 1136 } 1137 #endif 1138 1139 #if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || \ 1140 defined(CONFIG_NUMA) || defined(CONFIG_MEMCG) 1141 #ifdef CONFIG_ZONE_DMA 1142 #define TEXT_FOR_DMA(xx) xx "_dma", 1143 #else 1144 #define TEXT_FOR_DMA(xx) 1145 #endif 1146 1147 #ifdef CONFIG_ZONE_DMA32 1148 #define TEXT_FOR_DMA32(xx) xx "_dma32", 1149 #else 1150 #define TEXT_FOR_DMA32(xx) 1151 #endif 1152 1153 #ifdef CONFIG_HIGHMEM 1154 #define TEXT_FOR_HIGHMEM(xx) xx "_high", 1155 #else 1156 #define TEXT_FOR_HIGHMEM(xx) 1157 #endif 1158 1159 #ifdef CONFIG_ZONE_DEVICE 1160 #define TEXT_FOR_DEVICE(xx) xx "_device", 1161 #else 1162 #define TEXT_FOR_DEVICE(xx) 1163 #endif 1164 1165 #define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \ 1166 TEXT_FOR_HIGHMEM(xx) xx "_movable", \ 1167 TEXT_FOR_DEVICE(xx) 1168 1169 const char * const vmstat_text[] = { 1170 /* enum zone_stat_item counters */ 1171 "nr_free_pages", 1172 "nr_zone_inactive_anon", 1173 "nr_zone_active_anon", 1174 "nr_zone_inactive_file", 1175 "nr_zone_active_file", 1176 "nr_zone_unevictable", 1177 "nr_zone_write_pending", 1178 "nr_mlock", 1179 "nr_bounce", 1180 #if IS_ENABLED(CONFIG_ZSMALLOC) 1181 "nr_zspages", 1182 #endif 1183 "nr_free_cma", 1184 1185 /* enum numa_stat_item counters */ 1186 #ifdef CONFIG_NUMA 1187 "numa_hit", 1188 "numa_miss", 1189 "numa_foreign", 1190 "numa_interleave", 1191 "numa_local", 1192 "numa_other", 1193 #endif 1194 1195 /* enum node_stat_item counters */ 1196 "nr_inactive_anon", 1197 "nr_active_anon", 1198 "nr_inactive_file", 1199 "nr_active_file", 1200 "nr_unevictable", 1201 "nr_slab_reclaimable", 1202 "nr_slab_unreclaimable", 1203 "nr_isolated_anon", 1204 "nr_isolated_file", 1205 "workingset_nodes", 1206 "workingset_refault_anon", 1207 "workingset_refault_file", 1208 "workingset_activate_anon", 1209 "workingset_activate_file", 1210 "workingset_restore_anon", 1211 "workingset_restore_file", 1212 "workingset_nodereclaim", 1213 "nr_anon_pages", 1214 "nr_mapped", 1215 "nr_file_pages", 1216 "nr_dirty", 1217 "nr_writeback", 1218 "nr_writeback_temp", 1219 "nr_shmem", 1220 "nr_shmem_hugepages", 1221 "nr_shmem_pmdmapped", 1222 "nr_file_hugepages", 1223 "nr_file_pmdmapped", 1224 "nr_anon_transparent_hugepages", 1225 "nr_vmscan_write", 1226 "nr_vmscan_immediate_reclaim", 1227 "nr_dirtied", 1228 "nr_written", 1229 "nr_throttled_written", 1230 "nr_kernel_misc_reclaimable", 1231 "nr_foll_pin_acquired", 1232 "nr_foll_pin_released", 1233 "nr_kernel_stack", 1234 #if IS_ENABLED(CONFIG_SHADOW_CALL_STACK) 1235 "nr_shadow_call_stack", 1236 #endif 1237 "nr_page_table_pages", 1238 "nr_sec_page_table_pages", 1239 #ifdef CONFIG_SWAP 1240 "nr_swapcached", 1241 #endif 1242 #ifdef CONFIG_NUMA_BALANCING 1243 "pgpromote_success", 1244 #endif 1245 1246 /* enum writeback_stat_item counters */ 1247 "nr_dirty_threshold", 1248 "nr_dirty_background_threshold", 1249 1250 #if 
defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG) 1251 /* enum vm_event_item counters */ 1252 "pgpgin", 1253 "pgpgout", 1254 "pswpin", 1255 "pswpout", 1256 1257 TEXTS_FOR_ZONES("pgalloc") 1258 TEXTS_FOR_ZONES("allocstall") 1259 TEXTS_FOR_ZONES("pgskip") 1260 1261 "pgfree", 1262 "pgactivate", 1263 "pgdeactivate", 1264 "pglazyfree", 1265 1266 "pgfault", 1267 "pgmajfault", 1268 "pglazyfreed", 1269 1270 "pgrefill", 1271 "pgreuse", 1272 "pgsteal_kswapd", 1273 "pgsteal_direct", 1274 "pgdemote_kswapd", 1275 "pgdemote_direct", 1276 "pgscan_kswapd", 1277 "pgscan_direct", 1278 "pgscan_direct_throttle", 1279 "pgscan_anon", 1280 "pgscan_file", 1281 "pgsteal_anon", 1282 "pgsteal_file", 1283 1284 #ifdef CONFIG_NUMA 1285 "zone_reclaim_failed", 1286 #endif 1287 "pginodesteal", 1288 "slabs_scanned", 1289 "kswapd_inodesteal", 1290 "kswapd_low_wmark_hit_quickly", 1291 "kswapd_high_wmark_hit_quickly", 1292 "pageoutrun", 1293 1294 "pgrotated", 1295 1296 "drop_pagecache", 1297 "drop_slab", 1298 "oom_kill", 1299 1300 #ifdef CONFIG_NUMA_BALANCING 1301 "numa_pte_updates", 1302 "numa_huge_pte_updates", 1303 "numa_hint_faults", 1304 "numa_hint_faults_local", 1305 "numa_pages_migrated", 1306 #endif 1307 #ifdef CONFIG_MIGRATION 1308 "pgmigrate_success", 1309 "pgmigrate_fail", 1310 "thp_migration_success", 1311 "thp_migration_fail", 1312 "thp_migration_split", 1313 #endif 1314 #ifdef CONFIG_COMPACTION 1315 "compact_migrate_scanned", 1316 "compact_free_scanned", 1317 "compact_isolated", 1318 "compact_stall", 1319 "compact_fail", 1320 "compact_success", 1321 "compact_daemon_wake", 1322 "compact_daemon_migrate_scanned", 1323 "compact_daemon_free_scanned", 1324 #endif 1325 1326 #ifdef CONFIG_HUGETLB_PAGE 1327 "htlb_buddy_alloc_success", 1328 "htlb_buddy_alloc_fail", 1329 #endif 1330 #ifdef CONFIG_CMA 1331 "cma_alloc_success", 1332 "cma_alloc_fail", 1333 #endif 1334 "unevictable_pgs_culled", 1335 "unevictable_pgs_scanned", 1336 "unevictable_pgs_rescued", 1337 "unevictable_pgs_mlocked", 1338 "unevictable_pgs_munlocked", 1339 "unevictable_pgs_cleared", 1340 "unevictable_pgs_stranded", 1341 1342 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1343 "thp_fault_alloc", 1344 "thp_fault_fallback", 1345 "thp_fault_fallback_charge", 1346 "thp_collapse_alloc", 1347 "thp_collapse_alloc_failed", 1348 "thp_file_alloc", 1349 "thp_file_fallback", 1350 "thp_file_fallback_charge", 1351 "thp_file_mapped", 1352 "thp_split_page", 1353 "thp_split_page_failed", 1354 "thp_deferred_split_page", 1355 "thp_split_pmd", 1356 "thp_scan_exceed_none_pte", 1357 "thp_scan_exceed_swap_pte", 1358 "thp_scan_exceed_share_pte", 1359 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 1360 "thp_split_pud", 1361 #endif 1362 "thp_zero_page_alloc", 1363 "thp_zero_page_alloc_failed", 1364 "thp_swpout", 1365 "thp_swpout_fallback", 1366 #endif 1367 #ifdef CONFIG_MEMORY_BALLOON 1368 "balloon_inflate", 1369 "balloon_deflate", 1370 #ifdef CONFIG_BALLOON_COMPACTION 1371 "balloon_migrate", 1372 #endif 1373 #endif /* CONFIG_MEMORY_BALLOON */ 1374 #ifdef CONFIG_DEBUG_TLBFLUSH 1375 "nr_tlb_remote_flush", 1376 "nr_tlb_remote_flush_received", 1377 "nr_tlb_local_flush_all", 1378 "nr_tlb_local_flush_one", 1379 #endif /* CONFIG_DEBUG_TLBFLUSH */ 1380 1381 #ifdef CONFIG_DEBUG_VM_VMACACHE 1382 "vmacache_find_calls", 1383 "vmacache_find_hits", 1384 #endif 1385 #ifdef CONFIG_SWAP 1386 "swap_ra", 1387 "swap_ra_hit", 1388 #ifdef CONFIG_KSM 1389 "ksm_swpin_copy", 1390 #endif 1391 #endif 1392 #ifdef CONFIG_KSM 1393 "cow_ksm", 1394 #endif 1395 #ifdef CONFIG_ZSWAP 1396 "zswpin", 1397 "zswpout", 
#endif
#ifdef CONFIG_X86
	"direct_map_level2_splits",
	"direct_map_level3_splits",
#endif
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
};
#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */

#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
     defined(CONFIG_PROC_FS)
static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;

	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/*
 * Walk zones in a node and print using a callback.
 * If @assert_populated is true, only use callback for zones that are populated.
 */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
		bool assert_populated, bool nolock,
		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (assert_populated && !populated_zone(zone))
			continue;

		if (!nolock)
			spin_lock_irqsave(&zone->lock, flags);
		print(m, pgdat, zone);
		if (!nolock)
			spin_unlock_irqrestore(&zone->lock, flags);
	}
}
#endif

#ifdef CONFIG_PROC_FS
static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
						struct zone *zone)
{
	int order;

	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (order = 0; order < MAX_ORDER; ++order)
		/*
		 * Access to nr_free is lockless as nr_free is used only for
		 * printing purposes. Use data_race to avoid KCSAN warning.
		 */
		seq_printf(m, "%6lu ", data_race(zone->free_area[order].nr_free));
	seq_putc(m, '\n');
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, true, false, frag_show_print);
	return 0;
}

static void pagetypeinfo_showfree_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int order, mtype;

	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
		seq_printf(m, "Node %4d, zone %8s, type %12s ",
					pgdat->node_id,
					zone->name,
					migratetype_names[mtype]);
		for (order = 0; order < MAX_ORDER; ++order) {
			unsigned long freecount = 0;
			struct free_area *area;
			struct list_head *curr;
			bool overflow = false;

			area = &(zone->free_area[order]);

			list_for_each(curr, &area->free_list[mtype]) {
				/*
				 * Cap the free_list iteration because it might
				 * be really large and we are under a spinlock
				 * so a long time spent here could trigger a
				 * hard lockup detector. Anyway this is a
				 * debugging tool so knowing there is a handful
				 * of pages of this order should be more than
				 * sufficient.
				 */
				if (++freecount >= 100000) {
					overflow = true;
					break;
				}
			}
			seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
			spin_unlock_irq(&zone->lock);
			cond_resched();
			spin_lock_irq(&zone->lock);
		}
		seq_putc(m, '\n');
	}
}

/* Print out the free pages at each order for each migratetype */
static void pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
	int order;
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* Print header */
	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6d ", order);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, true, false, pagetypeinfo_showfree_print);
}

static void pagetypeinfo_showblockcount_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int mtype;
	unsigned long pfn;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count[MIGRATE_TYPES] = { 0, };

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		page = pfn_to_online_page(pfn);
		if (!page)
			continue;

		if (page_zone(page) != zone)
			continue;

		mtype = get_pageblock_migratetype(page);

		if (mtype < MIGRATE_TYPES)
			count[mtype]++;
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12lu ", count[mtype]);
	seq_putc(m, '\n');
}

/* Print out the number of pageblocks for each migratetype */
static void pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
	int mtype;
	pg_data_t *pgdat = (pg_data_t *)arg;

	seq_printf(m, "\n%-23s", "Number of blocks type ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');
	walk_zones_in_node(m, pgdat, true, false,
		pagetypeinfo_showblockcount_print);
}

/*
 * Print out the number of pageblocks for each migratetype that contain pages
 * of other types. This gives an indication of how well fallbacks are being
 * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
 * to determine what is going on.
 */
static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
{
#ifdef CONFIG_PAGE_OWNER
	int mtype;

	if (!static_branch_unlikely(&page_owner_inited))
		return;

	drain_all_pages(NULL);

	seq_printf(m, "\n%-23s", "Number of mixed blocks ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, true, true,
		pagetypeinfo_showmixedcount_print);
#endif /* CONFIG_PAGE_OWNER */
}

/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not constantly read the file.
1617 */ 1618 static int pagetypeinfo_show(struct seq_file *m, void *arg) 1619 { 1620 pg_data_t *pgdat = (pg_data_t *)arg; 1621 1622 /* check memoryless node */ 1623 if (!node_state(pgdat->node_id, N_MEMORY)) 1624 return 0; 1625 1626 seq_printf(m, "Page block order: %d\n", pageblock_order); 1627 seq_printf(m, "Pages per block: %lu\n", pageblock_nr_pages); 1628 seq_putc(m, '\n'); 1629 pagetypeinfo_showfree(m, pgdat); 1630 pagetypeinfo_showblockcount(m, pgdat); 1631 pagetypeinfo_showmixedcount(m, pgdat); 1632 1633 return 0; 1634 } 1635 1636 static const struct seq_operations fragmentation_op = { 1637 .start = frag_start, 1638 .next = frag_next, 1639 .stop = frag_stop, 1640 .show = frag_show, 1641 }; 1642 1643 static const struct seq_operations pagetypeinfo_op = { 1644 .start = frag_start, 1645 .next = frag_next, 1646 .stop = frag_stop, 1647 .show = pagetypeinfo_show, 1648 }; 1649 1650 static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone) 1651 { 1652 int zid; 1653 1654 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 1655 struct zone *compare = &pgdat->node_zones[zid]; 1656 1657 if (populated_zone(compare)) 1658 return zone == compare; 1659 } 1660 1661 return false; 1662 } 1663 1664 static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, 1665 struct zone *zone) 1666 { 1667 int i; 1668 seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name); 1669 if (is_zone_first_populated(pgdat, zone)) { 1670 seq_printf(m, "\n per-node stats"); 1671 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) { 1672 unsigned long pages = node_page_state_pages(pgdat, i); 1673 1674 if (vmstat_item_print_in_thp(i)) 1675 pages /= HPAGE_PMD_NR; 1676 seq_printf(m, "\n %-12s %lu", node_stat_name(i), 1677 pages); 1678 } 1679 } 1680 seq_printf(m, 1681 "\n pages free %lu" 1682 "\n boost %lu" 1683 "\n min %lu" 1684 "\n low %lu" 1685 "\n high %lu" 1686 "\n spanned %lu" 1687 "\n present %lu" 1688 "\n managed %lu" 1689 "\n cma %lu", 1690 zone_page_state(zone, NR_FREE_PAGES), 1691 zone->watermark_boost, 1692 min_wmark_pages(zone), 1693 low_wmark_pages(zone), 1694 high_wmark_pages(zone), 1695 zone->spanned_pages, 1696 zone->present_pages, 1697 zone_managed_pages(zone), 1698 zone_cma_pages(zone)); 1699 1700 seq_printf(m, 1701 "\n protection: (%ld", 1702 zone->lowmem_reserve[0]); 1703 for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++) 1704 seq_printf(m, ", %ld", zone->lowmem_reserve[i]); 1705 seq_putc(m, ')'); 1706 1707 /* If unpopulated, no other information is useful */ 1708 if (!populated_zone(zone)) { 1709 seq_putc(m, '\n'); 1710 return; 1711 } 1712 1713 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) 1714 seq_printf(m, "\n %-12s %lu", zone_stat_name(i), 1715 zone_page_state(zone, i)); 1716 1717 #ifdef CONFIG_NUMA 1718 for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) 1719 seq_printf(m, "\n %-12s %lu", numa_stat_name(i), 1720 zone_numa_event_state(zone, i)); 1721 #endif 1722 1723 seq_printf(m, "\n pagesets"); 1724 for_each_online_cpu(i) { 1725 struct per_cpu_pages *pcp; 1726 struct per_cpu_zonestat __maybe_unused *pzstats; 1727 1728 pcp = per_cpu_ptr(zone->per_cpu_pageset, i); 1729 seq_printf(m, 1730 "\n cpu: %i" 1731 "\n count: %i" 1732 "\n high: %i" 1733 "\n batch: %i", 1734 i, 1735 pcp->count, 1736 pcp->high, 1737 pcp->batch); 1738 #ifdef CONFIG_SMP 1739 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, i); 1740 seq_printf(m, "\n vm stats threshold: %d", 1741 pzstats->stat_threshold); 1742 #endif 1743 } 1744 seq_printf(m, 1745 "\n node_unreclaimable: %u" 1746 "\n start_pfn: %lu", 1747 pgdat->kswapd_failures >= 
MAX_RECLAIM_RETRIES, 1748 zone->zone_start_pfn); 1749 seq_putc(m, '\n'); 1750 } 1751 1752 /* 1753 * Output information about zones in @pgdat. All zones are printed regardless 1754 * of whether they are populated or not: lowmem_reserve_ratio operates on the 1755 * set of all zones and userspace would not be aware of such zones if they are 1756 * suppressed here (zoneinfo displays the effect of lowmem_reserve_ratio). 1757 */ 1758 static int zoneinfo_show(struct seq_file *m, void *arg) 1759 { 1760 pg_data_t *pgdat = (pg_data_t *)arg; 1761 walk_zones_in_node(m, pgdat, false, false, zoneinfo_show_print); 1762 return 0; 1763 } 1764 1765 static const struct seq_operations zoneinfo_op = { 1766 .start = frag_start, /* iterate over all zones. The same as in 1767 * fragmentation. */ 1768 .next = frag_next, 1769 .stop = frag_stop, 1770 .show = zoneinfo_show, 1771 }; 1772 1773 #define NR_VMSTAT_ITEMS (NR_VM_ZONE_STAT_ITEMS + \ 1774 NR_VM_NUMA_EVENT_ITEMS + \ 1775 NR_VM_NODE_STAT_ITEMS + \ 1776 NR_VM_WRITEBACK_STAT_ITEMS + \ 1777 (IS_ENABLED(CONFIG_VM_EVENT_COUNTERS) ? \ 1778 NR_VM_EVENT_ITEMS : 0)) 1779 1780 static void *vmstat_start(struct seq_file *m, loff_t *pos) 1781 { 1782 unsigned long *v; 1783 int i; 1784 1785 if (*pos >= NR_VMSTAT_ITEMS) 1786 return NULL; 1787 1788 BUILD_BUG_ON(ARRAY_SIZE(vmstat_text) < NR_VMSTAT_ITEMS); 1789 fold_vm_numa_events(); 1790 v = kmalloc_array(NR_VMSTAT_ITEMS, sizeof(unsigned long), GFP_KERNEL); 1791 m->private = v; 1792 if (!v) 1793 return ERR_PTR(-ENOMEM); 1794 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) 1795 v[i] = global_zone_page_state(i); 1796 v += NR_VM_ZONE_STAT_ITEMS; 1797 1798 #ifdef CONFIG_NUMA 1799 for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) 1800 v[i] = global_numa_event_state(i); 1801 v += NR_VM_NUMA_EVENT_ITEMS; 1802 #endif 1803 1804 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) { 1805 v[i] = global_node_page_state_pages(i); 1806 if (vmstat_item_print_in_thp(i)) 1807 v[i] /= HPAGE_PMD_NR; 1808 } 1809 v += NR_VM_NODE_STAT_ITEMS; 1810 1811 global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD, 1812 v + NR_DIRTY_THRESHOLD); 1813 v += NR_VM_WRITEBACK_STAT_ITEMS; 1814 1815 #ifdef CONFIG_VM_EVENT_COUNTERS 1816 all_vm_events(v); 1817 v[PGPGIN] /= 2; /* sectors -> kbytes */ 1818 v[PGPGOUT] /= 2; 1819 #endif 1820 return (unsigned long *)m->private + *pos; 1821 } 1822 1823 static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos) 1824 { 1825 (*pos)++; 1826 if (*pos >= NR_VMSTAT_ITEMS) 1827 return NULL; 1828 return (unsigned long *)m->private + *pos; 1829 } 1830 1831 static int vmstat_show(struct seq_file *m, void *arg) 1832 { 1833 unsigned long *l = arg; 1834 unsigned long off = l - (unsigned long *)m->private; 1835 1836 seq_puts(m, vmstat_text[off]); 1837 seq_put_decimal_ull(m, " ", *l); 1838 seq_putc(m, '\n'); 1839 1840 if (off == NR_VMSTAT_ITEMS - 1) { 1841 /* 1842 * We've come to the end - add any deprecated counters to avoid 1843 * breaking userspace which might depend on them being present. 
1844 */ 1845 seq_puts(m, "nr_unstable 0\n"); 1846 } 1847 return 0; 1848 } 1849 1850 static void vmstat_stop(struct seq_file *m, void *arg) 1851 { 1852 kfree(m->private); 1853 m->private = NULL; 1854 } 1855 1856 static const struct seq_operations vmstat_op = { 1857 .start = vmstat_start, 1858 .next = vmstat_next, 1859 .stop = vmstat_stop, 1860 .show = vmstat_show, 1861 }; 1862 #endif /* CONFIG_PROC_FS */ 1863 1864 #ifdef CONFIG_SMP 1865 static DEFINE_PER_CPU(struct delayed_work, vmstat_work); 1866 int sysctl_stat_interval __read_mostly = HZ; 1867 1868 #ifdef CONFIG_PROC_FS 1869 static void refresh_vm_stats(struct work_struct *work) 1870 { 1871 refresh_cpu_vm_stats(true); 1872 } 1873 1874 int vmstat_refresh(struct ctl_table *table, int write, 1875 void *buffer, size_t *lenp, loff_t *ppos) 1876 { 1877 long val; 1878 int err; 1879 int i; 1880 1881 /* 1882 * The regular update, every sysctl_stat_interval, may come later 1883 * than expected: leaving a significant amount in per_cpu buckets. 1884 * This is particularly misleading when checking a quantity of HUGE 1885 * pages, immediately after running a test. /proc/sys/vm/stat_refresh, 1886 * which can equally be echo'ed to or cat'ted from (by root), 1887 * can be used to update the stats just before reading them. 1888 * 1889 * Oh, and since global_zone_page_state() etc. are so careful to hide 1890 * transiently negative values, report an error here if any of 1891 * the stats is negative, so we know to go looking for imbalance. 1892 */ 1893 err = schedule_on_each_cpu(refresh_vm_stats); 1894 if (err) 1895 return err; 1896 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) { 1897 /* 1898 * Skip checking stats known to go negative occasionally. 1899 */ 1900 switch (i) { 1901 case NR_ZONE_WRITE_PENDING: 1902 case NR_FREE_CMA_PAGES: 1903 continue; 1904 } 1905 val = atomic_long_read(&vm_zone_stat[i]); 1906 if (val < 0) { 1907 pr_warn("%s: %s %ld\n", 1908 __func__, zone_stat_name(i), val); 1909 } 1910 } 1911 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) { 1912 /* 1913 * Skip checking stats known to go negative occasionally. 1914 */ 1915 switch (i) { 1916 case NR_WRITEBACK: 1917 continue; 1918 } 1919 val = atomic_long_read(&vm_node_stat[i]); 1920 if (val < 0) { 1921 pr_warn("%s: %s %ld\n", 1922 __func__, node_stat_name(i), val); 1923 } 1924 } 1925 if (write) 1926 *ppos += *lenp; 1927 else 1928 *lenp = 0; 1929 return 0; 1930 } 1931 #endif /* CONFIG_PROC_FS */ 1932 1933 static void vmstat_update(struct work_struct *w) 1934 { 1935 if (refresh_cpu_vm_stats(true)) { 1936 /* 1937 * Counters were updated so we expect more updates 1938 * to occur in the future. Keep on running the 1939 * update worker thread. 1940 */ 1941 queue_delayed_work_on(smp_processor_id(), mm_percpu_wq, 1942 this_cpu_ptr(&vmstat_work), 1943 round_jiffies_relative(sysctl_stat_interval)); 1944 } 1945 } 1946 1947 /* 1948 * Check if the diffs for a certain cpu indicate that 1949 * an update is needed. 1950 */ 1951 static bool need_update(int cpu) 1952 { 1953 pg_data_t *last_pgdat = NULL; 1954 struct zone *zone; 1955 1956 for_each_populated_zone(zone) { 1957 struct per_cpu_zonestat *pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 1958 struct per_cpu_nodestat *n; 1959 1960 /* 1961 * The fast way of checking if there are any vmstat diffs. 
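 *
 * memchr_inv() returns NULL only if every byte of the diff array is zero,
 * so a non-NULL result means this CPU has outstanding diffs to fold.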
1962 */ 1963 if (memchr_inv(pzstats->vm_stat_diff, 0, sizeof(pzstats->vm_stat_diff))) 1964 return true; 1965 1966 if (last_pgdat == zone->zone_pgdat) 1967 continue; 1968 last_pgdat = zone->zone_pgdat; 1969 n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu); 1970 if (memchr_inv(n->vm_node_stat_diff, 0, sizeof(n->vm_node_stat_diff))) 1971 return true; 1972 } 1973 return false; 1974 } 1975 1976 /* 1977 * Switch off vmstat processing and then fold all the remaining differentials 1978 * until the diffs stay at zero. The function is used by NOHZ and can only be 1979 * invoked when tick processing is not active. 1980 */ 1981 void quiet_vmstat(void) 1982 { 1983 if (system_state != SYSTEM_RUNNING) 1984 return; 1985 1986 if (!delayed_work_pending(this_cpu_ptr(&vmstat_work))) 1987 return; 1988 1989 if (!need_update(smp_processor_id())) 1990 return; 1991 1992 /* 1993 * Just refresh counters and do not care about the pending delayed 1994 * vmstat_update. It doesn't fire that often to matter and canceling 1995 * it would be too expensive from this path. 1996 * vmstat_shepherd will take care about that for us. 1997 */ 1998 refresh_cpu_vm_stats(false); 1999 } 2000 2001 /* 2002 * Shepherd worker thread that checks the 2003 * differentials of processors that have their worker 2004 * threads for vm statistics updates disabled because of 2005 * inactivity. 2006 */ 2007 static void vmstat_shepherd(struct work_struct *w); 2008 2009 static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd); 2010 2011 static void vmstat_shepherd(struct work_struct *w) 2012 { 2013 int cpu; 2014 2015 cpus_read_lock(); 2016 /* Check processors whose vmstat worker threads have been disabled */ 2017 for_each_online_cpu(cpu) { 2018 struct delayed_work *dw = &per_cpu(vmstat_work, cpu); 2019 2020 if (!delayed_work_pending(dw) && need_update(cpu)) 2021 queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0); 2022 2023 cond_resched(); 2024 } 2025 cpus_read_unlock(); 2026 2027 schedule_delayed_work(&shepherd, 2028 round_jiffies_relative(sysctl_stat_interval)); 2029 } 2030 2031 static void __init start_shepherd_timer(void) 2032 { 2033 int cpu; 2034 2035 for_each_possible_cpu(cpu) 2036 INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu), 2037 vmstat_update); 2038 2039 schedule_delayed_work(&shepherd, 2040 round_jiffies_relative(sysctl_stat_interval)); 2041 } 2042 2043 static void __init init_cpu_node_state(void) 2044 { 2045 int node; 2046 2047 for_each_online_node(node) { 2048 if (!cpumask_empty(cpumask_of_node(node))) 2049 node_set_state(node, N_CPU); 2050 } 2051 } 2052 2053 static int vmstat_cpu_online(unsigned int cpu) 2054 { 2055 refresh_zone_stat_thresholds(); 2056 2057 if (!node_state(cpu_to_node(cpu), N_CPU)) { 2058 node_set_state(cpu_to_node(cpu), N_CPU); 2059 set_migration_target_nodes(); 2060 } 2061 2062 return 0; 2063 } 2064 2065 static int vmstat_cpu_down_prep(unsigned int cpu) 2066 { 2067 cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu)); 2068 return 0; 2069 } 2070 2071 static int vmstat_cpu_dead(unsigned int cpu) 2072 { 2073 const struct cpumask *node_cpus; 2074 int node; 2075 2076 node = cpu_to_node(cpu); 2077 2078 refresh_zone_stat_thresholds(); 2079 node_cpus = cpumask_of_node(node); 2080 if (!cpumask_empty(node_cpus)) 2081 return 0; 2082 2083 node_clear_state(node, N_CPU); 2084 set_migration_target_nodes(); 2085 2086 return 0; 2087 } 2088 2089 #endif 2090 2091 struct workqueue_struct *mm_percpu_wq; 2092 2093 void __init init_mm_internals(void) 2094 { 2095 int ret __maybe_unused; 2096 2097 mm_percpu_wq = 
alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0); 2098 2099 #ifdef CONFIG_SMP 2100 ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead", 2101 NULL, vmstat_cpu_dead); 2102 if (ret < 0) 2103 pr_err("vmstat: failed to register 'dead' hotplug state\n"); 2104 2105 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online", 2106 vmstat_cpu_online, 2107 vmstat_cpu_down_prep); 2108 if (ret < 0) 2109 pr_err("vmstat: failed to register 'online' hotplug state\n"); 2110 2111 cpus_read_lock(); 2112 init_cpu_node_state(); 2113 cpus_read_unlock(); 2114 2115 start_shepherd_timer(); 2116 #endif 2117 migrate_on_reclaim_init(); 2118 #ifdef CONFIG_PROC_FS 2119 proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op); 2120 proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op); 2121 proc_create_seq("vmstat", 0444, NULL, &vmstat_op); 2122 proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op); 2123 #endif 2124 } 2125 2126 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION) 2127 2128 /* 2129 * Return an index indicating how much of the available free memory is 2130 * unusable for an allocation of the requested size. 2131 */ 2132 static int unusable_free_index(unsigned int order, 2133 struct contig_page_info *info) 2134 { 2135 /* No free memory is interpreted as all free memory is unusable */ 2136 if (info->free_pages == 0) 2137 return 1000; 2138 2139 /* 2140 * Index should be a value between 0 and 1. Return a value to 3 2141 * decimal places. 2142 * 2143 * 0 => no fragmentation 2144 * 1 => high fragmentation 2145 */ 2146 return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages); 2147 2148 } 2149 2150 static void unusable_show_print(struct seq_file *m, 2151 pg_data_t *pgdat, struct zone *zone) 2152 { 2153 unsigned int order; 2154 int index; 2155 struct contig_page_info info; 2156 2157 seq_printf(m, "Node %d, zone %8s ", 2158 pgdat->node_id, 2159 zone->name); 2160 for (order = 0; order < MAX_ORDER; ++order) { 2161 fill_contig_page_info(zone, order, &info); 2162 index = unusable_free_index(order, &info); 2163 seq_printf(m, "%d.%03d ", index / 1000, index % 1000); 2164 } 2165 2166 seq_putc(m, '\n'); 2167 } 2168 2169 /* 2170 * Display unusable free space index 2171 * 2172 * The unusable free space index measures how much of the available free 2173 * memory cannot be used to satisfy an allocation of a given size and is a 2174 * value between 0 and 1. The higher the value, the more of free memory is 2175 * unusable and by implication, the worse the external fragmentation is. This 2176 * can be expressed as a percentage by multiplying by 100. 
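 *
 * Worked example (illustrative): with 1000 free pages of which blocks of
 * the requested order or larger cover 750 (free_blocks_suitable << order
 * == 750), unusable_free_index() returns (1000 - 750) * 1000 / 1000 = 250,
 * printed by unusable_show_print() as "0.250", i.e. 25% of the free memory
 * is unusable for that allocation size.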
2177 */ 2178 static int unusable_show(struct seq_file *m, void *arg) 2179 { 2180 pg_data_t *pgdat = (pg_data_t *)arg; 2181 2182 /* check memoryless node */ 2183 if (!node_state(pgdat->node_id, N_MEMORY)) 2184 return 0; 2185 2186 walk_zones_in_node(m, pgdat, true, false, unusable_show_print); 2187 2188 return 0; 2189 } 2190 2191 static const struct seq_operations unusable_sops = { 2192 .start = frag_start, 2193 .next = frag_next, 2194 .stop = frag_stop, 2195 .show = unusable_show, 2196 }; 2197 2198 DEFINE_SEQ_ATTRIBUTE(unusable); 2199 2200 static void extfrag_show_print(struct seq_file *m, 2201 pg_data_t *pgdat, struct zone *zone) 2202 { 2203 unsigned int order; 2204 int index; 2205 2206 /* Alloc on stack as interrupts are disabled for zone walk */ 2207 struct contig_page_info info; 2208 2209 seq_printf(m, "Node %d, zone %8s ", 2210 pgdat->node_id, 2211 zone->name); 2212 for (order = 0; order < MAX_ORDER; ++order) { 2213 fill_contig_page_info(zone, order, &info); 2214 index = __fragmentation_index(order, &info); 2215 seq_printf(m, "%2d.%03d ", index / 1000, index % 1000); 2216 } 2217 2218 seq_putc(m, '\n'); 2219 } 2220 2221 /* 2222 * Display fragmentation index for orders that allocations would fail for 2223 */ 2224 static int extfrag_show(struct seq_file *m, void *arg) 2225 { 2226 pg_data_t *pgdat = (pg_data_t *)arg; 2227 2228 walk_zones_in_node(m, pgdat, true, false, extfrag_show_print); 2229 2230 return 0; 2231 } 2232 2233 static const struct seq_operations extfrag_sops = { 2234 .start = frag_start, 2235 .next = frag_next, 2236 .stop = frag_stop, 2237 .show = extfrag_show, 2238 }; 2239 2240 DEFINE_SEQ_ATTRIBUTE(extfrag); 2241 2242 static int __init extfrag_debug_init(void) 2243 { 2244 struct dentry *extfrag_debug_root; 2245 2246 extfrag_debug_root = debugfs_create_dir("extfrag", NULL); 2247 2248 debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL, 2249 &unusable_fops); 2250 2251 debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL, 2252 &extfrag_fops); 2253 2254 return 0; 2255 } 2256 2257 module_init(extfrag_debug_init); 2258 #endif 2259