Lines Matching +full:line +full:- +full:orders

1 // SPDX-License-Identifier: GPL-2.0-only
11 * Copyright (C) 2008-2014 Christoph Lameter
45 atomic_long_set(&zone->vm_numa_event[item], 0); in zero_zone_numa_counters()
47 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item] in zero_zone_numa_counters()
124 ret[i] += this->event[i]; in sum_vm_events()
130 * The result is unavoidably approximate - it can change
153 count_vm_events(i, fold_state->event[i]); in vm_events_fold_cpu()
154 fold_state->event[i] = 0; in vm_events_fold_cpu()
181 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); in fold_vm_zone_numa_events()
183 zone_numa_events[item] += xchg(&pzstats->vm_numa_event[item], 0); in fold_vm_zone_numa_events()
214 watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone); in calculate_pressure_threshold()
239 * ------------------------------------------------------------------ in calculate_normal_threshold()
240 * 8 1 1 0.9-1 GB 4 in calculate_normal_threshold()
241 * 16 2 2 0.9-1 GB 4 in calculate_normal_threshold()
242 * 20 2 2 1-2 GB 5 in calculate_normal_threshold()
243 * 24 2 2 2-4 GB 6 in calculate_normal_threshold()
244 * 28 2 2 4-8 GB 7 in calculate_normal_threshold()
245 * 32 2 2 8-16 GB 8 in calculate_normal_threshold()
247 * 30 4 3 2-4 GB 5 in calculate_normal_threshold()
248 * 48 4 3 8-16 GB 8 in calculate_normal_threshold()
249 * 32 8 4 1-2 GB 4 in calculate_normal_threshold()
250 * 32 8 4 0.9-1GB 4 in calculate_normal_threshold()
253 * 70 64 7 2-4 GB 5 in calculate_normal_threshold()
254 * 84 64 7 4-8 GB 6 in calculate_normal_threshold()
255 * 108 512 9 4-8 GB 6 in calculate_normal_threshold()
256 * 125 1024 10 8-16 GB 8 in calculate_normal_threshold()
257 * 125 1024 10 16-32 GB 9 in calculate_normal_threshold()
260 mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT); in calculate_normal_threshold()
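
The block above ends with line 260, which expresses the zone's managed memory in 128 MB units; the table rows list sample thresholds against processor count, fls(cpus), zone size and fls(mem)+1. Below is a minimal userspace sketch of that scaling, assuming the kernel combines the terms as threshold = min(125, 2 * fls(cpus) * (1 + fls(mem))) and that the pressure variant at line 214 divides the low/min watermark gap by the CPU count; fls_() and the helper names are local to the sketch, not taken from the listing.

/*
 * Userspace sketch of the threshold scaling illustrated by the table above.
 * The 2 * fls(cpus) * (1 + fls(mem)) combination and the pressure variant
 * are assumptions of this sketch, not lines from the listing.
 */
#include <stdio.h>

static int fls_(unsigned int x)		/* position of highest set bit, 1-based */
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

/* Normal threshold: grows with log2(CPUs) and log2(zone size in 128 MB units). */
static int normal_threshold(unsigned int cpus, unsigned long long managed_bytes)
{
	int mem = managed_bytes >> 27;	/* memory in 128 MB units, as at line 260 */
	int threshold = 2 * fls_(cpus) * (1 + fls_(mem));

	return threshold < 125 ? threshold : 125;	/* cap at 125 */
}

/* Pressure threshold: small enough that worst-case drift cannot hide a
 * breach of the min watermark (cf. line 214).
 */
static int pressure_threshold(unsigned int cpus, long low_wmark, long min_wmark)
{
	int threshold = (int)((low_wmark - min_wmark) / (long)cpus);

	if (threshold < 1)
		threshold = 1;
	return threshold < 125 ? threshold : 125;
}

int main(void)
{
	printf("%d\n", normal_threshold(2, 1ULL << 30));	/* 2 CPUs, 1 GiB zone -> 20 */
	printf("%d\n", normal_threshold(1024, 16ULL << 30));	/* big box -> capped at 125 */
	printf("%d\n", pressure_threshold(4, 12800, 10240));	/* 2560 / 4 = 640 -> capped at 125 */
	return 0;
}
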
285 per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0; in refresh_zone_stat_thresholds()
290 struct pglist_data *pgdat = zone->zone_pgdat; in refresh_zone_stat_thresholds()
298 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold in refresh_zone_stat_thresholds()
302 pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold; in refresh_zone_stat_thresholds()
303 per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold in refresh_zone_stat_thresholds()
312 tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone); in refresh_zone_stat_thresholds()
315 zone->percpu_drift_mark = high_wmark_pages(zone) + in refresh_zone_stat_thresholds()
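
Lines 312 and 315 are the percpu_drift_mark setup: when the worst-case combined per-cpu drift (assumed here to be num_online_cpus() * threshold, which the listing does not show) can exceed the low/min watermark gap, the zone records a mark above the high watermark; once the free-page estimate drops below that mark, callers are expected to fall back to a precise sum rather than trust the cached counter. A small sketch under those assumptions:

/*
 * Sketch of the drift mark set up around lines 312/315.  The max_drift
 * formula is an assumption of this sketch.
 */
#include <stdio.h>

static unsigned long percpu_drift_mark(unsigned int cpus, int threshold,
				       unsigned long min_wmark,
				       unsigned long low_wmark,
				       unsigned long high_wmark)
{
	unsigned long tolerate_drift = low_wmark - min_wmark;
	unsigned long max_drift = (unsigned long)cpus * threshold;

	/* 0 means the cached estimate is always good enough */
	return max_drift > tolerate_drift ? high_wmark + max_drift : 0;
}

int main(void)
{
	/* 64 CPUs, each allowed to drift 125 pages: 8000 pages of slack */
	printf("%lu\n", percpu_drift_mark(64, 125, 10240, 12800, 15360));
	return 0;
}
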
328 for (i = 0; i < pgdat->nr_zones; i++) { in set_pgdat_percpu_threshold()
329 zone = &pgdat->node_zones[i]; in set_pgdat_percpu_threshold()
330 if (!zone->percpu_drift_mark) in set_pgdat_percpu_threshold()
335 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold in set_pgdat_percpu_threshold()
348 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats; in __mod_zone_page_state()
349 s8 __percpu *p = pcp->vm_stat_diff + item; in __mod_zone_page_state()
355 * atomicity is provided by IRQs being disabled -- either explicitly in __mod_zone_page_state()
364 t = __this_cpu_read(pcp->stat_threshold); in __mod_zone_page_state()
379 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats; in __mod_node_page_state()
380 s8 __percpu *p = pcp->vm_node_stat_diff + item; in __mod_node_page_state()
389 * internally to keep the per-cpu counters compact. in __mod_node_page_state()
391 VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1)); in __mod_node_page_state()
400 t = __this_cpu_read(pcp->stat_threshold); in __mod_node_page_state()
437 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats; in __inc_zone_state()
438 s8 __percpu *p = pcp->vm_stat_diff + item; in __inc_zone_state()
445 t = __this_cpu_read(pcp->stat_threshold); in __inc_zone_state()
450 __this_cpu_write(*p, -overstep); in __inc_zone_state()
458 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats; in __inc_node_state()
459 s8 __percpu *p = pcp->vm_node_stat_diff + item; in __inc_node_state()
468 t = __this_cpu_read(pcp->stat_threshold); in __inc_node_state()
473 __this_cpu_write(*p, -overstep); in __inc_node_state()
493 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats; in __dec_zone_state()
494 s8 __percpu *p = pcp->vm_stat_diff + item; in __dec_zone_state()
501 t = __this_cpu_read(pcp->stat_threshold); in __dec_zone_state()
502 if (unlikely(v < - t)) { in __dec_zone_state()
505 zone_page_state_add(v - overstep, zone, item); in __dec_zone_state()
514 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats; in __dec_node_state()
515 s8 __percpu *p = pcp->vm_node_stat_diff + item; in __dec_node_state()
524 t = __this_cpu_read(pcp->stat_threshold); in __dec_node_state()
525 if (unlikely(v < - t)) { in __dec_node_state()
528 node_page_state_add(v - overstep, pgdat, item); in __dec_node_state()
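
The __mod/__inc/__dec_*_state() lines above all follow one pattern: a small signed per-cpu delta is updated with interrupts disabled (see the comment at line 355), and only when it crosses stat_threshold is it folded into the shared counter, overstepping by half a threshold (lines 450, 473, 505, 528) so the next few updates stay local again. Here is a userspace sketch of that pattern, collapsed to a single CPU and plain types; the struct layout is a simplification of this sketch, not the kernel's.

/*
 * Fold-on-overflow sketch: keep a small per-cpu delta, touch the shared
 * counter only when the delta crosses the threshold, and overstep by half
 * a threshold so the window does not immediately overflow again.
 */
#include <stdio.h>

struct counter {
	long global;		/* the zone/node-wide counter */
	signed char diff;	/* this CPU's pending delta */
	signed char threshold;	/* stat_threshold for this CPU */
};

static void inc_state(struct counter *c)
{
	signed char v = ++c->diff;

	if (v > c->threshold) {
		signed char overstep = c->threshold / 2;

		c->global += v + overstep;	/* fold, plus half a threshold */
		c->diff = -overstep;		/* start the next window below zero */
	}
}

static void dec_state(struct counter *c)
{
	signed char v = --c->diff;

	if (v < -c->threshold) {
		signed char overstep = c->threshold / 2;

		c->global += v - overstep;
		c->diff = overstep;
	}
}

int main(void)
{
	struct counter c = { .global = 0, .diff = 0, .threshold = 32 };
	int i;

	for (i = 0; i < 100; i++)
		inc_state(&c);
	/* global + diff always equals the true count (here 100) */
	printf("global=%ld diff=%d sum=%ld\n", c.global, c.diff, c.global + c.diff);
	for (i = 0; i < 100; i++)
		dec_state(&c);
	printf("global=%ld diff=%d sum=%ld\n", c.global, c.diff, c.global + c.diff);
	return 0;
}
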
558 * -1 Overstepping minus half of threshold
563 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats; in mod_zone_state()
564 s8 __percpu *p = pcp->vm_stat_diff + item; in mod_zone_state()
582 t = this_cpu_read(pcp->stat_threshold); in mod_zone_state()
591 n = -os; in mod_zone_state()
614 mod_zone_state(page_zone(page), item, -1, -1); in dec_zone_page_state()
621 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats; in mod_node_state()
622 s8 __percpu *p = pcp->vm_node_stat_diff + item; in mod_node_state()
631 * internally to keep the per-cpu counters compact. in mod_node_state()
633 VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1)); in mod_node_state()
651 t = this_cpu_read(pcp->stat_threshold); in mod_node_state()
660 n = -os; in mod_node_state()
688 mod_node_state(page_pgdat(page), item, -1, -1); in dec_node_page_state()
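
mod_zone_state() and mod_node_state() achieve the same folding without disabling interrupts: they recompute the new per-cpu value and publish it with a cmpxchg, retrying if the slot changed in between, and the overstep direction follows the caller's mode (the comment at line 558 describes mode -1 as overstepping by minus half of the threshold). Below is a compressed userspace sketch of that retry loop, using C11 atomics in place of this_cpu_cmpxchg(); the single shared slot and the exact folding arithmetic are assumptions of the sketch.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic signed char diff;	/* stands in for the per-cpu slot */
static _Atomic long global;		/* stands in for the zone/node counter */

static void mod_state(int delta, int overstep_mode, int threshold)
{
	signed char o, n;
	long z;
	int v;

	do {
		z = 0;				/* amount to fold into 'global' */
		o = atomic_load(&diff);
		v = o + delta;

		if (abs(v) > threshold) {
			int os = overstep_mode * (threshold / 2);

			z = v + os;		/* everything outside the window */
			v = -os;		/* restart the window, offset by os */
		}
		n = (signed char)v;
		/* retry if another update hit the slot between load and store */
	} while (!atomic_compare_exchange_weak(&diff, &o, n));

	if (z)
		atomic_fetch_add(&global, z);
}

int main(void)
{
	int i;

	for (i = 0; i < 1000; i++)
		mod_state(+1, 1, 32);		/* increments overstep upward */
	for (i = 0; i < 400; i++)
		mod_state(-1, -1, 32);		/* decrements overstep downward */

	printf("true=600  global+diff=%ld\n",
	       atomic_load(&global) + atomic_load(&diff));
	return 0;
}
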
806 * with the global counters. These could cause remote node cache line
821 struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats; in refresh_cpu_vm_stats()
822 struct per_cpu_pages __percpu *pcp = zone->per_cpu_pageset; in refresh_cpu_vm_stats()
827 v = this_cpu_xchg(pzstats->vm_stat_diff[i], 0); in refresh_cpu_vm_stats()
830 atomic_long_add(v, &zone->vm_stat[i]); in refresh_cpu_vm_stats()
834 __this_cpu_write(pcp->expire, 3); in refresh_cpu_vm_stats()
851 if (!__this_cpu_read(pcp->expire) || in refresh_cpu_vm_stats()
852 !__this_cpu_read(pcp->count)) in refresh_cpu_vm_stats()
859 __this_cpu_write(pcp->expire, 0); in refresh_cpu_vm_stats()
863 if (__this_cpu_dec_return(pcp->expire)) { in refresh_cpu_vm_stats()
868 if (__this_cpu_read(pcp->count)) { in refresh_cpu_vm_stats()
877 struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats; in refresh_cpu_vm_stats()
882 v = this_cpu_xchg(p->vm_node_stat_diff[i], 0); in refresh_cpu_vm_stats()
884 atomic_long_add(v, &pgdat->vm_stat[i]); in refresh_cpu_vm_stats()
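
The refresh_cpu_vm_stats() lines show the periodic fold: a pending per-cpu delta is swapped with zero (this_cpu_xchg at lines 827 and 882) and whatever was in it is added to the global atomic, while pcp->expire (reset to 3 at line 834) counts down toward draining an idle remote per-cpu page list. Here is a userspace sketch of the swap-and-add step using C11 atomic_exchange(); it loops over all CPUs for brevity, whereas the real function runs on each CPU and only touches that CPU's own slots.

#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4

static _Atomic int diff[NR_CPUS];	/* per-cpu pending deltas */
static _Atomic long vm_stat;		/* the global counter */

/* Fold every pending delta into the global counter.  Swapping the slot with
 * 0 means a concurrent updater either sees its delta folded here or keeps it
 * for the next fold: nothing is lost or counted twice.
 */
static int refresh_vm_stats(void)
{
	int cpu, changes = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		int v = atomic_exchange(&diff[cpu], 0);

		if (v) {
			atomic_fetch_add(&vm_stat, v);
			changes++;
		}
	}
	return changes;	/* mirrors the change count the real function reports */
}

int main(void)
{
	atomic_store(&diff[0], 7);
	atomic_store(&diff[2], -3);
	printf("folded %d slots, vm_stat=%ld\n",
	       refresh_vm_stats(), atomic_load(&vm_stat));
	return 0;
}
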
910 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); in cpu_vm_stats_fold()
913 if (pzstats->vm_stat_diff[i]) { in cpu_vm_stats_fold()
916 v = pzstats->vm_stat_diff[i]; in cpu_vm_stats_fold()
917 pzstats->vm_stat_diff[i] = 0; in cpu_vm_stats_fold()
918 atomic_long_add(v, &zone->vm_stat[i]); in cpu_vm_stats_fold()
924 if (pzstats->vm_numa_event[i]) { in cpu_vm_stats_fold()
927 v = pzstats->vm_numa_event[i]; in cpu_vm_stats_fold()
928 pzstats->vm_numa_event[i] = 0; in cpu_vm_stats_fold()
938 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu); in cpu_vm_stats_fold()
941 if (p->vm_node_stat_diff[i]) { in cpu_vm_stats_fold()
944 v = p->vm_node_stat_diff[i]; in cpu_vm_stats_fold()
945 p->vm_node_stat_diff[i] = 0; in cpu_vm_stats_fold()
946 atomic_long_add(v, &pgdat->vm_stat[i]); in cpu_vm_stats_fold()
956 * pset->vm_stat_diff[] exist.
964 if (pzstats->vm_stat_diff[i]) { in drain_zonestat()
965 v = pzstats->vm_stat_diff[i]; in drain_zonestat()
966 pzstats->vm_stat_diff[i] = 0; in drain_zonestat()
973 if (pzstats->vm_numa_event[i]) { in drain_zonestat()
974 v = pzstats->vm_numa_event[i]; in drain_zonestat()
975 pzstats->vm_numa_event[i] = 0; in drain_zonestat()
992 struct zone *zones = NODE_DATA(node)->node_zones; in sum_zone_node_page_state()
1006 struct zone *zones = NODE_DATA(node)->node_zones; in sum_zone_numa_event_state()
1022 long x = atomic_long_read(&pgdat->vm_stat[item]); in node_page_state_pages()
1079 info->free_pages = 0; in fill_contig_page_info()
1080 info->free_blocks_total = 0; in fill_contig_page_info()
1081 info->free_blocks_suitable = 0; in fill_contig_page_info()
1092 blocks = data_race(zone->free_area[order].nr_free); in fill_contig_page_info()
1093 info->free_blocks_total += blocks; in fill_contig_page_info()
1096 info->free_pages += blocks << order; in fill_contig_page_info()
1100 info->free_blocks_suitable += blocks << in fill_contig_page_info()
1101 (order - suitable_order); in fill_contig_page_info()
1119 if (!info->free_blocks_total) in __fragmentation_index()
1123 if (info->free_blocks_suitable) in __fragmentation_index()
1124 return -1000; in __fragmentation_index()
1132 return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total); in __fragmentation_index()
1148 return div_u64((info.free_pages - in extfrag_for_order()
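
fill_contig_page_info() walks the free lists and accumulates free_pages, free_blocks_total and free_blocks_suitable; __fragmentation_index() then turns them into a 0..1000 score where values near 0 mean a request fails for lack of memory and values near 1000 mean it fails because the free memory is split into blocks that are too small, and -1000 means a suitable block exists so the request would not fail at all. A self-contained sketch of that formula, with div_u64() replaced by plain integer division:

#include <stdio.h>

struct contig_page_info {
	unsigned long free_pages;		/* all free pages */
	unsigned long free_blocks_total;	/* free blocks of any order */
	unsigned long free_blocks_suitable;	/* blocks of >= requested order */
};

/* Same shape as __fragmentation_index(): 0..1000, low means "not enough
 * memory", high means "enough memory, but in blocks that are too small".
 */
static int fragmentation_index(unsigned int order,
			       const struct contig_page_info *info)
{
	unsigned long requested = 1UL << order;

	if (!info->free_blocks_total)
		return 0;
	if (info->free_blocks_suitable)
		return -1000;	/* a request of this order would not fail */

	return 1000 - (int)((1000 + info->free_pages * 1000 / requested)
			    / info->free_blocks_total);
}

int main(void)
{
	/* 1024 free pages, but all of them as single order-0 pages: an
	 * order-4 request fails purely because of fragmentation.
	 */
	struct contig_page_info info = {
		.free_pages = 1024,
		.free_blocks_total = 1024,
		.free_blocks_suitable = 0,
	};

	printf("index(order=4) = %d\n", fragmentation_index(4, &info));	/* 937 */
	return 0;
}
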
1296 /* system-wide enum vm_stat_item counters */
1514 --node; in frag_start()
1540 struct zone *node_zones = pgdat->node_zones; in walk_zones_in_node()
1543 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) { in walk_zones_in_node()
1548 spin_lock_irqsave(&zone->lock, flags); in walk_zones_in_node()
1551 spin_unlock_irqrestore(&zone->lock, flags); in walk_zones_in_node()
1562 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name); in frag_show_print()
1568 seq_printf(m, "%6lu ", data_race(zone->free_area[order].nr_free)); in frag_show_print()
1589 pgdat->node_id, in pagetypeinfo_showfree_print()
1590 zone->name, in pagetypeinfo_showfree_print()
1598 area = &(zone->free_area[order]); in pagetypeinfo_showfree_print()
1600 list_for_each(curr, &area->free_list[mtype]) { in pagetypeinfo_showfree_print()
1616 spin_unlock_irq(&zone->lock); in pagetypeinfo_showfree_print()
1618 spin_lock_irq(&zone->lock); in pagetypeinfo_showfree_print()
1631 seq_printf(m, "%-43s ", "Free pages count per migrate type at order"); in pagetypeinfo_showfree()
1644 unsigned long start_pfn = zone->zone_start_pfn; in pagetypeinfo_showblockcount_print()
1665 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name); in pagetypeinfo_showblockcount_print()
1677 seq_printf(m, "\n%-23s", "Number of blocks type "); in pagetypeinfo_showblockcount()
1701 seq_printf(m, "\n%-23s", "Number of mixed blocks "); in pagetypeinfo_showmixedcount()
1720 if (!node_state(pgdat->node_id, N_MEMORY)) in pagetypeinfo_show()
1752 struct zone *compare = &pgdat->node_zones[zid]; in is_zone_first_populated()
1765 seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name); in zoneinfo_show_print()
1767 seq_printf(m, "\n per-node stats"); in zoneinfo_show_print()
1773 seq_printf(m, "\n %-12s %lu", node_stat_name(i), in zoneinfo_show_print()
1789 zone->watermark_boost, in zoneinfo_show_print()
1794 zone->spanned_pages, in zoneinfo_show_print()
1795 zone->present_pages, in zoneinfo_show_print()
1801 zone->lowmem_reserve[0]); in zoneinfo_show_print()
1802 for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++) in zoneinfo_show_print()
1803 seq_printf(m, ", %ld", zone->lowmem_reserve[i]); in zoneinfo_show_print()
1813 seq_printf(m, "\n %-12s %lu", zone_stat_name(i), in zoneinfo_show_print()
1819 seq_printf(m, "\n %-12s %lu", numa_stat_name(i), in zoneinfo_show_print()
1828 pcp = per_cpu_ptr(zone->per_cpu_pageset, i); in zoneinfo_show_print()
1837 pcp->count, in zoneinfo_show_print()
1838 pcp->high, in zoneinfo_show_print()
1839 pcp->batch, in zoneinfo_show_print()
1840 pcp->high_min, in zoneinfo_show_print()
1841 pcp->high_max); in zoneinfo_show_print()
1843 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, i); in zoneinfo_show_print()
1845 pzstats->stat_threshold); in zoneinfo_show_print()
1851 atomic_read(&pgdat->kswapd_failures) >= MAX_RECLAIM_RETRIES, in zoneinfo_show_print()
1852 zone->zone_start_pfn); in zoneinfo_show_print()
1895 m->private = v; in vmstat_start()
1897 return ERR_PTR(-ENOMEM); in vmstat_start()
1923 v[PGPGIN] /= 2; /* sectors -> kbytes */ in vmstat_start()
1926 return (unsigned long *)m->private + *pos; in vmstat_start()
1934 return (unsigned long *)m->private + *pos; in vmstat_next()
1940 unsigned long off = l - (unsigned long *)m->private; in vmstat_show()
1946 if (off == NR_VMSTAT_ITEMS - 1) { in vmstat_show()
1948 * We've come to the end - add any deprecated counters to avoid in vmstat_show()
1958 kfree(m->private); in vmstat_stop()
1959 m->private = NULL; in vmstat_stop()
2064 struct per_cpu_zonestat *pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); in need_update()
2070 if (memchr_inv(pzstats->vm_stat_diff, 0, sizeof(pzstats->vm_stat_diff))) in need_update()
2073 if (last_pgdat == zone->zone_pgdat) in need_update()
2075 last_pgdat = zone->zone_pgdat; in need_update()
2076 n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu); in need_update()
2077 if (memchr_inv(n->vm_node_stat_diff, 0, sizeof(n->vm_node_stat_diff))) in need_update()
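
need_update() does not sum anything; it only asks whether any byte of a CPU's diff arrays is non-zero, using memchr_inv() at lines 2070 and 2077, and skips a pgdat it has already examined via last_pgdat. A short sketch of that pending-work test, with a local all_zero() helper standing in for memchr_inv(), which has no libc equivalent:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* memchr_inv(buf, 0, len) in the kernel returns the first byte that is NOT
 * the given value; here only the boolean answer is needed.
 */
static bool all_zero(const void *buf, size_t len)
{
	const unsigned char *p = buf;
	size_t i;

	for (i = 0; i < len; i++)
		if (p[i])
			return false;
	return true;
}

int main(void)
{
	signed char vm_stat_diff[16] = { 0 };

	printf("update needed: %d\n", !all_zero(vm_stat_diff, sizeof(vm_stat_diff)));
	vm_stat_diff[5] = -3;	/* one pending per-cpu delta */
	printf("update needed: %d\n", !all_zero(vm_stat_diff, sizeof(vm_stat_diff)));
	return 0;
}
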
2307 if (info->free_pages == 0) in unusable_free_index()
2317 return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages); in unusable_free_index()
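
unusable_free_index() reports, in thousandths, how much of the free memory cannot back an allocation of the given order: free pages minus the pages sitting in suitably large blocks, divided by all free pages, with no free memory treated as fully unusable by the early return at line 2307. Line 1148 computes the same ratio for extfrag_for_order(). A self-contained sketch:

#include <stdio.h>

struct contig_page_info {
	unsigned long free_pages;
	unsigned long free_blocks_suitable;	/* blocks of >= requested order */
};

/* Thousandths of the free memory that cannot satisfy a request of 'order'. */
static int unusable_free_index(unsigned int order,
			       const struct contig_page_info *info)
{
	if (info->free_pages == 0)
		return 1000;	/* no free memory at all counts as fully unusable */

	return (info->free_pages - (info->free_blocks_suitable << order))
			* 1000UL / info->free_pages;
}

int main(void)
{
	/* 1024 free pages, 32 of them held in two order-4 blocks */
	struct contig_page_info info = {
		.free_pages = 1024,
		.free_blocks_suitable = 2,
	};

	/* (1024 - 2*16) * 1000 / 1024 = 968 */
	printf("unusable at order 4: %d/1000\n", unusable_free_index(4, &info));
	return 0;
}
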
2329 pgdat->node_id, in unusable_show_print()
2330 zone->name); in unusable_show_print()
2354 if (!node_state(pgdat->node_id, N_MEMORY)) in unusable_show()
2381 pgdat->node_id, in extfrag_show_print()
2382 zone->name); in extfrag_show_print()
2393 * Display fragmentation index for orders that allocations would fail for