Lines Matching +full:int +full:- +full:threshold

1 // SPDX-License-Identifier: GPL-2.0-only
11 * Copyright (C) 2008-2014 Christoph Lameter
35 int sysctl_vm_numa_stat = ENABLE_NUMA_STAT;
40 int item, cpu; in zero_zone_numa_counters()
43 atomic_long_set(&zone->vm_numa_event[item], 0); in zero_zone_numa_counters()
45 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item] in zero_zone_numa_counters()
63 int item; in zero_global_numa_counters()
77 int sysctl_vm_numa_stat_handler(const struct ctl_table *table, int write, in sysctl_vm_numa_stat_handler()
80 int ret, oldval; in sysctl_vm_numa_stat_handler()
112 int cpu; in sum_vm_events()
113 int i; in sum_vm_events()
121 ret[i] += this->event[i]; in sum_vm_events()
127 * The result is unavoidably approximate - it can change
144 void vm_events_fold_cpu(int cpu) in vm_events_fold_cpu()
147 int i; in vm_events_fold_cpu()
150 count_vm_events(i, fold_state->event[i]); in vm_events_fold_cpu()
151 fold_state->event[i] = 0; in vm_events_fold_cpu()
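Both helpers above work on a plain per-cpu array of event counters: sum_vm_events() adds every online CPU's array element-wise into the caller's buffer, and vm_events_fold_cpu() moves a dead CPU's counts into the current CPU so nothing is lost across hotplug. A toy userspace model of that element-wise fold (ordinary arrays stand in for the per-cpu data; everything here is invented for illustration):

	#include <stdio.h>

	#define NR_EVENTS 3
	#define NR_CPUS   4

	static unsigned long events[NR_CPUS][NR_EVENTS] = {
		{ 5, 0, 2 }, { 1, 7, 0 }, { 0, 3, 3 }, { 4, 4, 4 },
	};

	/* sum_vm_events()-style element-wise accumulation over all "CPUs". */
	static void sum_events(unsigned long *ret)
	{
		for (int i = 0; i < NR_EVENTS; i++)
			ret[i] = 0;
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			for (int i = 0; i < NR_EVENTS; i++)
				ret[i] += events[cpu][i];
	}

	/* vm_events_fold_cpu()-style transfer of a dead CPU's counts. */
	static void fold_cpu(int dead, int into)
	{
		for (int i = 0; i < NR_EVENTS; i++) {
			events[into][i] += events[dead][i];
			events[dead][i] = 0;
		}
	}

	int main(void)
	{
		unsigned long total[NR_EVENTS];

		fold_cpu(2, 0);		/* pretend CPU 2 went offline */
		sum_events(total);	/* totals are unchanged by the fold */
		printf("%lu %lu %lu\n", total[0], total[1], total[2]);
		return 0;
	}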
172 int cpu; in fold_vm_zone_numa_events()
178 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); in fold_vm_zone_numa_events()
180 zone_numa_events[item] += xchg(&pzstats->vm_numa_event[item], 0); in fold_vm_zone_numa_events()
198 int calculate_pressure_threshold(struct zone *zone) in calculate_pressure_threshold()
200 int threshold; in calculate_pressure_threshold() local
201 int watermark_distance; in calculate_pressure_threshold()
207 * value looks fine. The pressure threshold is a reduced value such in calculate_pressure_threshold()
211 watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone); in calculate_pressure_threshold()
212 threshold = max(1, (int)(watermark_distance / num_online_cpus())); in calculate_pressure_threshold()
215 * Maximum threshold is 125 in calculate_pressure_threshold()
217 threshold = min(125, threshold); in calculate_pressure_threshold()
219 return threshold; in calculate_pressure_threshold()
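A quick standalone check of the pressure-threshold math above: the threshold is the low/min watermark gap divided across the online CPUs, clamped to at least 1 and at most 125. This is a userspace sketch with invented watermark values; pressure_threshold() is a stand-in, not the kernel function:

	#include <stdio.h>

	/* Hypothetical inputs standing in for low_wmark_pages()/min_wmark_pages(). */
	static int pressure_threshold(long low_wmark, long min_wmark, int online_cpus)
	{
		long watermark_distance = low_wmark - min_wmark;
		int threshold = watermark_distance / online_cpus;

		if (threshold < 1)
			threshold = 1;
		if (threshold > 125)	/* same 125 cap as the kernel code */
			threshold = 125;
		return threshold;
	}

	int main(void)
	{
		/* a 1024-page low/min gap spread over 8 CPUs -> 128, capped to 125 */
		printf("%d\n", pressure_threshold(2048, 1024, 8));
		/* a tight 64-page gap over 16 CPUs -> 4 */
		printf("%d\n", pressure_threshold(1088, 1024, 16));
		return 0;
	}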
222 int calculate_normal_threshold(struct zone *zone) in calculate_normal_threshold()
224 int threshold; in calculate_normal_threshold() local
225 int mem; /* memory in 128 MB units */ in calculate_normal_threshold()
228 * The threshold scales with the number of processors and the amount in calculate_normal_threshold()
235 * Threshold Processors (fls) Zonesize fls(mem)+1 in calculate_normal_threshold()
236 * ------------------------------------------------------------------ in calculate_normal_threshold()
237 * 8 1 1 0.9-1 GB 4 in calculate_normal_threshold()
238 * 16 2 2 0.9-1 GB 4 in calculate_normal_threshold()
239 * 20 2 2 1-2 GB 5 in calculate_normal_threshold()
240 * 24 2 2 2-4 GB 6 in calculate_normal_threshold()
241 * 28 2 2 4-8 GB 7 in calculate_normal_threshold()
242 * 32 2 2 8-16 GB 8 in calculate_normal_threshold()
244 * 30 4 3 2-4 GB 5 in calculate_normal_threshold()
245 * 48 4 3 8-16 GB 8 in calculate_normal_threshold()
246 * 32 8 4 1-2 GB 4 in calculate_normal_threshold()
247 * 32 8 4 0.9-1GB 4 in calculate_normal_threshold()
250 * 70 64 7 2-4 GB 5 in calculate_normal_threshold()
251 * 84 64 7 4-8 GB 6 in calculate_normal_threshold()
252 * 108 512 9 4-8 GB 6 in calculate_normal_threshold()
253 * 125 1024 10 8-16 GB 8 in calculate_normal_threshold()
254 * 125 1024 10 16-32 GB 9 in calculate_normal_threshold()
257 mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT); in calculate_normal_threshold()
259 threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem)); in calculate_normal_threshold()
262 * Maximum threshold is 125 in calculate_normal_threshold()
264 threshold = min(125, threshold); in calculate_normal_threshold()
266 return threshold; in calculate_normal_threshold()
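The table rows above follow directly from threshold = 2 * fls(cpus) * (1 + fls(mem)), with mem being the zone size in 128 MB units. A minimal userspace sketch reproduces, for example, the "48 / 4 CPUs / 8-16 GB" row; fls() is reimplemented here since it is a kernel helper, and the zone size in bytes >> 27 is an equivalent stand-in for zone_managed_pages() >> (27 - PAGE_SHIFT):

	#include <stdio.h>

	/* Userspace stand-in for the kernel's fls(): highest set bit, 1-based. */
	static int my_fls(unsigned int x)
	{
		int bit = 0;

		while (x) {
			bit++;
			x >>= 1;
		}
		return bit;
	}

	int main(void)
	{
		unsigned long long zone_bytes = 12ULL << 30;	/* assume a 12 GB zone */
		int cpus = 4;					/* assume 4 online CPUs */
		int mem = zone_bytes >> 27;			/* zone size in 128 MB units: 96 */
		int threshold = 2 * my_fls(cpus) * (1 + my_fls(mem));

		if (threshold > 125)				/* same cap as the kernel code */
			threshold = 125;
		printf("threshold = %d\n", threshold);		/* prints 48, matching the table */
		return 0;
	}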
276 int cpu; in refresh_zone_stat_thresholds()
277 int threshold; in refresh_zone_stat_thresholds() local
282 per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0; in refresh_zone_stat_thresholds()
287 struct pglist_data *pgdat = zone->zone_pgdat; in refresh_zone_stat_thresholds()
290 threshold = calculate_normal_threshold(zone); in refresh_zone_stat_thresholds()
293 int pgdat_threshold; in refresh_zone_stat_thresholds()
295 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold in refresh_zone_stat_thresholds()
296 = threshold; in refresh_zone_stat_thresholds()
298 /* Base nodestat threshold on the largest populated zone. */ in refresh_zone_stat_thresholds()
299 pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold; in refresh_zone_stat_thresholds()
300 per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold in refresh_zone_stat_thresholds()
301 = max(threshold, pgdat_threshold); in refresh_zone_stat_thresholds()
309 tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone); in refresh_zone_stat_thresholds()
310 max_drift = num_online_cpus() * threshold; in refresh_zone_stat_thresholds()
312 zone->percpu_drift_mark = high_wmark_pages(zone) + in refresh_zone_stat_thresholds()
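Worked numbers for the drift guard above (invented values): with 64 online CPUs and a per-cpu threshold of 100, the per-cpu diffs can together hide up to max_drift = 64 * 100 = 6400 pages (25 MB with 4 KB pages). When that exceeds tolerate_drift, the low/min watermark gap, the zone's percpu_drift_mark is raised above the high watermark so that watermark checks in that range can use an exact, per-cpu-summed snapshot of the counters instead of the cheaper global value.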
318 int (*calculate_pressure)(struct zone *)) in set_pgdat_percpu_threshold()
321 int cpu; in set_pgdat_percpu_threshold()
322 int threshold; in set_pgdat_percpu_threshold() local
323 int i; in set_pgdat_percpu_threshold()
325 for (i = 0; i < pgdat->nr_zones; i++) { in set_pgdat_percpu_threshold()
326 zone = &pgdat->node_zones[i]; in set_pgdat_percpu_threshold()
327 if (!zone->percpu_drift_mark) in set_pgdat_percpu_threshold()
330 threshold = (*calculate_pressure)(zone); in set_pgdat_percpu_threshold()
332 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold in set_pgdat_percpu_threshold()
333 = threshold; in set_pgdat_percpu_threshold()
345 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats; in __mod_zone_page_state()
346 s8 __percpu *p = pcp->vm_stat_diff + item; in __mod_zone_page_state()
352 * atomicity is provided by IRQs being disabled -- either explicitly in __mod_zone_page_state()
361 t = __this_cpu_read(pcp->stat_threshold); in __mod_zone_page_state()
376 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats; in __mod_node_page_state()
377 s8 __percpu *p = pcp->vm_node_stat_diff + item; in __mod_node_page_state()
386 * internally to keep the per-cpu counters compact. in __mod_node_page_state()
388 VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1)); in __mod_node_page_state()
397 t = __this_cpu_read(pcp->stat_threshold); in __mod_node_page_state()
434 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats; in __inc_zone_state()
435 s8 __percpu *p = pcp->vm_stat_diff + item; in __inc_zone_state()
442 t = __this_cpu_read(pcp->stat_threshold); in __inc_zone_state()
447 __this_cpu_write(*p, -overstep); in __inc_zone_state()
455 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats; in __inc_node_state()
456 s8 __percpu *p = pcp->vm_node_stat_diff + item; in __inc_node_state()
465 t = __this_cpu_read(pcp->stat_threshold); in __inc_node_state()
470 __this_cpu_write(*p, -overstep); in __inc_node_state()
490 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats; in __dec_zone_state()
491 s8 __percpu *p = pcp->vm_stat_diff + item; in __dec_zone_state()
498 t = __this_cpu_read(pcp->stat_threshold); in __dec_zone_state()
499 if (unlikely(v < -t)) { in __dec_zone_state()
502 zone_page_state_add(v - overstep, zone, item); in __dec_zone_state()
511 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats; in __dec_node_state()
512 s8 __percpu *p = pcp->vm_node_stat_diff + item; in __dec_node_state()
521 t = __this_cpu_read(pcp->stat_threshold); in __dec_node_state()
522 if (unlikely(v < -t)) { in __dec_node_state()
525 node_page_state_add(v - overstep, pgdat, item); in __dec_node_state()
554 * 1 Overstepping half of threshold
555 * -1 Overstepping minus half of threshold
558 enum zone_stat_item item, long delta, int overstep_mode) in mod_zone_state()
560 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats; in mod_zone_state()
561 s8 __percpu *p = pcp->vm_stat_diff + item; in mod_zone_state()
571 * a counter threshold to the wrong cpu if we get in mod_zone_state()
573 * counter update will apply the threshold again and in mod_zone_state()
574 * therefore bring the counter under the threshold again. in mod_zone_state()
579 t = this_cpu_read(pcp->stat_threshold); in mod_zone_state()
584 int os = overstep_mode * (t >> 1); in mod_zone_state()
588 n = -os; in mod_zone_state()
611 mod_zone_state(page_zone(page), item, -1, -1); in dec_zone_page_state()
616 enum node_stat_item item, int delta, int overstep_mode) in mod_node_state()
618 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats; in mod_node_state()
619 s8 __percpu *p = pcp->vm_node_stat_diff + item; in mod_node_state()
628 * internally to keep the per-cpu counters compact. in mod_node_state()
630 VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1)); in mod_node_state()
640 * a counter threshold to the wrong cpu if we get in mod_node_state()
642 * counter update will apply the threshold again and in mod_node_state()
643 * therefore bring the counter under the threshold again. in mod_node_state()
648 t = this_cpu_read(pcp->stat_threshold); in mod_node_state()
653 int os = overstep_mode * (t >> 1); in mod_node_state()
657 n = -os; in mod_node_state()
685 mod_node_state(page_pgdat(page), item, -1, -1); in dec_node_page_state()
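The overstep logic in mod_zone_state()/mod_node_state() above can be watched in isolation: once the running per-cpu delta crosses the threshold t, the code folds the delta plus half a threshold into the global counter and leaves the per-cpu counter at -t/2 (or +t/2 on the decrementing side), so the next t/2 updates in the same direction stay local. A minimal userspace model of that folding, with plain variables instead of per-cpu data and no atomics, purely illustrative:

	#include <stdio.h>

	static long global_counter;	/* stands in for zone->vm_stat[item] */
	static int pcpu_diff;		/* stands in for the per-cpu vm_stat_diff */

	/* Model of the overstep fold; overstep_mode is +1/-1 as in the kernel code. */
	static void mod_state(long delta, int t, int overstep_mode)
	{
		long n = pcpu_diff + delta;

		if (n > t || n < -t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to the global counter. */
			global_counter += n + os;
			n = -os;
		}
		pcpu_diff = n;
	}

	int main(void)
	{
		int t = 8;	/* a tiny threshold so the fold is easy to watch */

		for (int i = 0; i < 12; i++)
			mod_state(1, t, 1);	/* twelve increments */

		/* After the 9th increment the diff (9 > 8) is folded: the global
		 * counter becomes 9 + 4 = 13 and the per-cpu diff is reset to -4;
		 * three more local increments leave it at -1.  The sum is still 12. */
		printf("global=%ld pcpu=%d total=%ld\n",
		       global_counter, pcpu_diff, global_counter + pcpu_diff);
		return 0;
	}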
773 static int fold_diff(int *zone_diff, int *node_diff) in fold_diff()
775 int i; in fold_diff()
776 int changes = 0; in fold_diff()
808 static int refresh_cpu_vm_stats(bool do_pagesets) in refresh_cpu_vm_stats()
812 int i; in refresh_cpu_vm_stats()
813 int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, }; in refresh_cpu_vm_stats()
814 int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, }; in refresh_cpu_vm_stats()
815 int changes = 0; in refresh_cpu_vm_stats()
818 struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats; in refresh_cpu_vm_stats()
819 struct per_cpu_pages __percpu *pcp = zone->per_cpu_pageset; in refresh_cpu_vm_stats()
822 int v; in refresh_cpu_vm_stats()
824 v = this_cpu_xchg(pzstats->vm_stat_diff[i], 0); in refresh_cpu_vm_stats()
827 atomic_long_add(v, &zone->vm_stat[i]); in refresh_cpu_vm_stats()
831 __this_cpu_write(pcp->expire, 3); in refresh_cpu_vm_stats()
848 if (!__this_cpu_read(pcp->expire) || in refresh_cpu_vm_stats()
849 !__this_cpu_read(pcp->count)) in refresh_cpu_vm_stats()
856 __this_cpu_write(pcp->expire, 0); in refresh_cpu_vm_stats()
860 if (__this_cpu_dec_return(pcp->expire)) { in refresh_cpu_vm_stats()
865 if (__this_cpu_read(pcp->count)) { in refresh_cpu_vm_stats()
874 struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats; in refresh_cpu_vm_stats()
877 int v; in refresh_cpu_vm_stats()
879 v = this_cpu_xchg(p->vm_node_stat_diff[i], 0); in refresh_cpu_vm_stats()
881 atomic_long_add(v, &pgdat->vm_stat[i]); in refresh_cpu_vm_stats()
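The pcp->expire handling above implements lazy draining of remote per-cpu page lists: expire is re-armed to 3 whenever the CPU shows recent vmstat activity on the zone, each refresh interval decrements it, and only when it reaches zero while pages still sit on the pageset of a remote zone are those pages drained back to the buddy allocator. Pagesets for zones local to the CPU are never drained this way; their expire counter is simply cleared.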
896 void cpu_vm_stats_fold(int cpu) in cpu_vm_stats_fold()
900 int i; in cpu_vm_stats_fold()
901 int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, }; in cpu_vm_stats_fold()
902 int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, }; in cpu_vm_stats_fold()
907 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); in cpu_vm_stats_fold()
910 if (pzstats->vm_stat_diff[i]) { in cpu_vm_stats_fold()
911 int v; in cpu_vm_stats_fold()
913 v = pzstats->vm_stat_diff[i]; in cpu_vm_stats_fold()
914 pzstats->vm_stat_diff[i] = 0; in cpu_vm_stats_fold()
915 atomic_long_add(v, &zone->vm_stat[i]); in cpu_vm_stats_fold()
921 if (pzstats->vm_numa_event[i]) { in cpu_vm_stats_fold()
924 v = pzstats->vm_numa_event[i]; in cpu_vm_stats_fold()
925 pzstats->vm_numa_event[i] = 0; in cpu_vm_stats_fold()
935 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu); in cpu_vm_stats_fold()
938 if (p->vm_node_stat_diff[i]) { in cpu_vm_stats_fold()
939 int v; in cpu_vm_stats_fold()
941 v = p->vm_node_stat_diff[i]; in cpu_vm_stats_fold()
942 p->vm_node_stat_diff[i] = 0; in cpu_vm_stats_fold()
943 atomic_long_add(v, &pgdat->vm_stat[i]); in cpu_vm_stats_fold()
953 * pset->vm_stat_diff[] exist.
958 int i; in drain_zonestat()
961 if (pzstats->vm_stat_diff[i]) { in drain_zonestat()
962 v = pzstats->vm_stat_diff[i]; in drain_zonestat()
963 pzstats->vm_stat_diff[i] = 0; in drain_zonestat()
970 if (pzstats->vm_numa_event[i]) { in drain_zonestat()
971 v = pzstats->vm_numa_event[i]; in drain_zonestat()
972 pzstats->vm_numa_event[i] = 0; in drain_zonestat()
986 unsigned long sum_zone_node_page_state(int node, in sum_zone_node_page_state()
989 struct zone *zones = NODE_DATA(node)->node_zones; in sum_zone_node_page_state()
990 int i; in sum_zone_node_page_state()
1000 unsigned long sum_zone_numa_event_state(int node, in sum_zone_numa_event_state()
1003 struct zone *zones = NODE_DATA(node)->node_zones; in sum_zone_numa_event_state()
1005 int i; in sum_zone_numa_event_state()
1019 long x = atomic_long_read(&pgdat->vm_stat[item]); in node_page_state_pages()
1071 unsigned int suitable_order, in fill_contig_page_info()
1074 unsigned int order; in fill_contig_page_info()
1076 info->free_pages = 0; in fill_contig_page_info()
1077 info->free_blocks_total = 0; in fill_contig_page_info()
1078 info->free_blocks_suitable = 0; in fill_contig_page_info()
1089 blocks = data_race(zone->free_area[order].nr_free); in fill_contig_page_info()
1090 info->free_blocks_total += blocks; in fill_contig_page_info()
1093 info->free_pages += blocks << order; in fill_contig_page_info()
1097 info->free_blocks_suitable += blocks << in fill_contig_page_info()
1098 (order - suitable_order); in fill_contig_page_info()
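In the accounting above, every free block of order >= suitable_order contributes 2^(order - suitable_order) blocks of the requested size: for example, a single free order-5 block counts as 8 order-2 blocks toward free_blocks_suitable, while free_pages simply accumulates blocks << order pages per free-list entry.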
1109 static int __fragmentation_index(unsigned int order, struct contig_page_info *info) in __fragmentation_index()
1116 if (!info->free_blocks_total) in __fragmentation_index()
1120 if (info->free_blocks_suitable) in __fragmentation_index()
1121 return -1000; in __fragmentation_index()
1129 …return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_… in __fragmentation_index()
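To make the expression at the end of __fragmentation_index() concrete, here is a small userspace reproduction of the arithmetic (div_u64 replaced by plain 64-bit division, and all inputs invented): lots of free pages split into many sub-order blocks push the index toward 1000 (failure due to fragmentation), while a nearly empty zone pushes it toward 0 (failure due to lack of memory).

	#include <stdio.h>
	#include <stdint.h>

	/* Same shape as the kernel's return statement, with made-up inputs. */
	static long frag_index(unsigned int order, uint64_t free_pages,
			       uint64_t free_blocks_total)
	{
		uint64_t requested = 1ULL << order;

		if (!free_blocks_total)
			return 0;
		return 1000 - (1000 + free_pages * 1000 / requested) / free_blocks_total;
	}

	int main(void)
	{
		/* 1600 free pages scattered over 800 small blocks, order-4 request:
		 * plenty of memory, none of it contiguous -> index 874. */
		printf("%ld\n", frag_index(4, 1600, 800));

		/* 2 free pages in 2 blocks, order-4 request:
		 * the request fails mostly for lack of memory -> index 438. */
		printf("%ld\n", frag_index(4, 2, 2));
		return 0;
	}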
1137 unsigned int extfrag_for_order(struct zone *zone, unsigned int order) in extfrag_for_order()
1145 return div_u64((info.free_pages - in extfrag_for_order()
1151 int fragmentation_index(struct zone *zone, unsigned int order) in fragmentation_index()
1279 /* system-wide enum vm_stat_item counters */
1483 --node; in frag_start()
1509 struct zone *node_zones = pgdat->node_zones; in walk_zones_in_node()
1512 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) { in walk_zones_in_node()
1517 spin_lock_irqsave(&zone->lock, flags); in walk_zones_in_node()
1520 spin_unlock_irqrestore(&zone->lock, flags); in walk_zones_in_node()
1529 int order; in frag_show_print()
1531 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name); in frag_show_print()
1537 seq_printf(m, "%6lu ", data_race(zone->free_area[order].nr_free)); in frag_show_print()
1544 static int frag_show(struct seq_file *m, void *arg) in frag_show()
1554 int order, mtype; in pagetypeinfo_showfree_print()
1558 pgdat->node_id, in pagetypeinfo_showfree_print()
1559 zone->name, in pagetypeinfo_showfree_print()
1567 area = &(zone->free_area[order]); in pagetypeinfo_showfree_print()
1569 list_for_each(curr, &area->free_list[mtype]) { in pagetypeinfo_showfree_print()
1585 spin_unlock_irq(&zone->lock); in pagetypeinfo_showfree_print()
1587 spin_lock_irq(&zone->lock); in pagetypeinfo_showfree_print()
1596 int order; in pagetypeinfo_showfree()
1600 seq_printf(m, "%-43s ", "Free pages count per migrate type at order"); in pagetypeinfo_showfree()
1611 int mtype; in pagetypeinfo_showblockcount_print()
1613 unsigned long start_pfn = zone->zone_start_pfn; in pagetypeinfo_showblockcount_print()
1634 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name); in pagetypeinfo_showblockcount_print()
1643 int mtype; in pagetypeinfo_showblockcount()
1646 seq_printf(m, "\n%-23s", "Number of blocks type "); in pagetypeinfo_showblockcount()
1663 int mtype; in pagetypeinfo_showmixedcount()
1670 seq_printf(m, "\n%-23s", "Number of mixed blocks "); in pagetypeinfo_showmixedcount()
1684 static int pagetypeinfo_show(struct seq_file *m, void *arg) in pagetypeinfo_show()
1689 if (!node_state(pgdat->node_id, N_MEMORY)) in pagetypeinfo_show()
1718 int zid; in is_zone_first_populated()
1721 struct zone *compare = &pgdat->node_zones[zid]; in is_zone_first_populated()
1733 int i; in zoneinfo_show_print()
1734 seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name); in zoneinfo_show_print()
1736 seq_printf(m, "\n per-node stats"); in zoneinfo_show_print()
1742 seq_printf(m, "\n %-12s %lu", node_stat_name(i), in zoneinfo_show_print()
1758 zone->watermark_boost, in zoneinfo_show_print()
1763 zone->spanned_pages, in zoneinfo_show_print()
1764 zone->present_pages, in zoneinfo_show_print()
1770 zone->lowmem_reserve[0]); in zoneinfo_show_print()
1771 for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++) in zoneinfo_show_print()
1772 seq_printf(m, ", %ld", zone->lowmem_reserve[i]); in zoneinfo_show_print()
1782 seq_printf(m, "\n %-12s %lu", zone_stat_name(i), in zoneinfo_show_print()
1788 seq_printf(m, "\n %-12s %lu", numa_stat_name(i), in zoneinfo_show_print()
1797 pcp = per_cpu_ptr(zone->per_cpu_pageset, i); in zoneinfo_show_print()
1806 pcp->count, in zoneinfo_show_print()
1807 pcp->high, in zoneinfo_show_print()
1808 pcp->batch, in zoneinfo_show_print()
1809 pcp->high_min, in zoneinfo_show_print()
1810 pcp->high_max); in zoneinfo_show_print()
1812 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, i); in zoneinfo_show_print()
1813 seq_printf(m, "\n vm stats threshold: %d", in zoneinfo_show_print()
1814 pzstats->stat_threshold); in zoneinfo_show_print()
1820 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES, in zoneinfo_show_print()
1821 zone->zone_start_pfn); in zoneinfo_show_print()
1831 static int zoneinfo_show(struct seq_file *m, void *arg) in zoneinfo_show()
1856 int i; in vmstat_start()
1864 m->private = v; in vmstat_start()
1866 return ERR_PTR(-ENOMEM); in vmstat_start()
1892 v[PGPGIN] /= 2; /* sectors -> kbytes */ in vmstat_start()
1895 return (unsigned long *)m->private + *pos; in vmstat_start()
1903 return (unsigned long *)m->private + *pos; in vmstat_next()
1906 static int vmstat_show(struct seq_file *m, void *arg) in vmstat_show()
1909 unsigned long off = l - (unsigned long *)m->private; in vmstat_show()
1915 if (off == NR_VMSTAT_ITEMS - 1) { in vmstat_show()
1917 * We've come to the end - add any deprecated counters to avoid in vmstat_show()
1927 kfree(m->private); in vmstat_stop()
1928 m->private = NULL; in vmstat_stop()
1941 int sysctl_stat_interval __read_mostly = HZ;
1942 static int vmstat_late_init_done;
1950 int vmstat_refresh(const struct ctl_table *table, int write, in vmstat_refresh()
1954 int err; in vmstat_refresh()
1955 int i; in vmstat_refresh()
2027 static bool need_update(int cpu) in need_update()
2033 struct per_cpu_zonestat *pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); in need_update()
2039 if (memchr_inv(pzstats->vm_stat_diff, 0, sizeof(pzstats->vm_stat_diff))) in need_update()
2042 if (last_pgdat == zone->zone_pgdat) in need_update()
2044 last_pgdat = zone->zone_pgdat; in need_update()
2045 n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu); in need_update()
2046 if (memchr_inv(n->vm_node_stat_diff, 0, sizeof(n->vm_node_stat_diff))) in need_update()
2089 int cpu; in vmstat_shepherd()
2123 int cpu; in start_shepherd_timer()
2145 int node; in init_cpu_node_state()
2153 static int vmstat_cpu_online(unsigned int cpu) in vmstat_cpu_online()
2166 static int vmstat_cpu_down_prep(unsigned int cpu) in vmstat_cpu_down_prep()
2172 static int vmstat_cpu_dead(unsigned int cpu) in vmstat_cpu_dead()
2175 int node; in vmstat_cpu_dead()
2189 static int __init vmstat_late_init(void) in vmstat_late_init()
2203 int ret __maybe_unused; in init_mm_internals()
2239 static int unusable_free_index(unsigned int order, in unusable_free_index()
2243 if (info->free_pages == 0) in unusable_free_index()
2253 …return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pa… in unusable_free_index()
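The unusable-free index above is simply the share of free memory sitting in blocks too small for the request, expressed in thousandths. For example (invented numbers), with 1000 free pages of which two order-7 blocks (2 * 128 = 256 pages) are large enough for an order-7 request, the index is (1000 - 256) * 1000 / 1000 = 744, i.e. 74.4% of the free memory is unusable at that order.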
2260 unsigned int order; in unusable_show_print()
2261 int index; in unusable_show_print()
2265 pgdat->node_id, in unusable_show_print()
2266 zone->name); in unusable_show_print()
2285 static int unusable_show(struct seq_file *m, void *arg) in unusable_show()
2290 if (!node_state(pgdat->node_id, N_MEMORY)) in unusable_show()
2310 unsigned int order; in extfrag_show_print()
2311 int index; in extfrag_show_print()
2317 pgdat->node_id, in extfrag_show_print()
2318 zone->name); in extfrag_show_print()
2331 static int extfrag_show(struct seq_file *m, void *arg) in extfrag_show()
2349 static int __init extfrag_debug_init(void) in extfrag_debug_init()