Lines matching "zone" (apparently mm/show_mem.c), grouped by enclosing function:
In show_node():
   26  static inline void show_node(struct zone *zone)
   29          printk("Node %d ", zone_to_nid(zone));
In si_mem_available():
   38  struct zone *zone;
   40  for_each_zone(zone)
   41          wmark_low += low_wmark_pages(zone);
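The two lines above are the estimation core of si_mem_available(): every zone's low watermark is summed, and the estimate later discounts that amount, since the kernel keeps it free for the allocator. Below is a minimal userspace sketch of the walk-and-accumulate pattern; struct zone, the zones[] table, low_wmark_pages() and the for_each_zone() macro are stand-ins invented here for illustration, not the kernel definitions (those live in include/linux/mmzone.h).

#include <stdio.h>

/* Stand-in for the kernel's struct zone; only the field we need. */
struct zone {
	const char *name;
	unsigned long watermark_low;	/* pages; stand-in for _watermark[WMARK_LOW] */
};

static struct zone zones[] = {
	{ "DMA",      64 },
	{ "DMA32",  1024 },
	{ "Normal", 8192 },
};

/* Stand-in for the kernel's low_wmark_pages() accessor. */
static unsigned long low_wmark_pages(const struct zone *z)
{
	return z->watermark_low;
}

/* Crude analogue of for_each_zone(): iterate the static table. */
#define for_each_zone(z) \
	for ((z) = zones; (z) < zones + sizeof(zones) / sizeof(zones[0]); (z)++)

int main(void)
{
	struct zone *zone;
	unsigned long wmark_low = 0;

	for_each_zone(zone)
		wmark_low += low_wmark_pages(zone);

	printf("sum of low watermarks: %lu pages\n", wmark_low);
	return 0;
}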
In si_meminfo_node():
   98  struct zone *zone = &pgdat->node_zones[zone_type];
   99  managed_pages += zone_managed_pages(zone);
  100  if (is_highmem(zone)) {
  101          managed_highpages += zone_managed_pages(zone);
  102          free_highpages += zone_page_state(zone, NR_FREE_PAGES);
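Here the per-node totals are built by walking the node's zone array once, counting every managed page and additionally binning highmem zones into separate counters. The sketch below mirrors that split with invented numbers; note that the kernel's is_highmem() takes a struct zone *, while this stand-in takes a zone index for brevity.

#include <stdbool.h>
#include <stdio.h>

enum zone_type { ZONE_DMA, ZONE_NORMAL, ZONE_HIGHMEM, MAX_NR_ZONES };

/* Stand-ins for struct zone / struct pglist_data; fields are invented. */
struct zone {
	unsigned long managed_pages;	/* pages owned by the page allocator */
	unsigned long free_pages;	/* stand-in for the NR_FREE_PAGES counter */
};

struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
};

static bool is_highmem(enum zone_type zt)	/* kernel version takes struct zone * */
{
	return zt == ZONE_HIGHMEM;
}

int main(void)
{
	struct pglist_data pgdat = { .node_zones = {
		[ZONE_DMA]     = {   4096,   512 },
		[ZONE_NORMAL]  = { 262144, 90000 },
		[ZONE_HIGHMEM] = {  65536, 40000 },
	} };
	unsigned long managed_pages = 0, managed_highpages = 0, free_highpages = 0;

	for (int zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
		struct zone *zone = &pgdat.node_zones[zone_type];

		managed_pages += zone->managed_pages;
		if (is_highmem(zone_type)) {
			managed_highpages += zone->managed_pages;
			free_highpages += zone->free_pages;
		}
	}
	printf("managed=%lu highmem=%lu freehigh=%lu (pages)\n",
	       managed_pages, managed_highpages, free_highpages);
	return 0;
}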
In show_free_areas(), per-zone statistics report:
  184  struct zone *zone;
  187  for_each_populated_zone(zone) {
  188          if (zone_idx(zone) > max_zone_idx)
  190          if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
  194          free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
  287  for_each_populated_zone(zone) {
  290          if (zone_idx(zone) > max_zone_idx)
  292          if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
  297          free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
  299          show_node(zone);
  323          zone->name,
  324          K(zone_page_state(zone, NR_FREE_PAGES)),
  325          K(zone->watermark_boost),
  326          K(min_wmark_pages(zone)),
  327          K(low_wmark_pages(zone)),
  328          K(high_wmark_pages(zone)),
  329          K(zone->nr_reserved_highatomic),
  330          K(zone->nr_free_highatomic),
  331          K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
  332          K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
  333          K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
  334          K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
  335          K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
  336          K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
  337          K(zone->present_pages),
  338          K(zone_managed_pages(zone)),
  339          K(zone_page_state(zone, NR_MLOCK)),
  342          K(this_cpu_read(zone->per_cpu_pageset->count)),
  343          K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
  346          printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
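Every K(...) argument above is a page count converted to kilobytes for the printout; the kernel defines K(x) as ((x) << (PAGE_SHIFT - 10)). A tiny standalone sketch of that conversion, assuming 4 KiB pages (PAGE_SHIFT = 12):

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed: 4 KiB pages */
#define K(x) ((x) << (PAGE_SHIFT - 10))	/* pages -> kB, as in the kernel */

int main(void)
{
	unsigned long nr_free = 25000;	/* hypothetical NR_FREE_PAGES sample */

	printf("free:%lukB (%lu pages of %u bytes)\n",
	       K(nr_free), nr_free, 1u << PAGE_SHIFT);
	return 0;
}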
show_free_areas(), continued: per-order free list report, taken under zone->lock:
  350  for_each_populated_zone(zone) {
  355          if (zone_idx(zone) > max_zone_idx)
  357          if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
  359          show_node(zone);
  360          printk(KERN_CONT "%s: ", zone->name);
  362          spin_lock_irqsave(&zone->lock, flags);
  364          struct free_area *area = &zone->free_area[order];
  376          spin_unlock_irqrestore(&zone->lock, flags);
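This second pass walks each zone's buddy free lists: free_area[order] holds free blocks of 2^order pages, so the familiar "N*4kB N*8kB ... = totalkB" line is just nr_free scaled by order. A sketch with invented counts follows; NR_PAGE_ORDERS = 11 matches recent kernels (MAX_PAGE_ORDER = 10) but is an assumption here, and the locking is omitted.

#include <stdio.h>

#define NR_PAGE_ORDERS 11	/* orders 0..10; assumed, per MAX_PAGE_ORDER = 10 */

struct free_area {
	unsigned long nr_free;	/* free blocks of 2^order pages */
};

int main(void)
{
	/* Invented per-order counts for one zone. */
	struct free_area free_area[NR_PAGE_ORDERS] = {
		{ 120 }, { 60 }, { 30 }, { 8 }, { 4 },
		{ 2 },   { 1 },  { 0 },  { 1 }, { 0 }, { 1 },
	};
	unsigned long total_pages = 0;

	for (int order = 0; order < NR_PAGE_ORDERS; order++) {
		unsigned long nr = free_area[order].nr_free;

		/* 4 KiB base page assumed, so a 2^order block is (4 << order) kB. */
		printf("%lu*%lukB ", nr, 4UL << order);
		total_pages += nr << order;
	}
	printf("= %lukB\n", total_pages * 4);
	return 0;
}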
In __show_mem():
  400  struct zone *zone;
  405  for_each_populated_zone(zone) {
  407          total += zone->present_pages;
  408          reserved += zone->present_pages - zone_managed_pages(zone);
  410          if (is_highmem(zone))
  411                  highmem += zone->present_pages;
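The closing totals distinguish pages that are physically present in a zone from pages the buddy allocator actually manages; the difference is reported as reserved (memmap and early boot allocations, for example). A standalone sketch of the same arithmetic with invented zone sizes:

#include <stdio.h>

/* Stand-in zone record; only the fields the totals need. */
struct zone {
	unsigned long present_pages;	/* pages physically present */
	unsigned long managed_pages;	/* pages handed to the allocator */
	int highmem;			/* stand-in for is_highmem(zone) */
};

int main(void)
{
	struct zone zones[] = {
		{   4096,   3900, 0 },
		{ 262144, 258000, 0 },
		{  65536,  65000, 1 },
	};
	unsigned long total = 0, reserved = 0, highmem = 0;

	for (unsigned int i = 0; i < sizeof(zones) / sizeof(zones[0]); i++) {
		total += zones[i].present_pages;
		reserved += zones[i].present_pages - zones[i].managed_pages;
		if (zones[i].highmem)
			highmem += zones[i].present_pages;
	}
	printf("%lu pages RAM, %lu reserved, %lu highmem\n",
	       total, reserved, highmem);
	return 0;
}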