// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic show_mem() implementation
 *
 * Copyright (C) 2008 Johannes Weiner <hannes@saeurebad.de>
 */

#include <linux/blkdev.h>
#include <linux/cma.h>
#include <linux/cpuset.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/swap.h>
#include <linux/vmstat.h>

#include "internal.h"
#include "swap.h"

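/*
 * System-wide page accounting. _totalram_pages is read through the
 * totalram_pages() helper; totalreserve_pages and totalcma_pages are
 * updated at boot and during memory hotplug / CMA setup.
 */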
atomic_long_t _totalram_pages __read_mostly;
EXPORT_SYMBOL(_totalram_pages);
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

static inline void show_node(struct zone *zone)
{
	if (IS_ENABLED(CONFIG_NUMA))
		printk("Node %d ", zone_to_nid(zone));
}

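/*
 * si_mem_available() backs MemAvailable in /proc/meminfo: start from free
 * pages minus reserves, then credit the reclaimable portions of the page
 * cache and of slab/kernel memory, discounting min(half, low watermark)
 * from each so reclaim is not assumed to be perfect.
 *
 * Illustrative arithmetic (hypothetical numbers): with 100000 free pages,
 * 20000 reserved, 50000 pages on the file LRUs and a low-watermark sum of
 * 10000, the estimate is (100000 - 20000) + (50000 - 10000) = 120000
 * pages, plus the equivalent discounted slab term.
 */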
long si_mem_available(void)
{
	long available;
	unsigned long pagecache;
	unsigned long wmark_low = 0;
	unsigned long reclaimable;
	struct zone *zone;

	for_each_zone(zone)
		wmark_low += low_wmark_pages(zone);

	/*
	 * Estimate the amount of memory available for userspace allocations,
	 * without causing swapping or OOM.
	 */
	available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;

	/*
	 * Not all the page cache can be freed, otherwise the system will
	 * start swapping or thrashing. Assume at least half of the page
	 * cache, or the low watermark worth of cache, needs to stay.
	 */
	pagecache = global_node_page_state(NR_ACTIVE_FILE) +
		global_node_page_state(NR_INACTIVE_FILE);
	pagecache -= min(pagecache / 2, wmark_low);
	available += pagecache;

	/*
	 * Part of the reclaimable slab and other kernel memory consists of
	 * items that are in use and cannot be freed. As with the page cache
	 * above, discount the smaller of half of it or the low watermark.
	 */
	reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +
		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
	reclaimable -= min(reclaimable / 2, wmark_low);
	available += reclaimable;

	if (available < 0)
		available = 0;
	return available;
}
EXPORT_SYMBOL_GPL(si_mem_available);

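/*
 * Fill a struct sysinfo with system-wide totals; this backs the
 * sysinfo(2) syscall and the basic fields of /proc/meminfo. All values
 * are in units of val->mem_unit (PAGE_SIZE) bytes.
 */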
void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages();
	val->sharedram = global_node_page_state(NR_SHMEM);
	val->freeram = global_zone_page_state(NR_FREE_PAGES);
	val->bufferram = nr_blockdev_pages();
	val->totalhigh = totalhigh_pages();
	val->freehigh = nr_free_highpages();
	val->mem_unit = PAGE_SIZE;
}
EXPORT_SYMBOL(si_meminfo);

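/*
 * Per-node counterpart of si_meminfo(), used for the per-node meminfo
 * files under /sys/devices/system/node/. Totals are derived from the
 * node's zones; "managed" pages exclude pages reserved by the kernel.
 */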
#ifdef CONFIG_NUMA
void si_meminfo_node(struct sysinfo *val, int nid)
{
	int zone_type;		/* needs to be signed */
	unsigned long managed_pages = 0;
	unsigned long managed_highpages = 0;
	unsigned long free_highpages = 0;
	pg_data_t *pgdat = NODE_DATA(nid);

	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
		struct zone *zone = &pgdat->node_zones[zone_type];

		managed_pages += zone_managed_pages(zone);
		if (is_highmem(zone)) {
			managed_highpages += zone_managed_pages(zone);
			free_highpages += zone_page_state(zone, NR_FREE_PAGES);
		}
	}

	val->totalram = managed_pages;
	val->sharedram = node_page_state(pgdat, NR_SHMEM);
	val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
	val->totalhigh = managed_highpages;
	val->freehigh = free_highpages;
	val->mem_unit = PAGE_SIZE;
}
#endif

/*
 * Determine whether the node should be displayed or not, depending on whether
 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
 */
static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
{
	if (!(flags & SHOW_MEM_FILTER_NODES))
		return false;

	/*
	 * No nodemask means the task's implicit NUMA policy applies. Skip
	 * the read_mems_allowed_begin() synchronization; this report does
	 * not have to be precise.
	 */
	if (!nodemask)
		nodemask = &cpuset_current_mems_allowed;

	return !node_isset(nid, *nodemask);
}

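/*
 * Decode a bitmask of migratetypes into one letter per type present,
 * e.g. "(UME)" for unmovable + movable + reclaimable. The mask is built
 * per order by the buddy-list dump in show_free_areas() below.
 */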
static void show_migration_types(unsigned char type)
{
	static const char types[MIGRATE_TYPES] = {
		[MIGRATE_UNMOVABLE]	= 'U',
		[MIGRATE_MOVABLE]	= 'M',
		[MIGRATE_RECLAIMABLE]	= 'E',
		[MIGRATE_HIGHATOMIC]	= 'H',
#ifdef CONFIG_CMA
		[MIGRATE_CMA]		= 'C',
#endif
#ifdef CONFIG_MEMORY_ISOLATION
		[MIGRATE_ISOLATE]	= 'I',
#endif
	};
	char tmp[MIGRATE_TYPES + 1];
	char *p = tmp;
	int i;

	for (i = 0; i < MIGRATE_TYPES; i++) {
		if (type & (1 << i))
			*p++ = types[i];
	}

	*p = '\0';
	printk(KERN_CONT "(%s) ", tmp);
}

static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx)
{
	int zone_idx;

	for (zone_idx = 0; zone_idx <= max_zone_idx; zone_idx++)
		if (zone_managed_pages(pgdat->node_zones + zone_idx))
			return true;
	return false;
}

/*
 * Show the free areas and a summary of system memory state. This runs
 * from atomic contexts (SysRq-m, OOM and allocation-failure reports),
 * so it only takes zone->lock briefly and must not sleep.
 *
 * Bits in @filter:
 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
 *   cpuset.
 */
static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
{
	unsigned long free_pcp = 0;
	int cpu, nid;
	struct zone *zone;
	pg_data_t *pgdat;

	/* Sum the per-cpu pageset counts for all zones we are going to show. */
	for_each_populated_zone(zone) {
		if (zone_idx(zone) > max_zone_idx)
			continue;
		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;

		for_each_online_cpu(cpu)
			free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
	}

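	/* One-line global summary, in pages, mirroring the vmstat counters. */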
	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
		" unevictable:%lu dirty:%lu writeback:%lu\n"
		" slab_reclaimable:%lu slab_unreclaimable:%lu\n"
		" mapped:%lu shmem:%lu pagetables:%lu\n"
		" sec_pagetables:%lu bounce:%lu\n"
		" kernel_misc_reclaimable:%lu\n"
		" free:%lu free_pcp:%lu free_cma:%lu\n",
		global_node_page_state(NR_ACTIVE_ANON),
		global_node_page_state(NR_INACTIVE_ANON),
		global_node_page_state(NR_ISOLATED_ANON),
		global_node_page_state(NR_ACTIVE_FILE),
		global_node_page_state(NR_INACTIVE_FILE),
		global_node_page_state(NR_ISOLATED_FILE),
		global_node_page_state(NR_UNEVICTABLE),
		global_node_page_state(NR_FILE_DIRTY),
		global_node_page_state(NR_WRITEBACK),
		global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B),
		global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B),
		global_node_page_state(NR_FILE_MAPPED),
		global_node_page_state(NR_SHMEM),
		global_node_page_state(NR_PAGETABLE),
		global_node_page_state(NR_SECONDARY_PAGETABLE),
		/* bounce is no longer a tracked counter; 0 keeps the format stable */
		0UL,
		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE),
		global_zone_page_state(NR_FREE_PAGES),
		free_pcp,
		global_zone_page_state(NR_FREE_CMA_PAGES));

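	/* Per-node breakdown: LRU lists, dirty/writeback and kernel memory. */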
	for_each_online_pgdat(pgdat) {
		if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
			continue;
		if (!node_has_managed_zones(pgdat, max_zone_idx))
			continue;

		printk("Node %d"
			" active_anon:%lukB"
			" inactive_anon:%lukB"
			" active_file:%lukB"
			" inactive_file:%lukB"
			" unevictable:%lukB"
			" isolated(anon):%lukB"
			" isolated(file):%lukB"
			" mapped:%lukB"
			" dirty:%lukB"
			" writeback:%lukB"
			" shmem:%lukB"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			" shmem_thp:%lukB"
			" shmem_pmdmapped:%lukB"
			" anon_thp:%lukB"
#endif
			" writeback_tmp:%lukB"
			" kernel_stack:%lukB"
#ifdef CONFIG_SHADOW_CALL_STACK
			" shadow_call_stack:%lukB"
#endif
			" pagetables:%lukB"
			" sec_pagetables:%lukB"
			" all_unreclaimable? %s"
			" Balloon:%lukB"
			"\n",
			pgdat->node_id,
			K(node_page_state(pgdat, NR_ACTIVE_ANON)),
			K(node_page_state(pgdat, NR_INACTIVE_ANON)),
			K(node_page_state(pgdat, NR_ACTIVE_FILE)),
			K(node_page_state(pgdat, NR_INACTIVE_FILE)),
			K(node_page_state(pgdat, NR_UNEVICTABLE)),
			K(node_page_state(pgdat, NR_ISOLATED_ANON)),
			K(node_page_state(pgdat, NR_ISOLATED_FILE)),
			K(node_page_state(pgdat, NR_FILE_MAPPED)),
			K(node_page_state(pgdat, NR_FILE_DIRTY)),
			K(node_page_state(pgdat, NR_WRITEBACK)),
			K(node_page_state(pgdat, NR_SHMEM)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			K(node_page_state(pgdat, NR_SHMEM_THPS)),
			K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
			K(node_page_state(pgdat, NR_ANON_THPS)),
#endif
			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
			node_page_state(pgdat, NR_KERNEL_STACK_KB), /* already in kB */
#ifdef CONFIG_SHADOW_CALL_STACK
			node_page_state(pgdat, NR_KERNEL_SCS_KB), /* already in kB */
#endif
			K(node_page_state(pgdat, NR_PAGETABLE)),
			K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
			str_yes_no(pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES),
			K(node_page_state(pgdat, NR_BALLOON_PAGES)));
	}

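	/* Per-zone breakdown: watermarks, per-cpu pagesets and zone counters. */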
	for_each_populated_zone(zone) {
		int i;

		if (zone_idx(zone) > max_zone_idx)
			continue;
		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;

		free_pcp = 0;
		for_each_online_cpu(cpu)
			free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;

		show_node(zone);
		printk(KERN_CONT
			"%s"
			" free:%lukB"
			" boost:%lukB"
			" min:%lukB"
			" low:%lukB"
			" high:%lukB"
			" reserved_highatomic:%lukB"
			" free_highatomic:%lukB"
			" active_anon:%lukB"
			" inactive_anon:%lukB"
			" active_file:%lukB"
			" inactive_file:%lukB"
			" unevictable:%lukB"
			" writepending:%lukB"
			" present:%lukB"
			" managed:%lukB"
			" mlocked:%lukB"
			" bounce:%lukB"
			" free_pcp:%lukB"
			" local_pcp:%ukB"
			" free_cma:%lukB"
			"\n",
			zone->name,
			K(zone_page_state(zone, NR_FREE_PAGES)),
			K(zone->watermark_boost),
			K(min_wmark_pages(zone)),
			K(low_wmark_pages(zone)),
			K(high_wmark_pages(zone)),
			K(zone->nr_reserved_highatomic),
			K(zone->nr_free_highatomic),
			K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
			K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
			K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
			K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
			K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
			K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
			K(zone->present_pages),
			K(zone_managed_pages(zone)),
			K(zone_page_state(zone, NR_MLOCK)),
			/* bounce is no longer tracked; 0 keeps the format stable */
			0UL,
			K(free_pcp),
			K(this_cpu_read(zone->per_cpu_pageset->count)),
			K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
		printk("lowmem_reserve[]:");
		for (i = 0; i < MAX_NR_ZONES; i++)
			printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
		printk(KERN_CONT "\n");
	}

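	/*
	 * Buddy allocator snapshot: for each zone, print the number of free
	 * blocks at each order, annotated with the migratetypes present,
	 * e.g. "16*4kB (UM)".
	 */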
	for_each_populated_zone(zone) {
		unsigned int order;
		unsigned long nr[NR_PAGE_ORDERS], flags, total = 0;
		unsigned char types[NR_PAGE_ORDERS];

		if (zone_idx(zone) > max_zone_idx)
			continue;
		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;
		show_node(zone);
		printk(KERN_CONT "%s: ", zone->name);

		/*
		 * Snapshot the free lists under zone->lock, then print after
		 * unlocking so the IRQ-disabled section stays short.
		 */
		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < NR_PAGE_ORDERS; order++) {
			struct free_area *area = &zone->free_area[order];
			int type;

			nr[order] = area->nr_free;
			total += nr[order] << order;

			types[order] = 0;
			for (type = 0; type < MIGRATE_TYPES; type++) {
				if (!free_area_empty(area, type))
					types[order] |= 1 << type;
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		for (order = 0; order < NR_PAGE_ORDERS; order++) {
			printk(KERN_CONT "%lu*%lukB ",
			       nr[order], K(1UL) << order);
			if (nr[order])
				show_migration_types(types[order]);
		}
		printk(KERN_CONT "= %lukB\n", K(total));
	}

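	/* Per-node hugetlb pool usage, printed by the hugetlb code itself. */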
	for_each_online_node(nid) {
		if (show_mem_node_skip(filter, nid, nodemask))
			continue;
		hugetlb_show_meminfo_node(nid);
	}

	printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));

	show_swap_cache_info();
}

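/*
 * Entry point for memory-state dumps from OOM reports, allocation
 * failures and SysRq-m. The plain show_mem() wrapper passes
 * (0, NULL, MAX_NR_ZONES - 1) to dump everything unfiltered.
 */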
void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
{
	unsigned long total = 0, reserved = 0, highmem = 0;
	struct zone *zone;

	printk("Mem-Info:\n");
	show_free_areas(filter, nodemask, max_zone_idx);

	for_each_populated_zone(zone) {
		total += zone->present_pages;
		reserved += zone->present_pages - zone_managed_pages(zone);

		if (is_highmem(zone))
			highmem += zone->present_pages;
	}

	printk("%lu pages RAM\n", total);
	printk("%lu pages HighMem/MovableOnly\n", highmem);
	printk("%lu pages reserved\n", reserved);
#ifdef CONFIG_CMA
	printk("%lu pages cma reserved\n", totalcma_pages);
#endif
#ifdef CONFIG_MEMORY_FAILURE
	printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
#endif
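	/*
	 * With CONFIG_MEM_ALLOC_PROFILING, report the top allocation call
	 * sites by outstanding bytes, as tracked by the allocation-tagging
	 * infrastructure in lib/alloc_tag.c.
	 */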
#ifdef CONFIG_MEM_ALLOC_PROFILING
	{
		struct codetag_bytes tags[10];
		size_t i, nr;

		nr = alloc_tag_top_users(tags, ARRAY_SIZE(tags), false);
		if (nr) {
			pr_notice("Memory allocations:\n");
			for (i = 0; i < nr; i++) {
				struct codetag *ct = tags[i].ct;
				struct alloc_tag *tag = ct_to_alloc_tag(ct);
				struct alloc_tag_counters counter = alloc_tag_read(tag);
				char bytes[10];

				string_get_size(counter.bytes, 1, STRING_UNITS_2, bytes, sizeof(bytes));

				/* Same as alloc_tag_to_text() but w/o intermediate buffer */
				if (ct->modname)
					pr_notice("%12s %8llu %s:%u [%s] func:%s\n",
						  bytes, counter.calls, ct->filename,
						  ct->lineno, ct->modname, ct->function);
				else
					pr_notice("%12s %8llu %s:%u func:%s\n",
						  bytes, counter.calls, ct->filename,
						  ct->lineno, ct->function);
			}
		}
	}
#endif
}