// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic show_mem() implementation
 *
 * Copyright (C) 2008 Johannes Weiner <hannes@saeurebad.de>
 */

#include <linux/blkdev.h>
#include <linux/cma.h>
#include <linux/cpuset.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/swap.h>
#include <linux/vmstat.h>

#include "internal.h"
#include "swap.h"

atomic_long_t _totalram_pages __read_mostly;
EXPORT_SYMBOL(_totalram_pages);
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

static inline void show_node(struct zone *zone)
{
	if (IS_ENABLED(CONFIG_NUMA))
		printk("Node %d ", zone_to_nid(zone));
}

long si_mem_available(void)
{
	long available;
	unsigned long pagecache;
	unsigned long wmark_low = 0;
	unsigned long reclaimable;
	struct zone *zone;

	for_each_zone(zone)
		wmark_low += low_wmark_pages(zone);

	/*
	 * Estimate the amount of memory available for userspace allocations,
	 * without causing swapping or OOM.
	 */
	available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;

	/*
	 * Not all the page cache can be freed, otherwise the system will
	 * start swapping or thrashing. Assume at least half of the page
	 * cache, or the low watermark worth of cache, needs to stay.
	 */
	pagecache = global_node_page_state(NR_ACTIVE_FILE) +
		global_node_page_state(NR_INACTIVE_FILE);
	pagecache -= min(pagecache / 2, wmark_low);
	available += pagecache;

	/*
	 * Part of the reclaimable slab and other kernel memory consists of
	 * items that are in use, and cannot be freed. Cap this estimate at the
	 * low watermark.
	 */
	reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +
		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
	reclaimable -= min(reclaimable / 2, wmark_low);
	available += reclaimable;

	if (available < 0)
		available = 0;
	return available;
}
EXPORT_SYMBOL_GPL(si_mem_available);

void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages();
	val->sharedram = global_node_page_state(NR_SHMEM);
	val->freeram = global_zone_page_state(NR_FREE_PAGES);
	val->bufferram = nr_blockdev_pages();
	val->totalhigh = totalhigh_pages();
	val->freehigh = nr_free_highpages();
	val->mem_unit = PAGE_SIZE;
}
EXPORT_SYMBOL(si_meminfo);

#ifdef CONFIG_NUMA
void si_meminfo_node(struct sysinfo *val, int nid)
{
	int zone_type;		/* needs to be signed */
	unsigned long managed_pages = 0;
	unsigned long managed_highpages = 0;
	unsigned long free_highpages = 0;
	pg_data_t *pgdat = NODE_DATA(nid);

	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
		struct zone *zone = &pgdat->node_zones[zone_type];

		managed_pages += zone_managed_pages(zone);
		if (is_highmem(zone)) {
			managed_highpages += zone_managed_pages(zone);
			free_highpages += zone_page_state(zone, NR_FREE_PAGES);
		}
	}

	val->totalram = managed_pages;
	val->sharedram = node_page_state(pgdat, NR_SHMEM);
	val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
	val->totalhigh = managed_highpages;
	val->freehigh = free_highpages;
	val->mem_unit = PAGE_SIZE;
}
#endif
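/*
 * Example (illustrative, not part of this file): the helpers above back
 * userspace interfaces; do_sysinfo() fills struct sysinfo via si_meminfo(),
 * and /proc/meminfo derives MemAvailable from si_mem_available(). A
 * kernel-side caller could take a snapshot like this, using only the
 * fields filled in above (mem_unit is PAGE_SIZE, so counts are in pages):
 *
 *	struct sysinfo si;
 *
 *	si_meminfo(&si);
 *	pr_debug("free %lu kB of %lu kB total\n",
 *		 si.freeram << (PAGE_SHIFT - 10),
 *		 si.totalram << (PAGE_SHIFT - 10));
 */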
/*
 * Determine whether the node should be displayed or not, depending on whether
 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
 */
static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
{
	if (!(flags & SHOW_MEM_FILTER_NODES))
		return false;

	/*
	 * No nodemask supplied, i.e. the implicit memory NUMA policy.
	 * Do not bother with read_mems_allowed_begin() synchronization,
	 * we do not have to be precise here.
	 */
	if (!nodemask)
		nodemask = &cpuset_current_mems_allowed;

	return !node_isset(nid, *nodemask);
}

static void show_migration_types(unsigned char type)
{
	static const char types[MIGRATE_TYPES] = {
		[MIGRATE_UNMOVABLE]	= 'U',
		[MIGRATE_MOVABLE]	= 'M',
		[MIGRATE_RECLAIMABLE]	= 'E',
		[MIGRATE_HIGHATOMIC]	= 'H',
#ifdef CONFIG_CMA
		[MIGRATE_CMA]		= 'C',
#endif
#ifdef CONFIG_MEMORY_ISOLATION
		[MIGRATE_ISOLATE]	= 'I',
#endif
	};
	char tmp[MIGRATE_TYPES + 1];
	char *p = tmp;
	int i;

	for (i = 0; i < MIGRATE_TYPES; i++) {
		if (type & (1 << i))
			*p++ = types[i];
	}

	*p = '\0';
	printk(KERN_CONT "(%s) ", tmp);
}

static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx)
{
	int zone_idx;

	for (zone_idx = 0; zone_idx <= max_zone_idx; zone_idx++)
		if (zone_managed_pages(pgdat->node_zones + zone_idx))
			return true;
	return false;
}
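/*
 * Example (illustrative): show_migration_types() emits one letter per
 * migratetype that still has free pages at a given order, so a buddy
 * summary line such as
 *
 *	Normal: 143*4kB (UME) 82*8kB (UE) 5*16kB (M) ... = 21344kB
 *
 * means the order-0 list has Unmovable, Movable and rEclaimable pages,
 * order-1 has Unmovable and rEclaimable, and so on. The counts above are
 * invented for illustration; see the order loop in show_free_areas().
 */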
/*
 * Show the free area list. (Historically this was wired to the console's
 * Shift+Scroll Lock key; today it is reached via paths such as SysRq-m
 * and allocation-failure reporting.) For each populated zone the buddy
 * free lists are summarised per order, along with global, per-node and
 * per-zone state counters.
 *
 * Bits in @filter:
 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
 *   cpuset.
 */
static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
{
	unsigned long free_pcp = 0;
	int cpu, nid;
	struct zone *zone;
	pg_data_t *pgdat;

	for_each_populated_zone(zone) {
		if (zone_idx(zone) > max_zone_idx)
			continue;
		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;

		for_each_online_cpu(cpu)
			free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
	}

	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
		" unevictable:%lu dirty:%lu writeback:%lu\n"
		" slab_reclaimable:%lu slab_unreclaimable:%lu\n"
		" mapped:%lu shmem:%lu pagetables:%lu\n"
		" sec_pagetables:%lu bounce:%lu\n"
		" kernel_misc_reclaimable:%lu\n"
		" free:%lu free_pcp:%lu free_cma:%lu\n",
		global_node_page_state(NR_ACTIVE_ANON),
		global_node_page_state(NR_INACTIVE_ANON),
		global_node_page_state(NR_ISOLATED_ANON),
		global_node_page_state(NR_ACTIVE_FILE),
		global_node_page_state(NR_INACTIVE_FILE),
		global_node_page_state(NR_ISOLATED_FILE),
		global_node_page_state(NR_UNEVICTABLE),
		global_node_page_state(NR_FILE_DIRTY),
		global_node_page_state(NR_WRITEBACK),
		global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B),
		global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B),
		global_node_page_state(NR_FILE_MAPPED),
		global_node_page_state(NR_SHMEM),
		global_node_page_state(NR_PAGETABLE),
		global_node_page_state(NR_SECONDARY_PAGETABLE),
		0UL,	/* bounce: no longer accounted, printed as 0 */
		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE),
		global_zone_page_state(NR_FREE_PAGES),
		free_pcp,
		global_zone_page_state(NR_FREE_CMA_PAGES));

	for_each_online_pgdat(pgdat) {
		if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
			continue;
		if (!node_has_managed_zones(pgdat, max_zone_idx))
			continue;

		printk("Node %d"
			" active_anon:%lukB"
			" inactive_anon:%lukB"
			" active_file:%lukB"
			" inactive_file:%lukB"
			" unevictable:%lukB"
			" isolated(anon):%lukB"
			" isolated(file):%lukB"
			" mapped:%lukB"
			" dirty:%lukB"
			" writeback:%lukB"
			" shmem:%lukB"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			" shmem_thp:%lukB"
			" shmem_pmdmapped:%lukB"
			" anon_thp:%lukB"
#endif
			" kernel_stack:%lukB"
#ifdef CONFIG_SHADOW_CALL_STACK
			" shadow_call_stack:%lukB"
#endif
			" pagetables:%lukB"
			" sec_pagetables:%lukB"
			" all_unreclaimable? %s"
			" Balloon:%lukB"
			"\n",
			pgdat->node_id,
			K(node_page_state(pgdat, NR_ACTIVE_ANON)),
			K(node_page_state(pgdat, NR_INACTIVE_ANON)),
			K(node_page_state(pgdat, NR_ACTIVE_FILE)),
			K(node_page_state(pgdat, NR_INACTIVE_FILE)),
			K(node_page_state(pgdat, NR_UNEVICTABLE)),
			K(node_page_state(pgdat, NR_ISOLATED_ANON)),
			K(node_page_state(pgdat, NR_ISOLATED_FILE)),
			K(node_page_state(pgdat, NR_FILE_MAPPED)),
			K(node_page_state(pgdat, NR_FILE_DIRTY)),
			K(node_page_state(pgdat, NR_WRITEBACK)),
			K(node_page_state(pgdat, NR_SHMEM)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			K(node_page_state(pgdat, NR_SHMEM_THPS)),
			K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
			K(node_page_state(pgdat, NR_ANON_THPS)),
#endif
			node_page_state(pgdat, NR_KERNEL_STACK_KB),
#ifdef CONFIG_SHADOW_CALL_STACK
			node_page_state(pgdat, NR_KERNEL_SCS_KB),
#endif
			K(node_page_state(pgdat, NR_PAGETABLE)),
			K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
			str_yes_no(pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES),
			K(node_page_state(pgdat, NR_BALLOON_PAGES)));
	}

	for_each_populated_zone(zone) {
		int i;

		if (zone_idx(zone) > max_zone_idx)
			continue;
		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;

		free_pcp = 0;
		for_each_online_cpu(cpu)
			free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;

		show_node(zone);
		printk(KERN_CONT
			"%s"
			" free:%lukB"
			" boost:%lukB"
			" min:%lukB"
			" low:%lukB"
			" high:%lukB"
			" reserved_highatomic:%lukB"
			" free_highatomic:%lukB"
			" active_anon:%lukB"
			" inactive_anon:%lukB"
			" active_file:%lukB"
			" inactive_file:%lukB"
			" unevictable:%lukB"
			" writepending:%lukB"
			" present:%lukB"
			" managed:%lukB"
			" mlocked:%lukB"
			" bounce:%lukB"
			" free_pcp:%lukB"
			" local_pcp:%ukB"
			" free_cma:%lukB"
			"\n",
			zone->name,
			K(zone_page_state(zone, NR_FREE_PAGES)),
			K(zone->watermark_boost),
			K(min_wmark_pages(zone)),
			K(low_wmark_pages(zone)),
			K(high_wmark_pages(zone)),
			K(zone->nr_reserved_highatomic),
			K(zone->nr_free_highatomic),
			K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
			K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
			K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
			K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
			K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
			K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
			K(zone->present_pages),
			K(zone_managed_pages(zone)),
			K(zone_page_state(zone, NR_MLOCK)),
			0UL,	/* bounce: no longer accounted, printed as 0 */
			K(free_pcp),
			K(this_cpu_read(zone->per_cpu_pageset->count)),
			K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
		printk("lowmem_reserve[]:");
		for (i = 0; i < MAX_NR_ZONES; i++)
			printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
		printk(KERN_CONT "\n");
	}
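	/*
	 * Example (illustrative): unlike the kB fields above, lowmem_reserve[]
	 * is printed in pages. A line such as
	 *
	 *	lowmem_reserve[]: 0 2925 15837 15837
	 *
	 * (values invented) means that an allocation whose highest usable
	 * zone is index i must leave lowmem_reserve[i] pages free in this
	 * zone before falling back here; the array is computed by
	 * setup_per_zone_lowmem_reserve().
	 */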
	for_each_populated_zone(zone) {
		unsigned int order;
		unsigned long nr[NR_PAGE_ORDERS], flags, total = 0;
		unsigned char types[NR_PAGE_ORDERS];

		if (zone_idx(zone) > max_zone_idx)
			continue;
		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;
		show_node(zone);
		printk(KERN_CONT "%s: ", zone->name);

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < NR_PAGE_ORDERS; order++) {
			struct free_area *area = &zone->free_area[order];
			int type;

			nr[order] = area->nr_free;
			total += nr[order] << order;

			types[order] = 0;
			for (type = 0; type < MIGRATE_TYPES; type++) {
				if (!free_area_empty(area, type))
					types[order] |= 1 << type;
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		for (order = 0; order < NR_PAGE_ORDERS; order++) {
			printk(KERN_CONT "%lu*%lukB ",
			       nr[order], K(1UL) << order);
			if (nr[order])
				show_migration_types(types[order]);
		}
		printk(KERN_CONT "= %lukB\n", K(total));
	}

	for_each_online_node(nid) {
		if (show_mem_node_skip(filter, nid, nodemask))
			continue;
		hugetlb_show_meminfo_node(nid);
	}

	printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));

	show_swap_cache_info();
}

void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
{
	unsigned long total = 0, reserved = 0, highmem = 0;
	struct zone *zone;

	printk("Mem-Info:\n");
	show_free_areas(filter, nodemask, max_zone_idx);

	for_each_populated_zone(zone) {
		total += zone->present_pages;
		reserved += zone->present_pages - zone_managed_pages(zone);

		if (is_highmem(zone))
			highmem += zone->present_pages;
	}

	printk("%lu pages RAM\n", total);
	printk("%lu pages HighMem/MovableOnly\n", highmem);
	printk("%lu pages reserved\n", reserved);
#ifdef CONFIG_CMA
	printk("%lu pages cma reserved\n", totalcma_pages);
#endif
#ifdef CONFIG_MEMORY_FAILURE
	printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
#endif
#ifdef CONFIG_MEM_ALLOC_PROFILING
	{
		struct codetag_bytes tags[10];
		size_t i, nr;

		nr = alloc_tag_top_users(tags, ARRAY_SIZE(tags), false);
		if (nr) {
			pr_notice("Memory allocations:\n");
			for (i = 0; i < nr; i++) {
				struct codetag *ct = tags[i].ct;
				struct alloc_tag *tag = ct_to_alloc_tag(ct);
				struct alloc_tag_counters counter = alloc_tag_read(tag);
				char bytes[10];

				string_get_size(counter.bytes, 1, STRING_UNITS_2, bytes, sizeof(bytes));

				/* Same as alloc_tag_to_text() but w/o intermediate buffer */
				if (ct->modname)
					pr_notice("%12s %8llu %s:%u [%s] func:%s\n",
						  bytes, counter.calls, ct->filename,
						  ct->lineno, ct->modname, ct->function);
				else
					pr_notice("%12s %8llu %s:%u func:%s\n",
						  bytes, counter.calls, ct->filename,
						  ct->lineno, ct->function);
			}
		}
	}
#endif
}
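/*
 * Example (illustrative): callers normally reach __show_mem() through the
 * show_mem() wrapper in include/linux/mm.h, which at the time of writing
 * amounts to roughly
 *
 *	__show_mem(0, NULL, MAX_NR_ZONES - 1);
 *
 * i.e. no node filtering and all zones, as used for instance by the
 * SysRq-m handler and the OOM killer's dump path. Allocation-failure
 * warnings instead pass a filter, nodemask and highest usable zone so
 * the report matches the failing allocation context.
 */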