// SPDX-License-Identifier: GPL-2.0-only
#include <linux/alloc_tag.h>
#include <linux/execmem.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/page_ext.h>
#include <linux/proc_fs.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#define ALLOCINFO_FILE_NAME		"allocinfo"
#define MODULE_ALLOC_TAG_VMAP_SIZE	(100000UL * sizeof(struct alloc_tag))
#define SECTION_START(NAME)	(CODETAG_SECTION_START_PREFIX NAME)
#define SECTION_STOP(NAME)	(CODETAG_SECTION_STOP_PREFIX NAME)

#ifdef CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT
static bool mem_profiling_support = true;
#else
static bool mem_profiling_support;
#endif

static struct codetag_type *alloc_tag_cttype;

DEFINE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
EXPORT_SYMBOL(_shared_alloc_tag);

DEFINE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
			 mem_alloc_profiling_key);
EXPORT_SYMBOL(mem_alloc_profiling_key);

DEFINE_STATIC_KEY_FALSE(mem_profiling_compressed);

struct alloc_tag_kernel_section kernel_tags = { NULL, 0 };
unsigned long alloc_tag_ref_mask;
int alloc_tag_ref_offs;

struct allocinfo_private {
	struct codetag_iterator iter;
	bool print_header;
};

static void *allocinfo_start(struct seq_file *m, loff_t *pos)
{
	struct allocinfo_private *priv;
	struct codetag *ct;
	loff_t node = *pos;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	m->private = priv;
	if (!priv)
		return NULL;

	priv->print_header = (node == 0);
	codetag_lock_module_list(alloc_tag_cttype, true);
	priv->iter = codetag_get_ct_iter(alloc_tag_cttype);
	while ((ct = codetag_next_ct(&priv->iter)) != NULL && node)
		node--;

	return ct ? priv : NULL;
}

static void *allocinfo_next(struct seq_file *m, void *arg, loff_t *pos)
{
	struct allocinfo_private *priv = (struct allocinfo_private *)arg;
	struct codetag *ct = codetag_next_ct(&priv->iter);

	(*pos)++;
	if (!ct)
		return NULL;

	return priv;
}

static void allocinfo_stop(struct seq_file *m, void *arg)
{
	struct allocinfo_private *priv = (struct allocinfo_private *)m->private;

	if (priv) {
		codetag_lock_module_list(alloc_tag_cttype, false);
		kfree(priv);
	}
}

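/* Print the header emitted once at the beginning of /proc/allocinfo output. */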
static void print_allocinfo_header(struct seq_buf *buf)
{
	/* Output format version, so we can change it. */
	seq_buf_printf(buf, "allocinfo - version: 1.0\n");
	seq_buf_printf(buf, "# <size> <calls> <tag info>\n");
}

static void alloc_tag_to_text(struct seq_buf *out, struct codetag *ct)
{
	struct alloc_tag *tag = ct_to_alloc_tag(ct);
	struct alloc_tag_counters counter = alloc_tag_read(tag);
	s64 bytes = counter.bytes;

	seq_buf_printf(out, "%12lli %8llu ", bytes, counter.calls);
	codetag_to_text(out, ct);
	seq_buf_putc(out, ' ');
	seq_buf_putc(out, '\n');
}

static int allocinfo_show(struct seq_file *m, void *arg)
{
	struct allocinfo_private *priv = (struct allocinfo_private *)arg;
	char *bufp;
	size_t n = seq_get_buf(m, &bufp);
	struct seq_buf buf;

	seq_buf_init(&buf, bufp, n);
	if (priv->print_header) {
		print_allocinfo_header(&buf);
		priv->print_header = false;
	}
	alloc_tag_to_text(&buf, priv->iter.ct);
	seq_commit(m, seq_buf_used(&buf));
	return 0;
}

static const struct seq_operations allocinfo_seq_op = {
	.start	= allocinfo_start,
	.next	= allocinfo_next,
	.stop	= allocinfo_stop,
	.show	= allocinfo_show,
};

size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep)
{
	struct codetag_iterator iter;
	struct codetag *ct;
	struct codetag_bytes n;
	unsigned int i, nr = 0;

	if (can_sleep)
		codetag_lock_module_list(alloc_tag_cttype, true);
	else if (!codetag_trylock_module_list(alloc_tag_cttype))
		return 0;

	iter = codetag_get_ct_iter(alloc_tag_cttype);
	while ((ct = codetag_next_ct(&iter))) {
		struct alloc_tag_counters counter = alloc_tag_read(ct_to_alloc_tag(ct));

		n.ct = ct;
		n.bytes = counter.bytes;

		for (i = 0; i < nr; i++)
			if (n.bytes > tags[i].bytes)
				break;

		if (i < count) {
			nr -= nr == count;
			memmove(&tags[i + 1],
				&tags[i],
				sizeof(tags[0]) * (nr - i));
			nr++;
			tags[i] = n;
		}
	}

	codetag_lock_module_list(alloc_tag_cttype, false);

	return nr;
}

void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
{
	int i;
	struct alloc_tag *tag;
	unsigned int nr_pages = 1 << new_order;

	if (!mem_alloc_profiling_enabled())
		return;

	tag = __pgalloc_tag_get(&folio->page);
	if (!tag)
		return;

	for (i = nr_pages; i < (1 << old_order); i += nr_pages) {
		union pgtag_ref_handle handle;
		union codetag_ref ref;

		if (get_page_tag_ref(folio_page(folio, i), &ref, &handle)) {
			/* Set new reference to point to the original tag */
			alloc_tag_ref_set(&ref, tag);
			update_page_tag_ref(handle, &ref);
			put_page_tag_ref(handle);
		}
	}
}

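/*
 * Swap the allocation tags of two folios so that the profiling counters keep
 * following the data, e.g. when @old is being replaced by @new during folio
 * migration.
 */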
void pgalloc_tag_swap(struct folio *new, struct folio *old)
{
	union pgtag_ref_handle handle_old, handle_new;
	union codetag_ref ref_old, ref_new;
	struct alloc_tag *tag_old, *tag_new;

	if (!mem_alloc_profiling_enabled())
		return;

	tag_old = __pgalloc_tag_get(&old->page);
	if (!tag_old)
		return;
	tag_new = __pgalloc_tag_get(&new->page);
	if (!tag_new)
		return;

	if (!get_page_tag_ref(&old->page, &ref_old, &handle_old))
		return;
	if (!get_page_tag_ref(&new->page, &ref_new, &handle_new)) {
		put_page_tag_ref(handle_old);
		return;
	}

	/*
	 * Clear tag references to avoid debug warning when using
	 * __alloc_tag_ref_set() with non-empty reference.
	 */
	set_codetag_empty(&ref_old);
	set_codetag_empty(&ref_new);

	/* swap tags */
	__alloc_tag_ref_set(&ref_old, tag_new);
	update_page_tag_ref(handle_old, &ref_old);
	__alloc_tag_ref_set(&ref_new, tag_old);
	update_page_tag_ref(handle_new, &ref_new);

	put_page_tag_ref(handle_old);
	put_page_tag_ref(handle_new);
}

static void shutdown_mem_profiling(bool remove_file)
{
	if (mem_alloc_profiling_enabled())
		static_branch_disable(&mem_alloc_profiling_key);

	if (!mem_profiling_support)
		return;

	if (remove_file)
		remove_proc_entry(ALLOCINFO_FILE_NAME, NULL);
	mem_profiling_support = false;
}

static void __init procfs_init(void)
{
	if (!mem_profiling_support)
		return;

	if (!proc_create_seq(ALLOCINFO_FILE_NAME, 0400, NULL, &allocinfo_seq_op)) {
		pr_err("Failed to create %s file\n", ALLOCINFO_FILE_NAME);
		shutdown_mem_profiling(false);
	}
}

void __init alloc_tag_sec_init(void)
{
	struct alloc_tag *last_codetag;

	if (!mem_profiling_support)
		return;

	if (!static_key_enabled(&mem_profiling_compressed))
		return;

	kernel_tags.first_tag = (struct alloc_tag *)kallsyms_lookup_name(
					SECTION_START(ALLOC_TAG_SECTION_NAME));
	last_codetag = (struct alloc_tag *)kallsyms_lookup_name(
					SECTION_STOP(ALLOC_TAG_SECTION_NAME));
	kernel_tags.count = last_codetag - kernel_tags.first_tag;

	/* Check if kernel tags fit into page flags */
	if (kernel_tags.count > (1UL << NR_UNUSED_PAGEFLAG_BITS)) {
		shutdown_mem_profiling(false); /* allocinfo file does not exist yet */
		pr_err("%lu allocation tags cannot be referenced using %d available page flag bits. Memory allocation profiling is disabled!\n",
			kernel_tags.count, NR_UNUSED_PAGEFLAG_BITS);
		return;
	}

	alloc_tag_ref_offs = (LRU_REFS_PGOFF - NR_UNUSED_PAGEFLAG_BITS);
	alloc_tag_ref_mask = ((1UL << NR_UNUSED_PAGEFLAG_BITS) - 1);
	pr_debug("Memory allocation profiling compression is using %d page flag bits!\n",
		 NR_UNUSED_PAGEFLAG_BITS);
}

#ifdef CONFIG_MODULES

static struct maple_tree mod_area_mt = MTREE_INIT(mod_area_mt, MT_FLAGS_ALLOC_RANGE);
static struct vm_struct *vm_module_tags;
/* A dummy object used to indicate an unloaded module */
static struct module unloaded_mod;
/* A dummy object used to indicate a module prepended area */
static struct module prepend_mod;

struct alloc_tag_module_section module_tags;

static inline unsigned long alloc_tag_align(unsigned long val)
{
	if (!static_key_enabled(&mem_profiling_compressed)) {
		/* No alignment requirements when we are not indexing the tags */
		return val;
	}

	if (val % sizeof(struct alloc_tag) == 0)
		return val;
	return ((val / sizeof(struct alloc_tag)) + 1) * sizeof(struct alloc_tag);
}

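/*
 * With compressed tag references a tag is identified by its index, i.e. its
 * offset from the section start in units of sizeof(struct alloc_tag).  The
 * requested alignment is usable only if it keeps every tag on such a
 * boundary, and any prepended padding must consume whole alloc_tag-sized
 * blocks.
 */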
static bool ensure_alignment(unsigned long align, unsigned int *prepend)
{
	if (!static_key_enabled(&mem_profiling_compressed)) {
		/* No alignment requirements when we are not indexing the tags */
		return true;
	}

	/*
	 * If alloc_tag size is not a multiple of required alignment, tag
	 * indexing does not work.
	 */
	if (!IS_ALIGNED(sizeof(struct alloc_tag), align))
		return false;

	/* Ensure prepend consumes multiple of alloc_tag-sized blocks */
	if (*prepend)
		*prepend = alloc_tag_align(*prepend);

	return true;
}

static inline bool tags_addressable(void)
{
	unsigned long tag_idx_count;

	if (!static_key_enabled(&mem_profiling_compressed))
		return true; /* with page_ext tags are always addressable */

	tag_idx_count = CODETAG_ID_FIRST + kernel_tags.count +
			module_tags.size / sizeof(struct alloc_tag);

	return tag_idx_count < (1UL << NR_UNUSED_PAGEFLAG_BITS);
}

static bool needs_section_mem(struct module *mod, unsigned long size)
{
	if (!mem_profiling_support)
		return false;

	return size >= sizeof(struct alloc_tag);
}

static bool clean_unused_counters(struct alloc_tag *start_tag,
				  struct alloc_tag *end_tag)
{
	struct alloc_tag *tag;
	bool ret = true;

	for (tag = start_tag; tag <= end_tag; tag++) {
		struct alloc_tag_counters counter;

		if (!tag->counters)
			continue;

		counter = alloc_tag_read(tag);
		if (!counter.bytes) {
			free_percpu(tag->counters);
			tag->counters = NULL;
		} else {
			ret = false;
		}
	}

	return ret;
}

/* Called with mod_area_mt locked */
static void clean_unused_module_areas_locked(void)
{
	MA_STATE(mas, &mod_area_mt, 0, module_tags.size);
	struct module *val;

	mas_for_each(&mas, val, module_tags.size) {
		struct alloc_tag *start_tag;
		struct alloc_tag *end_tag;

		if (val != &unloaded_mod)
			continue;

		/* Release area if all tags are unused */
		start_tag = (struct alloc_tag *)(module_tags.start_addr + mas.index);
		end_tag = (struct alloc_tag *)(module_tags.start_addr + mas.last);
		if (clean_unused_counters(start_tag, end_tag))
			mas_erase(&mas);
	}
}

/* Called with mod_area_mt locked */
static bool find_aligned_area(struct ma_state *mas, unsigned long section_size,
			      unsigned long size, unsigned int prepend,
			      unsigned long align)
{
	bool cleanup_done = false;

repeat:
	/* Try finding exact size and hope the start is aligned */
	if (!mas_empty_area(mas, 0, section_size - 1, prepend + size)) {
		if (IS_ALIGNED(mas->index + prepend, align))
			return true;

		/* Try finding larger area to align later */
		mas_reset(mas);
		if (!mas_empty_area(mas, 0, section_size - 1,
				    size + prepend + align - 1))
			return true;
	}

	/* No free area, try to clean up stale data and repeat the search once */
	if (!cleanup_done) {
		clean_unused_module_areas_locked();
		cleanup_done = true;
		mas_reset(mas);
		goto repeat;
	}

	return false;
}

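/*
 * Allocate and map pages to back the part of the module tags area that
 * module_tags.size has grown into, then extend the KASAN module shadow and
 * unpoison the populated range.
 */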
static int vm_module_tags_populate(void)
{
	unsigned long phys_end = ALIGN_DOWN(module_tags.start_addr, PAGE_SIZE) +
				 (vm_module_tags->nr_pages << PAGE_SHIFT);
	unsigned long new_end = module_tags.start_addr + module_tags.size;

	if (phys_end < new_end) {
		struct page **next_page = vm_module_tags->pages + vm_module_tags->nr_pages;
		unsigned long old_shadow_end = ALIGN(phys_end, MODULE_ALIGN);
		unsigned long new_shadow_end = ALIGN(new_end, MODULE_ALIGN);
		unsigned long more_pages;
		unsigned long nr = 0;

		more_pages = ALIGN(new_end - phys_end, PAGE_SIZE) >> PAGE_SHIFT;
		while (nr < more_pages) {
			unsigned long allocated;

			allocated = alloc_pages_bulk_node(GFP_KERNEL | __GFP_NOWARN,
							  NUMA_NO_NODE, more_pages - nr, next_page + nr);

			if (!allocated)
				break;
			nr += allocated;
		}

		if (nr < more_pages ||
		    vmap_pages_range(phys_end, phys_end + (nr << PAGE_SHIFT), PAGE_KERNEL,
				     next_page, PAGE_SHIFT) < 0) {
			/* Clean up and error out */
			for (int i = 0; i < nr; i++)
				__free_page(next_page[i]);
			return -ENOMEM;
		}

		vm_module_tags->nr_pages += nr;

		/*
		 * Kasan allocates 1 byte of shadow for every 8 bytes of data.
		 * When kasan_alloc_module_shadow allocates shadow memory,
		 * its unit of allocation is a page.
		 * Therefore, here we need to align to MODULE_ALIGN.
		 */
		if (old_shadow_end < new_shadow_end)
			kasan_alloc_module_shadow((void *)old_shadow_end,
						  new_shadow_end - old_shadow_end,
						  GFP_KERNEL);
	}

	/*
	 * Mark the pages as accessible, now that they are mapped.
	 * With hardware tag-based KASAN, marking is skipped for
	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
	 */
	kasan_unpoison_vmalloc((void *)module_tags.start_addr,
			       new_end - module_tags.start_addr,
			       KASAN_VMALLOC_PROT_NORMAL);

	return 0;
}

static void *reserve_module_tags(struct module *mod, unsigned long size,
				 unsigned int prepend, unsigned long align)
{
	unsigned long section_size = module_tags.end_addr - module_tags.start_addr;
	MA_STATE(mas, &mod_area_mt, 0, section_size - 1);
	unsigned long offset;
	void *ret = NULL;

	/* If not even one tag fits, return an error */
	if (size < sizeof(struct alloc_tag))
		return ERR_PTR(-EINVAL);

	/*
	 * align is always a power of 2, so we can use IS_ALIGNED and ALIGN.
	 * align of 0 or 1 means no alignment; to simplify, set it to 1.
	 */
	if (!align)
		align = 1;

	if (!ensure_alignment(align, &prepend)) {
		shutdown_mem_profiling(true);
		pr_err("%s: alignment %lu is incompatible with allocation tag indexing. Memory allocation profiling is disabled!\n",
			mod->name, align);
		return ERR_PTR(-EINVAL);
	}

	mas_lock(&mas);
	if (!find_aligned_area(&mas, section_size, size, prepend, align)) {
		ret = ERR_PTR(-ENOMEM);
		goto unlock;
	}

	/* Mark found area as reserved */
	offset = mas.index;
	offset += prepend;
	offset = ALIGN(offset, align);
	if (offset != mas.index) {
		unsigned long pad_start = mas.index;

		mas.last = offset - 1;
		mas_store(&mas, &prepend_mod);
		if (mas_is_err(&mas)) {
			ret = ERR_PTR(xa_err(mas.node));
			goto unlock;
		}
		mas.index = offset;
		mas.last = offset + size - 1;
		mas_store(&mas, mod);
		if (mas_is_err(&mas)) {
			mas.index = pad_start;
			mas_erase(&mas);
			ret = ERR_PTR(xa_err(mas.node));
		}
	} else {
		mas.last = offset + size - 1;
		mas_store(&mas, mod);
		if (mas_is_err(&mas))
			ret = ERR_PTR(xa_err(mas.node));
	}
unlock:
	mas_unlock(&mas);

	if (IS_ERR(ret))
		return ret;

	if (module_tags.size < offset + size) {
		int grow_res;

		module_tags.size = offset + size;
		if (mem_alloc_profiling_enabled() && !tags_addressable()) {
			shutdown_mem_profiling(true);
			pr_warn("With module %s there are too many tags to fit in %d page flag bits. Memory allocation profiling is disabled!\n",
				mod->name, NR_UNUSED_PAGEFLAG_BITS);
		}

		grow_res = vm_module_tags_populate();
		if (grow_res) {
			shutdown_mem_profiling(true);
			pr_err("Failed to allocate memory for allocation tags in the module %s. Memory allocation profiling is disabled!\n",
			       mod->name);
			return ERR_PTR(grow_res);
		}
	}

	return (struct alloc_tag *)(module_tags.start_addr + offset);
}

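/*
 * Undo reserve_module_tags() on module unload: drop per-CPU counters that
 * have no outstanding allocations and free the area, or keep the area marked
 * as belonging to an unloaded module while some of its tags still account
 * live allocations.
 */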
static void release_module_tags(struct module *mod, bool used)
{
	MA_STATE(mas, &mod_area_mt, module_tags.size, module_tags.size);
	struct alloc_tag *start_tag;
	struct alloc_tag *end_tag;
	struct module *val;

	mas_lock(&mas);
	mas_for_each_rev(&mas, val, 0)
		if (val == mod)
			break;

	if (!val) /* module not found */
		goto out;

	if (!used)
		goto release_area;

	start_tag = (struct alloc_tag *)(module_tags.start_addr + mas.index);
	end_tag = (struct alloc_tag *)(module_tags.start_addr + mas.last);
	if (!clean_unused_counters(start_tag, end_tag)) {
		struct alloc_tag *tag;

		for (tag = start_tag; tag <= end_tag; tag++) {
			struct alloc_tag_counters counter;

			if (!tag->counters)
				continue;

			counter = alloc_tag_read(tag);
			pr_info("%s:%u module %s func:%s has %llu allocated at module unload\n",
				tag->ct.filename, tag->ct.lineno, tag->ct.modname,
				tag->ct.function, counter.bytes);
		}
	} else {
		used = false;
	}
release_area:
	mas_store(&mas, used ? &unloaded_mod : NULL);
	val = mas_prev_range(&mas, 0);
	if (val == &prepend_mod)
		mas_store(&mas, NULL);
out:
	mas_unlock(&mas);
}

static void load_module(struct module *mod, struct codetag *start, struct codetag *stop)
{
	/* Allocate module alloc_tag percpu counters */
	struct alloc_tag *start_tag;
	struct alloc_tag *stop_tag;
	struct alloc_tag *tag;

	if (!mod)
		return;

	start_tag = ct_to_alloc_tag(start);
	stop_tag = ct_to_alloc_tag(stop);
	for (tag = start_tag; tag < stop_tag; tag++) {
		WARN_ON(tag->counters);
		tag->counters = alloc_percpu(struct alloc_tag_counters);
		if (!tag->counters) {
			while (--tag >= start_tag) {
				free_percpu(tag->counters);
				tag->counters = NULL;
			}
			shutdown_mem_profiling(true);
			pr_err("Failed to allocate memory for allocation tag percpu counters in the module %s. Memory allocation profiling is disabled!\n",
			       mod->name);
			break;
		}
	}
}

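/*
 * Codetag callback invoked when the module backing a reserved tag area is
 * replaced: re-point the maple tree entry at the new struct module.
 */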
static void replace_module(struct module *mod, struct module *new_mod)
{
	MA_STATE(mas, &mod_area_mt, 0, module_tags.size);
	struct module *val;

	mas_lock(&mas);
	mas_for_each(&mas, val, module_tags.size) {
		if (val != mod)
			continue;

		mas_store_gfp(&mas, new_mod, GFP_KERNEL);
		break;
	}
	mas_unlock(&mas);
}

static int __init alloc_mod_tags_mem(void)
{
	/* Map space to copy allocation tags */
	vm_module_tags = execmem_vmap(MODULE_ALLOC_TAG_VMAP_SIZE);
	if (!vm_module_tags) {
		pr_err("Failed to map %lu bytes for module allocation tags\n",
			MODULE_ALLOC_TAG_VMAP_SIZE);
		module_tags.start_addr = 0;
		return -ENOMEM;
	}

	vm_module_tags->pages = kmalloc_array(get_vm_area_size(vm_module_tags) >> PAGE_SHIFT,
					      sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
	if (!vm_module_tags->pages) {
		free_vm_area(vm_module_tags);
		return -ENOMEM;
	}

	module_tags.start_addr = (unsigned long)vm_module_tags->addr;
	module_tags.end_addr = module_tags.start_addr + MODULE_ALLOC_TAG_VMAP_SIZE;
	/* Ensure the base is alloc_tag aligned when required for indexing */
	module_tags.start_addr = alloc_tag_align(module_tags.start_addr);

	return 0;
}

static void __init free_mod_tags_mem(void)
{
	int i;

	module_tags.start_addr = 0;
	for (i = 0; i < vm_module_tags->nr_pages; i++)
		__free_page(vm_module_tags->pages[i]);
	kfree(vm_module_tags->pages);
	free_vm_area(vm_module_tags);
}

#else /* CONFIG_MODULES */

static inline int alloc_mod_tags_mem(void) { return 0; }
static inline void free_mod_tags_mem(void) {}

#endif /* CONFIG_MODULES */

/* See: Documentation/mm/allocation-profiling.rst */
static int __init setup_early_mem_profiling(char *str)
{
	bool compressed = false;
	bool enable;

	if (!str || !str[0])
		return -EINVAL;

	if (!strncmp(str, "never", 5)) {
		enable = false;
		mem_profiling_support = false;
		pr_info("Memory allocation profiling is disabled!\n");
	} else {
		char *token = strsep(&str, ",");

		if (kstrtobool(token, &enable))
			return -EINVAL;

		if (str) {
			if (strcmp(str, "compressed"))
				return -EINVAL;

			compressed = true;
		}
		mem_profiling_support = true;
		pr_info("Memory allocation profiling is enabled %s compression and is turned %s!\n",
			compressed ? "with" : "without", enable ? "on" : "off");
	}

	if (enable != mem_alloc_profiling_enabled()) {
		if (enable)
			static_branch_enable(&mem_alloc_profiling_key);
		else
			static_branch_disable(&mem_alloc_profiling_key);
	}
	if (compressed != static_key_enabled(&mem_profiling_compressed)) {
		if (compressed)
			static_branch_enable(&mem_profiling_compressed);
		else
			static_branch_disable(&mem_profiling_compressed);
	}

	return 0;
}
early_param("sysctl.vm.mem_profiling", setup_early_mem_profiling);

static __init bool need_page_alloc_tagging(void)
{
	if (static_key_enabled(&mem_profiling_compressed))
		return false;

	return mem_profiling_support;
}

static __init void init_page_alloc_tagging(void)
{
}

struct page_ext_operations page_alloc_tagging_ops = {
	.size = sizeof(union codetag_ref),
	.need = need_page_alloc_tagging,
	.init = init_page_alloc_tagging,
};
EXPORT_SYMBOL(page_alloc_tagging_ops);

#ifdef CONFIG_SYSCTL
static struct ctl_table memory_allocation_profiling_sysctls[] = {
	{
		.procname	= "mem_profiling",
		.data		= &mem_alloc_profiling_key,
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
		.mode		= 0444,
#else
		.mode		= 0644,
#endif
		.proc_handler	= proc_do_static_key,
	},
};

static void __init sysctl_init(void)
{
	if (!mem_profiling_support)
		memory_allocation_profiling_sysctls[0].mode = 0444;

	register_sysctl_init("vm", memory_allocation_profiling_sysctls);
}
#else /* CONFIG_SYSCTL */
static inline void sysctl_init(void) {}
#endif /* CONFIG_SYSCTL */

static int __init alloc_tag_init(void)
{
	const struct codetag_type_desc desc = {
		.section		= ALLOC_TAG_SECTION_NAME,
		.tag_size		= sizeof(struct alloc_tag),
#ifdef CONFIG_MODULES
		.needs_section_mem	= needs_section_mem,
		.alloc_section_mem	= reserve_module_tags,
		.free_section_mem	= release_module_tags,
		.module_load		= load_module,
		.module_replaced	= replace_module,
#endif
	};
	int res;

	res = alloc_mod_tags_mem();
	if (res)
		return res;

	alloc_tag_cttype = codetag_register_type(&desc);
	if (IS_ERR(alloc_tag_cttype)) {
		free_mod_tags_mem();
		return PTR_ERR(alloc_tag_cttype);
	}

	sysctl_init();
	procfs_init();

	return 0;
}
module_init(alloc_tag_init);