// SPDX-License-Identifier: GPL-2.0-only
#include <linux/alloc_tag.h>
#include <linux/execmem.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/page_ext.h>
#include <linux/proc_fs.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#define ALLOCINFO_FILE_NAME		"allocinfo"
#define MODULE_ALLOC_TAG_VMAP_SIZE	(100000UL * sizeof(struct alloc_tag))
#define SECTION_START(NAME)	(CODETAG_SECTION_START_PREFIX NAME)
#define SECTION_STOP(NAME)	(CODETAG_SECTION_STOP_PREFIX NAME)

#ifdef CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT
static bool mem_profiling_support = true;
#else
static bool mem_profiling_support;
#endif

static struct codetag_type *alloc_tag_cttype;

DEFINE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
EXPORT_SYMBOL(_shared_alloc_tag);

DEFINE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
			 mem_alloc_profiling_key);
EXPORT_SYMBOL(mem_alloc_profiling_key);

DEFINE_STATIC_KEY_FALSE(mem_profiling_compressed);

struct alloc_tag_kernel_section kernel_tags = { NULL, 0 };
unsigned long alloc_tag_ref_mask;
int alloc_tag_ref_offs;

struct allocinfo_private {
	struct codetag_iterator iter;
	bool print_header;
};

static void *allocinfo_start(struct seq_file *m, loff_t *pos)
{
	struct allocinfo_private *priv;
	struct codetag *ct;
	loff_t node = *pos;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	m->private = priv;
	if (!priv)
		return NULL;

	priv->print_header = (node == 0);
	codetag_lock_module_list(alloc_tag_cttype, true);
	priv->iter = codetag_get_ct_iter(alloc_tag_cttype);
	while ((ct = codetag_next_ct(&priv->iter)) != NULL && node)
		node--;

	return ct ? priv : NULL;
}

static void *allocinfo_next(struct seq_file *m, void *arg, loff_t *pos)
{
	struct allocinfo_private *priv = (struct allocinfo_private *)arg;
	struct codetag *ct = codetag_next_ct(&priv->iter);

	(*pos)++;
	if (!ct)
		return NULL;

	return priv;
}

static void allocinfo_stop(struct seq_file *m, void *arg)
{
	struct allocinfo_private *priv = (struct allocinfo_private *)m->private;

	if (priv) {
		codetag_lock_module_list(alloc_tag_cttype, false);
		kfree(priv);
	}
}

static void print_allocinfo_header(struct seq_buf *buf)
{
	/* Output format version, so we can change it. */
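	/*
	 * Illustrative example of the resulting /proc/allocinfo contents
	 * (the byte/call counts and the tag location below are made up):
	 *
	 *   allocinfo - version: 1.0
	 *   # <size> <calls> <tag info>
	 *         316416       77 mm/slub.c:1234 func:alloc_slab_obj_exts
	 */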
	seq_buf_printf(buf, "allocinfo - version: 1.0\n");
	seq_buf_printf(buf, "# <size> <calls> <tag info>\n");
}

static void alloc_tag_to_text(struct seq_buf *out, struct codetag *ct)
{
	struct alloc_tag *tag = ct_to_alloc_tag(ct);
	struct alloc_tag_counters counter = alloc_tag_read(tag);
	s64 bytes = counter.bytes;

	seq_buf_printf(out, "%12lli %8llu ", bytes, counter.calls);
	codetag_to_text(out, ct);
	seq_buf_putc(out, ' ');
	seq_buf_putc(out, '\n');
}

static int allocinfo_show(struct seq_file *m, void *arg)
{
	struct allocinfo_private *priv = (struct allocinfo_private *)arg;
	char *bufp;
	size_t n = seq_get_buf(m, &bufp);
	struct seq_buf buf;

	seq_buf_init(&buf, bufp, n);
	if (priv->print_header) {
		print_allocinfo_header(&buf);
		priv->print_header = false;
	}
	alloc_tag_to_text(&buf, priv->iter.ct);
	seq_commit(m, seq_buf_used(&buf));
	return 0;
}

static const struct seq_operations allocinfo_seq_op = {
	.start	= allocinfo_start,
	.next	= allocinfo_next,
	.stop	= allocinfo_stop,
	.show	= allocinfo_show,
};

size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep)
{
	struct codetag_iterator iter;
	struct codetag *ct;
	struct codetag_bytes n;
	unsigned int i, nr = 0;

	if (can_sleep)
		codetag_lock_module_list(alloc_tag_cttype, true);
	else if (!codetag_trylock_module_list(alloc_tag_cttype))
		return 0;

	iter = codetag_get_ct_iter(alloc_tag_cttype);
	while ((ct = codetag_next_ct(&iter))) {
		struct alloc_tag_counters counter = alloc_tag_read(ct_to_alloc_tag(ct));

		n.ct = ct;
		n.bytes = counter.bytes;

		for (i = 0; i < nr; i++)
			if (n.bytes > tags[i].bytes)
				break;

		if (i < count) {
			nr -= nr == count;
			memmove(&tags[i + 1],
				&tags[i],
				sizeof(tags[0]) * (nr - i));
			nr++;
			tags[i] = n;
		}
	}

	codetag_lock_module_list(alloc_tag_cttype, false);

	return nr;
}

void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
{
	int i;
	struct alloc_tag *tag;
	unsigned int nr_pages = 1 << new_order;

	if (!mem_alloc_profiling_enabled())
		return;

	tag = pgalloc_tag_get(&folio->page);
	if (!tag)
		return;

	for (i = nr_pages; i < (1 << old_order); i += nr_pages) {
		union pgtag_ref_handle handle;
		union codetag_ref ref;

		if (get_page_tag_ref(folio_page(folio, i), &ref, &handle)) {
			/* Set new reference to point to the original tag */
			alloc_tag_ref_set(&ref, tag);
			update_page_tag_ref(handle, &ref);
			put_page_tag_ref(handle);
		}
	}
}

void pgalloc_tag_swap(struct folio *new, struct folio *old)
{
	union pgtag_ref_handle handle_old, handle_new;
	union codetag_ref ref_old, ref_new;
	struct alloc_tag *tag_old, *tag_new;

	if (!mem_alloc_profiling_enabled())
		return;

	tag_old = pgalloc_tag_get(&old->page);
	if (!tag_old)
		return;
	tag_new = pgalloc_tag_get(&new->page);
	if (!tag_new)
		return;

	if (!get_page_tag_ref(&old->page, &ref_old, &handle_old))
		return;
	if (!get_page_tag_ref(&new->page, &ref_new, &handle_new)) {
		put_page_tag_ref(handle_old);
		return;
	}

	/*
	 * Clear tag references to avoid debug warning when using
	 * __alloc_tag_ref_set() with non-empty reference.
	 */
	set_codetag_empty(&ref_old);
	set_codetag_empty(&ref_new);

	/* swap tags */
	__alloc_tag_ref_set(&ref_old, tag_new);
	update_page_tag_ref(handle_old, &ref_old);
	__alloc_tag_ref_set(&ref_new, tag_old);
	update_page_tag_ref(handle_new, &ref_new);

	put_page_tag_ref(handle_old);
	put_page_tag_ref(handle_new);
}

static void shutdown_mem_profiling(bool remove_file)
{
	if (mem_alloc_profiling_enabled())
		static_branch_disable(&mem_alloc_profiling_key);

	if (!mem_profiling_support)
		return;

	if (remove_file)
		remove_proc_entry(ALLOCINFO_FILE_NAME, NULL);
	mem_profiling_support = false;
}

static void __init procfs_init(void)
{
	if (!mem_profiling_support)
		return;

	if (!proc_create_seq(ALLOCINFO_FILE_NAME, 0400, NULL, &allocinfo_seq_op)) {
		pr_err("Failed to create %s file\n", ALLOCINFO_FILE_NAME);
		shutdown_mem_profiling(false);
	}
}

void __init alloc_tag_sec_init(void)
{
	struct alloc_tag *last_codetag;

	if (!mem_profiling_support)
		return;

	if (!static_key_enabled(&mem_profiling_compressed))
		return;

	kernel_tags.first_tag = (struct alloc_tag *)kallsyms_lookup_name(
					SECTION_START(ALLOC_TAG_SECTION_NAME));
	last_codetag = (struct alloc_tag *)kallsyms_lookup_name(
					SECTION_STOP(ALLOC_TAG_SECTION_NAME));
	kernel_tags.count = last_codetag - kernel_tags.first_tag;

	/* Check if kernel tags fit into page flags */
	if (kernel_tags.count > (1UL << NR_UNUSED_PAGEFLAG_BITS)) {
		shutdown_mem_profiling(false); /* allocinfo file does not exist yet */
		pr_err("%lu allocation tags cannot be referenced using %d available page flag bits. Memory allocation profiling is disabled!\n",
		       kernel_tags.count, NR_UNUSED_PAGEFLAG_BITS);
		return;
	}

	alloc_tag_ref_offs = (LRU_REFS_PGOFF - NR_UNUSED_PAGEFLAG_BITS);
	alloc_tag_ref_mask = ((1UL << NR_UNUSED_PAGEFLAG_BITS) - 1);
	pr_debug("Memory allocation profiling compression is using %d page flag bits!\n",
		 NR_UNUSED_PAGEFLAG_BITS);
}

#ifdef CONFIG_MODULES

static struct maple_tree mod_area_mt = MTREE_INIT(mod_area_mt, MT_FLAGS_ALLOC_RANGE);
static struct vm_struct *vm_module_tags;
/* A dummy object used to indicate an unloaded module */
static struct module unloaded_mod;
/* A dummy object used to indicate a module prepended area */
static struct module prepend_mod;

struct alloc_tag_module_section module_tags;

static inline unsigned long alloc_tag_align(unsigned long val)
{
	if (!static_key_enabled(&mem_profiling_compressed)) {
		/* No alignment requirements when we are not indexing the tags */
		return val;
	}

	if (val % sizeof(struct alloc_tag) == 0)
		return val;
	return ((val / sizeof(struct alloc_tag)) + 1) * sizeof(struct alloc_tag);
}

static bool ensure_alignment(unsigned long align, unsigned int *prepend)
{
	if (!static_key_enabled(&mem_profiling_compressed)) {
		/* No alignment requirements when we are not indexing the tags */
		return true;
	}

	/*
	 * If alloc_tag size is not a multiple of required alignment, tag
	 * indexing does not work.
	 */
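	/*
	 * Hypothetical example: with a 40-byte struct alloc_tag and a required
	 * 64-byte alignment, tags would need padding between them and could no
	 * longer be addressed as start + index * sizeof(struct alloc_tag).
	 */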
	if (!IS_ALIGNED(sizeof(struct alloc_tag), align))
		return false;

	/* Ensure prepend consumes multiple of alloc_tag-sized blocks */
	if (*prepend)
		*prepend = alloc_tag_align(*prepend);

	return true;
}

static inline bool tags_addressable(void)
{
	unsigned long tag_idx_count;

	if (!static_key_enabled(&mem_profiling_compressed))
		return true; /* with page_ext tags are always addressable */

	tag_idx_count = CODETAG_ID_FIRST + kernel_tags.count +
			module_tags.size / sizeof(struct alloc_tag);

	return tag_idx_count < (1UL << NR_UNUSED_PAGEFLAG_BITS);
}

static bool needs_section_mem(struct module *mod, unsigned long size)
{
	if (!mem_profiling_support)
		return false;

	return size >= sizeof(struct alloc_tag);
}

static struct alloc_tag *find_used_tag(struct alloc_tag *from, struct alloc_tag *to)
{
	while (from <= to) {
		struct alloc_tag_counters counter;

		counter = alloc_tag_read(from);
		if (counter.bytes)
			return from;
		from++;
	}

	return NULL;
}

/* Called with mod_area_mt locked */
static void clean_unused_module_areas_locked(void)
{
	MA_STATE(mas, &mod_area_mt, 0, module_tags.size);
	struct module *val;

	mas_for_each(&mas, val, module_tags.size) {
		if (val != &unloaded_mod)
			continue;

		/* Release area if all tags are unused */
		if (!find_used_tag((struct alloc_tag *)(module_tags.start_addr + mas.index),
				   (struct alloc_tag *)(module_tags.start_addr + mas.last)))
			mas_erase(&mas);
	}
}

/* Called with mod_area_mt locked */
static bool find_aligned_area(struct ma_state *mas, unsigned long section_size,
			      unsigned long size, unsigned int prepend, unsigned long align)
{
	bool cleanup_done = false;

repeat:
	/* Try finding exact size and hope the start is aligned */
	if (!mas_empty_area(mas, 0, section_size - 1, prepend + size)) {
		if (IS_ALIGNED(mas->index + prepend, align))
			return true;

		/* Try finding larger area to align later */
		mas_reset(mas);
		if (!mas_empty_area(mas, 0, section_size - 1,
				    size + prepend + align - 1))
			return true;
	}

	/* No free area, try cleanup stale data and repeat the search once */
	if (!cleanup_done) {
		clean_unused_module_areas_locked();
		cleanup_done = true;
		mas_reset(mas);
		goto repeat;
	}

	return false;
}

static int vm_module_tags_populate(void)
{
	unsigned long phys_end = ALIGN_DOWN(module_tags.start_addr, PAGE_SIZE) +
				 (vm_module_tags->nr_pages << PAGE_SHIFT);
	unsigned long new_end = module_tags.start_addr + module_tags.size;

	if (phys_end < new_end) {
		struct page **next_page = vm_module_tags->pages + vm_module_tags->nr_pages;
		unsigned long old_shadow_end = ALIGN(phys_end, MODULE_ALIGN);
		unsigned long new_shadow_end = ALIGN(new_end, MODULE_ALIGN);
		unsigned long more_pages;
		unsigned long nr;

		more_pages = ALIGN(new_end - phys_end, PAGE_SIZE) >> PAGE_SHIFT;
		nr = alloc_pages_bulk_node(GFP_KERNEL | __GFP_NOWARN,
					   NUMA_NO_NODE, more_pages, next_page);
		if (nr < more_pages ||
		    vmap_pages_range(phys_end, phys_end + (nr << PAGE_SHIFT), PAGE_KERNEL,
				     next_page, PAGE_SHIFT) < 0) {
			/* Clean up and error out */
			for (int i = 0; i < nr; i++)
				__free_page(next_page[i]);
			return -ENOMEM;
		}

		vm_module_tags->nr_pages += nr;

		/*
		 * Kasan allocates 1 byte of shadow for every 8 bytes of data.
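		 * (Illustrative arithmetic: mapping 16 new 4 KiB pages, i.e.
		 * 64 KiB of tag data, needs 64 KiB / 8 = 8 KiB of shadow.)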
		 * When kasan_alloc_module_shadow allocates shadow memory,
		 * its unit of allocation is a page.
		 * Therefore, here we need to align to MODULE_ALIGN.
		 */
		if (old_shadow_end < new_shadow_end)
			kasan_alloc_module_shadow((void *)old_shadow_end,
						  new_shadow_end - old_shadow_end,
						  GFP_KERNEL);
	}

	/*
	 * Mark the pages as accessible, now that they are mapped.
	 * With hardware tag-based KASAN, marking is skipped for
	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
	 */
	kasan_unpoison_vmalloc((void *)module_tags.start_addr,
			       new_end - module_tags.start_addr,
			       KASAN_VMALLOC_PROT_NORMAL);

	return 0;
}

static void *reserve_module_tags(struct module *mod, unsigned long size,
				 unsigned int prepend, unsigned long align)
{
	unsigned long section_size = module_tags.end_addr - module_tags.start_addr;
	MA_STATE(mas, &mod_area_mt, 0, section_size - 1);
	unsigned long offset;
	void *ret = NULL;

	/* If no tags return error */
	if (size < sizeof(struct alloc_tag))
		return ERR_PTR(-EINVAL);

	/*
	 * align is always power of 2, so we can use IS_ALIGNED and ALIGN.
	 * align 0 or 1 means no alignment, to simplify set to 1.
	 */
	if (!align)
		align = 1;

	if (!ensure_alignment(align, &prepend)) {
		shutdown_mem_profiling(true);
		pr_err("%s: alignment %lu is incompatible with allocation tag indexing. Memory allocation profiling is disabled!\n",
		       mod->name, align);
		return ERR_PTR(-EINVAL);
	}

	mas_lock(&mas);
	if (!find_aligned_area(&mas, section_size, size, prepend, align)) {
		ret = ERR_PTR(-ENOMEM);
		goto unlock;
	}

	/* Mark found area as reserved */
	offset = mas.index;
	offset += prepend;
	offset = ALIGN(offset, align);
	if (offset != mas.index) {
		unsigned long pad_start = mas.index;

		mas.last = offset - 1;
		mas_store(&mas, &prepend_mod);
		if (mas_is_err(&mas)) {
			ret = ERR_PTR(xa_err(mas.node));
			goto unlock;
		}
		mas.index = offset;
		mas.last = offset + size - 1;
		mas_store(&mas, mod);
		if (mas_is_err(&mas)) {
			mas.index = pad_start;
			mas_erase(&mas);
			ret = ERR_PTR(xa_err(mas.node));
		}
	} else {
		mas.last = offset + size - 1;
		mas_store(&mas, mod);
		if (mas_is_err(&mas))
			ret = ERR_PTR(xa_err(mas.node));
	}
unlock:
	mas_unlock(&mas);

	if (IS_ERR(ret))
		return ret;

	if (module_tags.size < offset + size) {
		int grow_res;

		module_tags.size = offset + size;
		if (mem_alloc_profiling_enabled() && !tags_addressable()) {
			shutdown_mem_profiling(true);
			pr_warn("With module %s there are too many tags to fit in %d page flag bits. Memory allocation profiling is disabled!\n",
				mod->name, NR_UNUSED_PAGEFLAG_BITS);
		}

		grow_res = vm_module_tags_populate();
		if (grow_res) {
			shutdown_mem_profiling(true);
			pr_err("Failed to allocate memory for allocation tags in the module %s. Memory allocation profiling is disabled!\n",
			       mod->name);
			return ERR_PTR(grow_res);
		}
	}

	return (struct alloc_tag *)(module_tags.start_addr + offset);
}

static void release_module_tags(struct module *mod, bool used)
{
	MA_STATE(mas, &mod_area_mt, module_tags.size, module_tags.size);
	struct alloc_tag *tag;
	struct module *val;

	mas_lock(&mas);
	mas_for_each_rev(&mas, val, 0)
		if (val == mod)
			break;

	if (!val) /* module not found */
		goto out;

	if (!used)
		goto release_area;

	/* Find out if the area is used */
	tag = find_used_tag((struct alloc_tag *)(module_tags.start_addr + mas.index),
			    (struct alloc_tag *)(module_tags.start_addr + mas.last));
	if (tag) {
		struct alloc_tag_counters counter = alloc_tag_read(tag);

		pr_info("%s:%u module %s func:%s has %llu allocated at module unload\n",
			tag->ct.filename, tag->ct.lineno, tag->ct.modname,
			tag->ct.function, counter.bytes);
	} else {
		used = false;
	}
release_area:
	mas_store(&mas, used ? &unloaded_mod : NULL);
	val = mas_prev_range(&mas, 0);
	if (val == &prepend_mod)
		mas_store(&mas, NULL);
out:
	mas_unlock(&mas);
}

static void replace_module(struct module *mod, struct module *new_mod)
{
	MA_STATE(mas, &mod_area_mt, 0, module_tags.size);
	struct module *val;

	mas_lock(&mas);
	mas_for_each(&mas, val, module_tags.size) {
		if (val != mod)
			continue;

		mas_store_gfp(&mas, new_mod, GFP_KERNEL);
		break;
	}
	mas_unlock(&mas);
}

static int __init alloc_mod_tags_mem(void)
{
	/* Map space to copy allocation tags */
	vm_module_tags = execmem_vmap(MODULE_ALLOC_TAG_VMAP_SIZE);
	if (!vm_module_tags) {
		pr_err("Failed to map %lu bytes for module allocation tags\n",
		       MODULE_ALLOC_TAG_VMAP_SIZE);
		module_tags.start_addr = 0;
		return -ENOMEM;
	}

	vm_module_tags->pages = kmalloc_array(get_vm_area_size(vm_module_tags) >> PAGE_SHIFT,
					      sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
	if (!vm_module_tags->pages) {
		free_vm_area(vm_module_tags);
		return -ENOMEM;
	}

	module_tags.start_addr = (unsigned long)vm_module_tags->addr;
	module_tags.end_addr = module_tags.start_addr + MODULE_ALLOC_TAG_VMAP_SIZE;
	/* Ensure the base is alloc_tag aligned when required for indexing */
	module_tags.start_addr = alloc_tag_align(module_tags.start_addr);

	return 0;
}

static void __init free_mod_tags_mem(void)
{
	int i;

	module_tags.start_addr = 0;
	for (i = 0; i < vm_module_tags->nr_pages; i++)
		__free_page(vm_module_tags->pages[i]);
	kfree(vm_module_tags->pages);
	free_vm_area(vm_module_tags);
}

#else /* CONFIG_MODULES */

static inline int alloc_mod_tags_mem(void) { return 0; }
static inline void free_mod_tags_mem(void) {}

#endif /* CONFIG_MODULES */

/* See: Documentation/mm/allocation-profiling.rst */
static int __init setup_early_mem_profiling(char *str)
{
	bool compressed = false;
	bool enable;

	if (!str || !str[0])
		return -EINVAL;

	if (!strncmp(str, "never", 5)) {
		enable = false;
		mem_profiling_support = false;
		pr_info("Memory allocation profiling is disabled!\n");
	} else {
		char *token = strsep(&str, ",");

		if (kstrtobool(token, &enable))
			return -EINVAL;

		if (str) {
			if (strcmp(str, "compressed"))
				return -EINVAL;

			compressed = true;
		}
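		/*
		 * Example boot parameters this branch accepts (illustrative):
		 *   sysctl.vm.mem_profiling=1
		 *   sysctl.vm.mem_profiling=1,compressed
		 *   sysctl.vm.mem_profiling=0
		 */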
		mem_profiling_support = true;
		pr_info("Memory allocation profiling is enabled %s compression and is turned %s!\n",
			compressed ? "with" : "without", enable ? "on" : "off");
	}

	if (enable != mem_alloc_profiling_enabled()) {
		if (enable)
			static_branch_enable(&mem_alloc_profiling_key);
		else
			static_branch_disable(&mem_alloc_profiling_key);
	}
	if (compressed != static_key_enabled(&mem_profiling_compressed)) {
		if (compressed)
			static_branch_enable(&mem_profiling_compressed);
		else
			static_branch_disable(&mem_profiling_compressed);
	}

	return 0;
}
early_param("sysctl.vm.mem_profiling", setup_early_mem_profiling);

static __init bool need_page_alloc_tagging(void)
{
	if (static_key_enabled(&mem_profiling_compressed))
		return false;

	return mem_profiling_support;
}

static __init void init_page_alloc_tagging(void)
{
}

struct page_ext_operations page_alloc_tagging_ops = {
	.size = sizeof(union codetag_ref),
	.need = need_page_alloc_tagging,
	.init = init_page_alloc_tagging,
};
EXPORT_SYMBOL(page_alloc_tagging_ops);

#ifdef CONFIG_SYSCTL
static struct ctl_table memory_allocation_profiling_sysctls[] = {
	{
		.procname	= "mem_profiling",
		.data		= &mem_alloc_profiling_key,
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
		.mode		= 0444,
#else
		.mode		= 0644,
#endif
		.proc_handler	= proc_do_static_key,
	},
};

static void __init sysctl_init(void)
{
	if (!mem_profiling_support)
		memory_allocation_profiling_sysctls[0].mode = 0444;

	register_sysctl_init("vm", memory_allocation_profiling_sysctls);
}
#else /* CONFIG_SYSCTL */
static inline void sysctl_init(void) {}
#endif /* CONFIG_SYSCTL */

static int __init alloc_tag_init(void)
{
	const struct codetag_type_desc desc = {
		.section		= ALLOC_TAG_SECTION_NAME,
		.tag_size		= sizeof(struct alloc_tag),
#ifdef CONFIG_MODULES
		.needs_section_mem	= needs_section_mem,
		.alloc_section_mem	= reserve_module_tags,
		.free_section_mem	= release_module_tags,
		.module_replaced	= replace_module,
#endif
	};
	int res;

	res = alloc_mod_tags_mem();
	if (res)
		return res;

	alloc_tag_cttype = codetag_register_type(&desc);
	if (IS_ERR(alloc_tag_cttype)) {
		free_mod_tags_mem();
		return PTR_ERR(alloc_tag_cttype);
	}

	sysctl_init();
	procfs_init();

	return 0;
}
module_init(alloc_tag_init);