// SPDX-License-Identifier: GPL-2.0-only
#include <linux/alloc_tag.h>
#include <linux/execmem.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/page_ext.h>
#include <linux/proc_fs.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#define ALLOCINFO_FILE_NAME "allocinfo"
#define MODULE_ALLOC_TAG_VMAP_SIZE (100000UL * sizeof(struct alloc_tag))
#define SECTION_START(NAME) (CODETAG_SECTION_START_PREFIX NAME)
#define SECTION_STOP(NAME) (CODETAG_SECTION_STOP_PREFIX NAME)

#ifdef CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT
static bool mem_profiling_support = true;
#else
static bool mem_profiling_support;
#endif

static struct codetag_type *alloc_tag_cttype;

DEFINE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
EXPORT_SYMBOL(_shared_alloc_tag);

DEFINE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
			mem_alloc_profiling_key);
EXPORT_SYMBOL(mem_alloc_profiling_key);

DEFINE_STATIC_KEY_FALSE(mem_profiling_compressed);

struct alloc_tag_kernel_section kernel_tags = { NULL, 0 };
unsigned long alloc_tag_ref_mask;
int alloc_tag_ref_offs;

struct allocinfo_private {
	struct codetag_iterator iter;
	bool print_header;
};

static void *allocinfo_start(struct seq_file *m, loff_t *pos)
{
	struct allocinfo_private *priv;
	struct codetag *ct;
	loff_t node = *pos;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	m->private = priv;
	if (!priv)
		return NULL;

	priv->print_header = (node == 0);
	codetag_lock_module_list(alloc_tag_cttype, true);
	priv->iter = codetag_get_ct_iter(alloc_tag_cttype);
	while ((ct = codetag_next_ct(&priv->iter)) != NULL && node)
		node--;

	return ct ? priv : NULL;
}

static void *allocinfo_next(struct seq_file *m, void *arg, loff_t *pos)
{
	struct allocinfo_private *priv = (struct allocinfo_private *)arg;
	struct codetag *ct = codetag_next_ct(&priv->iter);

	(*pos)++;
	if (!ct)
		return NULL;

	return priv;
}

static void allocinfo_stop(struct seq_file *m, void *arg)
{
	struct allocinfo_private *priv = (struct allocinfo_private *)m->private;

	if (priv) {
		codetag_lock_module_list(alloc_tag_cttype, false);
		kfree(priv);
	}
}
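
/*
 * Illustrative /proc/allocinfo output, assuming the formats used by
 * print_allocinfo_header() and alloc_tag_to_text() below (the exact
 * <tag info> layout comes from codetag_to_text(); the sample call site
 * is hypothetical):
 *
 *   allocinfo - version: 1.0
 *   # <size> <calls> <tag info>
 *           4096        1 lib/foo.c:123 func:foo_alloc
 */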

static void print_allocinfo_header(struct seq_buf *buf)
{
	/* Output format version, so we can change it. */
	seq_buf_printf(buf, "allocinfo - version: 1.0\n");
	seq_buf_printf(buf, "# <size> <calls> <tag info>\n");
}

static void alloc_tag_to_text(struct seq_buf *out, struct codetag *ct)
{
	struct alloc_tag *tag = ct_to_alloc_tag(ct);
	struct alloc_tag_counters counter = alloc_tag_read(tag);
	s64 bytes = counter.bytes;

	seq_buf_printf(out, "%12lli %8llu ", bytes, counter.calls);
	codetag_to_text(out, ct);
	seq_buf_putc(out, ' ');
	seq_buf_putc(out, '\n');
}

static int allocinfo_show(struct seq_file *m, void *arg)
{
	struct allocinfo_private *priv = (struct allocinfo_private *)arg;
	char *bufp;
	size_t n = seq_get_buf(m, &bufp);
	struct seq_buf buf;

	seq_buf_init(&buf, bufp, n);
	if (priv->print_header) {
		print_allocinfo_header(&buf);
		priv->print_header = false;
	}
	alloc_tag_to_text(&buf, priv->iter.ct);
	seq_commit(m, seq_buf_used(&buf));
	return 0;
}

static const struct seq_operations allocinfo_seq_op = {
	.start = allocinfo_start,
	.next = allocinfo_next,
	.stop = allocinfo_stop,
	.show = allocinfo_show,
};

size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep)
{
	struct codetag_iterator iter;
	struct codetag *ct;
	struct codetag_bytes n;
	unsigned int i, nr = 0;

	if (can_sleep)
		codetag_lock_module_list(alloc_tag_cttype, true);
	else if (!codetag_trylock_module_list(alloc_tag_cttype))
		return 0;

	iter = codetag_get_ct_iter(alloc_tag_cttype);
	while ((ct = codetag_next_ct(&iter))) {
		struct alloc_tag_counters counter = alloc_tag_read(ct_to_alloc_tag(ct));

		n.ct = ct;
		n.bytes = counter.bytes;

		/* Insertion sort: keep tags[] ordered by descending byte count */
		for (i = 0; i < nr; i++)
			if (n.bytes > tags[i].bytes)
				break;

		if (i < count) {
			nr -= nr == count;
			memmove(&tags[i + 1],
				&tags[i],
				sizeof(tags[0]) * (nr - i));
			nr++;
			tags[i] = n;
		}
	}

	codetag_lock_module_list(alloc_tag_cttype, false);

	return nr;
}

void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
{
	int i;
	struct alloc_tag *tag;
	unsigned int nr_pages = 1 << new_order;

	if (!mem_alloc_profiling_enabled())
		return;

	tag = pgalloc_tag_get(&folio->page);
	if (!tag)
		return;

	for (i = nr_pages; i < (1 << old_order); i += nr_pages) {
		union pgtag_ref_handle handle;
		union codetag_ref ref;

		if (get_page_tag_ref(folio_page(folio, i), &ref, &handle)) {
			/* Set new reference to point to the original tag */
			alloc_tag_ref_set(&ref, tag);
			update_page_tag_ref(handle, &ref);
			put_page_tag_ref(handle);
		}
	}
}
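
/*
 * Note on pgalloc_tag_split() above: splitting a 2^old_order folio produces
 * 2^(old_order - new_order) folios of 2^new_order pages, and the loop points
 * every new folio's first page at the original tag, so that freeing any of
 * the sub-folios later decrements the byte/call counters of the call site
 * that originally allocated the memory.
 */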

void pgalloc_tag_swap(struct folio *new, struct folio *old)
{
	union pgtag_ref_handle handle_old, handle_new;
	union codetag_ref ref_old, ref_new;
	struct alloc_tag *tag_old, *tag_new;

	tag_old = pgalloc_tag_get(&old->page);
	if (!tag_old)
		return;
	tag_new = pgalloc_tag_get(&new->page);
	if (!tag_new)
		return;

	if (!get_page_tag_ref(&old->page, &ref_old, &handle_old))
		return;
	if (!get_page_tag_ref(&new->page, &ref_new, &handle_new)) {
		put_page_tag_ref(handle_old);
		return;
	}

	/*
	 * Clear tag references to avoid debug warning when using
	 * __alloc_tag_ref_set() with non-empty reference.
	 */
	set_codetag_empty(&ref_old);
	set_codetag_empty(&ref_new);

	/* swap tags */
	__alloc_tag_ref_set(&ref_old, tag_new);
	update_page_tag_ref(handle_old, &ref_old);
	__alloc_tag_ref_set(&ref_new, tag_old);
	update_page_tag_ref(handle_new, &ref_new);

	put_page_tag_ref(handle_old);
	put_page_tag_ref(handle_new);
}

static void shutdown_mem_profiling(bool remove_file)
{
	if (mem_alloc_profiling_enabled())
		static_branch_disable(&mem_alloc_profiling_key);

	if (!mem_profiling_support)
		return;

	if (remove_file)
		remove_proc_entry(ALLOCINFO_FILE_NAME, NULL);
	mem_profiling_support = false;
}

static void __init procfs_init(void)
{
	if (!mem_profiling_support)
		return;

	if (!proc_create_seq(ALLOCINFO_FILE_NAME, 0400, NULL, &allocinfo_seq_op)) {
		pr_err("Failed to create %s file\n", ALLOCINFO_FILE_NAME);
		shutdown_mem_profiling(false);
	}
}

void __init alloc_tag_sec_init(void)
{
	struct alloc_tag *last_codetag;

	if (!mem_profiling_support)
		return;

	if (!static_key_enabled(&mem_profiling_compressed))
		return;

	kernel_tags.first_tag = (struct alloc_tag *)kallsyms_lookup_name(
					SECTION_START(ALLOC_TAG_SECTION_NAME));
	last_codetag = (struct alloc_tag *)kallsyms_lookup_name(
					SECTION_STOP(ALLOC_TAG_SECTION_NAME));
	kernel_tags.count = last_codetag - kernel_tags.first_tag;

	/* Check if kernel tags fit into page flags */
	if (kernel_tags.count > (1UL << NR_UNUSED_PAGEFLAG_BITS)) {
		shutdown_mem_profiling(false); /* allocinfo file does not exist yet */
		pr_err("%lu allocation tags cannot be referenced using %d available page flag bits. Memory allocation profiling is disabled!\n",
		       kernel_tags.count, NR_UNUSED_PAGEFLAG_BITS);
		return;
	}

	alloc_tag_ref_offs = (LRU_REFS_PGOFF - NR_UNUSED_PAGEFLAG_BITS);
	alloc_tag_ref_mask = ((1UL << NR_UNUSED_PAGEFLAG_BITS) - 1);
	pr_debug("Memory allocation profiling compression is using %d page flag bits!\n",
		 NR_UNUSED_PAGEFLAG_BITS);
}

#ifdef CONFIG_MODULES

static struct maple_tree mod_area_mt = MTREE_INIT(mod_area_mt, MT_FLAGS_ALLOC_RANGE);
static struct vm_struct *vm_module_tags;
/* A dummy object used to indicate an unloaded module */
static struct module unloaded_mod;
/* A dummy object used to indicate a module prepended area */
static struct module prepend_mod;

struct alloc_tag_module_section module_tags;

static inline unsigned long alloc_tag_align(unsigned long val)
{
	if (!static_key_enabled(&mem_profiling_compressed)) {
		/* No alignment requirements when we are not indexing the tags */
		return val;
	}

	if (val % sizeof(struct alloc_tag) == 0)
		return val;
	return ((val / sizeof(struct alloc_tag)) + 1) * sizeof(struct alloc_tag);
}
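
/*
 * Worked example for alloc_tag_align() above, assuming a hypothetical
 * sizeof(struct alloc_tag) of 40 bytes (the real size depends on config):
 * alloc_tag_align(80) returns 80, while alloc_tag_align(100) rounds up to
 * 120. Keeping every boundary a multiple of the tag size lets compressed
 * references be stored as plain tag indices in the unused page flag bits.
 */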

static bool ensure_alignment(unsigned long align, unsigned int *prepend)
{
	if (!static_key_enabled(&mem_profiling_compressed)) {
		/* No alignment requirements when we are not indexing the tags */
		return true;
	}

	/*
	 * If alloc_tag size is not a multiple of required alignment, tag
	 * indexing does not work.
	 */
	if (!IS_ALIGNED(sizeof(struct alloc_tag), align))
		return false;

	/* Ensure prepend consumes multiple of alloc_tag-sized blocks */
	if (*prepend)
		*prepend = alloc_tag_align(*prepend);

	return true;
}

static inline bool tags_addressable(void)
{
	unsigned long tag_idx_count;

	if (!static_key_enabled(&mem_profiling_compressed))
		return true; /* with page_ext tags are always addressable */

	tag_idx_count = CODETAG_ID_FIRST + kernel_tags.count +
			module_tags.size / sizeof(struct alloc_tag);

	return tag_idx_count < (1UL << NR_UNUSED_PAGEFLAG_BITS);
}

static bool needs_section_mem(struct module *mod, unsigned long size)
{
	if (!mem_profiling_support)
		return false;

	return size >= sizeof(struct alloc_tag);
}

static struct alloc_tag *find_used_tag(struct alloc_tag *from, struct alloc_tag *to)
{
	while (from <= to) {
		struct alloc_tag_counters counter;

		counter = alloc_tag_read(from);
		if (counter.bytes)
			return from;
		from++;
	}

	return NULL;
}

/* Called with mod_area_mt locked */
static void clean_unused_module_areas_locked(void)
{
	MA_STATE(mas, &mod_area_mt, 0, module_tags.size);
	struct module *val;

	mas_for_each(&mas, val, module_tags.size) {
		if (val != &unloaded_mod)
			continue;

		/* Release area if all tags are unused */
		if (!find_used_tag((struct alloc_tag *)(module_tags.start_addr + mas.index),
				   (struct alloc_tag *)(module_tags.start_addr + mas.last)))
			mas_erase(&mas);
	}
}

/* Called with mod_area_mt locked */
static bool find_aligned_area(struct ma_state *mas, unsigned long section_size,
			      unsigned long size, unsigned int prepend,
			      unsigned long align)
{
	bool cleanup_done = false;

repeat:
	/* Try finding exact size and hope the start is aligned */
	if (!mas_empty_area(mas, 0, section_size - 1, prepend + size)) {
		if (IS_ALIGNED(mas->index + prepend, align))
			return true;

		/* Try finding larger area to align later */
		mas_reset(mas);
		if (!mas_empty_area(mas, 0, section_size - 1,
				    size + prepend + align - 1))
			return true;
	}

	/* No free area; try cleaning up stale data and repeat the search once */
	if (!cleanup_done) {
		clean_unused_module_areas_locked();
		cleanup_done = true;
		mas_reset(mas);
		goto repeat;
	}

	return false;
}
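
/*
 * vm_module_tags_populate() below grows the physical backing for the
 * reserved tag area on demand: execmem_vmap() in alloc_mod_tags_mem()
 * reserves only virtual space for MODULE_ALLOC_TAG_VMAP_SIZE bytes, and
 * pages are bulk-allocated and mapped here as module_tags.size grows, so
 * unused parts of the worst-case reservation cost no physical memory.
 */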

static int vm_module_tags_populate(void)
{
	unsigned long phys_end = ALIGN_DOWN(module_tags.start_addr, PAGE_SIZE) +
				 (vm_module_tags->nr_pages << PAGE_SHIFT);
	unsigned long new_end = module_tags.start_addr + module_tags.size;

	if (phys_end < new_end) {
		struct page **next_page = vm_module_tags->pages + vm_module_tags->nr_pages;
		unsigned long old_shadow_end = ALIGN(phys_end, MODULE_ALIGN);
		unsigned long new_shadow_end = ALIGN(new_end, MODULE_ALIGN);
		unsigned long more_pages;
		unsigned long nr;

		more_pages = ALIGN(new_end - phys_end, PAGE_SIZE) >> PAGE_SHIFT;
		nr = alloc_pages_bulk_node(GFP_KERNEL | __GFP_NOWARN,
					   NUMA_NO_NODE, more_pages, next_page);
		if (nr < more_pages ||
		    vmap_pages_range(phys_end, phys_end + (nr << PAGE_SHIFT), PAGE_KERNEL,
				     next_page, PAGE_SHIFT) < 0) {
			/* Clean up and error out */
			for (int i = 0; i < nr; i++)
				__free_page(next_page[i]);
			return -ENOMEM;
		}

		vm_module_tags->nr_pages += nr;

		/*
		 * KASAN allocates 1 byte of shadow for every 8 bytes of data.
		 * When kasan_alloc_module_shadow allocates shadow memory,
		 * its unit of allocation is a page.
		 * Therefore, here we need to align to MODULE_ALIGN.
		 */
		if (old_shadow_end < new_shadow_end)
			kasan_alloc_module_shadow((void *)old_shadow_end,
						  new_shadow_end - old_shadow_end,
						  GFP_KERNEL);
	}

	/*
	 * Mark the pages as accessible, now that they are mapped.
	 * With hardware tag-based KASAN, marking is skipped for
	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
	 */
	kasan_unpoison_vmalloc((void *)module_tags.start_addr,
			       new_end - module_tags.start_addr,
			       KASAN_VMALLOC_PROT_NORMAL);

	return 0;
}

static void *reserve_module_tags(struct module *mod, unsigned long size,
				 unsigned int prepend, unsigned long align)
{
	unsigned long section_size = module_tags.end_addr - module_tags.start_addr;
	MA_STATE(mas, &mod_area_mt, 0, section_size - 1);
	unsigned long offset;
	void *ret = NULL;

	/* If no tags fit, return an error */
	if (size < sizeof(struct alloc_tag))
		return ERR_PTR(-EINVAL);

	/*
	 * align is always a power of 2, so we can use IS_ALIGNED and ALIGN.
	 * align of 0 or 1 means no alignment; to simplify, set it to 1.
	 */
	if (!align)
		align = 1;

	if (!ensure_alignment(align, &prepend)) {
		shutdown_mem_profiling(true);
		pr_err("%s: alignment %lu is incompatible with allocation tag indexing. Memory allocation profiling is disabled!\n",
		       mod->name, align);
		return ERR_PTR(-EINVAL);
	}

	mas_lock(&mas);
	if (!find_aligned_area(&mas, section_size, size, prepend, align)) {
		ret = ERR_PTR(-ENOMEM);
		goto unlock;
	}

	/* Mark found area as reserved */
	offset = mas.index;
	offset += prepend;
	offset = ALIGN(offset, align);
	if (offset != mas.index) {
		unsigned long pad_start = mas.index;

		mas.last = offset - 1;
		mas_store(&mas, &prepend_mod);
		if (mas_is_err(&mas)) {
			ret = ERR_PTR(xa_err(mas.node));
			goto unlock;
		}
		mas.index = offset;
		mas.last = offset + size - 1;
		mas_store(&mas, mod);
		if (mas_is_err(&mas)) {
			mas.index = pad_start;
			mas_erase(&mas);
			ret = ERR_PTR(xa_err(mas.node));
		}
	} else {
		mas.last = offset + size - 1;
		mas_store(&mas, mod);
		if (mas_is_err(&mas))
			ret = ERR_PTR(xa_err(mas.node));
	}
unlock:
	mas_unlock(&mas);

	if (IS_ERR(ret))
		return ret;

	if (module_tags.size < offset + size) {
		int grow_res;

		module_tags.size = offset + size;
		if (mem_alloc_profiling_enabled() && !tags_addressable()) {
			shutdown_mem_profiling(true);
			pr_warn("With module %s there are too many tags to fit in %d page flag bits. Memory allocation profiling is disabled!\n",
				mod->name, NR_UNUSED_PAGEFLAG_BITS);
		}

		grow_res = vm_module_tags_populate();
		if (grow_res) {
			shutdown_mem_profiling(true);
			pr_err("Failed to allocate memory for allocation tags in the module %s. Memory allocation profiling is disabled!\n",
			       mod->name);
			return ERR_PTR(grow_res);
		}
	}

	return (struct alloc_tag *)(module_tags.start_addr + offset);
}
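
/*
 * Note on release_module_tags() below: if a tag still has bytes charged to
 * it when its module is unloaded, the area is kept reserved by storing the
 * &unloaded_mod sentinel instead of being freed, so the surviving counters
 * remain valid. clean_unused_module_areas_locked() erases such areas later,
 * once none of their tags have outstanding bytes.
 */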

static void release_module_tags(struct module *mod, bool used)
{
	MA_STATE(mas, &mod_area_mt, module_tags.size, module_tags.size);
	struct alloc_tag *tag;
	struct module *val;

	mas_lock(&mas);
	mas_for_each_rev(&mas, val, 0)
		if (val == mod)
			break;

	if (!val) /* module not found */
		goto out;

	if (!used)
		goto release_area;

	/* Find out if the area is used */
	tag = find_used_tag((struct alloc_tag *)(module_tags.start_addr + mas.index),
			    (struct alloc_tag *)(module_tags.start_addr + mas.last));
	if (tag) {
		struct alloc_tag_counters counter = alloc_tag_read(tag);

		pr_info("%s:%u module %s func:%s has %llu bytes allocated at module unload\n",
			tag->ct.filename, tag->ct.lineno, tag->ct.modname,
			tag->ct.function, counter.bytes);
	} else {
		used = false;
	}
release_area:
	mas_store(&mas, used ? &unloaded_mod : NULL);
	val = mas_prev_range(&mas, 0);
	if (val == &prepend_mod)
		mas_store(&mas, NULL);
out:
	mas_unlock(&mas);
}

static void replace_module(struct module *mod, struct module *new_mod)
{
	MA_STATE(mas, &mod_area_mt, 0, module_tags.size);
	struct module *val;

	mas_lock(&mas);
	mas_for_each(&mas, val, module_tags.size) {
		if (val != mod)
			continue;

		mas_store_gfp(&mas, new_mod, GFP_KERNEL);
		break;
	}
	mas_unlock(&mas);
}

static int __init alloc_mod_tags_mem(void)
{
	/* Map space to copy allocation tags */
	vm_module_tags = execmem_vmap(MODULE_ALLOC_TAG_VMAP_SIZE);
	if (!vm_module_tags) {
		pr_err("Failed to map %lu bytes for module allocation tags\n",
		       MODULE_ALLOC_TAG_VMAP_SIZE);
		module_tags.start_addr = 0;
		return -ENOMEM;
	}

	vm_module_tags->pages = kmalloc_array(get_vm_area_size(vm_module_tags) >> PAGE_SHIFT,
					      sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
	if (!vm_module_tags->pages) {
		free_vm_area(vm_module_tags);
		return -ENOMEM;
	}

	module_tags.start_addr = (unsigned long)vm_module_tags->addr;
	module_tags.end_addr = module_tags.start_addr + MODULE_ALLOC_TAG_VMAP_SIZE;
	/* Ensure the base is alloc_tag aligned when required for indexing */
	module_tags.start_addr = alloc_tag_align(module_tags.start_addr);

	return 0;
}

static void __init free_mod_tags_mem(void)
{
	int i;

	module_tags.start_addr = 0;
	for (i = 0; i < vm_module_tags->nr_pages; i++)
		__free_page(vm_module_tags->pages[i]);
	kfree(vm_module_tags->pages);
	free_vm_area(vm_module_tags);
}

#else /* CONFIG_MODULES */

static inline int alloc_mod_tags_mem(void) { return 0; }
static inline void free_mod_tags_mem(void) {}

#endif /* CONFIG_MODULES */
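
/*
 * Illustrative boot parameter values, matching the parsing in
 * setup_early_mem_profiling() below:
 *
 *   sysctl.vm.mem_profiling=never          profiling support disabled entirely
 *   sysctl.vm.mem_profiling=0              support compiled in, profiling off
 *   sysctl.vm.mem_profiling=1              profiling on
 *   sysctl.vm.mem_profiling=1,compressed   profiling on, tag references packed
 *                                          into unused page flag bits
 */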

/* See: Documentation/mm/allocation-profiling.rst */
static int __init setup_early_mem_profiling(char *str)
{
	bool compressed = false;
	bool enable;

	if (!str || !str[0])
		return -EINVAL;

	if (!strncmp(str, "never", 5)) {
		enable = false;
		mem_profiling_support = false;
		pr_info("Memory allocation profiling is disabled!\n");
	} else {
		char *token = strsep(&str, ",");

		if (kstrtobool(token, &enable))
			return -EINVAL;

		if (str) {
			if (strcmp(str, "compressed"))
				return -EINVAL;

			compressed = true;
		}
		mem_profiling_support = true;
		pr_info("Memory allocation profiling is enabled %s compression and is turned %s!\n",
			compressed ? "with" : "without", enable ? "on" : "off");
	}

	if (enable != mem_alloc_profiling_enabled()) {
		if (enable)
			static_branch_enable(&mem_alloc_profiling_key);
		else
			static_branch_disable(&mem_alloc_profiling_key);
	}
	if (compressed != static_key_enabled(&mem_profiling_compressed)) {
		if (compressed)
			static_branch_enable(&mem_profiling_compressed);
		else
			static_branch_disable(&mem_profiling_compressed);
	}

	return 0;
}
early_param("sysctl.vm.mem_profiling", setup_early_mem_profiling);

static __init bool need_page_alloc_tagging(void)
{
	if (static_key_enabled(&mem_profiling_compressed))
		return false;

	return mem_profiling_support;
}

static __init void init_page_alloc_tagging(void)
{
}

struct page_ext_operations page_alloc_tagging_ops = {
	.size = sizeof(union codetag_ref),
	.need = need_page_alloc_tagging,
	.init = init_page_alloc_tagging,
};
EXPORT_SYMBOL(page_alloc_tagging_ops);

#ifdef CONFIG_SYSCTL
static struct ctl_table memory_allocation_profiling_sysctls[] = {
	{
		.procname = "mem_profiling",
		.data = &mem_alloc_profiling_key,
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
		.mode = 0444,
#else
		.mode = 0644,
#endif
		.proc_handler = proc_do_static_key,
	},
};

static void __init sysctl_init(void)
{
	if (!mem_profiling_support)
		memory_allocation_profiling_sysctls[0].mode = 0444;

	register_sysctl_init("vm", memory_allocation_profiling_sysctls);
}
#else /* CONFIG_SYSCTL */
static inline void sysctl_init(void) {}
#endif /* CONFIG_SYSCTL */

static int __init alloc_tag_init(void)
{
	const struct codetag_type_desc desc = {
		.section		= ALLOC_TAG_SECTION_NAME,
		.tag_size		= sizeof(struct alloc_tag),
#ifdef CONFIG_MODULES
		.needs_section_mem	= needs_section_mem,
		.alloc_section_mem	= reserve_module_tags,
		.free_section_mem	= release_module_tags,
		.module_replaced	= replace_module,
#endif
	};
	int res;

	res = alloc_mod_tags_mem();
	if (res)
		return res;

	alloc_tag_cttype = codetag_register_type(&desc);
	if (IS_ERR(alloc_tag_cttype)) {
		free_mod_tags_mem();
		return PTR_ERR(alloc_tag_cttype);
	}

	sysctl_init();
	procfs_init();

	return 0;
}
module_init(alloc_tag_init);