// SPDX-License-Identifier: GPL-2.0-only
#include <linux/alloc_tag.h>
#include <linux/execmem.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/page_ext.h>
#include <linux/proc_fs.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#define ALLOCINFO_FILE_NAME		"allocinfo"
#define MODULE_ALLOC_TAG_VMAP_SIZE	(100000UL * sizeof(struct alloc_tag))
#define SECTION_START(NAME)		(CODETAG_SECTION_START_PREFIX NAME)
#define SECTION_STOP(NAME)		(CODETAG_SECTION_STOP_PREFIX NAME)

#ifdef CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT
static bool mem_profiling_support = true;
#else
static bool mem_profiling_support;
#endif

static struct codetag_type *alloc_tag_cttype;

DEFINE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
EXPORT_SYMBOL(_shared_alloc_tag);

DEFINE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
			mem_alloc_profiling_key);
EXPORT_SYMBOL(mem_alloc_profiling_key);

DEFINE_STATIC_KEY_FALSE(mem_profiling_compressed);

struct alloc_tag_kernel_section kernel_tags = { NULL, 0 };
unsigned long alloc_tag_ref_mask;
int alloc_tag_ref_offs;

struct allocinfo_private {
	struct codetag_iterator iter;
	bool print_header;
};

static void *allocinfo_start(struct seq_file *m, loff_t *pos)
{
	struct allocinfo_private *priv;
	struct codetag *ct;
	loff_t node = *pos;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	m->private = priv;
	if (!priv)
		return NULL;

	priv->print_header = (node == 0);
	codetag_lock_module_list(alloc_tag_cttype, true);
	priv->iter = codetag_get_ct_iter(alloc_tag_cttype);
	while ((ct = codetag_next_ct(&priv->iter)) != NULL && node)
		node--;

	return ct ? priv : NULL;
}

static void *allocinfo_next(struct seq_file *m, void *arg, loff_t *pos)
{
	struct allocinfo_private *priv = (struct allocinfo_private *)arg;
	struct codetag *ct = codetag_next_ct(&priv->iter);

	(*pos)++;
	if (!ct)
		return NULL;

	return priv;
}

static void allocinfo_stop(struct seq_file *m, void *arg)
{
	struct allocinfo_private *priv = (struct allocinfo_private *)m->private;

	if (priv) {
		codetag_lock_module_list(alloc_tag_cttype, false);
		kfree(priv);
	}
}
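/*
 * For reference, the helpers below combine into /proc/allocinfo output of
 * roughly the following shape. The two tag lines use invented sample values,
 * and the exact tag-info layout comes from codetag_to_text(), which lives
 * elsewhere:
 *
 *   allocinfo - version: 1.0
 *   # <size> <calls> <tag info>
 *           4096        1 fs/file_table.c:126 func:alloc_empty_file
 *              0        0 lib/stackdepot.c:608 func:depot_alloc_stack
 */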
static void print_allocinfo_header(struct seq_buf *buf)
{
	/* Output format version, so we can change it. */
	seq_buf_printf(buf, "allocinfo - version: 1.0\n");
	seq_buf_printf(buf, "# <size> <calls> <tag info>\n");
}

static void alloc_tag_to_text(struct seq_buf *out, struct codetag *ct)
{
	struct alloc_tag *tag = ct_to_alloc_tag(ct);
	struct alloc_tag_counters counter = alloc_tag_read(tag);
	s64 bytes = counter.bytes;

	seq_buf_printf(out, "%12lli %8llu ", bytes, counter.calls);
	codetag_to_text(out, ct);
	seq_buf_putc(out, ' ');
	seq_buf_putc(out, '\n');
}

static int allocinfo_show(struct seq_file *m, void *arg)
{
	struct allocinfo_private *priv = (struct allocinfo_private *)arg;
	char *bufp;
	size_t n = seq_get_buf(m, &bufp);
	struct seq_buf buf;

	seq_buf_init(&buf, bufp, n);
	if (priv->print_header) {
		print_allocinfo_header(&buf);
		priv->print_header = false;
	}
	alloc_tag_to_text(&buf, priv->iter.ct);
	seq_commit(m, seq_buf_used(&buf));
	return 0;
}

static const struct seq_operations allocinfo_seq_op = {
	.start	= allocinfo_start,
	.next	= allocinfo_next,
	.stop	= allocinfo_stop,
	.show	= allocinfo_show,
};

size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep)
{
	struct codetag_iterator iter;
	struct codetag *ct;
	struct codetag_bytes n;
	unsigned int i, nr = 0;

	if (can_sleep)
		codetag_lock_module_list(alloc_tag_cttype, true);
	else if (!codetag_trylock_module_list(alloc_tag_cttype))
		return 0;

	iter = codetag_get_ct_iter(alloc_tag_cttype);
	while ((ct = codetag_next_ct(&iter))) {
		struct alloc_tag_counters counter = alloc_tag_read(ct_to_alloc_tag(ct));

		n.ct = ct;
		n.bytes = counter.bytes;

		for (i = 0; i < nr; i++)
			if (n.bytes > tags[i].bytes)
				break;

		if (i < count) {
			nr -= nr == count;
			memmove(&tags[i + 1],
				&tags[i],
				sizeof(tags[0]) * (nr - i));
			nr++;
			tags[i] = n;
		}
	}

	codetag_lock_module_list(alloc_tag_cttype, false);

	return nr;
}

void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
{
	int i;
	struct alloc_tag *tag;
	unsigned int nr_pages = 1 << new_order;

	if (!mem_alloc_profiling_enabled())
		return;

	tag = __pgalloc_tag_get(&folio->page);
	if (!tag)
		return;

	for (i = nr_pages; i < (1 << old_order); i += nr_pages) {
		union pgtag_ref_handle handle;
		union codetag_ref ref;

		if (get_page_tag_ref(folio_page(folio, i), &ref, &handle)) {
			/* Set new reference to point to the original tag */
			alloc_tag_ref_set(&ref, tag);
			update_page_tag_ref(handle, &ref);
			put_page_tag_ref(handle);
		}
	}
}
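/*
 * Worked example for pgalloc_tag_split() above: splitting an order-9 folio
 * into order-0 pages gives nr_pages = 1 << 0 = 1 and 1 << old_order = 512,
 * so the loop visits pages 1..511 (page 0 already carries the reference)
 * and points each new page's tag reference at the original allocation tag,
 * keeping every resulting page attributed to the call site that allocated
 * the large folio.
 */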
void pgalloc_tag_swap(struct folio *new, struct folio *old)
{
	union pgtag_ref_handle handle_old, handle_new;
	union codetag_ref ref_old, ref_new;
	struct alloc_tag *tag_old, *tag_new;

	if (!mem_alloc_profiling_enabled())
		return;

	tag_old = __pgalloc_tag_get(&old->page);
	if (!tag_old)
		return;
	tag_new = __pgalloc_tag_get(&new->page);
	if (!tag_new)
		return;

	if (!get_page_tag_ref(&old->page, &ref_old, &handle_old))
		return;
	if (!get_page_tag_ref(&new->page, &ref_new, &handle_new)) {
		put_page_tag_ref(handle_old);
		return;
	}

	/*
	 * Clear tag references to avoid debug warning when using
	 * __alloc_tag_ref_set() with non-empty reference.
	 */
	set_codetag_empty(&ref_old);
	set_codetag_empty(&ref_new);

	/* swap tags */
	__alloc_tag_ref_set(&ref_old, tag_new);
	update_page_tag_ref(handle_old, &ref_old);
	__alloc_tag_ref_set(&ref_new, tag_old);
	update_page_tag_ref(handle_new, &ref_new);

	put_page_tag_ref(handle_old);
	put_page_tag_ref(handle_new);
}

static void shutdown_mem_profiling(bool remove_file)
{
	if (mem_alloc_profiling_enabled())
		static_branch_disable(&mem_alloc_profiling_key);

	if (!mem_profiling_support)
		return;

	if (remove_file)
		remove_proc_entry(ALLOCINFO_FILE_NAME, NULL);
	mem_profiling_support = false;
}

void __init alloc_tag_sec_init(void)
{
	struct alloc_tag *last_codetag;

	if (!mem_profiling_support)
		return;

	if (!static_key_enabled(&mem_profiling_compressed))
		return;

	kernel_tags.first_tag = (struct alloc_tag *)kallsyms_lookup_name(
					SECTION_START(ALLOC_TAG_SECTION_NAME));
	last_codetag = (struct alloc_tag *)kallsyms_lookup_name(
					SECTION_STOP(ALLOC_TAG_SECTION_NAME));
	kernel_tags.count = last_codetag - kernel_tags.first_tag;

	/* Check if kernel tags fit into page flags */
	if (kernel_tags.count > (1UL << NR_UNUSED_PAGEFLAG_BITS)) {
		shutdown_mem_profiling(false); /* allocinfo file does not exist yet */
		pr_err("%lu allocation tags cannot be referenced using %d available page flag bits. Memory allocation profiling is disabled!\n",
		       kernel_tags.count, NR_UNUSED_PAGEFLAG_BITS);
		return;
	}

	alloc_tag_ref_offs = (LRU_REFS_PGOFF - NR_UNUSED_PAGEFLAG_BITS);
	alloc_tag_ref_mask = ((1UL << NR_UNUSED_PAGEFLAG_BITS) - 1);
	pr_debug("Memory allocation profiling compression is using %d page flag bits!\n",
		 NR_UNUSED_PAGEFLAG_BITS);
}

#ifdef CONFIG_MODULES

static struct maple_tree mod_area_mt = MTREE_INIT(mod_area_mt, MT_FLAGS_ALLOC_RANGE);
static struct vm_struct *vm_module_tags;
/* A dummy object used to indicate an unloaded module */
static struct module unloaded_mod;
/* A dummy object used to indicate a module prepended area */
static struct module prepend_mod;

struct alloc_tag_module_section module_tags;

static inline unsigned long alloc_tag_align(unsigned long val)
{
	if (!static_key_enabled(&mem_profiling_compressed)) {
		/* No alignment requirements when we are not indexing the tags */
		return val;
	}

	if (val % sizeof(struct alloc_tag) == 0)
		return val;
	return ((val / sizeof(struct alloc_tag)) + 1) * sizeof(struct alloc_tag);
}
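/*
 * Worked example for alloc_tag_align(): with compression enabled and,
 * hypothetically, sizeof(struct alloc_tag) == 40 (the real size depends on
 * the kernel config), alloc_tag_align(100) rounds up to the next whole tag
 * slot, (100 / 40 + 1) * 40 = 120, while alloc_tag_align(80) is already a
 * multiple of the tag size and is returned unchanged.
 */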
static bool ensure_alignment(unsigned long align, unsigned int *prepend)
{
	if (!static_key_enabled(&mem_profiling_compressed)) {
		/* No alignment requirements when we are not indexing the tags */
		return true;
	}

	/*
	 * If alloc_tag size is not a multiple of required alignment, tag
	 * indexing does not work.
	 */
	if (!IS_ALIGNED(sizeof(struct alloc_tag), align))
		return false;

	/* Ensure prepend consumes multiple of alloc_tag-sized blocks */
	if (*prepend)
		*prepend = alloc_tag_align(*prepend);

	return true;
}

static inline bool tags_addressable(void)
{
	unsigned long tag_idx_count;

	if (!static_key_enabled(&mem_profiling_compressed))
		return true; /* with page_ext tags are always addressable */

	tag_idx_count = CODETAG_ID_FIRST + kernel_tags.count +
			module_tags.size / sizeof(struct alloc_tag);

	return tag_idx_count < (1UL << NR_UNUSED_PAGEFLAG_BITS);
}

static bool needs_section_mem(struct module *mod, unsigned long size)
{
	if (!mem_profiling_support)
		return false;

	return size >= sizeof(struct alloc_tag);
}

static struct alloc_tag *find_used_tag(struct alloc_tag *from, struct alloc_tag *to)
{
	while (from <= to) {
		struct alloc_tag_counters counter;

		counter = alloc_tag_read(from);
		if (counter.bytes)
			return from;
		from++;
	}

	return NULL;
}

/* Called with mod_area_mt locked */
static void clean_unused_module_areas_locked(void)
{
	MA_STATE(mas, &mod_area_mt, 0, module_tags.size);
	struct module *val;

	mas_for_each(&mas, val, module_tags.size) {
		if (val != &unloaded_mod)
			continue;

		/* Release area if all tags are unused */
		if (!find_used_tag((struct alloc_tag *)(module_tags.start_addr + mas.index),
				   (struct alloc_tag *)(module_tags.start_addr + mas.last)))
			mas_erase(&mas);
	}
}

/* Called with mod_area_mt locked */
static bool find_aligned_area(struct ma_state *mas, unsigned long section_size,
			      unsigned long size, unsigned int prepend,
			      unsigned long align)
{
	bool cleanup_done = false;

repeat:
	/* Try finding exact size and hope the start is aligned */
	if (!mas_empty_area(mas, 0, section_size - 1, prepend + size)) {
		if (IS_ALIGNED(mas->index + prepend, align))
			return true;

		/* Try finding larger area to align later */
		mas_reset(mas);
		if (!mas_empty_area(mas, 0, section_size - 1,
				    size + prepend + align - 1))
			return true;
	}

	/* No free area; try cleaning up stale data and repeat the search once */
	if (!cleanup_done) {
		clean_unused_module_areas_locked();
		cleanup_done = true;
		mas_reset(mas);
		goto repeat;
	}

	return false;
}
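/*
 * Sketch of the resulting mod_area_mt layout (offsets invented for the
 * example): a module whose tag area needs alignment padding is stored by
 * reserve_module_tags() below as two adjacent ranges,
 *
 *   [0,   119] -> &prepend_mod   (padding so the tags start aligned)
 *   [120, 439] -> mod            (the module's alloc_tag array)
 *
 * and on unload release_module_tags() either clears the second range or
 * replaces it with &unloaded_mod when some of its tags still have live
 * allocations, so clean_unused_module_areas_locked() can reclaim it later.
 */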
static int vm_module_tags_populate(void)
{
	unsigned long phys_end = ALIGN_DOWN(module_tags.start_addr, PAGE_SIZE) +
				 (vm_module_tags->nr_pages << PAGE_SHIFT);
	unsigned long new_end = module_tags.start_addr + module_tags.size;

	if (phys_end < new_end) {
		struct page **next_page = vm_module_tags->pages + vm_module_tags->nr_pages;
		unsigned long old_shadow_end = ALIGN(phys_end, MODULE_ALIGN);
		unsigned long new_shadow_end = ALIGN(new_end, MODULE_ALIGN);
		unsigned long more_pages;
		unsigned long nr = 0;

		more_pages = ALIGN(new_end - phys_end, PAGE_SIZE) >> PAGE_SHIFT;
		while (nr < more_pages) {
			unsigned long allocated;

			allocated = alloc_pages_bulk_node(GFP_KERNEL | __GFP_NOWARN,
							  NUMA_NO_NODE, more_pages - nr,
							  next_page + nr);
			if (!allocated)
				break;
			nr += allocated;
		}

		if (nr < more_pages ||
		    vmap_pages_range(phys_end, phys_end + (nr << PAGE_SHIFT), PAGE_KERNEL,
				     next_page, PAGE_SHIFT) < 0) {
			/* Clean up and error out */
			for (int i = 0; i < nr; i++)
				__free_page(next_page[i]);
			return -ENOMEM;
		}

		vm_module_tags->nr_pages += nr;

		/*
		 * KASAN allocates 1 byte of shadow for every 8 bytes of data.
		 * When kasan_alloc_module_shadow allocates shadow memory,
		 * its unit of allocation is a page.
		 * Therefore, here we need to align to MODULE_ALIGN.
		 */
		if (old_shadow_end < new_shadow_end)
			kasan_alloc_module_shadow((void *)old_shadow_end,
						  new_shadow_end - old_shadow_end,
						  GFP_KERNEL);
	}

	/*
	 * Mark the pages as accessible, now that they are mapped.
	 * With hardware tag-based KASAN, marking is skipped for
	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
	 */
	kasan_unpoison_vmalloc((void *)module_tags.start_addr,
			       new_end - module_tags.start_addr,
			       KASAN_VMALLOC_PROT_NORMAL);

	return 0;
}
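/*
 * Worked example for vm_module_tags_populate(), assuming 4K pages and a
 * page-aligned start_addr: if 3 pages are currently populated, phys_end is
 * start_addr + 12288. Should module_tags.size then grow to 18000, new_end
 * becomes start_addr + 18000 and more_pages = ALIGN(5712, 4096) >> 12 = 2,
 * so two more pages are bulk-allocated and mapped at phys_end before the
 * newly reserved tags are touched.
 */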
static void *reserve_module_tags(struct module *mod, unsigned long size,
				 unsigned int prepend, unsigned long align)
{
	unsigned long section_size = module_tags.end_addr - module_tags.start_addr;
	MA_STATE(mas, &mod_area_mt, 0, section_size - 1);
	unsigned long offset;
	void *ret = NULL;

	/* If the area cannot hold even one tag, return an error */
	if (size < sizeof(struct alloc_tag))
		return ERR_PTR(-EINVAL);

	/*
	 * align is always power of 2, so we can use IS_ALIGNED and ALIGN.
	 * align 0 or 1 means no alignment, to simplify set to 1.
	 */
	if (!align)
		align = 1;

	if (!ensure_alignment(align, &prepend)) {
		shutdown_mem_profiling(true);
		pr_err("%s: alignment %lu is incompatible with allocation tag indexing. Memory allocation profiling is disabled!\n",
		       mod->name, align);
		return ERR_PTR(-EINVAL);
	}

	mas_lock(&mas);
	if (!find_aligned_area(&mas, section_size, size, prepend, align)) {
		ret = ERR_PTR(-ENOMEM);
		goto unlock;
	}

	/* Mark found area as reserved */
	offset = mas.index;
	offset += prepend;
	offset = ALIGN(offset, align);
	if (offset != mas.index) {
		unsigned long pad_start = mas.index;

		mas.last = offset - 1;
		mas_store(&mas, &prepend_mod);
		if (mas_is_err(&mas)) {
			ret = ERR_PTR(xa_err(mas.node));
			goto unlock;
		}
		mas.index = offset;
		mas.last = offset + size - 1;
		mas_store(&mas, mod);
		if (mas_is_err(&mas)) {
			mas.index = pad_start;
			mas_erase(&mas);
			ret = ERR_PTR(xa_err(mas.node));
		}
	} else {
		mas.last = offset + size - 1;
		mas_store(&mas, mod);
		if (mas_is_err(&mas))
			ret = ERR_PTR(xa_err(mas.node));
	}
unlock:
	mas_unlock(&mas);

	if (IS_ERR(ret))
		return ret;

	if (module_tags.size < offset + size) {
		int grow_res;

		module_tags.size = offset + size;
		if (mem_alloc_profiling_enabled() && !tags_addressable()) {
			shutdown_mem_profiling(true);
			pr_warn("With module %s there are too many tags to fit in %d page flag bits. Memory allocation profiling is disabled!\n",
				mod->name, NR_UNUSED_PAGEFLAG_BITS);
		}

		grow_res = vm_module_tags_populate();
		if (grow_res) {
			shutdown_mem_profiling(true);
			pr_err("Failed to allocate memory for allocation tags in the module %s. Memory allocation profiling is disabled!\n",
			       mod->name);
			return ERR_PTR(grow_res);
		}
	}

	return (struct alloc_tag *)(module_tags.start_addr + offset);
}

static void release_module_tags(struct module *mod, bool used)
{
	MA_STATE(mas, &mod_area_mt, module_tags.size, module_tags.size);
	struct alloc_tag *tag;
	struct module *val;

	mas_lock(&mas);
	mas_for_each_rev(&mas, val, 0)
		if (val == mod)
			break;

	if (!val) /* module not found */
		goto out;

	if (!used)
		goto release_area;

	/* Find out if the area is used */
	tag = find_used_tag((struct alloc_tag *)(module_tags.start_addr + mas.index),
			    (struct alloc_tag *)(module_tags.start_addr + mas.last));
	if (tag) {
		struct alloc_tag_counters counter = alloc_tag_read(tag);

		pr_info("%s:%u module %s func:%s has %llu bytes allocated at module unload\n",
			tag->ct.filename, tag->ct.lineno, tag->ct.modname,
			tag->ct.function, counter.bytes);
	} else {
		used = false;
	}
release_area:
	mas_store(&mas, used ? &unloaded_mod : NULL);
	val = mas_prev_range(&mas, 0);
	if (val == &prepend_mod)
		mas_store(&mas, NULL);
out:
	mas_unlock(&mas);
}

static void replace_module(struct module *mod, struct module *new_mod)
{
	MA_STATE(mas, &mod_area_mt, 0, module_tags.size);
	struct module *val;

	mas_lock(&mas);
	mas_for_each(&mas, val, module_tags.size) {
		if (val != mod)
			continue;

		mas_store_gfp(&mas, new_mod, GFP_KERNEL);
		break;
	}
	mas_unlock(&mas);
}

static int __init alloc_mod_tags_mem(void)
{
	/* Map space to copy allocation tags */
	vm_module_tags = execmem_vmap(MODULE_ALLOC_TAG_VMAP_SIZE);
	if (!vm_module_tags) {
		pr_err("Failed to map %lu bytes for module allocation tags\n",
		       MODULE_ALLOC_TAG_VMAP_SIZE);
		module_tags.start_addr = 0;
		return -ENOMEM;
	}

	vm_module_tags->pages = kmalloc_array(get_vm_area_size(vm_module_tags) >> PAGE_SHIFT,
					      sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
	if (!vm_module_tags->pages) {
		free_vm_area(vm_module_tags);
		return -ENOMEM;
	}

	module_tags.start_addr = (unsigned long)vm_module_tags->addr;
	module_tags.end_addr = module_tags.start_addr + MODULE_ALLOC_TAG_VMAP_SIZE;
	/* Ensure the base is alloc_tag aligned when required for indexing */
	module_tags.start_addr = alloc_tag_align(module_tags.start_addr);

	return 0;
}

static void __init free_mod_tags_mem(void)
{
	int i;

	module_tags.start_addr = 0;
	for (i = 0; i < vm_module_tags->nr_pages; i++)
		__free_page(vm_module_tags->pages[i]);
	kfree(vm_module_tags->pages);
	free_vm_area(vm_module_tags);
}

#else /* CONFIG_MODULES */

static inline int alloc_mod_tags_mem(void) { return 0; }
static inline void free_mod_tags_mem(void) {}

#endif /* CONFIG_MODULES */
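/*
 * For reference, the early parameter parsed below accepts forms like the
 * following on the kernel command line (values are examples; kstrtobool
 * also accepts on/off and yes/no):
 *
 *   sysctl.vm.mem_profiling=never          <- unsupported, cannot be enabled later
 *   sysctl.vm.mem_profiling=0              <- supported but initially off
 *   sysctl.vm.mem_profiling=1              <- enabled at boot
 *   sysctl.vm.mem_profiling=1,compressed   <- enabled, refs packed into page flags
 */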
/* See: Documentation/mm/allocation-profiling.rst */
static int __init setup_early_mem_profiling(char *str)
{
	bool compressed = false;
	bool enable;

	if (!str || !str[0])
		return -EINVAL;

	if (!strncmp(str, "never", 5)) {
		enable = false;
		mem_profiling_support = false;
		pr_info("Memory allocation profiling is disabled!\n");
	} else {
		char *token = strsep(&str, ",");

		if (kstrtobool(token, &enable))
			return -EINVAL;

		if (str) {
			if (strcmp(str, "compressed"))
				return -EINVAL;

			compressed = true;
		}
		mem_profiling_support = true;
		pr_info("Memory allocation profiling is enabled %s compression and is turned %s!\n",
			compressed ? "with" : "without", enable ? "on" : "off");
	}

	if (enable != mem_alloc_profiling_enabled()) {
		if (enable)
			static_branch_enable(&mem_alloc_profiling_key);
		else
			static_branch_disable(&mem_alloc_profiling_key);
	}
	if (compressed != static_key_enabled(&mem_profiling_compressed)) {
		if (compressed)
			static_branch_enable(&mem_profiling_compressed);
		else
			static_branch_disable(&mem_profiling_compressed);
	}

	return 0;
}
early_param("sysctl.vm.mem_profiling", setup_early_mem_profiling);

static __init bool need_page_alloc_tagging(void)
{
	if (static_key_enabled(&mem_profiling_compressed))
		return false;

	return mem_profiling_support;
}

static __init void init_page_alloc_tagging(void)
{
}

struct page_ext_operations page_alloc_tagging_ops = {
	.size = sizeof(union codetag_ref),
	.need = need_page_alloc_tagging,
	.init = init_page_alloc_tagging,
};
EXPORT_SYMBOL(page_alloc_tagging_ops);

#ifdef CONFIG_SYSCTL
static struct ctl_table memory_allocation_profiling_sysctls[] = {
	{
		.procname	= "mem_profiling",
		.data		= &mem_alloc_profiling_key,
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
		.mode		= 0444,
#else
		.mode		= 0644,
#endif
		.proc_handler	= proc_do_static_key,
	},
};

static void __init sysctl_init(void)
{
	if (!mem_profiling_support)
		memory_allocation_profiling_sysctls[0].mode = 0444;

	register_sysctl_init("vm", memory_allocation_profiling_sysctls);
}
#else /* CONFIG_SYSCTL */
static inline void sysctl_init(void) {}
#endif /* CONFIG_SYSCTL */

static int __init alloc_tag_init(void)
{
	const struct codetag_type_desc desc = {
		.section		= ALLOC_TAG_SECTION_NAME,
		.tag_size		= sizeof(struct alloc_tag),
#ifdef CONFIG_MODULES
		.needs_section_mem	= needs_section_mem,
		.alloc_section_mem	= reserve_module_tags,
		.free_section_mem	= release_module_tags,
		.module_replaced	= replace_module,
#endif
	};
	int res;

	sysctl_init();

	if (!mem_profiling_support) {
		pr_info("Memory allocation profiling is not supported!\n");
		return 0;
	}

	if (!proc_create_seq(ALLOCINFO_FILE_NAME, 0400, NULL, &allocinfo_seq_op)) {
		pr_err("Failed to create %s file\n", ALLOCINFO_FILE_NAME);
		shutdown_mem_profiling(false);
		return -ENOMEM;
	}

	res = alloc_mod_tags_mem();
	if (res) {
		pr_err("Failed to reserve address space for module tags, errno = %d\n", res);
		shutdown_mem_profiling(true);
		return res;
	}

	alloc_tag_cttype = codetag_register_type(&desc);
	if (IS_ERR(alloc_tag_cttype)) {
		pr_err("Allocation tags registration failed, errno = %ld\n", PTR_ERR(alloc_tag_cttype));
		free_mod_tags_mem();
		shutdown_mem_profiling(true);
		return PTR_ERR(alloc_tag_cttype);
	}

	return 0;
}
module_init(alloc_tag_init);
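/*
 * Runtime usage note (shell commands, not part of this file): when
 * mem_profiling_support is set, the sysctl registered above toggles the
 * static key without a reboot, e.g.
 *
 *   echo 1 > /proc/sys/vm/mem_profiling    # enable accounting
 *   grep vmalloc /proc/allocinfo           # inspect specific call sites
 *   echo 0 > /proc/sys/vm/mem_profiling    # disable again
 */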