xref: /linux/lib/alloc_tag.c (revision 4aa748dd1abf337426b4c941ae1b606ed0e2a5aa)
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/alloc_tag.h>
#include <linux/execmem.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/page_ext.h>
#include <linux/proc_fs.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#define ALLOCINFO_FILE_NAME		"allocinfo"
#define MODULE_ALLOC_TAG_VMAP_SIZE	(100000UL * sizeof(struct alloc_tag))
#define SECTION_START(NAME)		(CODETAG_SECTION_START_PREFIX NAME)
#define SECTION_STOP(NAME)		(CODETAG_SECTION_STOP_PREFIX NAME)

#ifdef CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT
static bool mem_profiling_support = true;
#else
static bool mem_profiling_support;
#endif

static struct codetag_type *alloc_tag_cttype;

DEFINE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
EXPORT_SYMBOL(_shared_alloc_tag);

DEFINE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
			mem_alloc_profiling_key);
DEFINE_STATIC_KEY_FALSE(mem_profiling_compressed);

struct alloc_tag_kernel_section kernel_tags = { NULL, 0 };
unsigned long alloc_tag_ref_mask;
int alloc_tag_ref_offs;

struct allocinfo_private {
	struct codetag_iterator iter;
	bool print_header;
};
static void *allocinfo_start(struct seq_file *m, loff_t *pos)
{
	struct allocinfo_private *priv;
	struct codetag *ct;
	loff_t node = *pos;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	m->private = priv;
	if (!priv)
		return NULL;

	priv->print_header = (node == 0);
	codetag_lock_module_list(alloc_tag_cttype, true);
	priv->iter = codetag_get_ct_iter(alloc_tag_cttype);
	while ((ct = codetag_next_ct(&priv->iter)) != NULL && node)
		node--;

	return ct ? priv : NULL;
}

static void *allocinfo_next(struct seq_file *m, void *arg, loff_t *pos)
{
	struct allocinfo_private *priv = (struct allocinfo_private *)arg;
	struct codetag *ct = codetag_next_ct(&priv->iter);

	(*pos)++;
	if (!ct)
		return NULL;

	return priv;
}

static void allocinfo_stop(struct seq_file *m, void *arg)
{
	struct allocinfo_private *priv = (struct allocinfo_private *)m->private;

	if (priv) {
		codetag_lock_module_list(alloc_tag_cttype, false);
		kfree(priv);
	}
}

static void print_allocinfo_header(struct seq_buf *buf)
{
	/* Output format version, so we can change it. */
	seq_buf_printf(buf, "allocinfo - version: 1.0\n");
	seq_buf_printf(buf, "#     <size>  <calls> <tag info>\n");
}
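
/*
 * Example output in the format above (values and call sites are
 * hypothetical, for illustration only):
 *
 *   allocinfo - version: 1.0
 *   #     <size>  <calls> <tag info>
 *          40960       10 lib/foo.c:123 func:foo_init
 *         532480      130 mm/bar.c:456 func:bar_get_folio
 */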

static void alloc_tag_to_text(struct seq_buf *out, struct codetag *ct)
{
	struct alloc_tag *tag = ct_to_alloc_tag(ct);
	struct alloc_tag_counters counter = alloc_tag_read(tag);
	s64 bytes = counter.bytes;

	seq_buf_printf(out, "%12lli %8llu ", bytes, counter.calls);
	codetag_to_text(out, ct);
	seq_buf_putc(out, ' ');
	seq_buf_putc(out, '\n');
}

static int allocinfo_show(struct seq_file *m, void *arg)
{
	struct allocinfo_private *priv = (struct allocinfo_private *)arg;
	char *bufp;
	size_t n = seq_get_buf(m, &bufp);
	struct seq_buf buf;

	seq_buf_init(&buf, bufp, n);
	if (priv->print_header) {
		print_allocinfo_header(&buf);
		priv->print_header = false;
	}
	alloc_tag_to_text(&buf, priv->iter.ct);
	seq_commit(m, seq_buf_used(&buf));
	return 0;
}

static const struct seq_operations allocinfo_seq_op = {
	.start	= allocinfo_start,
	.next	= allocinfo_next,
	.stop	= allocinfo_stop,
	.show	= allocinfo_show,
};
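
/*
 * Usage sketch (not part of this file): the seq_operations above back the
 * /proc/allocinfo file created in procfs_init() below, so the data can be
 * consumed with ordinary tools, e.g.:
 *
 *   cat /proc/allocinfo
 *   sort -g /proc/allocinfo | tail	# biggest consumers last
 *
 * Each reader walks every registered codetag under the module-list lock.
 */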

size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep)
{
	struct codetag_iterator iter;
	struct codetag *ct;
	struct codetag_bytes n;
	unsigned int i, nr = 0;

	if (can_sleep)
		codetag_lock_module_list(alloc_tag_cttype, true);
	else if (!codetag_trylock_module_list(alloc_tag_cttype))
		return 0;

	iter = codetag_get_ct_iter(alloc_tag_cttype);
	while ((ct = codetag_next_ct(&iter))) {
		struct alloc_tag_counters counter = alloc_tag_read(ct_to_alloc_tag(ct));

		n.ct	= ct;
		n.bytes = counter.bytes;

		for (i = 0; i < nr; i++)
			if (n.bytes > tags[i].bytes)
				break;

		if (i < count) {
			nr -= nr == count;
			memmove(&tags[i + 1],
				&tags[i],
				sizeof(tags[0]) * (nr - i));
			nr++;
			tags[i] = n;
		}
	}

	codetag_lock_module_list(alloc_tag_cttype, false);

	return nr;
}
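
/*
 * Worked example for the insertion loop above (hypothetical byte counts):
 * with count == 2 and tags arriving with 10, 30 and 20 bytes:
 *   10 -> [10]         (first entry, stored at i == 0)
 *   30 -> [30, 10]     (sorts before 10, existing entry shifted right)
 *   20 -> [30, 20]     (array full: "nr -= nr == count" drops the smallest
 *                       entry before 20 is stored at i == 1)
 * tags[] thus always holds the top-count tags in descending byte order.
 */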

void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
{
	int i;
	struct alloc_tag *tag;
	unsigned int nr_pages = 1 << new_order;

	if (!mem_alloc_profiling_enabled())
		return;

	tag = pgalloc_tag_get(&folio->page);
	if (!tag)
		return;

	for (i = nr_pages; i < (1 << old_order); i += nr_pages) {
		union pgtag_ref_handle handle;
		union codetag_ref ref;

		if (get_page_tag_ref(folio_page(folio, i), &ref, &handle)) {
			/* Set new reference to point to the original tag */
			alloc_tag_ref_set(&ref, tag);
			update_page_tag_ref(handle, &ref);
			put_page_tag_ref(handle);
		}
	}
}
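
/*
 * Example (hypothetical orders): splitting an old_order == 2 folio into
 * new_order == 0 pages leaves the head page's tag reference untouched and
 * makes pages 1, 2 and 3 reference the same tag, so the tag's counters
 * stay consistent when the split pieces are freed independently.
 */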

void pgalloc_tag_swap(struct folio *new, struct folio *old)
{
	union pgtag_ref_handle handle_old, handle_new;
	union codetag_ref ref_old, ref_new;
	struct alloc_tag *tag_old, *tag_new;

	tag_old = pgalloc_tag_get(&old->page);
	if (!tag_old)
		return;
	tag_new = pgalloc_tag_get(&new->page);
	if (!tag_new)
		return;

	if (!get_page_tag_ref(&old->page, &ref_old, &handle_old))
		return;
	if (!get_page_tag_ref(&new->page, &ref_new, &handle_new)) {
		put_page_tag_ref(handle_old);
		return;
	}

	/*
	 * Clear the tag references to avoid a debug warning when
	 * __alloc_tag_ref_set() is used with a non-empty reference.
	 */
	set_codetag_empty(&ref_old);
	set_codetag_empty(&ref_new);

	/* swap tags */
	__alloc_tag_ref_set(&ref_old, tag_new);
	update_page_tag_ref(handle_old, &ref_old);
	__alloc_tag_ref_set(&ref_new, tag_old);
	update_page_tag_ref(handle_new, &ref_new);

	put_page_tag_ref(handle_old);
	put_page_tag_ref(handle_new);
}

static void shutdown_mem_profiling(bool remove_file)
{
	if (mem_alloc_profiling_enabled())
		static_branch_disable(&mem_alloc_profiling_key);

	if (!mem_profiling_support)
		return;

	if (remove_file)
		remove_proc_entry(ALLOCINFO_FILE_NAME, NULL);
	mem_profiling_support = false;
}

static void __init procfs_init(void)
{
	if (!mem_profiling_support)
		return;

	if (!proc_create_seq(ALLOCINFO_FILE_NAME, 0400, NULL, &allocinfo_seq_op)) {
		pr_err("Failed to create %s file\n", ALLOCINFO_FILE_NAME);
		shutdown_mem_profiling(false);
	}
}

void __init alloc_tag_sec_init(void)
{
	struct alloc_tag *last_codetag;

	if (!mem_profiling_support)
		return;

	if (!static_key_enabled(&mem_profiling_compressed))
		return;

	kernel_tags.first_tag = (struct alloc_tag *)kallsyms_lookup_name(
					SECTION_START(ALLOC_TAG_SECTION_NAME));
	last_codetag = (struct alloc_tag *)kallsyms_lookup_name(
					SECTION_STOP(ALLOC_TAG_SECTION_NAME));
	kernel_tags.count = last_codetag - kernel_tags.first_tag;

	/* Check if kernel tags fit into page flags */
	if (kernel_tags.count > (1UL << NR_UNUSED_PAGEFLAG_BITS)) {
		shutdown_mem_profiling(false); /* allocinfo file does not exist yet */
		pr_err("%lu allocation tags cannot be referenced using %d available page flag bits. Memory allocation profiling is disabled!\n",
			kernel_tags.count, NR_UNUSED_PAGEFLAG_BITS);
		return;
	}

	alloc_tag_ref_offs = (LRU_REFS_PGOFF - NR_UNUSED_PAGEFLAG_BITS);
	alloc_tag_ref_mask = ((1UL << NR_UNUSED_PAGEFLAG_BITS) - 1);
	pr_debug("Memory allocation profiling compression is using %d page flag bits!\n",
		 NR_UNUSED_PAGEFLAG_BITS);
}

#ifdef CONFIG_MODULES

static struct maple_tree mod_area_mt = MTREE_INIT(mod_area_mt, MT_FLAGS_ALLOC_RANGE);
static struct vm_struct *vm_module_tags;
/* A dummy object used to indicate an unloaded module */
static struct module unloaded_mod;
/* A dummy object used to indicate a module prepended area */
static struct module prepend_mod;

struct alloc_tag_module_section module_tags;

static inline unsigned long alloc_tag_align(unsigned long val)
{
	if (!static_key_enabled(&mem_profiling_compressed)) {
		/* No alignment requirements when we are not indexing the tags */
		return val;
	}

	if (val % sizeof(struct alloc_tag) == 0)
		return val;
	return ((val / sizeof(struct alloc_tag)) + 1) * sizeof(struct alloc_tag);
}
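
/*
 * Rounding example (the tag size is hypothetical): if sizeof(struct
 * alloc_tag) were 40 and val == 100, the result is (100/40 + 1) * 40 ==
 * 120, the next multiple of the tag size, which keeps the tag index
 * (offset / sizeof(struct alloc_tag)) exact for compressed references.
 */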

static bool ensure_alignment(unsigned long align, unsigned int *prepend)
{
	if (!static_key_enabled(&mem_profiling_compressed)) {
		/* No alignment requirements when we are not indexing the tags */
		return true;
	}

	/*
	 * If the alloc_tag size is not a multiple of the required alignment,
	 * tag indexing does not work.
	 */
	if (!IS_ALIGNED(sizeof(struct alloc_tag), align))
		return false;

	/* Ensure prepend consumes a multiple of alloc_tag-sized blocks */
	if (*prepend)
		*prepend = alloc_tag_align(*prepend);

	return true;
}

static inline bool tags_addressable(void)
{
	unsigned long tag_idx_count;

	if (!static_key_enabled(&mem_profiling_compressed))
		return true; /* with page_ext, tags are always addressable */

	tag_idx_count = CODETAG_ID_FIRST + kernel_tags.count +
			module_tags.size / sizeof(struct alloc_tag);

	return tag_idx_count < (1UL << NR_UNUSED_PAGEFLAG_BITS);
}
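
/*
 * Example (the bit count is hypothetical): with NR_UNUSED_PAGEFLAG_BITS ==
 * 16 a compressed reference can encode 65536 indices, which must cover the
 * CODETAG_ID_FIRST reserved ids, every built-in kernel tag and one index
 * per alloc_tag-sized slot of the module area currently in use.
 */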

static bool needs_section_mem(struct module *mod, unsigned long size)
{
	if (!mem_profiling_support)
		return false;

	return size >= sizeof(struct alloc_tag);
}

static struct alloc_tag *find_used_tag(struct alloc_tag *from, struct alloc_tag *to)
{
	while (from <= to) {
		struct alloc_tag_counters counter;

		counter = alloc_tag_read(from);
		if (counter.bytes)
			return from;
		from++;
	}

	return NULL;
}

/* Called with mod_area_mt locked */
static void clean_unused_module_areas_locked(void)
{
	MA_STATE(mas, &mod_area_mt, 0, module_tags.size);
	struct module *val;

	mas_for_each(&mas, val, module_tags.size) {
		if (val != &unloaded_mod)
			continue;

		/* Release area if all tags are unused */
		if (!find_used_tag((struct alloc_tag *)(module_tags.start_addr + mas.index),
				   (struct alloc_tag *)(module_tags.start_addr + mas.last)))
			mas_erase(&mas);
	}
}

/* Called with mod_area_mt locked */
static bool find_aligned_area(struct ma_state *mas, unsigned long section_size,
			      unsigned long size, unsigned int prepend, unsigned long align)
{
	bool cleanup_done = false;

repeat:
	/* Try finding an exact-size area and hope its start is aligned */
	if (!mas_empty_area(mas, 0, section_size - 1, prepend + size)) {
		if (IS_ALIGNED(mas->index + prepend, align))
			return true;

		/* Try finding a larger area to align later */
		mas_reset(mas);
		if (!mas_empty_area(mas, 0, section_size - 1,
				    size + prepend + align - 1))
			return true;
	}

	/* No free area; try to clean up stale data and repeat the search once */
	if (!cleanup_done) {
		clean_unused_module_areas_locked();
		cleanup_done = true;
		mas_reset(mas);
		goto repeat;
	}

	return false;
}
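
/*
 * In short, the search above is two-pass: first look for a gap of exactly
 * prepend + size bytes and hope its start is already aligned; otherwise
 * look for a gap of size + prepend + align - 1 bytes, which always
 * contains a suitably aligned start, and let the caller reserve the
 * leading padding as a prepend_mod range.
 */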

static int vm_module_tags_populate(void)
{
	unsigned long phys_end = ALIGN_DOWN(module_tags.start_addr, PAGE_SIZE) +
				 (vm_module_tags->nr_pages << PAGE_SHIFT);
	unsigned long new_end = module_tags.start_addr + module_tags.size;

	if (phys_end < new_end) {
		struct page **next_page = vm_module_tags->pages + vm_module_tags->nr_pages;
		unsigned long old_shadow_end = ALIGN(phys_end, MODULE_ALIGN);
		unsigned long new_shadow_end = ALIGN(new_end, MODULE_ALIGN);
		unsigned long more_pages;
		unsigned long nr;

		more_pages = ALIGN(new_end - phys_end, PAGE_SIZE) >> PAGE_SHIFT;
		nr = alloc_pages_bulk_array_node(GFP_KERNEL | __GFP_NOWARN,
						 NUMA_NO_NODE, more_pages, next_page);
		if (nr < more_pages ||
		    vmap_pages_range(phys_end, phys_end + (nr << PAGE_SHIFT), PAGE_KERNEL,
				     next_page, PAGE_SHIFT) < 0) {
			/* Clean up and error out */
			for (int i = 0; i < nr; i++)
				__free_page(next_page[i]);
			return -ENOMEM;
		}

		vm_module_tags->nr_pages += nr;

		/*
		 * KASAN allocates 1 byte of shadow for every 8 bytes of data.
		 * kasan_alloc_module_shadow() allocates shadow memory a page
		 * at a time, so the range must be aligned to MODULE_ALIGN here.
		 */
		if (old_shadow_end < new_shadow_end)
			kasan_alloc_module_shadow((void *)old_shadow_end,
						  new_shadow_end - old_shadow_end,
						  GFP_KERNEL);
	}

	/*
	 * Mark the pages as accessible, now that they are mapped.
	 * With hardware tag-based KASAN, marking is skipped for
	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
	 */
	kasan_unpoison_vmalloc((void *)module_tags.start_addr,
				new_end - module_tags.start_addr,
				KASAN_VMALLOC_PROT_NORMAL);

	return 0;
}
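
/*
 * Growth example (sizes hypothetical, PAGE_SIZE == 4096): with 3 pages
 * already populated, phys_end lies 12288 bytes past the page-aligned base.
 * Growing module_tags.size so that new_end exceeds phys_end by 7712 bytes
 * bulk-allocates and maps ALIGN(7712, PAGE_SIZE) >> PAGE_SHIFT == 2 more
 * pages before any tag in the new range is ever dereferenced.
 */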

static void *reserve_module_tags(struct module *mod, unsigned long size,
				 unsigned int prepend, unsigned long align)
{
	unsigned long section_size = module_tags.end_addr - module_tags.start_addr;
	MA_STATE(mas, &mod_area_mt, 0, section_size - 1);
	unsigned long offset;
	void *ret = NULL;

	/* If the area cannot hold even one tag, return an error */
	if (size < sizeof(struct alloc_tag))
		return ERR_PTR(-EINVAL);

	/*
	 * align is always a power of 2, so we can use IS_ALIGNED() and ALIGN().
	 * align of 0 or 1 means no alignment; to simplify, set it to 1.
	 */
	if (!align)
		align = 1;

	if (!ensure_alignment(align, &prepend)) {
		shutdown_mem_profiling(true);
		pr_err("%s: alignment %lu is incompatible with allocation tag indexing. Memory allocation profiling is disabled!\n",
			mod->name, align);
		return ERR_PTR(-EINVAL);
	}

	mas_lock(&mas);
	if (!find_aligned_area(&mas, section_size, size, prepend, align)) {
		ret = ERR_PTR(-ENOMEM);
		goto unlock;
	}

	/* Mark found area as reserved */
	offset = mas.index;
	offset += prepend;
	offset = ALIGN(offset, align);
	if (offset != mas.index) {
		unsigned long pad_start = mas.index;

		mas.last = offset - 1;
		mas_store(&mas, &prepend_mod);
		if (mas_is_err(&mas)) {
			ret = ERR_PTR(xa_err(mas.node));
			goto unlock;
		}
		mas.index = offset;
		mas.last = offset + size - 1;
		mas_store(&mas, mod);
		if (mas_is_err(&mas)) {
			mas.index = pad_start;
			mas_erase(&mas);
			ret = ERR_PTR(xa_err(mas.node));
		}
	} else {
		mas.last = offset + size - 1;
		mas_store(&mas, mod);
		if (mas_is_err(&mas))
			ret = ERR_PTR(xa_err(mas.node));
	}
unlock:
	mas_unlock(&mas);

	if (IS_ERR(ret))
		return ret;

	if (module_tags.size < offset + size) {
		int grow_res;

		module_tags.size = offset + size;
		if (mem_alloc_profiling_enabled() && !tags_addressable()) {
			shutdown_mem_profiling(true);
			pr_warn("With module %s there are too many tags to fit in %d page flag bits. Memory allocation profiling is disabled!\n",
				mod->name, NR_UNUSED_PAGEFLAG_BITS);
		}

		grow_res = vm_module_tags_populate();
		if (grow_res) {
			shutdown_mem_profiling(true);
			pr_err("Failed to allocate memory for allocation tags in the module %s. Memory allocation profiling is disabled!\n",
			       mod->name);
			return ERR_PTR(grow_res);
		}
	}

	return (struct alloc_tag *)(module_tags.start_addr + offset);
}
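
/*
 * Resulting layout when alignment padding was needed (illustrative):
 *
 *   mas.index              offset                  offset + size - 1
 *      |<-- prepend_mod -->|<------ mod's tags ------>|
 *
 * The prepend_mod placeholder keeps the padding reserved in the maple tree
 * so later searches cannot hand it out; release_module_tags() clears it
 * when the module's own range is released.
 */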

static void release_module_tags(struct module *mod, bool used)
{
	MA_STATE(mas, &mod_area_mt, module_tags.size, module_tags.size);
	struct alloc_tag *tag;
	struct module *val;

	mas_lock(&mas);
	mas_for_each_rev(&mas, val, 0)
		if (val == mod)
			break;

	if (!val) /* module not found */
		goto out;

	if (!used)
		goto release_area;

	/* Find out if the area is used */
	tag = find_used_tag((struct alloc_tag *)(module_tags.start_addr + mas.index),
			    (struct alloc_tag *)(module_tags.start_addr + mas.last));
	if (tag) {
		struct alloc_tag_counters counter = alloc_tag_read(tag);

		pr_info("%s:%u module %s func:%s has %llu allocated at module unload\n",
			tag->ct.filename, tag->ct.lineno, tag->ct.modname,
			tag->ct.function, counter.bytes);
	} else {
		used = false;
	}
release_area:
	mas_store(&mas, used ? &unloaded_mod : NULL);
	val = mas_prev_range(&mas, 0);
	if (val == &prepend_mod)
		mas_store(&mas, NULL);
out:
	mas_unlock(&mas);
}

static void replace_module(struct module *mod, struct module *new_mod)
{
	MA_STATE(mas, &mod_area_mt, 0, module_tags.size);
	struct module *val;

	mas_lock(&mas);
	mas_for_each(&mas, val, module_tags.size) {
		if (val != mod)
			continue;

		mas_store_gfp(&mas, new_mod, GFP_KERNEL);
		break;
	}
	mas_unlock(&mas);
}

static int __init alloc_mod_tags_mem(void)
{
	/* Map space to copy allocation tags */
	vm_module_tags = execmem_vmap(MODULE_ALLOC_TAG_VMAP_SIZE);
	if (!vm_module_tags) {
		pr_err("Failed to map %lu bytes for module allocation tags\n",
			MODULE_ALLOC_TAG_VMAP_SIZE);
		module_tags.start_addr = 0;
		return -ENOMEM;
	}

	vm_module_tags->pages = kmalloc_array(get_vm_area_size(vm_module_tags) >> PAGE_SHIFT,
					sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
	if (!vm_module_tags->pages) {
		free_vm_area(vm_module_tags);
		return -ENOMEM;
	}

	module_tags.start_addr = (unsigned long)vm_module_tags->addr;
	module_tags.end_addr = module_tags.start_addr + MODULE_ALLOC_TAG_VMAP_SIZE;
	/* Ensure the base is alloc_tag aligned when required for indexing */
	module_tags.start_addr = alloc_tag_align(module_tags.start_addr);

	return 0;
}

static void __init free_mod_tags_mem(void)
{
	int i;

	module_tags.start_addr = 0;
	for (i = 0; i < vm_module_tags->nr_pages; i++)
		__free_page(vm_module_tags->pages[i]);
	kfree(vm_module_tags->pages);
	free_vm_area(vm_module_tags);
}

#else /* CONFIG_MODULES */

static inline int alloc_mod_tags_mem(void) { return 0; }
static inline void free_mod_tags_mem(void) {}

#endif /* CONFIG_MODULES */

/* See: Documentation/mm/allocation-profiling.rst */
static int __init setup_early_mem_profiling(char *str)
{
	bool compressed = false;
	bool enable;

	if (!str || !str[0])
		return -EINVAL;

	if (!strncmp(str, "never", 5)) {
		enable = false;
		mem_profiling_support = false;
		pr_info("Memory allocation profiling is disabled!\n");
	} else {
		char *token = strsep(&str, ",");

		if (kstrtobool(token, &enable))
			return -EINVAL;

		if (str) {
			if (strcmp(str, "compressed"))
				return -EINVAL;

			compressed = true;
		}
		mem_profiling_support = true;
		pr_info("Memory allocation profiling is enabled %s compression and is turned %s!\n",
			compressed ? "with" : "without", enable ? "on" : "off");
	}

	if (enable != mem_alloc_profiling_enabled()) {
		if (enable)
			static_branch_enable(&mem_alloc_profiling_key);
		else
			static_branch_disable(&mem_alloc_profiling_key);
	}
	if (compressed != static_key_enabled(&mem_profiling_compressed)) {
		if (compressed)
			static_branch_enable(&mem_profiling_compressed);
		else
			static_branch_disable(&mem_profiling_compressed);
	}

	return 0;
}
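
/*
 * Boot parameter usage, as parsed above (see
 * Documentation/mm/allocation-profiling.rst):
 *
 *   sysctl.vm.mem_profiling=0			support kept, profiling off
 *   sysctl.vm.mem_profiling=1			support kept, profiling on
 *   sysctl.vm.mem_profiling=1,compressed	also pack refs into page flags
 *   sysctl.vm.mem_profiling=never		support dropped for this boot
 *
 * With "never", mem_profiling_support stays false, so /proc/allocinfo is
 * never created and the vm.mem_profiling sysctl is left read-only.
 */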
early_param("sysctl.vm.mem_profiling", setup_early_mem_profiling);

static __init bool need_page_alloc_tagging(void)
{
	if (static_key_enabled(&mem_profiling_compressed))
		return false;

	return mem_profiling_support;
}

static __init void init_page_alloc_tagging(void)
{
}

struct page_ext_operations page_alloc_tagging_ops = {
	.size = sizeof(union codetag_ref),
	.need = need_page_alloc_tagging,
	.init = init_page_alloc_tagging,
};
EXPORT_SYMBOL(page_alloc_tagging_ops);

#ifdef CONFIG_SYSCTL
static struct ctl_table memory_allocation_profiling_sysctls[] = {
	{
		.procname	= "mem_profiling",
		.data		= &mem_alloc_profiling_key,
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
		.mode		= 0444,
#else
		.mode		= 0644,
#endif
		.proc_handler	= proc_do_static_key,
	},
};

static void __init sysctl_init(void)
{
	if (!mem_profiling_support)
		memory_allocation_profiling_sysctls[0].mode = 0444;

	register_sysctl_init("vm", memory_allocation_profiling_sysctls);
}
#else /* CONFIG_SYSCTL */
static inline void sysctl_init(void) {}
#endif /* CONFIG_SYSCTL */

static int __init alloc_tag_init(void)
{
	const struct codetag_type_desc desc = {
		.section		= ALLOC_TAG_SECTION_NAME,
		.tag_size		= sizeof(struct alloc_tag),
#ifdef CONFIG_MODULES
		.needs_section_mem	= needs_section_mem,
		.alloc_section_mem	= reserve_module_tags,
		.free_section_mem	= release_module_tags,
		.module_replaced	= replace_module,
#endif
	};
	int res;

	res = alloc_mod_tags_mem();
	if (res)
		return res;

	alloc_tag_cttype = codetag_register_type(&desc);
	if (IS_ERR(alloc_tag_cttype)) {
		free_mod_tags_mem();
		return PTR_ERR(alloc_tag_cttype);
	}

	sysctl_init();
	procfs_init();

	return 0;
}
module_init(alloc_tag_init);