// SPDX-License-Identifier: GPL-2.0-only
#include <linux/alloc_tag.h>
#include <linux/execmem.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/page_ext.h>
#include <linux/proc_fs.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>

#define ALLOCINFO_FILE_NAME		"allocinfo"
#define MODULE_ALLOC_TAG_VMAP_SIZE	(100000UL * sizeof(struct alloc_tag))
#define SECTION_START(NAME)		(CODETAG_SECTION_START_PREFIX NAME)
#define SECTION_STOP(NAME)		(CODETAG_SECTION_STOP_PREFIX NAME)

#ifdef CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT
static bool mem_profiling_support = true;
#else
static bool mem_profiling_support;
#endif

static struct codetag_type *alloc_tag_cttype;

DEFINE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
EXPORT_SYMBOL(_shared_alloc_tag);

DEFINE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
			mem_alloc_profiling_key);
EXPORT_SYMBOL(mem_alloc_profiling_key);

DEFINE_STATIC_KEY_FALSE(mem_profiling_compressed);

struct alloc_tag_kernel_section kernel_tags = { NULL, 0 };
unsigned long alloc_tag_ref_mask;
int alloc_tag_ref_offs;

struct allocinfo_private {
	struct codetag_iterator iter;
	bool print_header;
};

static void *allocinfo_start(struct seq_file *m, loff_t *pos)
{
	struct allocinfo_private *priv;
	struct codetag *ct;
	loff_t node = *pos;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	m->private = priv;
	if (!priv)
		return NULL;

	priv->print_header = (node == 0);
	codetag_lock_module_list(alloc_tag_cttype, true);
	priv->iter = codetag_get_ct_iter(alloc_tag_cttype);
	while ((ct = codetag_next_ct(&priv->iter)) != NULL && node)
		node--;

	return ct ? priv : NULL;
}

static void *allocinfo_next(struct seq_file *m, void *arg, loff_t *pos)
{
	struct allocinfo_private *priv = (struct allocinfo_private *)arg;
	struct codetag *ct = codetag_next_ct(&priv->iter);

	(*pos)++;
	if (!ct)
		return NULL;

	return priv;
}

static void allocinfo_stop(struct seq_file *m, void *arg)
{
	struct allocinfo_private *priv = (struct allocinfo_private *)m->private;

	if (priv) {
		codetag_lock_module_list(alloc_tag_cttype, false);
		kfree(priv);
	}
}

static void print_allocinfo_header(struct seq_buf *buf)
{
	/* Output format version, so we can change it. */
	seq_buf_printf(buf, "allocinfo - version: 1.0\n");
	seq_buf_printf(buf, "#     <size>  <calls> <tag info>\n");
}

static void alloc_tag_to_text(struct seq_buf *out, struct codetag *ct)
{
	struct alloc_tag *tag = ct_to_alloc_tag(ct);
	struct alloc_tag_counters counter = alloc_tag_read(tag);
	s64 bytes = counter.bytes;

	seq_buf_printf(out, "%12lli %8llu ", bytes, counter.calls);
	codetag_to_text(out, ct);
	seq_buf_putc(out, ' ');
	seq_buf_putc(out, '\n');
}

static int allocinfo_show(struct seq_file *m, void *arg)
{
	struct allocinfo_private *priv = (struct allocinfo_private *)arg;
	char *bufp;
	size_t n = seq_get_buf(m, &bufp);
	struct seq_buf buf;

	seq_buf_init(&buf, bufp, n);
	if (priv->print_header) {
		print_allocinfo_header(&buf);
		priv->print_header = false;
	}
	alloc_tag_to_text(&buf, priv->iter.ct);
	seq_commit(m, seq_buf_used(&buf));
	return 0;
}

static const struct seq_operations allocinfo_seq_op = {
	.start	= allocinfo_start,
	.next	= allocinfo_next,
	.stop	= allocinfo_stop,
	.show	= allocinfo_show,
};

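/*
 * Example /proc/allocinfo output produced by the seq_operations above
 * (illustrative values only; the header comes from print_allocinfo_header()
 * and each record from alloc_tag_to_text()):
 *
 *	allocinfo - version: 1.0
 *	#     <size>  <calls> <tag info>
 *	       40960       10 mm/some_file.c:123 func:some_alloc_func
 */
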
size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep)
{
	struct codetag_iterator iter;
	struct codetag *ct;
	struct codetag_bytes n;
	unsigned int i, nr = 0;

	if (can_sleep)
		codetag_lock_module_list(alloc_tag_cttype, true);
	else if (!codetag_trylock_module_list(alloc_tag_cttype))
		return 0;

	iter = codetag_get_ct_iter(alloc_tag_cttype);
	while ((ct = codetag_next_ct(&iter))) {
		struct alloc_tag_counters counter = alloc_tag_read(ct_to_alloc_tag(ct));

		n.ct	= ct;
		n.bytes = counter.bytes;

		for (i = 0; i < nr; i++)
			if (n.bytes > tags[i].bytes)
				break;

		if (i < count) {
			nr -= nr == count;
			memmove(&tags[i + 1],
				&tags[i],
				sizeof(tags[0]) * (nr - i));
			nr++;
			tags[i] = n;
		}
	}

	codetag_lock_module_list(alloc_tag_cttype, false);

	return nr;
}

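/*
 * Minimal usage sketch for alloc_tag_top_users() (hypothetical caller;
 * low-memory reporting code uses it along these lines): the function fills
 * the first nr entries in descending order of bytes and returns nr.
 *
 *	struct codetag_bytes tags[8] = { 0 };
 *	size_t i, nr = alloc_tag_top_users(tags, ARRAY_SIZE(tags), false);
 *
 *	for (i = 0; i < nr; i++)
 *		pr_notice("top#%zu: %lld bytes\n", i, tags[i].bytes);
 */
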
void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
{
	int i;
	struct alloc_tag *tag;
	unsigned int nr_pages = 1 << new_order;

	if (!mem_alloc_profiling_enabled())
		return;

	tag = __pgalloc_tag_get(&folio->page);
	if (!tag)
		return;

	for (i = nr_pages; i < (1 << old_order); i += nr_pages) {
		union pgtag_ref_handle handle;
		union codetag_ref ref;

		if (get_page_tag_ref(folio_page(folio, i), &ref, &handle)) {
			/* Set new reference to point to the original tag */
			alloc_tag_ref_set(&ref, tag);
			update_page_tag_ref(handle, &ref);
			put_page_tag_ref(handle);
		}
	}
}

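/*
 * Worked example for pgalloc_tag_split(): splitting an order-3 folio into
 * order-1 pieces (old_order = 3, new_order = 1) makes the loop above visit
 * i = 2, 4 and 6, so every new head page except the first (which already
 * carries the reference) gets a reference to the original tag.
 */
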
void pgalloc_tag_swap(struct folio *new, struct folio *old)
{
	union pgtag_ref_handle handle_old, handle_new;
	union codetag_ref ref_old, ref_new;
	struct alloc_tag *tag_old, *tag_new;

	if (!mem_alloc_profiling_enabled())
		return;

	tag_old = __pgalloc_tag_get(&old->page);
	if (!tag_old)
		return;
	tag_new = __pgalloc_tag_get(&new->page);
	if (!tag_new)
		return;

	if (!get_page_tag_ref(&old->page, &ref_old, &handle_old))
		return;
	if (!get_page_tag_ref(&new->page, &ref_new, &handle_new)) {
		put_page_tag_ref(handle_old);
		return;
	}

	/*
	 * Clear the tag references to avoid a debug warning when
	 * __alloc_tag_ref_set() is used with a non-empty reference.
	 */
	set_codetag_empty(&ref_old);
	set_codetag_empty(&ref_new);

	/* swap tags */
	__alloc_tag_ref_set(&ref_old, tag_new);
	update_page_tag_ref(handle_old, &ref_old);
	__alloc_tag_ref_set(&ref_new, tag_old);
	update_page_tag_ref(handle_new, &ref_new);

	put_page_tag_ref(handle_old);
	put_page_tag_ref(handle_new);
}

static void shutdown_mem_profiling(bool remove_file)
{
	if (mem_alloc_profiling_enabled())
		static_branch_disable(&mem_alloc_profiling_key);

	if (!mem_profiling_support)
		return;

	if (remove_file)
		remove_proc_entry(ALLOCINFO_FILE_NAME, NULL);
	mem_profiling_support = false;
}

void __init alloc_tag_sec_init(void)
{
	struct alloc_tag *last_codetag;

	if (!mem_profiling_support)
		return;

	if (!static_key_enabled(&mem_profiling_compressed))
		return;

	kernel_tags.first_tag = (struct alloc_tag *)kallsyms_lookup_name(
					SECTION_START(ALLOC_TAG_SECTION_NAME));
	last_codetag = (struct alloc_tag *)kallsyms_lookup_name(
					SECTION_STOP(ALLOC_TAG_SECTION_NAME));
	kernel_tags.count = last_codetag - kernel_tags.first_tag;

	/* Check if kernel tags fit into page flags */
	if (kernel_tags.count > (1UL << NR_UNUSED_PAGEFLAG_BITS)) {
		shutdown_mem_profiling(false); /* allocinfo file does not exist yet */
		pr_err("%lu allocation tags cannot be referenced using %d available page flag bits. Memory allocation profiling is disabled!\n",
			kernel_tags.count, NR_UNUSED_PAGEFLAG_BITS);
		return;
	}

	alloc_tag_ref_offs = (LRU_REFS_PGOFF - NR_UNUSED_PAGEFLAG_BITS);
	alloc_tag_ref_mask = ((1UL << NR_UNUSED_PAGEFLAG_BITS) - 1);
	pr_debug("Memory allocation profiling compression is using %d page flag bits!\n",
		 NR_UNUSED_PAGEFLAG_BITS);
}

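/*
 * Illustrative decode of a compressed tag reference (a sketch based on the
 * offset and mask set up above; the actual accessors live in the page
 * allocation tagging headers): the tag index is kept in otherwise unused
 * page flag bits, roughly
 *
 *	idx = (page->flags >> alloc_tag_ref_offs) & alloc_tag_ref_mask;
 *
 * where indexes below CODETAG_ID_FIRST are reserved and kernel tags come
 * before module tags in the index space.
 */
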
#ifdef CONFIG_MODULES

static struct maple_tree mod_area_mt = MTREE_INIT(mod_area_mt, MT_FLAGS_ALLOC_RANGE);
static struct vm_struct *vm_module_tags;
/* A dummy object used to indicate an unloaded module */
static struct module unloaded_mod;
/* A dummy object used to indicate a module prepended area */
static struct module prepend_mod;

struct alloc_tag_module_section module_tags;

static inline unsigned long alloc_tag_align(unsigned long val)
{
	if (!static_key_enabled(&mem_profiling_compressed)) {
		/* No alignment requirements when we are not indexing the tags */
		return val;
	}

	if (val % sizeof(struct alloc_tag) == 0)
		return val;
	return ((val / sizeof(struct alloc_tag)) + 1) * sizeof(struct alloc_tag);
}

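/*
 * Example: if sizeof(struct alloc_tag) were 40 bytes (illustrative only;
 * the real size depends on the kernel configuration), alloc_tag_align(100)
 * would return 120, the next multiple of the tag size, keeping indexed
 * tags uniformly spaced.
 */
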
static bool ensure_alignment(unsigned long align, unsigned int *prepend)
{
	if (!static_key_enabled(&mem_profiling_compressed)) {
		/* No alignment requirements when we are not indexing the tags */
		return true;
	}

	/*
	 * If alloc_tag size is not a multiple of required alignment, tag
	 * indexing does not work.
	 */
	if (!IS_ALIGNED(sizeof(struct alloc_tag), align))
		return false;

	/* Ensure prepend consumes multiple of alloc_tag-sized blocks */
	if (*prepend)
		*prepend = alloc_tag_align(*prepend);

	return true;
}

static inline bool tags_addressable(void)
{
	unsigned long tag_idx_count;

	if (!static_key_enabled(&mem_profiling_compressed))
		return true; /* with page_ext tags are always addressable */

	tag_idx_count = CODETAG_ID_FIRST + kernel_tags.count +
			module_tags.size / sizeof(struct alloc_tag);

	return tag_idx_count < (1UL << NR_UNUSED_PAGEFLAG_BITS);
}

static bool needs_section_mem(struct module *mod, unsigned long size)
{
	if (!mem_profiling_support)
		return false;

	return size >= sizeof(struct alloc_tag);
}

static bool clean_unused_counters(struct alloc_tag *start_tag,
				  struct alloc_tag *end_tag)
{
	struct alloc_tag *tag;
	bool ret = true;

	for (tag = start_tag; tag <= end_tag; tag++) {
		struct alloc_tag_counters counter;

		if (!tag->counters)
			continue;

		counter = alloc_tag_read(tag);
		if (!counter.bytes) {
			free_percpu(tag->counters);
			tag->counters = NULL;
		} else {
			ret = false;
		}
	}

	return ret;
}

/* Called with mod_area_mt locked */
static void clean_unused_module_areas_locked(void)
{
	MA_STATE(mas, &mod_area_mt, 0, module_tags.size);
	struct module *val;

	mas_for_each(&mas, val, module_tags.size) {
		struct alloc_tag *start_tag;
		struct alloc_tag *end_tag;

		if (val != &unloaded_mod)
			continue;

		/* Release area if all tags are unused */
		start_tag = (struct alloc_tag *)(module_tags.start_addr + mas.index);
		end_tag = (struct alloc_tag *)(module_tags.start_addr + mas.last);
		if (clean_unused_counters(start_tag, end_tag))
			mas_erase(&mas);
	}
}

/* Called with mod_area_mt locked */
static bool find_aligned_area(struct ma_state *mas, unsigned long section_size,
			      unsigned long size, unsigned int prepend, unsigned long align)
{
	bool cleanup_done = false;

repeat:
	/* Try finding an exact-size area and hope its start is aligned */
	if (!mas_empty_area(mas, 0, section_size - 1, prepend + size)) {
		if (IS_ALIGNED(mas->index + prepend, align))
			return true;

		/* Try finding a larger area so we can align inside it later */
		mas_reset(mas);
		if (!mas_empty_area(mas, 0, section_size - 1,
				    size + prepend + align - 1))
			return true;
	}

	/* No free area; clean up stale data and repeat the search once */
	if (!cleanup_done) {
		clean_unused_module_areas_locked();
		cleanup_done = true;
		mas_reset(mas);
		goto repeat;
	}

	return false;
}

static int vm_module_tags_populate(void)
{
	unsigned long phys_end = ALIGN_DOWN(module_tags.start_addr, PAGE_SIZE) +
				 (vm_module_tags->nr_pages << PAGE_SHIFT);
	unsigned long new_end = module_tags.start_addr + module_tags.size;

	if (phys_end < new_end) {
		struct page **next_page = vm_module_tags->pages + vm_module_tags->nr_pages;
		unsigned long old_shadow_end = ALIGN(phys_end, MODULE_ALIGN);
		unsigned long new_shadow_end = ALIGN(new_end, MODULE_ALIGN);
		unsigned long more_pages;
		unsigned long nr = 0;

		more_pages = ALIGN(new_end - phys_end, PAGE_SIZE) >> PAGE_SHIFT;
		while (nr < more_pages) {
			unsigned long allocated;

			allocated = alloc_pages_bulk_node(GFP_KERNEL | __GFP_NOWARN,
				NUMA_NO_NODE, more_pages - nr, next_page + nr);

			if (!allocated)
				break;
			nr += allocated;
		}

		if (nr < more_pages ||
		    vmap_pages_range(phys_end, phys_end + (nr << PAGE_SHIFT), PAGE_KERNEL,
				     next_page, PAGE_SHIFT) < 0) {
			/* Clean up and error out */
			for (int i = 0; i < nr; i++)
				__free_page(next_page[i]);
			return -ENOMEM;
		}

		vm_module_tags->nr_pages += nr;

		/*
		 * KASAN allocates 1 byte of shadow memory for every 8 bytes
		 * of data. kasan_alloc_module_shadow() allocates shadow in
		 * page-sized units, so the range must be aligned to
		 * MODULE_ALIGN here.
		 */
		if (old_shadow_end < new_shadow_end)
			kasan_alloc_module_shadow((void *)old_shadow_end,
						  new_shadow_end - old_shadow_end,
						  GFP_KERNEL);
	}

	/*
	 * Mark the pages as accessible, now that they are mapped.
	 * With hardware tag-based KASAN, marking is skipped for
	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
	 */
	kasan_unpoison_vmalloc((void *)module_tags.start_addr,
				new_end - module_tags.start_addr,
				KASAN_VMALLOC_PROT_NORMAL);

	return 0;
}

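/*
 * Note on the populate step above: only enough pages to back the currently
 * reserved module_tags.size are allocated and mapped; the remainder of the
 * MODULE_ALLOC_TAG_VMAP_SIZE area stays as unbacked address space until a
 * later module load grows the reservation.
 */
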
static void *reserve_module_tags(struct module *mod, unsigned long size,
				 unsigned int prepend, unsigned long align)
{
	unsigned long section_size = module_tags.end_addr - module_tags.start_addr;
	MA_STATE(mas, &mod_area_mt, 0, section_size - 1);
	unsigned long offset;
	void *ret = NULL;

	/* If the area cannot hold even a single tag, return an error */
	if (size < sizeof(struct alloc_tag))
		return ERR_PTR(-EINVAL);

	/*
	 * align is always a power of 2, so we can use IS_ALIGNED() and
	 * ALIGN(). An align of 0 or 1 means no alignment; to simplify,
	 * set it to 1.
	 */
	if (!align)
		align = 1;

	if (!ensure_alignment(align, &prepend)) {
		shutdown_mem_profiling(true);
		pr_err("%s: alignment %lu is incompatible with allocation tag indexing. Memory allocation profiling is disabled!\n",
			mod->name, align);
		return ERR_PTR(-EINVAL);
	}

	mas_lock(&mas);
	if (!find_aligned_area(&mas, section_size, size, prepend, align)) {
		ret = ERR_PTR(-ENOMEM);
		goto unlock;
	}

	/* Mark found area as reserved */
	offset = mas.index;
	offset += prepend;
	offset = ALIGN(offset, align);
	if (offset != mas.index) {
		unsigned long pad_start = mas.index;

		mas.last = offset - 1;
		mas_store(&mas, &prepend_mod);
		if (mas_is_err(&mas)) {
			ret = ERR_PTR(xa_err(mas.node));
			goto unlock;
		}
		mas.index = offset;
		mas.last = offset + size - 1;
		mas_store(&mas, mod);
		if (mas_is_err(&mas)) {
			mas.index = pad_start;
			mas_erase(&mas);
			ret = ERR_PTR(xa_err(mas.node));
		}
	} else {
		mas.last = offset + size - 1;
		mas_store(&mas, mod);
		if (mas_is_err(&mas))
			ret = ERR_PTR(xa_err(mas.node));
	}
unlock:
	mas_unlock(&mas);

	if (IS_ERR(ret))
		return ret;

	if (module_tags.size < offset + size) {
		int grow_res;

		module_tags.size = offset + size;
		if (mem_alloc_profiling_enabled() && !tags_addressable()) {
			shutdown_mem_profiling(true);
			pr_warn("With module %s there are too many tags to fit in %d page flag bits. Memory allocation profiling is disabled!\n",
				mod->name, NR_UNUSED_PAGEFLAG_BITS);
		}

		grow_res = vm_module_tags_populate();
		if (grow_res) {
			shutdown_mem_profiling(true);
			pr_err("Failed to allocate memory for allocation tags in the module %s. Memory allocation profiling is disabled!\n",
			       mod->name);
			return ERR_PTR(grow_res);
		}
	}

	return (struct alloc_tag *)(module_tags.start_addr + offset);
}

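/*
 * Worked example of the padding logic in reserve_module_tags(): with
 * mas.index = 10, prepend = 0 and align = 8 (illustrative numbers), the
 * aligned offset becomes 16, so [10, 15] is stored as a prepend_mod pad
 * area and [16, 16 + size - 1] is reserved for the module itself.
 */
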
static void release_module_tags(struct module *mod, bool used)
{
	MA_STATE(mas, &mod_area_mt, module_tags.size, module_tags.size);
	struct alloc_tag *start_tag;
	struct alloc_tag *end_tag;
	struct module *val;

	mas_lock(&mas);
	mas_for_each_rev(&mas, val, 0)
		if (val == mod)
			break;

	if (!val) /* module not found */
		goto out;

	if (!used)
		goto release_area;

	start_tag = (struct alloc_tag *)(module_tags.start_addr + mas.index);
	end_tag = (struct alloc_tag *)(module_tags.start_addr + mas.last);
	if (!clean_unused_counters(start_tag, end_tag)) {
		struct alloc_tag *tag;

		for (tag = start_tag; tag <= end_tag; tag++) {
			struct alloc_tag_counters counter;

			if (!tag->counters)
				continue;

			counter = alloc_tag_read(tag);
			pr_info("%s:%u module %s func:%s has %llu bytes allocated at module unload\n",
				tag->ct.filename, tag->ct.lineno, tag->ct.modname,
				tag->ct.function, counter.bytes);
		}
	} else {
		used = false;
	}
release_area:
	mas_store(&mas, used ? &unloaded_mod : NULL);
	val = mas_prev_range(&mas, 0);
	if (val == &prepend_mod)
		mas_store(&mas, NULL);
out:
	mas_unlock(&mas);
}

static int load_module(struct module *mod, struct codetag *start, struct codetag *stop)
{
	/* Allocate module alloc_tag percpu counters */
	struct alloc_tag *start_tag;
	struct alloc_tag *stop_tag;
	struct alloc_tag *tag;

	/* percpu counters for core allocations are already statically allocated */
	if (!mod)
		return 0;

	start_tag = ct_to_alloc_tag(start);
	stop_tag = ct_to_alloc_tag(stop);
	for (tag = start_tag; tag < stop_tag; tag++) {
		WARN_ON(tag->counters);
		tag->counters = alloc_percpu(struct alloc_tag_counters);
		if (!tag->counters) {
			while (--tag >= start_tag) {
				free_percpu(tag->counters);
				tag->counters = NULL;
			}
			pr_err("Failed to allocate memory for allocation tag percpu counters in the module %s\n",
			       mod->name);
			return -ENOMEM;
		}

		/*
		 * Avoid a kmemleak false positive. The pointer to the counters is stored
		 * in the alloc_tag section of the module and cannot be directly accessed.
		 */
		kmemleak_ignore_percpu(tag->counters);
	}
	return 0;
}

static void replace_module(struct module *mod, struct module *new_mod)
{
	MA_STATE(mas, &mod_area_mt, 0, module_tags.size);
	struct module *val;

	mas_lock(&mas);
	mas_for_each(&mas, val, module_tags.size) {
		if (val != mod)
			continue;

		mas_store_gfp(&mas, new_mod, GFP_KERNEL);
		break;
	}
	mas_unlock(&mas);
}

static int __init alloc_mod_tags_mem(void)
{
	/* Map space to copy allocation tags */
	vm_module_tags = execmem_vmap(MODULE_ALLOC_TAG_VMAP_SIZE);
	if (!vm_module_tags) {
		pr_err("Failed to map %lu bytes for module allocation tags\n",
			MODULE_ALLOC_TAG_VMAP_SIZE);
		module_tags.start_addr = 0;
		return -ENOMEM;
	}

	vm_module_tags->pages = kmalloc_array(get_vm_area_size(vm_module_tags) >> PAGE_SHIFT,
					sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
	if (!vm_module_tags->pages) {
		free_vm_area(vm_module_tags);
		return -ENOMEM;
	}

	module_tags.start_addr = (unsigned long)vm_module_tags->addr;
	module_tags.end_addr = module_tags.start_addr + MODULE_ALLOC_TAG_VMAP_SIZE;
	/* Ensure the base is alloc_tag aligned when required for indexing */
	module_tags.start_addr = alloc_tag_align(module_tags.start_addr);

	return 0;
}

static void __init free_mod_tags_mem(void)
{
	int i;

	module_tags.start_addr = 0;
	for (i = 0; i < vm_module_tags->nr_pages; i++)
		__free_page(vm_module_tags->pages[i]);
	kfree(vm_module_tags->pages);
	free_vm_area(vm_module_tags);
}

#else /* CONFIG_MODULES */

static inline int alloc_mod_tags_mem(void) { return 0; }
static inline void free_mod_tags_mem(void) {}

#endif /* CONFIG_MODULES */

/* See: Documentation/mm/allocation-profiling.rst */
static int __init setup_early_mem_profiling(char *str)
{
	bool compressed = false;
	bool enable;

	if (!str || !str[0])
		return -EINVAL;

	if (!strncmp(str, "never", 5)) {
		enable = false;
		mem_profiling_support = false;
		pr_info("Memory allocation profiling is disabled!\n");
	} else {
		char *token = strsep(&str, ",");

		if (kstrtobool(token, &enable))
			return -EINVAL;

		if (str) {
			if (strcmp(str, "compressed"))
				return -EINVAL;

			compressed = true;
		}
		mem_profiling_support = true;
		pr_info("Memory allocation profiling is enabled %s compression and is turned %s!\n",
			compressed ? "with" : "without", enable ? "on" : "off");
	}

	if (enable != mem_alloc_profiling_enabled()) {
		if (enable)
			static_branch_enable(&mem_alloc_profiling_key);
		else
			static_branch_disable(&mem_alloc_profiling_key);
	}
	if (compressed != static_key_enabled(&mem_profiling_compressed)) {
		if (compressed)
			static_branch_enable(&mem_profiling_compressed);
		else
			static_branch_disable(&mem_profiling_compressed);
	}

	return 0;
}
early_param("sysctl.vm.mem_profiling", setup_early_mem_profiling);

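/*
 * Example boot command line settings parsed above (see
 * Documentation/mm/allocation-profiling.rst):
 *
 *	sysctl.vm.mem_profiling=0		- support built in, turned off
 *	sysctl.vm.mem_profiling=1		- turned on at boot
 *	sysctl.vm.mem_profiling=1,compressed	- on, with page flag compression
 *	sysctl.vm.mem_profiling=never		- support disabled entirely
 */
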
static __init bool need_page_alloc_tagging(void)
{
	if (static_key_enabled(&mem_profiling_compressed))
		return false;

	return mem_profiling_support;
}

static __init void init_page_alloc_tagging(void)
{
}

struct page_ext_operations page_alloc_tagging_ops = {
	.size = sizeof(union codetag_ref),
	.need = need_page_alloc_tagging,
	.init = init_page_alloc_tagging,
};
EXPORT_SYMBOL(page_alloc_tagging_ops);

#ifdef CONFIG_SYSCTL
static struct ctl_table memory_allocation_profiling_sysctls[] = {
	{
		.procname	= "mem_profiling",
		.data		= &mem_alloc_profiling_key,
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
		.mode		= 0444,
#else
		.mode		= 0644,
#endif
		.proc_handler	= proc_do_static_key,
	},
};

static void __init sysctl_init(void)
{
	if (!mem_profiling_support)
		memory_allocation_profiling_sysctls[0].mode = 0444;

	register_sysctl_init("vm", memory_allocation_profiling_sysctls);
}
#else /* CONFIG_SYSCTL */
static inline void sysctl_init(void) {}
#endif /* CONFIG_SYSCTL */

static int __init alloc_tag_init(void)
{
	const struct codetag_type_desc desc = {
		.section		= ALLOC_TAG_SECTION_NAME,
		.tag_size		= sizeof(struct alloc_tag),
#ifdef CONFIG_MODULES
		.needs_section_mem	= needs_section_mem,
		.alloc_section_mem	= reserve_module_tags,
		.free_section_mem	= release_module_tags,
		.module_load		= load_module,
		.module_replaced	= replace_module,
#endif
	};
	int res;

	sysctl_init();

	if (!mem_profiling_support) {
		pr_info("Memory allocation profiling is not supported!\n");
		return 0;
	}

	if (!proc_create_seq(ALLOCINFO_FILE_NAME, 0400, NULL, &allocinfo_seq_op)) {
		pr_err("Failed to create %s file\n", ALLOCINFO_FILE_NAME);
		shutdown_mem_profiling(false);
		return -ENOMEM;
	}

	res = alloc_mod_tags_mem();
	if (res) {
		pr_err("Failed to reserve address space for module tags, errno = %d\n", res);
		shutdown_mem_profiling(true);
		return res;
	}

	alloc_tag_cttype = codetag_register_type(&desc);
	if (IS_ERR(alloc_tag_cttype)) {
		pr_err("Allocation tags registration failed, errno = %ld\n", PTR_ERR(alloc_tag_cttype));
		free_mod_tags_mem();
		shutdown_mem_profiling(true);
		return PTR_ERR(alloc_tag_cttype);
	}

	return 0;
}
module_init(alloc_tag_init);