xref: /linux/arch/s390/include/asm/kmsan.h (revision 9557b4376d02088a33e5f4116bcc324d35a3b64c)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_S390_KMSAN_H
3 #define _ASM_S390_KMSAN_H
4 
5 #include <asm/lowcore.h>
6 #include <asm/page.h>
7 #include <linux/kmsan.h>
8 #include <linux/mmzone.h>
9 #include <linux/stddef.h>
10 
11 #ifndef MODULE
12 
13 static inline bool is_lowcore_addr(void *addr)
14 {
15 	return addr >= (void *)get_lowcore() &&
16 	       addr < (void *)(get_lowcore() + 1);
17 }
18 
/*
 * Arch hook used by KMSAN to resolve metadata (shadow or origin) for
 * addresses that the generic code cannot handle directly.
 *
 * On s390 every CPU's lowcore is accessed through the same prefixed
 * mapping, so a lowcore address must first be translated to this CPU's
 * real lowcore before metadata can be looked up.
 *
 * @addr:      address to resolve metadata for.
 * @is_origin: true to request origin metadata, false for shadow.
 *
 * Returns the metadata pointer, or NULL if @addr is not a lowcore
 * address (generic resolution applies) or if translation failed.
 */
static inline void *arch_kmsan_get_meta_or_null(void *addr, bool is_origin)
{
	if (is_lowcore_addr(addr)) {
		/*
		 * Different lowcores accessed via S390_lowcore are described
		 * by the same struct page. Resolve the prefix manually in
		 * order to get a distinct struct page.
		 */
		addr += (void *)lowcore_ptr[raw_smp_processor_id()] -
			(void *)get_lowcore();
		/*
		 * If the translated address still points into the prefixed
		 * lowcore mapping, recursing into kmsan_get_metadata() would
		 * loop back here forever — warn and bail out instead.
		 */
		if (KMSAN_WARN_ON(is_lowcore_addr(addr)))
			return NULL;
		return kmsan_get_metadata(addr, is_origin);
	}
	return NULL;
}
35 
/*
 * KMSAN-safe wrapper around virt_addr_valid().
 *
 * virt_addr_valid() ends up in pfn_valid(), which uses RCU; leaving the
 * RCU read-side critical section may call into the scheduler, and that
 * would recurse back into KMSAN. Guard the call with preemption disabled
 * and re-enable it without rescheduling to break the recursion.
 *
 * This occasionally skips a due reschedule, but a KMSAN kernel is so
 * heavily instrumented that scheduling latency guarantees are already
 * out the window.
 */
static inline bool kmsan_virt_addr_valid(void *addr)
{
	bool valid;

	preempt_disable();
	valid = virt_addr_valid(addr);
	preempt_enable_no_resched();

	return valid;
}
56 
57 #endif /* !MODULE */
58 
59 #endif /* _ASM_S390_KMSAN_H */
60