xref: /linux/arch/arm64/include/asm/mte-kasan.h (revision 02680c23d7b3febe45ea3d4f9818c2b2dc89020a)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (C) 2020 ARM Ltd.
4  */
5 #ifndef __ASM_MTE_KASAN_H
6 #define __ASM_MTE_KASAN_H
7 
8 #include <asm/mte-def.h>
9 
10 #ifndef __ASSEMBLY__
11 
12 #include <linux/types.h>
13 
14 #ifdef CONFIG_ARM64_MTE
15 
16 /*
17  * These functions are meant to be only used from KASAN runtime through
18  * the arch_*() interface defined in asm/memory.h.
19  * These functions don't include system_supports_mte() checks,
20  * as KASAN only calls them when MTE is supported and enabled.
21  */
22 
23 static inline u8 mte_get_ptr_tag(void *ptr)
24 {
25 	/* Note: The format of KASAN tags is 0xF<x> */
26 	u8 tag = 0xF0 | (u8)(((u64)(ptr)) >> MTE_TAG_SHIFT);
27 
28 	return tag;
29 }
30 
/* Get allocation tag for the address. */
static inline u8 mte_get_mem_tag(void *addr)
{
	/*
	 * LDG loads the allocation tag stored for the MTE granule that
	 * contains @addr into the tag bits of the register, leaving the
	 * address bits untouched ("+r": @addr is both input and output).
	 */
	asm(__MTE_PREAMBLE "ldg %0, [%0]"
		: "+r" (addr));

	return mte_get_ptr_tag(addr);
}
39 
/* Generate a random tag. */
static inline u8 mte_get_random_tag(void)
{
	void *addr;

	/*
	 * IRG inserts a randomly generated allocation tag into the
	 * register. @addr is deliberately left uninitialized: only the
	 * tag bits produced by IRG are consumed below, the address bits
	 * are don't-care.
	 */
	asm(__MTE_PREAMBLE "irg %0, %0"
		: "=r" (addr));

	return mte_get_ptr_tag(addr);
}
50 
/*
 * Assign allocation tags for a region of memory based on the pointer tag.
 * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
 * size must be non-zero and MTE_GRANULE_SIZE aligned.
 */
static inline void mte_set_mem_tag_range(void *addr, size_t size,
						u8 tag, bool init)
{
	u64 curr, end;

	if (!size)
		return;

	/* Fold @tag into the address bits so STG/STZG store that tag. */
	curr = (u64)__tag_set(addr, tag);
	end = curr + size;

	/*
	 * 'asm volatile' is required to prevent the compiler to move
	 * the statement outside of the loop.
	 */
	if (init) {
		/* STZG: set the allocation tag and zero the granule's data. */
		do {
			asm volatile(__MTE_PREAMBLE "stzg %0, [%0]"
				     :
				     : "r" (curr)
				     : "memory");
			curr += MTE_GRANULE_SIZE;
		} while (curr != end);
	} else {
		/* STG: set the allocation tag only; data is left untouched. */
		do {
			asm volatile(__MTE_PREAMBLE "stg %0, [%0]"
				     :
				     : "r" (curr)
				     : "memory");
			curr += MTE_GRANULE_SIZE;
		} while (curr != end);
	}
}
89 
/*
 * Out-of-line MTE control helpers.
 * NOTE(review): semantics below are inferred from the names — confirm
 * against the definitions (likely arch/arm64/kernel/mte.c).
 * - mte_enable_kernel_{sync,async}: presumably select synchronous vs
 *   asynchronous tag-check-fault reporting for kernel-mode accesses.
 * - mte_init_tags: presumably configures tag generation; @max_tag looks
 *   like an upper bound on the tags handed out.
 */
void mte_enable_kernel_sync(void);
void mte_enable_kernel_async(void);
void mte_init_tags(u64 max_tag);

/* Presumably limits fault reporting to the first occurrence — verify. */
void mte_set_report_once(bool state);
bool mte_report_once(void);
96 
97 #else /* CONFIG_ARM64_MTE */
98 
99 static inline u8 mte_get_ptr_tag(void *ptr)
100 {
101 	return 0xFF;
102 }
103 
104 static inline u8 mte_get_mem_tag(void *addr)
105 {
106 	return 0xFF;
107 }
108 
109 static inline u8 mte_get_random_tag(void)
110 {
111 	return 0xFF;
112 }
113 
/* MTE disabled: tag assignment is a no-op. */
static inline void mte_set_mem_tag_range(void *addr, size_t size,
						u8 tag, bool init)
{
}

/* MTE disabled: nothing to enable. */
static inline void mte_enable_kernel_sync(void)
{
}

static inline void mte_enable_kernel_async(void)
{
}

/* MTE disabled: no tag state to initialize. */
static inline void mte_init_tags(u64 max_tag)
{
}

static inline void mte_set_report_once(bool state)
{
}

/* MTE disabled: there is never a fault report pending. */
static inline bool mte_report_once(void)
{
	return false;
}
139 
140 #endif /* CONFIG_ARM64_MTE */
141 
142 #endif /* __ASSEMBLY__ */
143 
144 #endif /* __ASM_MTE_KASAN_H  */
145