/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 ARM Ltd.
 */
#ifndef __ASM_MTE_KASAN_H
#define __ASM_MTE_KASAN_H

#include <asm/mte-def.h>

#ifndef __ASSEMBLY__

#include <linux/types.h>

#ifdef CONFIG_ARM64_MTE

/*
 * These functions are meant to be used only from the KASAN runtime,
 * through the arch_*() interface defined in asm/memory.h.
 * They don't include system_supports_mte() checks, as KASAN only calls
 * them when MTE is supported and enabled.
 */

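/*
 * For orientation: in mainline, asm/memory.h is expected to route the
 * generic hooks here, e.g. arch_get_mem_tag(), arch_get_random_tag() and
 * arch_set_mem_tag_range(), when CONFIG_KASAN_HW_TAGS is enabled.
 */
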
static inline u8 mte_get_ptr_tag(void *ptr)
{
	/* Note: The format of KASAN tags is 0xF<x> */
	u8 tag = 0xF0 | (u8)(((u64)(ptr)) >> MTE_TAG_SHIFT);

	return tag;
}

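/*
 * Worked example (assuming MTE_TAG_SHIFT == 56, per asm/mte-def.h): the
 * shift moves the pointer's top byte down, and the OR with 0xF0 forces
 * the high nibble, so a pointer whose tag bits [59:56] hold 0x7 yields
 * the KASAN tag 0xF7.
 */
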
/* Get allocation tag for the address. */
static inline u8 mte_get_mem_tag(void *addr)
{
	asm(__MTE_PREAMBLE "ldg %0, [%0]"
		: "+r" (addr));

	return mte_get_ptr_tag(addr);
}

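/*
 * LDG reads the allocation tag of the granule containing the address and
 * inserts it into the tag bits of the register, so after the asm the
 * pointer tag of addr mirrors the memory tag and mte_get_ptr_tag() can
 * extract it.
 */
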
/* Generate a random tag. */
static inline u8 mte_get_random_tag(void)
{
	void *addr;

	asm(__MTE_PREAMBLE "irg %0, %0"
		: "=r" (addr));

	return mte_get_ptr_tag(addr);
}

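/*
 * IRG inserts a random 4-bit tag into the destination register while
 * honouring the exclusion mask in GCR_EL1; only the tag bits of the
 * otherwise uninitialised local are consumed here.
 */
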
static inline u64 __stg_post(u64 p)
{
	asm volatile(__MTE_PREAMBLE "stg %0, [%0], #16"
		     : "+r"(p)
		     :
		     : "memory");
	return p;
}

static inline u64 __stzg_post(u64 p)
{
	asm volatile(__MTE_PREAMBLE "stzg %0, [%0], #16"
		     : "+r"(p)
		     :
		     : "memory");
	return p;
}

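/*
 * STG sets the allocation tag of one MTE granule (16 bytes) from the tag
 * bits of %0; STZG additionally zeroes the granule's data. The post-index
 * form advances the pointer by 16, so the tagging loops below need no
 * separate increment.
 */
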
static inline void __dc_gva(u64 p)
{
	asm volatile(__MTE_PREAMBLE "dc gva, %0" : : "r"(p) : "memory");
}

static inline void __dc_gzva(u64 p)
{
	asm volatile(__MTE_PREAMBLE "dc gzva, %0" : : "r"(p) : "memory");
}

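/*
 * DC GVA tags an entire DCZID_EL0-sized block in one operation (DC GZVA
 * also zeroes it), which is much cheaper than an STG/STZG loop over the
 * block-aligned middle of a large range.
 */
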
/*
 * Assign allocation tags for a region of memory based on the given tag.
 * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned, and
 * the size must be MTE_GRANULE_SIZE aligned as well.
 */
static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,
					 bool init)
{
	u64 curr, mask, dczid, dczid_bs, dczid_dzp, end1, end2, end3;

	/* Read DC G(Z)VA block size from the system register. */
	dczid = read_cpuid(DCZID_EL0);
	dczid_bs = 4ul << (dczid & 0xf);
	dczid_dzp = (dczid >> 4) & 1;

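	/*
	 * DCZID_EL0.BS (bits [3:0]) is log2 of the block size in 4-byte
	 * words, hence the "4ul <<"; DCZID_EL0.DZP (bit 4) reads as 1 when
	 * DC (G)ZVA is prohibited, in which case only STG/STZG is used.
	 */
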
	curr = (u64)__tag_set(addr, tag);
	mask = dczid_bs - 1;
	/* STG/STZG up to the end of the first block. */
	end1 = curr | mask;
	end3 = curr + size;
	/* DC GVA / GZVA in [end1, end2) */
	end2 = end3 & ~mask;

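	/*
	 * Worked example (hypothetical 64-byte DC block, so mask == 63):
	 * with curr == 0x1010 and size == 0x100, end1 == 0x103f,
	 * end3 == 0x1110 and end2 == 0x1100, so STG covers
	 * [0x1010, 0x1040), DC GVA covers [0x1040, 0x1100) and the tail
	 * loop finishes [0x1100, 0x1110).
	 */
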
	/*
	 * The following code uses STG on the first DC GVA block even if the
	 * start address is aligned - it appears to be faster than an alignment
	 * check + conditional branch. Also, if the range size is at least 2 DC
	 * GVA blocks, the first two loops can use post-condition to save one
	 * branch each.
	 */
#define SET_MEMTAG_RANGE(stg_post, dc_gva)		\
	do {						\
		if (!dczid_dzp && size >= 2 * dczid_bs) {\
			do {				\
				curr = stg_post(curr);	\
			} while (curr < end1);		\
							\
			do {				\
				dc_gva(curr);		\
				curr += dczid_bs;	\
			} while (curr < end2);		\
		}					\
							\
		while (curr < end3)			\
			curr = stg_post(curr);		\
	} while (0)

	if (init)
		SET_MEMTAG_RANGE(__stzg_post, __dc_gzva);
	else
		SET_MEMTAG_RANGE(__stg_post, __dc_gva);
#undef SET_MEMTAG_RANGE
}

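/*
 * Illustrative use (a sketch, not code from this header): via the
 * arch_*() wrappers, the KASAN HW-tags runtime is expected to do the
 * equivalent of
 *
 *	u8 tag = mte_get_random_tag();
 *	mte_set_mem_tag_range(addr, size, tag, true);
 *
 * when (un)poisoning an allocation.
 */
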
void mte_enable_kernel_sync(void);
void mte_enable_kernel_async(void);
void mte_enable_kernel_asymm(void);

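/*
 * These select how tag check faults are reported (via SCTLR_EL1.TCF on
 * mainline arm64): sync traps precisely, async accumulates faults in
 * TFSR_EL1 for deferred reporting, and asymm (FEAT_MTE3) checks reads
 * synchronously but writes asynchronously.
 */
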
#else /* CONFIG_ARM64_MTE */

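/*
 * Stub versions for !CONFIG_ARM64_MTE. 0xFF is the top byte of an
 * untagged kernel pointer (KASAN's KASAN_TAG_KERNEL), so callers get a
 * tag that behaves as "no tag" when MTE is compiled out.
 */
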
static inline u8 mte_get_ptr_tag(void *ptr)
{
	return 0xFF;
}

static inline u8 mte_get_mem_tag(void *addr)
{
	return 0xFF;
}

static inline u8 mte_get_random_tag(void)
{
	return 0xFF;
}

static inline void mte_set_mem_tag_range(void *addr, size_t size,
						u8 tag, bool init)
{
}

static inline void mte_enable_kernel_sync(void)
{
}

static inline void mte_enable_kernel_async(void)
{
}

static inline void mte_enable_kernel_asymm(void)
{
}

#endif /* CONFIG_ARM64_MTE */

#endif /* __ASSEMBLY__ */

#endif /* __ASM_MTE_KASAN_H */