xref: /linux/arch/arm64/include/asm/kasan.h (revision 8e07e0e3964ca4e23ce7b68e2096fe660a888942)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H

#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <asm/memory.h>
#include <asm/mte-kasan.h>
#include <asm/pgtable-types.h>

#define arch_kasan_set_tag(addr, tag)	__tag_set(addr, tag)
#define arch_kasan_reset_tag(addr)	__tag_reset(addr)
#define arch_kasan_get_tag(addr)	__tag_get(addr)
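
/*
 * Illustrative sketch (assumption, not part of this header): on arm64 the
 * KASAN pointer tag lives in the top byte of the virtual address, bits
 * [63:56]. The hypothetical helpers below show roughly what the wrapped tag
 * operations do; the real __tag_set()/__tag_reset()/__tag_get() are provided
 * by <asm/memory.h> and <asm/mte-kasan.h>.
 */
static inline void *example_tag_set(void *addr, u8 tag)
{
	/* Drop any existing tag, then place the new tag in bits [63:56]. */
	u64 untagged = (u64)addr & ~((u64)0xff << 56);

	return (void *)(untagged | ((u64)tag << 56));
}

static inline u8 example_tag_get(const void *addr)
{
	/* The tag is simply the top byte of the tagged pointer. */
	return (u8)((u64)addr >> 56);
}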

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_init(void);

/*
 * KASAN_SHADOW_START: beginning of the kernel virtual addresses.
 * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses,
 * where N = (1 << KASAN_SHADOW_SCALE_SHIFT).
 *
 * KASAN_SHADOW_OFFSET:
 * This value is used to map an address to the corresponding shadow
 * address by the following formula:
 *     shadow_addr = (address >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 *
 * (1 << (64 - KASAN_SHADOW_SCALE_SHIFT)) shadow addresses that lie in range
 * [KASAN_SHADOW_OFFSET, KASAN_SHADOW_END) cover all 64-bits of virtual
 * addresses. So KASAN_SHADOW_OFFSET should satisfy the following equation:
 *      KASAN_SHADOW_OFFSET = KASAN_SHADOW_END -
 *				(1ULL << (64 - KASAN_SHADOW_SCALE_SHIFT))
 */
#define _KASAN_SHADOW_START(va)	(KASAN_SHADOW_END - (1UL << ((va) - KASAN_SHADOW_SCALE_SHIFT)))
#define KASAN_SHADOW_START      _KASAN_SHADOW_START(vabits_actual)
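
/*
 * Illustrative sketch (assumption, not upstream code): the shadow mapping
 * formula documented above is what the generic kasan_mem_to_shadow() helper
 * implements. A simplified, hypothetical version operating on raw addresses:
 */
static inline unsigned long example_mem_to_shadow(unsigned long addr)
{
	/* Each shadow byte tracks (1 << KASAN_SHADOW_SCALE_SHIFT) bytes. */
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

/*
 * Worked example under assumed values: with generic KASAN
 * (KASAN_SHADOW_SCALE_SHIFT == 3) and vabits_actual == 48,
 *     KASAN_SHADOW_START == KASAN_SHADOW_END - (1UL << (48 - 3)),
 * i.e. the shadow region occupies 1/8 of the 48-bit address range and
 * ends at KASAN_SHADOW_END.
 */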

void kasan_copy_shadow(pgd_t *pgdir);
asmlinkage void kasan_early_init(void);

#else
static inline void kasan_init(void) { }
static inline void kasan_copy_shadow(pgd_t *pgdir) { }
#endif

#endif
#endif