xref: /linux/mm/kfence/kfence.h (revision 61307b7be41a1f1039d1d1368810a1d92cb97b44)
10ce20dd8SAlexander Potapenko /* SPDX-License-Identifier: GPL-2.0 */
20ce20dd8SAlexander Potapenko /*
30ce20dd8SAlexander Potapenko  * Kernel Electric-Fence (KFENCE). For more info please see
40ce20dd8SAlexander Potapenko  * Documentation/dev-tools/kfence.rst.
50ce20dd8SAlexander Potapenko  *
60ce20dd8SAlexander Potapenko  * Copyright (C) 2020, Google LLC.
70ce20dd8SAlexander Potapenko  */
80ce20dd8SAlexander Potapenko 
90ce20dd8SAlexander Potapenko #ifndef MM_KFENCE_KFENCE_H
100ce20dd8SAlexander Potapenko #define MM_KFENCE_KFENCE_H
110ce20dd8SAlexander Potapenko 
120ce20dd8SAlexander Potapenko #include <linux/mm.h>
130ce20dd8SAlexander Potapenko #include <linux/slab.h>
140ce20dd8SAlexander Potapenko #include <linux/spinlock.h>
150ce20dd8SAlexander Potapenko #include <linux/types.h>
160ce20dd8SAlexander Potapenko 
170ce20dd8SAlexander Potapenko #include "../slab.h" /* for struct kmem_cache */
180ce20dd8SAlexander Potapenko 
/*
 * Get the canary byte pattern for @addr, used to fill the redzone around each
 * guarded allocation. The pattern varies based on the lower 3 bits of the
 * address, to detect memory corruptions with higher probability, where similar
 * constants are used (a single repeated byte would miss overwrites with that
 * same byte).
 */
#define KFENCE_CANARY_PATTERN_U8(addr) ((u8)0xaa ^ (u8)((unsigned long)(addr) & 0x7))
251ba3cbf3SPeng Zhang 
/*
 * Define a continuous 8-byte canary starting from a multiple of 8. The canary
 * of each byte is only related to the lowest three bits of its address, so the
 * canary of every 8 bytes is the same. 64-bit memory can be filled and checked
 * at a time instead of byte by byte to improve performance.
 *
 * The le64_to_cpu() of the 0x07..00 byte ramp keeps the in-memory byte order
 * of this u64 consistent with the per-byte KFENCE_CANARY_PATTERN_U8() values
 * on both little- and big-endian hosts.
 */
#define KFENCE_CANARY_PATTERN_U64 ((u64)0xaaaaaaaaaaaaaaaa ^ (u64)(le64_to_cpu(0x0706050403020100)))
330ce20dd8SAlexander Potapenko 
/* Maximum number of stack entries recorded per alloc/free track for reports. */
#define KFENCE_STACK_DEPTH 64
360ce20dd8SAlexander Potapenko 
/* KFENCE object states; stored in kfence_metadata::state under metadata lock. */
enum kfence_object_state {
	KFENCE_OBJECT_UNUSED,		/* Object is unused. */
	KFENCE_OBJECT_ALLOCATED,	/* Object is currently allocated. */
	KFENCE_OBJECT_FREED,		/* Object was allocated, and then freed. */
};
430ce20dd8SAlexander Potapenko 
/*
 * Alloc/free tracking information, recorded once at allocation time and once
 * at free time for each guarded object (see kfence_metadata below).
 */
struct kfence_track {
	pid_t pid;		/* Task that performed the alloc/free. */
	int cpu;		/* CPU the alloc/free occurred on. */
	u64 ts_nsec;		/* Timestamp of the alloc/free, in nanoseconds. */
	int num_stack_entries;	/* Number of valid entries in stack_entries[]. */
	unsigned long stack_entries[KFENCE_STACK_DEPTH];
};
520ce20dd8SAlexander Potapenko 
/* KFENCE metadata per guarded allocation. */
struct kfence_metadata {
	struct list_head list;		/* Freelist node; access under kfence_freelist_lock. */
	struct rcu_head rcu_head;	/* For delayed freeing. */

	/*
	 * Lock protecting below data; to ensure consistency of the below data,
	 * since the following may execute concurrently: __kfence_alloc(),
	 * __kfence_free(), kfence_handle_page_fault(). However, note that we
	 * cannot grab the same metadata off the freelist twice, and multiple
	 * __kfence_alloc() cannot run concurrently on the same metadata.
	 */
	raw_spinlock_t lock;

	/* The current state of the object; see above. */
	enum kfence_object_state state;

	/*
	 * Allocated object address; cannot be calculated from size, because of
	 * alignment requirements.
	 *
	 * Invariant: ALIGN_DOWN(addr, PAGE_SIZE) is constant.
	 */
	unsigned long addr;

	/*
	 * The size of the original allocation.
	 */
	size_t size;

	/*
	 * The kmem_cache cache of the last allocation; NULL if never allocated
	 * or the cache has already been destroyed.
	 */
	struct kmem_cache *cache;

	/*
	 * In case of an invalid access, the page that was unprotected; we
	 * optimistically only store one address.
	 */
	unsigned long unprotected_page;

	/* Allocation and free stack information. */
	struct kfence_track alloc_track;
	struct kfence_track free_track;
	/* For updating alloc_covered on frees. */
	u32 alloc_stack_hash;
#ifdef CONFIG_MEMCG_KMEM
	/* Per-object slab extension, so memcg accounting works for KFENCE objects. */
	struct slabobj_ext obj_exts;
#endif
};
1040ce20dd8SAlexander Potapenko 
/* Page-aligned total size of the kfence_metadata array (one entry per object). */
#define KFENCE_METADATA_SIZE PAGE_ALIGN(sizeof(struct kfence_metadata) * \
					CONFIG_KFENCE_NUM_OBJECTS)

/* Metadata array for all guarded objects; defined in mm/kfence/core.c. */
extern struct kfence_metadata *kfence_metadata;
1090ce20dd8SAlexander Potapenko 
1102dfe63e6SMarco Elver static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
1112dfe63e6SMarco Elver {
1122dfe63e6SMarco Elver 	long index;
1132dfe63e6SMarco Elver 
1142dfe63e6SMarco Elver 	/* The checks do not affect performance; only called from slow-paths. */
1152dfe63e6SMarco Elver 
1162dfe63e6SMarco Elver 	if (!is_kfence_address((void *)addr))
1172dfe63e6SMarco Elver 		return NULL;
1182dfe63e6SMarco Elver 
1192dfe63e6SMarco Elver 	/*
1202dfe63e6SMarco Elver 	 * May be an invalid index if called with an address at the edge of
1212dfe63e6SMarco Elver 	 * __kfence_pool, in which case we would report an "invalid access"
1222dfe63e6SMarco Elver 	 * error.
1232dfe63e6SMarco Elver 	 */
1242dfe63e6SMarco Elver 	index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
1252dfe63e6SMarco Elver 	if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
1262dfe63e6SMarco Elver 		return NULL;
1272dfe63e6SMarco Elver 
1282dfe63e6SMarco Elver 	return &kfence_metadata[index];
1292dfe63e6SMarco Elver }
1302dfe63e6SMarco Elver 
/* KFENCE error types for report generation. */
enum kfence_error_type {
	KFENCE_ERROR_OOB,		/* Detected an out-of-bounds access. */
	KFENCE_ERROR_UAF,		/* Detected a use-after-free access. */
	KFENCE_ERROR_CORRUPTION,	/* Detected a memory corruption on free. */
	KFENCE_ERROR_INVALID,		/* Invalid access of unknown type. */
	KFENCE_ERROR_INVALID_FREE,	/* Invalid free. */
};
1390ce20dd8SAlexander Potapenko 
/*
 * Generate a KFENCE error report for @address. @meta may describe the object
 * associated with the access (passed as const; only read for report output),
 * and @regs provides the faulting context where available.
 */
void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs,
			 const struct kfence_metadata *meta, enum kfence_error_type type);

/* Print a summary of the object described by @meta to @seq. */
void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta);
1440ce20dd8SAlexander Potapenko 
1450ce20dd8SAlexander Potapenko #endif /* MM_KFENCE_KFENCE_H */
146