xref: /linux/kernel/kcsan/core.c (revision 49f72d5358dd3c0d28bcd2232c513000b15480f0)
1dfd402a4SMarco Elver // SPDX-License-Identifier: GPL-2.0
2bd0ccc4aSMarco Elver /*
3bd0ccc4aSMarco Elver  * KCSAN core runtime.
4bd0ccc4aSMarco Elver  *
5bd0ccc4aSMarco Elver  * Copyright (C) 2019, Google LLC.
6bd0ccc4aSMarco Elver  */
7dfd402a4SMarco Elver 
827787930SMarco Elver #define pr_fmt(fmt) "kcsan: " fmt
927787930SMarco Elver 
10dfd402a4SMarco Elver #include <linux/atomic.h>
11dfd402a4SMarco Elver #include <linux/bug.h>
12dfd402a4SMarco Elver #include <linux/delay.h>
13dfd402a4SMarco Elver #include <linux/export.h>
14dfd402a4SMarco Elver #include <linux/init.h>
151e6ee2f0SMarco Elver #include <linux/kernel.h>
16757a4cefSMarco Elver #include <linux/list.h>
1780d4c477SMarco Elver #include <linux/moduleparam.h>
18dfd402a4SMarco Elver #include <linux/percpu.h>
19dfd402a4SMarco Elver #include <linux/preempt.h>
20dfd402a4SMarco Elver #include <linux/sched.h>
21dfd402a4SMarco Elver #include <linux/uaccess.h>
22dfd402a4SMarco Elver 
23dfd402a4SMarco Elver #include "encoding.h"
24dfd402a4SMarco Elver #include "kcsan.h"
25*49f72d53SMarco Elver #include "permissive.h"
26dfd402a4SMarco Elver 
2780d4c477SMarco Elver static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE);
282402d0eaSMarco Elver unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
292402d0eaSMarco Elver unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
3080d4c477SMarco Elver static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH;
3148b1fc19SMarco Elver static bool kcsan_interrupt_watcher = IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER);
3280d4c477SMarco Elver 
3380d4c477SMarco Elver #ifdef MODULE_PARAM_PREFIX
3480d4c477SMarco Elver #undef MODULE_PARAM_PREFIX
3580d4c477SMarco Elver #endif
3680d4c477SMarco Elver #define MODULE_PARAM_PREFIX "kcsan."
3780d4c477SMarco Elver module_param_named(early_enable, kcsan_early_enable, bool, 0);
3880d4c477SMarco Elver module_param_named(udelay_task, kcsan_udelay_task, uint, 0644);
3980d4c477SMarco Elver module_param_named(udelay_interrupt, kcsan_udelay_interrupt, uint, 0644);
4080d4c477SMarco Elver module_param_named(skip_watch, kcsan_skip_watch, long, 0644);
4148b1fc19SMarco Elver module_param_named(interrupt_watcher, kcsan_interrupt_watcher, bool, 0444);
4280d4c477SMarco Elver 
43dfd402a4SMarco Elver bool kcsan_enabled;
44dfd402a4SMarco Elver 
45dfd402a4SMarco Elver /* Per-CPU kcsan_ctx for interrupts */
46dfd402a4SMarco Elver static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
47dfd402a4SMarco Elver 	.disable_count		= 0,
48dfd402a4SMarco Elver 	.atomic_next		= 0,
49dfd402a4SMarco Elver 	.atomic_nest_count	= 0,
50dfd402a4SMarco Elver 	.in_flat_atomic		= false,
5181af89e1SMarco Elver 	.access_mask		= 0,
52757a4cefSMarco Elver 	.scoped_accesses	= {LIST_POISON1, NULL},
53dfd402a4SMarco Elver };
54dfd402a4SMarco Elver 
55dfd402a4SMarco Elver /*
56e7b34100SQiujun Huang  * Helper macros to index into adjacent slots, starting from the address slot
57dfd402a4SMarco Elver  * itself, followed by the right and left slots.
58dfd402a4SMarco Elver  *
59dfd402a4SMarco Elver  * The purpose is 2-fold:
60dfd402a4SMarco Elver  *
61dfd402a4SMarco Elver  *	1. if during insertion the address slot is already occupied, check if
62dfd402a4SMarco Elver  *	   any adjacent slots are free;
63dfd402a4SMarco Elver  *	2. accesses that straddle a slot boundary, due to a size that exceeds a
64dfd402a4SMarco Elver  *	   slot's range, may check adjacent slots for a matching watchpoint.
65dfd402a4SMarco Elver  *
66dfd402a4SMarco Elver  * Note that accesses with very large size may still miss a watchpoint; however,
67dfd402a4SMarco Elver  * given this should be rare, this is a reasonable trade-off to make, since this
68dfd402a4SMarco Elver  * will avoid:
69dfd402a4SMarco Elver  *
70dfd402a4SMarco Elver  *	1. excessive contention between watchpoint checks and setup;
71dfd402a4SMarco Elver  *	2. needing a larger number of simultaneous watchpoints, without
72dfd402a4SMarco Elver  *	   sacrificing performance.
73dfd402a4SMarco Elver  *
74dfd402a4SMarco Elver  * Example: SLOT_IDX values for KCSAN_CHECK_ADJACENT=1, where i is [0, 1, 2]:
75dfd402a4SMarco Elver  *
76dfd402a4SMarco Elver  *   slot=0:  [ 1,  2,  0]
77dfd402a4SMarco Elver  *   slot=9:  [10, 11,  9]
78dfd402a4SMarco Elver  *   slot=63: [64, 65, 63]
79dfd402a4SMarco Elver  */
80dfd402a4SMarco Elver #define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))
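
/*
 * Worked example of the arithmetic above (illustrative only; assumes
 * KCSAN_CHECK_ADJACENT == 1, i.e. NUM_SLOTS == 3):
 *
 *	SLOT_IDX(9, 0) == 9 + ((0 + 1) % 3) == 10
 *	SLOT_IDX(9, 1) == 9 + ((1 + 1) % 3) == 11
 *	SLOT_IDX(9, 2) == 9 + ((2 + 1) % 3) ==  9
 *
 * which matches the "slot=9: [10, 11, 9]" row in the comment above.
 */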
81dfd402a4SMarco Elver 
82dfd402a4SMarco Elver /*
835cbaefe9SIngo Molnar  * SLOT_IDX_FAST is used in the fast-path. Not first checking the address's primary
84d591ec3dSMarco Elver  * slot (middle) is fine if we assume that races occur rarely. The set of
85dfd402a4SMarco Elver  * indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to
86dfd402a4SMarco Elver  * {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}.
87dfd402a4SMarco Elver  */
88dfd402a4SMarco Elver #define SLOT_IDX_FAST(slot, i) (slot + i)
89dfd402a4SMarco Elver 
90dfd402a4SMarco Elver /*
91dfd402a4SMarco Elver  * Watchpoints, with each entry encoded as defined in encoding.h: in order to be
92dfd402a4SMarco Elver  * able to safely update and access a watchpoint without introducing locking
93dfd402a4SMarco Elver  * overhead, we encode each watchpoint as a single atomic long. The initial
94dfd402a4SMarco Elver  * zero-initialized state matches INVALID_WATCHPOINT.
95dfd402a4SMarco Elver  *
96dfd402a4SMarco Elver  * Add NUM_SLOTS-1 entries to account for overflow; this helps avoid having to
975cbaefe9SIngo Molnar  * use more complicated SLOT_IDX_FAST calculation with modulo in the fast-path.
98dfd402a4SMarco Elver  */
99dfd402a4SMarco Elver static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
100dfd402a4SMarco Elver 
101dfd402a4SMarco Elver /*
102dfd402a4SMarco Elver  * Counter of instructions to skip before watching again, used in should_watch().
103dfd402a4SMarco Elver  * We use a per-CPU counter to avoid excessive contention.
104dfd402a4SMarco Elver  */
105dfd402a4SMarco Elver static DEFINE_PER_CPU(long, kcsan_skip);
106dfd402a4SMarco Elver 
107cd290ec2SMarco Elver /* For kcsan_prandom_u32_max(). */
10871a076f4SMarco Elver static DEFINE_PER_CPU(u32, kcsan_rand_state);
109cd290ec2SMarco Elver 
1105c361425SMarco Elver static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
1115cbaefe9SIngo Molnar 						      size_t size,
112dfd402a4SMarco Elver 						      bool expect_write,
113dfd402a4SMarco Elver 						      long *encoded_watchpoint)
114dfd402a4SMarco Elver {
115dfd402a4SMarco Elver 	const int slot = watchpoint_slot(addr);
116dfd402a4SMarco Elver 	const unsigned long addr_masked = addr & WATCHPOINT_ADDR_MASK;
117dfd402a4SMarco Elver 	atomic_long_t *watchpoint;
118dfd402a4SMarco Elver 	unsigned long wp_addr_masked;
119dfd402a4SMarco Elver 	size_t wp_size;
120dfd402a4SMarco Elver 	bool is_write;
121dfd402a4SMarco Elver 	int i;
122dfd402a4SMarco Elver 
123dfd402a4SMarco Elver 	BUILD_BUG_ON(CONFIG_KCSAN_NUM_WATCHPOINTS < NUM_SLOTS);
124dfd402a4SMarco Elver 
125dfd402a4SMarco Elver 	for (i = 0; i < NUM_SLOTS; ++i) {
126dfd402a4SMarco Elver 		watchpoint = &watchpoints[SLOT_IDX_FAST(slot, i)];
127dfd402a4SMarco Elver 		*encoded_watchpoint = atomic_long_read(watchpoint);
128dfd402a4SMarco Elver 		if (!decode_watchpoint(*encoded_watchpoint, &wp_addr_masked,
129dfd402a4SMarco Elver 				       &wp_size, &is_write))
130dfd402a4SMarco Elver 			continue;
131dfd402a4SMarco Elver 
132dfd402a4SMarco Elver 		if (expect_write && !is_write)
133dfd402a4SMarco Elver 			continue;
134dfd402a4SMarco Elver 
135dfd402a4SMarco Elver 		/* Check if the watchpoint matches the access. */
136dfd402a4SMarco Elver 		if (matching_access(wp_addr_masked, wp_size, addr_masked, size))
137dfd402a4SMarco Elver 			return watchpoint;
138dfd402a4SMarco Elver 	}
139dfd402a4SMarco Elver 
140dfd402a4SMarco Elver 	return NULL;
141dfd402a4SMarco Elver }
142dfd402a4SMarco Elver 
1435cbaefe9SIngo Molnar static inline atomic_long_t *
1445cbaefe9SIngo Molnar insert_watchpoint(unsigned long addr, size_t size, bool is_write)
145dfd402a4SMarco Elver {
146dfd402a4SMarco Elver 	const int slot = watchpoint_slot(addr);
147dfd402a4SMarco Elver 	const long encoded_watchpoint = encode_watchpoint(addr, size, is_write);
148dfd402a4SMarco Elver 	atomic_long_t *watchpoint;
149dfd402a4SMarco Elver 	int i;
150dfd402a4SMarco Elver 
151dfd402a4SMarco Elver 	/* Check slot index logic, ensuring we stay within array bounds. */
152dfd402a4SMarco Elver 	BUILD_BUG_ON(SLOT_IDX(0, 0) != KCSAN_CHECK_ADJACENT);
153dfd402a4SMarco Elver 	BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT+1) != 0);
1545cbaefe9SIngo Molnar 	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT) != ARRAY_SIZE(watchpoints)-1);
1555cbaefe9SIngo Molnar 	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT+1) != ARRAY_SIZE(watchpoints) - NUM_SLOTS);
156dfd402a4SMarco Elver 
157dfd402a4SMarco Elver 	for (i = 0; i < NUM_SLOTS; ++i) {
158dfd402a4SMarco Elver 		long expect_val = INVALID_WATCHPOINT;
159dfd402a4SMarco Elver 
160dfd402a4SMarco Elver 		/* Try to acquire this slot. */
161dfd402a4SMarco Elver 		watchpoint = &watchpoints[SLOT_IDX(slot, i)];
1625cbaefe9SIngo Molnar 		if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val, encoded_watchpoint))
163dfd402a4SMarco Elver 			return watchpoint;
164dfd402a4SMarco Elver 	}
165dfd402a4SMarco Elver 
166dfd402a4SMarco Elver 	return NULL;
167dfd402a4SMarco Elver }
168dfd402a4SMarco Elver 
169dfd402a4SMarco Elver /*
170dfd402a4SMarco Elver  * Return true if watchpoint was successfully consumed, false otherwise.
171dfd402a4SMarco Elver  *
172dfd402a4SMarco Elver  * This may return false if:
173dfd402a4SMarco Elver  *
174dfd402a4SMarco Elver  *	1. another thread already consumed the watchpoint;
175dfd402a4SMarco Elver  *	2. the thread that set up the watchpoint already removed it;
176dfd402a4SMarco Elver  *	3. the watchpoint was removed and then re-used.
177dfd402a4SMarco Elver  */
1785c361425SMarco Elver static __always_inline bool
1795cbaefe9SIngo Molnar try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint)
180dfd402a4SMarco Elver {
1815cbaefe9SIngo Molnar 	return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT);
182dfd402a4SMarco Elver }
183dfd402a4SMarco Elver 
1846119418fSMarco Elver /* Return true if watchpoint was not touched, false if already consumed. */
1856119418fSMarco Elver static inline bool consume_watchpoint(atomic_long_t *watchpoint)
186dfd402a4SMarco Elver {
1876119418fSMarco Elver 	return atomic_long_xchg_relaxed(watchpoint, CONSUMED_WATCHPOINT) != CONSUMED_WATCHPOINT;
1886119418fSMarco Elver }
1896119418fSMarco Elver 
1906119418fSMarco Elver /* Remove the watchpoint -- its slot may be reused after. */
1916119418fSMarco Elver static inline void remove_watchpoint(atomic_long_t *watchpoint)
1926119418fSMarco Elver {
1936119418fSMarco Elver 	atomic_long_set(watchpoint, INVALID_WATCHPOINT);
194dfd402a4SMarco Elver }
195dfd402a4SMarco Elver 
1965c361425SMarco Elver static __always_inline struct kcsan_ctx *get_ctx(void)
197dfd402a4SMarco Elver {
198dfd402a4SMarco Elver 	/*
1995cbaefe9SIngo Molnar 	 * In interrupts, use raw_cpu_ptr to avoid unnecessary checks, which would
200dfd402a4SMarco Elver 	 * also result in calls that generate warnings in uaccess regions.
201dfd402a4SMarco Elver 	 */
202dfd402a4SMarco Elver 	return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
203dfd402a4SMarco Elver }
204dfd402a4SMarco Elver 
205757a4cefSMarco Elver /* Check scoped accesses; never inline because this is a slow-path! */
206757a4cefSMarco Elver static noinline void kcsan_check_scoped_accesses(void)
207757a4cefSMarco Elver {
208757a4cefSMarco Elver 	struct kcsan_ctx *ctx = get_ctx();
209757a4cefSMarco Elver 	struct list_head *prev_save = ctx->scoped_accesses.prev;
210757a4cefSMarco Elver 	struct kcsan_scoped_access *scoped_access;
211757a4cefSMarco Elver 
212757a4cefSMarco Elver 	ctx->scoped_accesses.prev = NULL;  /* Avoid recursion. */
213757a4cefSMarco Elver 	list_for_each_entry(scoped_access, &ctx->scoped_accesses, list)
214757a4cefSMarco Elver 		__kcsan_check_access(scoped_access->ptr, scoped_access->size, scoped_access->type);
215757a4cefSMarco Elver 	ctx->scoped_accesses.prev = prev_save;
216757a4cefSMarco Elver }
217757a4cefSMarco Elver 
21844656d3dSMarco Elver /* Rules for generic atomic accesses. Called from fast-path. */
2191e6ee2f0SMarco Elver static __always_inline bool
220757a4cefSMarco Elver is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
221dfd402a4SMarco Elver {
22244656d3dSMarco Elver 	if (type & KCSAN_ACCESS_ATOMIC)
2231e6ee2f0SMarco Elver 		return true;
2241e6ee2f0SMarco Elver 
225d591ec3dSMarco Elver 	/*
226d591ec3dSMarco Elver 	 * Unless explicitly declared atomic, never consider an assertion access
227d591ec3dSMarco Elver 	 * as atomic. This allows using them also in atomic regions, such as
228d591ec3dSMarco Elver 	 * seqlocks, without implicitly changing their semantics.
229d591ec3dSMarco Elver 	 */
23044656d3dSMarco Elver 	if (type & KCSAN_ACCESS_ASSERT)
231d591ec3dSMarco Elver 		return false;
232d591ec3dSMarco Elver 
2331e6ee2f0SMarco Elver 	if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
23444656d3dSMarco Elver 	    (type & KCSAN_ACCESS_WRITE) && size <= sizeof(long) &&
23514e2ac8dSMarco Elver 	    !(type & KCSAN_ACCESS_COMPOUND) && IS_ALIGNED((unsigned long)ptr, size))
2361e6ee2f0SMarco Elver 		return true; /* Assume aligned writes up to word size are atomic. */
2371e6ee2f0SMarco Elver 
23844656d3dSMarco Elver 	if (ctx->atomic_next > 0) {
239dfd402a4SMarco Elver 		/*
240dfd402a4SMarco Elver 		 * Because we do not have separate contexts for nested
241dfd402a4SMarco Elver 		 * interrupts, in case atomic_next is set, we simply assume that
242dfd402a4SMarco Elver 		 * the outer interrupt set atomic_next. In the worst case, we
243dfd402a4SMarco Elver 		 * will conservatively consider operations as atomic. This is a
244dfd402a4SMarco Elver 		 * reasonable trade-off to make, since this case should be
245dfd402a4SMarco Elver 		 * extremely rare; however, even if extremely rare, it could
246dfd402a4SMarco Elver 		 * lead to false positives otherwise.
247dfd402a4SMarco Elver 		 */
248dfd402a4SMarco Elver 		if ((hardirq_count() >> HARDIRQ_SHIFT) < 2)
249dfd402a4SMarco Elver 			--ctx->atomic_next; /* in task, or outer interrupt */
250dfd402a4SMarco Elver 		return true;
251dfd402a4SMarco Elver 	}
252dfd402a4SMarco Elver 
25344656d3dSMarco Elver 	return ctx->atomic_nest_count > 0 || ctx->in_flat_atomic;
254dfd402a4SMarco Elver }
255dfd402a4SMarco Elver 
2561e6ee2f0SMarco Elver static __always_inline bool
257757a4cefSMarco Elver should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
258dfd402a4SMarco Elver {
259dfd402a4SMarco Elver 	/*
260dfd402a4SMarco Elver 	 * Never set up watchpoints when memory operations are atomic.
261dfd402a4SMarco Elver 	 *
262dfd402a4SMarco Elver 	 * Need to check this first, before the kcsan_skip check below: (1) atomics
263dfd402a4SMarco Elver 	 * should not count towards skipped instructions, and (2) we need to actually
264dfd402a4SMarco Elver 	 * decrement kcsan_atomic_next for the consecutive instruction stream.
265dfd402a4SMarco Elver 	 */
266757a4cefSMarco Elver 	if (is_atomic(ptr, size, type, ctx))
267dfd402a4SMarco Elver 		return false;
268dfd402a4SMarco Elver 
269dfd402a4SMarco Elver 	if (this_cpu_dec_return(kcsan_skip) >= 0)
270dfd402a4SMarco Elver 		return false;
271dfd402a4SMarco Elver 
272dfd402a4SMarco Elver 	/*
273dfd402a4SMarco Elver 	 * NOTE: If we get here, kcsan_skip must always be reset in slow path
274dfd402a4SMarco Elver 	 * via reset_kcsan_skip() to avoid underflow.
275dfd402a4SMarco Elver 	 */
276dfd402a4SMarco Elver 
277dfd402a4SMarco Elver 	/* this operation should be watched */
278dfd402a4SMarco Elver 	return true;
279dfd402a4SMarco Elver }
280dfd402a4SMarco Elver 
281cd290ec2SMarco Elver /*
28371a076f4SMarco Elver  * Returns a pseudo-random number in the interval [0, ep_ro). Simple linear
28371a076f4SMarco Elver  * congruential generator, using constants from "Numerical Recipes".
284cd290ec2SMarco Elver  */
285cd290ec2SMarco Elver static u32 kcsan_prandom_u32_max(u32 ep_ro)
286cd290ec2SMarco Elver {
28771a076f4SMarco Elver 	u32 state = this_cpu_read(kcsan_rand_state);
288cd290ec2SMarco Elver 
28971a076f4SMarco Elver 	state = 1664525 * state + 1013904223;
29071a076f4SMarco Elver 	this_cpu_write(kcsan_rand_state, state);
29171a076f4SMarco Elver 
29271a076f4SMarco Elver 	return state % ep_ro;
293cd290ec2SMarco Elver }
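
/*
 * In other words, the per-CPU state follows the linear congruential recurrence
 *
 *	state' = (1664525 * state + 1013904223) mod 2^32
 *
 * and is reduced into [0, ep_ro) with a plain modulo; the small modulo bias is
 * acceptable here, since the result only randomizes delays and skip counts.
 */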
294cd290ec2SMarco Elver 
295dfd402a4SMarco Elver static inline void reset_kcsan_skip(void)
296dfd402a4SMarco Elver {
29780d4c477SMarco Elver 	long skip_count = kcsan_skip_watch -
298dfd402a4SMarco Elver 			  (IS_ENABLED(CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE) ?
299cd290ec2SMarco Elver 				   kcsan_prandom_u32_max(kcsan_skip_watch) :
300dfd402a4SMarco Elver 				   0);
301dfd402a4SMarco Elver 	this_cpu_write(kcsan_skip, skip_count);
302dfd402a4SMarco Elver }
303dfd402a4SMarco Elver 
30408cac604SMarco Elver static __always_inline bool kcsan_is_enabled(struct kcsan_ctx *ctx)
305dfd402a4SMarco Elver {
30608cac604SMarco Elver 	return READ_ONCE(kcsan_enabled) && !ctx->disable_count;
307dfd402a4SMarco Elver }
308dfd402a4SMarco Elver 
309cd290ec2SMarco Elver /* Introduce delay depending on context and configuration. */
310cd290ec2SMarco Elver static void delay_access(int type)
311dfd402a4SMarco Elver {
31280d4c477SMarco Elver 	unsigned int delay = in_task() ? kcsan_udelay_task : kcsan_udelay_interrupt;
313106a307fSMarco Elver 	/* For certain access types, skew the random delay to be longer. */
314106a307fSMarco Elver 	unsigned int skew_delay_order =
315106a307fSMarco Elver 		(type & (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_ASSERT)) ? 1 : 0;
316106a307fSMarco Elver 
317cd290ec2SMarco Elver 	delay -= IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
318cd290ec2SMarco Elver 			       kcsan_prandom_u32_max(delay >> skew_delay_order) :
319cd290ec2SMarco Elver 			       0;
320cd290ec2SMarco Elver 	udelay(delay);
321dfd402a4SMarco Elver }
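
/*
 * Worked example (illustrative; the value 80 assumes the default
 * CONFIG_KCSAN_UDELAY_TASK and may be configured differently):
 *
 *	task context, CONFIG_KCSAN_DELAY_RANDOMIZE=y, kcsan_udelay_task == 80
 *	plain access          : skew_delay_order == 0 -> udelay(80 - [0, 80))
 *	ASSERT/COMPOUND access: skew_delay_order == 1 -> udelay(80 - [0, 40))
 *
 * i.e. the more interesting access types always retain over half the maximum
 * watchpoint delay.
 */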
322dfd402a4SMarco Elver 
32392c209acSMarco Elver void kcsan_save_irqtrace(struct task_struct *task)
32492c209acSMarco Elver {
32592c209acSMarco Elver #ifdef CONFIG_TRACE_IRQFLAGS
32692c209acSMarco Elver 	task->kcsan_save_irqtrace = task->irqtrace;
32792c209acSMarco Elver #endif
32892c209acSMarco Elver }
32992c209acSMarco Elver 
33092c209acSMarco Elver void kcsan_restore_irqtrace(struct task_struct *task)
33192c209acSMarco Elver {
33292c209acSMarco Elver #ifdef CONFIG_TRACE_IRQFLAGS
33392c209acSMarco Elver 	task->irqtrace = task->kcsan_save_irqtrace;
33492c209acSMarco Elver #endif
33592c209acSMarco Elver }
33692c209acSMarco Elver 
337dfd402a4SMarco Elver /*
338dfd402a4SMarco Elver  * Pull everything together: check_access() below contains the
339dfd402a4SMarco Elver  * performance-critical operations; the fast-path (including check_access)
340dfd402a4SMarco Elver  * functions should all be inlinable by the instrumentation functions.
341dfd402a4SMarco Elver  *
342dfd402a4SMarco Elver  * The slow-path functions (kcsan_found_watchpoint, kcsan_setup_watchpoint) are
343dfd402a4SMarco Elver  * non-inlinable -- note that we prefix these with "kcsan_" to ensure they can
344dfd402a4SMarco Elver  * be filtered from the stacktrace, as well as give them unique names for the
345dfd402a4SMarco Elver  * UACCESS whitelist of objtool. Each function uses user_access_save/restore(),
346dfd402a4SMarco Elver  * since they do not access any user memory, but instrumentation is still
347dfd402a4SMarco Elver  * emitted in UACCESS regions.
348dfd402a4SMarco Elver  */
349dfd402a4SMarco Elver 
350dfd402a4SMarco Elver static noinline void kcsan_found_watchpoint(const volatile void *ptr,
3515cbaefe9SIngo Molnar 					    size_t size,
35247144ecaSMarco Elver 					    int type,
353dfd402a4SMarco Elver 					    atomic_long_t *watchpoint,
354dfd402a4SMarco Elver 					    long encoded_watchpoint)
355dfd402a4SMarco Elver {
356*49f72d53SMarco Elver 	const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
35708cac604SMarco Elver 	struct kcsan_ctx *ctx = get_ctx();
358dfd402a4SMarco Elver 	unsigned long flags;
359dfd402a4SMarco Elver 	bool consumed;
360dfd402a4SMarco Elver 
36108cac604SMarco Elver 	/*
36208cac604SMarco Elver 	 * We know a watchpoint exists. Let's try to keep the race-window
36308cac604SMarco Elver 	 * between here and finally consuming the watchpoint below as small as
36408cac604SMarco Elver 	 * possible -- avoid unneccessarily complex code until consumed.
36408cac604SMarco Elver 	 * possible -- avoid unnecessarily complex code until consumed.
36608cac604SMarco Elver 
36708cac604SMarco Elver 	if (!kcsan_is_enabled(ctx))
368dfd402a4SMarco Elver 		return;
36981af89e1SMarco Elver 
37081af89e1SMarco Elver 	/*
37181af89e1SMarco Elver 	 * The access_mask check relies on value-change comparison. To avoid
37281af89e1SMarco Elver 	 * reporting a race where e.g. the writer set up the watchpoint, but the
37381af89e1SMarco Elver 	 * reader has access_mask!=0, we have to ignore the found watchpoint.
37481af89e1SMarco Elver 	 */
37508cac604SMarco Elver 	if (ctx->access_mask)
37681af89e1SMarco Elver 		return;
37781af89e1SMarco Elver 
378dfd402a4SMarco Elver 	/*
379*49f72d53SMarco Elver 	 * If the other thread does not want to ignore the access, and there was
380*49f72d53SMarco Elver 	 * a value change as a result of this thread's operation, we will still
381*49f72d53SMarco Elver 	 * generate a report of unknown origin.
382*49f72d53SMarco Elver 	 *
383*49f72d53SMarco Elver 	 * Use CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN=n to filter.
384*49f72d53SMarco Elver 	 */
385*49f72d53SMarco Elver 	if (!is_assert && kcsan_ignore_address(ptr))
386*49f72d53SMarco Elver 		return;
387*49f72d53SMarco Elver 
388*49f72d53SMarco Elver 	/*
38908cac604SMarco Elver 	 * Consuming the watchpoint must be guarded by kcsan_is_enabled() to
39008cac604SMarco Elver 	 * avoid erroneously triggering reports if the context is disabled.
391dfd402a4SMarco Elver 	 */
392dfd402a4SMarco Elver 	consumed = try_consume_watchpoint(watchpoint, encoded_watchpoint);
393dfd402a4SMarco Elver 
394dfd402a4SMarco Elver 	/* keep this after try_consume_watchpoint */
395dfd402a4SMarco Elver 	flags = user_access_save();
396dfd402a4SMarco Elver 
397dfd402a4SMarco Elver 	if (consumed) {
39892c209acSMarco Elver 		kcsan_save_irqtrace(current);
399793c2579SMark Rutland 		kcsan_report_set_info(ptr, size, type, watchpoint - watchpoints);
40092c209acSMarco Elver 		kcsan_restore_irqtrace(current);
401dfd402a4SMarco Elver 	} else {
402dfd402a4SMarco Elver 		/*
403dfd402a4SMarco Elver 		 * The other thread may not print any diagnostics, as it has
404dfd402a4SMarco Elver 		 * already removed the watchpoint, or another thread consumed
405dfd402a4SMarco Elver 		 * the watchpoint before this thread.
406dfd402a4SMarco Elver 		 */
4072e986b81SMarco Elver 		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_REPORT_RACES]);
408dfd402a4SMarco Elver 	}
409d591ec3dSMarco Elver 
410*49f72d53SMarco Elver 	if (is_assert)
4112e986b81SMarco Elver 		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
412d591ec3dSMarco Elver 	else
4132e986b81SMarco Elver 		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_DATA_RACES]);
414dfd402a4SMarco Elver 
415dfd402a4SMarco Elver 	user_access_restore(flags);
416dfd402a4SMarco Elver }
417dfd402a4SMarco Elver 
4185cbaefe9SIngo Molnar static noinline void
41947144ecaSMarco Elver kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
420dfd402a4SMarco Elver {
42147144ecaSMarco Elver 	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
422d591ec3dSMarco Elver 	const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
423dfd402a4SMarco Elver 	atomic_long_t *watchpoint;
4246f2d9819SMark Rutland 	u64 old, new, diff;
42581af89e1SMarco Elver 	unsigned long access_mask;
426b738f616SMarco Elver 	enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
427dfd402a4SMarco Elver 	unsigned long ua_flags = user_access_save();
42808cac604SMarco Elver 	struct kcsan_ctx *ctx = get_ctx();
42948b1fc19SMarco Elver 	unsigned long irq_flags = 0;
430dfd402a4SMarco Elver 
431dfd402a4SMarco Elver 	/*
432dfd402a4SMarco Elver 	 * Always reset kcsan_skip counter in slow-path to avoid underflow; see
433dfd402a4SMarco Elver 	 * should_watch().
434dfd402a4SMarco Elver 	 */
435dfd402a4SMarco Elver 	reset_kcsan_skip();
436dfd402a4SMarco Elver 
43708cac604SMarco Elver 	if (!kcsan_is_enabled(ctx))
438dfd402a4SMarco Elver 		goto out;
439dfd402a4SMarco Elver 
44044656d3dSMarco Elver 	/*
441*49f72d53SMarco Elver 	 * Check to-ignore addresses after kcsan_is_enabled(), as we may access
442*49f72d53SMarco Elver 	 * memory that is not yet initialized during early boot.
44344656d3dSMarco Elver 	 */
444*49f72d53SMarco Elver 	if (!is_assert && kcsan_ignore_address(ptr))
44544656d3dSMarco Elver 		goto out;
44644656d3dSMarco Elver 
447dfd402a4SMarco Elver 	if (!check_encodable((unsigned long)ptr, size)) {
4482e986b81SMarco Elver 		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_UNENCODABLE_ACCESSES]);
449dfd402a4SMarco Elver 		goto out;
450dfd402a4SMarco Elver 	}
451dfd402a4SMarco Elver 
45292c209acSMarco Elver 	/*
45392c209acSMarco Elver 	 * Save and restore the IRQ state trace touched by KCSAN, since KCSAN's
45492c209acSMarco Elver 	 * runtime is entered for every memory access, and potentially useful
45592c209acSMarco Elver 	 * information is lost if dirtied by KCSAN.
45692c209acSMarco Elver 	 */
45792c209acSMarco Elver 	kcsan_save_irqtrace(current);
45848b1fc19SMarco Elver 	if (!kcsan_interrupt_watcher)
459248591f5SMarco Elver 		local_irq_save(irq_flags);
460dfd402a4SMarco Elver 
461dfd402a4SMarco Elver 	watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
462dfd402a4SMarco Elver 	if (watchpoint == NULL) {
463dfd402a4SMarco Elver 		/*
4645cbaefe9SIngo Molnar 		 * Out of capacity: the size of 'watchpoints', and the frequency
4655cbaefe9SIngo Molnar 		 * with which should_watch() returns true should be tweaked so
466dfd402a4SMarco Elver 		 * that this case happens very rarely.
467dfd402a4SMarco Elver 		 */
4682e986b81SMarco Elver 		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_NO_CAPACITY]);
469dfd402a4SMarco Elver 		goto out_unlock;
470dfd402a4SMarco Elver 	}
471dfd402a4SMarco Elver 
4722e986b81SMarco Elver 	atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_SETUP_WATCHPOINTS]);
4732e986b81SMarco Elver 	atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);
474dfd402a4SMarco Elver 
475dfd402a4SMarco Elver 	/*
476dfd402a4SMarco Elver 	 * Read the current value, to later check and infer a race if the data
477dfd402a4SMarco Elver 	 * was modified via a non-instrumented access, e.g. from a device.
478dfd402a4SMarco Elver 	 */
4796f2d9819SMark Rutland 	old = 0;
480dfd402a4SMarco Elver 	switch (size) {
481dfd402a4SMarco Elver 	case 1:
4826f2d9819SMark Rutland 		old = READ_ONCE(*(const u8 *)ptr);
483dfd402a4SMarco Elver 		break;
484dfd402a4SMarco Elver 	case 2:
4856f2d9819SMark Rutland 		old = READ_ONCE(*(const u16 *)ptr);
486dfd402a4SMarco Elver 		break;
487dfd402a4SMarco Elver 	case 4:
4886f2d9819SMark Rutland 		old = READ_ONCE(*(const u32 *)ptr);
489dfd402a4SMarco Elver 		break;
490dfd402a4SMarco Elver 	case 8:
4916f2d9819SMark Rutland 		old = READ_ONCE(*(const u64 *)ptr);
492dfd402a4SMarco Elver 		break;
493dfd402a4SMarco Elver 	default:
494dfd402a4SMarco Elver 		break; /* ignore; we do not diff the values */
495dfd402a4SMarco Elver 	}
496dfd402a4SMarco Elver 
497dfd402a4SMarco Elver 	/*
498dfd402a4SMarco Elver 	 * Delay this thread, to increase probability of observing a racy
499dfd402a4SMarco Elver 	 * conflicting access.
500dfd402a4SMarco Elver 	 */
501cd290ec2SMarco Elver 	delay_access(type);
502dfd402a4SMarco Elver 
503dfd402a4SMarco Elver 	/*
504dfd402a4SMarco Elver 	 * Re-read value, and check if it is as expected; if not, we infer a
505dfd402a4SMarco Elver 	 * racy access.
506dfd402a4SMarco Elver 	 */
50708cac604SMarco Elver 	access_mask = ctx->access_mask;
5086f2d9819SMark Rutland 	new = 0;
509dfd402a4SMarco Elver 	switch (size) {
510dfd402a4SMarco Elver 	case 1:
5116f2d9819SMark Rutland 		new = READ_ONCE(*(const u8 *)ptr);
512dfd402a4SMarco Elver 		break;
513dfd402a4SMarco Elver 	case 2:
5146f2d9819SMark Rutland 		new = READ_ONCE(*(const u16 *)ptr);
515dfd402a4SMarco Elver 		break;
516dfd402a4SMarco Elver 	case 4:
5176f2d9819SMark Rutland 		new = READ_ONCE(*(const u32 *)ptr);
518dfd402a4SMarco Elver 		break;
519dfd402a4SMarco Elver 	case 8:
5206f2d9819SMark Rutland 		new = READ_ONCE(*(const u64 *)ptr);
521dfd402a4SMarco Elver 		break;
522dfd402a4SMarco Elver 	default:
523dfd402a4SMarco Elver 		break; /* ignore; we do not diff the values */
524dfd402a4SMarco Elver 	}
525dfd402a4SMarco Elver 
5266f2d9819SMark Rutland 	diff = old ^ new;
5276f2d9819SMark Rutland 	if (access_mask)
5286f2d9819SMark Rutland 		diff &= access_mask;
5296f2d9819SMark Rutland 
530*49f72d53SMarco Elver 	/*
531*49f72d53SMarco Elver 	 * Check if we observed a value change.
532*49f72d53SMarco Elver 	 *
533*49f72d53SMarco Elver 	 * Also check if the data race should be ignored (the rules depend on
534*49f72d53SMarco Elver 	 * non-zero diff); if it is to be ignored, the below rules for
535*49f72d53SMarco Elver 	 * KCSAN_VALUE_CHANGE_MAYBE apply.
536*49f72d53SMarco Elver 	 */
537*49f72d53SMarco Elver 	if (diff && !kcsan_ignore_data_race(size, type, old, new, diff))
538b738f616SMarco Elver 		value_change = KCSAN_VALUE_CHANGE_TRUE;
539b738f616SMarco Elver 
540dfd402a4SMarco Elver 	/* Check if this access raced with another. */
5416119418fSMarco Elver 	if (!consume_watchpoint(watchpoint)) {
542dfd402a4SMarco Elver 		/*
543b738f616SMarco Elver 		 * Depending on the access type, map a value_change of MAYBE to
54481af89e1SMarco Elver 		 * TRUE (always report) or FALSE (never report).
545b738f616SMarco Elver 		 */
54681af89e1SMarco Elver 		if (value_change == KCSAN_VALUE_CHANGE_MAYBE) {
54781af89e1SMarco Elver 			if (access_mask != 0) {
54881af89e1SMarco Elver 				/*
54981af89e1SMarco Elver 				 * For accesses with access_mask, we require a
55081af89e1SMarco Elver 				 * value-change, as it is likely that races on
55181af89e1SMarco Elver 				 * ~access_mask bits are expected.
55281af89e1SMarco Elver 				 */
55381af89e1SMarco Elver 				value_change = KCSAN_VALUE_CHANGE_FALSE;
55481af89e1SMarco Elver 			} else if (size > 8 || is_assert) {
555b738f616SMarco Elver 				/* Always assume a value-change. */
556b738f616SMarco Elver 				value_change = KCSAN_VALUE_CHANGE_TRUE;
557b738f616SMarco Elver 			}
55881af89e1SMarco Elver 		}
559b738f616SMarco Elver 
560b738f616SMarco Elver 		/*
561dfd402a4SMarco Elver 		 * No need to increment 'data_races' counter, as the racing
562dfd402a4SMarco Elver 		 * thread already did.
563d591ec3dSMarco Elver 		 *
564d591ec3dSMarco Elver 		 * Count 'assert_failures' for each failed ASSERT access,
565d591ec3dSMarco Elver 		 * therefore both this thread and the racing thread may
566d591ec3dSMarco Elver 		 * increment this counter.
567dfd402a4SMarco Elver 		 */
568b738f616SMarco Elver 		if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
5692e986b81SMarco Elver 			atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
570d591ec3dSMarco Elver 
571793c2579SMark Rutland 		kcsan_report_known_origin(ptr, size, type, value_change,
5727bbe6dc0SMark Rutland 					  watchpoint - watchpoints,
5737bbe6dc0SMark Rutland 					  old, new, access_mask);
574b738f616SMarco Elver 	} else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
575dfd402a4SMarco Elver 		/* Inferring a race, since the value should not have changed. */
576d591ec3dSMarco Elver 
5772e986b81SMarco Elver 		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN]);
578d591ec3dSMarco Elver 		if (is_assert)
5792e986b81SMarco Elver 			atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
580d591ec3dSMarco Elver 
581d591ec3dSMarco Elver 		if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
5827bbe6dc0SMark Rutland 			kcsan_report_unknown_origin(ptr, size, type, old, new, access_mask);
583dfd402a4SMarco Elver 	}
584dfd402a4SMarco Elver 
5856119418fSMarco Elver 	/*
5866119418fSMarco Elver 	 * Remove watchpoint; must be after reporting, since the slot may be
5876119418fSMarco Elver 	 * reused after this point.
5886119418fSMarco Elver 	 */
5896119418fSMarco Elver 	remove_watchpoint(watchpoint);
5902e986b81SMarco Elver 	atomic_long_dec(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);
591dfd402a4SMarco Elver out_unlock:
59248b1fc19SMarco Elver 	if (!kcsan_interrupt_watcher)
593248591f5SMarco Elver 		local_irq_restore(irq_flags);
59492c209acSMarco Elver 	kcsan_restore_irqtrace(current);
595dfd402a4SMarco Elver out:
596dfd402a4SMarco Elver 	user_access_restore(ua_flags);
597dfd402a4SMarco Elver }
598dfd402a4SMarco Elver 
599dfd402a4SMarco Elver static __always_inline void check_access(const volatile void *ptr, size_t size,
600dfd402a4SMarco Elver 					 int type)
601dfd402a4SMarco Elver {
602dfd402a4SMarco Elver 	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
603dfd402a4SMarco Elver 	atomic_long_t *watchpoint;
604dfd402a4SMarco Elver 	long encoded_watchpoint;
605dfd402a4SMarco Elver 
606dfd402a4SMarco Elver 	/*
607ed95f95cSMarco Elver 	 * Do nothing for a 0-sized check; this comparison will be optimized out
608ed95f95cSMarco Elver 	 * for constant-sized instrumentation (__tsan_{read,write}N).
609ed95f95cSMarco Elver 	 */
610ed95f95cSMarco Elver 	if (unlikely(size == 0))
611ed95f95cSMarco Elver 		return;
612ed95f95cSMarco Elver 
613ed95f95cSMarco Elver 	/*
614dfd402a4SMarco Elver 	 * Avoid user_access_save in fast-path: find_watchpoint is safe without
615dfd402a4SMarco Elver 	 * user_access_save, as the address that ptr points to is only used to
616dfd402a4SMarco Elver 	 * check if a watchpoint exists; ptr is never dereferenced.
617dfd402a4SMarco Elver 	 */
618dfd402a4SMarco Elver 	watchpoint = find_watchpoint((unsigned long)ptr, size, !is_write,
619dfd402a4SMarco Elver 				     &encoded_watchpoint);
620dfd402a4SMarco Elver 	/*
621dfd402a4SMarco Elver 	 * It is safe to check kcsan_is_enabled() after find_watchpoint in the
622d591ec3dSMarco Elver 	 * slow-path, as long as no state changes that cause a race to be
623dfd402a4SMarco Elver 	 * detected and reported occur before kcsan_is_enabled() is
624dfd402a4SMarco Elver 	 * checked.
625dfd402a4SMarco Elver 	 */
626dfd402a4SMarco Elver 
627dfd402a4SMarco Elver 	if (unlikely(watchpoint != NULL))
62847144ecaSMarco Elver 		kcsan_found_watchpoint(ptr, size, type, watchpoint,
629dfd402a4SMarco Elver 				       encoded_watchpoint);
630757a4cefSMarco Elver 	else {
631757a4cefSMarco Elver 		struct kcsan_ctx *ctx = get_ctx(); /* Call only once in fast-path. */
632757a4cefSMarco Elver 
633757a4cefSMarco Elver 		if (unlikely(should_watch(ptr, size, type, ctx)))
63447144ecaSMarco Elver 			kcsan_setup_watchpoint(ptr, size, type);
635757a4cefSMarco Elver 		else if (unlikely(ctx->scoped_accesses.prev))
636757a4cefSMarco Elver 			kcsan_check_scoped_accesses();
637757a4cefSMarco Elver 	}
638dfd402a4SMarco Elver }
639dfd402a4SMarco Elver 
640dfd402a4SMarco Elver /* === Public interface ===================================================== */
641dfd402a4SMarco Elver 
642dfd402a4SMarco Elver void __init kcsan_init(void)
643dfd402a4SMarco Elver {
64471a076f4SMarco Elver 	int cpu;
64571a076f4SMarco Elver 
646dfd402a4SMarco Elver 	BUG_ON(!in_task());
647dfd402a4SMarco Elver 
64871a076f4SMarco Elver 	for_each_possible_cpu(cpu)
64971a076f4SMarco Elver 		per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();
650dfd402a4SMarco Elver 
651dfd402a4SMarco Elver 	/*
652dfd402a4SMarco Elver 	 * We are in the init task, and no other tasks should be running;
653dfd402a4SMarco Elver 	 * WRITE_ONCE without memory barrier is sufficient.
654dfd402a4SMarco Elver 	 */
65527787930SMarco Elver 	if (kcsan_early_enable) {
65627787930SMarco Elver 		pr_info("enabled early\n");
657dfd402a4SMarco Elver 		WRITE_ONCE(kcsan_enabled, true);
658dfd402a4SMarco Elver 	}
65927787930SMarco Elver }
660dfd402a4SMarco Elver 
661dfd402a4SMarco Elver /* === Exported interface =================================================== */
662dfd402a4SMarco Elver 
663dfd402a4SMarco Elver void kcsan_disable_current(void)
664dfd402a4SMarco Elver {
665dfd402a4SMarco Elver 	++get_ctx()->disable_count;
666dfd402a4SMarco Elver }
667dfd402a4SMarco Elver EXPORT_SYMBOL(kcsan_disable_current);
668dfd402a4SMarco Elver 
669dfd402a4SMarco Elver void kcsan_enable_current(void)
670dfd402a4SMarco Elver {
671dfd402a4SMarco Elver 	if (get_ctx()->disable_count-- == 0) {
672dfd402a4SMarco Elver 		/*
673dfd402a4SMarco Elver 		 * Warn if kcsan_enable_current() calls are unbalanced with
674dfd402a4SMarco Elver 		 * kcsan_disable_current() calls, which causes disable_count to
675dfd402a4SMarco Elver 		 * become negative and should not happen.
676dfd402a4SMarco Elver 		 */
677dfd402a4SMarco Elver 		kcsan_disable_current(); /* restore to 0, KCSAN still enabled */
678dfd402a4SMarco Elver 		kcsan_disable_current(); /* disable to generate warning */
679dfd402a4SMarco Elver 		WARN(1, "Unbalanced %s()", __func__);
680dfd402a4SMarco Elver 		kcsan_enable_current();
681dfd402a4SMarco Elver 	}
682dfd402a4SMarco Elver }
683dfd402a4SMarco Elver EXPORT_SYMBOL(kcsan_enable_current);
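
/*
 * Usage sketch (not from this file): callers bracket code whose memory
 * accesses should not be analysed, e.g. code known to produce benign or
 * expected races:
 *
 *	kcsan_disable_current();
 *	... accesses that KCSAN should ignore ...
 *	kcsan_enable_current();
 *
 * Calls nest via disable_count, so each enable must balance a prior disable.
 */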
684dfd402a4SMarco Elver 
68519acd03dSMarco Elver void kcsan_enable_current_nowarn(void)
68619acd03dSMarco Elver {
68719acd03dSMarco Elver 	if (get_ctx()->disable_count-- == 0)
68819acd03dSMarco Elver 		kcsan_disable_current();
68919acd03dSMarco Elver }
69019acd03dSMarco Elver EXPORT_SYMBOL(kcsan_enable_current_nowarn);
69119acd03dSMarco Elver 
692dfd402a4SMarco Elver void kcsan_nestable_atomic_begin(void)
693dfd402a4SMarco Elver {
694dfd402a4SMarco Elver 	/*
695dfd402a4SMarco Elver 	 * Do *not* check and warn if we are in a flat atomic region: nestable
696dfd402a4SMarco Elver 	 * and flat atomic regions are independent of each other.
697dfd402a4SMarco Elver 	 * See include/linux/kcsan.h: struct kcsan_ctx comments for more
698dfd402a4SMarco Elver 	 * details.
699dfd402a4SMarco Elver 	 */
700dfd402a4SMarco Elver 
701dfd402a4SMarco Elver 	++get_ctx()->atomic_nest_count;
702dfd402a4SMarco Elver }
703dfd402a4SMarco Elver EXPORT_SYMBOL(kcsan_nestable_atomic_begin);
704dfd402a4SMarco Elver 
705dfd402a4SMarco Elver void kcsan_nestable_atomic_end(void)
706dfd402a4SMarco Elver {
707dfd402a4SMarco Elver 	if (get_ctx()->atomic_nest_count-- == 0) {
708dfd402a4SMarco Elver 		/*
709dfd402a4SMarco Elver 		 * Warn if kcsan_nestable_atomic_end() calls are unbalanced with
710dfd402a4SMarco Elver 		 * kcsan_nestable_atomic_begin() calls, which causes
711dfd402a4SMarco Elver 		 * atomic_nest_count to become negative and should not happen.
712dfd402a4SMarco Elver 		 */
713dfd402a4SMarco Elver 		kcsan_nestable_atomic_begin(); /* restore to 0 */
714dfd402a4SMarco Elver 		kcsan_disable_current(); /* disable to generate warning */
715dfd402a4SMarco Elver 		WARN(1, "Unbalanced %s()", __func__);
716dfd402a4SMarco Elver 		kcsan_enable_current();
717dfd402a4SMarco Elver 	}
718dfd402a4SMarco Elver }
719dfd402a4SMarco Elver EXPORT_SYMBOL(kcsan_nestable_atomic_end);
720dfd402a4SMarco Elver 
721dfd402a4SMarco Elver void kcsan_flat_atomic_begin(void)
722dfd402a4SMarco Elver {
723dfd402a4SMarco Elver 	get_ctx()->in_flat_atomic = true;
724dfd402a4SMarco Elver }
725dfd402a4SMarco Elver EXPORT_SYMBOL(kcsan_flat_atomic_begin);
726dfd402a4SMarco Elver 
727dfd402a4SMarco Elver void kcsan_flat_atomic_end(void)
728dfd402a4SMarco Elver {
729dfd402a4SMarco Elver 	get_ctx()->in_flat_atomic = false;
730dfd402a4SMarco Elver }
731dfd402a4SMarco Elver EXPORT_SYMBOL(kcsan_flat_atomic_end);
732dfd402a4SMarco Elver 
733dfd402a4SMarco Elver void kcsan_atomic_next(int n)
734dfd402a4SMarco Elver {
735dfd402a4SMarco Elver 	get_ctx()->atomic_next = n;
736dfd402a4SMarco Elver }
737dfd402a4SMarco Elver EXPORT_SYMBOL(kcsan_atomic_next);
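
/*
 * Usage sketch: kcsan_atomic_next(n) marks the next n accesses in this context
 * as atomic. This is roughly how the seqlock reader side uses it (see
 * include/linux/seqlock.h; shown here only as an illustration):
 *
 *	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);	// enter read section
 *	... reads that may race with a writer, resolved by retrying ...
 *	kcsan_atomic_next(0);				// end of read section
 */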
738dfd402a4SMarco Elver 
73981af89e1SMarco Elver void kcsan_set_access_mask(unsigned long mask)
74081af89e1SMarco Elver {
74181af89e1SMarco Elver 	get_ctx()->access_mask = mask;
74281af89e1SMarco Elver }
74381af89e1SMarco Elver EXPORT_SYMBOL(kcsan_set_access_mask);
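
/*
 * Usage sketch (hypothetical 'flags' and ONLY_THESE_BITS): the mask restricts
 * the value-change comparison to the bits of interest, which is roughly what
 * ASSERT_EXCLUSIVE_BITS() in include/linux/kcsan-checks.h builds on:
 *
 *	kcsan_set_access_mask(ONLY_THESE_BITS);
 *	__kcsan_check_access(&flags, sizeof(flags), KCSAN_ACCESS_ASSERT);
 *	kcsan_set_access_mask(0);
 *
 * Concurrent modifications of bits outside the mask are then not reported.
 */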
74481af89e1SMarco Elver 
745757a4cefSMarco Elver struct kcsan_scoped_access *
746757a4cefSMarco Elver kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
747757a4cefSMarco Elver 			  struct kcsan_scoped_access *sa)
748757a4cefSMarco Elver {
749757a4cefSMarco Elver 	struct kcsan_ctx *ctx = get_ctx();
750757a4cefSMarco Elver 
751757a4cefSMarco Elver 	__kcsan_check_access(ptr, size, type);
752757a4cefSMarco Elver 
753757a4cefSMarco Elver 	ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */
754757a4cefSMarco Elver 
755757a4cefSMarco Elver 	INIT_LIST_HEAD(&sa->list);
756757a4cefSMarco Elver 	sa->ptr = ptr;
757757a4cefSMarco Elver 	sa->size = size;
758757a4cefSMarco Elver 	sa->type = type;
759757a4cefSMarco Elver 
760757a4cefSMarco Elver 	if (!ctx->scoped_accesses.prev) /* Lazy initialize list head. */
761757a4cefSMarco Elver 		INIT_LIST_HEAD(&ctx->scoped_accesses);
762757a4cefSMarco Elver 	list_add(&sa->list, &ctx->scoped_accesses);
763757a4cefSMarco Elver 
764757a4cefSMarco Elver 	ctx->disable_count--;
765757a4cefSMarco Elver 	return sa;
766757a4cefSMarco Elver }
767757a4cefSMarco Elver EXPORT_SYMBOL(kcsan_begin_scoped_access);
768757a4cefSMarco Elver 
769757a4cefSMarco Elver void kcsan_end_scoped_access(struct kcsan_scoped_access *sa)
770757a4cefSMarco Elver {
771757a4cefSMarco Elver 	struct kcsan_ctx *ctx = get_ctx();
772757a4cefSMarco Elver 
773757a4cefSMarco Elver 	if (WARN(!ctx->scoped_accesses.prev, "Unbalanced %s()?", __func__))
774757a4cefSMarco Elver 		return;
775757a4cefSMarco Elver 
776757a4cefSMarco Elver 	ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */
777757a4cefSMarco Elver 
778757a4cefSMarco Elver 	list_del(&sa->list);
779757a4cefSMarco Elver 	if (list_empty(&ctx->scoped_accesses))
780757a4cefSMarco Elver 		/*
781757a4cefSMarco Elver 		 * Ensure we do not enter kcsan_check_scoped_accesses()
781757a4cefSMarco Elver 		 * slow-path if unnecessary, and avoid requiring list_empty()
783757a4cefSMarco Elver 		 * in the fast-path (to avoid a READ_ONCE() and potential
784757a4cefSMarco Elver 		 * uaccess warning).
785757a4cefSMarco Elver 		 */
786757a4cefSMarco Elver 		ctx->scoped_accesses.prev = NULL;
787757a4cefSMarco Elver 
788757a4cefSMarco Elver 	ctx->disable_count--;
789757a4cefSMarco Elver 
790757a4cefSMarco Elver 	__kcsan_check_access(sa->ptr, sa->size, sa->type);
791757a4cefSMarco Elver }
792757a4cefSMarco Elver EXPORT_SYMBOL(kcsan_end_scoped_access);
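
/*
 * Usage sketch (hypothetical 'obj'): while a scoped access is active, it is
 * re-checked on each subsequent call into the runtime from this context (see
 * kcsan_check_scoped_accesses() above); the ASSERT_EXCLUSIVE_*_SCOPED()
 * helpers are built on this interface:
 *
 *	struct kcsan_scoped_access sa;
 *
 *	kcsan_begin_scoped_access(&obj, sizeof(obj), KCSAN_ACCESS_ASSERT, &sa);
 *	... region that must not race with other accesses to obj ...
 *	kcsan_end_scoped_access(&sa);
 */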
793757a4cefSMarco Elver 
794dfd402a4SMarco Elver void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
795dfd402a4SMarco Elver {
796dfd402a4SMarco Elver 	check_access(ptr, size, type);
797dfd402a4SMarco Elver }
798dfd402a4SMarco Elver EXPORT_SYMBOL(__kcsan_check_access);
799dfd402a4SMarco Elver 
800dfd402a4SMarco Elver /*
801dfd402a4SMarco Elver  * KCSAN uses the same instrumentation that is emitted by supported compilers
802dfd402a4SMarco Elver  * for ThreadSanitizer (TSAN).
803dfd402a4SMarco Elver  *
804dfd402a4SMarco Elver  * When enabled, the compiler emits instrumentation calls (the functions
805dfd402a4SMarco Elver  * prefixed with "__tsan" below) for all loads and stores that it generated;
806dfd402a4SMarco Elver  * inline asm is not instrumented.
807dfd402a4SMarco Elver  *
808dfd402a4SMarco Elver  * Note that not all supported compiler versions distinguish aligned/unaligned
809dfd402a4SMarco Elver  * accesses, but e.g. recent versions of Clang do. We simply alias the unaligned
810dfd402a4SMarco Elver  * version to the generic version, which can handle both.
811dfd402a4SMarco Elver  */
812dfd402a4SMarco Elver 
813dfd402a4SMarco Elver #define DEFINE_TSAN_READ_WRITE(size)                                           \
8149dd979baSMarco Elver 	void __tsan_read##size(void *ptr);                                     \
815dfd402a4SMarco Elver 	void __tsan_read##size(void *ptr)                                      \
816dfd402a4SMarco Elver 	{                                                                      \
817dfd402a4SMarco Elver 		check_access(ptr, size, 0);                                    \
818dfd402a4SMarco Elver 	}                                                                      \
819dfd402a4SMarco Elver 	EXPORT_SYMBOL(__tsan_read##size);                                      \
820dfd402a4SMarco Elver 	void __tsan_unaligned_read##size(void *ptr)                            \
821dfd402a4SMarco Elver 		__alias(__tsan_read##size);                                    \
822dfd402a4SMarco Elver 	EXPORT_SYMBOL(__tsan_unaligned_read##size);                            \
8239dd979baSMarco Elver 	void __tsan_write##size(void *ptr);                                    \
824dfd402a4SMarco Elver 	void __tsan_write##size(void *ptr)                                     \
825dfd402a4SMarco Elver 	{                                                                      \
826dfd402a4SMarco Elver 		check_access(ptr, size, KCSAN_ACCESS_WRITE);                   \
827dfd402a4SMarco Elver 	}                                                                      \
828dfd402a4SMarco Elver 	EXPORT_SYMBOL(__tsan_write##size);                                     \
829dfd402a4SMarco Elver 	void __tsan_unaligned_write##size(void *ptr)                           \
830dfd402a4SMarco Elver 		__alias(__tsan_write##size);                                   \
83114e2ac8dSMarco Elver 	EXPORT_SYMBOL(__tsan_unaligned_write##size);                           \
83214e2ac8dSMarco Elver 	void __tsan_read_write##size(void *ptr);                               \
83314e2ac8dSMarco Elver 	void __tsan_read_write##size(void *ptr)                                \
83414e2ac8dSMarco Elver 	{                                                                      \
83514e2ac8dSMarco Elver 		check_access(ptr, size,                                        \
83614e2ac8dSMarco Elver 			     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE);      \
83714e2ac8dSMarco Elver 	}                                                                      \
83814e2ac8dSMarco Elver 	EXPORT_SYMBOL(__tsan_read_write##size);                                \
83914e2ac8dSMarco Elver 	void __tsan_unaligned_read_write##size(void *ptr)                      \
84014e2ac8dSMarco Elver 		__alias(__tsan_read_write##size);                              \
84114e2ac8dSMarco Elver 	EXPORT_SYMBOL(__tsan_unaligned_read_write##size)
842dfd402a4SMarco Elver 
843dfd402a4SMarco Elver DEFINE_TSAN_READ_WRITE(1);
844dfd402a4SMarco Elver DEFINE_TSAN_READ_WRITE(2);
845dfd402a4SMarco Elver DEFINE_TSAN_READ_WRITE(4);
846dfd402a4SMarco Elver DEFINE_TSAN_READ_WRITE(8);
847dfd402a4SMarco Elver DEFINE_TSAN_READ_WRITE(16);
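
/*
 * Illustration (compiler output sketch, not part of this file): for a plain
 * instrumented access such as "x = *p;" on a 4-byte object, the compiler
 * emits a call before the access itself:
 *
 *	__tsan_read4(p);
 *	x = *p;
 *
 * and correspondingly __tsan_write4(p) before a 4-byte store; the unaligned
 * variants alias to the same handlers above.
 */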
848dfd402a4SMarco Elver 
8499dd979baSMarco Elver void __tsan_read_range(void *ptr, size_t size);
850dfd402a4SMarco Elver void __tsan_read_range(void *ptr, size_t size)
851dfd402a4SMarco Elver {
852dfd402a4SMarco Elver 	check_access(ptr, size, 0);
853dfd402a4SMarco Elver }
854dfd402a4SMarco Elver EXPORT_SYMBOL(__tsan_read_range);
855dfd402a4SMarco Elver 
8569dd979baSMarco Elver void __tsan_write_range(void *ptr, size_t size);
857dfd402a4SMarco Elver void __tsan_write_range(void *ptr, size_t size)
858dfd402a4SMarco Elver {
859dfd402a4SMarco Elver 	check_access(ptr, size, KCSAN_ACCESS_WRITE);
860dfd402a4SMarco Elver }
861dfd402a4SMarco Elver EXPORT_SYMBOL(__tsan_write_range);
862dfd402a4SMarco Elver 
863dfd402a4SMarco Elver /*
86475d75b7aSMarco Elver  * Use of explicit volatile is generally disallowed [1]; however, volatile is
86575d75b7aSMarco Elver  * still used in various concurrent contexts, whether in low-level
86675d75b7aSMarco Elver  * synchronization primitives or for legacy reasons.
86775d75b7aSMarco Elver  * [1] https://lwn.net/Articles/233479/
86875d75b7aSMarco Elver  *
86975d75b7aSMarco Elver  * We only consider volatile accesses atomic if they are aligned and would pass
87075d75b7aSMarco Elver  * the size-check of compiletime_assert_rwonce_type().
87175d75b7aSMarco Elver  */
87275d75b7aSMarco Elver #define DEFINE_TSAN_VOLATILE_READ_WRITE(size)                                  \
8739dd979baSMarco Elver 	void __tsan_volatile_read##size(void *ptr);                            \
87475d75b7aSMarco Elver 	void __tsan_volatile_read##size(void *ptr)                             \
87575d75b7aSMarco Elver 	{                                                                      \
87675d75b7aSMarco Elver 		const bool is_atomic = size <= sizeof(long long) &&            \
87775d75b7aSMarco Elver 				       IS_ALIGNED((unsigned long)ptr, size);   \
87875d75b7aSMarco Elver 		if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic)      \
87975d75b7aSMarco Elver 			return;                                                \
88075d75b7aSMarco Elver 		check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0);  \
88175d75b7aSMarco Elver 	}                                                                      \
88275d75b7aSMarco Elver 	EXPORT_SYMBOL(__tsan_volatile_read##size);                             \
88375d75b7aSMarco Elver 	void __tsan_unaligned_volatile_read##size(void *ptr)                   \
88475d75b7aSMarco Elver 		__alias(__tsan_volatile_read##size);                           \
88575d75b7aSMarco Elver 	EXPORT_SYMBOL(__tsan_unaligned_volatile_read##size);                   \
8869dd979baSMarco Elver 	void __tsan_volatile_write##size(void *ptr);                           \
88775d75b7aSMarco Elver 	void __tsan_volatile_write##size(void *ptr)                            \
88875d75b7aSMarco Elver 	{                                                                      \
88975d75b7aSMarco Elver 		const bool is_atomic = size <= sizeof(long long) &&            \
89075d75b7aSMarco Elver 				       IS_ALIGNED((unsigned long)ptr, size);   \
89175d75b7aSMarco Elver 		if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic)      \
89275d75b7aSMarco Elver 			return;                                                \
89375d75b7aSMarco Elver 		check_access(ptr, size,                                        \
89475d75b7aSMarco Elver 			     KCSAN_ACCESS_WRITE |                              \
89575d75b7aSMarco Elver 				     (is_atomic ? KCSAN_ACCESS_ATOMIC : 0));   \
89675d75b7aSMarco Elver 	}                                                                      \
89775d75b7aSMarco Elver 	EXPORT_SYMBOL(__tsan_volatile_write##size);                            \
89875d75b7aSMarco Elver 	void __tsan_unaligned_volatile_write##size(void *ptr)                  \
89975d75b7aSMarco Elver 		__alias(__tsan_volatile_write##size);                          \
90075d75b7aSMarco Elver 	EXPORT_SYMBOL(__tsan_unaligned_volatile_write##size)
90175d75b7aSMarco Elver 
90275d75b7aSMarco Elver DEFINE_TSAN_VOLATILE_READ_WRITE(1);
90375d75b7aSMarco Elver DEFINE_TSAN_VOLATILE_READ_WRITE(2);
90475d75b7aSMarco Elver DEFINE_TSAN_VOLATILE_READ_WRITE(4);
90575d75b7aSMarco Elver DEFINE_TSAN_VOLATILE_READ_WRITE(8);
90675d75b7aSMarco Elver DEFINE_TSAN_VOLATILE_READ_WRITE(16);
90775d75b7aSMarco Elver 
90875d75b7aSMarco Elver /*
909dfd402a4SMarco Elver  * The below are not required by KCSAN, but can still be emitted by the
910dfd402a4SMarco Elver  * compiler.
911dfd402a4SMarco Elver  */
9129dd979baSMarco Elver void __tsan_func_entry(void *call_pc);
913dfd402a4SMarco Elver void __tsan_func_entry(void *call_pc)
914dfd402a4SMarco Elver {
915dfd402a4SMarco Elver }
916dfd402a4SMarco Elver EXPORT_SYMBOL(__tsan_func_entry);
9179dd979baSMarco Elver void __tsan_func_exit(void);
918dfd402a4SMarco Elver void __tsan_func_exit(void)
919dfd402a4SMarco Elver {
920dfd402a4SMarco Elver }
921dfd402a4SMarco Elver EXPORT_SYMBOL(__tsan_func_exit);
9229dd979baSMarco Elver void __tsan_init(void);
923dfd402a4SMarco Elver void __tsan_init(void)
924dfd402a4SMarco Elver {
925dfd402a4SMarco Elver }
926dfd402a4SMarco Elver EXPORT_SYMBOL(__tsan_init);
9270f8ad5f2SMarco Elver 
9280f8ad5f2SMarco Elver /*
9290f8ad5f2SMarco Elver  * Instrumentation for atomic builtins (__atomic_*, __sync_*).
9300f8ad5f2SMarco Elver  *
9310f8ad5f2SMarco Elver  * Normal kernel code _should not_ be using them directly, but some
9320f8ad5f2SMarco Elver  * architectures may implement some or all atomics using the compilers'
9330f8ad5f2SMarco Elver  * builtins.
9340f8ad5f2SMarco Elver  *
9350f8ad5f2SMarco Elver  * Note: If an architecture decides to fully implement atomics using the
9360f8ad5f2SMarco Elver  * builtins, because they are implicitly instrumented by KCSAN (and KASAN,
9370f8ad5f2SMarco Elver  * etc.), implementing the ARCH_ATOMIC interface (to get instrumentation via
9380f8ad5f2SMarco Elver  * atomic-instrumented) is no longer necessary.
9390f8ad5f2SMarco Elver  *
9400f8ad5f2SMarco Elver  * TSAN instrumentation replaces atomic accesses with calls to any of the below
9410f8ad5f2SMarco Elver  * functions, whose job is to also execute the operation itself.
9420f8ad5f2SMarco Elver  */
9430f8ad5f2SMarco Elver 
9440f8ad5f2SMarco Elver #define DEFINE_TSAN_ATOMIC_LOAD_STORE(bits)                                                        \
9450f8ad5f2SMarco Elver 	u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder);                      \
9460f8ad5f2SMarco Elver 	u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder)                       \
9470f8ad5f2SMarco Elver 	{                                                                                          \
9489d1335ccSMarco Elver 		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                                    \
9490f8ad5f2SMarco Elver 			check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC);              \
9509d1335ccSMarco Elver 		}                                                                                  \
9510f8ad5f2SMarco Elver 		return __atomic_load_n(ptr, memorder);                                             \
9520f8ad5f2SMarco Elver 	}                                                                                          \
9530f8ad5f2SMarco Elver 	EXPORT_SYMBOL(__tsan_atomic##bits##_load);                                                 \
9540f8ad5f2SMarco Elver 	void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder);                   \
9550f8ad5f2SMarco Elver 	void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder)                    \
9560f8ad5f2SMarco Elver 	{                                                                                          \
9579d1335ccSMarco Elver 		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                                    \
9589d1335ccSMarco Elver 			check_access(ptr, bits / BITS_PER_BYTE,                                    \
9599d1335ccSMarco Elver 				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC);                    \
9609d1335ccSMarco Elver 		}                                                                                  \
9610f8ad5f2SMarco Elver 		__atomic_store_n(ptr, v, memorder);                                                \
9620f8ad5f2SMarco Elver 	}                                                                                          \
9630f8ad5f2SMarco Elver 	EXPORT_SYMBOL(__tsan_atomic##bits##_store)
9640f8ad5f2SMarco Elver 
9650f8ad5f2SMarco Elver #define DEFINE_TSAN_ATOMIC_RMW(op, bits, suffix)                                                   \
9660f8ad5f2SMarco Elver 	u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder);                 \
9670f8ad5f2SMarco Elver 	u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder)                  \
9680f8ad5f2SMarco Elver 	{                                                                                          \
9699d1335ccSMarco Elver 		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                                    \
97014e2ac8dSMarco Elver 			check_access(ptr, bits / BITS_PER_BYTE,                                    \
9719d1335ccSMarco Elver 				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE |                  \
9729d1335ccSMarco Elver 					     KCSAN_ACCESS_ATOMIC);                                 \
9739d1335ccSMarco Elver 		}                                                                                  \
9740f8ad5f2SMarco Elver 		return __atomic_##op##suffix(ptr, v, memorder);                                    \
9750f8ad5f2SMarco Elver 	}                                                                                          \
9760f8ad5f2SMarco Elver 	EXPORT_SYMBOL(__tsan_atomic##bits##_##op)
9770f8ad5f2SMarco Elver 
9780f8ad5f2SMarco Elver /*
9790f8ad5f2SMarco Elver  * Note: CAS operations are always classified as write, even in case they
9800f8ad5f2SMarco Elver  * fail. We cannot perform check_access() after a write, as it might lead to
9810f8ad5f2SMarco Elver  * false positives, in cases such as:
9820f8ad5f2SMarco Elver  *
9830f8ad5f2SMarco Elver  *	T0: __atomic_compare_exchange_n(&p->flag, &old, 1, ...)
9840f8ad5f2SMarco Elver  *
9850f8ad5f2SMarco Elver  *	T1: if (__atomic_load_n(&p->flag, ...)) {
9860f8ad5f2SMarco Elver  *		modify *p;
9870f8ad5f2SMarco Elver  *		p->flag = 0;
9880f8ad5f2SMarco Elver  *	    }
9890f8ad5f2SMarco Elver  *
9900f8ad5f2SMarco Elver  * The only downside is that, if there are 3 threads, with one CAS that
9910f8ad5f2SMarco Elver  * succeeds, another CAS that fails, and an unmarked racing operation, we may
9920f8ad5f2SMarco Elver  * point at the wrong CAS as the source of the race. However, if we assume that
9930f8ad5f2SMarco Elver  * all CAS can succeed in some other execution, the data race is still valid.
9940f8ad5f2SMarco Elver  */
9950f8ad5f2SMarco Elver #define DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strength, weak)                                           \
9960f8ad5f2SMarco Elver 	int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp,          \
9970f8ad5f2SMarco Elver 							      u##bits val, int mo, int fail_mo);   \
9980f8ad5f2SMarco Elver 	int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp,          \
9990f8ad5f2SMarco Elver 							      u##bits val, int mo, int fail_mo)    \
10000f8ad5f2SMarco Elver 	{                                                                                          \
10019d1335ccSMarco Elver 		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                                    \
100214e2ac8dSMarco Elver 			check_access(ptr, bits / BITS_PER_BYTE,                                    \
10039d1335ccSMarco Elver 				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE |                  \
10049d1335ccSMarco Elver 					     KCSAN_ACCESS_ATOMIC);                                 \
10059d1335ccSMarco Elver 		}                                                                                  \
10060f8ad5f2SMarco Elver 		return __atomic_compare_exchange_n(ptr, exp, val, weak, mo, fail_mo);              \
10070f8ad5f2SMarco Elver 	}                                                                                          \
10080f8ad5f2SMarco Elver 	EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_##strength)
10090f8ad5f2SMarco Elver 
10100f8ad5f2SMarco Elver #define DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits)                                                       \
10110f8ad5f2SMarco Elver 	u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
10120f8ad5f2SMarco Elver 							   int mo, int fail_mo);                   \
10130f8ad5f2SMarco Elver 	u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
10140f8ad5f2SMarco Elver 							   int mo, int fail_mo)                    \
10150f8ad5f2SMarco Elver 	{                                                                                          \
10169d1335ccSMarco Elver 		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {                                    \
101714e2ac8dSMarco Elver 			check_access(ptr, bits / BITS_PER_BYTE,                                    \
10189d1335ccSMarco Elver 				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE |                  \
10199d1335ccSMarco Elver 					     KCSAN_ACCESS_ATOMIC);                                 \
10209d1335ccSMarco Elver 		}                                                                                  \
10210f8ad5f2SMarco Elver 		__atomic_compare_exchange_n(ptr, &exp, val, 0, mo, fail_mo);                       \
10220f8ad5f2SMarco Elver 		return exp;                                                                        \
10230f8ad5f2SMarco Elver 	}                                                                                          \
10240f8ad5f2SMarco Elver 	EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_val)
10250f8ad5f2SMarco Elver 
10260f8ad5f2SMarco Elver #define DEFINE_TSAN_ATOMIC_OPS(bits)                                                               \
10270f8ad5f2SMarco Elver 	DEFINE_TSAN_ATOMIC_LOAD_STORE(bits);                                                       \
10280f8ad5f2SMarco Elver 	DEFINE_TSAN_ATOMIC_RMW(exchange, bits, _n);                                                \
10290f8ad5f2SMarco Elver 	DEFINE_TSAN_ATOMIC_RMW(fetch_add, bits, );                                                 \
10300f8ad5f2SMarco Elver 	DEFINE_TSAN_ATOMIC_RMW(fetch_sub, bits, );                                                 \
10310f8ad5f2SMarco Elver 	DEFINE_TSAN_ATOMIC_RMW(fetch_and, bits, );                                                 \
10320f8ad5f2SMarco Elver 	DEFINE_TSAN_ATOMIC_RMW(fetch_or, bits, );                                                  \
10330f8ad5f2SMarco Elver 	DEFINE_TSAN_ATOMIC_RMW(fetch_xor, bits, );                                                 \
10340f8ad5f2SMarco Elver 	DEFINE_TSAN_ATOMIC_RMW(fetch_nand, bits, );                                                \
10350f8ad5f2SMarco Elver 	DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strong, 0);                                               \
10360f8ad5f2SMarco Elver 	DEFINE_TSAN_ATOMIC_CMPXCHG(bits, weak, 1);                                                 \
10370f8ad5f2SMarco Elver 	DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits)
10380f8ad5f2SMarco Elver 
10390f8ad5f2SMarco Elver DEFINE_TSAN_ATOMIC_OPS(8);
10400f8ad5f2SMarco Elver DEFINE_TSAN_ATOMIC_OPS(16);
10410f8ad5f2SMarco Elver DEFINE_TSAN_ATOMIC_OPS(32);
10420f8ad5f2SMarco Elver DEFINE_TSAN_ATOMIC_OPS(64);
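
/*
 * Illustration (sketch, hypothetical 'counter'): with atomic builtins
 * instrumented, a call such as
 *
 *	__atomic_fetch_add(&counter, 1, __ATOMIC_RELAXED);
 *
 * is routed through __tsan_atomic32_fetch_add(&counter, 1, __ATOMIC_RELAXED)
 * for a 32-bit counter, which performs the check_access() above and then
 * executes the underlying atomic operation.
 */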
10430f8ad5f2SMarco Elver 
10440f8ad5f2SMarco Elver void __tsan_atomic_thread_fence(int memorder);
10450f8ad5f2SMarco Elver void __tsan_atomic_thread_fence(int memorder)
10460f8ad5f2SMarco Elver {
10470f8ad5f2SMarco Elver 	__atomic_thread_fence(memorder);
10480f8ad5f2SMarco Elver }
10490f8ad5f2SMarco Elver EXPORT_SYMBOL(__tsan_atomic_thread_fence);
10500f8ad5f2SMarco Elver 
10510f8ad5f2SMarco Elver void __tsan_atomic_signal_fence(int memorder);
10520f8ad5f2SMarco Elver void __tsan_atomic_signal_fence(int memorder) { }
10530f8ad5f2SMarco Elver EXPORT_SYMBOL(__tsan_atomic_signal_fence);