// SPDX-License-Identifier: GPL-2.0
/*
 * KCSAN core runtime.
 *
 * Copyright (C) 2019, Google LLC.
 */

#define pr_fmt(fmt) "kcsan: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/uaccess.h>

#include "encoding.h"
#include "kcsan.h"
#include "permissive.h"

static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE);
unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH;
static bool kcsan_interrupt_watcher = IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER);

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kcsan."
module_param_named(early_enable, kcsan_early_enable, bool, 0);
module_param_named(udelay_task, kcsan_udelay_task, uint, 0644);
module_param_named(udelay_interrupt, kcsan_udelay_interrupt, uint, 0644);
module_param_named(skip_watch, kcsan_skip_watch, long, 0644);
module_param_named(interrupt_watcher, kcsan_interrupt_watcher, bool, 0444);

#ifdef CONFIG_KCSAN_WEAK_MEMORY
static bool kcsan_weak_memory = true;
module_param_named(weak_memory, kcsan_weak_memory, bool, 0644);
#else
#define kcsan_weak_memory false
#endif

bool kcsan_enabled;

/* Per-CPU kcsan_ctx for interrupts */
static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
        .scoped_accesses = {LIST_POISON1, NULL},
};

/*
 * Helper macros to index into adjacent slots, starting from address slot
 * itself, followed by the right and left slots.
 *
 * The purpose is 2-fold:
 *
 * 1. if during insertion the address slot is already occupied, check if
 *    any adjacent slots are free;
 * 2. accesses that straddle a slot boundary due to size that exceeds a
 *    slot's range may check adjacent slots if any watchpoint matches.
 *
 * Note that accesses with very large size may still miss a watchpoint; however,
 * given this should be rare, this is a reasonable trade-off to make, since this
 * will avoid:
 *
 * 1. excessive contention between watchpoint checks and setup;
 * 2. larger number of simultaneous watchpoints without sacrificing
 *    performance.
 *
 * Example: SLOT_IDX values for KCSAN_CHECK_ADJACENT=1, where i is [0, 1, 2]:
 *
 *   slot=0:  [ 1,  2,  0]
 *   slot=9:  [10, 11,  9]
 *   slot=63: [64, 65, 63]
 */
#define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))
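
/*
 * For illustration, expanding the slot=9 row of the table above (with
 * KCSAN_CHECK_ADJACENT=1, NUM_SLOTS is 1 + 2*KCSAN_CHECK_ADJACENT = 3):
 *
 *   SLOT_IDX(9, 0) = 9 + ((0 + 1) % 3) = 10
 *   SLOT_IDX(9, 1) = 9 + ((1 + 1) % 3) = 11
 *   SLOT_IDX(9, 2) = 9 + ((2 + 1) % 3) =  9
 *
 * Because the watchpoints array is over-sized by NUM_SLOTS-1 entries, address
 * slot 9 occupies array entries [9, 11], with the middle entry (10) as its
 * primary slot -- which is why SLOT_IDX visits the middle entry first.
 */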

/*
 * SLOT_IDX_FAST is used in the fast-path. Not first checking the address's primary
 * slot (middle) is fine if we assume that races occur rarely. The set of
 * indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to
 * {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}.
 */
#define SLOT_IDX_FAST(slot, i) (slot + i)

/*
 * Watchpoints, with each entry encoded as defined in encoding.h: in order to be
 * able to safely update and access a watchpoint without introducing locking
 * overhead, we encode each watchpoint as a single atomic long. The initial
 * zero-initialized state matches INVALID_WATCHPOINT.
 *
 * Add NUM_SLOTS-1 entries to account for overflow; this helps avoid having to
 * use more complicated SLOT_IDX_FAST calculation with modulo in the fast-path.
 */
static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
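
/*
 * Sizing example (assuming the Kconfig default CONFIG_KCSAN_NUM_WATCHPOINTS=64
 * with KCSAN_CHECK_ADJACENT=1, i.e. NUM_SLOTS=3): the array has 64 + 2 = 66
 * entries, so even SLOT_IDX_FAST(63, 2) == 65 stays in bounds without needing
 * a modulo in the fast-path.
 */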

/*
 * Instructions to skip watching counter, used in should_watch(). We use a
 * per-CPU counter to avoid excessive contention.
 */
static DEFINE_PER_CPU(long, kcsan_skip);

/* For kcsan_prandom_u32_max(). */
static DEFINE_PER_CPU(u32, kcsan_rand_state);

static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
                                                      size_t size,
                                                      bool expect_write,
                                                      long *encoded_watchpoint)
{
        const int slot = watchpoint_slot(addr);
        const unsigned long addr_masked = addr & WATCHPOINT_ADDR_MASK;
        atomic_long_t *watchpoint;
        unsigned long wp_addr_masked;
        size_t wp_size;
        bool is_write;
        int i;

        BUILD_BUG_ON(CONFIG_KCSAN_NUM_WATCHPOINTS < NUM_SLOTS);

        for (i = 0; i < NUM_SLOTS; ++i) {
                watchpoint = &watchpoints[SLOT_IDX_FAST(slot, i)];
                *encoded_watchpoint = atomic_long_read(watchpoint);
                if (!decode_watchpoint(*encoded_watchpoint, &wp_addr_masked,
                                       &wp_size, &is_write))
                        continue;

                if (expect_write && !is_write)
                        continue;

                /* Check if the watchpoint matches the access. */
                if (matching_access(wp_addr_masked, wp_size, addr_masked, size))
                        return watchpoint;
        }

        return NULL;
}

static inline atomic_long_t *
insert_watchpoint(unsigned long addr, size_t size, bool is_write)
{
        const int slot = watchpoint_slot(addr);
        const long encoded_watchpoint = encode_watchpoint(addr, size, is_write);
        atomic_long_t *watchpoint;
        int i;

        /* Check slot index logic, ensuring we stay within array bounds. */
        BUILD_BUG_ON(SLOT_IDX(0, 0) != KCSAN_CHECK_ADJACENT);
        BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT+1) != 0);
        BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT) != ARRAY_SIZE(watchpoints)-1);
        BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT+1) != ARRAY_SIZE(watchpoints) - NUM_SLOTS);

        for (i = 0; i < NUM_SLOTS; ++i) {
                long expect_val = INVALID_WATCHPOINT;

                /* Try to acquire this slot. */
                watchpoint = &watchpoints[SLOT_IDX(slot, i)];
                if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val, encoded_watchpoint))
                        return watchpoint;
        }

        return NULL;
}

/*
 * Return true if watchpoint was successfully consumed, false otherwise.
 *
 * This may return false if:
 *
 *      1. another thread already consumed the watchpoint;
 *      2. the thread that set up the watchpoint already removed it;
 *      3. the watchpoint was removed and then re-used.
 */
static __always_inline bool
try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint)
{
        return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT);
}

/* Return true if watchpoint was not touched, false if already consumed. */
static inline bool consume_watchpoint(atomic_long_t *watchpoint)
{
        return atomic_long_xchg_relaxed(watchpoint, CONSUMED_WATCHPOINT) != CONSUMED_WATCHPOINT;
}

/* Remove the watchpoint -- its slot may be reused after. */
static inline void remove_watchpoint(atomic_long_t *watchpoint)
{
        atomic_long_set(watchpoint, INVALID_WATCHPOINT);
}

static __always_inline struct kcsan_ctx *get_ctx(void)
{
        /*
         * In interrupts, use raw_cpu_ptr to avoid unnecessary checks that would
         * also result in calls that generate warnings in uaccess regions.
         */
        return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
}

static __always_inline void
check_access(const volatile void *ptr, size_t size, int type, unsigned long ip);

/* Check scoped accesses; never inline because this is a slow-path! */
static noinline void kcsan_check_scoped_accesses(void)
{
        struct kcsan_ctx *ctx = get_ctx();
        struct kcsan_scoped_access *scoped_access;

        if (ctx->disable_scoped)
                return;

        ctx->disable_scoped++;
        list_for_each_entry(scoped_access, &ctx->scoped_accesses, list) {
                check_access(scoped_access->ptr, scoped_access->size,
                             scoped_access->type, scoped_access->ip);
        }
        ctx->disable_scoped--;
}

/* Rules for generic atomic accesses. Called from fast-path. */
static __always_inline bool
is_atomic(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)
{
        if (type & KCSAN_ACCESS_ATOMIC)
                return true;

        /*
         * Unless explicitly declared atomic, never consider an assertion access
         * as atomic. This allows using them also in atomic regions, such as
         * seqlocks, without implicitly changing their semantics.
         */
        if (type & KCSAN_ACCESS_ASSERT)
                return false;

        if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
            (type & KCSAN_ACCESS_WRITE) && size <= sizeof(long) &&
            !(type & KCSAN_ACCESS_COMPOUND) && IS_ALIGNED((unsigned long)ptr, size))
                return true; /* Assume aligned writes up to word size are atomic. */

        if (ctx->atomic_next > 0) {
                /*
                 * Because we do not have separate contexts for nested
                 * interrupts, in case atomic_next is set, we simply assume that
                 * the outer interrupt set atomic_next. In the worst case, we
                 * will conservatively consider operations as atomic. This is a
                 * reasonable trade-off to make, since this case should be
                 * extremely rare; however, even if extremely rare, it could
                 * lead to false positives otherwise.
                 */
                if ((hardirq_count() >> HARDIRQ_SHIFT) < 2)
                        --ctx->atomic_next; /* in task, or outer interrupt */
                return true;
        }

        return ctx->atomic_nest_count > 0 || ctx->in_flat_atomic;
}
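
/*
 * Example for the plain-writes rule above (assuming
 * CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=y on a 64-bit kernel): a naturally
 * aligned plain write such as "x = 1;" to a 4-byte int is treated as atomic
 * and never watched, whereas "x++;" (instrumented as a compound read-write)
 * or a 16-byte struct copy remains a candidate for a watchpoint.
 */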

static __always_inline bool
should_watch(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)
{
        /*
         * Never set up watchpoints when memory operations are atomic.
         *
         * Need to check this first, before kcsan_skip check below: (1) atomics
         * should not count towards skipped instructions, and (2) to actually
         * decrement kcsan_atomic_next for consecutive instruction stream.
         */
        if (is_atomic(ctx, ptr, size, type))
                return false;

        if (this_cpu_dec_return(kcsan_skip) >= 0)
                return false;

        /*
         * NOTE: If we get here, kcsan_skip must always be reset in slow path
         * via reset_kcsan_skip() to avoid underflow.
         */

        /* this operation should be watched */
        return true;
}

/*
 * Returns a pseudo-random number in interval [0, ep_ro). Simple linear
 * congruential generator, using constants from "Numerical Recipes".
 */
static u32 kcsan_prandom_u32_max(u32 ep_ro)
{
        u32 state = this_cpu_read(kcsan_rand_state);

        state = 1664525 * state + 1013904223;
        this_cpu_write(kcsan_rand_state, state);

        return state % ep_ro;
}
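
/*
 * For illustration, one LCG step starting from state 1:
 *   1664525 * 1 + 1013904223 = 1015568748 (mod 2^32),
 * which "% ep_ro" then reduces into [0, ep_ro). The slight modulo bias is
 * acceptable here, since only the delay/skip distributions depend on it.
 */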

static inline void reset_kcsan_skip(void)
{
        long skip_count = kcsan_skip_watch -
                          (IS_ENABLED(CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE) ?
                                   kcsan_prandom_u32_max(kcsan_skip_watch) :
                                   0);
        this_cpu_write(kcsan_skip, skip_count);
}

static __always_inline bool kcsan_is_enabled(struct kcsan_ctx *ctx)
{
        return READ_ONCE(kcsan_enabled) && !ctx->disable_count;
}

/* Introduce delay depending on context and configuration. */
static void delay_access(int type)
{
        unsigned int delay = in_task() ? kcsan_udelay_task : kcsan_udelay_interrupt;
        /* For certain access types, skew the random delay to be longer. */
        unsigned int skew_delay_order =
                (type & (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_ASSERT)) ? 1 : 0;

        delay -= IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
                         kcsan_prandom_u32_max(delay >> skew_delay_order) :
                         0;
        udelay(delay);
}
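
/*
 * Delay example (assuming the Kconfig defaults CONFIG_KCSAN_UDELAY_TASK=80
 * and CONFIG_KCSAN_DELAY_RANDOMIZE=y): a plain access in task context delays
 * for (0, 80] us; compound and ASSERT accesses (skew_delay_order == 1) only
 * subtract up to delay/2, yielding (40, 80] us and thus a wider race window.
 */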

/*
 * Reads the instrumented memory for value change detection; value change
 * detection is currently done for accesses up to a size of 8 bytes.
 */
static __always_inline u64 read_instrumented_memory(const volatile void *ptr, size_t size)
{
        /*
         * In the below we don't necessarily need the read of the location to
         * be atomic, and we don't use READ_ONCE(), since all we need for race
         * detection is to observe 2 different values.
         *
         * Furthermore, on certain architectures (such as arm64), READ_ONCE()
         * may turn into more complex instructions than a plain load that cannot
         * do unaligned accesses.
         */
        switch (size) {
        case 1:  return *(const volatile u8 *)ptr;
        case 2:  return *(const volatile u16 *)ptr;
        case 4:  return *(const volatile u32 *)ptr;
        case 8:  return *(const volatile u64 *)ptr;
        default: return 0; /* Ignore; we do not diff the values. */
        }
}

void kcsan_save_irqtrace(struct task_struct *task)
{
#ifdef CONFIG_TRACE_IRQFLAGS
        task->kcsan_save_irqtrace = task->irqtrace;
#endif
}

void kcsan_restore_irqtrace(struct task_struct *task)
{
#ifdef CONFIG_TRACE_IRQFLAGS
        task->irqtrace = task->kcsan_save_irqtrace;
#endif
}

static __always_inline int get_kcsan_stack_depth(void)
{
#ifdef CONFIG_KCSAN_WEAK_MEMORY
        return current->kcsan_stack_depth;
#else
        BUILD_BUG();
        return 0;
#endif
}

static __always_inline void add_kcsan_stack_depth(int val)
{
#ifdef CONFIG_KCSAN_WEAK_MEMORY
        current->kcsan_stack_depth += val;
#else
        BUILD_BUG();
#endif
}

static __always_inline struct kcsan_scoped_access *get_reorder_access(struct kcsan_ctx *ctx)
{
#ifdef CONFIG_KCSAN_WEAK_MEMORY
        return ctx->disable_scoped ? NULL : &ctx->reorder_access;
#else
        return NULL;
#endif
}

static __always_inline bool
find_reorder_access(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size,
                    int type, unsigned long ip)
{
        struct kcsan_scoped_access *reorder_access = get_reorder_access(ctx);

        if (!reorder_access)
                return false;

        /*
         * Note: If accesses are repeated while reorder_access is identical,
         * never matches the new access, because !(type & KCSAN_ACCESS_SCOPED).
         */
        return reorder_access->ptr == ptr && reorder_access->size == size &&
               reorder_access->type == type && reorder_access->ip == ip;
}

static inline void
set_reorder_access(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size,
                   int type, unsigned long ip)
{
        struct kcsan_scoped_access *reorder_access = get_reorder_access(ctx);

        if (!reorder_access || !kcsan_weak_memory)
                return;

        /*
         * To avoid nested interrupts or scheduler (which share kcsan_ctx)
         * reading an inconsistent reorder_access, ensure that the below has
         * exclusive access to reorder_access by disallowing concurrent use.
         */
        ctx->disable_scoped++;
        barrier();
        reorder_access->ptr = ptr;
        reorder_access->size = size;
        reorder_access->type = type | KCSAN_ACCESS_SCOPED;
        reorder_access->ip = ip;
        reorder_access->stack_depth = get_kcsan_stack_depth();
        barrier();
        ctx->disable_scoped--;
}

/*
 * Pull everything together: check_access() below contains the performance
 * critical operations; the fast-path (including check_access) functions should
 * all be inlinable by the instrumentation functions.
 *
 * The slow-path functions (kcsan_found_watchpoint, kcsan_setup_watchpoint) are
 * non-inlinable -- note that we prefix these with "kcsan_" to ensure they can
 * be filtered from the stacktrace, as well as give them unique names for the
 * UACCESS whitelist of objtool. Each function uses user_access_save/restore(),
 * since they do not access any user memory, but instrumentation is still
 * emitted in UACCESS regions.
 */

static noinline void kcsan_found_watchpoint(const volatile void *ptr,
                                            size_t size,
                                            int type,
                                            unsigned long ip,
                                            atomic_long_t *watchpoint,
                                            long encoded_watchpoint)
{
        const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
        struct kcsan_ctx *ctx = get_ctx();
        unsigned long flags;
        bool consumed;

        /*
         * We know a watchpoint exists. Let's try to keep the race-window
         * between here and finally consuming the watchpoint below as small as
         * possible -- avoid unnecessarily complex code until consumed.
         */

        if (!kcsan_is_enabled(ctx))
                return;

        /*
         * The access_mask check relies on value-change comparison. To avoid
         * reporting a race where e.g. the writer set up the watchpoint, but the
         * reader has access_mask!=0, we have to ignore the found watchpoint.
         *
         * reorder_access is never created from an access with access_mask set.
         */
        if (ctx->access_mask && !find_reorder_access(ctx, ptr, size, type, ip))
                return;

        /*
         * If the other thread does not want to ignore the access, and there was
         * a value change as a result of this thread's operation, we will still
         * generate a report of unknown origin.
         *
         * Use CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN=n to filter.
         */
        if (!is_assert && kcsan_ignore_address(ptr))
                return;

        /*
         * Consuming the watchpoint must be guarded by kcsan_is_enabled() to
         * avoid erroneously triggering reports if the context is disabled.
         */
        consumed = try_consume_watchpoint(watchpoint, encoded_watchpoint);

        /* keep this after try_consume_watchpoint */
        flags = user_access_save();

        if (consumed) {
                kcsan_save_irqtrace(current);
                kcsan_report_set_info(ptr, size, type, ip, watchpoint - watchpoints);
                kcsan_restore_irqtrace(current);
        } else {
                /*
                 * The other thread may not print any diagnostics, as it has
                 * already removed the watchpoint, or another thread consumed
                 * the watchpoint before this thread.
                 */
                atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_REPORT_RACES]);
        }

        if (is_assert)
                atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
        else
                atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_DATA_RACES]);

        user_access_restore(flags);
}

static noinline void
kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned long ip)
{
        const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
        const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
        atomic_long_t *watchpoint;
        u64 old, new, diff;
        enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
        bool interrupt_watcher = kcsan_interrupt_watcher;
        unsigned long ua_flags = user_access_save();
        struct kcsan_ctx *ctx = get_ctx();
        unsigned long access_mask = ctx->access_mask;
        unsigned long irq_flags = 0;
        bool is_reorder_access;

        /*
         * Always reset kcsan_skip counter in slow-path to avoid underflow; see
         * should_watch().
         */
        reset_kcsan_skip();

        if (!kcsan_is_enabled(ctx))
                goto out;

        /*
         * Check to-ignore addresses after kcsan_is_enabled(), as we may access
         * memory that is not yet initialized during early boot.
         */
        if (!is_assert && kcsan_ignore_address(ptr))
                goto out;

        if (!check_encodable((unsigned long)ptr, size)) {
                atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_UNENCODABLE_ACCESSES]);
                goto out;
        }

        /*
         * The local CPU cannot observe reordering of its own accesses, and
         * therefore we need to take care of 2 cases to avoid false positives:
         *
         *      1. Races of the reordered access with interrupts. To avoid, if
         *         the current access is reorder_access, disable interrupts.
         *      2. Avoid races of scoped accesses from nested interrupts (below).
         */
        is_reorder_access = find_reorder_access(ctx, ptr, size, type, ip);
        if (is_reorder_access)
                interrupt_watcher = false;
        /*
         * Avoid races of scoped accesses from nested interrupts (or scheduler).
         * Assume setting up a watchpoint for a non-scoped (normal) access that
         * also conflicts with a current scoped access. In a nested interrupt,
         * which shares the context, it would check a conflicting scoped access.
         * To avoid, disable scoped access checking.
         */
        ctx->disable_scoped++;

        /*
         * Save and restore the IRQ state trace touched by KCSAN, since KCSAN's
         * runtime is entered for every memory access, and potentially useful
         * information is lost if dirtied by KCSAN.
         */
        kcsan_save_irqtrace(current);
        if (!interrupt_watcher)
                local_irq_save(irq_flags);

        watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
        if (watchpoint == NULL) {
                /*
                 * Out of capacity: the size of 'watchpoints', and the frequency
                 * with which should_watch() returns true should be tweaked so
                 * that this case happens very rarely.
                 */
                atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_NO_CAPACITY]);
                goto out_unlock;
        }

        atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_SETUP_WATCHPOINTS]);
        atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);

        /*
         * Read the current value, to later check and infer a race if the data
         * was modified via a non-instrumented access, e.g. from a device.
         */
        old = is_reorder_access ? 0 : read_instrumented_memory(ptr, size);

        /*
         * Delay this thread, to increase probability of observing a racy
         * conflicting access.
         */
        delay_access(type);

        /*
         * Re-read value, and check if it is as expected; if not, we infer a
         * racy access.
         */
        if (!is_reorder_access) {
                new = read_instrumented_memory(ptr, size);
        } else {
                /*
                 * Reordered accesses cannot be used for value change detection,
                 * because the memory location may no longer be accessible and
                 * could result in a fault.
                 */
                new = 0;
                access_mask = 0;
        }

        diff = old ^ new;
        if (access_mask)
                diff &= access_mask;

        /*
         * Check if we observed a value change.
         *
         * Also check if the data race should be ignored (the rules depend on
         * non-zero diff); if it is to be ignored, the below rules for
         * KCSAN_VALUE_CHANGE_MAYBE apply.
         */
        if (diff && !kcsan_ignore_data_race(size, type, old, new, diff))
                value_change = KCSAN_VALUE_CHANGE_TRUE;

        /* Check if this access raced with another. */
        if (!consume_watchpoint(watchpoint)) {
                /*
                 * Depending on the access type, map a value_change of MAYBE to
                 * TRUE (always report) or FALSE (never report).
                 */
                if (value_change == KCSAN_VALUE_CHANGE_MAYBE) {
                        if (access_mask != 0) {
                                /*
                                 * For access with access_mask, we require a
                                 * value-change, as it is likely that races on
                                 * ~access_mask bits are expected.
                                 */
                                value_change = KCSAN_VALUE_CHANGE_FALSE;
                        } else if (size > 8 || is_assert) {
                                /* Always assume a value-change. */
                                value_change = KCSAN_VALUE_CHANGE_TRUE;
                        }
                }

                /*
                 * No need to increment 'data_races' counter, as the racing
                 * thread already did.
                 *
                 * Count 'assert_failures' for each failed ASSERT access,
                 * therefore both this thread and the racing thread may
                 * increment this counter.
                 */
                if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
                        atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);

                kcsan_report_known_origin(ptr, size, type, ip,
                                          value_change, watchpoint - watchpoints,
                                          old, new, access_mask);
        } else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
                /* Inferring a race, since the value should not have changed. */

                atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN]);
                if (is_assert)
                        atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);

                if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert) {
                        kcsan_report_unknown_origin(ptr, size, type, ip,
                                                    old, new, access_mask);
                }
        }

        /*
         * Remove watchpoint; must be after reporting, since the slot may be
         * reused after this point.
         */
        remove_watchpoint(watchpoint);
        atomic_long_dec(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);

out_unlock:
        if (!interrupt_watcher)
                local_irq_restore(irq_flags);
        kcsan_restore_irqtrace(current);
        ctx->disable_scoped--;

        /*
         * Reordered accesses cannot be used for value change detection,
         * therefore never consider for reordering if access_mask is set.
         * ASSERT_EXCLUSIVE are not real accesses, ignore them as well.
         */
        if (!access_mask && !is_assert)
                set_reorder_access(ctx, ptr, size, type, ip);
out:
        user_access_restore(ua_flags);
}

static __always_inline void
check_access(const volatile void *ptr, size_t size, int type, unsigned long ip)
{
        atomic_long_t *watchpoint;
        long encoded_watchpoint;

        /*
         * Do nothing for 0 sized check; this comparison will be optimized out
         * for constant sized instrumentation (__tsan_{read,write}N).
         */
        if (unlikely(size == 0))
                return;

again:
        /*
         * Avoid user_access_save in fast-path: find_watchpoint is safe without
         * user_access_save, as the address that ptr points to is only used to
         * check if a watchpoint exists; ptr is never dereferenced.
         */
        watchpoint = find_watchpoint((unsigned long)ptr, size,
                                     !(type & KCSAN_ACCESS_WRITE),
                                     &encoded_watchpoint);
        /*
         * It is safe to check kcsan_is_enabled() after find_watchpoint in the
         * slow-path, as long as no state changes that cause a race to be
         * detected and reported have occurred until kcsan_is_enabled() is
         * checked.
         */

        if (unlikely(watchpoint != NULL))
                kcsan_found_watchpoint(ptr, size, type, ip, watchpoint, encoded_watchpoint);
        else {
                struct kcsan_ctx *ctx = get_ctx(); /* Call only once in fast-path. */

                if (unlikely(should_watch(ctx, ptr, size, type))) {
                        kcsan_setup_watchpoint(ptr, size, type, ip);
                        return;
                }

                if (!(type & KCSAN_ACCESS_SCOPED)) {
                        struct kcsan_scoped_access *reorder_access = get_reorder_access(ctx);

                        if (reorder_access) {
                                /*
                                 * reorder_access check: simulates reordering of
                                 * the access after subsequent operations.
                                 */
                                ptr = reorder_access->ptr;
                                type = reorder_access->type;
                                ip = reorder_access->ip;
                                /*
                                 * Upon a nested interrupt, this context's
                                 * reorder_access can be modified (shared ctx).
                                 * We know that upon return, reorder_access is
                                 * always invalidated by setting size to 0 via
                                 * __tsan_func_exit(). Therefore we must read
                                 * and check size after the other fields.
                                 */
                                barrier();
                                size = READ_ONCE(reorder_access->size);
                                if (size)
                                        goto again;
                        }
                }

                /*
                 * Always checked last, right before returning from runtime;
                 * if reorder_access is valid, checked after it was checked.
                 */
                if (unlikely(ctx->scoped_accesses.prev))
                        kcsan_check_scoped_accesses();
        }
}

/* === Public interface ===================================================== */

void __init kcsan_init(void)
{
        int cpu;

        BUG_ON(!in_task());

        for_each_possible_cpu(cpu)
                per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();

        /*
         * We are in the init task, and no other tasks should be running;
         * WRITE_ONCE without memory barrier is sufficient.
         */
        if (kcsan_early_enable) {
                pr_info("enabled early\n");
                WRITE_ONCE(kcsan_enabled, true);
        }

        if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) ||
            IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) ||
            IS_ENABLED(CONFIG_KCSAN_PERMISSIVE) ||
            IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {
                pr_warn("non-strict mode configured - use CONFIG_KCSAN_STRICT=y to see all data races\n");
        } else {
                pr_info("strict mode configured\n");
        }
}

/* === Exported interface =================================================== */

void kcsan_disable_current(void)
{
        ++get_ctx()->disable_count;
}
EXPORT_SYMBOL(kcsan_disable_current);

void kcsan_enable_current(void)
{
        if (get_ctx()->disable_count-- == 0) {
                /*
                 * Warn if kcsan_enable_current() calls are unbalanced with
                 * kcsan_disable_current() calls, which causes disable_count to
                 * become negative and should not happen.
                 */
                kcsan_disable_current(); /* restore to 0, KCSAN still enabled */
                kcsan_disable_current(); /* disable to generate warning */
                WARN(1, "Unbalanced %s()", __func__);
                kcsan_enable_current();
        }
}
EXPORT_SYMBOL(kcsan_enable_current);

void kcsan_enable_current_nowarn(void)
{
        if (get_ctx()->disable_count-- == 0)
                kcsan_disable_current();
}
EXPORT_SYMBOL(kcsan_enable_current_nowarn);

void kcsan_nestable_atomic_begin(void)
{
        /*
         * Do *not* check and warn if we are in a flat atomic region: nestable
         * and flat atomic regions are independent from each other.
         * See include/linux/kcsan.h: struct kcsan_ctx comments for more
         * comments.
         */

        ++get_ctx()->atomic_nest_count;
}
EXPORT_SYMBOL(kcsan_nestable_atomic_begin);

void kcsan_nestable_atomic_end(void)
{
        if (get_ctx()->atomic_nest_count-- == 0) {
                /*
                 * Warn if kcsan_nestable_atomic_end() calls are unbalanced with
                 * kcsan_nestable_atomic_begin() calls, which causes
                 * atomic_nest_count to become negative and should not happen.
                 */
                kcsan_nestable_atomic_begin(); /* restore to 0 */
                kcsan_disable_current(); /* disable to generate warning */
                WARN(1, "Unbalanced %s()", __func__);
                kcsan_enable_current();
        }
}
EXPORT_SYMBOL(kcsan_nestable_atomic_end);

void kcsan_flat_atomic_begin(void)
{
        get_ctx()->in_flat_atomic = true;
}
EXPORT_SYMBOL(kcsan_flat_atomic_begin);

void kcsan_flat_atomic_end(void)
{
        get_ctx()->in_flat_atomic = false;
}
EXPORT_SYMBOL(kcsan_flat_atomic_end);

void kcsan_atomic_next(int n)
{
        get_ctx()->atomic_next = n;
}
EXPORT_SYMBOL(kcsan_atomic_next);
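
/*
 * Usage sketch ('shared_a' and 'shared_b' stand for any shared variables): a
 * caller that knows its next accesses are protected by other means can do
 *
 *      kcsan_atomic_next(2);
 *      a = shared_a;   // is_atomic() returns true, atomic_next decremented
 *      b = shared_b;   // likewise; subsequent accesses are checked again
 *
 * The seqlock reader side, for example, uses this mechanism to mark its read
 * critical sections.
 */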

void kcsan_set_access_mask(unsigned long mask)
{
        get_ctx()->access_mask = mask;
}
EXPORT_SYMBOL(kcsan_set_access_mask);
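
/*
 * Example (a sketch modeled on ASSERT_EXCLUSIVE_BITS() from
 * include/linux/kcsan-checks.h; 'flags' is a hypothetical variable):
 *
 *      kcsan_set_access_mask(0x1);
 *      __kcsan_check_access(&flags, sizeof(flags), KCSAN_ACCESS_ASSERT);
 *      kcsan_set_access_mask(0);
 *
 * With the mask set, only concurrent changes to bit 0 of 'flags' are
 * reported; races on the remaining ~mask bits are deliberately ignored.
 */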

struct kcsan_scoped_access *
kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
                          struct kcsan_scoped_access *sa)
{
        struct kcsan_ctx *ctx = get_ctx();

        check_access(ptr, size, type, _RET_IP_);

        ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */

        INIT_LIST_HEAD(&sa->list);
        sa->ptr = ptr;
        sa->size = size;
        sa->type = type;
        sa->ip = _RET_IP_;

        if (!ctx->scoped_accesses.prev) /* Lazy initialize list head. */
                INIT_LIST_HEAD(&ctx->scoped_accesses);
        list_add(&sa->list, &ctx->scoped_accesses);

        ctx->disable_count--;
        return sa;
}
EXPORT_SYMBOL(kcsan_begin_scoped_access);

void kcsan_end_scoped_access(struct kcsan_scoped_access *sa)
{
        struct kcsan_ctx *ctx = get_ctx();

        if (WARN(!ctx->scoped_accesses.prev, "Unbalanced %s()?", __func__))
                return;

        ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */

        list_del(&sa->list);
        if (list_empty(&ctx->scoped_accesses))
                /*
                 * Ensure we do not enter kcsan_check_scoped_accesses()
                 * slow-path if unnecessary, and avoids requiring list_empty()
                 * in the fast-path (to avoid a READ_ONCE() and potential
                 * uaccess warning).
                 */
                ctx->scoped_accesses.prev = NULL;

        ctx->disable_count--;

        check_access(sa->ptr, sa->size, sa->type, sa->ip);
}
EXPORT_SYMBOL(kcsan_end_scoped_access);
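
/*
 * Usage sketch for the scoped-access API above ('obj' is a hypothetical
 * variable; begin/end must be balanced):
 *
 *      struct kcsan_scoped_access sa;
 *
 *      kcsan_begin_scoped_access(&obj, sizeof(obj), KCSAN_ACCESS_ASSERT, &sa);
 *      ... 'obj' is re-checked whenever this context enters the runtime ...
 *      kcsan_end_scoped_access(&sa);
 *
 * The ASSERT_EXCLUSIVE_*_SCOPED() macros in include/linux/kcsan-checks.h
 * wrap this pattern.
 */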

void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
{
        check_access(ptr, size, type, _RET_IP_);
}
EXPORT_SYMBOL(__kcsan_check_access);
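
/*
 * This is the entry point for the explicit checking macros; for example,
 * ASSERT_EXCLUSIVE_WRITER(var) from include/linux/kcsan-checks.h boils down
 * to roughly (a sketch; the exact type flags are defined by the macros):
 *
 *      __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT);
 */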

#define DEFINE_MEMORY_BARRIER(name, order_before_cond)                          \
        void __kcsan_##name(void)                                               \
        {                                                                       \
                struct kcsan_scoped_access *sa = get_reorder_access(get_ctx()); \
                if (!sa)                                                        \
                        return;                                                 \
                if (order_before_cond)                                          \
                        sa->size = 0;                                           \
        }                                                                       \
        EXPORT_SYMBOL(__kcsan_##name)

DEFINE_MEMORY_BARRIER(mb, true);
DEFINE_MEMORY_BARRIER(wmb, sa->type & (KCSAN_ACCESS_WRITE | KCSAN_ACCESS_COMPOUND));
DEFINE_MEMORY_BARRIER(rmb, !(sa->type & KCSAN_ACCESS_WRITE) || (sa->type & KCSAN_ACCESS_COMPOUND));
DEFINE_MEMORY_BARRIER(release, true);
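
/*
 * The order_before_cond arguments model which prior accesses each barrier
 * orders: __kcsan_mb() and __kcsan_release() invalidate any reorder_access
 * (by zeroing its size), __kcsan_wmb() only invalidates writes (including
 * compound read-writes), and __kcsan_rmb() only reads (including compound
 * read-writes). With CONFIG_KCSAN_WEAK_MEMORY=y, the kernel's barrier macros
 * (smp_mb() and friends) are expected to reach these via the kcsan_mb() etc.
 * wrappers, so an access is never simulated as reordered past a barrier that
 * would have ordered it.
 */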

/*
 * KCSAN uses the same instrumentation that is emitted by supported compilers
 * for ThreadSanitizer (TSAN).
 *
 * When enabled, the compiler emits instrumentation calls (the functions
 * prefixed with "__tsan" below) for all loads and stores that it generated;
 * inline asm is not instrumented.
 *
 * Note that not all supported compiler versions distinguish aligned/unaligned
 * accesses, but e.g. recent versions of Clang do. We simply alias the unaligned
 * version to the generic version, which can handle both.
 */

#define DEFINE_TSAN_READ_WRITE(size)                                           \
        void __tsan_read##size(void *ptr);                                     \
        void __tsan_read##size(void *ptr)                                      \
        {                                                                      \
                check_access(ptr, size, 0, _RET_IP_);                          \
        }                                                                      \
        EXPORT_SYMBOL(__tsan_read##size);                                      \
        void __tsan_unaligned_read##size(void *ptr)                            \
                __alias(__tsan_read##size);                                    \
        EXPORT_SYMBOL(__tsan_unaligned_read##size);                            \
        void __tsan_write##size(void *ptr);                                    \
        void __tsan_write##size(void *ptr)                                     \
        {                                                                      \
                check_access(ptr, size, KCSAN_ACCESS_WRITE, _RET_IP_);         \
        }                                                                      \
        EXPORT_SYMBOL(__tsan_write##size);                                     \
        void __tsan_unaligned_write##size(void *ptr)                           \
                __alias(__tsan_write##size);                                   \
        EXPORT_SYMBOL(__tsan_unaligned_write##size);                           \
        void __tsan_read_write##size(void *ptr);                               \
        void __tsan_read_write##size(void *ptr)                                \
        {                                                                      \
                check_access(ptr, size,                                        \
                             KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE,       \
                             _RET_IP_);                                        \
        }                                                                      \
        EXPORT_SYMBOL(__tsan_read_write##size);                                \
        void __tsan_unaligned_read_write##size(void *ptr)                      \
                __alias(__tsan_read_write##size);                              \
        EXPORT_SYMBOL(__tsan_unaligned_read_write##size)

DEFINE_TSAN_READ_WRITE(1);
DEFINE_TSAN_READ_WRITE(2);
DEFINE_TSAN_READ_WRITE(4);
DEFINE_TSAN_READ_WRITE(8);
DEFINE_TSAN_READ_WRITE(16);
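
/*
 * Illustration of the emitted instrumentation (a sketch, assuming a compiler
 * configured to distinguish compound read-writes): for a 4-byte increment
 *
 *      x++;
 *
 * the compiler emits roughly "__tsan_read_write4(&x);" before the
 * load-add-store sequence, which reaches check_access() above with
 * KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE.
 */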
1027dfd402a4SMarco Elver
10289dd979baSMarco Elver void __tsan_read_range(void *ptr, size_t size);
__tsan_read_range(void * ptr,size_t size)1029dfd402a4SMarco Elver void __tsan_read_range(void *ptr, size_t size)
1030dfd402a4SMarco Elver {
103155a55fecSMarco Elver check_access(ptr, size, 0, _RET_IP_);
1032dfd402a4SMarco Elver }
1033dfd402a4SMarco Elver EXPORT_SYMBOL(__tsan_read_range);
1034dfd402a4SMarco Elver
10359dd979baSMarco Elver void __tsan_write_range(void *ptr, size_t size);
1036dfd402a4SMarco Elver void __tsan_write_range(void *ptr, size_t size)
1037dfd402a4SMarco Elver {
103855a55fecSMarco Elver check_access(ptr, size, KCSAN_ACCESS_WRITE, _RET_IP_);
1039dfd402a4SMarco Elver }
1040dfd402a4SMarco Elver EXPORT_SYMBOL(__tsan_write_range);
1041dfd402a4SMarco Elver
1042dfd402a4SMarco Elver /*
104375d75b7aSMarco Elver * Use of explicit volatile is generally disallowed [1]; however, volatile is
104475d75b7aSMarco Elver * still used in various concurrent contexts, whether in low-level
104575d75b7aSMarco Elver * synchronization primitives or for legacy reasons.
104675d75b7aSMarco Elver * [1] https://lwn.net/Articles/233479/
104775d75b7aSMarco Elver *
104875d75b7aSMarco Elver * We only consider volatile accesses atomic if they are aligned and would pass
104975d75b7aSMarco Elver * the size-check of compiletime_assert_rwonce_type().
105075d75b7aSMarco Elver */
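/*
 * Sketch of the effect (assuming a naturally aligned 4-byte object): the
 * compiler instruments
 *
 *	val = *(volatile int *)p;	// emits: __tsan_volatile_read4(p);
 *
 * and because sizeof(int) <= sizeof(long long) and p is aligned, the access
 * is treated as atomic (KCSAN_ACCESS_ATOMIC), matching what READ_ONCE()
 * promises.
 */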
105175d75b7aSMarco Elver #define DEFINE_TSAN_VOLATILE_READ_WRITE(size) \
10529dd979baSMarco Elver void __tsan_volatile_read##size(void *ptr); \
105375d75b7aSMarco Elver void __tsan_volatile_read##size(void *ptr) \
105475d75b7aSMarco Elver { \
105575d75b7aSMarco Elver const bool is_atomic = size <= sizeof(long long) && \
105675d75b7aSMarco Elver IS_ALIGNED((unsigned long)ptr, size); \
105775d75b7aSMarco Elver if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic) \
105875d75b7aSMarco Elver return; \
105955a55fecSMarco Elver check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0, \
106055a55fecSMarco Elver _RET_IP_); \
106175d75b7aSMarco Elver } \
106275d75b7aSMarco Elver EXPORT_SYMBOL(__tsan_volatile_read##size); \
106375d75b7aSMarco Elver void __tsan_unaligned_volatile_read##size(void *ptr) \
106475d75b7aSMarco Elver __alias(__tsan_volatile_read##size); \
106575d75b7aSMarco Elver EXPORT_SYMBOL(__tsan_unaligned_volatile_read##size); \
10669dd979baSMarco Elver void __tsan_volatile_write##size(void *ptr); \
106775d75b7aSMarco Elver void __tsan_volatile_write##size(void *ptr) \
106875d75b7aSMarco Elver { \
106975d75b7aSMarco Elver const bool is_atomic = size <= sizeof(long long) && \
107075d75b7aSMarco Elver IS_ALIGNED((unsigned long)ptr, size); \
107175d75b7aSMarco Elver if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic) \
107275d75b7aSMarco Elver return; \
107375d75b7aSMarco Elver check_access(ptr, size, \
107475d75b7aSMarco Elver KCSAN_ACCESS_WRITE | \
107555a55fecSMarco Elver (is_atomic ? KCSAN_ACCESS_ATOMIC : 0), \
107655a55fecSMarco Elver _RET_IP_); \
107775d75b7aSMarco Elver } \
107875d75b7aSMarco Elver EXPORT_SYMBOL(__tsan_volatile_write##size); \
107975d75b7aSMarco Elver void __tsan_unaligned_volatile_write##size(void *ptr) \
108075d75b7aSMarco Elver __alias(__tsan_volatile_write##size); \
108175d75b7aSMarco Elver EXPORT_SYMBOL(__tsan_unaligned_volatile_write##size)
108275d75b7aSMarco Elver
108375d75b7aSMarco Elver DEFINE_TSAN_VOLATILE_READ_WRITE(1);
108475d75b7aSMarco Elver DEFINE_TSAN_VOLATILE_READ_WRITE(2);
108575d75b7aSMarco Elver DEFINE_TSAN_VOLATILE_READ_WRITE(4);
108675d75b7aSMarco Elver DEFINE_TSAN_VOLATILE_READ_WRITE(8);
108775d75b7aSMarco Elver DEFINE_TSAN_VOLATILE_READ_WRITE(16);
108875d75b7aSMarco Elver
108975d75b7aSMarco Elver /*
109069562e49SMarco Elver * Function entry and exit are used to determine the validity of reorder_access.
109169562e49SMarco Elver * Reordering of the access ends at the end of the function scope where the
109269562e49SMarco Elver * access happened. This is done for two reasons:
109369562e49SMarco Elver *
109469562e49SMarco Elver * 1. It artificially limits the scope in which missing barriers are detected.
109569562e49SMarco Elver *    This minimizes false positives due to uninstrumented functions that
109669562e49SMarco Elver *    contain the required barriers, which KCSAN cannot observe.
109769562e49SMarco Elver *
109869562e49SMarco Elver * 2. Simplifies generating the stack trace of the access.
1099dfd402a4SMarco Elver */
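/*
 * Illustrative (hypothetical caller): given
 *
 *	void writer(struct foo *f)
 *	{			// __tsan_func_entry(): stack depth + 1
 *		f->x = 1;	// may become the reorder_access
 *	}			// __tsan_func_exit(): reordering of f->x ends here
 *
 * any simulated reordering of the write to f->x is bounded by writer()'s
 * scope.
 */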
11009dd979baSMarco Elver void __tsan_func_entry(void *call_pc);
110169562e49SMarco Elver noinline void __tsan_func_entry(void *call_pc)
1102dfd402a4SMarco Elver {
110369562e49SMarco Elver if (!IS_ENABLED(CONFIG_KCSAN_WEAK_MEMORY))
110469562e49SMarco Elver return;
110569562e49SMarco Elver
110669562e49SMarco Elver add_kcsan_stack_depth(1);
1107dfd402a4SMarco Elver }
1108dfd402a4SMarco Elver EXPORT_SYMBOL(__tsan_func_entry);
110969562e49SMarco Elver
11109dd979baSMarco Elver void __tsan_func_exit(void);
111169562e49SMarco Elver noinline void __tsan_func_exit(void)
1112dfd402a4SMarco Elver {
111369562e49SMarco Elver struct kcsan_scoped_access *reorder_access;
111469562e49SMarco Elver
111569562e49SMarco Elver if (!IS_ENABLED(CONFIG_KCSAN_WEAK_MEMORY))
111669562e49SMarco Elver return;
111769562e49SMarco Elver
111869562e49SMarco Elver reorder_access = get_reorder_access(get_ctx());
111969562e49SMarco Elver if (!reorder_access)
112069562e49SMarco Elver goto out;
112169562e49SMarco Elver
112269562e49SMarco Elver if (get_kcsan_stack_depth() <= reorder_access->stack_depth) {
112369562e49SMarco Elver /*
112469562e49SMarco Elver * Access check to catch cases where a write without a barrier
112569562e49SMarco Elver * (a supposed release) was the last access in a function: because
112669562e49SMarco Elver * instrumentation is inserted before the real access, a data
112769562e49SMarco Elver * race due to the write giving up a critical section would only
112869562e49SMarco Elver * be caught if we perform the conflicting access afterwards.
112969562e49SMarco Elver */
113069562e49SMarco Elver check_access(reorder_access->ptr, reorder_access->size,
113169562e49SMarco Elver reorder_access->type, reorder_access->ip);
113269562e49SMarco Elver reorder_access->size = 0;
113369562e49SMarco Elver reorder_access->stack_depth = INT_MIN;
113469562e49SMarco Elver }
113569562e49SMarco Elver out:
113669562e49SMarco Elver add_kcsan_stack_depth(-1);
1137dfd402a4SMarco Elver }
1138dfd402a4SMarco Elver EXPORT_SYMBOL(__tsan_func_exit);
113969562e49SMarco Elver
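/*
 * The TSAN ABI requires an initializer hook; KCSAN sets itself up separately
 * (see kcsan_init()), so this is deliberately a no-op stub.
 */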
11409dd979baSMarco Elver void __tsan_init(void);
1141dfd402a4SMarco Elver void __tsan_init(void)
1142dfd402a4SMarco Elver {
1143dfd402a4SMarco Elver }
1144dfd402a4SMarco Elver EXPORT_SYMBOL(__tsan_init);
11450f8ad5f2SMarco Elver
11460f8ad5f2SMarco Elver /*
11470f8ad5f2SMarco Elver * Instrumentation for atomic builtins (__atomic_*, __sync_*).
11480f8ad5f2SMarco Elver *
11490f8ad5f2SMarco Elver * Normal kernel code _should not_ be using them directly, but some
11500f8ad5f2SMarco Elver * architectures may implement some or all atomics using the compilers'
11510f8ad5f2SMarco Elver * builtins.
11520f8ad5f2SMarco Elver *
11530f8ad5f2SMarco Elver * Note: If an architecture decides to fully implement atomics using the
11540f8ad5f2SMarco Elver * builtins, implementing the ARCH_ATOMIC interface (to get instrumentation
11550f8ad5f2SMarco Elver * via atomic-instrumented) is no longer necessary, because the builtins are
11560f8ad5f2SMarco Elver * already implicitly instrumented by KCSAN (and KASAN, etc.).
11570f8ad5f2SMarco Elver *
11580f8ad5f2SMarco Elver * TSAN instrumentation replaces atomic accesses with calls to any of the below
11590f8ad5f2SMarco Elver * functions, whose job is to also execute the operation itself.
11600f8ad5f2SMarco Elver */
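/*
 * For example (illustrative), instrumented code performing
 *
 *	__atomic_fetch_add(&x, 1, __ATOMIC_RELEASE);
 *
 * is redirected to __tsan_atomic32_fetch_add(&x, 1, __ATOMIC_RELEASE), which
 * checks the access as an atomic compound write (unless
 * CONFIG_KCSAN_IGNORE_ATOMICS) and then executes the real __atomic_fetch_add().
 */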
11610f8ad5f2SMarco Elver
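/*
 * Memory orders that imply a release (RELEASE, ACQ_REL, SEQ_CST) must also be
 * modeled as such for weak-memory modeling; delegate to __kcsan_release().
 */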
11620b8b0830SMarco Elver static __always_inline void kcsan_atomic_builtin_memorder(int memorder)
11630b8b0830SMarco Elver {
11640b8b0830SMarco Elver if (memorder == __ATOMIC_RELEASE ||
11650b8b0830SMarco Elver memorder == __ATOMIC_SEQ_CST ||
11660b8b0830SMarco Elver memorder == __ATOMIC_ACQ_REL)
11670b8b0830SMarco Elver __kcsan_release();
11680b8b0830SMarco Elver }
11690b8b0830SMarco Elver
11700f8ad5f2SMarco Elver #define DEFINE_TSAN_ATOMIC_LOAD_STORE(bits) \
11710f8ad5f2SMarco Elver u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder); \
11720f8ad5f2SMarco Elver u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder) \
11730f8ad5f2SMarco Elver { \
11740b8b0830SMarco Elver kcsan_atomic_builtin_memorder(memorder); \
11759d1335ccSMarco Elver if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
117655a55fecSMarco Elver check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC, _RET_IP_); \
11779d1335ccSMarco Elver } \
11780f8ad5f2SMarco Elver return __atomic_load_n(ptr, memorder); \
11790f8ad5f2SMarco Elver } \
11800f8ad5f2SMarco Elver EXPORT_SYMBOL(__tsan_atomic##bits##_load); \
11810f8ad5f2SMarco Elver void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder); \
11820f8ad5f2SMarco Elver void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder) \
11830f8ad5f2SMarco Elver { \
11840b8b0830SMarco Elver kcsan_atomic_builtin_memorder(memorder); \
11859d1335ccSMarco Elver if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
11869d1335ccSMarco Elver check_access(ptr, bits / BITS_PER_BYTE, \
118755a55fecSMarco Elver KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC, _RET_IP_); \
11889d1335ccSMarco Elver } \
11890f8ad5f2SMarco Elver __atomic_store_n(ptr, v, memorder); \
11900f8ad5f2SMarco Elver } \
11910f8ad5f2SMarco Elver EXPORT_SYMBOL(__tsan_atomic##bits##_store)
11920f8ad5f2SMarco Elver
11930f8ad5f2SMarco Elver #define DEFINE_TSAN_ATOMIC_RMW(op, bits, suffix) \
11940f8ad5f2SMarco Elver u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder); \
11950f8ad5f2SMarco Elver u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder) \
11960f8ad5f2SMarco Elver { \
11970b8b0830SMarco Elver kcsan_atomic_builtin_memorder(memorder); \
11989d1335ccSMarco Elver if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
119914e2ac8dSMarco Elver check_access(ptr, bits / BITS_PER_BYTE, \
12009d1335ccSMarco Elver KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
120155a55fecSMarco Elver KCSAN_ACCESS_ATOMIC, _RET_IP_); \
12029d1335ccSMarco Elver } \
12030f8ad5f2SMarco Elver return __atomic_##op##suffix(ptr, v, memorder); \
12040f8ad5f2SMarco Elver } \
12050f8ad5f2SMarco Elver EXPORT_SYMBOL(__tsan_atomic##bits##_##op)
12060f8ad5f2SMarco Elver
12070f8ad5f2SMarco Elver /*
12080f8ad5f2SMarco Elver * Note: CAS operations are always classified as write, even if they
12090f8ad5f2SMarco Elver * fail. We cannot perform check_access() after the write, as doing so might
12100f8ad5f2SMarco Elver * lead to false positives, in cases such as:
12110f8ad5f2SMarco Elver *
12120f8ad5f2SMarco Elver * T0: __atomic_compare_exchange_n(&p->flag, &old, 1, ...)
12130f8ad5f2SMarco Elver *
12140f8ad5f2SMarco Elver * T1: if (__atomic_load_n(&p->flag, ...)) {
12150f8ad5f2SMarco Elver * modify *p;
12160f8ad5f2SMarco Elver * p->flag = 0;
12170f8ad5f2SMarco Elver * }
12180f8ad5f2SMarco Elver *
12190f8ad5f2SMarco Elver * The only downside is that, if there are 3 threads, with one CAS that
12200f8ad5f2SMarco Elver * succeeds, another CAS that fails, and an unmarked racing operation, we may
12210f8ad5f2SMarco Elver * point at the wrong CAS as the source of the race. However, if we assume that
12220f8ad5f2SMarco Elver * all CAS can succeed in some other execution, the data race is still valid.
12230f8ad5f2SMarco Elver */
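/*
 * Concretely (illustrative): even a failing strong CAS such as
 *
 *	u32 old = 0;
 *	__tsan_atomic32_compare_exchange_strong(&x, &old, 1, __ATOMIC_RELAXED,
 *						__ATOMIC_RELAXED);
 *
 * is checked with KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE |
 * KCSAN_ACCESS_ATOMIC, i.e. reported as a write, since the check precedes the
 * operation.
 */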
12240f8ad5f2SMarco Elver #define DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strength, weak) \
12250f8ad5f2SMarco Elver int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \
12260f8ad5f2SMarco Elver u##bits val, int mo, int fail_mo); \
12270f8ad5f2SMarco Elver int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \
12280f8ad5f2SMarco Elver u##bits val, int mo, int fail_mo) \
12290f8ad5f2SMarco Elver { \
12300b8b0830SMarco Elver kcsan_atomic_builtin_memorder(mo); \
12319d1335ccSMarco Elver if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
123214e2ac8dSMarco Elver check_access(ptr, bits / BITS_PER_BYTE, \
12339d1335ccSMarco Elver KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
123455a55fecSMarco Elver KCSAN_ACCESS_ATOMIC, _RET_IP_); \
12359d1335ccSMarco Elver } \
12360f8ad5f2SMarco Elver return __atomic_compare_exchange_n(ptr, exp, val, weak, mo, fail_mo); \
12370f8ad5f2SMarco Elver } \
12380f8ad5f2SMarco Elver EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_##strength)
12390f8ad5f2SMarco Elver
12400f8ad5f2SMarco Elver #define DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits) \
12410f8ad5f2SMarco Elver u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
12420f8ad5f2SMarco Elver int mo, int fail_mo); \
12430f8ad5f2SMarco Elver u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
12440f8ad5f2SMarco Elver int mo, int fail_mo) \
12450f8ad5f2SMarco Elver { \
12460b8b0830SMarco Elver kcsan_atomic_builtin_memorder(mo); \
12479d1335ccSMarco Elver if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
124814e2ac8dSMarco Elver check_access(ptr, bits / BITS_PER_BYTE, \
12499d1335ccSMarco Elver KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
125055a55fecSMarco Elver KCSAN_ACCESS_ATOMIC, _RET_IP_); \
12519d1335ccSMarco Elver } \
12520f8ad5f2SMarco Elver __atomic_compare_exchange_n(ptr, &exp, val, 0, mo, fail_mo); \
12530f8ad5f2SMarco Elver return exp; \
12540f8ad5f2SMarco Elver } \
12550f8ad5f2SMarco Elver EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_val)
12560f8ad5f2SMarco Elver
12570f8ad5f2SMarco Elver #define DEFINE_TSAN_ATOMIC_OPS(bits) \
12580f8ad5f2SMarco Elver DEFINE_TSAN_ATOMIC_LOAD_STORE(bits); \
12590f8ad5f2SMarco Elver DEFINE_TSAN_ATOMIC_RMW(exchange, bits, _n); \
12600f8ad5f2SMarco Elver DEFINE_TSAN_ATOMIC_RMW(fetch_add, bits, ); \
12610f8ad5f2SMarco Elver DEFINE_TSAN_ATOMIC_RMW(fetch_sub, bits, ); \
12620f8ad5f2SMarco Elver DEFINE_TSAN_ATOMIC_RMW(fetch_and, bits, ); \
12630f8ad5f2SMarco Elver DEFINE_TSAN_ATOMIC_RMW(fetch_or, bits, ); \
12640f8ad5f2SMarco Elver DEFINE_TSAN_ATOMIC_RMW(fetch_xor, bits, ); \
12650f8ad5f2SMarco Elver DEFINE_TSAN_ATOMIC_RMW(fetch_nand, bits, ); \
12660f8ad5f2SMarco Elver DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strong, 0); \
12670f8ad5f2SMarco Elver DEFINE_TSAN_ATOMIC_CMPXCHG(bits, weak, 1); \
12680f8ad5f2SMarco Elver DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits)
12690f8ad5f2SMarco Elver
12700f8ad5f2SMarco Elver DEFINE_TSAN_ATOMIC_OPS(8);
12710f8ad5f2SMarco Elver DEFINE_TSAN_ATOMIC_OPS(16);
12720f8ad5f2SMarco Elver DEFINE_TSAN_ATOMIC_OPS(32);
1273*353e7300SChristophe Leroy #ifdef CONFIG_64BIT
12740f8ad5f2SMarco Elver DEFINE_TSAN_ATOMIC_OPS(64);
1275*353e7300SChristophe Leroy #endif
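/*
 * Note: 32-bit architectures cannot generally assume the 64-bit atomic
 * builtins are available, hence the CONFIG_64BIT guard above.
 */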
12760f8ad5f2SMarco Elver
12770f8ad5f2SMarco Elver void __tsan_atomic_thread_fence(int memorder);
12780f8ad5f2SMarco Elver void __tsan_atomic_thread_fence(int memorder)
12790f8ad5f2SMarco Elver {
12800b8b0830SMarco Elver kcsan_atomic_builtin_memorder(memorder);
12810f8ad5f2SMarco Elver __atomic_thread_fence(memorder);
12820f8ad5f2SMarco Elver }
12830f8ad5f2SMarco Elver EXPORT_SYMBOL(__tsan_atomic_thread_fence);
12840f8ad5f2SMarco Elver
12850b8b0830SMarco Elver /*
12860b8b0830SMarco Elver * In instrumented files, we emit instrumentation for barriers by mapping the
12870b8b0830SMarco Elver * kernel barriers to an __atomic_signal_fence(), which is interpreted specially
12880b8b0830SMarco Elver * and otherwise has no relation to a real __atomic_signal_fence(). No known
12890b8b0830SMarco Elver * kernel code uses __atomic_signal_fence().
12900b8b0830SMarco Elver *
12910b8b0830SMarco Elver * Since the fsanitize=thread instrumentation handles __atomic_signal_fence()
12920b8b0830SMarco Elver * calls, which are turned into calls to __tsan_atomic_signal_fence(), such
12930b8b0830SMarco Elver * instrumentation can be disabled via the __no_kcsan function attribute
12940b8b0830SMarco Elver * (whereas explicit calls could not be). When __no_kcsan is requested,
12950b8b0830SMarco Elver * __atomic_signal_fence() generates no code.
12960b8b0830SMarco Elver *
12970b8b0830SMarco Elver * Note: Using __atomic_signal_fence() with KCSAN enabled potentially limits
12980b8b0830SMarco Elver * the compiler's ability to reorder operations; however, if barriers were
12990b8b0830SMarco Elver * instrumented with explicit calls (without LTO), the compiler could not
13000b8b0830SMarco Elver * optimize much anyway. A hypothetical architecture using
13010b8b0830SMarco Elver * __atomic_signal_fence() in normal code would merely incur KCSAN false negatives.
13020b8b0830SMarco Elver */
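/*
 * Sketch of the mapping (illustrative): in an instrumented file,
 *
 *	smp_mb();
 *
 * is emitted as __atomic_signal_fence(__KCSAN_BARRIER_TO_SIGNAL_FENCE_mb),
 * which -fsanitize=thread turns into a call to
 * __tsan_atomic_signal_fence(__KCSAN_BARRIER_TO_SIGNAL_FENCE_mb) below, which
 * in turn models the barrier via __kcsan_mb().
 */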
13030f8ad5f2SMarco Elver void __tsan_atomic_signal_fence(int memorder);
13040b8b0830SMarco Elver noinline void __tsan_atomic_signal_fence(int memorder)
13050b8b0830SMarco Elver {
13060b8b0830SMarco Elver switch (memorder) {
13070b8b0830SMarco Elver case __KCSAN_BARRIER_TO_SIGNAL_FENCE_mb:
13080b8b0830SMarco Elver __kcsan_mb();
13090b8b0830SMarco Elver break;
13100b8b0830SMarco Elver case __KCSAN_BARRIER_TO_SIGNAL_FENCE_wmb:
13110b8b0830SMarco Elver __kcsan_wmb();
13120b8b0830SMarco Elver break;
13130b8b0830SMarco Elver case __KCSAN_BARRIER_TO_SIGNAL_FENCE_rmb:
13140b8b0830SMarco Elver __kcsan_rmb();
13150b8b0830SMarco Elver break;
13160b8b0830SMarco Elver case __KCSAN_BARRIER_TO_SIGNAL_FENCE_release:
13170b8b0830SMarco Elver __kcsan_release();
13180b8b0830SMarco Elver break;
13190b8b0830SMarco Elver default:
13200b8b0830SMarco Elver break;
13210b8b0830SMarco Elver }
13220b8b0830SMarco Elver }
13230f8ad5f2SMarco Elver EXPORT_SYMBOL(__tsan_atomic_signal_fence);
13247c201739SMarco Elver
13257c201739SMarco Elver #ifdef __HAVE_ARCH_MEMSET
13267c201739SMarco Elver void *__tsan_memset(void *s, int c, size_t count);
13277c201739SMarco Elver noinline void *__tsan_memset(void *s, int c, size_t count)
13287c201739SMarco Elver {
13297c201739SMarco Elver /*
13307c201739SMarco Elver * Instead of skipping watchpoint setup when the accessed size exceeds
13317c201739SMarco Elver * MAX_ENCODABLE_SIZE, truncate the checked size to MAX_ENCODABLE_SIZE.
13327c201739SMarco Elver */
13337c201739SMarco Elver size_t check_len = min_t(size_t, count, MAX_ENCODABLE_SIZE);
13347c201739SMarco Elver
13357c201739SMarco Elver check_access(s, check_len, KCSAN_ACCESS_WRITE, _RET_IP_);
13367c201739SMarco Elver return memset(s, c, count);
13377c201739SMarco Elver }
13387c201739SMarco Elver #else
13397c201739SMarco Elver void *__tsan_memset(void *s, int c, size_t count) __alias(memset);
13407c201739SMarco Elver #endif
13417c201739SMarco Elver EXPORT_SYMBOL(__tsan_memset);
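/*
 * Example of the truncation (illustrative): __tsan_memset(buf, 0, PAGE_SIZE)
 * still clears the entire buffer, but only the first min(PAGE_SIZE,
 * MAX_ENCODABLE_SIZE) bytes are checked for racing accesses. The memmove()
 * and memcpy() wrappers below apply the same truncation.
 */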
13427c201739SMarco Elver
13437c201739SMarco Elver #ifdef __HAVE_ARCH_MEMMOVE
13447c201739SMarco Elver void *__tsan_memmove(void *dst, const void *src, size_t len);
13457c201739SMarco Elver noinline void *__tsan_memmove(void *dst, const void *src, size_t len)
13467c201739SMarco Elver {
13477c201739SMarco Elver size_t check_len = min_t(size_t, len, MAX_ENCODABLE_SIZE);
13487c201739SMarco Elver
13497c201739SMarco Elver check_access(dst, check_len, KCSAN_ACCESS_WRITE, _RET_IP_);
13507c201739SMarco Elver check_access(src, check_len, 0, _RET_IP_);
13517c201739SMarco Elver return memmove(dst, src, len);
13527c201739SMarco Elver }
13537c201739SMarco Elver #else
13547c201739SMarco Elver void *__tsan_memmove(void *dst, const void *src, size_t len) __alias(memmove);
13557c201739SMarco Elver #endif
13567c201739SMarco Elver EXPORT_SYMBOL(__tsan_memmove);
13577c201739SMarco Elver
13587c201739SMarco Elver #ifdef __HAVE_ARCH_MEMCPY
13597c201739SMarco Elver void *__tsan_memcpy(void *dst, const void *src, size_t len);
13607c201739SMarco Elver noinline void *__tsan_memcpy(void *dst, const void *src, size_t len)
13617c201739SMarco Elver {
13627c201739SMarco Elver size_t check_len = min_t(size_t, len, MAX_ENCODABLE_SIZE);
13637c201739SMarco Elver
13647c201739SMarco Elver check_access(dst, check_len, KCSAN_ACCESS_WRITE, _RET_IP_);
13657c201739SMarco Elver check_access(src, check_len, 0, _RET_IP_);
13667c201739SMarco Elver return memcpy(dst, src, len);
13677c201739SMarco Elver }
13687c201739SMarco Elver #else
13697c201739SMarco Elver void *__tsan_memcpy(void *dst, const void *src, size_t len) __alias(memcpy);
13707c201739SMarco Elver #endif
13717c201739SMarco Elver EXPORT_SYMBOL(__tsan_memcpy);