/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
/*
 * rseq.h
 *
 * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef RSEQ_H
#define RSEQ_H

#include <stdint.h>
#include <stdbool.h>
#include <pthread.h>
#include <signal.h>
#include <sched.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <assert.h>
#include "rseq-abi.h"
#include "compiler.h"

#ifndef rseq_sizeof_field
#define rseq_sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
#endif

#ifndef rseq_offsetofend
#define rseq_offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + rseq_sizeof_field(TYPE, MEMBER))
#endif

/*
 * Empty code injection macros; override them when testing.
 * Note that the ASM injection macros must be fully reentrant
 * (e.g. they must not modify the stack).
 */
#ifndef RSEQ_INJECT_ASM
#define RSEQ_INJECT_ASM(n)
#endif

#ifndef RSEQ_INJECT_C
#define RSEQ_INJECT_C(n)
#endif

#ifndef RSEQ_INJECT_INPUT
#define RSEQ_INJECT_INPUT
#endif

#ifndef RSEQ_INJECT_CLOBBER
#define RSEQ_INJECT_CLOBBER
#endif

#ifndef RSEQ_INJECT_FAILED
#define RSEQ_INJECT_FAILED
#endif

#include "rseq-thread-pointer.h"

/* Offset from the thread pointer to the rseq area. */
extern ptrdiff_t rseq_offset;

/*
 * Size of the registered rseq area. 0 if the registration was
 * unsuccessful.
 */
extern unsigned int rseq_size;

/* Flags used during rseq registration. */
extern unsigned int rseq_flags;

enum rseq_mo {
	RSEQ_MO_RELAXED = 0,
	RSEQ_MO_CONSUME = 1,	/* Unused */
	RSEQ_MO_ACQUIRE = 2,	/* Unused */
	RSEQ_MO_RELEASE = 3,
	RSEQ_MO_ACQ_REL = 4,	/* Unused */
	RSEQ_MO_SEQ_CST = 5,	/* Unused */
};

enum rseq_percpu_mode {
	RSEQ_PERCPU_CPU_ID = 0,
	RSEQ_PERCPU_MM_CID = 1,
};

static inline struct rseq_abi *rseq_get_abi(void)
{
	return (struct rseq_abi *) ((uintptr_t) rseq_thread_pointer() + rseq_offset);
}

#define rseq_likely(x)		__builtin_expect(!!(x), 1)
#define rseq_unlikely(x)	__builtin_expect(!!(x), 0)
#define rseq_barrier()		__asm__ __volatile__("" : : : "memory")

#define RSEQ_ACCESS_ONCE(x)	(*(__volatile__ __typeof__(x) *)&(x))
#define RSEQ_WRITE_ONCE(x, v)	__extension__ ({ RSEQ_ACCESS_ONCE(x) = (v); })
#define RSEQ_READ_ONCE(x)	RSEQ_ACCESS_ONCE(x)

#define __rseq_str_1(x)	#x
#define __rseq_str(x)		__rseq_str_1(x)

#define rseq_log(fmt, args...)						       \
	fprintf(stderr, fmt " (in %s() at " __FILE__ ":" __rseq_str(__LINE__) ")\n", \
		## args, __func__)

#define rseq_bug(fmt, args...)		\
	do {				\
		rseq_log(fmt, ##args);	\
		abort();		\
	} while (0)

#if defined(__x86_64__) || defined(__i386__)
#include <rseq-x86.h>
#elif defined(__ARMEL__)
#include <rseq-arm.h>
#elif defined(__AARCH64EL__)
#include <rseq-arm64.h>
#elif defined(__PPC__)
#include <rseq-ppc.h>
#elif defined(__mips__)
#include <rseq-mips.h>
#elif defined(__s390__)
#include <rseq-s390.h>
#elif defined(__riscv)
#include <rseq-riscv.h>
#elif defined(__or1k__)
#include <rseq-or1k.h>
#else
#error unsupported target
#endif

/*
 * Register rseq for the current thread. Each thread which uses
 * restartable sequences needs to call this once, before it starts
 * executing restartable sequences, to ensure those sequences can
 * succeed. A restartable sequence executed from a non-registered
 * thread will always fail.
 */
int rseq_register_current_thread(void);

/*
 * Unregister rseq for current thread.
 */
int rseq_unregister_current_thread(void);

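/*
 * Illustrative usage sketch (not part of this header): a thread using
 * restartable sequences registers once at start-up and unregisters
 * before exiting. worker_thread() and do_percpu_work() are
 * hypothetical names.
 *
 *	static void *worker_thread(void *arg)
 *	{
 *		if (rseq_register_current_thread())
 *			abort();
 *		do_percpu_work();
 *		if (rseq_unregister_current_thread())
 *			abort();
 *		return NULL;
 *	}
 */
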
/*
 * Restartable sequence fallback for reading the current CPU number.
 */
int32_t rseq_fallback_current_cpu(void);

/*
 * Restartable sequence fallback for reading the current node number.
 */
int32_t rseq_fallback_current_node(void);

/*
 * Values returned can be either the current CPU number, -1 (rseq is
 * uninitialized), or -2 (rseq initialization has failed).
 */
static inline int32_t rseq_current_cpu_raw(void)
{
	return RSEQ_ACCESS_ONCE(rseq_get_abi()->cpu_id);
}

/*
 * Returns a possible CPU number, which is typically the current CPU.
 * The returned CPU number can be used to prepare for an rseq critical
 * section, which will confirm whether the CPU number is indeed the
 * current one, and whether rseq is initialized.
 *
 * The CPU number returned by rseq_cpu_start should always be validated
 * by passing it to a rseq asm sequence, or by comparing it to the
 * return value of rseq_current_cpu_raw() if the rseq asm sequence
 * does not need to be invoked.
 */
static inline uint32_t rseq_cpu_start(void)
{
	return RSEQ_ACCESS_ONCE(rseq_get_abi()->cpu_id_start);
}

static inline uint32_t rseq_current_cpu(void)
{
	int32_t cpu;

	cpu = rseq_current_cpu_raw();
	if (rseq_unlikely(cpu < 0))
		cpu = rseq_fallback_current_cpu();
	return cpu;
}

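/*
 * Illustrative sketch (not part of this header) of the validation
 * pattern described above for rseq_cpu_start(), for a caller which
 * does not pass the hint to a rseq asm sequence:
 *
 *	uint32_t cpu = rseq_cpu_start();
 *
 *	if ((int32_t) cpu != rseq_current_cpu_raw())
 *		cpu = rseq_current_cpu();
 */
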
static inline bool rseq_node_id_available(void)
{
	return (int) rseq_size >= rseq_offsetofend(struct rseq_abi, node_id);
}

/*
 * Current NUMA node number.
 */
static inline uint32_t rseq_current_node_id(void)
{
	assert(rseq_node_id_available());
	return RSEQ_ACCESS_ONCE(rseq_get_abi()->node_id);
}

static inline bool rseq_mm_cid_available(void)
{
	return (int) rseq_size >= rseq_offsetofend(struct rseq_abi, mm_cid);
}

static inline uint32_t rseq_current_mm_cid(void)
{
	return RSEQ_ACCESS_ONCE(rseq_get_abi()->mm_cid);
}

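/*
 * Illustrative sketch (not part of this header): the registered rseq
 * area may be too small to expose the node_id and mm_cid fields (e.g.
 * when running on a kernel without the extended rseq ABI), so callers
 * should check availability first.
 *
 *	uint32_t node = 0, cid = 0;
 *
 *	if (rseq_node_id_available())
 *		node = rseq_current_node_id();
 *	if (rseq_mm_cid_available())
 *		cid = rseq_current_mm_cid();
 */
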
static inline void rseq_clear_rseq_cs(void)
{
	RSEQ_WRITE_ONCE(rseq_get_abi()->rseq_cs.arch.ptr, 0);
}

/*
 * rseq_prepare_unload() should be invoked by each thread executing a
 * rseq critical section at least once between its last critical
 * section and unload of the library defining that critical section
 * (struct rseq_cs) or the code referred to by the struct rseq_cs
 * start_ip and post_commit_offset fields. The same applies to rseq use
 * in JIT-generated code: each thread executing a rseq critical section
 * should invoke rseq_prepare_unload() at least once before the memory
 * holding the struct rseq_cs, or the code it points to through its
 * start_ip and post_commit_offset fields, is reclaimed.
 */
static inline void rseq_prepare_unload(void)
{
	rseq_clear_rseq_cs();
}

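/*
 * Illustrative sketch (not part of this header): before dlclose() of a
 * shared object containing rseq critical sections, each thread which
 * executed those critical sections clears its pending rseq_cs pointer.
 * 'handle' is a hypothetical dlopen() handle.
 *
 *	rseq_prepare_unload();
 *	dlclose(handle);
 */
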
/*
 * Compare @v against @expect. When it matches, store @newv into @v.
 */
static inline __attribute__((always_inline))
int rseq_cmpeqv_storev(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
		       intptr_t *v, intptr_t expect,
		       intptr_t newv, int cpu)
{
	if (rseq_mo != RSEQ_MO_RELAXED)
		return -1;
	switch (percpu_mode) {
	case RSEQ_PERCPU_CPU_ID:
		return rseq_cmpeqv_storev_relaxed_cpu_id(v, expect, newv, cpu);
	case RSEQ_PERCPU_MM_CID:
		return rseq_cmpeqv_storev_relaxed_mm_cid(v, expect, newv, cpu);
	}
	return -1;
}

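/*
 * Illustrative usage sketch (not part of this header): acquire a
 * per-cpu spinlock word with a relaxed compare-and-store, retrying
 * when the comparison fails or the critical section aborts.
 * 'lock_percpu' is a hypothetical array of one intptr_t lock word per
 * possible CPU.
 *
 *	int cpu, ret;
 *
 *	for (;;) {
 *		cpu = rseq_cpu_start();
 *		ret = rseq_cmpeqv_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU_CPU_ID,
 *					 &lock_percpu[cpu], 0, 1, cpu);
 *		if (rseq_likely(!ret))
 *			break;
 *	}
 */
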
/*
 * Compare @v against @expectnot. When it does _not_ match, load the
 * content of @v into @load, and store the value found at address
 * (content of @v + @voffp) into @v.
 */
static inline __attribute__((always_inline))
int rseq_cmpnev_storeoffp_load(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
			       intptr_t *v, intptr_t expectnot, long voffp, intptr_t *load,
			       int cpu)
{
	if (rseq_mo != RSEQ_MO_RELAXED)
		return -1;
	switch (percpu_mode) {
	case RSEQ_PERCPU_CPU_ID:
		return rseq_cmpnev_storeoffp_load_relaxed_cpu_id(v, expectnot, voffp, load, cpu);
	case RSEQ_PERCPU_MM_CID:
		return rseq_cmpnev_storeoffp_load_relaxed_mm_cid(v, expectnot, voffp, load, cpu);
	}
	return -1;
}

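/*
 * Illustrative usage sketch (not part of this header): pop the head of
 * a per-cpu singly-linked list. 'struct node' and 'head_percpu' are
 * hypothetical; @expectnot is NULL so the operation fails on an empty
 * list, and @voffp is the offset of the 'next' pointer so the head is
 * replaced by its successor.
 *
 *	struct node { struct node *next; };
 *	intptr_t old_head;
 *	int cpu, ret;
 *
 *	do {
 *		cpu = rseq_current_cpu_raw();
 *		ret = rseq_cmpnev_storeoffp_load(RSEQ_MO_RELAXED, RSEQ_PERCPU_CPU_ID,
 *						 (intptr_t *) &head_percpu[cpu],
 *						 (intptr_t) NULL,
 *						 offsetof(struct node, next),
 *						 &old_head, cpu);
 *	} while (rseq_unlikely(ret < 0));	// ret > 0 means the list was empty
 */
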
/*
 * Add @count to the content of @v.
 */
static inline __attribute__((always_inline))
int rseq_addv(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
	      intptr_t *v, intptr_t count, int cpu)
{
	if (rseq_mo != RSEQ_MO_RELAXED)
		return -1;
	switch (percpu_mode) {
	case RSEQ_PERCPU_CPU_ID:
		return rseq_addv_relaxed_cpu_id(v, count, cpu);
	case RSEQ_PERCPU_MM_CID:
		return rseq_addv_relaxed_mm_cid(v, count, cpu);
	}
	return -1;
}

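/*
 * Illustrative usage sketch (not part of this header): increment a
 * per-cpu counter, retrying on abort. 'counter_percpu' is a
 * hypothetical array of one intptr_t counter per possible CPU.
 *
 *	int cpu, ret;
 *
 *	do {
 *		cpu = rseq_cpu_start();
 *		ret = rseq_addv(RSEQ_MO_RELAXED, RSEQ_PERCPU_CPU_ID,
 *				&counter_percpu[cpu], 1, cpu);
 *	} while (rseq_unlikely(ret));
 */
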
#ifdef RSEQ_ARCH_HAS_OFFSET_DEREF_ADDV
/*
 *   pval = *(ptr + off);
 *   *pval += inc;
 */
static inline __attribute__((always_inline))
int rseq_offset_deref_addv(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
			   intptr_t *ptr, long off, intptr_t inc, int cpu)
{
	if (rseq_mo != RSEQ_MO_RELAXED)
		return -1;
	switch (percpu_mode) {
	case RSEQ_PERCPU_CPU_ID:
		return rseq_offset_deref_addv_relaxed_cpu_id(ptr, off, inc, cpu);
	case RSEQ_PERCPU_MM_CID:
		return rseq_offset_deref_addv_relaxed_mm_cid(ptr, off, inc, cpu);
	}
	return -1;
}
#endif

/*
 * Compare @v against @expect. When it matches, store @newv2 into @v2,
 * then store @newv into @v as the final commit. With RSEQ_MO_RELEASE,
 * the final store to @v is a store-release.
 */
static inline __attribute__((always_inline))
int rseq_cmpeqv_trystorev_storev(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
				 intptr_t *v, intptr_t expect,
				 intptr_t *v2, intptr_t newv2,
				 intptr_t newv, int cpu)
{
	switch (rseq_mo) {
	case RSEQ_MO_RELAXED:
		switch (percpu_mode) {
		case RSEQ_PERCPU_CPU_ID:
			return rseq_cmpeqv_trystorev_storev_relaxed_cpu_id(v, expect, v2, newv2, newv, cpu);
		case RSEQ_PERCPU_MM_CID:
			return rseq_cmpeqv_trystorev_storev_relaxed_mm_cid(v, expect, v2, newv2, newv, cpu);
		}
		return -1;
	case RSEQ_MO_RELEASE:
		switch (percpu_mode) {
		case RSEQ_PERCPU_CPU_ID:
			return rseq_cmpeqv_trystorev_storev_release_cpu_id(v, expect, v2, newv2, newv, cpu);
		case RSEQ_PERCPU_MM_CID:
			return rseq_cmpeqv_trystorev_storev_release_mm_cid(v, expect, v2, newv2, newv, cpu);
		}
		return -1;
	default:
		return -1;
	}
}

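/*
 * Illustrative usage sketch (not part of this header): push an entry
 * into a per-cpu array-based buffer. 'buf_percpu' is a hypothetical
 * per-CPU structure with an 'offset' count and an 'array' of intptr_t
 * slots; the slot is written before the offset update commits the push
 * (capacity check omitted for brevity).
 *
 *	intptr_t off, item = (intptr_t) node;	// 'node' is a hypothetical payload pointer
 *	int cpu, ret;
 *
 *	do {
 *		cpu = rseq_cpu_start();
 *		off = RSEQ_READ_ONCE(buf_percpu[cpu].offset);
 *		ret = rseq_cmpeqv_trystorev_storev(RSEQ_MO_RELEASE, RSEQ_PERCPU_CPU_ID,
 *						   &buf_percpu[cpu].offset, off,
 *						   &buf_percpu[cpu].array[off], item,
 *						   off + 1, cpu);
 *	} while (rseq_unlikely(ret));
 */
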
/*
 * Compare @v against @expect and @v2 against @expect2. When both
 * comparisons match, store @newv into @v.
 */
static inline __attribute__((always_inline))
int rseq_cmpeqv_cmpeqv_storev(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
			      intptr_t *v, intptr_t expect,
			      intptr_t *v2, intptr_t expect2,
			      intptr_t newv, int cpu)
{
	if (rseq_mo != RSEQ_MO_RELAXED)
		return -1;
	switch (percpu_mode) {
	case RSEQ_PERCPU_CPU_ID:
		return rseq_cmpeqv_cmpeqv_storev_relaxed_cpu_id(v, expect, v2, expect2, newv, cpu);
	case RSEQ_PERCPU_MM_CID:
		return rseq_cmpeqv_cmpeqv_storev_relaxed_mm_cid(v, expect, v2, expect2, newv, cpu);
	}
	return -1;
}

/*
 * Compare @v against @expect. When it matches, copy @len bytes from
 * @src to @dst, then store @newv into @v as the final commit. With
 * RSEQ_MO_RELEASE, the final store to @v is a store-release.
 */
static inline __attribute__((always_inline))
int rseq_cmpeqv_trymemcpy_storev(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
				 intptr_t *v, intptr_t expect,
				 void *dst, void *src, size_t len,
				 intptr_t newv, int cpu)
{
	switch (rseq_mo) {
	case RSEQ_MO_RELAXED:
		switch (percpu_mode) {
		case RSEQ_PERCPU_CPU_ID:
			return rseq_cmpeqv_trymemcpy_storev_relaxed_cpu_id(v, expect, dst, src, len, newv, cpu);
		case RSEQ_PERCPU_MM_CID:
			return rseq_cmpeqv_trymemcpy_storev_relaxed_mm_cid(v, expect, dst, src, len, newv, cpu);
		}
		return -1;
	case RSEQ_MO_RELEASE:
		switch (percpu_mode) {
		case RSEQ_PERCPU_CPU_ID:
			return rseq_cmpeqv_trymemcpy_storev_release_cpu_id(v, expect, dst, src, len, newv, cpu);
		case RSEQ_PERCPU_MM_CID:
			return rseq_cmpeqv_trymemcpy_storev_release_mm_cid(v, expect, dst, src, len, newv, cpu);
		}
		return -1;
	default:
		return -1;
	}
}

#endif  /* RSEQ_H */