/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_JUMP_LABEL_H
#define _LINUX_JUMP_LABEL_H

/*
 * Jump label support
 *
 * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 *
 * DEPRECATED API:
 *
 * The use of 'struct static_key' directly is now DEPRECATED. In addition,
 * static_key_{true,false}() is also DEPRECATED. I.e. DO NOT use the following:
 *
 * struct static_key false = STATIC_KEY_INIT_FALSE;
 * struct static_key true = STATIC_KEY_INIT_TRUE;
 * static_key_true()
 * static_key_false()
 *
 * The updated API replacements are:
 *
 * DEFINE_STATIC_KEY_TRUE(key);
 * DEFINE_STATIC_KEY_FALSE(key);
 * DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count);
 * DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count);
 * static_branch_likely()
 * static_branch_unlikely()
 *
 * Jump labels provide an interface to generate dynamic branches using
 * self-modifying code. Assuming toolchain and architecture support, if we
 * define a "key" that is initially false via "DEFINE_STATIC_KEY_FALSE(key)",
 * an "if (static_branch_unlikely(&key))" statement is an unconditional branch
 * (which defaults to false - and the true block is placed out of line).
 * Similarly, we can define an initially true key via
 * "DEFINE_STATIC_KEY_TRUE(key)", and use it in the same
 * "if (static_branch_unlikely(&key))", in which case we will generate an
 * unconditional branch to the out-of-line true branch. Keys that are
 * initially true or false can be used in both static_branch_unlikely()
 * and static_branch_likely() statements.
 *
 * At runtime we can change the branch target by setting the key
 * to true via a call to static_branch_enable(), or false using
 * static_branch_disable(). If the direction of the branch is switched by
 * these calls then we run-time modify the branch target via a
 * no-op -> jump or jump -> no-op conversion. For example, for an
 * initially false key that is used in an "if (static_branch_unlikely(&key))"
 * statement, setting the key to true requires us to patch in a jump
 * to the out-of-line true branch.
 *
 * In addition to static_branch_{enable,disable}, we can also reference count
 * the key or branch direction via static_branch_{inc,dec}. Thus,
 * static_branch_inc() can be thought of as a 'make more true' and
 * static_branch_dec() as a 'make more false'.
 *
 * Since this relies on modifying code, the branch modifying functions
 * must be considered absolute slow paths (machine wide synchronization etc.).
 * OTOH, since the affected branches are unconditional, their runtime overhead
 * will be absolutely minimal, esp. in the default (off) case where the total
 * effect is a single NOP of appropriate size. The on case will patch in a jump
 * to the out-of-line block.
 *
 * When the control is directly exposed to userspace, it is prudent to delay the
 * decrement to avoid high frequency code modifications which can (and do)
 * cause significant performance degradation. Struct static_key_deferred and
 * static_key_slow_dec_deferred() provide for this.
 *
 * Lacking toolchain and/or architecture support, static keys fall back to a
 * simple conditional branch.
 *
 * Additional babbling in: Documentation/staging/static-keys.rst
 */
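
/*
 * A minimal usage sketch (the key and function names below are purely
 * illustrative, not part of this header):
 *
 *	DEFINE_STATIC_KEY_FALSE(my_feature_key);
 *
 *	void hot_path(void)
 *	{
 *		if (static_branch_unlikely(&my_feature_key))
 *			do_feature_work();	// out of line; a NOP on the fast path
 *	}
 *
 *	// slow path, e.g. a sysctl or debugfs write handler:
 *	static_branch_enable(&my_feature_key);	// NOP -> JMP
 *	static_branch_disable(&my_feature_key);	// JMP -> NOP
 */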

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>

extern bool static_key_initialized;

#define STATIC_KEY_CHECK_USE(key) WARN(!static_key_initialized,		      \
				    "%s(): static key '%pS' used before call to jump_label_init()", \
				    __func__, (key))

struct static_key {
	atomic_t enabled;
#ifdef CONFIG_JUMP_LABEL
/*
 * Note:
 *   To make anonymous unions work with old compilers, the static
 *   initialization of them requires brackets. This creates a dependency
 *   on the order of the struct with the initializers. If any fields
 *   are added, STATIC_KEY_INIT_TRUE and STATIC_KEY_INIT_FALSE may need
 *   to be modified.
 *
 * bit 0 => 1 if key is initially true
 *	    0 if initially false
 * bit 1 => 1 if points to struct static_key_mod
 *	    0 if points to struct jump_entry
 */
	union {
		unsigned long type;
		struct jump_entry *entries;
		struct static_key_mod *next;
	};
#endif	/* CONFIG_JUMP_LABEL */
};
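
/*
 * A minimal decoding sketch of the bit encoding above (this roughly mirrors
 * the helpers in kernel/jump_label.c; the variable names are illustrative):
 *
 *	bool initially_true = key->type & JUMP_TYPE_TRUE;
 *	bool linked	    = key->type & JUMP_TYPE_LINKED;
 *	struct jump_entry *entries =
 *		(struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
 *
 * The JUMP_TYPE_* masks are defined further down, under CONFIG_JUMP_LABEL.
 */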

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_JUMP_LABEL
#include <asm/jump_label.h>

#ifndef __ASSEMBLY__
#ifdef CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE

struct jump_entry {
	s32 code;
	s32 target;
	long key;	// key may be far away from the core kernel under KASLR
};

static inline unsigned long jump_entry_code(const struct jump_entry *entry)
{
	return (unsigned long)&entry->code + entry->code;
}

static inline unsigned long jump_entry_target(const struct jump_entry *entry)
{
	return (unsigned long)&entry->target + entry->target;
}

static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
{
	long offset = entry->key & ~3L;

	return (struct static_key *)((unsigned long)&entry->key + offset);
}

#else

static inline unsigned long jump_entry_code(const struct jump_entry *entry)
{
	return entry->code;
}

static inline unsigned long jump_entry_target(const struct jump_entry *entry)
{
	return entry->target;
}

static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
{
	return (struct static_key *)((unsigned long)entry->key & ~3UL);
}

#endif

static inline bool jump_entry_is_branch(const struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;
}

static inline bool jump_entry_is_init(const struct jump_entry *entry)
{
	return (unsigned long)entry->key & 2UL;
}

static inline void jump_entry_set_init(struct jump_entry *entry, bool set)
{
	if (set)
		entry->key |= 2;
	else
		entry->key &= ~2;
}

static inline int jump_entry_size(struct jump_entry *entry)
{
#ifdef JUMP_LABEL_NOP_SIZE
	return JUMP_LABEL_NOP_SIZE;
#else
	return arch_jump_entry_size(entry);
#endif
}

#endif
#endif

#ifndef __ASSEMBLY__

enum jump_label_type {
	JUMP_LABEL_NOP = 0,
	JUMP_LABEL_JMP,
};

struct module;

#ifdef CONFIG_JUMP_LABEL

#define JUMP_TYPE_FALSE		0UL
#define JUMP_TYPE_TRUE		1UL
#define JUMP_TYPE_LINKED	2UL
#define JUMP_TYPE_MASK		3UL

static __always_inline bool static_key_false(struct static_key *key)
{
	return arch_static_branch(key, false);
}

static __always_inline bool static_key_true(struct static_key *key)
{
	return !arch_static_branch(key, true);
}

extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[];

extern void jump_label_init(void);
extern void jump_label_init_ro(void);
extern void jump_label_lock(void);
extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry,
				      enum jump_label_type type);
extern bool arch_jump_label_transform_queue(struct jump_entry *entry,
					    enum jump_label_type type);
extern void arch_jump_label_transform_apply(void);
extern int jump_label_text_reserved(void *start, void *end);
extern bool static_key_slow_inc(struct static_key *key);
extern bool static_key_fast_inc_not_disabled(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
extern bool static_key_slow_inc_cpuslocked(struct static_key *key);
extern void static_key_slow_dec_cpuslocked(struct static_key *key);
extern int static_key_count(struct static_key *key);
extern void static_key_enable(struct static_key *key);
extern void static_key_disable(struct static_key *key);
extern void static_key_enable_cpuslocked(struct static_key *key);
extern void static_key_disable_cpuslocked(struct static_key *key);
extern enum jump_label_type jump_label_init_type(struct jump_entry *entry);

/*
 * We should be using ATOMIC_INIT() for initializing .enabled, but
 * the inclusion of atomic.h is problematic for inclusion of jump_label.h
 * in 'low-level' headers. Thus, we are initializing .enabled with a
 * raw value, but have added a BUILD_BUG_ON() to catch any issues in
 * jump_label_init(); see kernel/jump_label.c.
 */
#define STATIC_KEY_INIT_TRUE					\
	{ .enabled = { 1 },					\
	  { .type = JUMP_TYPE_TRUE } }
#define STATIC_KEY_INIT_FALSE					\
	{ .enabled = { 0 },					\
	  { .type = JUMP_TYPE_FALSE } }

#else  /* !CONFIG_JUMP_LABEL */

#include <linux/atomic.h>
#include <linux/bug.h>

static __always_inline int static_key_count(struct static_key *key)
{
	return raw_atomic_read(&key->enabled);
}

static __always_inline void jump_label_init(void)
{
	static_key_initialized = true;
}

static __always_inline void jump_label_init_ro(void) { }

static __always_inline bool static_key_false(struct static_key *key)
{
	if (unlikely_notrace(static_key_count(key) > 0))
		return true;
	return false;
}

static __always_inline bool static_key_true(struct static_key *key)
{
	if (likely_notrace(static_key_count(key) > 0))
		return true;
	return false;
}

static inline bool static_key_fast_inc_not_disabled(struct static_key *key)
{
	int v;

	STATIC_KEY_CHECK_USE(key);
	/*
	 * Prevent key->enabled getting negative to follow the same semantics
	 * as for CONFIG_JUMP_LABEL=y, see kernel/jump_label.c comment.
	 */
	v = atomic_read(&key->enabled);
	do {
		if (v < 0 || (v + 1) < 0)
			return false;
	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));
	return true;
}
#define static_key_slow_inc(key)	static_key_fast_inc_not_disabled(key)

static inline void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	atomic_dec(&key->enabled);
}

#define static_key_slow_inc_cpuslocked(key) static_key_slow_inc(key)
#define static_key_slow_dec_cpuslocked(key) static_key_slow_dec(key)

static inline int jump_label_text_reserved(void *start, void *end)
{
	return 0;
}

static inline void jump_label_lock(void) {}
static inline void jump_label_unlock(void) {}

static inline void static_key_enable(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) != 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}
	atomic_set(&key->enabled, 1);
}

static inline void static_key_disable(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}
	atomic_set(&key->enabled, 0);
}

#define static_key_enable_cpuslocked(k)		static_key_enable((k))
#define static_key_disable_cpuslocked(k)	static_key_disable((k))

#define STATIC_KEY_INIT_TRUE	{ .enabled = ATOMIC_INIT(1) }
#define STATIC_KEY_INIT_FALSE	{ .enabled = ATOMIC_INIT(0) }

#endif	/* CONFIG_JUMP_LABEL */

#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
#define jump_label_enabled static_key_enabled

/* -------------------------------------------------------------------------- */

/*
 * Two type wrappers around static_key, such that we can use compile time
 * type differentiation to emit the right code.
 *
 * All the below code is macros in order to play type games.
 */

struct static_key_true {
	struct static_key key;
};

struct static_key_false {
	struct static_key key;
};

#define STATIC_KEY_TRUE_INIT  (struct static_key_true) { .key = STATIC_KEY_INIT_TRUE,  }
#define STATIC_KEY_FALSE_INIT (struct static_key_false){ .key = STATIC_KEY_INIT_FALSE, }

#define DEFINE_STATIC_KEY_TRUE(name)	\
	struct static_key_true name = STATIC_KEY_TRUE_INIT

#define DEFINE_STATIC_KEY_TRUE_RO(name)	\
	struct static_key_true name __ro_after_init = STATIC_KEY_TRUE_INIT

#define DECLARE_STATIC_KEY_TRUE(name)	\
	extern struct static_key_true name

#define DEFINE_STATIC_KEY_FALSE(name)	\
	struct static_key_false name = STATIC_KEY_FALSE_INIT

#define DEFINE_STATIC_KEY_FALSE_RO(name)	\
	struct static_key_false name __ro_after_init = STATIC_KEY_FALSE_INIT

#define DECLARE_STATIC_KEY_FALSE(name)	\
	extern struct static_key_false name

#define DEFINE_STATIC_KEY_ARRAY_TRUE(name, count)		\
	struct static_key_true name[count] = {			\
		[0 ... (count) - 1] = STATIC_KEY_TRUE_INIT,	\
	}

#define DEFINE_STATIC_KEY_ARRAY_FALSE(name, count)		\
	struct static_key_false name[count] = {			\
		[0 ... (count) - 1] = STATIC_KEY_FALSE_INIT,	\
	}

#define _DEFINE_STATIC_KEY_1(name)	DEFINE_STATIC_KEY_TRUE(name)
#define _DEFINE_STATIC_KEY_0(name)	DEFINE_STATIC_KEY_FALSE(name)
#define DEFINE_STATIC_KEY_MAYBE(cfg, name)			\
	__PASTE(_DEFINE_STATIC_KEY_, IS_ENABLED(cfg))(name)

#define _DEFINE_STATIC_KEY_RO_1(name)	DEFINE_STATIC_KEY_TRUE_RO(name)
#define _DEFINE_STATIC_KEY_RO_0(name)	DEFINE_STATIC_KEY_FALSE_RO(name)
#define DEFINE_STATIC_KEY_MAYBE_RO(cfg, name)			\
	__PASTE(_DEFINE_STATIC_KEY_RO_, IS_ENABLED(cfg))(name)

#define _DECLARE_STATIC_KEY_1(name)	DECLARE_STATIC_KEY_TRUE(name)
#define _DECLARE_STATIC_KEY_0(name)	DECLARE_STATIC_KEY_FALSE(name)
#define DECLARE_STATIC_KEY_MAYBE(cfg, name)			\
	__PASTE(_DECLARE_STATIC_KEY_, IS_ENABLED(cfg))(name)

extern bool ____wrong_branch_error(void);

#define static_key_enabled(x)							\
({										\
	if (!__builtin_types_compatible_p(typeof(*x), struct static_key) &&	\
	    !__builtin_types_compatible_p(typeof(*x), struct static_key_true) &&\
	    !__builtin_types_compatible_p(typeof(*x), struct static_key_false))	\
		____wrong_branch_error();					\
	static_key_count((struct static_key *)x) > 0;				\
})

#ifdef CONFIG_JUMP_LABEL

/*
 * Combine the right initial value (type) with the right branch order
 * to generate the desired result.
 *
 *
 * type\branch|	likely (1)	      |	unlikely (0)
 * -----------+-----------------------+------------------
 *            |                       |
 *  true (1)  |	   ...		      |	   ...
 *            |    NOP		      |	   JMP L
 *            |    <br-stmts>	      |	1: ...
 *            |	L: ...		      |
 *            |			      |
 *            |			      |	L: <br-stmts>
 *            |			      |	   jmp 1b
 *            |                       |
 * -----------+-----------------------+------------------
 *            |                       |
 *  false (0) |	   ...		      |	   ...
 *            |    JMP L	      |	   NOP
 *            |    <br-stmts>	      |	1: ...
 *            |	L: ...		      |
 *            |			      |
 *            |			      |	L: <br-stmts>
 *            |			      |	   jmp 1b
 *            |                       |
 * -----------+-----------------------+------------------
 *
 * The initial value is encoded in the LSB of static_key::entries,
 * type: 0 = false, 1 = true.
 *
 * The branch type is encoded in the LSB of jump_entry::key,
 * branch: 0 = unlikely, 1 = likely.
 *
 * This gives the following logic table:
 *
 *	enabled	type	branch	  instruction
 * -----------------------------+-----------
 *	0	0	0	| NOP
 *	0	0	1	| JMP
 *	0	1	0	| NOP
 *	0	1	1	| JMP
 *
 *	1	0	0	| JMP
 *	1	0	1	| NOP
 *	1	1	0	| JMP
 *	1	1	1	| NOP
 *
 * Which gives the following functions:
 *
 *   dynamic: instruction = enabled ^ branch
 *   static:  instruction = type ^ branch
 *
 * See jump_label_type() / jump_label_init_type().
 */
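
/*
 * A worked instance of the table above: an initially false key (type = 0)
 * used in static_branch_likely() (branch = 1) is first emitted as a JMP to
 * the out-of-line block (type ^ branch = 0 ^ 1 = 1); enabling the key
 * patches it to a NOP (enabled ^ branch = 1 ^ 1 = 0), so the likely path
 * then falls straight through.
 */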

#define static_branch_likely(x)							\
({										\
	bool branch;								\
	if (__builtin_types_compatible_p(typeof(*x), struct static_key_true))	\
		branch = !arch_static_branch(&(x)->key, true);			\
	else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
		branch = !arch_static_branch_jump(&(x)->key, true);		\
	else									\
		branch = ____wrong_branch_error();				\
	likely_notrace(branch);							\
})

#define static_branch_unlikely(x)						\
({										\
	bool branch;								\
	if (__builtin_types_compatible_p(typeof(*x), struct static_key_true))	\
		branch = arch_static_branch_jump(&(x)->key, false);		\
	else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
		branch = arch_static_branch(&(x)->key, false);			\
	else									\
		branch = ____wrong_branch_error();				\
	unlikely_notrace(branch);						\
})

#else /* !CONFIG_JUMP_LABEL */

#define static_branch_likely(x)		likely_notrace(static_key_enabled(&(x)->key))
#define static_branch_unlikely(x)	unlikely_notrace(static_key_enabled(&(x)->key))

#endif /* CONFIG_JUMP_LABEL */

#define static_branch_maybe(config, x)					\
	(IS_ENABLED(config) ? static_branch_likely(x)			\
			    : static_branch_unlikely(x))
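
/*
 * A minimal sketch of the _MAYBE variants (the config symbol and key name
 * below are made up for illustration): with CONFIG_FOO_DEFAULT_ON=y the key
 * is defined initially true and the branch is laid out as likely(); with it
 * disabled, the key is initially false and the branch is laid out as
 * unlikely().
 *
 *	DEFINE_STATIC_KEY_MAYBE(CONFIG_FOO_DEFAULT_ON, foo_key);
 *
 *	if (static_branch_maybe(CONFIG_FOO_DEFAULT_ON, &foo_key))
 *		do_foo();
 */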

/*
 * Advanced usage; refcount, branch is enabled when: count != 0
 */

#define static_branch_inc(x)		static_key_slow_inc(&(x)->key)
#define static_branch_dec(x)		static_key_slow_dec(&(x)->key)
#define static_branch_inc_cpuslocked(x)	static_key_slow_inc_cpuslocked(&(x)->key)
#define static_branch_dec_cpuslocked(x)	static_key_slow_dec_cpuslocked(&(x)->key)
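
/*
 * A minimal refcounting sketch (the key and function names are illustrative
 * only): each user bumps the key, and the branch stays enabled until the
 * last user drops its reference.
 *
 *	DEFINE_STATIC_KEY_FALSE(tracing_key);
 *
 *	static void tracer_attach(void)
 *	{
 *		static_branch_inc(&tracing_key);	// 0 -> 1 patches the branch in
 *	}
 *
 *	static void tracer_detach(void)
 *	{
 *		static_branch_dec(&tracing_key);	// 1 -> 0 patches it back out
 *	}
 */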

/*
 * Normal usage; boolean enable/disable.
 */

#define static_branch_enable(x)			static_key_enable(&(x)->key)
#define static_branch_disable(x)		static_key_disable(&(x)->key)
#define static_branch_enable_cpuslocked(x)	static_key_enable_cpuslocked(&(x)->key)
#define static_branch_disable_cpuslocked(x)	static_key_disable_cpuslocked(&(x)->key)

#endif /* __ASSEMBLY__ */

#endif	/* _LINUX_JUMP_LABEL_H */