/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_JUMP_LABEL_H
#define _LINUX_JUMP_LABEL_H

/*
 * Jump label support
 *
 * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 *
 * DEPRECATED API:
 *
 * The use of 'struct static_key' directly is now DEPRECATED. In addition,
 * static_key_{true,false}() are also DEPRECATED. I.e., DO NOT use the following:
 *
 * struct static_key false = STATIC_KEY_INIT_FALSE;
 * struct static_key true = STATIC_KEY_INIT_TRUE;
 * static_key_true()
 * static_key_false()
 *
 * The updated API replacements are:
 *
 * DEFINE_STATIC_KEY_TRUE(key);
 * DEFINE_STATIC_KEY_FALSE(key);
 * DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count);
 * DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count);
 * static_branch_likely()
 * static_branch_unlikely()
 *
 * Jump labels provide an interface to generate dynamic branches using
 * self-modifying code. Assuming toolchain and architecture support, if we
 * define a "key" that is initially false via "DEFINE_STATIC_KEY_FALSE(key)",
 * an "if (static_branch_unlikely(&key))" statement is an unconditional branch
 * (which defaults to false - and the true block is placed out of line).
 * Similarly, we can define an initially true key via
 * "DEFINE_STATIC_KEY_TRUE(key)", and use it in the same
 * "if (static_branch_unlikely(&key))", in which case we will generate an
 * unconditional branch to the out-of-line true branch. Keys that are
 * initially true or false can be used in both static_branch_unlikely()
 * and static_branch_likely() statements.
 *
 * At runtime we can change the branch target by setting the key
 * to true via a call to static_branch_enable(), or false using
 * static_branch_disable(). If the direction of the branch is switched by
 * these calls then we run-time modify the branch target via a
 * no-op -> jump or jump -> no-op conversion. For example, for an
 * initially false key that is used in an "if (static_branch_unlikely(&key))"
 * statement, setting the key to true requires us to patch in a jump
 * to the out-of-line true branch.
 *
 * In addition to static_branch_{enable,disable}, we can also reference count
 * the key or branch direction via static_branch_{inc,dec}. Thus,
 * static_branch_inc() can be thought of as a 'make more true' and
 * static_branch_dec() as a 'make more false'.
 *
 * Since this relies on modifying code, the branch modifying functions
 * must be considered absolute slow paths (machine wide synchronization etc.).
 * OTOH, since the affected branches are unconditional, their runtime overhead
 * will be absolutely minimal, esp. in the default (off) case where the total
 * effect is a single NOP of appropriate size. The on case will patch in a jump
 * to the out-of-line block.
 *
 * When the control is directly exposed to userspace, it is prudent to delay the
 * decrement to avoid high frequency code modifications which can (and do)
 * cause significant performance degradation. Struct static_key_deferred and
 * static_key_slow_dec_deferred() provide for this.
 *
 * Lacking toolchain and/or architecture support, static keys fall back to a
 * simple conditional branch.
 *
 * Additional babbling in: Documentation/staging/static-keys.rst
 */
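
/*
 * Illustrative usage sketch (not part of this header; feature_key,
 * hot_path(), do_feature_work() and feature_set() are made-up names).
 * It shows the non-deprecated API described above for a feature that is
 * almost always disabled:
 *
 *	DEFINE_STATIC_KEY_FALSE(feature_key);
 *
 *	void hot_path(void)
 *	{
 *		if (static_branch_unlikely(&feature_key))
 *			do_feature_work();	// out of line until enabled
 *	}
 *
 *	void feature_set(bool on)
 *	{
 *		if (on)
 *			static_branch_enable(&feature_key);	// NOP -> JMP
 *		else
 *			static_branch_disable(&feature_key);	// JMP -> NOP
 *	}
 *
 * Only DEFINE_STATIC_KEY_FALSE(), static_branch_unlikely() and
 * static_branch_{enable,disable}() above are the real API; everything else
 * is placeholder.
 */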

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/cleanup.h>

extern bool static_key_initialized;

#define STATIC_KEY_CHECK_USE(key) WARN(!static_key_initialized,		      \
				       "%s(): static key '%pS' used before call to jump_label_init()", \
				       __func__, (key))

struct static_key {
	atomic_t enabled;
#ifdef CONFIG_JUMP_LABEL
/*
 * Note:
 *   To make anonymous unions work with old compilers, the static
 *   initialization of them requires brackets. This creates a dependency
 *   on the order of the struct with the initializers. If any fields
 *   are added, STATIC_KEY_INIT_TRUE and STATIC_KEY_INIT_FALSE may need
 *   to be modified.
 *
 * bit 0 => 1 if key is initially true
 *	    0 if initially false
 * bit 1 => 1 if points to struct static_key_mod
 *	    0 if points to struct jump_entry
 */
	union {
		unsigned long type;
		struct jump_entry *entries;
		struct static_key_mod *next;
	};
#endif	/* CONFIG_JUMP_LABEL */
};

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_JUMP_LABEL
#include <asm/jump_label.h>

#ifndef __ASSEMBLY__
#ifdef CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE

struct jump_entry {
	s32 code;
	s32 target;
	long key;	// key may be far away from the core kernel under KASLR
};

static inline unsigned long jump_entry_code(const struct jump_entry *entry)
{
	return (unsigned long)&entry->code + entry->code;
}

static inline unsigned long jump_entry_target(const struct jump_entry *entry)
{
	return (unsigned long)&entry->target + entry->target;
}

static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
{
	long offset = entry->key & ~3L;

	return (struct static_key *)((unsigned long)&entry->key + offset);
}
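
/*
 * Worked example of the relative encoding above (addresses made up for
 * illustration): if a jump_entry sits at address A and entry->code holds
 * -0x40, jump_entry_code() returns A - 0x40; likewise for target and key.
 * Because every field is an offset from the entry itself rather than an
 * absolute address, the jump table contains no absolute pointers and needs
 * no relocation when KASLR shifts the kernel image, while the low two bits
 * of the key offset remain free for the branch/init flags that
 * jump_entry_key() masks off.
 */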

#else

static inline unsigned long jump_entry_code(const struct jump_entry *entry)
{
	return entry->code;
}

static inline unsigned long jump_entry_target(const struct jump_entry *entry)
{
	return entry->target;
}

static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
{
	return (struct static_key *)((unsigned long)entry->key & ~3UL);
}

#endif

static inline bool jump_entry_is_branch(const struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;
}

static inline bool jump_entry_is_init(const struct jump_entry *entry)
{
	return (unsigned long)entry->key & 2UL;
}

static inline void jump_entry_set_init(struct jump_entry *entry, bool set)
{
	if (set)
		entry->key |= 2;
	else
		entry->key &= ~2;
}

static inline int jump_entry_size(struct jump_entry *entry)
{
#ifdef JUMP_LABEL_NOP_SIZE
	return JUMP_LABEL_NOP_SIZE;
#else
	return arch_jump_entry_size(entry);
#endif
}

#endif
#endif

#ifndef __ASSEMBLY__

enum jump_label_type {
	JUMP_LABEL_NOP = 0,
	JUMP_LABEL_JMP,
};

struct module;

#ifdef CONFIG_JUMP_LABEL

#define JUMP_TYPE_FALSE		0UL
#define JUMP_TYPE_TRUE		1UL
#define JUMP_TYPE_LINKED	2UL
#define JUMP_TYPE_MASK		3UL

static __always_inline bool static_key_false(struct static_key *key)
{
	return arch_static_branch(key, false);
}

static __always_inline bool static_key_true(struct static_key *key)
{
	return !arch_static_branch(key, true);
}

extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[];

extern void jump_label_init(void);
extern void jump_label_init_ro(void);
extern void jump_label_lock(void);
extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry,
				      enum jump_label_type type);
extern bool arch_jump_label_transform_queue(struct jump_entry *entry,
					    enum jump_label_type type);
extern void arch_jump_label_transform_apply(void);
extern int jump_label_text_reserved(void *start, void *end);
extern bool static_key_slow_inc(struct static_key *key);
extern bool static_key_fast_inc_not_disabled(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
extern bool static_key_slow_inc_cpuslocked(struct static_key *key);
extern void static_key_slow_dec_cpuslocked(struct static_key *key);
extern int static_key_count(struct static_key *key);
extern void static_key_enable(struct static_key *key);
extern void static_key_disable(struct static_key *key);
extern void static_key_enable_cpuslocked(struct static_key *key);
extern void static_key_disable_cpuslocked(struct static_key *key);
extern enum jump_label_type jump_label_init_type(struct jump_entry *entry);

/*
 * We should be using ATOMIC_INIT() for initializing .enabled, but
 * the inclusion of atomic.h is problematic for inclusion of jump_label.h
 * in 'low-level' headers. Thus, we are initializing .enabled with a
 * raw value, but have added a BUILD_BUG_ON() to catch any issues in
 * jump_label_init(); see kernel/jump_label.c.
 */
#define STATIC_KEY_INIT_TRUE					\
	{ .enabled = { 1 },					\
	  { .type = JUMP_TYPE_TRUE } }
#define STATIC_KEY_INIT_FALSE					\
	{ .enabled = { 0 },					\
	  { .type = JUMP_TYPE_FALSE } }

#else /* !CONFIG_JUMP_LABEL */

#include <linux/atomic.h>
#include <linux/bug.h>

static __always_inline int static_key_count(struct static_key *key)
{
	return raw_atomic_read(&key->enabled);
}

static __always_inline void jump_label_init(void)
{
	static_key_initialized = true;
}

static __always_inline void jump_label_init_ro(void) { }

static __always_inline bool static_key_false(struct static_key *key)
{
	if (unlikely_notrace(static_key_count(key) > 0))
		return true;
	return false;
}

static __always_inline bool static_key_true(struct static_key *key)
{
	if (likely_notrace(static_key_count(key) > 0))
		return true;
	return false;
}

static inline bool static_key_fast_inc_not_disabled(struct static_key *key)
{
	int v;

	STATIC_KEY_CHECK_USE(key);
	/*
	 * Prevent key->enabled getting negative to follow the same semantics
	 * as for CONFIG_JUMP_LABEL=y, see kernel/jump_label.c comment.
	 */
	v = atomic_read(&key->enabled);
	do {
		if (v < 0 || (v + 1) < 0)
			return false;
	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));
	return true;
}
#define static_key_slow_inc(key)	static_key_fast_inc_not_disabled(key)

static inline void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	atomic_dec(&key->enabled);
}


#define static_key_slow_inc_cpuslocked(key) static_key_slow_inc(key)
#define static_key_slow_dec_cpuslocked(key) static_key_slow_dec(key)

static inline int jump_label_text_reserved(void *start, void *end)
{
	return 0;
}

static inline void jump_label_lock(void) {}
static inline void jump_label_unlock(void) {}

static inline void static_key_enable(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) != 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}
	atomic_set(&key->enabled, 1);
}

static inline void static_key_disable(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}
	atomic_set(&key->enabled, 0);
}

#define static_key_enable_cpuslocked(k)		static_key_enable((k))
#define static_key_disable_cpuslocked(k)	static_key_disable((k))

#define STATIC_KEY_INIT_TRUE	{ .enabled = ATOMIC_INIT(1) }
#define STATIC_KEY_INIT_FALSE	{ .enabled = ATOMIC_INIT(0) }

#endif	/* CONFIG_JUMP_LABEL */

DEFINE_LOCK_GUARD_0(jump_label_lock, jump_label_lock(), jump_label_unlock())

#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
#define jump_label_enabled static_key_enabled

/* -------------------------------------------------------------------------- */

/*
 * Two type wrappers around static_key, such that we can use compile time
 * type differentiation to emit the right code.
 *
 * All the below code is macros in order to play type games.
 */

struct static_key_true {
	struct static_key key;
};

struct static_key_false {
	struct static_key key;
};

#define STATIC_KEY_TRUE_INIT  (struct static_key_true) { .key = STATIC_KEY_INIT_TRUE,  }
#define STATIC_KEY_FALSE_INIT (struct static_key_false){ .key = STATIC_KEY_INIT_FALSE, }

#define DEFINE_STATIC_KEY_TRUE(name)	\
	struct static_key_true name = STATIC_KEY_TRUE_INIT

#define DEFINE_STATIC_KEY_TRUE_RO(name)	\
	struct static_key_true name __ro_after_init = STATIC_KEY_TRUE_INIT

#define DECLARE_STATIC_KEY_TRUE(name)	\
	extern struct static_key_true name

#define DEFINE_STATIC_KEY_FALSE(name)	\
	struct static_key_false name = STATIC_KEY_FALSE_INIT

#define DEFINE_STATIC_KEY_FALSE_RO(name)	\
	struct static_key_false name __ro_after_init = STATIC_KEY_FALSE_INIT

#define DECLARE_STATIC_KEY_FALSE(name)	\
	extern struct static_key_false name

#define DEFINE_STATIC_KEY_ARRAY_TRUE(name, count)		\
	struct static_key_true name[count] = {			\
		[0 ... (count) - 1] = STATIC_KEY_TRUE_INIT,	\
	}

#define DEFINE_STATIC_KEY_ARRAY_FALSE(name, count)		\
	struct static_key_false name[count] = {			\
		[0 ... (count) - 1] = STATIC_KEY_FALSE_INIT,	\
	}

#define _DEFINE_STATIC_KEY_1(name)	DEFINE_STATIC_KEY_TRUE(name)
#define _DEFINE_STATIC_KEY_0(name)	DEFINE_STATIC_KEY_FALSE(name)
#define DEFINE_STATIC_KEY_MAYBE(cfg, name)			\
	__PASTE(_DEFINE_STATIC_KEY_, IS_ENABLED(cfg))(name)

#define _DEFINE_STATIC_KEY_RO_1(name)	DEFINE_STATIC_KEY_TRUE_RO(name)
#define _DEFINE_STATIC_KEY_RO_0(name)	DEFINE_STATIC_KEY_FALSE_RO(name)
#define DEFINE_STATIC_KEY_MAYBE_RO(cfg, name)			\
	__PASTE(_DEFINE_STATIC_KEY_RO_, IS_ENABLED(cfg))(name)

#define _DECLARE_STATIC_KEY_1(name)	DECLARE_STATIC_KEY_TRUE(name)
#define _DECLARE_STATIC_KEY_0(name)	DECLARE_STATIC_KEY_FALSE(name)
#define DECLARE_STATIC_KEY_MAYBE(cfg, name)			\
	__PASTE(_DECLARE_STATIC_KEY_, IS_ENABLED(cfg))(name)

extern bool ____wrong_branch_error(void);

#define static_key_enabled(x)							\
({										\
	if (!__builtin_types_compatible_p(typeof(*x), struct static_key) &&	\
	    !__builtin_types_compatible_p(typeof(*x), struct static_key_true) &&\
	    !__builtin_types_compatible_p(typeof(*x), struct static_key_false))	\
		____wrong_branch_error();					\
	static_key_count((struct static_key *)x) > 0;				\
})
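
/*
 * Illustrative only: static_key_enabled() reads the current state without
 * generating a patched branch site, which suits slow paths such as a
 * debugfs "show" handler. my_key and my_status_show() are made-up names
 * for this sketch:
 *
 *	DEFINE_STATIC_KEY_FALSE(my_key);
 *
 *	static int my_status_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "feature: %s\n",
 *			   static_key_enabled(&my_key) ? "on" : "off");
 *		return 0;
 *	}
 */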

#ifdef CONFIG_JUMP_LABEL

/*
 * Combine the right initial value (type) with the right branch order
 * to generate the desired result.
 *
 * type\branch |      likely (1)       |     unlikely (0)
 * ------------+-----------------------+------------------
 *             |                       |
 *  true (1)   |     ...               |     ...
 *             |     NOP               |     JMP L
 *             |     <br-stmts>        |  1: ...
 *             |  L: ...               |
 *             |                       |
 *             |                       |  L: <br-stmts>
 *             |                       |     jmp 1b
 *             |                       |
 * ------------+-----------------------+------------------
 *             |                       |
 *  false (0)  |     ...               |     ...
 *             |     JMP L             |     NOP
 *             |     <br-stmts>        |  1: ...
 *             |  L: ...               |
 *             |                       |
 *             |                       |  L: <br-stmts>
 *             |                       |     jmp 1b
 *             |                       |
 * ------------+-----------------------+------------------
 *
 * The initial value is encoded in the LSB of static_key::entries,
 * type: 0 = false, 1 = true.
 *
 * The branch type is encoded in the LSB of jump_entry::key,
 * branch: 0 = unlikely, 1 = likely.
 *
 * This gives the following logic table:
 *
 *   enabled   type    branch  |  instruction
 * ----------------------------+--------------
 *      0       0       0      |     NOP
 *      0       0       1      |     JMP
 *      0       1       0      |     NOP
 *      0       1       1      |     JMP
 *
 *      1       0       0      |     JMP
 *      1       0       1      |     NOP
 *      1       1       0      |     JMP
 *      1       1       1      |     NOP
 *
 * Which gives the following functions:
 *
 *   dynamic: instruction = enabled ^ branch
 *   static:  instruction = type ^ branch
 *
 * See jump_label_type() / jump_label_init_type().
 */

#define static_branch_likely(x)							\
({										\
	bool branch;								\
	if (__builtin_types_compatible_p(typeof(*x), struct static_key_true))	\
		branch = !arch_static_branch(&(x)->key, true);			\
	else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
		branch = !arch_static_branch_jump(&(x)->key, true);		\
	else									\
		branch = ____wrong_branch_error();				\
	likely_notrace(branch);							\
})

#define static_branch_unlikely(x)						\
({										\
	bool branch;								\
	if (__builtin_types_compatible_p(typeof(*x), struct static_key_true))	\
		branch = arch_static_branch_jump(&(x)->key, false);		\
	else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
		branch = arch_static_branch(&(x)->key, false);			\
	else									\
		branch = ____wrong_branch_error();				\
	unlikely_notrace(branch);						\
})

#else /* !CONFIG_JUMP_LABEL */

#define static_branch_likely(x)		likely_notrace(static_key_enabled(&(x)->key))
#define static_branch_unlikely(x)	unlikely_notrace(static_key_enabled(&(x)->key))

#endif /* CONFIG_JUMP_LABEL */

#define static_branch_maybe(config, x)					\
	(IS_ENABLED(config) ? static_branch_likely(x)			\
			    : static_branch_unlikely(x))
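
/*
 * Illustrative sketch (not part of this header): static_branch_maybe() and
 * the DEFINE_STATIC_KEY_MAYBE() family let the default branch direction
 * follow a Kconfig option. CONFIG_MY_FEATURE_DEFAULT_ON, my_feature_key and
 * my_hot_path() below are made-up names for this example:
 *
 *	DEFINE_STATIC_KEY_MAYBE(CONFIG_MY_FEATURE_DEFAULT_ON, my_feature_key);
 *
 *	void my_hot_path(void)
 *	{
 *		if (static_branch_maybe(CONFIG_MY_FEATURE_DEFAULT_ON,
 *					&my_feature_key))
 *			do_extra_work();
 *	}
 *
 * With the config enabled the key is defined as a static_key_true and the
 * branch compiles as static_branch_likely(); with it disabled the key is a
 * static_key_false and the branch compiles as static_branch_unlikely().
 */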

/*
 * Advanced usage; refcount, branch is enabled when: count != 0
 */

#define static_branch_inc(x)		static_key_slow_inc(&(x)->key)
#define static_branch_dec(x)		static_key_slow_dec(&(x)->key)
#define static_branch_inc_cpuslocked(x)	static_key_slow_inc_cpuslocked(&(x)->key)
#define static_branch_dec_cpuslocked(x)	static_key_slow_dec_cpuslocked(&(x)->key)
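
/*
 * Example of the refcounted form (illustrative; tracepoint_key and the
 * register/unregister functions are made-up names): several independent
 * users can keep a branch enabled as long as at least one of them needs it.
 *
 *	DEFINE_STATIC_KEY_FALSE(tracepoint_key);
 *
 *	void my_probe_register(void)
 *	{
 *		static_branch_inc(&tracepoint_key);	// 0 -> 1 patches the branch
 *	}
 *
 *	void my_probe_unregister(void)
 *	{
 *		static_branch_dec(&tracepoint_key);	// 1 -> 0 patches it back
 *	}
 *
 * Only the 0 <-> 1 transitions modify code; further increments and
 * decrements just adjust the count.
 */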

/*
 * Normal usage; boolean enable/disable.
 */

#define static_branch_enable(x)			static_key_enable(&(x)->key)
#define static_branch_disable(x)		static_key_disable(&(x)->key)
#define static_branch_enable_cpuslocked(x)	static_key_enable_cpuslocked(&(x)->key)
#define static_branch_disable_cpuslocked(x)	static_key_disable_cpuslocked(&(x)->key)

#endif /* __ASSEMBLY__ */

#endif /* _LINUX_JUMP_LABEL_H */