xref: /linux/kernel/jump_label.c (revision a594533df0f6ca391da003f43d53b336a2d23ffa)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <asm/sections.h>

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	/*
	 * Entries are sorted by key.
	 */
	if (jump_entry_key(jea) < jump_entry_key(jeb))
		return -1;

	if (jump_entry_key(jea) > jump_entry_key(jeb))
		return 1;

	/*
	 * In batching mode, entries should also be sorted by their code
	 * address within the already key-sorted list, enabling a bsearch
	 * of the vector.
	 */
	if (jump_entry_code(jea) < jump_entry_code(jeb))
		return -1;

	if (jump_entry_code(jea) > jump_entry_code(jeb))
		return 1;

	return 0;
}

static void jump_label_swap(void *a, void *b, int size)
{
	long delta = (unsigned long)a - (unsigned long)b;
	struct jump_entry *jea = a;
	struct jump_entry *jeb = b;
	struct jump_entry tmp = *jea;

	jea->code	= jeb->code - delta;
	jea->target	= jeb->target - delta;
	jea->key	= jeb->key - delta;

	jeb->code	= tmp.code + delta;
	jeb->target	= tmp.target + delta;
	jeb->key	= tmp.key + delta;
}
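
/*
 * Editorial note on the swap above: with CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE,
 * each jump_entry field stores an offset relative to the field's own address,
 * so an entry that moves by 'delta' bytes must have every stored offset
 * adjusted the other way. A worked check: if jeb encodes address X as
 * X - &jeb->code, then after the copy jea holds (X - &jeb->code) - delta, and
 * decoding with &jea->code = &jeb->code + delta again yields X.
 */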

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;
	void *swapfn = NULL;

	if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
		swapfn = jump_label_swap;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h, which is problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !CONFIG_JUMP_LABEL case, it's OK
 * to have it be a function here. Similarly, for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for CONFIG_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 *  static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);
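
/*
 * Illustrative sketch (editorial, not from this file): while the first
 * static_key_slow_inc() is still patching code, key->enabled transiently
 * holds -1, yet the key must already report as enabled:
 *
 *	atomic_set(&key->enabled, -1);
 *	n = static_key_count(key);		// returns 1, not -1
 *	enabled = static_key_enabled(key);	// true
 */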

void static_key_slow_inc_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process.  At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().  Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (int v = atomic_read(&key->enabled); v > 0; )
		if (likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)))
			return;

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * Ensure that if the above cmpxchg loop observes our positive
		 * value, it must also observe all the text changes.
		 */
		atomic_set_release(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}

void static_key_slow_inc(struct static_key *key)
{
	cpus_read_lock();
	static_key_slow_inc_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
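
/*
 * Usage sketch (hypothetical names, editorial addition): the inc/dec API is
 * a reference count, so nested users pair increments with decrements and the
 * text is only patched on the 0 <-> 1 transitions:
 *
 *	static DEFINE_STATIC_KEY_FALSE(net_feature_key);
 *
 *	void feature_register(void)
 *	{
 *		static_branch_inc(&net_feature_key);	// wraps static_key_slow_inc()
 *	}
 *
 *	void feature_unregister(void)
 *	{
 *		static_branch_dec(&net_feature_key);	// wraps static_key_slow_dec()
 *	}
 */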

void static_key_enable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) > 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * See static_key_slow_inc().
		 */
		atomic_set_release(&key->enabled, 1);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
	cpus_read_lock();
	static_key_enable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}

	jump_label_lock();
	if (atomic_cmpxchg(&key->enabled, 1, 0))
		jump_label_update(key);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
	cpus_read_lock();
	static_key_disable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);
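
/*
 * Usage sketch (hypothetical name, editorial addition): unlike the counted
 * inc/dec above, enable/disable set an absolute boolean state and are
 * idempotent:
 *
 *	static DEFINE_STATIC_KEY_FALSE(debug_key);
 *
 *	static_branch_enable(&debug_key);	// patch sites to the true state
 *	static_branch_enable(&debug_key);	// no-op, count stays at 1
 *	static_branch_disable(&debug_key);	// patch back to the false state
 */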

static bool static_key_slow_try_dec(struct static_key *key)
{
	int val;

	val = atomic_fetch_add_unless(&key->enabled, -1, 1);
	if (val == 1)
		return false;

	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	WARN(val < 0, "jump label: negative count!\n");
	return true;
}

static void __static_key_slow_dec_cpuslocked(struct static_key *key)
{
	lockdep_assert_cpus_held();

	if (static_key_slow_try_dec(key))
		return;

	jump_label_lock();
	if (atomic_dec_and_test(&key->enabled))
		jump_label_update(key);
	jump_label_unlock();
}

static void __static_key_slow_dec(struct static_key *key)
{
	cpus_read_lock();
	__static_key_slow_dec_cpuslocked(key);
	cpus_read_unlock();
}

void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key);
}
EXPORT_SYMBOL_GPL(jump_label_update_timeout);

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec_cpuslocked(key);
}

void __static_key_slow_dec_deferred(struct static_key *key,
				    struct delayed_work *work,
				    unsigned long timeout)
{
	STATIC_KEY_CHECK_USE(key);

	if (static_key_slow_try_dec(key))
		return;

	schedule_delayed_work(work, timeout);
}
EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);

void __static_key_deferred_flush(void *key, struct delayed_work *work)
{
	STATIC_KEY_CHECK_USE(key);
	flush_delayed_work(work);
}
EXPORT_SYMBOL_GPL(__static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE(key);
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
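
/*
 * Usage sketch (hypothetical names, editorial addition; see
 * linux/jump_label_ratelimit.h for the wrappers): a key that toggles at high
 * frequency can rate-limit the expensive disable path so text patching
 * happens at most once per timeout:
 *
 *	static struct static_key_deferred vcpu_feat_key = {
 *		.key = STATIC_KEY_INIT_FALSE,
 *	};
 *
 *	jump_label_rate_limit(&vcpu_feat_key, HZ);
 *	static_key_slow_inc(&vcpu_feat_key.key);
 *	...
 *	static_key_slow_dec_deferred(&vcpu_feat_key);
 */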

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (jump_entry_code(entry) <= (unsigned long)end &&
	    jump_entry_code(entry) + jump_entry_size(entry) > (unsigned long)start)
		return 1;

	return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end, bool init)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (init || !jump_entry_is_init(iter)) {
			if (addr_conflict(iter, start, end))
				return 1;
		}
		iter++;
	}

	return 0;
}

#ifndef arch_jump_label_transform_static
static void arch_jump_label_transform_static(struct jump_entry *entry,
					     enum jump_label_type type)
{
	/* nothing to do on most architectures */
}
#endif

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}

/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; we use accessor
 * functions which preserve these bits.
 */
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}
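
/*
 * Editorial illustration: the JUMP_TYPE_* bits preserved above live in the
 * two low bits of the pointer (values from linux/jump_label.h):
 *
 *	JUMP_TYPE_FALSE		0UL	initial branch direction is false
 *	JUMP_TYPE_TRUE		1UL	initial branch direction is true
 *	JUMP_TYPE_LINKED	2UL	key->next points to a static_key_mod list
 *	JUMP_TYPE_MASK		3UL
 *
 * Both jump_entry tables and static_key_mod allocations are at least
 * 4-byte aligned, so these bits are always free for tagging.
 */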

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}

static bool jump_label_can_update(struct jump_entry *entry, bool init)
{
	/*
	 * Cannot update code that was in an init text area.
	 */
	if (!init && jump_entry_is_init(entry))
		return false;

	if (!kernel_text_address(jump_entry_code(entry))) {
		/*
		 * This skips patching built-in __exit, which
		 * is part of init_section_contains() but is
		 * not part of kernel_text_address().
		 *
		 * Skipping built-in __exit is fine since it
		 * will never be executed.
		 */
		WARN_ONCE(!jump_entry_is_init(entry),
			  "can't patch jump_label at %pS",
			  (void *)jump_entry_code(entry));
		return false;
	}

	return true;
}

#ifndef HAVE_JUMP_LABEL_BATCH
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		if (jump_label_can_update(entry, init))
			arch_jump_label_transform(entry, jump_label_type(entry));
	}
}
#else
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {

		if (!jump_label_can_update(entry, init))
			continue;

		if (!arch_jump_label_transform_queue(entry, jump_label_type(entry))) {
			/*
			 * Queue is full: Apply the current queue and try again.
			 */
			arch_jump_label_transform_apply();
			BUG_ON(!arch_jump_label_transform_queue(entry, jump_label_type(entry)));
		}
	}
	arch_jump_label_transform_apply();
}
#endif

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;
		bool in_init;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		in_init = init_section_contains((void *)jump_entry_code(iter), 1);
		jump_entry_set_init(iter, in_init);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}

#ifdef CONFIG_MODULES

enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};
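
/*
 * Editorial sketch of a key in the JUMP_TYPE_LINKED state: key->next leads
 * to one static_key_mod per module using the key, and an element with
 * mod == NULL stands in for the built-in (vmlinux) entries when the key
 * itself lives in a module (see __jump_label_mod_update()):
 *
 *	static_key         static_key_mod       static_key_mod
 *	.next (tagged) --> .next            --> .next = NULL
 *	                   .entries (modA)      .entries (vmlinux)
 *	                   .mod = modA          .mod = NULL
 */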

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!static_key_linked(key));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;
	int ret;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	if (!try_module_get(mod))
		mod = NULL;
	preempt_enable();

	if (!mod)
		return 0;

	ret = __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end, mod->state == MODULE_STATE_COMING);

	module_put(mod);

	return ret;
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop,
				    m && m->state == MODULE_STATE_COMING);
	}
}

static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;
		bool in_init;

		in_init = within_module_init(jump_entry_code(iter), mod);
		jump_entry_set_init(iter, in_init);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module((unsigned long)key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop, true);
	}

	return 0;
}

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module((unsigned long)key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks whether the text address range between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * returns 1 if there is an overlap, 0 otherwise
 */
int jump_label_text_reserved(void *start, void *end)
{
	bool init = system_state < SYSTEM_RUNNING;
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end, init);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}
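
/*
 * Usage sketch (editorial; patch_my_text() is hypothetical): a kernel text
 * patcher refusing to touch ranges owned by jump labels, taking
 * jump_label_mutex as required above:
 *
 *	jump_label_lock();
 *	if (jump_label_text_reserved(addr, addr + len))
 *		ret = -EBUSY;
 *	else
 *		ret = patch_my_text(addr, insn, len);
 *	jump_label_unlock();
 */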

static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	bool init = system_state < SYSTEM_RUNNING;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod) {
		stop = mod->jump_entries + mod->num_jump_entries;
		init = mod->state == MODULE_STATE_COMING;
	}
	preempt_enable();
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop, init);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
early_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */
836