// SPDX-License-Identifier: GPL-2.0-only
/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <asm/sections.h>

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	/*
	 * Entries are sorted by key.
	 */
	if (jump_entry_key(jea) < jump_entry_key(jeb))
		return -1;

	if (jump_entry_key(jea) > jump_entry_key(jeb))
		return 1;

	/*
	 * In the batching mode, entries should also be sorted by the code
	 * inside the already sorted list of entries, enabling a bsearch in
	 * the vector.
	 */
	if (jump_entry_code(jea) < jump_entry_code(jeb))
		return -1;

	if (jump_entry_code(jea) > jump_entry_code(jeb))
		return 1;

	return 0;
}

static void jump_label_swap(void *a, void *b, int size)
{
	long delta = (unsigned long)a - (unsigned long)b;
	struct jump_entry *jea = a;
	struct jump_entry *jeb = b;
	struct jump_entry tmp = *jea;

	jea->code	= jeb->code - delta;
	jea->target	= jeb->target - delta;
	jea->key	= jeb->key - delta;

	jeb->code	= tmp.code + delta;
	jeb->target	= tmp.target + delta;
	jeb->key	= tmp.key + delta;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;
	void *swapfn = NULL;

	if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
		swapfn = jump_label_swap;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h and it's problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !CONFIG_JUMP_LABEL case it's ok
 * to have it be a function here. Similarly, for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for CONFIG_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 *  static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);

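/*
 * Illustrative usage of the API implemented in this file; the names below
 * ('my_key', hot_path(), do_rare_thing(), enable_my_feature()) are all
 * hypothetical, the macros are the ones declared in linux/jump_label.h:
 *
 *	static DEFINE_STATIC_KEY_FALSE(my_key);
 *
 *	void hot_path(void)
 *	{
 *		if (static_branch_unlikely(&my_key))
 *			do_rare_thing();
 *	}
 *
 *	void enable_my_feature(void)
 *	{
 *		static_branch_enable(&my_key);
 *	}
 *
 * Enabling the key patches every hot_path() branch site in place.
 */
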
/*
 * static_key_fast_inc_not_disabled - adds a user for a static key
 * @key: static key that must be already enabled
 *
 * The caller must make sure that the static key can't get disabled while
 * in this function. It doesn't patch jump labels, only adds a user to
 * an already enabled static key.
 *
 * Returns true if the increment was done. Unlike refcount_t the ref counter
 * is not saturated, but will fail to increment on overflow.
 */
bool static_key_fast_inc_not_disabled(struct static_key *key)
{
	int v;

	STATIC_KEY_CHECK_USE(key);
	/*
	 * Negative key->enabled has a special meaning: it sends
	 * static_key_slow_inc/dec() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().
	 *
	 * The INT_MAX overflow condition is either used by the networking
	 * code to reset or detected in the slow path of
	 * static_key_slow_inc_cpuslocked().
	 */
	v = atomic_read(&key->enabled);
	do {
		if (v <= 0 || v == INT_MAX)
			return false;
	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));

	return true;
}
EXPORT_SYMBOL_GPL(static_key_fast_inc_not_disabled);

bool static_key_slow_inc_cpuslocked(struct static_key *key)
{
	lockdep_assert_cpus_held();

	/*
	 * Careful if we get concurrent static_key_slow_inc/dec() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process. At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 */
	if (static_key_fast_inc_not_disabled(key))
		return true;

	guard(mutex)(&jump_label_mutex);
	/* Try to mark it as 'enabling in progress'. */
	if (!atomic_cmpxchg(&key->enabled, 0, -1)) {
		jump_label_update(key);
		/*
		 * Ensure that when static_key_fast_inc_not_disabled() or
		 * static_key_dec_not_one() observe the positive value,
		 * they must also observe all the text changes.
		 */
		atomic_set_release(&key->enabled, 1);
	} else {
		/*
		 * While holding the mutex this should never observe
		 * anything else than a value >= 1 and succeed.
		 */
		if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key)))
			return false;
	}
	return true;
}

bool static_key_slow_inc(struct static_key *key)
{
	bool ret;

	cpus_read_lock();
	ret = static_key_slow_inc_cpuslocked(key);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);

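/*
 * Illustrative example (hypothetical names): users normally reach the slow
 * inc/dec above through the static_branch_inc()/static_branch_dec() wrappers
 * and keep them strictly paired, e.g.
 *
 *	static DEFINE_STATIC_KEY_FALSE(feature_key);
 *
 *	void feature_user_add(void) { static_branch_inc(&feature_key); }
 *	void feature_user_del(void) { static_branch_dec(&feature_key); }
 *
 * The first increment patches the branch sites, the last decrement patches
 * them back.
 */
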
void static_key_enable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) > 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * See static_key_slow_inc().
		 */
		atomic_set_release(&key->enabled, 1);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
	cpus_read_lock();
	static_key_enable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}

	jump_label_lock();
	if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
		jump_label_update(key);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
	cpus_read_lock();
	static_key_disable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);

static bool static_key_dec_not_one(struct static_key *key)
{
	int v;

	/*
	 * Go into the slow path if key::enabled is less than or equal to
	 * one. One is valid to shut down the key, anything less than one
	 * is an imbalance, which is handled at the call site.
	 *
	 * That includes the special case of '-1' which is set in
	 * static_key_slow_inc_cpuslocked(), but that's harmless as it is
	 * fully serialized in the slow path below. By the time this task
	 * acquires the jump label lock the value is back to one and the
	 * retry under the lock must succeed.
	 */
	v = atomic_read(&key->enabled);
	do {
		/*
		 * Warn about the '-1' case though; since that means a
		 * decrement is concurrent with a first (0->1) increment. IOW
		 * people are trying to disable something that wasn't yet fully
		 * enabled. This suggests an ordering problem on the user side.
		 */
		WARN_ON_ONCE(v < 0);

		/*
		 * Warn about underflow, and lie about success in an attempt to
		 * not make things worse.
		 */
		if (WARN_ON_ONCE(v == 0))
			return true;

		if (v <= 1)
			return false;
	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v - 1)));

	return true;
}

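/*
 * Illustrative summary of the values key->enabled can hold, as handled by
 * the helpers above (derived from the code, not a separate contract):
 *
 *	-1	first 0 -> 1 transition in progress, text being patched
 *	 0	key disabled
 *	 N > 0	key enabled, with N users
 *
 * static_key_dec_not_one() resolves every N > 1 decrement lock-free and
 * leaves the final 1 -> 0 transition to the mutex-protected slow path below.
 */
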
static void __static_key_slow_dec_cpuslocked(struct static_key *key)
{
	int val;

	lockdep_assert_cpus_held();

	if (static_key_dec_not_one(key))
		return;

	guard(mutex)(&jump_label_mutex);
	val = atomic_read(&key->enabled);
	/*
	 * It should be impossible to observe -1 with jump_label_mutex held,
	 * see static_key_slow_inc_cpuslocked().
	 */
	if (WARN_ON_ONCE(val == -1))
		return;
	/*
	 * Cannot already be 0, something went sideways.
	 */
	if (WARN_ON_ONCE(val == 0))
		return;

	if (atomic_dec_and_test(&key->enabled))
		jump_label_update(key);
}

static void __static_key_slow_dec(struct static_key *key)
{
	cpus_read_lock();
	__static_key_slow_dec_cpuslocked(key);
	cpus_read_unlock();
}

void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key);
}
EXPORT_SYMBOL_GPL(jump_label_update_timeout);

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec_cpuslocked(key);
}

void __static_key_slow_dec_deferred(struct static_key *key,
				    struct delayed_work *work,
				    unsigned long timeout)
{
	STATIC_KEY_CHECK_USE(key);

	if (static_key_dec_not_one(key))
		return;

	schedule_delayed_work(work, timeout);
}
EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);

void __static_key_deferred_flush(void *key, struct delayed_work *work)
{
	STATIC_KEY_CHECK_USE(key);
	flush_delayed_work(work);
}
EXPORT_SYMBOL_GPL(__static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE(key);
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (jump_entry_code(entry) <= (unsigned long)end &&
	    jump_entry_code(entry) + jump_entry_size(entry) > (unsigned long)start)
		return 1;

	return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end, bool init)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (init || !jump_entry_is_init(iter)) {
			if (addr_conflict(iter, start, end))
				return 1;
		}
		iter++;
	}

	return 0;
}

#ifndef arch_jump_label_transform_static
static void arch_jump_label_transform_static(struct jump_entry *entry,
					      enum jump_label_type type)
{
	/* nothing to do on most architectures */
}
#endif

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}

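/*
 * Illustrative bit layout used by the accessors above, assuming the
 * JUMP_TYPE_* constants from linux/jump_label.h:
 *
 *	bit 0 (JUMP_TYPE_TRUE)		initial branch direction of the key
 *	bit 1 (JUMP_TYPE_LINKED)	pointer is a static_key_mod list rather
 *					than a jump_entry table
 *	remaining bits			the pointer itself
 *
 * e.g. (entries | JUMP_TYPE_TRUE) reads as "points at a jump_entry table,
 * key defaults to true"; masking with ~JUMP_TYPE_MASK recovers the pointer.
 */
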
/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; we use an access
 * function which preserves these bits.
 */
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}

static bool jump_label_can_update(struct jump_entry *entry, bool init)
{
	/*
	 * Cannot update code that was in an init text area.
	 */
	if (!init && jump_entry_is_init(entry))
		return false;

	if (!kernel_text_address(jump_entry_code(entry))) {
		/*
		 * This skips patching built-in __exit, which
		 * is part of init_section_contains() but is
		 * not part of kernel_text_address().
		 *
		 * Skipping built-in __exit is fine since it
		 * will never be executed.
		 */
		WARN_ONCE(!jump_entry_is_init(entry),
			  "can't patch jump_label at %pS",
			  (void *)jump_entry_code(entry));
		return false;
	}

	return true;
}

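/*
 * The batched variant of __jump_label_update() below relies on two arch
 * hooks; a sketch of the contract as used here (the implementation lives in
 * the architecture code, e.g. arch/x86/kernel/jump_label.c):
 *
 *	arch_jump_label_transform_queue()  - queue one entry for patching,
 *					     returns false when the queue is full
 *	arch_jump_label_transform_apply()  - patch everything queued so far,
 *					     paying the text-patching
 *					     synchronization once per batch
 */
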
#ifndef HAVE_JUMP_LABEL_BATCH
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		if (jump_label_can_update(entry, init))
			arch_jump_label_transform(entry, jump_label_type(entry));
	}
}
#else
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {

		if (!jump_label_can_update(entry, init))
			continue;

		if (!arch_jump_label_transform_queue(entry, jump_label_type(entry))) {
			/*
			 * Queue is full: Apply the current queue and try again.
			 */
			arch_jump_label_transform_apply();
			BUG_ON(!arch_jump_label_transform_queue(entry, jump_label_type(entry)));
		}
	}
	arch_jump_label_transform_apply();
}
#endif

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	if (static_key_initialized)
		return;

	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;
		bool in_init;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		in_init = init_section_contains((void *)jump_entry_code(iter), 1);
		jump_entry_set_init(iter, in_init);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}

static inline bool static_key_sealed(struct static_key *key)
{
	return (key->type & JUMP_TYPE_LINKED) && !(key->type & ~JUMP_TYPE_MASK);
}

static inline void static_key_seal(struct static_key *key)
{
	unsigned long type = key->type & JUMP_TYPE_TRUE;
	key->type = JUMP_TYPE_LINKED | type;
}

void jump_label_init_ro(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct jump_entry *iter;

	if (WARN_ON_ONCE(!static_key_initialized))
		return;

	cpus_read_lock();
	jump_label_lock();

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk = jump_entry_key(iter);

		if (!is_kernel_ro_after_init((unsigned long)iterk))
			continue;

		if (static_key_sealed(iterk))
			continue;

		static_key_seal(iterk);
	}

	jump_label_unlock();
	cpus_read_unlock();
}

#ifdef CONFIG_MODULES

enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!static_key_linked(key));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;
	int ret;

	scoped_guard(rcu) {
		mod = __module_text_address((unsigned long)start);
		WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
		if (!try_module_get(mod))
			mod = NULL;
	}
	if (!mod)
		return 0;

	ret = __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end, mod->state == MODULE_STATE_COMING);

	module_put(mod);

	return ret;
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop,
				    m && m->state == MODULE_STATE_COMING);
	}
}

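/*
 * Illustrative only: for a built-in key used by two loaded modules A and B
 * (B loaded last), the chain walked above looks roughly like
 *
 *	key->next -> { B, B's entries } -> { A, A's entries }
 *		  -> { NULL, built-in entries }
 *
 * i.e. newest module first, with the defining (built-in) table at the tail.
 */
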
static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;
		bool in_init;

		in_init = within_module_init(jump_entry_code(iter), mod);
		jump_entry_set_init(iter, in_init);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module((unsigned long)key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}

		/*
		 * If the key was sealed at init, then there's no need to keep a
		 * reference to its module entries - just patch them now and be
		 * done with it.
		 */
		if (static_key_sealed(key))
			goto do_poke;

		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			scoped_guard(rcu)
				jlm2->mod = __module_address((unsigned long)key);

			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
do_poke:
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop, true);
	}

	return 0;
}

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module((unsigned long)key, mod))
			continue;

		/* No @jlm allocated because key was sealed at init. */
		if (static_key_sealed(key))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

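/*
 * Illustrative caller (hypothetical) of jump_label_text_reserved() below:
 * code that rewrites kernel text would typically check the range first while
 * holding jump_label_lock(), and refuse to patch over a jump label site, e.g.
 *
 *	jump_label_lock();
 *	if (jump_label_text_reserved(addr, addr + size))
 *		ret = -EBUSY;
 *	jump_label_unlock();
 */
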
/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * returns 1 if there is an overlap, 0 otherwise
 */
int jump_label_text_reserved(void *start, void *end)
{
	bool init = system_state < SYSTEM_RUNNING;
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end, init);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}

static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	bool init = system_state < SYSTEM_RUNNING;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	scoped_guard(rcu) {
		mod = __module_address((unsigned long)key);
		if (mod) {
			stop = mod->jump_entries + mod->num_jump_entries;
			init = mod->state == MODULE_STATE_COMING;
		}
	}
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop, init);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
early_initcall(jump_label_test);
#endif /* STATIC_KEYS_SELFTEST */