// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/completion.h>
#include <linux/memory.h>
#include <linux/rcupdate.h>
#include <asm/cacheflush.h>
#include "core.h"
#include "patch.h"
#include "state.h"
#include "transition.h"

/*
 * klp_mutex is a coarse lock which serializes access to klp data. All
 * accesses to klp-related variables and structures must have mutex protection,
 * except within the following functions which carefully avoid the need for it:
 *
 * - klp_ftrace_handler()
 * - klp_update_patch_state()
 * - __klp_sched_try_switch()
 */
DEFINE_MUTEX(klp_mutex);

/*
 * Actively used patches: enabled or in transition. Note that replaced
 * or disabled patches are not listed even though the related kernel
 * module can still be loaded.
 */
LIST_HEAD(klp_patches);

static struct kobject *klp_root_kobj;

static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	rcu_read_lock_sched();
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not interfere with the work of klp_module_coming() and
	 * klp_module_going(). Note that the patch might still be needed
	 * before klp_module_going() is called. Module functions can be
	 * called even in the GOING state until mod->exit() finishes. This
	 * is especially important for patches that modify the semantics
	 * of the functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	rcu_read_unlock_sched();
}
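
/*
 * Illustrative sketch ("ext4" and the function arrays are hypothetical):
 * a patch declares one klp_object per patched object, with a NULL name
 * denoting vmlinux. klp_is_module() and klp_find_object_module() above
 * key off exactly this convention:
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.funcs = vmlinux_funcs,		(.name == NULL => vmlinux)
 *		},
 *		{
 *			.name = "ext4",			(a module object)
 *			.funcs = ext4_funcs,
 *		},
 *		{ }
 *	};
 */
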
static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}

static struct klp_func *klp_find_func(struct klp_object *obj,
				      struct klp_func *old_func)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if ((strcmp(old_func->old_name, func->old_name) == 0) &&
		    (old_func->old_sympos == func->old_sympos)) {
			return func;
		}
	}

	return NULL;
}

static struct klp_object *klp_find_object(struct klp_patch *patch,
					  struct klp_object *old_obj)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj) {
		if (klp_is_module(old_obj)) {
			if (klp_is_module(obj) &&
			    strcmp(old_obj->name, obj->name) == 0) {
				return obj;
			}
		} else if (!klp_is_module(obj)) {
			return obj;
		}
	}

	return NULL;
}

struct klp_find_arg {
	const char *name;
	unsigned long addr;
	unsigned long count;
	unsigned long pos;
};

static int klp_match_callback(void *data, unsigned long addr)
{
	struct klp_find_arg *args = data;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found for the desired position
	 * or the position is not defined for a non-unique symbol.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}

static int klp_find_callback(void *data, const char *name, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if (strcmp(args->name, name))
		return 0;

	return klp_match_callback(data, addr);
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	if (objname)
		module_kallsyms_on_each_symbol(objname, klp_find_callback, &args);
	else
		kallsyms_on_each_match_symbol(klp_match_callback, name, &args);

	/*
	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
	 * otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}
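
/*
 * Sympos semantics, illustrated (object and symbol names here are
 * hypothetical): if "cleanup" occurs twice in the "ext4" symbol table,
 * then
 *
 *	klp_find_object_symbol("ext4", "cleanup", 0, &addr);	returns -EINVAL (ambiguous)
 *	klp_find_object_symbol("ext4", "cleanup", 2, &addr);	addr = 2nd occurrence
 *
 * A sympos of 0 demands a unique symbol; a nonzero sympos selects the
 * nth occurrence as enumerated by kallsyms.
 */
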
static int klp_resolve_symbols(Elf_Shdr *sechdrs, const char *strtab,
			       unsigned int symndx, Elf_Shdr *relasec,
			       const char *sec_objname)
{
	int i, cnt, ret;
	char sym_objname[MODULE_NAME_LEN];
	char sym_name[KSYM_NAME_LEN];
	Elf_Rela *relas;
	Elf_Sym *sym;
	unsigned long sympos, addr;
	bool sym_vmlinux;
	bool sec_vmlinux = !strcmp(sec_objname, "vmlinux");

	/*
	 * Since the field widths for sym_objname and sym_name in the sscanf()
	 * call are hard-coded and correspond to MODULE_NAME_LEN and
	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
	 * and KSYM_NAME_LEN have the values we expect them to have.
	 *
	 * Because the value of MODULE_NAME_LEN can differ among architectures,
	 * we use the smallest/strictest upper bound possible (56, based on
	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
	 */
	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 512);

	relas = (Elf_Rela *) relasec->sh_addr;
	/* For each rela in this klp relocation section */
	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
		sym = (Elf_Sym *)sechdrs[symndx].sh_addr + ELF_R_SYM(relas[i].r_info);
		if (sym->st_shndx != SHN_LIVEPATCH) {
			pr_err("symbol %s is not marked as a livepatch symbol\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* Format: .klp.sym.sym_objname.sym_name,sympos */
		cnt = sscanf(strtab + sym->st_name,
			     ".klp.sym.%55[^.].%511[^,],%lu",
			     sym_objname, sym_name, &sympos);
		if (cnt != 3) {
			pr_err("symbol %s has an incorrectly formatted name\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		sym_vmlinux = !strcmp(sym_objname, "vmlinux");

		/*
		 * Prevent module-specific KLP rela sections from referencing
		 * vmlinux symbols. This helps prevent ordering issues with
		 * module special section initializations. Presumably such
		 * symbols are exported and normal relas can be used instead.
		 */
		if (!sec_vmlinux && sym_vmlinux) {
			pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section\n",
			       sym_name);
			return -EINVAL;
		}

		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
		ret = klp_find_object_symbol(sym_vmlinux ? NULL : sym_objname,
					     sym_name, sympos, &addr);
		if (ret)
			return ret;

		sym->st_value = addr;
	}

	return 0;
}
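
/*
 * Worked example of the name format above (the symbol is hypothetical):
 * a livepatch symbol named
 *
 *	.klp.sym.ext4.ext4_sync_fs,1
 *
 * is split by the sscanf() into sym_objname = "ext4",
 * sym_name = "ext4_sync_fs" and sympos = 1, and then resolved to the
 * address of the first "ext4_sync_fs" occurrence in module "ext4".
 */
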
void __weak clear_relocate_add(Elf_Shdr *sechdrs,
			       const char *strtab,
			       unsigned int symindex,
			       unsigned int relsec,
			       struct module *me)
{
}

/*
 * At a high level, there are two types of klp relocation sections: those which
 * reference symbols which live in vmlinux; and those which reference symbols
 * which live in other modules. This function is called for both types:
 *
 * 1) When a klp module itself loads, the module code calls this function to
 *    write vmlinux-specific klp relocations (.klp.rela.vmlinux.* sections).
 *    These relocations are written to the klp module text to allow the patched
 *    code/data to reference unexported vmlinux symbols. They're written as
 *    early as possible to ensure that other module init code (e.g.,
 *    jump_label_apply_nops) can access any unexported vmlinux symbols which
 *    might be referenced by the klp module's special sections.
 *
 * 2) When a to-be-patched module loads -- or is already loaded when a
 *    corresponding klp module loads -- klp code calls this function to write
 *    module-specific klp relocations (.klp.rela.{module}.* sections). These
 *    are written to the klp module text to allow the patched code/data to
 *    reference symbols which live in the to-be-patched module or one of its
 *    module dependencies. Exported symbols are supported, in addition to
 *    unexported symbols, in order to enable late module patching, which allows
 *    the to-be-patched module to be loaded and patched sometime *after* the
 *    klp module is loaded.
 */
static int klp_write_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
				    const char *shstrtab, const char *strtab,
				    unsigned int symndx, unsigned int secndx,
				    const char *objname, bool apply)
{
	int cnt, ret;
	char sec_objname[MODULE_NAME_LEN];
	Elf_Shdr *sec = sechdrs + secndx;

	/*
	 * Format: .klp.rela.sec_objname.section_name
	 * See comment in klp_resolve_symbols() for an explanation
	 * of the selected field width value.
	 */
	cnt = sscanf(shstrtab + sec->sh_name, ".klp.rela.%55[^.]",
		     sec_objname);
	if (cnt != 1) {
		pr_err("section %s has an incorrectly formatted name\n",
		       shstrtab + sec->sh_name);
		return -EINVAL;
	}

	if (strcmp(objname ? objname : "vmlinux", sec_objname))
		return 0;

	if (apply) {
		ret = klp_resolve_symbols(sechdrs, strtab, symndx,
					  sec, sec_objname);
		if (ret)
			return ret;

		return apply_relocate_add(sechdrs, strtab, symndx, secndx, pmod);
	}

	clear_relocate_add(sechdrs, strtab, symndx, secndx, pmod);
	return 0;
}

int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
			     const char *shstrtab, const char *strtab,
			     unsigned int symndx, unsigned int secndx,
			     const char *objname)
{
	return klp_write_section_relocs(pmod, sechdrs, shstrtab, strtab, symndx,
					secndx, objname, true);
}
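
/*
 * Worked example of the section name format (the section is hypothetical):
 * a rela section named ".klp.rela.ext4.text.unlikely" yields
 * sec_objname = "ext4" (the %55[^.] conversion stops at the first '.'),
 * so its relocations are written only when patching the "ext4" object.
 */
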
/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/transition
 * /sys/kernel/livepatch/<patch>/force
 * /sys/kernel/livepatch/<patch>/replace
 * /sys/kernel/livepatch/<patch>/stack_order
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/patched
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */
static int __klp_disable_patch(struct klp_patch *patch);

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool enabled;

	ret = kstrtobool(buf, &enabled);
	if (ret)
		return ret;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (patch->enabled == enabled) {
		/* already in requested state */
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Allow reversing a pending transition in either direction. It might
	 * be necessary to complete the transition without forcing and
	 * breaking the system integrity.
	 *
	 * Do not allow a disabled patch to be re-enabled.
	 */
	if (patch == klp_transition_patch)
		klp_reverse_transition();
	else if (!enabled)
		ret = __klp_disable_patch(patch);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&klp_mutex);

	if (ret)
		return ret;
	return count;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return sysfs_emit(buf, "%d\n", patch->enabled);
}

static ssize_t transition_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return sysfs_emit(buf, "%d\n", patch == klp_transition_patch);
}

static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool val;

	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;

	if (!val)
		return count;

	mutex_lock(&klp_mutex);

	patch = container_of(kobj, struct klp_patch, kobj);
	if (patch != klp_transition_patch) {
		mutex_unlock(&klp_mutex);
		return -EINVAL;
	}

	klp_force_transition();

	mutex_unlock(&klp_mutex);

	return count;
}

static ssize_t replace_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return sysfs_emit(buf, "%d\n", patch->replace);
}

static ssize_t stack_order_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch, *this_patch;
	int stack_order = 0;

	this_patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	klp_for_each_patch(patch) {
		stack_order++;
		if (patch == this_patch)
			break;
	}

	mutex_unlock(&klp_mutex);

	return sysfs_emit(buf, "%d\n", stack_order);
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
static struct kobj_attribute replace_kobj_attr = __ATTR_RO(replace);
static struct kobj_attribute stack_order_kobj_attr = __ATTR_RO(stack_order);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	&transition_kobj_attr.attr,
	&force_kobj_attr.attr,
	&replace_kobj_attr.attr,
	&stack_order_kobj_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(klp_patch);
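
/*
 * Illustrative use of the interface from userspace (the patch name
 * "livepatch_sample" is hypothetical):
 *
 *	cat /sys/kernel/livepatch/livepatch_sample/enabled	(-> 1)
 *	echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled	(disable)
 *	echo 1 > /sys/kernel/livepatch/livepatch_sample/force	(force a stuck
 *								 transition; last resort)
 */
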
static ssize_t patched_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_object *obj;

	obj = container_of(kobj, struct klp_object, kobj);
	return sysfs_emit(buf, "%d\n", obj->patched);
}

static struct kobj_attribute patched_kobj_attr = __ATTR_RO(patched);
static struct attribute *klp_object_attrs[] = {
	&patched_kobj_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(klp_object);

static void klp_free_object_dynamic(struct klp_object *obj)
{
	kfree(obj->name);
	kfree(obj);
}

static void klp_init_func_early(struct klp_object *obj,
				struct klp_func *func);
static void klp_init_object_early(struct klp_patch *patch,
				  struct klp_object *obj);

static struct klp_object *klp_alloc_object_dynamic(const char *name,
						   struct klp_patch *patch)
{
	struct klp_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (name) {
		obj->name = kstrdup(name, GFP_KERNEL);
		if (!obj->name) {
			kfree(obj);
			return NULL;
		}
	}

	klp_init_object_early(patch, obj);
	obj->dynamic = true;

	return obj;
}

static void klp_free_func_nop(struct klp_func *func)
{
	kfree(func->old_name);
	kfree(func);
}

static struct klp_func *klp_alloc_func_nop(struct klp_func *old_func,
					   struct klp_object *obj)
{
	struct klp_func *func;

	func = kzalloc(sizeof(*func), GFP_KERNEL);
	if (!func)
		return NULL;

	if (old_func->old_name) {
		func->old_name = kstrdup(old_func->old_name, GFP_KERNEL);
		if (!func->old_name) {
			kfree(func);
			return NULL;
		}
	}

	klp_init_func_early(obj, func);
	/*
	 * func->new_func is the same as func->old_func. These addresses are
	 * set when the object is loaded, see klp_init_object_loaded().
	 */
	func->old_sympos = old_func->old_sympos;
	func->nop = true;

	return func;
}

static int klp_add_object_nops(struct klp_patch *patch,
			       struct klp_object *old_obj)
{
	struct klp_object *obj;
	struct klp_func *func, *old_func;

	obj = klp_find_object(patch, old_obj);

	if (!obj) {
		obj = klp_alloc_object_dynamic(old_obj->name, patch);
		if (!obj)
			return -ENOMEM;
	}

	klp_for_each_func(old_obj, old_func) {
		func = klp_find_func(obj, old_func);
		if (func)
			continue;

		func = klp_alloc_func_nop(old_func, obj);
		if (!func)
			return -ENOMEM;
	}

	return 0;
}
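
/*
 * Why the nops exist, sketched with hypothetical patches: suppose an
 * installed patch P1 patches foo() and a cumulative (replace) patch P2
 * does not touch foo(). klp_add_object_nops() adds a dynamic nop entry
 * for foo() to P2, so the transition can atomically move every task off
 * P1's foo() replacement; the nop behaves exactly like the original
 * foo() and is discarded once the transition finishes (see
 * klp_discard_nops()).
 */
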
/*
 * Add 'nop' functions which simply return to the caller to run
 * the original function. The 'nop' functions are added to a
 * patch to facilitate a 'replace' mode.
 */
static int klp_add_nops(struct klp_patch *patch)
{
	struct klp_patch *old_patch;
	struct klp_object *old_obj;

	klp_for_each_patch(old_patch) {
		klp_for_each_object(old_patch, old_obj) {
			int err;

			err = klp_add_object_nops(patch, old_obj);
			if (err)
				return err;
		}
	}

	return 0;
}

static void klp_kobj_release_patch(struct kobject *kobj)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	complete(&patch->finish);
}

static const struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = klp_patch_groups,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
	struct klp_object *obj;

	obj = container_of(kobj, struct klp_object, kobj);

	if (obj->dynamic)
		klp_free_object_dynamic(obj);
}

static const struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = klp_object_groups,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
	struct klp_func *func;

	func = container_of(kobj, struct klp_func, kobj);

	if (func->nop)
		klp_free_func_nop(func);
}

static const struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func, *tmp_func;

	klp_for_each_func_safe(obj, func, tmp_func) {
		if (nops_only && !func->nop)
			continue;

		list_del(&func->node);
		kobject_put(&func->kobj);
	}
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func) {
		func->old_func = NULL;

		if (func->nop)
			func->new_func = NULL;
	}
}

static void __klp_free_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj, *tmp_obj;

	klp_for_each_object_safe(patch, obj, tmp_obj) {
		__klp_free_funcs(obj, nops_only);

		if (nops_only && !obj->dynamic)
			continue;

		list_del(&obj->node);
		kobject_put(&obj->kobj);
	}
}

static void klp_free_objects(struct klp_patch *patch)
{
	__klp_free_objects(patch, false);
}

static void klp_free_objects_dynamic(struct klp_patch *patch)
{
	__klp_free_objects(patch, true);
}

/*
 * This function implements the free operations that can be called safely
 * under klp_mutex.
 *
 * The operation must be completed by calling klp_free_patch_finish()
 * outside klp_mutex.
 */
static void klp_free_patch_start(struct klp_patch *patch)
{
	if (!list_empty(&patch->list))
		list_del(&patch->list);

	klp_free_objects(patch);
}

/*
 * This function implements the free part that must be called outside
 * klp_mutex.
 *
 * It must be called after klp_free_patch_start(), and it has to be the
 * last function accessing the livepatch structures when the patch gets
 * disabled.
 */
static void klp_free_patch_finish(struct klp_patch *patch)
{
	/*
	 * Avoid deadlock with enabled_store() sysfs callback by
	 * calling this outside klp_mutex. It is safe because
	 * this is called when the patch gets disabled and it
	 * cannot get enabled again.
	 */
	kobject_put(&patch->kobj);
	wait_for_completion(&patch->finish);

	/* Put the module after the last access to struct klp_patch. */
	if (!patch->forced)
		module_put(patch->mod);
}
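
/*
 * The two-step free protocol above, sketched (this mirrors the error path
 * of klp_enable_patch() below):
 *
 *	mutex_lock(&klp_mutex);
 *	klp_free_patch_start(patch);	(unlink + free objects, under lock)
 *	mutex_unlock(&klp_mutex);
 *	klp_free_patch_finish(patch);	(drop kobjects, wait, module_put)
 */
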
/*
 * The livepatch might be freed from the sysfs interface created by the
 * patch. This work item makes it possible to wait until the interface
 * is destroyed in a separate context.
 */
static void klp_free_patch_work_fn(struct work_struct *work)
{
	struct klp_patch *patch =
		container_of(work, struct klp_patch, free_work);

	klp_free_patch_finish(patch);
}

void klp_free_patch_async(struct klp_patch *patch)
{
	klp_free_patch_start(patch);
	schedule_work(&patch->free_work);
}

void klp_free_replaced_patches_async(struct klp_patch *new_patch)
{
	struct klp_patch *old_patch, *tmp_patch;

	klp_for_each_patch_safe(old_patch, tmp_patch) {
		if (old_patch == new_patch)
			return;
		klp_free_patch_async(old_patch);
	}
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	if (!func->old_name)
		return -EINVAL;

	/*
	 * NOPs get the address later. The patched module must be loaded,
	 * see klp_init_object_loaded().
	 */
	if (!func->new_func && !func->nop)
		return -EINVAL;

	if (strlen(func->old_name) >= KSYM_NAME_LEN)
		return -EINVAL;

	INIT_LIST_HEAD(&func->stack_node);
	func->patched = false;
	func->transition = false;

	/*
	 * The format for the sysfs directory is <function,sympos> where sympos
	 * is the nth occurrence of this symbol in kallsyms for the patched
	 * object. If the user selects 0 for old_sympos, then 1 will be used
	 * since a unique symbol will be the first occurrence.
	 */
	return kobject_add(&func->kobj, &obj->kobj, "%s,%lu",
			   func->old_name,
			   func->old_sympos ? func->old_sympos : 1);
}
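
/*
 * Example of the directory naming (the symbol is hypothetical): patching
 * the second of two static "probe" symbols with old_sympos == 2 creates
 * /sys/kernel/livepatch/<patch>/<object>/probe,2; for a unique symbol
 * with old_sympos == 0 the directory is named probe,1.
 */
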
static int klp_write_object_relocs(struct klp_patch *patch,
				   struct klp_object *obj,
				   bool apply)
{
	int i, ret;
	struct klp_modinfo *info = patch->mod->klp_info;

	for (i = 1; i < info->hdr.e_shnum; i++) {
		Elf_Shdr *sec = info->sechdrs + i;

		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
			continue;

		ret = klp_write_section_relocs(patch->mod, info->sechdrs,
					       info->secstrings,
					       patch->mod->core_kallsyms.strtab,
					       info->symndx, i, obj->name, apply);
		if (ret)
			return ret;
	}

	return 0;
}

static int klp_apply_object_relocs(struct klp_patch *patch,
				   struct klp_object *obj)
{
	return klp_write_object_relocs(patch, obj, true);
}

static void klp_clear_object_relocs(struct klp_patch *patch,
				    struct klp_object *obj)
{
	klp_write_object_relocs(patch, obj, false);
}

/* Parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (klp_is_module(obj)) {
		/*
		 * Only write module-specific relocations here
		 * (.klp.rela.{module}.*). vmlinux-specific relocations were
		 * written earlier during the initialization of the klp module
		 * itself.
		 */
		ret = klp_apply_object_relocs(patch, obj);
		if (ret)
			return ret;
	}

	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     (unsigned long *)&func->old_func);
		if (ret)
			return ret;

		ret = kallsyms_lookup_size_offset((unsigned long)func->old_func,
						  &func->old_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s'\n",
			       func->old_name);
			return -ENOENT;
		}

		if (func->nop)
			func->new_func = func->old_func;

		ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
						  &func->new_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s' replacement\n",
			       func->old_name);
			return -ENOENT;
		}
	}

	return 0;
}
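
/*
 * What the lookups above produce, sketched (the name is hypothetical):
 * for a func with .old_name = "foo", func->old_func ends up holding the
 * kallsyms address of the original foo(), and func->old_size/new_size
 * hold the sizes of the original and replacement functions; the
 * consistency model uses these ranges when checking task stacks for
 * to-be-patched functions.
 */
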
static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
		return -EINVAL;

	obj->patched = false;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_add(&obj->kobj, &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			return ret;
	}

	if (klp_is_object_loaded(obj))
		ret = klp_init_object_loaded(patch, obj);

	return ret;
}

static void klp_init_func_early(struct klp_object *obj,
				struct klp_func *func)
{
	kobject_init(&func->kobj, &klp_ktype_func);
	list_add_tail(&func->node, &obj->func_list);
}

static void klp_init_object_early(struct klp_patch *patch,
				  struct klp_object *obj)
{
	INIT_LIST_HEAD(&obj->func_list);
	kobject_init(&obj->kobj, &klp_ktype_object);
	list_add_tail(&obj->node, &patch->obj_list);
}

static void klp_init_patch_early(struct klp_patch *patch)
{
	struct klp_object *obj;
	struct klp_func *func;

	INIT_LIST_HEAD(&patch->list);
	INIT_LIST_HEAD(&patch->obj_list);
	kobject_init(&patch->kobj, &klp_ktype_patch);
	patch->enabled = false;
	patch->forced = false;
	INIT_WORK(&patch->free_work, klp_free_patch_work_fn);
	init_completion(&patch->finish);

	klp_for_each_object_static(patch, obj) {
		klp_init_object_early(patch, obj);

		klp_for_each_func_static(obj, func) {
			klp_init_func_early(obj, func);
		}
	}
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	ret = kobject_add(&patch->kobj, klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		return ret;

	if (patch->replace) {
		ret = klp_add_nops(patch);
		if (ret)
			return ret;
	}

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			return ret;
	}

	list_add_tail(&patch->list, &klp_patches);

	return 0;
}

static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	if (WARN_ON(!patch->enabled))
		return -EINVAL;

	if (klp_transition_patch)
		return -EBUSY;

	klp_init_transition(patch, KLP_TRANSITION_UNPATCHED);

	klp_for_each_object(patch, obj)
		if (obj->patched)
			klp_pre_unpatch_callback(obj);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the TIF_PATCH_PENDING writes in
	 * klp_start_transition(). In the rare case where klp_ftrace_handler()
	 * is called shortly after klp_update_patch_state() switches the task,
	 * this ensures the handler sees that func->transition is set.
	 */
	smp_wmb();

	klp_start_transition();
	patch->enabled = false;
	klp_try_complete_transition();

	return 0;
}

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (klp_transition_patch)
		return -EBUSY;

	if (WARN_ON(patch->enabled))
		return -EINVAL;

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_init_transition(patch, KLP_TRANSITION_PATCHED);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the ops->func_stack writes in
	 * klp_patch_object(), so that klp_ftrace_handler() will see the
	 * func->transition updates before the handler is registered and the
	 * new funcs become visible to the handler.
	 */
	smp_wmb();

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_pre_patch_callback(obj);
		if (ret) {
			pr_warn("pre-patch callback failed for object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}

		ret = klp_patch_object(obj);
		if (ret) {
			pr_warn("failed to patch object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}
	}

	klp_start_transition();
	patch->enabled = true;
	klp_try_complete_transition();

	return 0;
err:
	pr_warn("failed to enable patch '%s'\n", patch->mod->name);

	klp_cancel_transition();
	return ret;
}
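
/*
 * A note on the barriers above (the read side lives in patch.c and is
 * summarized here for orientation): the smp_wmb() calls pair with
 * smp_rmb() calls in klp_ftrace_handler(), which reads ops->func_stack
 * and then func->transition in the opposite order of the writes made
 * here.
 */
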
/**
 * klp_enable_patch() - enable the livepatch
 * @patch: patch to be enabled
 *
 * Initializes the data structure associated with the patch, creates the sysfs
 * interface, performs the needed symbol lookups and code relocations, and
 * registers the patched functions with ftrace.
 *
 * This function is supposed to be called from the livepatch module_init()
 * callback.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;
	struct klp_object *obj;

	if (!patch || !patch->mod || !patch->objs)
		return -EINVAL;

	klp_for_each_object_static(patch, obj) {
		if (!obj->funcs)
			return -EINVAL;
	}

	if (!is_livepatch_module(patch->mod)) {
		pr_err("module %s is not marked as a livepatch module\n",
		       patch->mod->name);
		return -EINVAL;
	}

	if (!klp_initialized())
		return -ENODEV;

	if (!klp_have_reliable_stack()) {
		pr_warn("This architecture doesn't have support for the livepatch consistency model.\n");
		pr_warn("The livepatch transition may never complete.\n");
	}

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_compatible(patch)) {
		pr_err("Livepatch patch (%s) is not compatible with the already installed livepatches.\n",
		       patch->mod->name);
		mutex_unlock(&klp_mutex);
		return -EINVAL;
	}

	if (!try_module_get(patch->mod)) {
		mutex_unlock(&klp_mutex);
		return -ENODEV;
	}

	klp_init_patch_early(patch);

	ret = klp_init_patch(patch);
	if (ret)
		goto err;

	ret = __klp_enable_patch(patch);
	if (ret)
		goto err;

	mutex_unlock(&klp_mutex);

	return 0;

err:
	klp_free_patch_start(patch);

	mutex_unlock(&klp_mutex);

	klp_free_patch_finish(patch);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);
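
/*
 * A minimal usage sketch, modelled on samples/livepatch/livepatch-sample.c
 * (the patched symbol and the module-local names are illustrative):
 *
 *	static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "%s\n", "this has been live patched");
 *		return 0;
 *	}
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.funcs = funcs,		(NULL .name => vmlinux)
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		return klp_enable_patch(&patch);
 *	}
 *	module_init(livepatch_init);
 */
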
/*
 * This function unpatches objects from the replaced livepatches.
 *
 * We could be pretty aggressive here. It is called in the situation where
 * these structures are no longer accessed from the ftrace handler.
 * All functions are redirected by the klp_transition_patch. They either
 * use new code or run the original code via the special nop function
 * patches.
 *
 * The only exception is when the transition was forced. In this case,
 * klp_ftrace_handler() might still see the replaced patch on the stack.
 * Fortunately, it is carefully designed to work with removed functions
 * thanks to RCU. We only have to keep the patches on the system. This is
 * handled transparently by patch->forced: klp_free_patch_finish() skips
 * module_put() for forced patches.
 */
void klp_unpatch_replaced_patches(struct klp_patch *new_patch)
{
	struct klp_patch *old_patch;

	klp_for_each_patch(old_patch) {
		if (old_patch == new_patch)
			return;

		old_patch->enabled = false;
		klp_unpatch_objects(old_patch);
	}
}

/*
 * This function removes the dynamically allocated 'nop' functions.
 *
 * We could be pretty aggressive. NOPs do not change the existing
 * behavior except for adding unnecessary delay by the ftrace handler.
 *
 * It is safe even when the transition was forced. The ftrace handler
 * will see a valid ops->func_stack entry thanks to RCU.
 *
 * We could even free the NOP structures. They must be the last entry
 * in ops->func_stack. Therefore unregister_ftrace_function() is called.
 * It does the same as klp_synchronize_transition() to make sure that
 * nobody is inside the ftrace handler once the operation finishes.
 *
 * IMPORTANT: It must be called right after removing the replaced patches!
 */
void klp_discard_nops(struct klp_patch *new_patch)
{
	klp_unpatch_objects_dynamic(klp_transition_patch);
	klp_free_objects_dynamic(klp_transition_patch);
}

/*
 * Remove parts of patches that touch a given kernel module. The list of
 * patches processed might be limited. When limit is NULL, all patches
 * will be handled.
 */
static void klp_cleanup_module_patches_limited(struct module *mod,
					       struct klp_patch *limit)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	klp_for_each_patch(patch) {
		if (patch == limit)
			break;

		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (patch != klp_transition_patch)
				klp_pre_unpatch_callback(obj);

			pr_notice("reverting patch '%s' on unloading module '%s'\n",
				  patch->mod->name, obj->mod->name);
			klp_unpatch_object(obj);

			klp_post_unpatch_callback(obj);
			klp_clear_object_relocs(patch, obj);
			klp_free_object_loaded(obj);
			break;
		}
	}
}

int klp_module_coming(struct module *mod)
{
	int ret;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_COMING))
		return -EINVAL;

	if (!strcmp(mod->name, "vmlinux")) {
		pr_err("vmlinux.ko: invalid module name\n");
		return -EINVAL;
	}

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_coming()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = true;

	klp_for_each_patch(patch) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			obj->mod = mod;

			ret = klp_init_object_loaded(patch, obj);
			if (ret) {
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			pr_notice("applying patch '%s' to loading module '%s'\n",
				  patch->mod->name, obj->mod->name);

			ret = klp_pre_patch_callback(obj);
			if (ret) {
				pr_warn("pre-patch callback failed for object '%s'\n",
					obj->name);
				goto err;
			}

			ret = klp_patch_object(obj);
			if (ret) {
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);

				klp_post_unpatch_callback(obj);
				goto err;
			}

			if (patch != klp_transition_patch)
				klp_post_patch_callback(obj);

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;

err:
	/*
	 * If a patch is unsuccessfully applied, return
	 * error to the module loader.
	 */
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	obj->mod = NULL;
	klp_cleanup_module_patches_limited(mod, patch);
	mutex_unlock(&klp_mutex);

	return ret;
}
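
/*
 * Orientation note (the call sites live in the module loader): the loader
 * invokes klp_module_coming() while a module is in MODULE_STATE_COMING,
 * i.e. before its init function runs, and klp_module_going() once it
 * heads for removal, so a to-be-patched module is patched before its
 * init executes and unpatched before it is freed.
 */
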
void klp_module_going(struct module *mod)
{
	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_going()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = false;

	klp_cleanup_module_patches_limited(mod, NULL);

	mutex_unlock(&klp_mutex);
}

static int __init klp_init(void)
{
	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj)
		return -ENOMEM;

	return 0;
}

module_init(klp_init);