/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/completion.h>
#include <asm/cacheflush.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

/*
 * klp_mutex is a coarse lock which serializes access to klp data.  All
 * accesses to klp-related variables and structures must have mutex protection,
 * except within the following functions which carefully avoid the need for it:
 *
 * - klp_ftrace_handler()
 * - klp_update_patch_state()
 */
DEFINE_MUTEX(klp_mutex);

static LIST_HEAD(klp_patches);

static struct kobject *klp_root_kobj;

static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not interfere with the work of klp_module_coming() and
	 * klp_module_going(). Note that the patch might still be needed
	 * before klp_module_going() is called. Module functions can be
	 * called even in the GOING state until mod->exit() finishes. This
	 * is especially important for patches that modify the semantics
	 * of functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}
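
/*
 * Illustrative sketch (identifiers are hypothetical) of the klp_object
 * arrays consumed by klp_is_module() and klp_find_object_module() above:
 * a patch module describes its targets with a NULL-terminated array,
 * where name == NULL stands for vmlinux and any other name for a
 * kernel module:
 *
 *	static struct klp_object objs[] = {
 *		{ .name = NULL, .funcs = vmlinux_funcs },
 *		{ .name = "ext4", .funcs = ext4_funcs },
 *		{ }
 *	};
 */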

static bool klp_is_patch_registered(struct klp_patch *patch)
{
	struct klp_patch *mypatch;

	list_for_each_entry(mypatch, &klp_patches, list)
		if (mypatch == patch)
			return true;

	return false;
}

static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}

struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	unsigned long count;
	unsigned long pos;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found for the desired position
	 * or the position is not defined for a non-unique symbol.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	mutex_lock(&module_mutex);
	if (objname)
		module_kallsyms_on_each_symbol(klp_find_callback, &args);
	else
		kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	/*
	 * Ensure an address was found.  If sympos is 0, ensure symbol is
	 * unique; otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}
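
/*
 * Illustrative example of the sympos semantics implemented above
 * (the symbol name is hypothetical): if a static function "show" is
 * defined in two compilation units of the same object, sympos == 1
 * selects the first occurrence in kallsyms and sympos == 2 the second,
 * while sympos == 0 fails with -EINVAL because the symbol is ambiguous.
 * For a unique symbol, sympos == 0 succeeds.
 */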

static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
{
	int i, cnt, vmlinux, ret;
	char objname[MODULE_NAME_LEN];
	char symname[KSYM_NAME_LEN];
	char *strtab = pmod->core_kallsyms.strtab;
	Elf_Rela *relas;
	Elf_Sym *sym;
	unsigned long sympos, addr;

	/*
	 * Since the field widths for objname and symname in the sscanf()
	 * call are hard-coded and correspond to MODULE_NAME_LEN and
	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
	 * and KSYM_NAME_LEN have the values we expect them to have.
	 *
	 * Because the value of MODULE_NAME_LEN can differ among architectures,
	 * we use the smallest/strictest upper bound possible (56, based on
	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
	 */
	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);

	relas = (Elf_Rela *) relasec->sh_addr;
	/* For each rela in this klp relocation section */
	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
		if (sym->st_shndx != SHN_LIVEPATCH) {
			pr_err("symbol %s is not marked as a livepatch symbol\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* Format: .klp.sym.objname.symname,sympos */
		cnt = sscanf(strtab + sym->st_name,
			     ".klp.sym.%55[^.].%127[^,],%lu",
			     objname, symname, &sympos);
		if (cnt != 3) {
			pr_err("symbol %s has an incorrectly formatted name\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
		vmlinux = !strcmp(objname, "vmlinux");
		ret = klp_find_object_symbol(vmlinux ? NULL : objname,
					     symname, sympos, &addr);
		if (ret)
			return ret;

		sym->st_value = addr;
	}

	return 0;
}

static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int i, cnt, ret = 0;
	const char *objname, *secname;
	char sec_objname[MODULE_NAME_LEN];
	Elf_Shdr *sec;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	objname = klp_is_module(obj) ? obj->name : "vmlinux";

	/* For each klp relocation section */
	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
		sec = pmod->klp_info->sechdrs + i;
		secname = pmod->klp_info->secstrings + sec->sh_name;
		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
			continue;

		/*
		 * Format: .klp.rela.sec_objname.section_name
		 * See comment in klp_resolve_symbols() for an explanation
		 * of the selected field width value.
		 */
		cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
		if (cnt != 1) {
			pr_err("section %s has an incorrectly formatted name\n",
			       secname);
			ret = -EINVAL;
			break;
		}

		if (strcmp(objname, sec_objname))
			continue;

		ret = klp_resolve_symbols(sec, pmod);
		if (ret)
			break;

		ret = apply_relocate_add(pmod->klp_info->sechdrs,
					 pmod->core_kallsyms.strtab,
					 pmod->klp_info->symndx, i, pmod);
		if (ret)
			break;
	}

	return ret;
}
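
/*
 * Illustrative example of the naming scheme parsed above (object,
 * symbol and section names are hypothetical): a reference to the
 * second occurrence of the symbol "free_inode" in module "ext4" is
 * carried as a livepatch symbol named
 *
 *	.klp.sym.ext4.free_inode,2
 *
 * and the relocations that use it live in a section such as
 *
 *	.klp.rela.ext4.text.some_patched_function
 */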

static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	if (WARN_ON(!patch->enabled))
		return -EINVAL;

	if (klp_transition_patch)
		return -EBUSY;

	/* enforce stacking: only the last enabled patch can be disabled */
	if (!list_is_last(&patch->list, &klp_patches) &&
	    list_next_entry(patch, list)->enabled)
		return -EBUSY;

	klp_init_transition(patch, KLP_UNPATCHED);

	klp_for_each_object(patch, obj)
		if (obj->patched)
			klp_pre_unpatch_callback(obj);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the TIF_PATCH_PENDING writes in
	 * klp_start_transition().  In the rare case where klp_ftrace_handler()
	 * is called shortly after klp_update_patch_state() switches the task,
	 * this ensures the handler sees that func->transition is set.
	 */
	smp_wmb();

	klp_start_transition();
	klp_try_complete_transition();
	patch->enabled = false;

	return 0;
}

/**
 * klp_disable_patch() - disables a registered patch
 * @patch:	The registered, enabled patch to be disabled
 *
 * Unregisters the patched functions from ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_disable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	if (!patch->enabled) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_disable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_disable_patch);
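
/*
 * Stacking example, following the checks in __klp_disable_patch() above
 * and __klp_enable_patch() below: with patches P1, P2 and P3 enabled in
 * that order, only P3 may be disabled; once P3 is disabled, P2 becomes
 * the only valid disable target and P3 the only valid enable target.
 */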

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (klp_transition_patch)
		return -EBUSY;

	if (WARN_ON(patch->enabled))
		return -EINVAL;

	/* enforce stacking: only the first disabled patch can be enabled */
	if (patch->list.prev != &klp_patches &&
	    !list_prev_entry(patch, list)->enabled)
		return -EBUSY;

	/*
	 * A reference is taken on the patch module to prevent it from being
	 * unloaded.
	 *
	 * Note: For immediate (no consistency model) patches we don't allow
	 * patch modules to unload since there is no safe/sane method to
	 * determine if a thread is still running in the patched code contained
	 * in the patch module once the ftrace registration is successful.
	 */
	if (!try_module_get(patch->mod))
		return -ENODEV;

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_init_transition(patch, KLP_PATCHED);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the ops->func_stack writes in
	 * klp_patch_object(), so that klp_ftrace_handler() will see the
	 * func->transition updates before the handler is registered and the
	 * new funcs become visible to the handler.
	 */
	smp_wmb();

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_pre_patch_callback(obj);
		if (ret) {
			pr_warn("pre-patch callback failed for object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}

		ret = klp_patch_object(obj);
		if (ret) {
			pr_warn("failed to patch object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}
	}

	klp_start_transition();
	klp_try_complete_transition();
	patch->enabled = true;

	return 0;
err:
	pr_warn("failed to enable patch '%s'\n", patch->mod->name);

	klp_cancel_transition();
	return ret;
}

/**
 * klp_enable_patch() - enables a registered patch
 * @patch:	The registered, disabled patch to be enabled
 *
 * Performs the needed symbol lookups and code relocations,
 * then registers the patched functions with ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_enable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/transition
 * /sys/kernel/livepatch/<patch>/signal
 * /sys/kernel/livepatch/<patch>/force
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool enabled;

	ret = kstrtobool(buf, &enabled);
	if (ret)
		return ret;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		/*
		 * The module with the patch could have either disappeared
		 * in the meantime, or it is not properly initialized yet.
		 */
		ret = -EINVAL;
		goto err;
	}

	if (patch->enabled == enabled) {
		/* already in requested state */
		ret = -EINVAL;
		goto err;
	}

	if (patch == klp_transition_patch) {
		klp_reverse_transition();
	} else if (enabled) {
		ret = __klp_enable_patch(patch);
		if (ret)
			goto err;
	} else {
		ret = __klp_disable_patch(patch);
		if (ret)
			goto err;
	}

	mutex_unlock(&klp_mutex);

	return count;

err:
	mutex_unlock(&klp_mutex);
	return ret;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
}

static ssize_t transition_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n",
			patch == klp_transition_patch);
}

static ssize_t signal_store(struct kobject *kobj, struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool val;

	patch = container_of(kobj, struct klp_patch, kobj);

	/*
	 * klp_mutex is intentionally not taken here.  It is not really
	 * needed: the race window is harmless, and taking the lock would
	 * only delay the action.
	 */
	if (patch != klp_transition_patch)
		return -EINVAL;

	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;

	if (val)
		klp_send_signals();

	return count;
}

static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool val;

	patch = container_of(kobj, struct klp_patch, kobj);

	/*
	 * klp_mutex is intentionally not taken here.  It is not really
	 * needed: the race window is harmless, and taking the lock would
	 * only delay the action.
	 */
	if (patch != klp_transition_patch)
		return -EINVAL;

	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;

	if (val)
		klp_force_transition();

	return count;
}
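
/*
 * Illustrative user-space usage of the attributes defined below, where
 * <patch> stands for the name of a loaded patch module:
 *
 *	echo 0 > /sys/kernel/livepatch/<patch>/enabled	(disable)
 *	echo 1 > /sys/kernel/livepatch/<patch>/signal	(re-signal blocking tasks)
 *	echo 1 > /sys/kernel/livepatch/<patch>/force	(force the transition)
 */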

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct kobj_attribute signal_kobj_attr = __ATTR_WO(signal);
static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	&transition_kobj_attr.attr,
	&signal_kobj_attr.attr,
	&force_kobj_attr.attr,
	NULL
};

static void klp_kobj_release_patch(struct kobject *kobj)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	complete(&patch->finish);
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = klp_patch_attrs,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};

/*
 * Free all functions' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_funcs_limited(struct klp_object *obj,
				   struct klp_func *limit)
{
	struct klp_func *func;

	for (func = obj->funcs; func->old_name && func != limit; func++)
		kobject_put(&func->kobj);
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func)
		func->old_addr = 0;
}

/*
 * Free all objects' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_objects_limited(struct klp_patch *patch,
				     struct klp_object *limit)
{
	struct klp_object *obj;

	for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
		klp_free_funcs_limited(obj, NULL);
		kobject_put(&obj->kobj);
	}
}

static void klp_free_patch(struct klp_patch *patch)
{
	klp_free_objects_limited(patch, NULL);
	if (!list_empty(&patch->list))
		list_del(&patch->list);
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	if (!func->old_name || !func->new_func)
		return -EINVAL;

	INIT_LIST_HEAD(&func->stack_node);
	func->patched = false;
	func->transition = false;

	/*
	 * The format for the sysfs directory is <function,sympos> where
	 * sympos is the nth occurrence of this symbol in kallsyms for the
	 * patched object. If the user selects 0 for old_sympos, then 1 will
	 * be used since a unique symbol will be the first occurrence.
	 */
	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
				    &obj->kobj, "%s,%lu", func->old_name,
				    func->old_sympos ? func->old_sympos : 1);
}
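
/*
 * Illustrative example for klp_init_func() above (the function name is
 * hypothetical): patching the only occurrence of "cmdline_proc_show"
 * in vmlinux with old_sympos == 0 creates
 *
 *	/sys/kernel/livepatch/<patch>/vmlinux/cmdline_proc_show,1
 */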

/* Arches may override this to finish any remaining arch-specific tasks */
void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
					struct klp_object *obj)
{
}

/* parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	module_disable_ro(patch->mod);
	ret = klp_write_object_relocations(patch->mod, obj);
	if (ret) {
		module_enable_ro(patch->mod, true);
		return ret;
	}

	arch_klp_init_object_loaded(patch, obj);
	module_enable_ro(patch->mod, true);

	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     &func->old_addr);
		if (ret)
			return ret;

		ret = kallsyms_lookup_size_offset(func->old_addr,
						  &func->old_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s'\n",
			       func->old_name);
			return -ENOENT;
		}

		ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
						  &func->new_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s' replacement\n",
			       func->old_name);
			return -ENOENT;
		}
	}

	return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (!obj->funcs)
		return -EINVAL;

	obj->patched = false;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
				   &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			goto free;
	}

	if (klp_is_object_loaded(obj)) {
		ret = klp_init_object_loaded(patch, obj);
		if (ret)
			goto free;
	}

	return 0;

free:
	klp_free_funcs_limited(obj, func);
	kobject_put(&obj->kobj);
	return ret;
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (!patch->objs)
		return -EINVAL;

	mutex_lock(&klp_mutex);

	patch->enabled = false;
	init_completion(&patch->finish);

	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
				   klp_root_kobj, "%s", patch->mod->name);
	if (ret) {
		mutex_unlock(&klp_mutex);
		return ret;
	}

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			goto free;
	}

	list_add_tail(&patch->list, &klp_patches);

	mutex_unlock(&klp_mutex);

	return 0;

free:
	klp_free_objects_limited(patch, obj);

	mutex_unlock(&klp_mutex);

	kobject_put(&patch->kobj);
	wait_for_completion(&patch->finish);

	return ret;
}
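
/*
 * Illustrative sketch, not part of this file, of how a patch module's
 * init function is expected to drive the registration API (see
 * samples/livepatch for a complete example):
 *
 *	ret = klp_register_patch(&patch);
 *	if (ret)
 *		return ret;
 *	ret = klp_enable_patch(&patch);
 *	if (ret) {
 *		WARN_ON(klp_unregister_patch(&patch));
 *		return ret;
 *	}
 *	return 0;
 */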

/**
 * klp_unregister_patch() - unregisters a patch
 * @patch:	Disabled patch to be unregistered
 *
 * Frees the data structures and removes the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_unregister_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	if (patch->enabled) {
		ret = -EBUSY;
		goto err;
	}

	klp_free_patch(patch);

	mutex_unlock(&klp_mutex);

	kobject_put(&patch->kobj);
	wait_for_completion(&patch->finish);

	return 0;
err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_unregister_patch);

/**
 * klp_register_patch() - registers a patch
 * @patch:	Patch to be registered
 *
 * Initializes the data structure associated with the patch and
 * creates the sysfs interface.
 *
 * There is no need to take a reference on the patch module here.  It is
 * done later when the patch is enabled.
 *
 * Return: 0 on success, otherwise error
 */
int klp_register_patch(struct klp_patch *patch)
{
	if (!patch || !patch->mod)
		return -EINVAL;

	if (!is_livepatch_module(patch->mod)) {
		pr_err("module %s is not marked as a livepatch module\n",
		       patch->mod->name);
		return -EINVAL;
	}

	if (!klp_initialized())
		return -ENODEV;

	/*
	 * Architectures without reliable stack traces have to set
	 * patch->immediate because there's currently no way to patch kthreads
	 * with the consistency model.
	 */
	if (!klp_have_reliable_stack() && !patch->immediate) {
		pr_err("This architecture doesn't have support for the livepatch consistency model.\n");
		return -ENOSYS;
	}

	return klp_init_patch(patch);
}
EXPORT_SYMBOL_GPL(klp_register_patch);

/*
 * Remove parts of patches that touch a given kernel module. The list of
 * patches processed might be limited. When limit is NULL, all patches
 * will be handled.
 */
static void klp_cleanup_module_patches_limited(struct module *mod,
					       struct klp_patch *limit)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	list_for_each_entry(patch, &klp_patches, list) {
		if (patch == limit)
			break;

		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			/*
			 * Only unpatch the module if the patch is enabled or
			 * is in transition.
			 */
			if (patch->enabled || patch == klp_transition_patch) {

				if (patch != klp_transition_patch)
					klp_pre_unpatch_callback(obj);

				pr_notice("reverting patch '%s' on unloading module '%s'\n",
					  patch->mod->name, obj->mod->name);
				klp_unpatch_object(obj);

				klp_post_unpatch_callback(obj);
			}

			klp_free_object_loaded(obj);
			break;
		}
	}
}
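
/*
 * Illustrative scenario for the two hooks below: if an enabled patch P
 * patches module M and M is loaded later, klp_module_coming() applies P
 * to M while M is still in the COMING state; when M is removed,
 * klp_module_going() reverts the patched state and clears the cached
 * references (obj->mod, func->old_addr) via
 * klp_cleanup_module_patches_limited().
 */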

int klp_module_coming(struct module *mod)
{
	int ret;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_COMING))
		return -EINVAL;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_coming()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = true;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			obj->mod = mod;

			ret = klp_init_object_loaded(patch, obj);
			if (ret) {
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			/*
			 * Only patch the module if the patch is enabled or is
			 * in transition.
			 */
			if (!patch->enabled && patch != klp_transition_patch)
				break;

			pr_notice("applying patch '%s' to loading module '%s'\n",
				  patch->mod->name, obj->mod->name);

			ret = klp_pre_patch_callback(obj);
			if (ret) {
				pr_warn("pre-patch callback failed for object '%s'\n",
					obj->name);
				goto err;
			}

			ret = klp_patch_object(obj);
			if (ret) {
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);

				klp_post_unpatch_callback(obj);
				goto err;
			}

			if (patch != klp_transition_patch)
				klp_post_patch_callback(obj);

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;

err:
	/*
	 * If a patch is unsuccessfully applied, return
	 * an error to the module loader.
	 */
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	klp_cleanup_module_patches_limited(mod, patch);
	mutex_unlock(&klp_mutex);

	return ret;
}

void klp_module_going(struct module *mod)
{
	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_going()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = false;

	klp_cleanup_module_patches_limited(mod, NULL);

	mutex_unlock(&klp_mutex);
}

static int __init klp_init(void)
{
	int ret;

	ret = klp_check_compiler_support();
	if (ret) {
		pr_info("Your compiler is too old; turning off.\n");
		return -EINVAL;
	}

	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj)
		return -ENOMEM;

	return 0;
}

module_init(klp_init);