/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ftrace.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <asm/cacheflush.h>

/**
 * struct klp_ops - structure for tracking registered ftrace ops structs
 *
 * A single ftrace_ops is shared between all enabled replacement functions
 * (klp_func structs) which have the same old_addr. This allows the switch
 * between function versions to happen instantaneously by updating the
 * klp_ops struct's func_stack list. The winner is the klp_func at the top
 * of the func_stack (front of the list).
 *
 * @node:	node for the global klp_ops list
 * @func_stack:	list head for the stack of klp_func's (active func is on top)
 * @fops:	registered ftrace ops struct
 */
struct klp_ops {
	struct list_head node;
	struct list_head func_stack;
	struct ftrace_ops fops;
};

/*
 * The klp_mutex protects the global lists and state transitions of any
 * structure reachable from them. References to any structure must be
 * obtained under mutex protection (except in klp_ftrace_handler(), which
 * uses RCU to ensure it gets consistent data).
 */
static DEFINE_MUTEX(klp_mutex);

static LIST_HEAD(klp_patches);
static LIST_HEAD(klp_ops);

static struct kobject *klp_root_kobj;

static struct klp_ops *klp_find_ops(unsigned long old_addr)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_addr == old_addr)
			return ops;
	}

	return NULL;
}
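
/*
 * Illustrative sketch, not from the original file: if two enabled patches
 * replace the same old_addr, their klp_func entries share one klp_ops and
 * stack on func_stack in enable order (funcA and funcB below are
 * hypothetical klp_func instances):
 *
 *	klp_enable_func(&funcA);   func_stack: funcA (active)
 *	klp_enable_func(&funcB);   func_stack: funcB (active), funcA
 *	klp_disable_func(&funcB);  func_stack: funcA (active) again
 *
 * klp_ftrace_handler() always redirects to the entry at the front of the
 * list, so disabling the newer patch transparently falls back to the
 * older one.
 */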

static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

static bool klp_is_object_loaded(struct klp_object *obj)
{
	return !obj->name || obj->mod;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not interfere with the work of klp_module_coming() and
	 * klp_module_going(). Note that the patch might still be needed
	 * before klp_module_going() is called. Module functions can be
	 * called even in the GOING state until mod->exit() finishes. This
	 * is especially important for patches that change the semantics of
	 * the patched functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}

/* klp_mutex must be held by caller */
static bool klp_is_patch_registered(struct klp_patch *patch)
{
	struct klp_patch *mypatch;

	list_for_each_entry(mypatch, &klp_patches, list)
		if (mypatch == patch)
			return true;

	return false;
}

static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}

struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	unsigned long count;
	unsigned long pos;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found for the desired position
	 * or the position is not defined for a non-unique symbol.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	mutex_lock(&module_mutex);
	kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	/*
	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
	 * otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}
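
/*
 * Worked example, added for clarity: suppose kallsyms lists a hypothetical
 * static symbol "foo" three times in vmlinux (objname == NULL). Then:
 *
 *	klp_find_object_symbol(NULL, "foo", 0, &addr)  fails: ambiguous
 *	klp_find_object_symbol(NULL, "foo", 2, &addr)  second occurrence
 *	klp_find_object_symbol(NULL, "foo", 5, &addr)  fails: only 3 exist
 *
 * A sympos of 0 therefore asserts uniqueness; any other value selects the
 * nth occurrence in kallsyms order.
 */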

/*
 * External symbols are located outside the parent object (where the parent
 * object is either vmlinux or the kmod being patched).
 */
static int klp_find_external_symbol(struct module *pmod, const char *name,
				    unsigned long *addr)
{
	const struct kernel_symbol *sym;

	/* first, check if it's an exported symbol */
	preempt_disable();
	sym = find_symbol(name, NULL, NULL, true, true);
	if (sym) {
		*addr = sym->value;
		preempt_enable();
		return 0;
	}
	preempt_enable();

	/*
	 * Check if it's in another .o within the patch module. This also
	 * checks that the external symbol is unique.
	 */
	return klp_find_object_symbol(pmod->name, name, 0, addr);
}

static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int ret = 0;
	unsigned long val;
	struct klp_reloc *reloc;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	if (WARN_ON(!obj->relocs))
		return -EINVAL;

	module_disable_ro(pmod);

	for (reloc = obj->relocs; reloc->name; reloc++) {
		/* discover the address of the referenced symbol */
		if (reloc->external) {
			if (reloc->sympos > 0) {
				pr_err("non-zero sympos for external reloc symbol '%s' is not supported\n",
				       reloc->name);
				ret = -EINVAL;
				goto out;
			}
			ret = klp_find_external_symbol(pmod, reloc->name, &val);
		} else
			ret = klp_find_object_symbol(obj->name,
						     reloc->name,
						     reloc->sympos,
						     &val);
		if (ret)
			goto out;

		ret = klp_write_module_reloc(pmod, reloc->type, reloc->loc,
					     val + reloc->addend);
		if (ret) {
			pr_err("relocation failed for symbol '%s' at 0x%016lx (%d)\n",
			       reloc->name, val, ret);
			goto out;
		}
	}

out:
	module_enable_ro(pmod);
	return ret;
}
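
/*
 * Illustrative only: a patch module referencing a non-exported symbol
 * would supply klp_reloc entries shaped like the hypothetical one below,
 * which is exactly what the loop above consumes (the symbol name,
 * call site and relocation values are made up):
 *
 *	static struct klp_reloc relocs[] = {
 *		{
 *			.loc      = (unsigned long)&some_callsite,
 *			.name     = "static_helper",
 *			.type     = R_X86_64_PC32,
 *			.addend   = -4,
 *			.sympos   = 0,	(symbol assumed unique)
 *			.external = 0,	(resolved within the patched object)
 *		},
 *		{ }			(name == NULL terminates the loop)
 *	};
 */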

static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;

	ops = container_of(fops, struct klp_ops, fops);

	rcu_read_lock();
	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);
	if (WARN_ON_ONCE(!func))
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	rcu_read_unlock();
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif
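
/*
 * Note added for illustration: on x86_64, klp_arch_set_pc() amounts to
 * regs->ip = new_addr, so returning from klp_ftrace_handler() resumes
 * execution in func->new_func rather than in the original function body.
 * The klp_get_ftrace_location() hook exists because on some architectures
 * the ftrace call site is not at the function's first instruction, so the
 * address passed to ftrace_set_filter_ip() must be adjusted accordingly.
 */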

static void klp_disable_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(func->state != KLP_ENABLED))
		return;
	if (WARN_ON(!func->old_addr))
		return;

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->state = KLP_DISABLED;
}

static int klp_enable_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->state != KLP_DISABLED))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}
	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->state = KLP_ENABLED;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

static void klp_disable_object(struct klp_object *obj)
{
	struct klp_func *func;

	klp_for_each_func(obj, func)
		if (func->state == KLP_ENABLED)
			klp_disable_func(func);

	obj->state = KLP_DISABLED;
}

static int klp_enable_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->state != KLP_DISABLED))
		return -EINVAL;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_enable_func(func);
		if (ret) {
			klp_disable_object(obj);
			return ret;
		}
	}
	obj->state = KLP_ENABLED;

	return 0;
}

static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	/* enforce stacking: only the last enabled patch can be disabled */
	if (!list_is_last(&patch->list, &klp_patches) &&
	    list_next_entry(patch, list)->state == KLP_ENABLED)
		return -EBUSY;

	pr_notice("disabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (obj->state == KLP_ENABLED)
			klp_disable_object(obj);
	}

	patch->state = KLP_DISABLED;

	return 0;
}

/**
 * klp_disable_patch() - disables a registered patch
 * @patch:	The registered, enabled patch to be disabled
 *
 * Unregisters the patched functions from ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_disable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	if (patch->state == KLP_DISABLED) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_disable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_disable_patch);

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (WARN_ON(patch->state != KLP_DISABLED))
		return -EINVAL;

	/* enforce stacking: only the first disabled patch can be enabled */
	if (patch->list.prev != &klp_patches &&
	    list_prev_entry(patch, list)->state == KLP_DISABLED)
		return -EBUSY;

	pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
	add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_enable_object(obj);
		if (ret)
			goto unregister;
	}

	patch->state = KLP_ENABLED;

	return 0;

unregister:
	WARN_ON(__klp_disable_patch(patch));
	return ret;
}
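
/*
 * Stacking example, added for clarity: with patches P1, P2 and P3
 * registered in that order, the checks in __klp_enable_patch() and
 * __klp_disable_patch() enforce strict LIFO ordering:
 *
 *	enable order:   P1, P2, P3   (only the first disabled patch)
 *	disable order:  P3, P2, P1   (only the last enabled patch)
 *
 * Disabling P1 while P2 is still enabled fails with -EBUSY, so the newest
 * enabled patch always sits on top of every shared func_stack.
 */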

/**
 * klp_enable_patch() - enables a registered patch
 * @patch:	The registered, disabled patch to be enabled
 *
 * Performs the needed symbol lookups and code relocations,
 * then registers the patched functions with ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_enable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return -EINVAL;

	if (val != KLP_DISABLED && val != KLP_ENABLED)
		return -EINVAL;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (val == patch->state) {
		/* already in requested state */
		ret = -EINVAL;
		goto err;
	}

	if (val == KLP_ENABLED) {
		ret = __klp_enable_patch(patch);
		if (ret)
			goto err;
	} else {
		ret = __klp_disable_patch(patch);
		if (ret)
			goto err;
	}

	mutex_unlock(&klp_mutex);

	return count;

err:
	mutex_unlock(&klp_mutex);
	return ret;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state);
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	NULL
};

static void klp_kobj_release_patch(struct kobject *kobj)
{
	/*
	 * Once we have a consistency model we'll need to module_put() the
	 * patch module here. See klp_register_patch() for more details.
	 */
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = klp_patch_attrs,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};
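
/*
 * Usage sketch for the sysfs interface above, added for illustration
 * ("livepatch_fix" is a hypothetical patch module name):
 *
 *	# cat /sys/kernel/livepatch/livepatch_fix/enabled
 *	1
 *	# echo 0 > /sys/kernel/livepatch/livepatch_fix/enabled
 *
 * The write lands in enabled_store(), which takes klp_mutex and calls
 * __klp_enable_patch() or __klp_disable_patch(); writing the current
 * state again returns -EINVAL.
 */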

/*
 * Free all functions' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_funcs_limited(struct klp_object *obj,
				   struct klp_func *limit)
{
	struct klp_func *func;

	for (func = obj->funcs; func->old_name && func != limit; func++)
		kobject_put(&func->kobj);
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func)
		func->old_addr = 0;
}

/*
 * Free all objects' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_objects_limited(struct klp_patch *patch,
				     struct klp_object *limit)
{
	struct klp_object *obj;

	for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
		klp_free_funcs_limited(obj, NULL);
		kobject_put(&obj->kobj);
	}
}

static void klp_free_patch(struct klp_patch *patch)
{
	klp_free_objects_limited(patch, NULL);
	if (!list_empty(&patch->list))
		list_del(&patch->list);
	kobject_put(&patch->kobj);
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	INIT_LIST_HEAD(&func->stack_node);
	func->state = KLP_DISABLED;

	/*
	 * The format for the sysfs directory is <function,sympos> where
	 * sympos is the nth occurrence of this symbol in kallsyms for the
	 * patched object. If the user selects 0 for old_sympos, then 1 will
	 * be used since a unique symbol will be the first occurrence.
	 */
	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
				    &obj->kobj, "%s,%lu", func->old_name,
				    func->old_sympos ? func->old_sympos : 1);
}

/* parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (obj->relocs) {
		ret = klp_write_object_relocations(patch->mod, obj);
		if (ret)
			return ret;
	}

	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     &func->old_addr);
		if (ret)
			return ret;
	}

	return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (!obj->funcs)
		return -EINVAL;

	obj->state = KLP_DISABLED;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
				   &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			goto free;
	}

	if (klp_is_object_loaded(obj)) {
		ret = klp_init_object_loaded(patch, obj);
		if (ret)
			goto free;
	}

	return 0;

free:
	klp_free_funcs_limited(obj, func);
	kobject_put(&obj->kobj);
	return ret;
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (!patch->objs)
		return -EINVAL;

	mutex_lock(&klp_mutex);

	patch->state = KLP_DISABLED;

	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
				   klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		goto unlock;

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			goto free;
	}

	list_add_tail(&patch->list, &klp_patches);

	mutex_unlock(&klp_mutex);

	return 0;

free:
	klp_free_objects_limited(patch, obj);
	kobject_put(&patch->kobj);
unlock:
	mutex_unlock(&klp_mutex);
	return ret;
}

/**
 * klp_unregister_patch() - unregisters a patch
 * @patch:	Disabled patch to be unregistered
 *
 * Frees the data structures and removes the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_unregister_patch(struct klp_patch *patch)
{
	int ret = 0;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto out;
	}

	if (patch->state == KLP_ENABLED) {
		ret = -EBUSY;
		goto out;
	}

	klp_free_patch(patch);

out:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_unregister_patch);

/**
 * klp_register_patch() - registers a patch
 * @patch:	Patch to be registered
 *
 * Initializes the data structure associated with the patch and
 * creates the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_register_patch(struct klp_patch *patch)
{
	int ret;

	if (!klp_initialized())
		return -ENODEV;

	if (!patch || !patch->mod)
		return -EINVAL;

	/*
	 * A reference is taken on the patch module to prevent it from being
	 * unloaded. Right now, we don't allow patch modules to unload since
	 * there is currently no method to determine if a thread is still
	 * running in the patched code contained in the patch module once
	 * the ftrace registration is successful.
	 */
	if (!try_module_get(patch->mod))
		return -ENODEV;

	ret = klp_init_patch(patch);
	if (ret)
		module_put(patch->mod);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_register_patch);
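
/*
 * A minimal caller of the registration API, added for illustration and
 * patterned after samples/livepatch/livepatch-sample.c (the replacement
 * function and patched symbol are examples only):
 *
 *	static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "%s\n", "this has been live patched");
 *		return 0;
 *	}
 *
 *	static struct klp_func funcs[] = {
 *		{ .old_name = "cmdline_proc_show",
 *		  .new_func = livepatch_cmdline_proc_show, },
 *		{ }
 *	};
 *	static struct klp_object objs[] = {
 *		{ .funcs = funcs, },	(name == NULL targets vmlinux)
 *		{ }
 *	};
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		int ret;
 *
 *		ret = klp_register_patch(&patch);
 *		if (ret)
 *			return ret;
 *		ret = klp_enable_patch(&patch);
 *		if (ret) {
 *			WARN_ON(klp_unregister_patch(&patch));
 *			return ret;
 *		}
 *		return 0;
 *	}
 */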

int klp_module_coming(struct module *mod)
{
	int ret;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_COMING))
		return -EINVAL;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_coming()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = true;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			obj->mod = mod;

			ret = klp_init_object_loaded(patch, obj);
			if (ret) {
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			if (patch->state == KLP_DISABLED)
				break;

			pr_notice("applying patch '%s' to loading module '%s'\n",
				  patch->mod->name, obj->mod->name);

			ret = klp_enable_object(obj);
			if (ret) {
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;

err:
	/*
	 * If a patch is unsuccessfully applied, return
	 * error to the module loader.
	 */
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	klp_free_object_loaded(obj);
	mutex_unlock(&klp_mutex);

	return ret;
}

void klp_module_going(struct module *mod)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_going()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = false;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (patch->state != KLP_DISABLED) {
				pr_notice("reverting patch '%s' on unloading module '%s'\n",
					  patch->mod->name, obj->mod->name);
				klp_disable_object(obj);
			}

			klp_free_object_loaded(obj);
			break;
		}
	}

	mutex_unlock(&klp_mutex);
}
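
/*
 * Timeline sketch, added for illustration, for a module M targeted by a
 * registered patch P:
 *
 *	insmod M  ->  klp_module_coming(M): sets mod->klp_alive, resolves
 *	              P's symbols inside M and, if P is enabled, patches
 *	              M's functions before the module starts running.
 *	rmmod M   ->  klp_module_going(M): clears mod->klp_alive, reverts
 *	              the object and forgets the resolved addresses.
 *
 * klp_module_going() also runs when a module's own init fails (state
 * still MODULE_STATE_COMING), which is why both states are accepted
 * there.
 */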

static int __init klp_init(void)
{
	int ret;

	ret = klp_check_compiler_support();
	if (ret) {
		pr_info("Your compiler is too old; turning off.\n");
		return -EINVAL;
	}

	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj)
		return -ENOMEM;

	return 0;
}

module_init(klp_init);