xref: /linux/kernel/livepatch/core.c (revision c942fddf8793b2013be8c901b47d0a8dc02bf99f)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 */

#define pr_fmt(fmt) "livepatch: " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/completion.h>
#include <asm/cacheflush.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

/*
 * klp_mutex is a coarse lock which serializes access to klp data.  All
 * accesses to klp-related variables and structures must have mutex protection,
 * except within the following functions which carefully avoid the need for it:
 *
 * - klp_ftrace_handler()
 * - klp_update_patch_state()
 */
DEFINE_MUTEX(klp_mutex);

/*
 * Actively used patches: enabled or in transition. Note that replaced
 * or disabled patches are not listed even though the related kernel
 * module can still be loaded.
 */
LIST_HEAD(klp_patches);

static struct kobject *klp_root_kobj;

static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not interfere with the work of klp_module_coming() and
	 * klp_module_going(). Note that the patch might still be needed
	 * before klp_module_going() is called. Module functions can be
	 * called even in the GOING state until mod->exit() finishes. This
	 * is especially important for patches that modify the semantics
	 * of the functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}

static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}

static struct klp_func *klp_find_func(struct klp_object *obj,
				      struct klp_func *old_func)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if ((strcmp(old_func->old_name, func->old_name) == 0) &&
		    (old_func->old_sympos == func->old_sympos)) {
			return func;
		}
	}

	return NULL;
}

static struct klp_object *klp_find_object(struct klp_patch *patch,
					  struct klp_object *old_obj)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj) {
		if (klp_is_module(old_obj)) {
			if (klp_is_module(obj) &&
			    strcmp(old_obj->name, obj->name) == 0) {
				return obj;
			}
		} else if (!klp_is_module(obj)) {
			return obj;
		}
	}

	return NULL;
}

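/*
 * Context passed through the kallsyms iterators below: @objname/@name
 * identify the wanted symbol, @addr and @count record what has been found
 * so far, and @pos is the desired occurrence of the symbol (sympos).
 */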
struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	unsigned long count;
	unsigned long pos;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found for the desired position
	 * or the position is not defined for a non-unique symbol.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}

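/*
 * Look up the address of @name in @objname (NULL means vmlinux).  @sympos
 * disambiguates duplicate symbol names: 0 requires the symbol to be unique
 * within the object, while a non-zero value selects the nth occurrence.
 */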
static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	mutex_lock(&module_mutex);
	if (objname)
		module_kallsyms_on_each_symbol(klp_find_callback, &args);
	else
		kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	/*
	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
	 * otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}

static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
{
	int i, cnt, vmlinux, ret;
	char objname[MODULE_NAME_LEN];
	char symname[KSYM_NAME_LEN];
	char *strtab = pmod->core_kallsyms.strtab;
	Elf_Rela *relas;
	Elf_Sym *sym;
	unsigned long sympos, addr;

	/*
	 * Since the field widths for objname and symname in the sscanf()
	 * call are hard-coded and correspond to MODULE_NAME_LEN and
	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
	 * and KSYM_NAME_LEN have the values we expect them to have.
	 *
	 * Because the value of MODULE_NAME_LEN can differ among architectures,
	 * we use the smallest/strictest upper bound possible (56, based on
	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
	 */
	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);

	relas = (Elf_Rela *) relasec->sh_addr;
	/* For each rela in this klp relocation section */
	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
		if (sym->st_shndx != SHN_LIVEPATCH) {
			pr_err("symbol %s is not marked as a livepatch symbol\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* Format: .klp.sym.objname.symname,sympos */
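		/* e.g. ".klp.sym.vmlinux.printk,0" (illustrative example) */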
		cnt = sscanf(strtab + sym->st_name,
			     ".klp.sym.%55[^.].%127[^,],%lu",
			     objname, symname, &sympos);
		if (cnt != 3) {
			pr_err("symbol %s has an incorrectly formatted name\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
		vmlinux = !strcmp(objname, "vmlinux");
		ret = klp_find_object_symbol(vmlinux ? NULL : objname,
					     symname, sympos, &addr);
		if (ret)
			return ret;

		sym->st_value = addr;
	}

	return 0;
}

static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int i, cnt, ret = 0;
	const char *objname, *secname;
	char sec_objname[MODULE_NAME_LEN];
	Elf_Shdr *sec;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	objname = klp_is_module(obj) ? obj->name : "vmlinux";

	/* For each klp relocation section */
	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
		sec = pmod->klp_info->sechdrs + i;
		secname = pmod->klp_info->secstrings + sec->sh_name;
		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
			continue;

		/*
		 * Format: .klp.rela.sec_objname.section_name
		 * See comment in klp_resolve_symbols() for an explanation
		 * of the selected field width value.
		 */
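		/*
		 * An illustrative instance: ".klp.rela.vmlinux..text", where
		 * the second dot starts the ELF section name itself.
		 */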
		cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
		if (cnt != 1) {
			pr_err("section %s has an incorrectly formatted name\n",
			       secname);
			ret = -EINVAL;
			break;
		}

		if (strcmp(objname, sec_objname))
			continue;

		ret = klp_resolve_symbols(sec, pmod);
		if (ret)
			break;

		ret = apply_relocate_add(pmod->klp_info->sechdrs,
					 pmod->core_kallsyms.strtab,
					 pmod->klp_info->symndx, i, pmod);
		if (ret)
			break;
	}

	return ret;
}

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/transition
 * /sys/kernel/livepatch/<patch>/force
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */
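
/*
 * Illustrative use from userspace (see Documentation/livepatch/ for the
 * authoritative description), where <patch> is the patch module name:
 *
 *	echo 0 > /sys/kernel/livepatch/<patch>/enabled	disable (revert)
 *	cat /sys/kernel/livepatch/<patch>/transition	1 while in transition
 *	echo 1 > /sys/kernel/livepatch/<patch>/force	force the transition
 */
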
static int __klp_disable_patch(struct klp_patch *patch);

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool enabled;

	ret = kstrtobool(buf, &enabled);
	if (ret)
		return ret;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (patch->enabled == enabled) {
		/* already in requested state */
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Allow a pending transition to be reversed in either direction. It
	 * might be necessary to complete the transition without forcing it
	 * and breaking the system integrity.
	 *
	 * Do not allow a disabled patch to be re-enabled.
	 */
	if (patch == klp_transition_patch)
		klp_reverse_transition();
	else if (!enabled)
		ret = __klp_disable_patch(patch);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&klp_mutex);

	if (ret)
		return ret;
	return count;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
}

static ssize_t transition_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n",
			patch == klp_transition_patch);
}

static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool val;

	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;

	if (!val)
		return count;

	mutex_lock(&klp_mutex);

	patch = container_of(kobj, struct klp_patch, kobj);
	if (patch != klp_transition_patch) {
		mutex_unlock(&klp_mutex);
		return -EINVAL;
	}

	klp_force_transition();

	mutex_unlock(&klp_mutex);

	return count;
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	&transition_kobj_attr.attr,
	&force_kobj_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(klp_patch);

static void klp_free_object_dynamic(struct klp_object *obj)
{
	kfree(obj->name);
	kfree(obj);
}

static void klp_init_func_early(struct klp_object *obj,
				struct klp_func *func);
static void klp_init_object_early(struct klp_patch *patch,
				  struct klp_object *obj);

static struct klp_object *klp_alloc_object_dynamic(const char *name,
						   struct klp_patch *patch)
{
	struct klp_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (name) {
		obj->name = kstrdup(name, GFP_KERNEL);
		if (!obj->name) {
			kfree(obj);
			return NULL;
		}
	}

	klp_init_object_early(patch, obj);
	obj->dynamic = true;

	return obj;
}

static void klp_free_func_nop(struct klp_func *func)
{
	kfree(func->old_name);
	kfree(func);
}

static struct klp_func *klp_alloc_func_nop(struct klp_func *old_func,
					   struct klp_object *obj)
{
	struct klp_func *func;

	func = kzalloc(sizeof(*func), GFP_KERNEL);
	if (!func)
		return NULL;

	if (old_func->old_name) {
		func->old_name = kstrdup(old_func->old_name, GFP_KERNEL);
		if (!func->old_name) {
			kfree(func);
			return NULL;
		}
	}

	klp_init_func_early(obj, func);
	/*
	 * func->new_func is the same as func->old_func. These addresses are
	 * set when the object is loaded, see klp_init_object_loaded().
	 */
	func->old_sympos = old_func->old_sympos;
	func->nop = true;

	return func;
}

static int klp_add_object_nops(struct klp_patch *patch,
			       struct klp_object *old_obj)
{
	struct klp_object *obj;
	struct klp_func *func, *old_func;

	obj = klp_find_object(patch, old_obj);

	if (!obj) {
		obj = klp_alloc_object_dynamic(old_obj->name, patch);
		if (!obj)
			return -ENOMEM;
	}

	klp_for_each_func(old_obj, old_func) {
		func = klp_find_func(obj, old_func);
		if (func)
			continue;

		func = klp_alloc_func_nop(old_func, obj);
		if (!func)
			return -ENOMEM;
	}

	return 0;
}

/*
 * Add 'nop' functions which simply return to the caller to run
 * the original function. The 'nop' functions are added to a
 * patch to facilitate a 'replace' mode.
 */
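/*
 * A patch opts in to the 'replace' mode by setting .replace = true in its
 * struct klp_patch (see the illustrative sketch near klp_enable_patch()
 * below); klp_init_patch() then calls klp_add_nops().
 */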
static int klp_add_nops(struct klp_patch *patch)
{
	struct klp_patch *old_patch;
	struct klp_object *old_obj;

	klp_for_each_patch(old_patch) {
		klp_for_each_object(old_patch, old_obj) {
			int err;

			err = klp_add_object_nops(patch, old_obj);
			if (err)
				return err;
		}
	}

	return 0;
}

static void klp_kobj_release_patch(struct kobject *kobj)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	complete(&patch->finish);
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = klp_patch_groups,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
	struct klp_object *obj;

	obj = container_of(kobj, struct klp_object, kobj);

	if (obj->dynamic)
		klp_free_object_dynamic(obj);
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
	struct klp_func *func;

	func = container_of(kobj, struct klp_func, kobj);

	if (func->nop)
		klp_free_func_nop(func);
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};

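/*
 * Free (drop the kobject references of) all funcs of @obj, or only the
 * dynamically allocated 'nop' funcs when @nops_only is true.
 */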
static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func, *tmp_func;

	klp_for_each_func_safe(obj, func, tmp_func) {
		if (nops_only && !func->nop)
			continue;

		list_del(&func->node);
		kobject_put(&func->kobj);
	}
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func) {
		func->old_func = NULL;

		if (func->nop)
			func->new_func = NULL;
	}
}

static void __klp_free_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj, *tmp_obj;

	klp_for_each_object_safe(patch, obj, tmp_obj) {
		__klp_free_funcs(obj, nops_only);

		if (nops_only && !obj->dynamic)
			continue;

		list_del(&obj->node);
		kobject_put(&obj->kobj);
	}
}

static void klp_free_objects(struct klp_patch *patch)
{
	__klp_free_objects(patch, false);
}

static void klp_free_objects_dynamic(struct klp_patch *patch)
{
	__klp_free_objects(patch, true);
}

/*
 * This function implements the free operations that can be called safely
 * under klp_mutex.
 *
 * The operation must be completed by calling klp_free_patch_finish()
 * outside klp_mutex.
 */
void klp_free_patch_start(struct klp_patch *patch)
{
	if (!list_empty(&patch->list))
		list_del(&patch->list);

	klp_free_objects(patch);
}

/*
 * This function implements the free part that must be called outside
 * klp_mutex.
 *
 * It must be called after klp_free_patch_start(), and it has to be the
 * last function accessing the livepatch structures when the patch gets
 * disabled.
 */
static void klp_free_patch_finish(struct klp_patch *patch)
{
	/*
	 * Avoid deadlock with enabled_store() sysfs callback by
	 * calling this outside klp_mutex. It is safe because
	 * this is called when the patch gets disabled and it
	 * cannot get enabled again.
	 */
	kobject_put(&patch->kobj);
	wait_for_completion(&patch->finish);

	/* Put the module after the last access to struct klp_patch. */
	if (!patch->forced)
		module_put(patch->mod);
}

/*
 * The livepatch might be freed from the sysfs interface created by the
 * patch. This work item allows us to wait until the interface is
 * destroyed in a separate context.
 */
static void klp_free_patch_work_fn(struct work_struct *work)
{
	struct klp_patch *patch =
		container_of(work, struct klp_patch, free_work);

	klp_free_patch_finish(patch);
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	if (!func->old_name)
		return -EINVAL;

	/*
	 * NOPs get the address later. The patched module must be loaded,
	 * see klp_init_object_loaded().
	 */
	if (!func->new_func && !func->nop)
		return -EINVAL;

	if (strlen(func->old_name) >= KSYM_NAME_LEN)
		return -EINVAL;

	INIT_LIST_HEAD(&func->stack_node);
	func->patched = false;
	func->transition = false;

	/*
	 * The format for the sysfs directory is <function,sympos> where sympos
	 * is the nth occurrence of this symbol in kallsyms for the patched
	 * object. If the user selects 0 for old_sympos, then 1 will be used
	 * since a unique symbol will be the first occurrence.
	 */
	return kobject_add(&func->kobj, &obj->kobj, "%s,%lu",
			   func->old_name,
			   func->old_sympos ? func->old_sympos : 1);
}

/* Arches may override this to finish any remaining arch-specific tasks */
void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
					struct klp_object *obj)
{
}

/* Parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	module_disable_ro(patch->mod);
	ret = klp_write_object_relocations(patch->mod, obj);
	if (ret) {
		module_enable_ro(patch->mod, true);
		return ret;
	}

	arch_klp_init_object_loaded(patch, obj);
	module_enable_ro(patch->mod, true);

	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     (unsigned long *)&func->old_func);
		if (ret)
			return ret;

		ret = kallsyms_lookup_size_offset((unsigned long)func->old_func,
						  &func->old_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s'\n",
			       func->old_name);
			return -ENOENT;
		}

		if (func->nop)
			func->new_func = func->old_func;

		ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
						  &func->new_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s' replacement\n",
			       func->old_name);
			return -ENOENT;
		}
	}

	return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
		return -EINVAL;

	obj->patched = false;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_add(&obj->kobj, &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			return ret;
	}

	if (klp_is_object_loaded(obj))
		ret = klp_init_object_loaded(patch, obj);

	return ret;
}

static void klp_init_func_early(struct klp_object *obj,
				struct klp_func *func)
{
	kobject_init(&func->kobj, &klp_ktype_func);
	list_add_tail(&func->node, &obj->func_list);
}

static void klp_init_object_early(struct klp_patch *patch,
				  struct klp_object *obj)
{
	INIT_LIST_HEAD(&obj->func_list);
	kobject_init(&obj->kobj, &klp_ktype_object);
	list_add_tail(&obj->node, &patch->obj_list);
}

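/*
 * Set up the basic structures (lists, kobjects, flags, completion) for the
 * patch and its statically defined objects and funcs, and pin the patch
 * module. No sysfs entries are created here; that is done later by
 * klp_init_patch().
 */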
static int klp_init_patch_early(struct klp_patch *patch)
{
	struct klp_object *obj;
	struct klp_func *func;

	if (!patch->objs)
		return -EINVAL;

	INIT_LIST_HEAD(&patch->list);
	INIT_LIST_HEAD(&patch->obj_list);
	kobject_init(&patch->kobj, &klp_ktype_patch);
	patch->enabled = false;
	patch->forced = false;
	INIT_WORK(&patch->free_work, klp_free_patch_work_fn);
	init_completion(&patch->finish);

	klp_for_each_object_static(patch, obj) {
		if (!obj->funcs)
			return -EINVAL;

		klp_init_object_early(patch, obj);

		klp_for_each_func_static(obj, func) {
			klp_init_func_early(obj, func);
		}
	}

	if (!try_module_get(patch->mod))
		return -ENODEV;

	return 0;
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	ret = kobject_add(&patch->kobj, klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		return ret;

	if (patch->replace) {
		ret = klp_add_nops(patch);
		if (ret)
			return ret;
	}

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			return ret;
	}

	list_add_tail(&patch->list, &klp_patches);

	return 0;
}

static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	if (WARN_ON(!patch->enabled))
		return -EINVAL;

	if (klp_transition_patch)
		return -EBUSY;

	klp_init_transition(patch, KLP_UNPATCHED);

	klp_for_each_object(patch, obj)
		if (obj->patched)
			klp_pre_unpatch_callback(obj);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the TIF_PATCH_PENDING writes in
	 * klp_start_transition().  In the rare case where klp_ftrace_handler()
	 * is called shortly after klp_update_patch_state() switches the task,
	 * this ensures the handler sees that func->transition is set.
	 */
	smp_wmb();

	klp_start_transition();
	patch->enabled = false;
	klp_try_complete_transition();

	return 0;
}

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (klp_transition_patch)
		return -EBUSY;

	if (WARN_ON(patch->enabled))
		return -EINVAL;

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_init_transition(patch, KLP_PATCHED);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the ops->func_stack writes in
	 * klp_patch_object(), so that klp_ftrace_handler() will see the
	 * func->transition updates before the handler is registered and the
	 * new funcs become visible to the handler.
	 */
	smp_wmb();

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_pre_patch_callback(obj);
		if (ret) {
			pr_warn("pre-patch callback failed for object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}

		ret = klp_patch_object(obj);
		if (ret) {
			pr_warn("failed to patch object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}
	}

	klp_start_transition();
	patch->enabled = true;
	klp_try_complete_transition();

	return 0;
err:
	pr_warn("failed to enable patch '%s'\n", patch->mod->name);

	klp_cancel_transition();
	return ret;
}

/**
 * klp_enable_patch() - enable the livepatch
 * @patch:	patch to be enabled
 *
 * Initializes the data structure associated with the patch, creates the sysfs
 * interface, performs the needed symbol lookups and code relocations,
 * registers the patched functions with ftrace.
 *
 * This function is supposed to be called from the livepatch module_init()
 * callback.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	if (!patch || !patch->mod)
		return -EINVAL;

	if (!is_livepatch_module(patch->mod)) {
		pr_err("module %s is not marked as a livepatch module\n",
		       patch->mod->name);
		return -EINVAL;
	}

	if (!klp_initialized())
		return -ENODEV;

	if (!klp_have_reliable_stack()) {
		pr_warn("This architecture doesn't have support for the livepatch consistency model.\n");
		pr_warn("The livepatch transition may never complete.\n");
	}

	mutex_lock(&klp_mutex);

	ret = klp_init_patch_early(patch);
	if (ret) {
		mutex_unlock(&klp_mutex);
		return ret;
	}

	ret = klp_init_patch(patch);
	if (ret)
		goto err;

	ret = __klp_enable_patch(patch);
	if (ret)
		goto err;

	mutex_unlock(&klp_mutex);

	return 0;

err:
	klp_free_patch_start(patch);

	mutex_unlock(&klp_mutex);

	klp_free_patch_finish(patch);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);
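
/*
 * Illustrative sketch only, modeled on samples/livepatch/livepatch-sample.c;
 * the patched symbol (example_proc_show) and its replacement are
 * hypothetical placeholders. A NULL klp_object name targets vmlinux.
 *
 *	static int livepatch_example_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "patched\n");
 *		return 0;
 *	}
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "example_proc_show",
 *			.new_func = livepatch_example_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.funcs = funcs,
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_example_init(void)
 *	{
 *		return klp_enable_patch(&patch);
 *	}
 *	module_init(livepatch_example_init);
 *	MODULE_LICENSE("GPL");
 *	MODULE_INFO(livepatch, "Y");
 */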

/*
 * This function removes replaced patches.
 *
 * We could be pretty aggressive here. It is called in the situation where
 * these structures are no longer accessible. All functions are redirected
 * by the klp_transition_patch. They either use the new code or stay in the
 * original code because of the special nop function patches.
 *
 * The only exception is when the transition was forced. In this case,
 * klp_ftrace_handler() might still see the replaced patch on the stack.
 * Fortunately, it is carefully designed to work with removed functions
 * thanks to RCU. We only have to keep the patches on the system. This is
 * handled transparently by patch->forced, which makes
 * klp_free_patch_finish() skip the module_put().
 */
void klp_discard_replaced_patches(struct klp_patch *new_patch)
{
	struct klp_patch *old_patch, *tmp_patch;

	klp_for_each_patch_safe(old_patch, tmp_patch) {
		if (old_patch == new_patch)
			return;

		old_patch->enabled = false;
		klp_unpatch_objects(old_patch);
		klp_free_patch_start(old_patch);
		schedule_work(&old_patch->free_work);
	}
}

/*
 * This function removes the dynamically allocated 'nop' functions.
 *
 * We could be pretty aggressive. NOPs do not change the existing
 * behavior except for adding an unnecessary delay in the ftrace handler.
 *
 * It is safe even when the transition was forced. The ftrace handler
 * will see a valid ops->func_stack entry thanks to RCU.
 *
 * We could even free the NOP structures. They must be the last entry
 * in ops->func_stack. Therefore unregister_ftrace_function() is called.
 * It does the same as klp_synchronize_transition() to make sure that
 * nobody is inside the ftrace handler once the operation finishes.
 *
 * IMPORTANT: It must be called right after removing the replaced patches!
 */
void klp_discard_nops(struct klp_patch *new_patch)
{
	klp_unpatch_objects_dynamic(klp_transition_patch);
	klp_free_objects_dynamic(klp_transition_patch);
}

/*
 * Remove parts of patches that touch a given kernel module. Only the
 * patches preceding @limit are processed; when @limit is NULL, all
 * patches are handled.
 */
static void klp_cleanup_module_patches_limited(struct module *mod,
					       struct klp_patch *limit)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	klp_for_each_patch(patch) {
		if (patch == limit)
			break;

		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (patch != klp_transition_patch)
				klp_pre_unpatch_callback(obj);

			pr_notice("reverting patch '%s' on unloading module '%s'\n",
				  patch->mod->name, obj->mod->name);
			klp_unpatch_object(obj);

			klp_post_unpatch_callback(obj);

			klp_free_object_loaded(obj);
			break;
		}
	}
}

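/*
 * Called by the module loader while @mod is in the COMING state, before it
 * starts executing; applies any already-loaded patches that target it.
 */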
int klp_module_coming(struct module *mod)
{
	int ret;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_COMING))
		return -EINVAL;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_coming()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = true;

	klp_for_each_patch(patch) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			obj->mod = mod;

			ret = klp_init_object_loaded(patch, obj);
			if (ret) {
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			pr_notice("applying patch '%s' to loading module '%s'\n",
				  patch->mod->name, obj->mod->name);

			ret = klp_pre_patch_callback(obj);
			if (ret) {
				pr_warn("pre-patch callback failed for object '%s'\n",
					obj->name);
				goto err;
			}

			ret = klp_patch_object(obj);
			if (ret) {
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);

				klp_post_unpatch_callback(obj);
				goto err;
			}

			if (patch != klp_transition_patch)
				klp_post_patch_callback(obj);

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;

err:
	/*
	 * If a patch is unsuccessfully applied, return
	 * error to the module loader.
	 */
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	klp_cleanup_module_patches_limited(mod, patch);
	mutex_unlock(&klp_mutex);

	return ret;
}

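/*
 * Called by the module loader when @mod is being removed (also when a
 * failed COMING module is rolled back); reverts any patches applied to it.
 */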
void klp_module_going(struct module *mod)
{
	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_going()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = false;

	klp_cleanup_module_patches_limited(mod, NULL);

	mutex_unlock(&klp_mutex);
}

static int __init klp_init(void)
{
	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj)
		return -ENOMEM;

	return 0;
}

module_init(klp_init);