xref: /linux/kernel/livepatch/core.c (revision 5cb5575308bce9d63178fe943bf89c520a348808)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * core.c - Kernel Live Patching Core
4  *
5  * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
6  * Copyright (C) 2014 SUSE
7  */
8 
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 
11 #include <linux/module.h>
12 #include <linux/kernel.h>
13 #include <linux/mutex.h>
14 #include <linux/slab.h>
15 #include <linux/list.h>
16 #include <linux/kallsyms.h>
17 #include <linux/livepatch.h>
18 #include <linux/elf.h>
19 #include <linux/moduleloader.h>
20 #include <linux/completion.h>
21 #include <linux/memory.h>
22 #include <linux/rcupdate.h>
23 #include <asm/cacheflush.h>
24 #include "core.h"
25 #include "patch.h"
26 #include "state.h"
27 #include "transition.h"
28 
29 /*
30  * klp_mutex is a coarse lock which serializes access to klp data.  All
31  * accesses to klp-related variables and structures must have mutex protection,
32  * except within the following functions which carefully avoid the need for it:
33  *
34  * - klp_ftrace_handler()
35  * - klp_update_patch_state()
36  * - __klp_sched_try_switch()
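 *
 * A typical guarded section (illustrative) therefore looks like:
 *
 *	mutex_lock(&klp_mutex);
 *	... access klp_patches, patch->enabled, and friends ...
 *	mutex_unlock(&klp_mutex);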
37  */
38 DEFINE_MUTEX(klp_mutex);
39 
40 /*
41  * Actively used patches: enabled or in transition. Note that replaced
42  * or disabled patches are not listed even though the related kernel
43  * module can still be loaded.
44  */
45 LIST_HEAD(klp_patches);
46 
47 static struct kobject *klp_root_kobj;
48 
49 static bool klp_is_module(struct klp_object *obj)
50 {
51 	return obj->name;
52 }
53 
54 /* sets obj->mod if object is not vmlinux and module is found */
55 static void klp_find_object_module(struct klp_object *obj)
56 {
57 	struct module *mod;
58 
59 	if (!klp_is_module(obj))
60 		return;
61 
62 	rcu_read_lock_sched();
63 	/*
64 	 * We do not want to block removal of patched modules and therefore
65 	 * we do not take a reference here. The patches are removed by
66 	 * klp_module_going() instead.
67 	 */
68 	mod = find_module(obj->name);
69 	/*
70 	 * Do not interfere with the work of klp_module_coming() and
71 	 * klp_module_going(). Note that the patch might still be needed before
72 	 * klp_module_going() is called. Module functions can be called even in
73 	 * the GOING state until mod->exit() finishes. This is especially
74 	 * important for patches that modify the semantics of the patched functions.
75 	 */
76 	if (mod && mod->klp_alive)
77 		obj->mod = mod;
78 
79 	rcu_read_unlock_sched();
80 }
81 
82 static bool klp_initialized(void)
83 {
84 	return !!klp_root_kobj;
85 }
86 
87 static struct klp_func *klp_find_func(struct klp_object *obj,
88 				      struct klp_func *old_func)
89 {
90 	struct klp_func *func;
91 
92 	klp_for_each_func(obj, func) {
93 		/*
94 		 * Besides exactly matching old_sympos values, also treat
95 		 * old_sympos values of 0 and 1 as identical.
96 		 */
97 		if ((strcmp(old_func->old_name, func->old_name) == 0) &&
98 		    ((old_func->old_sympos == func->old_sympos) ||
99 		     (old_func->old_sympos == 0 && func->old_sympos == 1) ||
100 		     (old_func->old_sympos == 1 && func->old_sympos == 0))) {
101 			return func;
102 		}
103 	}
104 
105 	return NULL;
106 }
107 
108 static struct klp_object *klp_find_object(struct klp_patch *patch,
109 					  struct klp_object *old_obj)
110 {
111 	struct klp_object *obj;
112 
113 	klp_for_each_object(patch, obj) {
114 		if (klp_is_module(old_obj)) {
115 			if (klp_is_module(obj) &&
116 			    strcmp(old_obj->name, obj->name) == 0) {
117 				return obj;
118 			}
119 		} else if (!klp_is_module(obj)) {
120 			return obj;
121 		}
122 	}
123 
124 	return NULL;
125 }
126 
127 struct klp_find_arg {
128 	const char *name;
129 	unsigned long addr;
130 	unsigned long count;
131 	unsigned long pos;
132 };
133 
134 static int klp_match_callback(void *data, unsigned long addr)
135 {
136 	struct klp_find_arg *args = data;
137 
138 	args->addr = addr;
139 	args->count++;
140 
141 	/*
142 	 * Finish the search when the symbol is found for the desired position
143 	 * or the position is not defined for a non-unique symbol.
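	 * E.g., pos == 2 stops at the second match, while pos == 0 stops as
	 * soon as a second match shows the symbol is not unique.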
144 	 */
145 	if ((args->pos && (args->count == args->pos)) ||
146 	    (!args->pos && (args->count > 1)))
147 		return 1;
148 
149 	return 0;
150 }
151 
152 static int klp_find_callback(void *data, const char *name, unsigned long addr)
153 {
154 	struct klp_find_arg *args = data;
155 
156 	if (strcmp(args->name, name))
157 		return 0;
158 
159 	return klp_match_callback(data, addr);
160 }
161 
162 static int klp_find_object_symbol(const char *objname, const char *name,
163 				  unsigned long sympos, unsigned long *addr)
164 {
165 	struct klp_find_arg args = {
166 		.name = name,
167 		.addr = 0,
168 		.count = 0,
169 		.pos = sympos,
170 	};
171 
172 	if (objname)
173 		module_kallsyms_on_each_symbol(objname, klp_find_callback, &args);
174 	else
175 		kallsyms_on_each_match_symbol(klp_match_callback, name, &args);
176 
177 	/*
178 	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
179 	 * otherwise ensure the symbol position count matches sympos.
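	 * E.g., sympos == 2 selects the address of the second occurrence;
	 * it is an error if fewer than two occurrences were found.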
180 	 */
181 	if (args.addr == 0)
182 		pr_err("symbol '%s' not found in symbol table\n", name);
183 	else if (args.count > 1 && sympos == 0) {
184 		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
185 		       name, objname);
186 	} else if (sympos != args.count && sympos > 0) {
187 		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
188 		       sympos, name, objname ? objname : "vmlinux");
189 	} else {
190 		*addr = args.addr;
191 		return 0;
192 	}
193 
194 	*addr = 0;
195 	return -EINVAL;
196 }
197 
198 static int klp_resolve_symbols(Elf_Shdr *sechdrs, const char *strtab,
199 			       unsigned int symndx, Elf_Shdr *relasec,
200 			       const char *sec_objname)
201 {
202 	int i, cnt, ret;
203 	char sym_objname[MODULE_NAME_LEN];
204 	char sym_name[KSYM_NAME_LEN];
205 	Elf_Rela *relas;
206 	Elf_Sym *sym;
207 	unsigned long sympos, addr;
208 	bool sym_vmlinux;
209 	bool sec_vmlinux = !strcmp(sec_objname, "vmlinux");
210 
211 	/*
212 	 * Since the field widths for sym_objname and sym_name in the sscanf()
213 	 * call are hard-coded and correspond to MODULE_NAME_LEN and
214 	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
215 	 * and KSYM_NAME_LEN have the values we expect them to have.
216 	 *
217 	 * Because the value of MODULE_NAME_LEN can differ among architectures,
218 	 * we use the smallest/strictest upper bound possible (56, based on
219 	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
220 	 */
221 	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 512);
222 
223 	relas = (Elf_Rela *) relasec->sh_addr;
224 	/* For each rela in this klp relocation section */
225 	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
226 		sym = (Elf_Sym *)sechdrs[symndx].sh_addr + ELF_R_SYM(relas[i].r_info);
227 		if (sym->st_shndx != SHN_LIVEPATCH) {
228 			pr_err("symbol %s is not marked as a livepatch symbol\n",
229 			       strtab + sym->st_name);
230 			return -EINVAL;
231 		}
232 
233 		/* Format: .klp.sym.sym_objname.sym_name,sympos */
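		/* e.g. ".klp.sym.vmlinux.cmdline_proc_show,0" (illustrative) */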
234 		cnt = sscanf(strtab + sym->st_name,
235 			     ".klp.sym.%55[^.].%511[^,],%lu",
236 			     sym_objname, sym_name, &sympos);
237 		if (cnt != 3) {
238 			pr_err("symbol %s has an incorrectly formatted name\n",
239 			       strtab + sym->st_name);
240 			return -EINVAL;
241 		}
242 
243 		sym_vmlinux = !strcmp(sym_objname, "vmlinux");
244 
245 		/*
246 		 * Prevent module-specific KLP rela sections from referencing
247 		 * vmlinux symbols.  This helps prevent ordering issues with
248 		 * module special section initializations.  Presumably such
249 		 * symbols are exported and normal relas can be used instead.
250 		 */
251 		if (!sec_vmlinux && sym_vmlinux) {
252 			pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section\n",
253 			       sym_name);
254 			return -EINVAL;
255 		}
256 
257 		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
258 		ret = klp_find_object_symbol(sym_vmlinux ? NULL : sym_objname,
259 					     sym_name, sympos, &addr);
260 		if (ret)
261 			return ret;
262 
263 		sym->st_value = addr;
264 	}
265 
266 	return 0;
267 }
268 
269 void __weak clear_relocate_add(Elf_Shdr *sechdrs,
270 		   const char *strtab,
271 		   unsigned int symindex,
272 		   unsigned int relsec,
273 		   struct module *me)
274 {
275 }
276 
277 /*
278  * At a high-level, there are two types of klp relocation sections: those which
279  * reference symbols which live in vmlinux; and those which reference symbols
280  * which live in other modules.  This function is called for both types:
281  *
282  * 1) When a klp module itself loads, the module code calls this function to
283  *    write vmlinux-specific klp relocations (.klp.rela.vmlinux.* sections).
284  *    These relocations are written to the klp module text to allow the patched
285  *    code/data to reference unexported vmlinux symbols.  They're written as
286  *    early as possible to ensure that other module init code (e.g.,
287  *    jump_label_apply_nops) can access any unexported vmlinux symbols which
288  *    might be referenced by the klp module's special sections.
289  *
290  * 2) When a to-be-patched module loads -- or is already loaded when a
291  *    corresponding klp module loads -- klp code calls this function to write
292  *    module-specific klp relocations (.klp.rela.{module}.* sections).  These
293  *    are written to the klp module text to allow the patched code/data to
294  *    reference symbols which live in the to-be-patched module or one of its
295  *    module dependencies.  Exported symbols are supported, in addition to
296  *    unexported symbols, in order to enable late module patching, which allows
297  *    the to-be-patched module to be loaded and patched sometime *after* the
298  *    klp module is loaded.
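 *
 * For example (illustrative section names), a single klp module patching
 * both vmlinux and the ext4 module could carry:
 *
 *	.klp.rela.vmlinux..text		(case 1, written when the klp module loads)
 *	.klp.rela.ext4..text		(case 2, written when ext4 gets patched)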
299  */
300 static int klp_write_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
301 				    const char *shstrtab, const char *strtab,
302 				    unsigned int symndx, unsigned int secndx,
303 				    const char *objname, bool apply)
304 {
305 	int cnt, ret;
306 	char sec_objname[MODULE_NAME_LEN];
307 	Elf_Shdr *sec = sechdrs + secndx;
308 
309 	/*
310 	 * Format: .klp.rela.sec_objname.section_name
311 	 * See comment in klp_resolve_symbols() for an explanation
312 	 * of the selected field width value.
313 	 */
314 	cnt = sscanf(shstrtab + sec->sh_name, ".klp.rela.%55[^.]",
315 		     sec_objname);
316 	if (cnt != 1) {
317 		pr_err("section %s has an incorrectly formatted name\n",
318 		       shstrtab + sec->sh_name);
319 		return -EINVAL;
320 	}
321 
322 	if (strcmp(objname ? objname : "vmlinux", sec_objname))
323 		return 0;
324 
325 	if (apply) {
326 		ret = klp_resolve_symbols(sechdrs, strtab, symndx,
327 					  sec, sec_objname);
328 		if (ret)
329 			return ret;
330 
331 		return apply_relocate_add(sechdrs, strtab, symndx, secndx, pmod);
332 	}
333 
334 	clear_relocate_add(sechdrs, strtab, symndx, secndx, pmod);
335 	return 0;
336 }
337 
338 int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
339 			     const char *shstrtab, const char *strtab,
340 			     unsigned int symndx, unsigned int secndx,
341 			     const char *objname)
342 {
343 	return klp_write_section_relocs(pmod, sechdrs, shstrtab, strtab, symndx,
344 					secndx, objname, true);
345 }
346 
347 /*
348  * Sysfs Interface
349  *
350  * /sys/kernel/livepatch
351  * /sys/kernel/livepatch/<patch>
352  * /sys/kernel/livepatch/<patch>/enabled
353  * /sys/kernel/livepatch/<patch>/transition
354  * /sys/kernel/livepatch/<patch>/force
355  * /sys/kernel/livepatch/<patch>/replace
356  * /sys/kernel/livepatch/<patch>/stack_order
357  * /sys/kernel/livepatch/<patch>/<object>
358  * /sys/kernel/livepatch/<patch>/<object>/patched
359  * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
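 *
 * For example, an administrator can initiate disabling a patch (or reverse
 * a pending transition) with:
 *
 *	echo 0 > /sys/kernel/livepatch/<patch>/enabled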
360  */
361 static int __klp_disable_patch(struct klp_patch *patch);
362 
363 static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
364 			     const char *buf, size_t count)
365 {
366 	struct klp_patch *patch;
367 	int ret;
368 	bool enabled;
369 
370 	ret = kstrtobool(buf, &enabled);
371 	if (ret)
372 		return ret;
373 
374 	patch = container_of(kobj, struct klp_patch, kobj);
375 
376 	mutex_lock(&klp_mutex);
377 
378 	if (patch->enabled == enabled) {
379 		/* already in requested state */
380 		ret = -EINVAL;
381 		goto out;
382 	}
383 
384 	/*
385 	 * Allow a pending transition to be reversed in either direction. It
386 	 * might be necessary to complete the transition without forcing it,
387 	 * since forcing would compromise system integrity.
388 	 *
389 	 * Do not allow a disabled patch to be re-enabled.
390 	 */
391 	if (patch == klp_transition_patch)
392 		klp_reverse_transition();
393 	else if (!enabled)
394 		ret = __klp_disable_patch(patch);
395 	else
396 		ret = -EINVAL;
397 
398 out:
399 	mutex_unlock(&klp_mutex);
400 
401 	if (ret)
402 		return ret;
403 	return count;
404 }
405 
406 static ssize_t enabled_show(struct kobject *kobj,
407 			    struct kobj_attribute *attr, char *buf)
408 {
409 	struct klp_patch *patch;
410 
411 	patch = container_of(kobj, struct klp_patch, kobj);
412 	return sysfs_emit(buf, "%d\n", patch->enabled);
413 }
414 
415 static ssize_t transition_show(struct kobject *kobj,
416 			       struct kobj_attribute *attr, char *buf)
417 {
418 	struct klp_patch *patch;
419 
420 	patch = container_of(kobj, struct klp_patch, kobj);
421 	return sysfs_emit(buf, "%d\n", patch == klp_transition_patch);
422 }
423 
424 static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
425 			   const char *buf, size_t count)
426 {
427 	struct klp_patch *patch;
428 	int ret;
429 	bool val;
430 
431 	ret = kstrtobool(buf, &val);
432 	if (ret)
433 		return ret;
434 
435 	if (!val)
436 		return count;
437 
438 	mutex_lock(&klp_mutex);
439 
440 	patch = container_of(kobj, struct klp_patch, kobj);
441 	if (patch != klp_transition_patch) {
442 		mutex_unlock(&klp_mutex);
443 		return -EINVAL;
444 	}
445 
446 	klp_force_transition();
447 
448 	mutex_unlock(&klp_mutex);
449 
450 	return count;
451 }
452 
453 static ssize_t replace_show(struct kobject *kobj,
454 			    struct kobj_attribute *attr, char *buf)
455 {
456 	struct klp_patch *patch;
457 
458 	patch = container_of(kobj, struct klp_patch, kobj);
459 	return sysfs_emit(buf, "%d\n", patch->replace);
460 }
461 
462 static ssize_t stack_order_show(struct kobject *kobj,
463 				struct kobj_attribute *attr, char *buf)
464 {
465 	struct klp_patch *patch, *this_patch;
466 	int stack_order = 0;
467 
468 	this_patch = container_of(kobj, struct klp_patch, kobj);
469 
470 	mutex_lock(&klp_mutex);
471 
472 	klp_for_each_patch(patch) {
473 		stack_order++;
474 		if (patch == this_patch)
475 			break;
476 	}
477 
478 	mutex_unlock(&klp_mutex);
479 
480 	return sysfs_emit(buf, "%d\n", stack_order);
481 }
482 
483 static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
484 static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
485 static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
486 static struct kobj_attribute replace_kobj_attr = __ATTR_RO(replace);
487 static struct kobj_attribute stack_order_kobj_attr = __ATTR_RO(stack_order);
488 static struct attribute *klp_patch_attrs[] = {
489 	&enabled_kobj_attr.attr,
490 	&transition_kobj_attr.attr,
491 	&force_kobj_attr.attr,
492 	&replace_kobj_attr.attr,
493 	&stack_order_kobj_attr.attr,
494 	NULL
495 };
496 ATTRIBUTE_GROUPS(klp_patch);
497 
498 static ssize_t patched_show(struct kobject *kobj,
499 			    struct kobj_attribute *attr, char *buf)
500 {
501 	struct klp_object *obj;
502 
503 	obj = container_of(kobj, struct klp_object, kobj);
504 	return sysfs_emit(buf, "%d\n", obj->patched);
505 }
506 
507 static struct kobj_attribute patched_kobj_attr = __ATTR_RO(patched);
508 static struct attribute *klp_object_attrs[] = {
509 	&patched_kobj_attr.attr,
510 	NULL,
511 };
512 ATTRIBUTE_GROUPS(klp_object);
513 
514 static void klp_free_object_dynamic(struct klp_object *obj)
515 {
516 	kfree(obj->name);
517 	kfree(obj);
518 }
519 
520 static void klp_init_func_early(struct klp_object *obj,
521 				struct klp_func *func);
522 static void klp_init_object_early(struct klp_patch *patch,
523 				  struct klp_object *obj);
524 
525 static struct klp_object *klp_alloc_object_dynamic(const char *name,
526 						   struct klp_patch *patch)
527 {
528 	struct klp_object *obj;
529 
530 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
531 	if (!obj)
532 		return NULL;
533 
534 	if (name) {
535 		obj->name = kstrdup(name, GFP_KERNEL);
536 		if (!obj->name) {
537 			kfree(obj);
538 			return NULL;
539 		}
540 	}
541 
542 	klp_init_object_early(patch, obj);
543 	obj->dynamic = true;
544 
545 	return obj;
546 }
547 
548 static void klp_free_func_nop(struct klp_func *func)
549 {
550 	kfree(func->old_name);
551 	kfree(func);
552 }
553 
554 static struct klp_func *klp_alloc_func_nop(struct klp_func *old_func,
555 					   struct klp_object *obj)
556 {
557 	struct klp_func *func;
558 
559 	func = kzalloc(sizeof(*func), GFP_KERNEL);
560 	if (!func)
561 		return NULL;
562 
563 	if (old_func->old_name) {
564 		func->old_name = kstrdup(old_func->old_name, GFP_KERNEL);
565 		if (!func->old_name) {
566 			kfree(func);
567 			return NULL;
568 		}
569 	}
570 
571 	klp_init_func_early(obj, func);
572 	/*
573 	 * func->new_func is the same as func->old_func. These addresses are
574 	 * set when the object is loaded, see klp_init_object_loaded().
575 	 */
576 	func->old_sympos = old_func->old_sympos;
577 	func->nop = true;
578 
579 	return func;
580 }
581 
582 static int klp_add_object_nops(struct klp_patch *patch,
583 			       struct klp_object *old_obj)
584 {
585 	struct klp_object *obj;
586 	struct klp_func *func, *old_func;
587 
588 	obj = klp_find_object(patch, old_obj);
589 
590 	if (!obj) {
591 		obj = klp_alloc_object_dynamic(old_obj->name, patch);
592 		if (!obj)
593 			return -ENOMEM;
594 	}
595 
596 	klp_for_each_func(old_obj, old_func) {
597 		func = klp_find_func(obj, old_func);
598 		if (func)
599 			continue;
600 
601 		func = klp_alloc_func_nop(old_func, obj);
602 		if (!func)
603 			return -ENOMEM;
604 	}
605 
606 	return 0;
607 }
608 
609 /*
610  * Add 'nop' functions which simply return to the caller to run the
611  * original function.
612  *
613  * They are added only when the atomic replace mode is used and only for
614  * functions which are currently livepatched but are no longer included
615  * in the new livepatch.
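 *
 * For example, if the patch being replaced modified foo() and bar()
 * (hypothetical names) and the new cumulative patch provides only foo(),
 * a nop is added for bar() so that the transition consistently reverts
 * bar() to its original implementation.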
616  */
617 static int klp_add_nops(struct klp_patch *patch)
618 {
619 	struct klp_patch *old_patch;
620 	struct klp_object *old_obj;
621 
622 	klp_for_each_patch(old_patch) {
623 		klp_for_each_object(old_patch, old_obj) {
624 			int err;
625 
626 			err = klp_add_object_nops(patch, old_obj);
627 			if (err)
628 				return err;
629 		}
630 	}
631 
632 	return 0;
633 }
634 
635 static void klp_kobj_release_patch(struct kobject *kobj)
636 {
637 	struct klp_patch *patch;
638 
639 	patch = container_of(kobj, struct klp_patch, kobj);
640 	complete(&patch->finish);
641 }
642 
643 static const struct kobj_type klp_ktype_patch = {
644 	.release = klp_kobj_release_patch,
645 	.sysfs_ops = &kobj_sysfs_ops,
646 	.default_groups = klp_patch_groups,
647 };
648 
649 static void klp_kobj_release_object(struct kobject *kobj)
650 {
651 	struct klp_object *obj;
652 
653 	obj = container_of(kobj, struct klp_object, kobj);
654 
655 	if (obj->dynamic)
656 		klp_free_object_dynamic(obj);
657 }
658 
659 static const struct kobj_type klp_ktype_object = {
660 	.release = klp_kobj_release_object,
661 	.sysfs_ops = &kobj_sysfs_ops,
662 	.default_groups = klp_object_groups,
663 };
664 
665 static void klp_kobj_release_func(struct kobject *kobj)
666 {
667 	struct klp_func *func;
668 
669 	func = container_of(kobj, struct klp_func, kobj);
670 
671 	if (func->nop)
672 		klp_free_func_nop(func);
673 }
674 
675 static const struct kobj_type klp_ktype_func = {
676 	.release = klp_kobj_release_func,
677 	.sysfs_ops = &kobj_sysfs_ops,
678 };
679 
680 static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
681 {
682 	struct klp_func *func, *tmp_func;
683 
684 	klp_for_each_func_safe(obj, func, tmp_func) {
685 		if (nops_only && !func->nop)
686 			continue;
687 
688 		list_del(&func->node);
689 		kobject_put(&func->kobj);
690 	}
691 }
692 
693 /* Clean up when a patched object is unloaded */
694 static void klp_free_object_loaded(struct klp_object *obj)
695 {
696 	struct klp_func *func;
697 
698 	obj->mod = NULL;
699 
700 	klp_for_each_func(obj, func) {
701 		func->old_func = NULL;
702 
703 		if (func->nop)
704 			func->new_func = NULL;
705 	}
706 }
707 
708 static void __klp_free_objects(struct klp_patch *patch, bool nops_only)
709 {
710 	struct klp_object *obj, *tmp_obj;
711 
712 	klp_for_each_object_safe(patch, obj, tmp_obj) {
713 		__klp_free_funcs(obj, nops_only);
714 
715 		if (nops_only && !obj->dynamic)
716 			continue;
717 
718 		list_del(&obj->node);
719 		kobject_put(&obj->kobj);
720 	}
721 }
722 
723 static void klp_free_objects(struct klp_patch *patch)
724 {
725 	__klp_free_objects(patch, false);
726 }
727 
728 static void klp_free_objects_dynamic(struct klp_patch *patch)
729 {
730 	__klp_free_objects(patch, true);
731 }
732 
733 /*
734  * This function implements the free operations that can be called safely
735  * under klp_mutex.
736  *
737  * The operation must be completed by calling klp_free_patch_finish()
738  * outside klp_mutex.
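 *
 * The expected calling pattern (as used in the klp_enable_patch() error
 * path) is:
 *
 *	mutex_lock(&klp_mutex);
 *	klp_free_patch_start(patch);
 *	mutex_unlock(&klp_mutex);
 *	klp_free_patch_finish(patch);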
739  */
740 static void klp_free_patch_start(struct klp_patch *patch)
741 {
742 	if (!list_empty(&patch->list))
743 		list_del(&patch->list);
744 
745 	klp_free_objects(patch);
746 }
747 
748 /*
749  * This function implements the free part that must be called outside
750  * klp_mutex.
751  *
752  * It must be called after klp_free_patch_start(), and it has to be
753  * the last function accessing the livepatch structures when the patch
754  * gets disabled.
755  */
756 static void klp_free_patch_finish(struct klp_patch *patch)
757 {
758 	/*
759 	 * Avoid deadlock with enabled_store() sysfs callback by
760 	 * calling this outside klp_mutex. It is safe because
761 	 * this is called when the patch gets disabled and it
762 	 * cannot get enabled again.
763 	 */
764 	kobject_put(&patch->kobj);
765 	wait_for_completion(&patch->finish);
766 
767 	/* Put the module after the last access to struct klp_patch. */
768 	if (!patch->forced)
769 		module_put(patch->mod);
770 }
771 
772 /*
773  * The livepatch might be freed from the sysfs interface created by the
774  * patch. This work allows waiting until the interface is destroyed in a
775  * separate context.
776  */
777 static void klp_free_patch_work_fn(struct work_struct *work)
778 {
779 	struct klp_patch *patch =
780 		container_of(work, struct klp_patch, free_work);
781 
782 	klp_free_patch_finish(patch);
783 }
784 
785 void klp_free_patch_async(struct klp_patch *patch)
786 {
787 	klp_free_patch_start(patch);
788 	schedule_work(&patch->free_work);
789 }
790 
791 void klp_free_replaced_patches_async(struct klp_patch *new_patch)
792 {
793 	struct klp_patch *old_patch, *tmp_patch;
794 
795 	klp_for_each_patch_safe(old_patch, tmp_patch) {
796 		if (old_patch == new_patch)
797 			return;
798 		klp_free_patch_async(old_patch);
799 	}
800 }
801 
802 static int klp_init_func(struct klp_object *obj, struct klp_func *func)
803 {
804 	if (!func->old_name)
805 		return -EINVAL;
806 
807 	/*
808 	 * NOPs get the address later. The patched module must be loaded,
809 	 * see klp_init_object_loaded().
810 	 */
811 	if (!func->new_func && !func->nop)
812 		return -EINVAL;
813 
814 	if (strlen(func->old_name) >= KSYM_NAME_LEN)
815 		return -EINVAL;
816 
817 	INIT_LIST_HEAD(&func->stack_node);
818 	func->patched = false;
819 	func->transition = false;
820 
821 	/*
822 	 * The sysfs directory format is <function,sympos>, where sympos is the
823 	 * nth occurrence of the symbol in kallsyms for the patched object. An
824 	 * old_sympos of 0 maps to 1, since a unique symbol is the first occurrence.
825 	 */
826 	return kobject_add(&func->kobj, &obj->kobj, "%s,%lu",
827 			   func->old_name,
828 			   func->old_sympos ? func->old_sympos : 1);
829 }
830 
831 static int klp_write_object_relocs(struct klp_patch *patch,
832 				   struct klp_object *obj,
833 				   bool apply)
834 {
835 	int i, ret;
836 	struct klp_modinfo *info = patch->mod->klp_info;
837 
838 	for (i = 1; i < info->hdr.e_shnum; i++) {
839 		Elf_Shdr *sec = info->sechdrs + i;
840 
841 		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
842 			continue;
843 
844 		ret = klp_write_section_relocs(patch->mod, info->sechdrs,
845 					       info->secstrings,
846 					       patch->mod->core_kallsyms.strtab,
847 					       info->symndx, i, obj->name, apply);
848 		if (ret)
849 			return ret;
850 	}
851 
852 	return 0;
853 }
854 
855 static int klp_apply_object_relocs(struct klp_patch *patch,
856 				   struct klp_object *obj)
857 {
858 	return klp_write_object_relocs(patch, obj, true);
859 }
860 
861 static void klp_clear_object_relocs(struct klp_patch *patch,
862 				    struct klp_object *obj)
863 {
864 	klp_write_object_relocs(patch, obj, false);
865 }
866 
867 /* parts of the initialization that are done only when the object is loaded */
868 static int klp_init_object_loaded(struct klp_patch *patch,
869 				  struct klp_object *obj)
870 {
871 	struct klp_func *func;
872 	int ret;
873 
874 	if (klp_is_module(obj)) {
875 		/*
876 		 * Only write module-specific relocations here
877 		 * (.klp.rela.{module}.*).  vmlinux-specific relocations were
878 		 * written earlier during the initialization of the klp module
879 		 * itself.
880 		 */
881 		ret = klp_apply_object_relocs(patch, obj);
882 		if (ret)
883 			return ret;
884 	}
885 
886 	klp_for_each_func(obj, func) {
887 		ret = klp_find_object_symbol(obj->name, func->old_name,
888 					     func->old_sympos,
889 					     (unsigned long *)&func->old_func);
890 		if (ret)
891 			return ret;
892 
893 		ret = kallsyms_lookup_size_offset((unsigned long)func->old_func,
894 						  &func->old_size, NULL);
895 		if (!ret) {
896 			pr_err("kallsyms size lookup failed for '%s'\n",
897 			       func->old_name);
898 			return -ENOENT;
899 		}
900 
901 		if (func->nop)
902 			func->new_func = func->old_func;
903 
904 		ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
905 						  &func->new_size, NULL);
906 		if (!ret) {
907 			pr_err("kallsyms size lookup failed for '%s' replacement\n",
908 			       func->old_name);
909 			return -ENOENT;
910 		}
911 	}
912 
913 	return 0;
914 }
915 
916 static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
917 {
918 	struct klp_func *func;
919 	int ret;
920 	const char *name;
921 
922 	if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
923 		return -EINVAL;
924 
925 	obj->patched = false;
926 	obj->mod = NULL;
927 
928 	klp_find_object_module(obj);
929 
930 	name = klp_is_module(obj) ? obj->name : "vmlinux";
931 	ret = kobject_add(&obj->kobj, &patch->kobj, "%s", name);
932 	if (ret)
933 		return ret;
934 
935 	klp_for_each_func(obj, func) {
936 		ret = klp_init_func(obj, func);
937 		if (ret)
938 			return ret;
939 	}
940 
941 	if (klp_is_object_loaded(obj))
942 		ret = klp_init_object_loaded(patch, obj);
943 
944 	return ret;
945 }
946 
947 static void klp_init_func_early(struct klp_object *obj,
948 				struct klp_func *func)
949 {
950 	kobject_init(&func->kobj, &klp_ktype_func);
951 	list_add_tail(&func->node, &obj->func_list);
952 }
953 
954 static void klp_init_object_early(struct klp_patch *patch,
955 				  struct klp_object *obj)
956 {
957 	INIT_LIST_HEAD(&obj->func_list);
958 	kobject_init(&obj->kobj, &klp_ktype_object);
959 	list_add_tail(&obj->node, &patch->obj_list);
960 }
961 
962 static void klp_init_patch_early(struct klp_patch *patch)
963 {
964 	struct klp_object *obj;
965 	struct klp_func *func;
966 
967 	INIT_LIST_HEAD(&patch->list);
968 	INIT_LIST_HEAD(&patch->obj_list);
969 	kobject_init(&patch->kobj, &klp_ktype_patch);
970 	patch->enabled = false;
971 	patch->forced = false;
972 	INIT_WORK(&patch->free_work, klp_free_patch_work_fn);
973 	init_completion(&patch->finish);
974 
975 	klp_for_each_object_static(patch, obj) {
976 		klp_init_object_early(patch, obj);
977 
978 		klp_for_each_func_static(obj, func) {
979 			klp_init_func_early(obj, func);
980 		}
981 	}
982 }
983 
984 static int klp_init_patch(struct klp_patch *patch)
985 {
986 	struct klp_object *obj;
987 	int ret;
988 
989 	ret = kobject_add(&patch->kobj, klp_root_kobj, "%s", patch->mod->name);
990 	if (ret)
991 		return ret;
992 
993 	if (patch->replace) {
994 		ret = klp_add_nops(patch);
995 		if (ret)
996 			return ret;
997 	}
998 
999 	klp_for_each_object(patch, obj) {
1000 		ret = klp_init_object(patch, obj);
1001 		if (ret)
1002 			return ret;
1003 	}
1004 
1005 	list_add_tail(&patch->list, &klp_patches);
1006 
1007 	return 0;
1008 }
1009 
1010 static int __klp_disable_patch(struct klp_patch *patch)
1011 {
1012 	struct klp_object *obj;
1013 
1014 	if (WARN_ON(!patch->enabled))
1015 		return -EINVAL;
1016 
1017 	if (klp_transition_patch)
1018 		return -EBUSY;
1019 
1020 	klp_init_transition(patch, KLP_TRANSITION_UNPATCHED);
1021 
1022 	klp_for_each_object(patch, obj)
1023 		if (obj->patched)
1024 			klp_pre_unpatch_callback(obj);
1025 
1026 	/*
1027 	 * Enforce the order of the func->transition writes in
1028 	 * klp_init_transition() and the TIF_PATCH_PENDING writes in
1029 	 * klp_start_transition().  In the rare case where klp_ftrace_handler()
1030 	 * is called shortly after klp_update_patch_state() switches the task,
1031 	 * this ensures the handler sees that func->transition is set.
1032 	 */
1033 	smp_wmb();
1034 
1035 	klp_start_transition();
1036 	patch->enabled = false;
1037 	klp_try_complete_transition();
1038 
1039 	return 0;
1040 }
1041 
1042 static int __klp_enable_patch(struct klp_patch *patch)
1043 {
1044 	struct klp_object *obj;
1045 	int ret;
1046 
1047 	if (klp_transition_patch)
1048 		return -EBUSY;
1049 
1050 	if (WARN_ON(patch->enabled))
1051 		return -EINVAL;
1052 
1053 	pr_notice("enabling patch '%s'\n", patch->mod->name);
1054 
1055 	klp_init_transition(patch, KLP_TRANSITION_PATCHED);
1056 
1057 	/*
1058 	 * Enforce the order of the func->transition writes in
1059 	 * klp_init_transition() and the ops->func_stack writes in
1060 	 * klp_patch_object(), so that klp_ftrace_handler() will see the
1061 	 * func->transition updates before the handler is registered and the
1062 	 * new funcs become visible to the handler.
1063 	 */
1064 	smp_wmb();
1065 
1066 	klp_for_each_object(patch, obj) {
1067 		if (!klp_is_object_loaded(obj))
1068 			continue;
1069 
1070 		ret = klp_pre_patch_callback(obj);
1071 		if (ret) {
1072 			pr_warn("pre-patch callback failed for object '%s'\n",
1073 				klp_is_module(obj) ? obj->name : "vmlinux");
1074 			goto err;
1075 		}
1076 
1077 		ret = klp_patch_object(obj);
1078 		if (ret) {
1079 			pr_warn("failed to patch object '%s'\n",
1080 				klp_is_module(obj) ? obj->name : "vmlinux");
1081 			goto err;
1082 		}
1083 	}
1084 
1085 	klp_start_transition();
1086 	patch->enabled = true;
1087 	klp_try_complete_transition();
1088 
1089 	return 0;
1090 err:
1091 	pr_warn("failed to enable patch '%s'\n", patch->mod->name);
1092 
1093 	klp_cancel_transition();
1094 	return ret;
1095 }
1096 
1097 /**
1098  * klp_enable_patch() - enable the livepatch
1099  * @patch:	patch to be enabled
1100  *
1101  * Initializes the data structure associated with the patch, creates the sysfs
1102  * interface, performs the needed symbol lookups and code relocations,
1103  * and registers the patched functions with ftrace.
1104  *
1105  * This function is supposed to be called from the livepatch module_init()
1106  * callback.
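 *
 * A minimal caller (sketch following samples/livepatch/livepatch-sample.c;
 * "patch" is a klp_patch filled with the module's klp_object array):
 *
 *	static int livepatch_init(void)
 *	{
 *		return klp_enable_patch(&patch);
 *	}
 *	module_init(livepatch_init);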
1107  *
1108  * Return: 0 on success, otherwise error
1109  */
1110 int klp_enable_patch(struct klp_patch *patch)
1111 {
1112 	int ret;
1113 	struct klp_object *obj;
1114 
1115 	if (!patch || !patch->mod || !patch->objs)
1116 		return -EINVAL;
1117 
1118 	klp_for_each_object_static(patch, obj) {
1119 		if (!obj->funcs)
1120 			return -EINVAL;
1121 	}
1122
1124 	if (!is_livepatch_module(patch->mod)) {
1125 		pr_err("module %s is not marked as a livepatch module\n",
1126 		       patch->mod->name);
1127 		return -EINVAL;
1128 	}
1129 
1130 	if (!klp_initialized())
1131 		return -ENODEV;
1132 
1133 	if (!klp_have_reliable_stack()) {
1134 		pr_warn("This architecture doesn't have support for the livepatch consistency model.\n");
1135 		pr_warn("The livepatch transition may never complete.\n");
1136 	}
1137 
1138 	mutex_lock(&klp_mutex);
1139 
1140 	if (!klp_is_patch_compatible(patch)) {
1141 		pr_err("Livepatch patch (%s) is not compatible with the already installed livepatches.\n",
1142 			patch->mod->name);
1143 		mutex_unlock(&klp_mutex);
1144 		return -EINVAL;
1145 	}
1146 
1147 	if (!try_module_get(patch->mod)) {
1148 		mutex_unlock(&klp_mutex);
1149 		return -ENODEV;
1150 	}
1151 
1152 	klp_init_patch_early(patch);
1153 
1154 	ret = klp_init_patch(patch);
1155 	if (ret)
1156 		goto err;
1157 
1158 	ret = __klp_enable_patch(patch);
1159 	if (ret)
1160 		goto err;
1161 
1162 	mutex_unlock(&klp_mutex);
1163 
1164 	return 0;
1165 
1166 err:
1167 	klp_free_patch_start(patch);
1168 
1169 	mutex_unlock(&klp_mutex);
1170 
1171 	klp_free_patch_finish(patch);
1172 
1173 	return ret;
1174 }
1175 EXPORT_SYMBOL_GPL(klp_enable_patch);
1176 
1177 /*
1178  * This function unpatches objects from the replaced livepatches.
1179  *
1180  * We could be pretty aggressive here. It is called in the situation where
1181  * these structures are no longer accessed from the ftrace handler.
1182  * All functions are redirected by the klp_transition_patch. They either
1183  * run the new code or stay in the original code thanks to the special
1184  * nop function patches.
1185  *
1186  * The only exception is when the transition was forced. In this case,
1187  * klp_ftrace_handler() might still see the replaced patch on the stack.
1188  * Fortunately, it is carefully designed to work with removed functions
1189  * thanks to RCU. We only have to keep the patches on the system. Also
1190  * this is handled transparently via patch->forced, see klp_free_patch_finish().
1191  */
1192 void klp_unpatch_replaced_patches(struct klp_patch *new_patch)
1193 {
1194 	struct klp_patch *old_patch;
1195 
1196 	klp_for_each_patch(old_patch) {
1197 		if (old_patch == new_patch)
1198 			return;
1199 
1200 		old_patch->enabled = false;
1201 		klp_unpatch_objects(old_patch);
1202 	}
1203 }
1204 
1205 /*
1206  * This function removes the dynamically allocated 'nop' functions.
1207  *
1208  * We could be pretty aggressive. NOPs do not change the existing
1209  * behavior except for adding unnecessary delay in the ftrace handler.
1210  *
1211  * It is safe even when the transition was forced. The ftrace handler
1212  * will see a valid ops->func_stack entry thanks to RCU.
1213  *
1214  * We could even free the NOPs structures. They must be the last entry
1215  * in ops->func_stack. Therefore unregister_ftrace_function() is called.
1216  * It does the same as klp_synchronize_transition() to make sure that
1217  * nobody is inside the ftrace handler once the operation finishes.
1218  *
1219  * IMPORTANT: It must be called right after removing the replaced patches!
1220  */
1221 void klp_discard_nops(struct klp_patch *new_patch)
1222 {
1223 	klp_unpatch_objects_dynamic(klp_transition_patch);
1224 	klp_free_objects_dynamic(klp_transition_patch);
1225 }
1226 
1227 /*
1228  * Remove parts of patches that touch a given kernel module. The list of
1229  * patches processed might be limited. When limit is NULL, all patches
1230  * will be handled.
1231  */
1232 static void klp_cleanup_module_patches_limited(struct module *mod,
1233 					       struct klp_patch *limit)
1234 {
1235 	struct klp_patch *patch;
1236 	struct klp_object *obj;
1237 
1238 	klp_for_each_patch(patch) {
1239 		if (patch == limit)
1240 			break;
1241 
1242 		klp_for_each_object(patch, obj) {
1243 			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
1244 				continue;
1245 
1246 			if (patch != klp_transition_patch)
1247 				klp_pre_unpatch_callback(obj);
1248 
1249 			pr_notice("reverting patch '%s' on unloading module '%s'\n",
1250 				  patch->mod->name, obj->mod->name);
1251 			klp_unpatch_object(obj);
1252 
1253 			klp_post_unpatch_callback(obj);
1254 			klp_clear_object_relocs(patch, obj);
1255 			klp_free_object_loaded(obj);
1256 			break;
1257 		}
1258 	}
1259 }
1260 
1261 int klp_module_coming(struct module *mod)
1262 {
1263 	int ret;
1264 	struct klp_patch *patch;
1265 	struct klp_object *obj;
1266 
1267 	if (WARN_ON(mod->state != MODULE_STATE_COMING))
1268 		return -EINVAL;
1269 
1270 	if (!strcmp(mod->name, "vmlinux")) {
1271 		pr_err("vmlinux.ko: invalid module name\n");
1272 		return -EINVAL;
1273 	}
1274 
1275 	mutex_lock(&klp_mutex);
1276 	/*
1277 	 * Each module has to know that klp_module_coming()
1278 	 * has been called. We never know what module will
1279 	 * get patched by a new patch.
1280 	 */
1281 	mod->klp_alive = true;
1282 
1283 	klp_for_each_patch(patch) {
1284 		klp_for_each_object(patch, obj) {
1285 			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
1286 				continue;
1287 
1288 			obj->mod = mod;
1289 
1290 			ret = klp_init_object_loaded(patch, obj);
1291 			if (ret) {
1292 				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
1293 					patch->mod->name, obj->mod->name, ret);
1294 				goto err;
1295 			}
1296 
1297 			pr_notice("applying patch '%s' to loading module '%s'\n",
1298 				  patch->mod->name, obj->mod->name);
1299 
1300 			ret = klp_pre_patch_callback(obj);
1301 			if (ret) {
1302 				pr_warn("pre-patch callback failed for object '%s'\n",
1303 					obj->name);
1304 				goto err;
1305 			}
1306 
1307 			ret = klp_patch_object(obj);
1308 			if (ret) {
1309 				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
1310 					patch->mod->name, obj->mod->name, ret);
1311 
1312 				klp_post_unpatch_callback(obj);
1313 				goto err;
1314 			}
1315 
1316 			if (patch != klp_transition_patch)
1317 				klp_post_patch_callback(obj);
1318 
1319 			break;
1320 		}
1321 	}
1322 
1323 	mutex_unlock(&klp_mutex);
1324 
1325 	return 0;
1326 
1327 err:
1328 	/*
1329 	 * If a patch is unsuccessfully applied, return
1330 	 * error to the module loader.
1331 	 */
1332 	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
1333 		patch->mod->name, obj->mod->name, obj->mod->name);
1334 	mod->klp_alive = false;
1335 	obj->mod = NULL;
1336 	klp_cleanup_module_patches_limited(mod, patch);
1337 	mutex_unlock(&klp_mutex);
1338 
1339 	return ret;
1340 }
1341 
1342 void klp_module_going(struct module *mod)
1343 {
1344 	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
1345 		    mod->state != MODULE_STATE_COMING))
1346 		return;
1347 
1348 	mutex_lock(&klp_mutex);
1349 	/*
1350 	 * Each module has to know that klp_module_going()
1351 	 * has been called. We never know what module will
1352 	 * get patched by a new patch.
1353 	 */
1354 	mod->klp_alive = false;
1355 
1356 	klp_cleanup_module_patches_limited(mod, NULL);
1357 
1358 	mutex_unlock(&klp_mutex);
1359 }
1360 
1361 static int __init klp_init(void)
1362 {
1363 	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
1364 	if (!klp_root_kobj)
1365 		return -ENOMEM;
1366 
1367 	return 0;
1368 }
1369 
1370 module_init(klp_init);
1371