// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/completion.h>
#include <linux/memory.h>
#include <linux/rcupdate.h>
#include <asm/cacheflush.h>
#include "core.h"
#include "patch.h"
#include "state.h"
#include "transition.h"

/*
 * klp_mutex is a coarse lock which serializes access to klp data.  All
 * accesses to klp-related variables and structures must have mutex protection,
 * except within the following functions which carefully avoid the need for it:
 *
 * - klp_ftrace_handler()
 * - klp_update_patch_state()
 * - __klp_sched_try_switch()
 */
DEFINE_MUTEX(klp_mutex);

/*
 * Actively used patches: enabled or in transition. Note that replaced
 * or disabled patches are not listed even though the related kernel
 * module can still be loaded.
 */
LIST_HEAD(klp_patches);

static struct kobject *klp_root_kobj;

static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	guard(rcu)();
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not interfere with the work of klp_module_coming() and
	 * klp_module_going(). Note that the patch might still be needed
	 * before klp_module_going() is called. Module functions can be
	 * called even in the GOING state until mod->exit() finishes. This
	 * is especially important for patches that modify the semantics
	 * of the patched functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;
}

static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}

static struct klp_func *klp_find_func(struct klp_object *obj,
				      struct klp_func *old_func)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		/*
		 * Besides an exact old_sympos match, treat old_sympos values
		 * of 0 and 1 as equivalent: both denote the single occurrence
		 * of a unique symbol.
		 */
		if ((strcmp(old_func->old_name, func->old_name) == 0) &&
		    ((old_func->old_sympos == func->old_sympos) ||
		     (old_func->old_sympos == 0 && func->old_sympos == 1) ||
		     (old_func->old_sympos == 1 && func->old_sympos == 0))) {
			return func;
		}
	}

	return NULL;
}

static struct klp_object *klp_find_object(struct klp_patch *patch,
					  struct klp_object *old_obj)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj) {
		if (klp_is_module(old_obj)) {
			if (klp_is_module(obj) &&
			    strcmp(old_obj->name, obj->name) == 0) {
				return obj;
			}
		} else if (!klp_is_module(obj)) {
			return obj;
		}
	}

	return NULL;
}

struct klp_find_arg {
	const char *name;	/* symbol name to look up */
	unsigned long addr;	/* address of the last matching occurrence */
	unsigned long count;	/* occurrences seen so far */
	unsigned long pos;	/* requested position (sympos); 0 means unique */
};

static int klp_match_callback(void *data, unsigned long addr)
{
	struct klp_find_arg *args = data;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found at the requested
	 * position, or, when no position was requested (pos == 0), as soon
	 * as a second occurrence proves the symbol non-unique. For example,
	 * pos == 2 stops at the second occurrence, while pos == 0 bails out
	 * once count reaches 2.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}

static int klp_find_callback(void *data, const char *name, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if (strcmp(args->name, name))
		return 0;

	return klp_match_callback(data, addr);
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	if (objname)
		module_kallsyms_on_each_symbol(objname, klp_find_callback, &args);
	else
		kallsyms_on_each_match_symbol(klp_match_callback, name, &args);

	/*
	 * Ensure an address was found. If sympos is 0, ensure the symbol is
	 * unique; otherwise ensure the number of occurrences matches sympos.
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}
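
/*
 * An illustrative sketch of the sympos contract above (hypothetical symbol
 * names, not taken from a real kernel): looking up "foo" with sympos == 0
 * succeeds only if "foo" occurs exactly once in the object's kallsyms,
 * while sympos == 2 resolves to the second of several identically named
 * occurrences. A sympos that does not match any occurrence fails with
 * -EINVAL.
 */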

static int klp_resolve_symbols(Elf_Shdr *sechdrs, const char *strtab,
			       unsigned int symndx, Elf_Shdr *relasec,
			       const char *sec_objname)
{
	int i, cnt, ret;
	char sym_objname[MODULE_NAME_LEN];
	char sym_name[KSYM_NAME_LEN];
	Elf_Rela *relas;
	Elf_Sym *sym;
	unsigned long sympos, addr;
	bool sym_vmlinux;
	bool sec_vmlinux = !strcmp(sec_objname, "vmlinux");

	/*
	 * Since the field widths for sym_objname and sym_name in the sscanf()
	 * call are hard-coded and correspond to MODULE_NAME_LEN and
	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
	 * and KSYM_NAME_LEN have the values we expect them to have: "%55[^.]"
	 * consumes at most 55 characters plus the terminating NUL (56 total)
	 * and "%511[^,]" consumes at most 511 plus the NUL (512 total).
	 *
	 * Because the value of MODULE_NAME_LEN can differ among architectures,
	 * we use the smallest/strictest upper bound possible (56, based on
	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
	 */
	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 512);

	relas = (Elf_Rela *) relasec->sh_addr;
	/* For each rela in this klp relocation section */
	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
		sym = (Elf_Sym *)sechdrs[symndx].sh_addr + ELF_R_SYM(relas[i].r_info);
		if (sym->st_shndx != SHN_LIVEPATCH) {
			pr_err("symbol %s at rela sec %u idx %d is not marked as a livepatch symbol\n",
			       strtab + sym->st_name, symndx, i);
			return -EINVAL;
		}

		/* Format: .klp.sym.sym_objname.sym_name,sympos */
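		/* e.g. ".klp.sym.vmlinux.printk,0" (an illustrative name) */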
		cnt = sscanf(strtab + sym->st_name,
			     KLP_SYM_PREFIX "%55[^.].%511[^,],%lu",
			     sym_objname, sym_name, &sympos);
		if (cnt != 3) {
			pr_err("symbol %s has an incorrectly formatted name\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		sym_vmlinux = !strcmp(sym_objname, "vmlinux");

		/*
		 * Prevent module-specific KLP rela sections from referencing
		 * vmlinux symbols.  This helps prevent ordering issues with
		 * module special section initializations.  Presumably such
		 * symbols are exported and normal relas can be used instead.
		 */
		if (!sec_vmlinux && sym_vmlinux) {
			pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section\n",
			       sym_name);
			return -EINVAL;
		}

		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
		ret = klp_find_object_symbol(sym_vmlinux ? NULL : sym_objname,
					     sym_name, sympos, &addr);
		if (ret)
			return ret;

		sym->st_value = addr;
	}

	return 0;
}

void __weak clear_relocate_add(Elf_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
}

/*
 * At a high level, there are two types of klp relocation sections: those which
 * reference symbols which live in vmlinux; and those which reference symbols
 * which live in other modules.  This function is called for both types:
 *
 * 1) When a klp module itself loads, the module code calls this function to
 *    write vmlinux-specific klp relocations (.klp.rela.vmlinux.* sections).
 *    These relocations are written to the klp module text to allow the patched
 *    code/data to reference unexported vmlinux symbols.  They're written as
 *    early as possible to ensure that other module init code (e.g.,
 *    jump_label_apply_nops) can access any unexported vmlinux symbols which
 *    might be referenced by the klp module's special sections.
 *
 * 2) When a to-be-patched module loads -- or is already loaded when a
 *    corresponding klp module loads -- klp code calls this function to write
 *    module-specific klp relocations (.klp.rela.{module}.* sections).  These
 *    are written to the klp module text to allow the patched code/data to
 *    reference symbols which live in the to-be-patched module or one of its
 *    module dependencies.  Exported symbols are supported, in addition to
 *    unexported symbols, in order to enable late module patching, which allows
 *    the to-be-patched module to be loaded and patched sometime *after* the
 *    klp module is loaded.
 */
static int klp_write_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
				    const char *shstrtab, const char *strtab,
				    unsigned int symndx, unsigned int secndx,
				    const char *objname, bool apply)
{
	int cnt, ret;
	char sec_objname[MODULE_NAME_LEN];
	Elf_Shdr *sec = sechdrs + secndx;

	/*
	 * Format: .klp.rela.sec_objname.section_name
	 * See comment in klp_resolve_symbols() for an explanation
	 * of the selected field width value.
	 */
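	/* e.g. ".klp.rela.ext4.text" (a hypothetical section name) */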
	cnt = sscanf(shstrtab + sec->sh_name, KLP_RELOC_SEC_PREFIX "%55[^.]",
		     sec_objname);
	if (cnt != 1) {
		pr_err("section %s has an incorrectly formatted name\n",
		       shstrtab + sec->sh_name);
		return -EINVAL;
	}

	if (strcmp(objname ? objname : "vmlinux", sec_objname))
		return 0;

	if (apply) {
		ret = klp_resolve_symbols(sechdrs, strtab, symndx,
					  sec, sec_objname);
		if (ret)
			return ret;

		return apply_relocate_add(sechdrs, strtab, symndx, secndx, pmod);
	}

	clear_relocate_add(sechdrs, strtab, symndx, secndx, pmod);
	return 0;
}

int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
			     const char *shstrtab, const char *strtab,
			     unsigned int symndx, unsigned int secndx,
			     const char *objname)
{
	return klp_write_section_relocs(pmod, sechdrs, shstrtab, strtab, symndx,
					secndx, objname, true);
}

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/transition
 * /sys/kernel/livepatch/<patch>/force
 * /sys/kernel/livepatch/<patch>/replace
 * /sys/kernel/livepatch/<patch>/stack_order
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/patched
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */
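
/*
 * A brief usage sketch of the interface above (assuming a hypothetical
 * patch module named "livepatch_sample" is loaded):
 *
 *	# disable the patch, or reverse its pending transition
 *	echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled
 *
 *	# force the pending transition if some task is stuck
 *	echo 1 > /sys/kernel/livepatch/livepatch_sample/force
 *
 *	# check whether a transition is in progress
 *	cat /sys/kernel/livepatch/livepatch_sample/transition
 */
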
static int __klp_disable_patch(struct klp_patch *patch);

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool enabled;

	ret = kstrtobool(buf, &enabled);
	if (ret)
		return ret;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (patch->enabled == enabled) {
		/* already in requested state */
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Allow a pending transition to be reversed in either direction. It
	 * might be necessary to complete the transition without forcing it
	 * and breaking system integrity.
	 *
	 * Do not allow a disabled patch to be re-enabled.
	 */
	if (patch == klp_transition_patch)
		klp_reverse_transition();
	else if (!enabled)
		ret = __klp_disable_patch(patch);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&klp_mutex);

	if (ret)
		return ret;
	return count;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return sysfs_emit(buf, "%d\n", patch->enabled);
}

static ssize_t transition_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return sysfs_emit(buf, "%d\n", patch == klp_transition_patch);
}

static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool val;

	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;

	if (!val)
		return count;

	mutex_lock(&klp_mutex);

	patch = container_of(kobj, struct klp_patch, kobj);
	if (patch != klp_transition_patch) {
		mutex_unlock(&klp_mutex);
		return -EINVAL;
	}

	klp_force_transition();

	mutex_unlock(&klp_mutex);

	return count;
}

static ssize_t replace_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return sysfs_emit(buf, "%d\n", patch->replace);
}

static ssize_t stack_order_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch, *this_patch;
	int stack_order = 0;

	this_patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	klp_for_each_patch(patch) {
		stack_order++;
		if (patch == this_patch)
			break;
	}

	mutex_unlock(&klp_mutex);

	return sysfs_emit(buf, "%d\n", stack_order);
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
static struct kobj_attribute replace_kobj_attr = __ATTR_RO(replace);
static struct kobj_attribute stack_order_kobj_attr = __ATTR_RO(stack_order);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	&transition_kobj_attr.attr,
	&force_kobj_attr.attr,
	&replace_kobj_attr.attr,
	&stack_order_kobj_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(klp_patch);

static ssize_t patched_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_object *obj;

	obj = container_of(kobj, struct klp_object, kobj);
	return sysfs_emit(buf, "%d\n", obj->patched);
}

static struct kobj_attribute patched_kobj_attr = __ATTR_RO(patched);
static struct attribute *klp_object_attrs[] = {
	&patched_kobj_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(klp_object);

static void klp_free_object_dynamic(struct klp_object *obj)
{
	kfree(obj->name);
	kfree(obj);
}

static void klp_init_func_early(struct klp_object *obj,
				struct klp_func *func);
static void klp_init_object_early(struct klp_patch *patch,
				  struct klp_object *obj);

static struct klp_object *klp_alloc_object_dynamic(const char *name,
						   struct klp_patch *patch)
{
	struct klp_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (name) {
		obj->name = kstrdup(name, GFP_KERNEL);
		if (!obj->name) {
			kfree(obj);
			return NULL;
		}
	}

	klp_init_object_early(patch, obj);
	obj->dynamic = true;

	return obj;
}

static void klp_free_func_nop(struct klp_func *func)
{
	kfree(func->old_name);
	kfree(func);
}

static struct klp_func *klp_alloc_func_nop(struct klp_func *old_func,
					   struct klp_object *obj)
{
	struct klp_func *func;

	func = kzalloc(sizeof(*func), GFP_KERNEL);
	if (!func)
		return NULL;

	if (old_func->old_name) {
		func->old_name = kstrdup(old_func->old_name, GFP_KERNEL);
		if (!func->old_name) {
			kfree(func);
			return NULL;
		}
	}

	klp_init_func_early(obj, func);
	/*
	 * func->new_func is the same as func->old_func. These addresses are
	 * set when the object is loaded, see klp_init_object_loaded().
	 */
	func->old_sympos = old_func->old_sympos;
	func->nop = true;

	return func;
}

static int klp_add_object_nops(struct klp_patch *patch,
			       struct klp_object *old_obj)
{
	struct klp_object *obj;
	struct klp_func *func, *old_func;

	obj = klp_find_object(patch, old_obj);

	if (!obj) {
		obj = klp_alloc_object_dynamic(old_obj->name, patch);
		if (!obj)
			return -ENOMEM;
	}

	klp_for_each_func(old_obj, old_func) {
		func = klp_find_func(obj, old_func);
		if (func)
			continue;

		func = klp_alloc_func_nop(old_func, obj);
		if (!func)
			return -ENOMEM;
	}

	return 0;
}

/*
 * Add 'nop' functions which simply return to the caller to run the
 * original function.
 *
 * They are added only when the atomic replace mode is used and only for
 * functions which are currently livepatched but are no longer included
 * in the new livepatch.
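 *
 * An illustrative scenario (hypothetical function names): if an installed
 * patch changes vfs_read() and a new cumulative (replace) patch changes
 * only vfs_write(), a nop entry for vfs_read() is added to the new patch
 * so that the transition consistently reverts vfs_read() to the original
 * code.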
 */
static int klp_add_nops(struct klp_patch *patch)
{
	struct klp_patch *old_patch;
	struct klp_object *old_obj;

	klp_for_each_patch(old_patch) {
		klp_for_each_object(old_patch, old_obj) {
			int err;

			err = klp_add_object_nops(patch, old_obj);
			if (err)
				return err;
		}
	}

	return 0;
}

static void klp_kobj_release_patch(struct kobject *kobj)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	complete(&patch->finish);
}

static const struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = klp_patch_groups,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
	struct klp_object *obj;

	obj = container_of(kobj, struct klp_object, kobj);

	if (obj->dynamic)
		klp_free_object_dynamic(obj);
}

static const struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = klp_object_groups,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
	struct klp_func *func;

	func = container_of(kobj, struct klp_func, kobj);

	if (func->nop)
		klp_free_func_nop(func);
}

static const struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func, *tmp_func;

	klp_for_each_func_safe(obj, func, tmp_func) {
		if (nops_only && !func->nop)
			continue;

		list_del(&func->node);
		kobject_put(&func->kobj);
	}
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func) {
		func->old_func = NULL;

		if (func->nop)
			func->new_func = NULL;
	}
}

static void __klp_free_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj, *tmp_obj;

	klp_for_each_object_safe(patch, obj, tmp_obj) {
		__klp_free_funcs(obj, nops_only);

		if (nops_only && !obj->dynamic)
			continue;

		list_del(&obj->node);
		kobject_put(&obj->kobj);
	}
}

static void klp_free_objects(struct klp_patch *patch)
{
	__klp_free_objects(patch, false);
}

static void klp_free_objects_dynamic(struct klp_patch *patch)
{
	__klp_free_objects(patch, true);
}

/*
 * This function implements the free operations that can be called safely
 * under klp_mutex.
 *
 * The operation must be completed by calling klp_free_patch_finish()
 * outside klp_mutex.
 */
static void klp_free_patch_start(struct klp_patch *patch)
{
	if (!list_empty(&patch->list))
		list_del(&patch->list);

	klp_free_objects(patch);
}

/*
 * This function implements the free part that must be called outside
 * klp_mutex.
 *
 * It must be called after klp_free_patch_start() and has to be the last
 * function accessing the livepatch structures when the patch gets
 * disabled.
 */
static void klp_free_patch_finish(struct klp_patch *patch)
{
	/*
	 * Avoid deadlock with enabled_store() sysfs callback by
	 * calling this outside klp_mutex. It is safe because
	 * this is called when the patch gets disabled and it
	 * cannot get enabled again.
	 */
	kobject_put(&patch->kobj);
	wait_for_completion(&patch->finish);

	/* Put the module after the last access to struct klp_patch. */
	if (!patch->forced)
		module_put(patch->mod);
}

/*
 * The livepatch might be freed from the sysfs interface created by the
 * patch itself. This work item allows the wait for the interface's
 * destruction to happen in a separate context.
 */
static void klp_free_patch_work_fn(struct work_struct *work)
{
	struct klp_patch *patch =
		container_of(work, struct klp_patch, free_work);

	klp_free_patch_finish(patch);
}

void klp_free_patch_async(struct klp_patch *patch)
{
	klp_free_patch_start(patch);
	schedule_work(&patch->free_work);
}

void klp_free_replaced_patches_async(struct klp_patch *new_patch)
{
	struct klp_patch *old_patch, *tmp_patch;

	klp_for_each_patch_safe(old_patch, tmp_patch) {
		if (old_patch == new_patch)
			return;
		klp_free_patch_async(old_patch);
	}
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	if (!func->old_name)
		return -EINVAL;

	/*
	 * NOPs get the address later. The patched module must be loaded,
	 * see klp_init_object_loaded().
	 */
	if (!func->new_func && !func->nop)
		return -EINVAL;

	if (strlen(func->old_name) >= KSYM_NAME_LEN)
		return -EINVAL;

	INIT_LIST_HEAD(&func->stack_node);
	func->patched = false;
	func->transition = false;

	/*
	 * The format for the sysfs directory is <function,sympos> where
	 * sympos is the nth occurrence of this symbol in kallsyms for the
	 * patched object. If the user selects 0 for old_sympos, then 1 will
	 * be used since a unique symbol will be the first occurrence. For
	 * example, a unique function foo() gets the directory "foo,1".
	 */
	return kobject_add(&func->kobj, &obj->kobj, "%s,%lu",
			   func->old_name,
			   func->old_sympos ? func->old_sympos : 1);
}

static int klp_write_object_relocs(struct klp_patch *patch,
				   struct klp_object *obj,
				   bool apply)
{
	int i, ret;
	struct klp_modinfo *info = patch->mod->klp_info;

	for (i = 1; i < info->hdr.e_shnum; i++) {
		Elf_Shdr *sec = info->sechdrs + i;

		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
			continue;

		ret = klp_write_section_relocs(patch->mod, info->sechdrs,
					       info->secstrings,
					       patch->mod->core_kallsyms.strtab,
					       info->symndx, i, obj->name, apply);
		if (ret)
			return ret;
	}

	return 0;
}

static int klp_apply_object_relocs(struct klp_patch *patch,
				   struct klp_object *obj)
{
	return klp_write_object_relocs(patch, obj, true);
}

static void klp_clear_object_relocs(struct klp_patch *patch,
				    struct klp_object *obj)
{
	klp_write_object_relocs(patch, obj, false);
}

/* Parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (klp_is_module(obj)) {
		/*
		 * Only write module-specific relocations here
		 * (.klp.rela.{module}.*).  vmlinux-specific relocations were
		 * written earlier during the initialization of the klp module
		 * itself.
		 */
		ret = klp_apply_object_relocs(patch, obj);
		if (ret)
			return ret;
	}

	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     (unsigned long *)&func->old_func);
		if (ret)
			return ret;

		ret = kallsyms_lookup_size_offset((unsigned long)func->old_func,
						  &func->old_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s'\n",
			       func->old_name);
			return -ENOENT;
		}

		if (func->nop)
			func->new_func = func->old_func;

		ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
						  &func->new_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s' replacement\n",
			       func->old_name);
			return -ENOENT;
		}
	}

	return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
		return -EINVAL;

	obj->patched = false;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_add(&obj->kobj, &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			return ret;
	}

	if (klp_is_object_loaded(obj))
		ret = klp_init_object_loaded(patch, obj);

	return ret;
}

static void klp_init_func_early(struct klp_object *obj,
				struct klp_func *func)
{
	kobject_init(&func->kobj, &klp_ktype_func);
	list_add_tail(&func->node, &obj->func_list);
}

static void klp_init_object_early(struct klp_patch *patch,
				  struct klp_object *obj)
{
	INIT_LIST_HEAD(&obj->func_list);
	kobject_init(&obj->kobj, &klp_ktype_object);
	list_add_tail(&obj->node, &patch->obj_list);
}

static void klp_init_patch_early(struct klp_patch *patch)
{
	struct klp_object *obj;
	struct klp_func *func;

	INIT_LIST_HEAD(&patch->list);
	INIT_LIST_HEAD(&patch->obj_list);
	kobject_init(&patch->kobj, &klp_ktype_patch);
	patch->enabled = false;
	patch->forced = false;
	INIT_WORK(&patch->free_work, klp_free_patch_work_fn);
	init_completion(&patch->finish);

	klp_for_each_object_static(patch, obj) {
		klp_init_object_early(patch, obj);

		klp_for_each_func_static(obj, func) {
			klp_init_func_early(obj, func);
		}
	}
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	ret = kobject_add(&patch->kobj, klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		return ret;

	if (patch->replace) {
		ret = klp_add_nops(patch);
		if (ret)
			return ret;
	}

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			return ret;
	}

	list_add_tail(&patch->list, &klp_patches);

	return 0;
}

static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	if (WARN_ON(!patch->enabled))
		return -EINVAL;

	if (klp_transition_patch)
		return -EBUSY;

	klp_init_transition(patch, KLP_TRANSITION_UNPATCHED);

	klp_for_each_object(patch, obj)
		if (obj->patched)
			klp_pre_unpatch_callback(obj);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the TIF_PATCH_PENDING writes in
	 * klp_start_transition().  In the rare case where klp_ftrace_handler()
	 * is called shortly after klp_update_patch_state() switches the task,
	 * this ensures the handler sees that func->transition is set.
	 */
	smp_wmb();

	klp_start_transition();
	patch->enabled = false;
	klp_try_complete_transition();

	return 0;
}

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (klp_transition_patch)
		return -EBUSY;

	if (WARN_ON(patch->enabled))
		return -EINVAL;

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_init_transition(patch, KLP_TRANSITION_PATCHED);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the ops->func_stack writes in
	 * klp_patch_object(), so that klp_ftrace_handler() will see the
	 * func->transition updates before the handler is registered and the
	 * new funcs become visible to the handler.
	 */
	smp_wmb();

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_pre_patch_callback(obj);
		if (ret) {
			pr_warn("pre-patch callback failed for object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}

		ret = klp_patch_object(obj);
		if (ret) {
			pr_warn("failed to patch object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}
	}

	klp_start_transition();
	patch->enabled = true;
	klp_try_complete_transition();

	return 0;
err:
	pr_warn("failed to enable patch '%s'\n", patch->mod->name);

	klp_cancel_transition();
	return ret;
}

/**
 * klp_enable_patch() - enable the livepatch
 * @patch:	patch to be enabled
 *
 * Initializes the data structure associated with the patch, creates the sysfs
 * interface, performs the needed symbol lookups and code relocations, and
 * registers the patched functions with ftrace.
 *
 * This function is supposed to be called from the livepatch module_init()
 * callback.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;
	struct klp_object *obj;

	if (!patch || !patch->mod || !patch->objs)
		return -EINVAL;

	klp_for_each_object_static(patch, obj) {
		if (!obj->funcs)
			return -EINVAL;
	}

	if (!is_livepatch_module(patch->mod)) {
		pr_err("module %s is not marked as a livepatch module\n",
		       patch->mod->name);
		return -EINVAL;
	}

	if (!klp_initialized())
		return -ENODEV;

	if (!klp_have_reliable_stack()) {
		pr_warn("This architecture doesn't have support for the livepatch consistency model.\n");
		pr_warn("The livepatch transition may never complete.\n");
	}

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_compatible(patch)) {
		pr_err("Livepatch patch (%s) is not compatible with the already installed livepatches.\n",
			patch->mod->name);
		mutex_unlock(&klp_mutex);
		return -EINVAL;
	}

	if (!try_module_get(patch->mod)) {
		mutex_unlock(&klp_mutex);
		return -ENODEV;
	}

	klp_init_patch_early(patch);

	ret = klp_init_patch(patch);
	if (ret)
		goto err;

	ret = __klp_enable_patch(patch);
	if (ret)
		goto err;

	mutex_unlock(&klp_mutex);

	return 0;

err:
	klp_free_patch_start(patch);

	mutex_unlock(&klp_mutex);

	klp_free_patch_finish(patch);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);
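
/*
 * A minimal caller sketch, loosely following samples/livepatch/ in the
 * kernel tree (function and variable names here are illustrative, not a
 * definitive implementation):
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			// NULL name means the object is vmlinux
 *			.funcs = funcs,
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		return klp_enable_patch(&patch);
 *	}
 *	module_init(livepatch_init);
 */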

/*
 * This function unpatches objects from the replaced livepatches.
 *
 * We could be pretty aggressive here. It is called in the situation where
 * these structures are no longer accessed from the ftrace handler.
 * All functions are redirected by klp_transition_patch: they either use
 * the new code or run the original code because of the special nop
 * function patches.
 *
 * The only exception is when the transition was forced. In this case,
 * klp_ftrace_handler() might still see the replaced patch on the stack.
 * Fortunately, it is carefully designed to work with removed functions
 * thanks to RCU. We only have to keep the patches on the system, which
 * happens transparently because klp_free_patch_finish() skips the
 * module_put() for forced patches.
 */
void klp_unpatch_replaced_patches(struct klp_patch *new_patch)
{
	struct klp_patch *old_patch;

	klp_for_each_patch(old_patch) {
		if (old_patch == new_patch)
			return;

		old_patch->enabled = false;
		klp_unpatch_objects(old_patch);
	}
}

/*
 * This function removes the dynamically allocated 'nop' functions.
 *
 * We could be pretty aggressive. NOPs do not change the existing
 * behavior except for adding unnecessary delay in the ftrace handler.
 *
 * It is safe even when the transition was forced. The ftrace handler
 * will see a valid ops->func_stack entry thanks to RCU.
 *
 * We can even free the nop structures. They must be the last entry in
 * ops->func_stack, so unregister_ftrace_function() gets called when they
 * are unpatched. Like klp_synchronize_transition(), it makes sure that
 * nobody is inside the ftrace handler once the operation finishes.
 *
 * IMPORTANT: It must be called right after removing the replaced patches!
 */
void klp_discard_nops(struct klp_patch *new_patch)
{
	klp_unpatch_objects_dynamic(klp_transition_patch);
	klp_free_objects_dynamic(klp_transition_patch);
}

/*
 * Remove parts of patches that touch a given kernel module. The list of
 * patches processed might be limited. When limit is NULL, all patches
 * will be handled.
 */
static void klp_cleanup_module_patches_limited(struct module *mod,
					       struct klp_patch *limit)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	klp_for_each_patch(patch) {
		if (patch == limit)
			break;

		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (patch != klp_transition_patch)
				klp_pre_unpatch_callback(obj);

			pr_notice("reverting patch '%s' on unloading module '%s'\n",
				  patch->mod->name, obj->mod->name);
			klp_unpatch_object(obj);

			klp_post_unpatch_callback(obj);
			klp_clear_object_relocs(patch, obj);
			klp_free_object_loaded(obj);
			break;
		}
	}
}

int klp_module_coming(struct module *mod)
{
	int ret;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_COMING))
		return -EINVAL;

	if (!strcmp(mod->name, "vmlinux")) {
		pr_err("vmlinux.ko: invalid module name\n");
		return -EINVAL;
	}

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_coming()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = true;

	klp_for_each_patch(patch) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			obj->mod = mod;

			ret = klp_init_object_loaded(patch, obj);
			if (ret) {
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			pr_notice("applying patch '%s' to loading module '%s'\n",
				  patch->mod->name, obj->mod->name);

			ret = klp_pre_patch_callback(obj);
			if (ret) {
				pr_warn("pre-patch callback failed for object '%s'\n",
					obj->name);
				goto err;
			}

			ret = klp_patch_object(obj);
			if (ret) {
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);

				klp_post_unpatch_callback(obj);
				goto err;
			}

			if (patch != klp_transition_patch)
				klp_post_patch_callback(obj);

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;

err:
	/*
	 * If a patch is unsuccessfully applied, return
	 * error to the module loader.
	 */
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	obj->mod = NULL;
	klp_cleanup_module_patches_limited(mod, patch);
	mutex_unlock(&klp_mutex);

	return ret;
}

void klp_module_going(struct module *mod)
{
	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_going()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = false;

	klp_cleanup_module_patches_limited(mod, NULL);

	mutex_unlock(&klp_mutex);
}

static int __init klp_init(void)
{
	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj)
		return -ENOMEM;

	return 0;
}

module_init(klp_init);