/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "patch.h"
#include "transition.h"

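/*
 * List of klp_ops structs, one per patched function (old_addr).  Each entry
 * owns the ftrace_ops registered on that function and stacks the klp_funcs
 * currently patching it, newest first.
 */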
static LIST_HEAD(klp_ops);

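/*
 * Find the klp_ops struct whose func_stack patches the function at old_addr,
 * or return NULL if that function is not currently patched.  Checking only
 * the top of the stack is enough because all funcs on a stack share old_addr.
 */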
struct klp_ops *klp_find_ops(unsigned long old_addr)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_addr == old_addr)
			return ops;
	}

	return NULL;
}

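/*
 * Ftrace handler installed on each patched function.  It picks the klp_func
 * on top of ops->func_stack and redirects execution to its new_func.  During
 * a transition, the per-task patch state decides whether the new or the
 * previous implementation is used.
 */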
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;

	ops = container_of(fops, struct klp_ops, fops);

	/*
	 * A variant of synchronize_sched() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_sched() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads.  The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path.  In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered.  But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads.  Otherwise we could read an
		 * out-of-date task state and pick the wrong function.  The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	preempt_enable_notrace();
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif

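/*
 * Remove one klp_func from its ops->func_stack.  If it was the only func
 * patching this function, also unregister the ftrace_ops and free it.
 */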
static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_addr))
		return;

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

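	/* Last func on the stack: tear down the ftrace hook entirely. */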
	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}

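/*
 * Redirect one function: either hook its ftrace location with a new klp_ops,
 * or stack the func on top of an existing ops when the function is already
 * patched by another patch.
 */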
static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
				func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

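		/*
		 * Set the filter before registering the ops so that the
		 * handler only ever runs (and modifies the IP) on this one
		 * function.
		 */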
		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}

	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

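/*
 * Revert all patched functions of an object (vmlinux or a module).
 */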
void klp_unpatch_object(struct klp_object *obj)
{
	struct klp_func *func;

	klp_for_each_func(obj, func)
		if (func->patched)
			klp_unpatch_func(func);

	obj->patched = false;
}

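/*
 * Patch every function of an object.  On failure, any functions already
 * patched here are reverted before returning the error.
 */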
int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}
	obj->patched = true;

	return 0;
}

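/*
 * Revert all currently patched objects of a patch.
 */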
void klp_unpatch_objects(struct klp_patch *patch)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			klp_unpatch_object(obj);
}