/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

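/*
 * List of klp_ops structs, one per patched function.  Each ops hooks a
 * function via ftrace and stacks every klp_func that patches it on
 * func_stack, most recent first.
 */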
static LIST_HEAD(klp_ops);

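/*
 * Find the klp_ops struct that patches @old_func.  Checking only the
 * first entry of each func_stack is enough because every func on a
 * given stack shares the same old_func.
 */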
struct klp_ops *klp_find_ops(void *old_func)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_func == old_func)
			return ops;
	}

	return NULL;
}

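/*
 * The ftrace handler hooked to every patched function.  It selects which
 * klp_func should run, taking an in-progress transition of the current
 * task into account, and diverts execution to it.  It is notrace so the
 * handler itself is never traced (and patched) in turn.
 */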
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;

	ops = container_of(fops, struct klp_ops, fops);

	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_rcu() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads.  The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path.  In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered.  But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads.  Otherwise we could read an
		 * out-of-date task state and pick the wrong function.  The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

	/*
	 * NOPs are used to replace existing patches with original code.
	 * Do nothing! Setting pc would cause an infinite loop.
	 */
	if (func->nop)
		goto unlock;

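	/* Divert execution: func->new_func will run instead of the old code. */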
	klp_arch_set_pc(regs, (unsigned long)func->new_func);

unlock:
	preempt_enable_notrace();
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif

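/*
 * Remove @func from its func_stack.  When it is the last func on the
 * stack, also unregister the ftrace ops and free the klp_ops struct.
 */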
static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_func))
		return;

	ops = klp_find_ops(func->old_func);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}

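/*
 * Redirect @func->old_func to @func->new_func via ftrace.  The first
 * patch of a function allocates and registers a klp_ops struct; later
 * patches of the same function just push onto the existing func_stack.
 */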
static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_func))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_func);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
				func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

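		/*
		 * SAVE_REGS and IPMODIFY let the handler modify regs->ip;
		 * DYNAMIC marks the ops as dynamically allocated.
		 */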
		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}

	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

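/*
 * Unpatch every patched function in @obj.  With @nops_only set, only the
 * dynamically added nop funcs are unpatched, leaving the rest of the
 * object patched.
 */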
static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if (nops_only && !func->nop)
			continue;

		if (func->patched)
			klp_unpatch_func(func);
	}

	if (obj->dynamic || !nops_only)
		obj->patched = false;
}

void klp_unpatch_object(struct klp_object *obj)
{
	__klp_unpatch_object(obj, false);
}

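/*
 * Patch every function in @obj, rolling the whole object back on the
 * first failure.
 */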
int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}
	obj->patched = true;

	return 0;
}

static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			__klp_unpatch_object(obj, nops_only);
}

void klp_unpatch_objects(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, false);
}

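/*
 * Unpatch only the dynamically allocated nop functions; used when an
 * older patch is replaced by a cumulative one (see klp_discard_nops()
 * in core.c).
 */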
void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, true);
}
309