xref: /linux/kernel/livepatch/transition.c (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa)
/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
#include "../sched/sched.h"

#define MAX_STACK_ENTRIES  100
#define STACK_ERR_BUF_SIZE 128

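/*
 * The patch currently being applied or reverted.  Set in
 * klp_init_transition() and cleared in klp_complete_transition().
 */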
struct klp_patch *klp_transition_patch;

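/*
 * The global target patch state (KLP_PATCHED or KLP_UNPATCHED) that tasks
 * are transitioning to, or KLP_UNDEFINED if no transition is in progress.
 */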
static int klp_target_state = KLP_UNDEFINED;

/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
	mutex_lock(&klp_mutex);

	if (klp_transition_patch)
		klp_try_complete_transition();

	mutex_unlock(&klp_mutex);
}

static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);

/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
	struct klp_object *obj;
	struct klp_func *func;
	struct task_struct *g, *task;
	unsigned int cpu;
	bool immediate_func = false;

	if (klp_target_state == KLP_UNPATCHED) {
		/*
		 * All tasks have transitioned to KLP_UNPATCHED so we can now
		 * remove the new functions from the func_stack.
		 */
		klp_unpatch_objects(klp_transition_patch);

		/*
		 * Make sure klp_ftrace_handler() can no longer see functions
		 * from this patch on the ops->func_stack.  Otherwise, after
		 * func->transition gets cleared, the handler may choose a
		 * removed function.
		 */
		synchronize_rcu();
	}

	if (klp_transition_patch->immediate)
		goto done;

	klp_for_each_object(klp_transition_patch, obj) {
		klp_for_each_func(obj, func) {
			func->transition = false;
			if (func->immediate)
				immediate_func = true;
		}
	}

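	/*
	 * Drop the patch module's reference only if the patch has no
	 * 'immediate' funcs.  With immediate funcs there's no guarantee that
	 * every task has left the old code, so the module must stay loaded.
	 */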
	if (klp_target_state == KLP_UNPATCHED && !immediate_func)
		module_put(klp_transition_patch->mod);

	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
	if (klp_target_state == KLP_PATCHED)
		synchronize_rcu();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}

done:
	klp_target_state = KLP_UNDEFINED;
	klp_transition_patch = NULL;
}

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
		return;

	klp_target_state = KLP_UNPATCHED;
	klp_complete_transition();
}

/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
	rcu_read_lock();

	/*
	 * This test_and_clear_tsk_thread_flag() call also serves as a read
	 * barrier (smp_rmb) for two cases:
	 *
	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
	 *    klp_target_state read.  The corresponding write barrier is in
	 *    klp_init_transition().
	 *
	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
	 *    of func->transition, if klp_ftrace_handler() is called later on
	 *    the same CPU.  See __klp_disable_patch().
	 */
	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
		task->patch_state = READ_ONCE(klp_target_state);

	rcu_read_unlock();
}

/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func,
				struct stack_trace *trace)
{
	unsigned long func_addr, func_size, address;
	struct klp_ops *ops;
	int i;

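	/*
	 * Funcs with the 'immediate' flag don't use the consistency model,
	 * so there's no need to check the stack for them.
	 */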
	if (func->immediate)
		return 0;

	for (i = 0; i < trace->nr_entries; i++) {
		address = trace->entries[i];

		if (klp_target_state == KLP_UNPATCHED) {
			/*
			 * Check for the to-be-unpatched function
			 * (the func itself).
			 */
			func_addr = (unsigned long)func->new_func;
			func_size = func->new_size;
		} else {
			/*
			 * Check for the to-be-patched function
			 * (the previous func).
			 */
			ops = klp_find_ops(func->old_addr);

			if (list_is_singular(&ops->func_stack)) {
				/* original function */
				func_addr = func->old_addr;
				func_size = func->old_size;
			} else {
				/* previously patched function */
				struct klp_func *prev;

				prev = list_next_entry(func, stack_node);
				func_addr = (unsigned long)prev->new_func;
				func_size = prev->new_size;
			}
		}

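		/* The function is on the task's stack; it can't be switched yet. */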
		if (address >= func_addr && address < func_addr + func_size)
			return -EAGAIN;
	}

	return 0;
}

/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, char *err_buf)
{
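	/*
	 * The entries array is static to avoid a large kernel stack frame.
	 * Callers are serialized by klp_mutex, so this is safe.
	 */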
	static unsigned long entries[MAX_STACK_ENTRIES];
	struct stack_trace trace;
	struct klp_object *obj;
	struct klp_func *func;
	int ret;

	trace.skip = 0;
	trace.nr_entries = 0;
	trace.max_entries = MAX_STACK_ENTRIES;
	trace.entries = entries;
	ret = save_stack_trace_tsk_reliable(task, &trace);
	WARN_ON_ONCE(ret == -ENOSYS);
	if (ret) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d has an unreliable stack\n",
			 __func__, task->comm, task->pid);
		return ret;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!obj->patched)
			continue;
		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, &trace);
			if (ret) {
				snprintf(err_buf, STACK_ERR_BUF_SIZE,
					 "%s: %s:%d is sleeping on function %s\n",
					 __func__, task->comm, task->pid,
					 func->old_name);
				return ret;
			}
		}
	}

	return 0;
}

/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
	struct rq *rq;
	struct rq_flags flags;
	int ret;
	bool success = false;
	char err_buf[STACK_ERR_BUF_SIZE];

	err_buf[0] = '\0';

	/* check if this task has already switched over */
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * For arches which don't have reliable stack traces, we have to rely
	 * on other methods (e.g., switching tasks at kernel exit).
	 */
	if (!klp_have_reliable_stack())
		return false;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions.  If all goes well, switch the task to the target patch
	 * state.
	 */
	rq = task_rq_lock(task, &flags);

	if (task_running(rq, task) && task != current) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d is running\n", __func__, task->comm,
			 task->pid);
		goto done;
	}

	ret = klp_check_stack(task, err_buf);
	if (ret)
		goto done;

	success = true;

	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	task->patch_state = klp_target_state;

done:
	task_rq_unlock(rq, task, &flags);

	/*
	 * Due to console deadlock issues, pr_debug() can't be used while
	 * holding the task rq lock.  Instead we have to use a temporary buffer
	 * and print the debug message after releasing the lock.
	 */
	if (err_buf[0] != '\0')
		pr_debug("%s", err_buf);

	return success;
}

/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;
	bool complete = true;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	/*
	 * If the patch can be applied or reverted immediately, skip the
	 * per-task transitions.
	 */
	if (klp_transition_patch->immediate)
		goto success;

	/*
	 * Try to switch the tasks to the target patch state by walking their
	 * stacks and looking for any to-be-patched or to-be-unpatched
	 * functions.  If such functions are found on a stack, or if the stack
	 * is deemed unreliable, the task can't be switched yet.
	 *
	 * Usually this will transition most (or all) of the tasks on a system
	 * unless the patch includes changes to a very common function.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (!klp_try_switch_task(task))
			complete = false;
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	get_online_cpus();
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (cpu_online(cpu)) {
			if (!klp_try_switch_task(task))
				complete = false;
		} else if (task->patch_state != klp_target_state) {
			/* offline idle tasks can be switched immediately */
			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
			task->patch_state = klp_target_state;
		}
	}
	put_online_cpus();

	if (!complete) {
		/*
		 * Some tasks weren't able to be switched over.  Try again
		 * later and/or wait for other methods like kernel exit
		 * switching.
		 */
		schedule_delayed_work(&klp_transition_work,
				      round_jiffies_relative(HZ));
		return;
	}

success:
	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/* we're done, now cleanup the data structures */
	klp_complete_transition();
}

/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	pr_notice("'%s': %s...\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * If the patch can be applied or reverted immediately, skip the
	 * per-task transitions.
	 */
	if (klp_transition_patch->immediate)
		return;

	/*
	 * Mark all normal tasks as needing a patch state update.  They'll
	 * switch either in klp_try_complete_transition() or as they exit the
	 * kernel.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	/*
	 * Mark all idle tasks as needing a patch state update.  They'll switch
	 * either in klp_try_complete_transition() or at the idle loop switch
	 * point.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	}
}

/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
	struct task_struct *g, *task;
	unsigned int cpu;
	struct klp_object *obj;
	struct klp_func *func;
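	/*
	 * KLP_UNPATCHED is 0 and KLP_PATCHED is 1, so the initial state is
	 * simply the negation of the target state.
	 */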
	int initial_state = !state;

	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

	klp_transition_patch = patch;

	/*
	 * Set the global target patch state which tasks will switch to.  This
	 * has no effect until the TIF_PATCH_PENDING flags get set later.
	 */
	klp_target_state = state;

	/*
	 * If the patch can be applied or reverted immediately, skip the
	 * per-task transitions.
	 */
	if (patch->immediate)
		return;

	/*
	 * Initialize all tasks to the initial patch state to prepare them for
	 * switching to the target state.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}

	/*
	 * Enforce the order of the task->patch_state initializations and the
	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
	 *
	 * Also enforce the order of the klp_target_state write and future
	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
	 * set a task->patch_state to KLP_UNDEFINED.
	 */
	smp_wmb();

	/*
	 * Set the func transition states so klp_ftrace_handler() will know to
	 * switch to the transition logic.
	 *
	 * When patching, the funcs aren't yet in the func_stack and will be
	 * made visible to the ftrace handler shortly by the calls to
	 * klp_patch_object().
	 *
	 * When unpatching, the funcs are already in the func_stack and so are
	 * already visible to the ftrace handler.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			func->transition = true;
}

/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;

	klp_transition_patch->enabled = !klp_transition_patch->enabled;

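	/* KLP_PATCHED (1) and KLP_UNPATCHED (0): '!' flips the target. */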
	klp_target_state = !klp_target_state;

	/*
	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
	 * klp_update_patch_state() running in parallel with
	 * klp_start_transition().
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

	/* Let any remaining calls to klp_update_patch_state() complete */
	synchronize_rcu();

	klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
	child->patch_state = current->patch_state;

	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}