xref: /linux/kernel/trace/fgraph.c (revision 5529876063e110ea49326138149fdf2a28a484dd)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Infrastructure to hook into function calls and returns.
4  * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
5  * Mostly borrowed from function tracer which
6  * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
7  *
8  * Highly modified by Steven Rostedt (VMware).
9  */
10 #include <linux/bits.h>
11 #include <linux/jump_label.h>
12 #include <linux/suspend.h>
13 #include <linux/ftrace.h>
14 #include <linux/static_call.h>
15 #include <linux/slab.h>
16 
17 #include <trace/events/sched.h>
18 
19 #include "ftrace_internal.h"
20 #include "trace.h"
21 
22 /*
23  * FGRAPH_FRAME_SIZE:	Size in bytes of the meta data on the shadow stack
24  * FGRAPH_FRAME_OFFSET:	Size in long words of the meta data frame
25  */
26 #define FGRAPH_FRAME_SIZE	sizeof(struct ftrace_ret_stack)
27 #define FGRAPH_FRAME_OFFSET	DIV_ROUND_UP(FGRAPH_FRAME_SIZE, sizeof(long))
28 
29 /*
30  * On entry to a function (via function_graph_enter()), a new fgraph frame
31  * (ftrace_ret_stack) is pushed onto the stack as well as a word that
32  * holds a bitmask and a type (called "bitmap"). The bitmap is defined as:
33  *
34  * bits:  0 -  9	offset in words from the previous ftrace_ret_stack
35  *
36  * bits: 10 - 11	Type of storage
37  *			  0 - reserved
38  *			  1 - bitmap of fgraph_array index
39  *			  2 - reserved data
40  *
41  * For type with "bitmap of fgraph_array index" (FGRAPH_TYPE_BITMAP):
42  *  bits: 12 - 27	The bitmap of fgraph_ops fgraph_array index
43  *			That is, it's a bitmask of 0-15 (16 bits)
44  *			where if a corresponding ops in the fgraph_array[]
45  *			expects a callback from the return of the function
46  *			its corresponding bit will be set.
47  *
48  *
49  * The top of the ret_stack (when not empty) will always have a reference
50  * word that points to the last fgraph frame that was saved.
51  *
52  * For reserved data:
53  *  bits: 12 - 17	The size in words that is stored
54  *  bits: 18 - 23	The index of fgraph_array, which shows who is stored
55  *
56  * That is, at the end of function_graph_enter, if the first and fourth
57  * fgraph_ops on the fgraph_array[] (index 0 and 3) need their retfunc called
58  * on the return of the function being traced, and the fourth fgraph_ops
59  * stored two words of data, this is what will be on the task's shadow
60  * ret_stack: (the stack grows upward)
61  *
62  *  ret_stack[SHADOW_STACK_OFFSET]
63  * | SHADOW_STACK_TASK_VARS(ret_stack)[15]      |
64  * ...
65  * | SHADOW_STACK_TASK_VARS(ret_stack)[0]       |
66  *  ret_stack[SHADOW_STACK_MAX_OFFSET]
67  * ...
68  * |                                            | <- task->curr_ret_stack
69  * +--------------------------------------------+
70  * | (3 << 12) | (3 << 10) | FGRAPH_FRAME_OFFSET|
71  * |         *or put another way*               |
72  * | (3 << FGRAPH_DATA_INDEX_SHIFT)| \          | This is for fgraph_ops[3].
73  * | ((2 - 1) << FGRAPH_DATA_SHIFT)| \          | The data size is 2 words.
74  * | (FGRAPH_TYPE_DATA << FGRAPH_TYPE_SHIFT)| \ |
75  * | (offset2:FGRAPH_FRAME_OFFSET+3)            | <- the offset2 is from here
76  * +--------------------------------------------+ ( It is 4 words from the ret_stack)
77  * |            STORED DATA WORD 2              |
78  * |            STORED DATA WORD 1              |
79  * +--------------------------------------------+
80  * | (9 << 12) | (1 << 10) | FGRAPH_FRAME_OFFSET|
81  * |         *or put another way*               |
82  * | (BIT(3)|BIT(0)) << FGRAPH_INDEX_SHIFT | \  |
83  * | FGRAPH_TYPE_BITMAP << FGRAPH_TYPE_SHIFT| \ |
84  * | (offset1:FGRAPH_FRAME_OFFSET)              | <- the offset1 is from here
85  * +--------------------------------------------+
86  * | struct ftrace_ret_stack                    |
87  * |   (stores the saved ret pointer)           | <- the offset points here
88  * +--------------------------------------------+
89  * |                 (X) | (N)                  | ( N words away from
90  * |                                            |   previous ret_stack)
91  * ...
92  * ret_stack[0]
93  *
94  * If a backtrace is required and the real return pointer needs to be
95  * fetched, then the task's curr_ret_stack offset is examined. If it
96  * is greater than zero (reserved, or right before popped), the value
97  * is masked by FGRAPH_FRAME_OFFSET_MASK to get the offset of the
98  * ftrace_ret_stack structure stored on the shadow stack.
99  */
100 
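/*
 * Worked example of decoding one shadow-stack word (illustrative only;
 * FGRAPH_FRAME_OFFSET depends on the arch and config, it is assumed to
 * be 4 words here). Take the BITMAP word from the diagram above:
 *
 *	word = (9 << 12) | (1 << 10) | 4			(0x9404)
 *
 *	word & FGRAPH_FRAME_OFFSET_MASK			= 4
 *		the ftrace_ret_stack starts 4 words below this word
 *	(word >> FGRAPH_TYPE_SHIFT) & FGRAPH_TYPE_MASK	= 1 (FGRAPH_TYPE_BITMAP)
 *	(word >> FGRAPH_INDEX_SHIFT) & FGRAPH_INDEX_MASK = 9 (BIT(0) | BIT(3))
 *		the fgraph_ops at index 0 and 3 want their retfunc called
 */
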
101 /*
102  * The following is for the top word on the stack:
103  *
104  *   FGRAPH_FRAME_OFFSET (0-9) holds the offset delta to the fgraph frame
105  *   FGRAPH_TYPE (10-11) holds the type of word this is.
106  *     (RESERVED or BITMAP)
107  */
108 #define FGRAPH_FRAME_OFFSET_BITS	10
109 #define FGRAPH_FRAME_OFFSET_MASK	GENMASK(FGRAPH_FRAME_OFFSET_BITS - 1, 0)
110 
111 #define FGRAPH_TYPE_BITS	2
112 #define FGRAPH_TYPE_MASK	GENMASK(FGRAPH_TYPE_BITS - 1, 0)
113 #define FGRAPH_TYPE_SHIFT	FGRAPH_FRAME_OFFSET_BITS
114 
115 enum {
116 	FGRAPH_TYPE_RESERVED	= 0,
117 	FGRAPH_TYPE_BITMAP	= 1,
118 	FGRAPH_TYPE_DATA	= 2,
119 };
120 
121 /*
122  * For BITMAP type:
123  *   FGRAPH_INDEX (12-27) bits holding the gops index wanting return callback called
124  */
125 #define FGRAPH_INDEX_BITS	16
126 #define FGRAPH_INDEX_MASK	GENMASK(FGRAPH_INDEX_BITS - 1, 0)
127 #define FGRAPH_INDEX_SHIFT	(FGRAPH_TYPE_SHIFT + FGRAPH_TYPE_BITS)
128 
129 /*
130  * For DATA type:
131  *  FGRAPH_DATA (12-17) bits hold the size of data (in words)
132  *  FGRAPH_INDEX (18-23) bits hold the index for which gops->idx the data is for
133  *
134  * Note:
135  *  data_size == 0 means 1 word, and 31 (=2^5 - 1) means 32 words.
136  */
137 #define FGRAPH_DATA_BITS	5
138 #define FGRAPH_DATA_MASK	GENMASK(FGRAPH_DATA_BITS - 1, 0)
139 #define FGRAPH_DATA_SHIFT	(FGRAPH_TYPE_SHIFT + FGRAPH_TYPE_BITS)
140 #define FGRAPH_MAX_DATA_SIZE (sizeof(long) * (1 << FGRAPH_DATA_BITS))
141 
142 #define FGRAPH_DATA_INDEX_BITS	4
143 #define FGRAPH_DATA_INDEX_MASK	GENMASK(FGRAPH_DATA_INDEX_BITS - 1, 0)
144 #define FGRAPH_DATA_INDEX_SHIFT	(FGRAPH_DATA_SHIFT + FGRAPH_DATA_BITS)
145 
146 #define FGRAPH_MAX_INDEX	\
147 	((FGRAPH_INDEX_SIZE << FGRAPH_DATA_BITS) + FGRAPH_RET_INDEX)
148 
149 #define FGRAPH_ARRAY_SIZE	FGRAPH_INDEX_BITS
150 
151 /*
152  * SHADOW_STACK_SIZE:	The size in bytes of the entire shadow stack
153  * SHADOW_STACK_OFFSET:	The size in long words of the shadow stack
154  * SHADOW_STACK_MAX_OFFSET: The max offset of the stack for a new frame to be added
155  */
156 #define SHADOW_STACK_SIZE	(4096)
157 #define SHADOW_STACK_OFFSET	(SHADOW_STACK_SIZE / sizeof(long))
158 /* Leave a buffer at the end */
159 #define SHADOW_STACK_MAX_OFFSET				\
160 	(SHADOW_STACK_OFFSET - (FGRAPH_FRAME_OFFSET + 1 + FGRAPH_ARRAY_SIZE))
161 
162 /* RET_STACK():		Return the frame from a given @offset from task @t */
163 #define RET_STACK(t, offset) ((struct ftrace_ret_stack *)(&(t)->ret_stack[offset]))
164 
165 /*
166  * Each fgraph_ops has a reserved unsigned long at the end (top) of the
167  * ret_stack to store task specific state.
168  */
169 #define SHADOW_STACK_TASK_VARS(ret_stack) \
170 	((unsigned long *)(&(ret_stack)[SHADOW_STACK_OFFSET - FGRAPH_ARRAY_SIZE]))
171 
172 DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);
173 int ftrace_graph_active;
174 
175 static struct kmem_cache *fgraph_stack_cachep;
176 
177 static struct fgraph_ops *fgraph_array[FGRAPH_ARRAY_SIZE];
178 static unsigned long fgraph_array_bitmask;
179 
180 /* LRU index table for fgraph_array */
181 static int fgraph_lru_table[FGRAPH_ARRAY_SIZE];
182 static int fgraph_lru_next;
183 static int fgraph_lru_last;
184 
185 /* Initialize fgraph_lru_table with unused index */
186 static void fgraph_lru_init(void)
187 {
188 	int i;
189 
190 	for (i = 0; i < FGRAPH_ARRAY_SIZE; i++)
191 		fgraph_lru_table[i] = i;
192 }
193 
194 /* Release the used index to the LRU table */
195 static int fgraph_lru_release_index(int idx)
196 {
197 	if (idx < 0 || idx >= FGRAPH_ARRAY_SIZE ||
198 	    WARN_ON_ONCE(fgraph_lru_table[fgraph_lru_last] != -1))
199 		return -1;
200 
201 	fgraph_lru_table[fgraph_lru_last] = idx;
202 	fgraph_lru_last = (fgraph_lru_last + 1) % FGRAPH_ARRAY_SIZE;
203 
204 	clear_bit(idx, &fgraph_array_bitmask);
205 	return 0;
206 }
207 
208 /* Allocate a new index from LRU table */
209 static int fgraph_lru_alloc_index(void)
210 {
211 	int idx = fgraph_lru_table[fgraph_lru_next];
212 
213 	/* No id is available */
214 	if (idx == -1)
215 		return -1;
216 
217 	fgraph_lru_table[fgraph_lru_next] = -1;
218 	fgraph_lru_next = (fgraph_lru_next + 1) % FGRAPH_ARRAY_SIZE;
219 
220 	set_bit(idx, &fgraph_array_bitmask);
221 	return idx;
222 }
223 
224 /* Get the offset to the fgraph frame from a ret_stack value */
225 static inline int __get_offset(unsigned long val)
226 {
227 	return val & FGRAPH_FRAME_OFFSET_MASK;
228 }
229 
230 /* Get the type of word from a ret_stack value */
231 static inline int __get_type(unsigned long val)
232 {
233 	return (val >> FGRAPH_TYPE_SHIFT) & FGRAPH_TYPE_MASK;
234 }
235 
236 /* Get the data_index for a DATA type ret_stack word */
237 static inline int __get_data_index(unsigned long val)
238 {
239 	return (val >> FGRAPH_DATA_INDEX_SHIFT) & FGRAPH_DATA_INDEX_MASK;
240 }
241 
242 /* Get the data_size for a DATA type ret_stack word */
243 static inline int __get_data_size(unsigned long val)
244 {
245 	return ((val >> FGRAPH_DATA_SHIFT) & FGRAPH_DATA_MASK) + 1;
246 }
247 
248 /* Get the word from the ret_stack at @offset */
249 static inline unsigned long get_fgraph_entry(struct task_struct *t, int offset)
250 {
251 	return t->ret_stack[offset];
252 }
253 
254 /* Get the FRAME_OFFSET from the word from the @offset on ret_stack */
255 static inline int get_frame_offset(struct task_struct *t, int offset)
256 {
257 	return __get_offset(t->ret_stack[offset]);
258 }
259 
260 /* For BITMAP type: get the bitmask from the @offset at ret_stack */
261 static inline unsigned long
262 get_bitmap_bits(struct task_struct *t, int offset)
263 {
264 	return (t->ret_stack[offset] >> FGRAPH_INDEX_SHIFT) & FGRAPH_INDEX_MASK;
265 }
266 
267 /* Write the bitmap to the ret_stack at @offset (does index, offset and bitmask) */
268 static inline void
269 set_bitmap(struct task_struct *t, int offset, unsigned long bitmap)
270 {
271 	t->ret_stack[offset] = (bitmap << FGRAPH_INDEX_SHIFT) |
272 		(FGRAPH_TYPE_BITMAP << FGRAPH_TYPE_SHIFT) | FGRAPH_FRAME_OFFSET;
273 }
274 
275 /* For DATA type: get the data saved under the ret_stack word at @offset */
276 static inline void *get_data_type_data(struct task_struct *t, int offset)
277 {
278 	unsigned long val = t->ret_stack[offset];
279 
280 	if (__get_type(val) != FGRAPH_TYPE_DATA)
281 		return NULL;
282 	offset -= __get_data_size(val);
283 	return (void *)&t->ret_stack[offset];
284 }
285 
286 /* Create the ret_stack word for a DATA type */
287 static inline unsigned long make_data_type_val(int idx, int size, int offset)
288 {
289 	return (idx << FGRAPH_DATA_INDEX_SHIFT) |
290 		((size - 1) << FGRAPH_DATA_SHIFT) |
291 		(FGRAPH_TYPE_DATA << FGRAPH_TYPE_SHIFT) | offset;
292 }
293 
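/*
 * How the helpers above fit together (an illustrative sketch, not part of
 * the fgraph logic itself): a DATA word built by make_data_type_val()
 * decodes back with the __get_*() helpers. Using the values from the
 * layout example at the top of this file (fgraph_ops index 3 storing two
 * words of data):
 *
 *	word = make_data_type_val(3, 2, FGRAPH_FRAME_OFFSET + 3);
 *
 *	__get_type(word)	= FGRAPH_TYPE_DATA
 *	__get_data_index(word)	= 3
 *	__get_data_size(word)	= 2
 *	__get_offset(word)	= FGRAPH_FRAME_OFFSET + 3
 */
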
294 /* ftrace_graph_entry set to this to tell some archs to run function graph */
295 static int entry_run(struct ftrace_graph_ent *trace, struct fgraph_ops *ops)
296 {
297 	return 0;
298 }
299 
300 /* ftrace_graph_return set to this to tell some archs to run function graph */
301 static void return_run(struct ftrace_graph_ret *trace, struct fgraph_ops *ops)
302 {
303 }
304 
305 static void ret_stack_set_task_var(struct task_struct *t, int idx, long val)
306 {
307 	unsigned long *gvals = SHADOW_STACK_TASK_VARS(t->ret_stack);
308 
309 	gvals[idx] = val;
310 }
311 
312 static unsigned long *
313 ret_stack_get_task_var(struct task_struct *t, int idx)
314 {
315 	unsigned long *gvals = SHADOW_STACK_TASK_VARS(t->ret_stack);
316 
317 	return &gvals[idx];
318 }
319 
320 static void ret_stack_init_task_vars(unsigned long *ret_stack)
321 {
322 	unsigned long *gvals = SHADOW_STACK_TASK_VARS(ret_stack);
323 
324 	memset(gvals, 0, sizeof(*gvals) * FGRAPH_ARRAY_SIZE);
325 }
326 
327 /**
328  * fgraph_reserve_data - Reserve storage on the task's ret_stack
329  * @idx:	The index of fgraph_array
330  * @size_bytes: The size in bytes to reserve
331  *
332  * Reserves space of up to FGRAPH_MAX_DATA_SIZE bytes on the
333  * task's ret_stack shadow stack, for a given fgraph_ops during
334  * the entryfunc() call. If entryfunc() returns zero, the storage
335  * is discarded. An entryfunc() can only call this once per iteration.
336  * The fgraph_ops retfunc() can retrieve this stored data with
337  * fgraph_retrieve_data().
338  *
339  * Returns: On success, a pointer to the data on the stack.
340  *   Otherwise, NULL if there's not enough space left on the
341  *   ret_stack for the data, or if fgraph_reserve_data() was called
342  *   more than once for a single entryfunc() call.
343  */
344 void *fgraph_reserve_data(int idx, int size_bytes)
345 {
346 	unsigned long val;
347 	void *data;
348 	int curr_ret_stack = current->curr_ret_stack;
349 	int data_size;
350 
351 	if (size_bytes > FGRAPH_MAX_DATA_SIZE)
352 		return NULL;
353 
354 	/* Convert the data size to number of longs. */
355 	data_size = (size_bytes + sizeof(long) - 1) >> (sizeof(long) == 4 ? 2 : 3);
356 
357 	val = get_fgraph_entry(current, curr_ret_stack - 1);
358 	data = &current->ret_stack[curr_ret_stack];
359 
360 	curr_ret_stack += data_size + 1;
361 	if (unlikely(curr_ret_stack >= SHADOW_STACK_MAX_OFFSET))
362 		return NULL;
363 
364 	val = make_data_type_val(idx, data_size, __get_offset(val) + data_size + 1);
365 
366 	/* Set the last word to be reserved */
367 	current->ret_stack[curr_ret_stack - 1] = val;
368 
369 	/* Make sure interrupts see this */
370 	barrier();
371 	current->curr_ret_stack = curr_ret_stack;
372 	/* Again sync with interrupts, and reset reserve */
373 	current->ret_stack[curr_ret_stack - 1] = val;
374 
375 	return data;
376 }
377 
378 /**
379  * fgraph_retrieve_data - Retrieve stored data from fgraph_reserve_data()
380  * @idx:	the index of fgraph_array (fgraph_ops::idx)
381  * @size_bytes: pointer to retrieved data size.
382  *
383  * This is to be called by a fgraph_ops retfunc(), to retrieve data that
384  * was stored by the fgraph_ops entryfunc() on the function entry.
385  * That is, this will retrieve the data that was reserved on the
386  * entry of the function that corresponds to the exit of the function
387  * that the fgraph_ops retfunc() is called on.
388  *
389  * Returns: The stored data from fgraph_reserve_data() called by the
390  *    matching entryfunc() for the retfunc() this is called from.
391  *   Or NULL if there was nothing stored.
392  */
393 void *fgraph_retrieve_data(int idx, int *size_bytes)
394 {
395 	return fgraph_retrieve_parent_data(idx, size_bytes, 0);
396 }
397 
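/*
 * Example pairing of fgraph_reserve_data() and fgraph_retrieve_data()
 * from a hypothetical fgraph_ops user (a sketch, not an existing caller;
 * my_entry()/my_return() are made-up names):
 *
 *	static int my_entry(struct ftrace_graph_ent *trace,
 *			    struct fgraph_ops *gops)
 *	{
 *		u64 *ts = fgraph_reserve_data(gops->idx, sizeof(*ts));
 *
 *		if (!ts)
 *			return 0;
 *		*ts = trace_clock_local();
 *		return 1;
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace,
 *			      struct fgraph_ops *gops)
 *	{
 *		int size;
 *		u64 *ts = fgraph_retrieve_data(gops->idx, &size);
 *
 *		if (ts)
 *			pr_info("%ps took %llu ns\n", (void *)trace->func,
 *				trace_clock_local() - *ts);
 *	}
 *
 * Returning 0 from the entry handler discards the reserved words, so the
 * return handler only sees data for functions it asked to trace.
 */
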
398 /**
399  * fgraph_get_task_var - retrieve a task specific state variable
400  * @gops: The ftrace_ops that owns the task specific variable
401  *
402  * Every registered fgraph_ops has a task state variable
403  * reserved on the task's ret_stack. This function returns the
404  * address to that variable.
405  *
406  * Returns the address of the fgraph_ops @gops task-specific
407  * unsigned long variable.
408  */
409 unsigned long *fgraph_get_task_var(struct fgraph_ops *gops)
410 {
411 	return ret_stack_get_task_var(current, gops->idx);
412 }
413 
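/*
 * For example, a hypothetical entry handler (a sketch only, not an
 * existing user) could use its per-task word as a call-depth limiter:
 *
 *	static int my_depth_entry(struct ftrace_graph_ent *trace,
 *				  struct fgraph_ops *gops)
 *	{
 *		unsigned long *depth = fgraph_get_task_var(gops);
 *
 *		if (*depth >= 10)
 *			return 0;
 *		(*depth)++;
 *		return 1;
 *	}
 *
 * with the matching return handler decrementing the counter again.
 */
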
414 /*
415  * @offset: The offset into @t->ret_stack to find the ret_stack entry
416  * @frame_offset: Where to place the offset into @t->ret_stack of that entry
417  *
418  * Returns a pointer to the previous ret_stack below @offset or NULL
419  *   when it reaches the bottom of the stack.
420  *
421  * Calling this with:
422  *
423  *   offset = task->curr_ret_stack;
424  *   do {
425  *	ret_stack = get_ret_stack(task, offset, &offset);
426  *   } while (ret_stack);
427  *
428  * Will iterate through all the ret_stack entries from curr_ret_stack
429  * down to the first one.
430  */
431 static inline struct ftrace_ret_stack *
432 get_ret_stack(struct task_struct *t, int offset, int *frame_offset)
433 {
434 	int offs;
435 
436 	BUILD_BUG_ON(FGRAPH_FRAME_SIZE % sizeof(long));
437 
438 	if (unlikely(offset <= 0))
439 		return NULL;
440 
441 	offs = get_frame_offset(t, --offset);
442 	if (WARN_ON_ONCE(offs <= 0 || offs > offset))
443 		return NULL;
444 
445 	offset -= offs;
446 
447 	*frame_offset = offset;
448 	return RET_STACK(t, offset);
449 }
450 
451 /**
452  * fgraph_retrieve_parent_data - get data from a parent function
453  * @idx: The index into the fgraph_array (fgraph_ops::idx)
454  * @size_bytes: A pointer to retrieved data size
455  * @depth: The depth to find the parent (0 is the current function)
456  *
457  * This is similar to fgraph_retrieve_data() but can be used to retrieve
458  * data from a parent caller function.
459  *
460  * Return: a pointer to the specified parent data or NULL if not found
461  */
462 void *fgraph_retrieve_parent_data(int idx, int *size_bytes, int depth)
463 {
464 	struct ftrace_ret_stack *ret_stack = NULL;
465 	int offset = current->curr_ret_stack;
466 	unsigned long val;
467 
468 	if (offset <= 0)
469 		return NULL;
470 
471 	for (;;) {
472 		int next_offset;
473 
474 		ret_stack = get_ret_stack(current, offset, &next_offset);
475 		if (!ret_stack || --depth < 0)
476 			break;
477 		offset = next_offset;
478 	}
479 
480 	if (!ret_stack)
481 		return NULL;
482 
483 	offset--;
484 
485 	val = get_fgraph_entry(current, offset);
486 	while (__get_type(val) == FGRAPH_TYPE_DATA) {
487 		if (__get_data_index(val) == idx)
488 			goto found;
489 		offset -= __get_data_size(val) + 1;
490 		val = get_fgraph_entry(current, offset);
491 	}
492 	return NULL;
493 found:
494 	if (size_bytes)
495 		*size_bytes = __get_data_size(val) * sizeof(long);
496 	return get_data_type_data(current, offset);
497 }
498 
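/*
 * A return handler can also look at what was reserved for one of its
 * callers, e.g. (illustrative sketch, assuming a u64 was reserved there):
 *
 *	int size;
 *	u64 *parent_data = fgraph_retrieve_parent_data(gops->idx, &size, 1);
 *
 * With @depth = 1 this returns the data reserved when the parent of the
 * currently returning function was entered, or NULL if nothing was stored.
 */
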
499 /* Enabled by default (can be cleared by function_graph tracer flags) */
500 bool fgraph_sleep_time = true;
501 
502 #ifdef CONFIG_DYNAMIC_FTRACE
503 /*
504  * archs can override this function if they must do something
505  * to enable hook for graph tracer.
506  */
507 int __weak ftrace_enable_ftrace_graph_caller(void)
508 {
509 	return 0;
510 }
511 
512 /*
513  * archs can override this function if they must do something
514  * to disable hook for graph tracer.
515  */
516 int __weak ftrace_disable_ftrace_graph_caller(void)
517 {
518 	return 0;
519 }
520 #endif
521 
522 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace,
523 			    struct fgraph_ops *gops)
524 {
525 	return 0;
526 }
527 
528 static void ftrace_graph_ret_stub(struct ftrace_graph_ret *trace,
529 				  struct fgraph_ops *gops)
530 {
531 }
532 
533 static struct fgraph_ops fgraph_stub = {
534 	.entryfunc = ftrace_graph_entry_stub,
535 	.retfunc = ftrace_graph_ret_stub,
536 };
537 
538 static struct fgraph_ops *fgraph_direct_gops = &fgraph_stub;
539 DEFINE_STATIC_CALL(fgraph_func, ftrace_graph_entry_stub);
540 DEFINE_STATIC_CALL(fgraph_retfunc, ftrace_graph_ret_stub);
541 static DEFINE_STATIC_KEY_TRUE(fgraph_do_direct);
542 
543 /**
544  * ftrace_graph_stop - set to permanently disable function graph tracing
545  *
546  * In case of an error in function graph tracing, this is called
547  * to try to keep function graph tracing from causing any more harm.
548  * Usually this is pretty severe and this is called to try to at least
549  * get a warning out to the user.
550  */
551 void ftrace_graph_stop(void)
552 {
553 	static_branch_enable(&kill_ftrace_graph);
554 }
555 
556 /* Add a function return address to the trace stack on thread info.*/
557 static int
558 ftrace_push_return_trace(unsigned long ret, unsigned long func,
559 			 unsigned long frame_pointer, unsigned long *retp,
560 			 int fgraph_idx)
561 {
562 	struct ftrace_ret_stack *ret_stack;
563 	unsigned long val;
564 	int offset;
565 
566 	if (unlikely(ftrace_graph_is_dead()))
567 		return -EBUSY;
568 
569 	if (!current->ret_stack)
570 		return -EBUSY;
571 
572 	BUILD_BUG_ON(SHADOW_STACK_SIZE % sizeof(long));
573 
574 	/* Set val to "reserved" with the delta to the new fgraph frame */
575 	val = (FGRAPH_TYPE_RESERVED << FGRAPH_TYPE_SHIFT) | FGRAPH_FRAME_OFFSET;
576 
577 	/*
578 	 * We must make sure the ret_stack is tested before we read
579 	 * anything else.
580 	 */
581 	smp_rmb();
582 
583 	/*
584 	 * Check if there's room on the shadow stack to fit an fgraph frame
585 	 * and a bitmap word.
586 	 */
587 	if (current->curr_ret_stack + FGRAPH_FRAME_OFFSET + 1 >= SHADOW_STACK_MAX_OFFSET) {
588 		atomic_inc(&current->trace_overrun);
589 		return -EBUSY;
590 	}
591 
592 	offset = READ_ONCE(current->curr_ret_stack);
593 	ret_stack = RET_STACK(current, offset);
594 	offset += FGRAPH_FRAME_OFFSET;
595 
596 	/* ret offset = FGRAPH_FRAME_OFFSET ; type = reserved */
597 	current->ret_stack[offset] = val;
598 	ret_stack->ret = ret;
599 	/*
600 	 * The unwinders expect curr_ret_stack to point to either zero
601 	 * or an offset where to find the next ret_stack. Even though the
602 	 * ret stack might be bogus, we want to write the ret and the
603 	 * offset to find the ret_stack before we increment the stack pointer.
604 	 * If an interrupt comes in now before we increment the curr_ret_stack
605 	 * it may blow away what we wrote. But that's fine, because the
606 	 * offset will still be correct (even though the 'ret' won't be).
607 	 * What we worry about is the offset being correct after we increment
608 	 * the curr_ret_stack and before we update that offset, as if an
609 	 * interrupt comes in and does an unwind stack dump, it will need
610 	 * at least a correct offset!
611 	 */
612 	barrier();
613 	WRITE_ONCE(current->curr_ret_stack, offset + 1);
614 	/*
615 	 * This next barrier is to ensure that an interrupt coming in
616 	 * will not corrupt what we are about to write.
617 	 */
618 	barrier();
619 
620 	/* Still keep it reserved even if an interrupt came in */
621 	current->ret_stack[offset] = val;
622 
623 	ret_stack->ret = ret;
624 	ret_stack->func = func;
625 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
626 	ret_stack->fp = frame_pointer;
627 #endif
628 	ret_stack->retp = retp;
629 	return offset;
630 }
631 
632 /*
633  * Not all archs define MCOUNT_INSN_SIZE which is used to look for direct
634  * functions. But those archs currently don't support direct functions
635  * anyway, and ftrace_find_rec_direct() is just a stub for them.
636  * Define MCOUNT_INSN_SIZE to keep those archs compiling.
637  */
638 #ifndef MCOUNT_INSN_SIZE
639 /* Make sure this only works without direct calls */
640 # ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
641 #  error MCOUNT_INSN_SIZE not defined with direct calls enabled
642 # endif
643 # define MCOUNT_INSN_SIZE 0
644 #endif
645 
646 /* If the caller does not use ftrace, call this function. */
647 int function_graph_enter(unsigned long ret, unsigned long func,
648 			 unsigned long frame_pointer, unsigned long *retp)
649 {
650 	struct ftrace_graph_ent trace;
651 	unsigned long bitmap = 0;
652 	int offset;
653 	int i;
654 
655 	trace.func = func;
656 	trace.depth = ++current->curr_ret_depth;
657 
658 	offset = ftrace_push_return_trace(ret, func, frame_pointer, retp, 0);
659 	if (offset < 0)
660 		goto out;
661 
662 #ifdef CONFIG_HAVE_STATIC_CALL
663 	if (static_branch_likely(&fgraph_do_direct)) {
664 		int save_curr_ret_stack = current->curr_ret_stack;
665 
666 		if (static_call(fgraph_func)(&trace, fgraph_direct_gops))
667 			bitmap |= BIT(fgraph_direct_gops->idx);
668 		else
669 			/* Clear out any saved storage */
670 			current->curr_ret_stack = save_curr_ret_stack;
671 	} else
672 #endif
673 	{
674 		for_each_set_bit(i, &fgraph_array_bitmask,
675 					 sizeof(fgraph_array_bitmask) * BITS_PER_BYTE) {
676 			struct fgraph_ops *gops = READ_ONCE(fgraph_array[i]);
677 			int save_curr_ret_stack;
678 
679 			if (gops == &fgraph_stub)
680 				continue;
681 
682 			save_curr_ret_stack = current->curr_ret_stack;
683 			if (ftrace_ops_test(&gops->ops, func, NULL) &&
684 			    gops->entryfunc(&trace, gops))
685 				bitmap |= BIT(i);
686 			else
687 				/* Clear out any saved storage */
688 				current->curr_ret_stack = save_curr_ret_stack;
689 		}
690 	}
691 
692 	if (!bitmap)
693 		goto out_ret;
694 
695 	/*
696 	 * Since this function uses fgraph_idx = 0 as a tail-call checking
697 	 * flag, set that bit always.
698 	 */
699 	set_bitmap(current, offset, bitmap | BIT(0));
700 
701 	return 0;
702  out_ret:
703 	current->curr_ret_stack -= FGRAPH_FRAME_OFFSET + 1;
704  out:
705 	current->curr_ret_depth--;
706 	return -EBUSY;
707 }
708 
709 /* Retrieve a function return address to the trace stack on thread info.*/
710 static struct ftrace_ret_stack *
711 ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
712 			unsigned long frame_pointer, int *offset)
713 {
714 	struct ftrace_ret_stack *ret_stack;
715 
716 	ret_stack = get_ret_stack(current, current->curr_ret_stack, offset);
717 
718 	if (unlikely(!ret_stack)) {
719 		ftrace_graph_stop();
720 		WARN(1, "Bad function graph ret_stack pointer: %d",
721 		     current->curr_ret_stack);
722 	/* Might as well panic, otherwise we have nowhere to go */
723 		*ret = (unsigned long)panic;
724 		return NULL;
725 	}
726 
727 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
728 	/*
729 	 * The arch may choose to record the frame pointer used
730 	 * and check it here to make sure that it is what we expect it
731 	 * to be. If gcc does not set the place holder of the return
732 	 * address in the frame pointer, and does a copy instead, then
733 	 * the function graph trace will fail. This test detects this
734 	 * case.
735 	 *
736 	 * Currently, x86_32 with optimize for size (-Os) makes the latest
737 	 * gcc do the above.
738 	 *
739 	 * Note, -mfentry does not use frame pointers, and this test
740 	 *  is not needed if CC_USING_FENTRY is set.
741 	 */
742 	if (unlikely(ret_stack->fp != frame_pointer)) {
743 		ftrace_graph_stop();
744 		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
745 		     "  from func %ps return to %lx\n",
746 		     ret_stack->fp,
747 		     frame_pointer,
748 		     (void *)ret_stack->func,
749 		     ret_stack->ret);
750 		*ret = (unsigned long)panic;
751 		return NULL;
752 	}
753 #endif
754 
755 	*offset += FGRAPH_FRAME_OFFSET;
756 	*ret = ret_stack->ret;
757 	trace->func = ret_stack->func;
758 	trace->overrun = atomic_read(&current->trace_overrun);
759 	trace->depth = current->curr_ret_depth;
760 	/*
761 	 * We still want to trace interrupts coming in if
762 	 * max_depth is set to 1. Make sure the decrement is
763 	 * seen before ftrace_graph_return.
764 	 */
765 	barrier();
766 
767 	return ret_stack;
768 }
769 
770 /*
771  * Hibernation protection.
772  * The state of the current task is too much unstable during
773  * suspend/restore to disk. We want to protect against that.
774  */
775 static int
776 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
777 							void *unused)
778 {
779 	switch (state) {
780 	case PM_HIBERNATION_PREPARE:
781 		pause_graph_tracing();
782 		break;
783 
784 	case PM_POST_HIBERNATION:
785 		unpause_graph_tracing();
786 		break;
787 	}
788 	return NOTIFY_DONE;
789 }
790 
791 static struct notifier_block ftrace_suspend_notifier = {
792 	.notifier_call = ftrace_suspend_notifier_call,
793 };
794 
795 /* fgraph_ret_regs is not defined without CONFIG_FUNCTION_GRAPH_RETVAL */
796 struct fgraph_ret_regs;
797 
798 /*
799  * Send the trace to the ring-buffer.
800  * @return the original return address.
801  */
802 static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs,
803 						unsigned long frame_pointer)
804 {
805 	struct ftrace_ret_stack *ret_stack;
806 	struct ftrace_graph_ret trace;
807 	unsigned long bitmap;
808 	unsigned long ret;
809 	int offset;
810 	int i;
811 
812 	ret_stack = ftrace_pop_return_trace(&trace, &ret, frame_pointer, &offset);
813 
814 	if (unlikely(!ret_stack)) {
815 		ftrace_graph_stop();
816 		WARN_ON(1);
817 		/* Might as well panic. What else to do? */
818 		return (unsigned long)panic;
819 	}
820 
821 	trace.rettime = trace_clock_local();
822 #ifdef CONFIG_FUNCTION_GRAPH_RETVAL
823 	trace.retval = fgraph_ret_regs_return_value(ret_regs);
824 #endif
825 
826 	bitmap = get_bitmap_bits(current, offset);
827 
828 #ifdef CONFIG_HAVE_STATIC_CALL
829 	if (static_branch_likely(&fgraph_do_direct)) {
830 		if (test_bit(fgraph_direct_gops->idx, &bitmap))
831 			static_call(fgraph_retfunc)(&trace, fgraph_direct_gops);
832 	} else
833 #endif
834 	{
835 		for_each_set_bit(i, &bitmap, sizeof(bitmap) * BITS_PER_BYTE) {
836 			struct fgraph_ops *gops = fgraph_array[i];
837 
838 			if (gops == &fgraph_stub)
839 				continue;
840 
841 			gops->retfunc(&trace, gops);
842 		}
843 	}
844 
845 	/*
846 	 * The ftrace_graph_return() may still access the current
847 	 * ret_stack structure, we need to make sure the update of
848 	 * curr_ret_stack is after that.
849 	 */
850 	barrier();
851 	current->curr_ret_stack = offset - FGRAPH_FRAME_OFFSET;
852 
853 	current->curr_ret_depth--;
854 	return ret;
855 }
856 
857 /*
858  * After all architectures have selected HAVE_FUNCTION_GRAPH_RETVAL, we can
859  * leave only ftrace_return_to_handler(ret_regs).
860  */
861 #ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
862 unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs)
863 {
864 	return __ftrace_return_to_handler(ret_regs,
865 				fgraph_ret_regs_frame_pointer(ret_regs));
866 }
867 #else
868 unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
869 {
870 	return __ftrace_return_to_handler(NULL, frame_pointer);
871 }
872 #endif
873 
874 /**
875  * ftrace_graph_get_ret_stack - return the entry of the shadow stack
876  * @task: The task to read the shadow stack from.
877  * @idx: Index down the shadow stack
878  *
879  * Return the ftrace_ret_stack on the shadow stack of the @task at the
880  * call graph at @idx starting with zero. If @idx is zero, it
881  * will return the last saved ret_stack entry. If it is greater than
882  * zero, it will return the corresponding ret_stack for the depth
883  * of saved return addresses.
884  */
885 struct ftrace_ret_stack *
886 ftrace_graph_get_ret_stack(struct task_struct *task, int idx)
887 {
888 	struct ftrace_ret_stack *ret_stack = NULL;
889 	int offset = task->curr_ret_stack;
890 
891 	if (offset < 0)
892 		return NULL;
893 
894 	do {
895 		ret_stack = get_ret_stack(task, offset, &offset);
896 	} while (ret_stack && --idx >= 0);
897 
898 	return ret_stack;
899 }
900 
901 /**
902  * ftrace_graph_top_ret_addr - return the top return address in the shadow stack
903  * @task: The task to read the shadow stack from.
904  *
905  * Return the first return address on the shadow stack of the @task, which is
906  * not the fgraph's return_to_handler.
907  */
908 unsigned long ftrace_graph_top_ret_addr(struct task_struct *task)
909 {
910 	unsigned long return_handler = (unsigned long)dereference_kernel_function_descriptor(return_to_handler);
911 	struct ftrace_ret_stack *ret_stack = NULL;
912 	int offset = task->curr_ret_stack;
913 
914 	if (offset < 0)
915 		return 0;
916 
917 	do {
918 		ret_stack = get_ret_stack(task, offset, &offset);
919 	} while (ret_stack && ret_stack->ret == return_handler);
920 
921 	return ret_stack ? ret_stack->ret : 0;
922 }
923 
924 /**
925  * ftrace_graph_ret_addr - return the original value of the return address
926  * @task: The task the unwinder is being executed on
927  * @idx: An initialized pointer to the next stack index to use
928  * @ret: The current return address (likely pointing to return_handler)
929  * @retp: The address on the stack of the current return location
930  *
931  * This function can be called by stack unwinding code to convert a found stack
932  * return address (@ret) to its original value, in case the function graph
933  * tracer has modified it to be 'return_to_handler'.  If the address hasn't
934  * been modified, the unchanged value of @ret is returned.
935  *
936  * @idx holds the last index used to know where to start from. It should be
937  * initialized to zero for the first iteration as that will mean to start
938  * at the top of the shadow stack. If the location is found, this pointer
939  * will be assigned that location so that if called again, it will continue
940  * where it left off.
941  *
942  * @retp is a pointer to the return address on the stack.
943  */
944 unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
945 				    unsigned long ret, unsigned long *retp)
946 {
947 	struct ftrace_ret_stack *ret_stack;
948 	unsigned long return_handler = (unsigned long)dereference_kernel_function_descriptor(return_to_handler);
949 	int i;
950 
951 	if (ret != return_handler)
952 		return ret;
953 
954 	if (!idx)
955 		return ret;
956 
957 	i = *idx ? : task->curr_ret_stack;
958 	while (i > 0) {
959 		ret_stack = get_ret_stack(task, i, &i);
960 		if (!ret_stack)
961 			break;
962 		/*
963 		 * For the tail-call, there would be 2 or more ftrace_ret_stacks on
964 		 * the ret_stack, which records "return_to_handler" as the return
965 		 * address except for the last one.
966 		 * But on the real stack, there should be 1 entry because tail-call
967 		 * reuses the return address on the stack and jump to the next function.
968 		 * Thus we will continue to find real return address.
969 		 */
970 		if (ret_stack->retp == retp &&
971 		    ret_stack->ret != return_handler) {
972 			*idx = i;
973 			return ret_stack->ret;
974 		}
975 	}
976 
977 	return ret;
978 }
979 
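/*
 * A stack unwinder would typically use this as follows while walking the
 * real stack (a sketch only; found_ret is the return address read off the
 * stack and retp is where it was read from):
 *
 *	int graph_idx = 0;
 *	unsigned long real_ret;
 *
 *	real_ret = ftrace_graph_ret_addr(task, &graph_idx, found_ret, retp);
 *
 * If found_ret is not return_to_handler, the value is returned unchanged,
 * so the call is safe to make for every frame.
 */
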
980 static struct ftrace_ops graph_ops = {
981 	.func			= ftrace_graph_func,
982 	.flags			= FTRACE_OPS_GRAPH_STUB,
983 #ifdef FTRACE_GRAPH_TRAMP_ADDR
984 	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
985 	/* trampoline_size is only needed for dynamically allocated tramps */
986 #endif
987 };
988 
989 void fgraph_init_ops(struct ftrace_ops *dst_ops,
990 		     struct ftrace_ops *src_ops)
991 {
992 	dst_ops->flags = FTRACE_OPS_FL_PID | FTRACE_OPS_GRAPH_STUB;
993 
994 #ifdef CONFIG_DYNAMIC_FTRACE
995 	if (src_ops) {
996 		dst_ops->func_hash = &src_ops->local_hash;
997 		mutex_init(&dst_ops->local_hash.regex_lock);
998 		INIT_LIST_HEAD(&dst_ops->subop_list);
999 		dst_ops->flags |= FTRACE_OPS_FL_INITIALIZED;
1000 	}
1001 #endif
1002 }
1003 
1004 void ftrace_graph_sleep_time_control(bool enable)
1005 {
1006 	fgraph_sleep_time = enable;
1007 }
1008 
1009 /*
1010  * Simply points to ftrace_stub, but with the proper protocol.
1011  * Defined by the linker script in linux/vmlinux.lds.h
1012  */
1013 void ftrace_stub_graph(struct ftrace_graph_ret *trace, struct fgraph_ops *gops);
1014 
1015 /* The callbacks that hook a function */
1016 trace_func_graph_ret_t ftrace_graph_return = ftrace_stub_graph;
1017 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
1018 
1019 /* Try to assign a return stack array to FTRACE_RETSTACK_ALLOC_SIZE tasks. */
1020 static int alloc_retstack_tasklist(unsigned long **ret_stack_list)
1021 {
1022 	int i;
1023 	int ret = 0;
1024 	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
1025 	struct task_struct *g, *t;
1026 
1027 	if (WARN_ON_ONCE(!fgraph_stack_cachep))
1028 		return -ENOMEM;
1029 
1030 	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
1031 		ret_stack_list[i] = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
1032 		if (!ret_stack_list[i]) {
1033 			start = 0;
1034 			end = i;
1035 			ret = -ENOMEM;
1036 			goto free;
1037 		}
1038 	}
1039 
1040 	rcu_read_lock();
1041 	for_each_process_thread(g, t) {
1042 		if (start == end) {
1043 			ret = -EAGAIN;
1044 			goto unlock;
1045 		}
1046 
1047 		if (t->ret_stack == NULL) {
1048 			atomic_set(&t->trace_overrun, 0);
1049 			ret_stack_init_task_vars(ret_stack_list[start]);
1050 			t->curr_ret_stack = 0;
1051 			t->curr_ret_depth = -1;
1052 			/* Make sure the tasks see the 0 first: */
1053 			smp_wmb();
1054 			t->ret_stack = ret_stack_list[start++];
1055 		}
1056 	}
1057 
1058 unlock:
1059 	rcu_read_unlock();
1060 free:
1061 	for (i = start; i < end; i++)
1062 		kmem_cache_free(fgraph_stack_cachep, ret_stack_list[i]);
1063 	return ret;
1064 }
1065 
1066 static void
1067 ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
1068 				struct task_struct *prev,
1069 				struct task_struct *next,
1070 				unsigned int prev_state)
1071 {
1072 	unsigned long long timestamp;
1073 
1074 	 * Does the user want to count the time a function was asleep?
1075 	 * Does the user want to count the time a function was asleep.
1076 	 * If so, do not update the time stamps.
1077 	 */
1078 	if (fgraph_sleep_time)
1079 		return;
1080 
1081 	timestamp = trace_clock_local();
1082 
1083 	prev->ftrace_timestamp = timestamp;
1084 
1085 	/* only process tasks that we timestamped */
1086 	if (!next->ftrace_timestamp)
1087 		return;
1088 
1089 	next->ftrace_sleeptime += timestamp - next->ftrace_timestamp;
1090 }
1091 
1092 static DEFINE_PER_CPU(unsigned long *, idle_ret_stack);
1093 
1094 static void
1095 graph_init_task(struct task_struct *t, unsigned long *ret_stack)
1096 {
1097 	atomic_set(&t->trace_overrun, 0);
1098 	ret_stack_init_task_vars(ret_stack);
1099 	t->ftrace_timestamp = 0;
1100 	t->curr_ret_stack = 0;
1101 	t->curr_ret_depth = -1;
1102 	/* make curr_ret_stack visible before we add the ret_stack */
1103 	smp_wmb();
1104 	t->ret_stack = ret_stack;
1105 }
1106 
1107 /*
1108  * Allocate a return stack for the idle task. May be the first
1109  * time through, or it may be done by CPU hotplug online.
1110  */
1111 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
1112 {
1113 	t->curr_ret_stack = 0;
1114 	t->curr_ret_depth = -1;
1115 	/*
1116 	 * The idle task has no parent, it either has its own
1117 	 * stack or no stack at all.
1118 	 */
1119 	if (t->ret_stack)
1120 		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
1121 
1122 	if (ftrace_graph_active) {
1123 		unsigned long *ret_stack;
1124 
1125 		if (WARN_ON_ONCE(!fgraph_stack_cachep))
1126 			return;
1127 
1128 		ret_stack = per_cpu(idle_ret_stack, cpu);
1129 		if (!ret_stack) {
1130 			ret_stack = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
1131 			if (!ret_stack)
1132 				return;
1133 			per_cpu(idle_ret_stack, cpu) = ret_stack;
1134 		}
1135 		graph_init_task(t, ret_stack);
1136 	}
1137 }
1138 
1139 /* Allocate a return stack for newly created task */
1140 void ftrace_graph_init_task(struct task_struct *t)
1141 {
1142 	/* Make sure we do not use the parent ret_stack */
1143 	t->ret_stack = NULL;
1144 	t->curr_ret_stack = 0;
1145 	t->curr_ret_depth = -1;
1146 
1147 	if (ftrace_graph_active) {
1148 		unsigned long *ret_stack;
1149 
1150 		if (WARN_ON_ONCE(!fgraph_stack_cachep))
1151 			return;
1152 
1153 		ret_stack = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
1154 		if (!ret_stack)
1155 			return;
1156 		graph_init_task(t, ret_stack);
1157 	}
1158 }
1159 
1160 void ftrace_graph_exit_task(struct task_struct *t)
1161 {
1162 	unsigned long *ret_stack = t->ret_stack;
1163 
1164 	t->ret_stack = NULL;
1165 	/* NULL must become visible to IRQs before we free it: */
1166 	barrier();
1167 
1168 	if (ret_stack) {
1169 		if (WARN_ON_ONCE(!fgraph_stack_cachep))
1170 			return;
1171 		kmem_cache_free(fgraph_stack_cachep, ret_stack);
1172 	}
1173 }
1174 
1175 #ifdef CONFIG_DYNAMIC_FTRACE
1176 static int fgraph_pid_func(struct ftrace_graph_ent *trace,
1177 			   struct fgraph_ops *gops)
1178 {
1179 	struct trace_array *tr = gops->ops.private;
1180 	int pid;
1181 
1182 	if (tr) {
1183 		pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
1184 		if (pid == FTRACE_PID_IGNORE)
1185 			return 0;
1186 		if (pid != FTRACE_PID_TRACE &&
1187 		    pid != current->pid)
1188 			return 0;
1189 	}
1190 
1191 	return gops->saved_func(trace, gops);
1192 }
1193 
1194 void fgraph_update_pid_func(void)
1195 {
1196 	struct fgraph_ops *gops;
1197 	struct ftrace_ops *op;
1198 
1199 	if (!(graph_ops.flags & FTRACE_OPS_FL_INITIALIZED))
1200 		return;
1201 
1202 	list_for_each_entry(op, &graph_ops.subop_list, list) {
1203 		if (op->flags & FTRACE_OPS_FL_PID) {
1204 			gops = container_of(op, struct fgraph_ops, ops);
1205 			gops->entryfunc = ftrace_pids_enabled(op) ?
1206 				fgraph_pid_func : gops->saved_func;
1207 			if (ftrace_graph_active == 1)
1208 				static_call_update(fgraph_func, gops->entryfunc);
1209 		}
1210 	}
1211 }
1212 #endif
1213 
1214 /* Allocate a return stack for each task */
1215 static int start_graph_tracing(void)
1216 {
1217 	unsigned long **ret_stack_list;
1218 	int ret, cpu;
1219 
1220 	ret_stack_list = kcalloc(FTRACE_RETSTACK_ALLOC_SIZE,
1221 				 sizeof(*ret_stack_list), GFP_KERNEL);
1222 
1223 	if (!ret_stack_list)
1224 		return -ENOMEM;
1225 
1226 	/* The cpu_boot init_task->ret_stack will never be freed */
1227 	for_each_online_cpu(cpu) {
1228 		if (!idle_task(cpu)->ret_stack)
1229 			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
1230 	}
1231 
1232 	do {
1233 		ret = alloc_retstack_tasklist(ret_stack_list);
1234 	} while (ret == -EAGAIN);
1235 
1236 	if (!ret) {
1237 		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
1238 		if (ret)
1239 			pr_info("ftrace_graph: Couldn't activate tracepoint"
1240 				" probe to kernel_sched_switch\n");
1241 	}
1242 
1243 	kfree(ret_stack_list);
1244 	return ret;
1245 }
1246 
1247 static void init_task_vars(int idx)
1248 {
1249 	struct task_struct *g, *t;
1250 	int cpu;
1251 
1252 	for_each_online_cpu(cpu) {
1253 		if (idle_task(cpu)->ret_stack)
1254 			ret_stack_set_task_var(idle_task(cpu), idx, 0);
1255 	}
1256 
1257 	read_lock(&tasklist_lock);
1258 	for_each_process_thread(g, t) {
1259 		if (t->ret_stack)
1260 			ret_stack_set_task_var(t, idx, 0);
1261 	}
1262 	read_unlock(&tasklist_lock);
1263 }
1264 
1265 static void ftrace_graph_enable_direct(bool enable_branch, struct fgraph_ops *gops)
1266 {
1267 	trace_func_graph_ent_t func = NULL;
1268 	trace_func_graph_ret_t retfunc = NULL;
1269 	int i;
1270 
1271 	if (gops) {
1272 		func = gops->entryfunc;
1273 		retfunc = gops->retfunc;
1274 		fgraph_direct_gops = gops;
1275 	} else {
1276 		for_each_set_bit(i, &fgraph_array_bitmask,
1277 				 sizeof(fgraph_array_bitmask) * BITS_PER_BYTE) {
1278 			func = fgraph_array[i]->entryfunc;
1279 			retfunc = fgraph_array[i]->retfunc;
1280 			fgraph_direct_gops = fgraph_array[i];
1281 		}
1282 	}
1283 	if (WARN_ON_ONCE(!func))
1284 		return;
1285 
1286 	static_call_update(fgraph_func, func);
1287 	static_call_update(fgraph_retfunc, retfunc);
1288 	if (enable_branch)
1289 		static_branch_disable(&fgraph_do_direct);
1290 }
1291 
1292 static void ftrace_graph_disable_direct(bool disable_branch)
1293 {
1294 	if (disable_branch)
1295 		static_branch_disable(&fgraph_do_direct);
1296 	static_call_update(fgraph_func, ftrace_graph_entry_stub);
1297 	static_call_update(fgraph_retfunc, ftrace_graph_ret_stub);
1298 	fgraph_direct_gops = &fgraph_stub;
1299 }
1300 
1301 /* The cpu_boot init_task->ret_stack will never be freed */
1302 static int fgraph_cpu_init(unsigned int cpu)
1303 {
1304 	if (!idle_task(cpu)->ret_stack)
1305 		ftrace_graph_init_idle_task(idle_task(cpu), cpu);
1306 	return 0;
1307 }
1308 
1309 int register_ftrace_graph(struct fgraph_ops *gops)
1310 {
1311 	static bool fgraph_initialized;
1312 	int command = 0;
1313 	int ret = 0;
1314 	int i = -1;
1315 
1316 	guard(mutex)(&ftrace_lock);
1317 
1318 	if (!fgraph_stack_cachep) {
1319 		fgraph_stack_cachep = kmem_cache_create("fgraph_stack",
1320 							SHADOW_STACK_SIZE,
1321 							SHADOW_STACK_SIZE, 0, NULL);
1322 		if (!fgraph_stack_cachep)
1323 			return -ENOMEM;
1324 	}
1325 
1326 	if (!fgraph_initialized) {
1327 		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "fgraph:online",
1328 					fgraph_cpu_init, NULL);
1329 		if (ret < 0) {
1330 			pr_warn("fgraph: Error to init cpu hotplug support\n");
1331 			return ret;
1332 		}
1333 		fgraph_initialized = true;
1334 		ret = 0;
1335 	}
1336 
1337 	if (!fgraph_array[0]) {
1338 		/* The array must always have real data on it */
1339 		for (i = 0; i < FGRAPH_ARRAY_SIZE; i++)
1340 			fgraph_array[i] = &fgraph_stub;
1341 		fgraph_lru_init();
1342 	}
1343 
1344 	i = fgraph_lru_alloc_index();
1345 	if (i < 0 || WARN_ON_ONCE(fgraph_array[i] != &fgraph_stub))
1346 		return -ENOSPC;
1347 	gops->idx = i;
1348 
1349 	ftrace_graph_active++;
1350 
1351 	if (ftrace_graph_active == 2)
1352 		ftrace_graph_disable_direct(true);
1353 
1354 	if (ftrace_graph_active == 1) {
1355 		ftrace_graph_enable_direct(false, gops);
1356 		register_pm_notifier(&ftrace_suspend_notifier);
1357 		ret = start_graph_tracing();
1358 		if (ret)
1359 			goto error;
1360 		/*
1361 		 * Some archs just test to see if these are not
1362 		 * the default function
1363 		 */
1364 		ftrace_graph_return = return_run;
1365 		ftrace_graph_entry = entry_run;
1366 		command = FTRACE_START_FUNC_RET;
1367 	} else {
1368 		init_task_vars(gops->idx);
1369 	}
1370 	/* Always save the function, and reset at unregistering */
1371 	gops->saved_func = gops->entryfunc;
1372 
1373 	ret = ftrace_startup_subops(&graph_ops, &gops->ops, command);
1374 	if (!ret)
1375 		fgraph_array[i] = gops;
1376 
1377 error:
1378 	if (ret) {
1379 		ftrace_graph_active--;
1380 		gops->saved_func = NULL;
1381 		fgraph_lru_release_index(i);
1382 	}
1383 	return ret;
1384 }
1385 
1386 void unregister_ftrace_graph(struct fgraph_ops *gops)
1387 {
1388 	int command = 0;
1389 
1390 	guard(mutex)(&ftrace_lock);
1391 
1392 	if (unlikely(!ftrace_graph_active))
1393 		return;
1394 
1395 	if (unlikely(gops->idx < 0 || gops->idx >= FGRAPH_ARRAY_SIZE ||
1396 		     fgraph_array[gops->idx] != gops))
1397 		return;
1398 
1399 	if (fgraph_lru_release_index(gops->idx) < 0)
1400 		return;
1401 
1402 	fgraph_array[gops->idx] = &fgraph_stub;
1403 
1404 	ftrace_graph_active--;
1405 
1406 	if (!ftrace_graph_active)
1407 		command = FTRACE_STOP_FUNC_RET;
1408 
1409 	ftrace_shutdown_subops(&graph_ops, &gops->ops, command);
1410 
1411 	if (ftrace_graph_active == 1)
1412 		ftrace_graph_enable_direct(true, NULL);
1413 	else if (!ftrace_graph_active)
1414 		ftrace_graph_disable_direct(false);
1415 
1416 	if (!ftrace_graph_active) {
1417 		ftrace_graph_return = ftrace_stub_graph;
1418 		ftrace_graph_entry = ftrace_graph_entry_stub;
1419 		unregister_pm_notifier(&ftrace_suspend_notifier);
1420 		unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
1421 	}
1422 	gops->saved_func = NULL;
1423 }
1424
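/*
 * Putting the registration API together, a minimal hypothetical user of
 * fgraph looks roughly like the sketch below (the names are made up, and
 * the fgraph_ops must stay allocated while it is registered):
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace,
 *				  struct fgraph_ops *gops)
 *	{
 *		return 1;
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace,
 *				    struct fgraph_ops *gops)
 *	{
 *	}
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_graph_entry,
 *		.retfunc	= my_graph_return,
 *	};
 *
 *	ret = register_ftrace_graph(&my_gops);
 *	...
 *	unregister_ftrace_graph(&my_gops);
 *
 * Returning nonzero from the entry handler asks for the return handler to
 * be called when the traced function returns.
 */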