1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Infrastructure to hook into function calls and returns.
4 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
5 * Mostly borrowed from function tracer which
6 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
7 *
8 * Highly modified by Steven Rostedt (VMware).
9 */
10 #include <linux/bits.h>
11 #include <linux/jump_label.h>
12 #include <linux/suspend.h>
13 #include <linux/ftrace.h>
14 #include <linux/static_call.h>
15 #include <linux/slab.h>
16
17 #include <trace/events/sched.h>
18
19 #include "ftrace_internal.h"
20 #include "trace.h"
21
22 /*
23 * FGRAPH_FRAME_SIZE: Size in bytes of the meta data on the shadow stack
24 * FGRAPH_FRAME_OFFSET: Size in long words of the meta data frame
25 */
26 #define FGRAPH_FRAME_SIZE sizeof(struct ftrace_ret_stack)
27 #define FGRAPH_FRAME_OFFSET DIV_ROUND_UP(FGRAPH_FRAME_SIZE, sizeof(long))
28
29 /*
30 * On entry to a function (via function_graph_enter()), a new fgraph frame
31 * (ftrace_ret_stack) is pushed onto the stack as well as a word that
32 * holds a bitmask and a type (called "bitmap"). The bitmap is defined as:
33 *
34 * bits: 0 - 9 offset in words from the previous ftrace_ret_stack
35 *
36 * bits: 10 - 11 Type of storage
37 * 0 - reserved
38 * 1 - bitmap of fgraph_array index
39 * 2 - reserved data
40 *
41 * For type with "bitmap of fgraph_array index" (FGRAPH_TYPE_BITMAP):
42 * bits: 12 - 27 The bitmap of fgraph_ops fgraph_array index
43 * That is, it's a bitmask of 0-15 (16 bits)
44 * where if a corresponding ops in the fgraph_array[]
45 * expects a callback from the return of the function,
46 * its corresponding bit will be set.
47 *
48 *
49 * The top of the ret_stack (when not empty) will always have a reference
50 * word that points to the last fgraph frame that was saved.
51 *
52 * For reserved data:
53 * bits: 12 - 17 The size in words that is stored
54 * bits: 18 - 23 The index of fgraph_array, which shows who is stored
55 *
56 * That is, at the end of function_graph_enter, if the first and fourth
57 * fgraph_ops on the fgraph_array[] (index 0 and 3) need their retfunc called
58 * on the return of the function being traced, and the fourth fgraph_ops
59 * stored two words of data, this is what will be on the task's shadow
60 * ret_stack: (the stack grows upward)
61 *
62 * ret_stack[SHADOW_STACK_OFFSET]
63 * | SHADOW_STACK_TASK_VARS(ret_stack)[15] |
64 * ...
65 * | SHADOW_STACK_TASK_VARS(ret_stack)[0] |
66 * ret_stack[SHADOW_STACK_MAX_OFFSET]
67 * ...
68 * | | <- task->curr_ret_stack
69 * +--------------------------------------------+
70 * | (3 << 17) | (1 << 12) | (2 << 10) | offset2    |
71 * | *or put another way* |
72 * | (3 << FGRAPH_DATA_INDEX_SHIFT)| \ | This is for fgraph_ops[3].
73 * | ((2 - 1) << FGRAPH_DATA_SHIFT)| \ | The data size is 2 words.
74 * | (FGRAPH_TYPE_DATA << FGRAPH_TYPE_SHIFT)| \ |
75 * | (offset2:FGRAPH_FRAME_OFFSET+3) | <- the offset2 is from here
76 * +--------------------------------------------+ ( It is 4 words from the ret_stack)
77 * | STORED DATA WORD 2 |
78 * | STORED DATA WORD 1 |
79 * +--------------------------------------------+
80 * | (9 << 12) | (1 << 10) | FGRAPH_FRAME_OFFSET|
81 * | *or put another way* |
82 * | (BIT(3)|BIT(0)) << FGRAPH_INDEX_SHIFT | \ |
83 * | FGRAPH_TYPE_BITMAP << FGRAPH_TYPE_SHIFT| \ |
84 * | (offset1:FGRAPH_FRAME_OFFSET) | <- the offset1 is from here
85 * +--------------------------------------------+
86 * | struct ftrace_ret_stack |
87 * | (stores the saved ret pointer) | <- the offset points here
88 * +--------------------------------------------+
89 * | (X) | (N) | ( N words away from
90 * | | previous ret_stack)
91 * ...
92 * ret_stack[0]
93 *
94 * If a backtrace is required, and the real return pointer needs to be
95 * fetched, then it looks at the task's curr_ret_stack offset. If it
96 * is greater than zero (reserved, or right before being popped), it would mask
97 * the value by FGRAPH_FRAME_OFFSET_MASK to get the offset of the
98 * ftrace_ret_stack structure stored on the shadow stack.
99 */
100
101 /*
102 * The following is for the top word on the stack:
103 *
104 * FGRAPH_FRAME_OFFSET (0-9) holds the offset delta to the fgraph frame
105 * FGRAPH_TYPE (10-11) holds the type of word this is.
106 * (RESERVED or BITMAP)
107 */
108 #define FGRAPH_FRAME_OFFSET_BITS 10
109 #define FGRAPH_FRAME_OFFSET_MASK GENMASK(FGRAPH_FRAME_OFFSET_BITS - 1, 0)
110
111 #define FGRAPH_TYPE_BITS 2
112 #define FGRAPH_TYPE_MASK GENMASK(FGRAPH_TYPE_BITS - 1, 0)
113 #define FGRAPH_TYPE_SHIFT FGRAPH_FRAME_OFFSET_BITS
114
115 enum {
116 FGRAPH_TYPE_RESERVED = 0,
117 FGRAPH_TYPE_BITMAP = 1,
118 FGRAPH_TYPE_DATA = 2,
119 };
120
121 /*
122 * For BITMAP type:
123 * FGRAPH_INDEX (12-27) bits holding the bitmap of gops indexes whose return callback should be called
124 */
125 #define FGRAPH_INDEX_BITS 16
126 #define FGRAPH_INDEX_MASK GENMASK(FGRAPH_INDEX_BITS - 1, 0)
127 #define FGRAPH_INDEX_SHIFT (FGRAPH_TYPE_SHIFT + FGRAPH_TYPE_BITS)
128
129 /*
130 * For DATA type:
131 * FGRAPH_DATA (12-17) bits hold the size of data (in words)
132 * FGRAPH_INDEX (18-23) bits hold the index for which gops->idx the data is for
133 *
134 * Note:
135 * data_size == 0 means 1 word, and 31 (=2^5 - 1) means 32 words.
136 */
137 #define FGRAPH_DATA_BITS 5
138 #define FGRAPH_DATA_MASK GENMASK(FGRAPH_DATA_BITS - 1, 0)
139 #define FGRAPH_DATA_SHIFT (FGRAPH_TYPE_SHIFT + FGRAPH_TYPE_BITS)
140 #define FGRAPH_MAX_DATA_SIZE (sizeof(long) * (1 << FGRAPH_DATA_BITS))
141
142 #define FGRAPH_DATA_INDEX_BITS 4
143 #define FGRAPH_DATA_INDEX_MASK GENMASK(FGRAPH_DATA_INDEX_BITS - 1, 0)
144 #define FGRAPH_DATA_INDEX_SHIFT (FGRAPH_DATA_SHIFT + FGRAPH_DATA_BITS)
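
/*
 * Worked example, matching the DATA frame in the shadow stack diagram above:
 * the word describing 2 words of data stored for fgraph_ops index 3 is built
 * (see make_data_type_val() below) as:
 *
 *   (3 << FGRAPH_DATA_INDEX_SHIFT)            = 3 << 17   which gops the data is for
 * | ((2 - 1) << FGRAPH_DATA_SHIFT)            = 1 << 12   data size - 1, in words
 * | (FGRAPH_TYPE_DATA << FGRAPH_TYPE_SHIFT)   = 2 << 10   the word type
 * | (FGRAPH_FRAME_OFFSET + 3)                             offset back to the ftrace_ret_stack
 */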
145
146 #define FGRAPH_MAX_INDEX \
147 ((FGRAPH_INDEX_SIZE << FGRAPH_DATA_BITS) + FGRAPH_RET_INDEX)
148
149 #define FGRAPH_ARRAY_SIZE FGRAPH_INDEX_BITS
150
151 /*
152 * SHADOW_STACK_SIZE: The size in bytes of the entire shadow stack
153 * SHADOW_STACK_OFFSET: The size in long words of the shadow stack
154 * SHADOW_STACK_MAX_OFFSET: The max offset of the stack for a new frame to be added
155 */
156 #define SHADOW_STACK_SIZE (4096)
157 #define SHADOW_STACK_OFFSET (SHADOW_STACK_SIZE / sizeof(long))
158 /* Leave a buffer at the end */
159 #define SHADOW_STACK_MAX_OFFSET \
160 (SHADOW_STACK_OFFSET - (FGRAPH_FRAME_OFFSET + 1 + FGRAPH_ARRAY_SIZE))
161
162 /* RET_STACK(): Return the frame from a given @offset from task @t */
163 #define RET_STACK(t, offset) ((struct ftrace_ret_stack *)(&(t)->ret_stack[offset]))
164
165 /*
166 * Each fgraph_ops has a reserved unsigned long at the end (top) of the
167 * ret_stack to store task specific state.
168 */
169 #define SHADOW_STACK_TASK_VARS(ret_stack) \
170 ((unsigned long *)(&(ret_stack)[SHADOW_STACK_OFFSET - FGRAPH_ARRAY_SIZE]))
171
172 DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);
173 int ftrace_graph_active;
174
175 static struct kmem_cache *fgraph_stack_cachep;
176
177 static struct fgraph_ops *fgraph_array[FGRAPH_ARRAY_SIZE];
178 static unsigned long fgraph_array_bitmask;
179
180 /* LRU index table for fgraph_array */
181 static int fgraph_lru_table[FGRAPH_ARRAY_SIZE];
182 static int fgraph_lru_next;
183 static int fgraph_lru_last;
184
185 /* Initialize fgraph_lru_table with unused index */
186 static void fgraph_lru_init(void)
187 {
188 int i;
189
190 for (i = 0; i < FGRAPH_ARRAY_SIZE; i++)
191 fgraph_lru_table[i] = i;
192 }
193
194 /* Release the used index to the LRU table */
195 static int fgraph_lru_release_index(int idx)
196 {
197 if (idx < 0 || idx >= FGRAPH_ARRAY_SIZE ||
198 WARN_ON_ONCE(fgraph_lru_table[fgraph_lru_last] != -1))
199 return -1;
200
201 fgraph_lru_table[fgraph_lru_last] = idx;
202 fgraph_lru_last = (fgraph_lru_last + 1) % FGRAPH_ARRAY_SIZE;
203
204 clear_bit(idx, &fgraph_array_bitmask);
205 return 0;
206 }
207
208 /* Allocate a new index from LRU table */
209 static int fgraph_lru_alloc_index(void)
210 {
211 int idx = fgraph_lru_table[fgraph_lru_next];
212
213 /* No id is available */
214 if (idx == -1)
215 return -1;
216
217 fgraph_lru_table[fgraph_lru_next] = -1;
218 fgraph_lru_next = (fgraph_lru_next + 1) % FGRAPH_ARRAY_SIZE;
219
220 set_bit(idx, &fgraph_array_bitmask);
221 return idx;
222 }
223
224 /* Get the offset to the fgraph frame from a ret_stack value */
225 static inline int __get_offset(unsigned long val)
226 {
227 return val & FGRAPH_FRAME_OFFSET_MASK;
228 }
229
230 /* Get the type of word from a ret_stack value */
231 static inline int __get_type(unsigned long val)
232 {
233 return (val >> FGRAPH_TYPE_SHIFT) & FGRAPH_TYPE_MASK;
234 }
235
236 /* Get the data_index for a DATA type ret_stack word */
237 static inline int __get_data_index(unsigned long val)
238 {
239 return (val >> FGRAPH_DATA_INDEX_SHIFT) & FGRAPH_DATA_INDEX_MASK;
240 }
241
242 /* Get the data_size for a DATA type ret_stack word */
243 static inline int __get_data_size(unsigned long val)
244 {
245 return ((val >> FGRAPH_DATA_SHIFT) & FGRAPH_DATA_MASK) + 1;
246 }
247
248 /* Get the word from the ret_stack at @offset */
249 static inline unsigned long get_fgraph_entry(struct task_struct *t, int offset)
250 {
251 return t->ret_stack[offset];
252 }
253
254 /* Get the FRAME_OFFSET from the word from the @offset on ret_stack */
255 static inline int get_frame_offset(struct task_struct *t, int offset)
256 {
257 return __get_offset(t->ret_stack[offset]);
258 }
259
260 /* For BITMAP type: get the bitmask from the @offset at ret_stack */
261 static inline unsigned long
262 get_bitmap_bits(struct task_struct *t, int offset)
263 {
264 return (t->ret_stack[offset] >> FGRAPH_INDEX_SHIFT) & FGRAPH_INDEX_MASK;
265 }
266
267 /* Write the bitmap to the ret_stack at @offset (does index, offset and bitmask) */
268 static inline void
269 set_bitmap(struct task_struct *t, int offset, unsigned long bitmap)
270 {
271 t->ret_stack[offset] = (bitmap << FGRAPH_INDEX_SHIFT) |
272 (FGRAPH_TYPE_BITMAP << FGRAPH_TYPE_SHIFT) | FGRAPH_FRAME_OFFSET;
273 }
274
275 /* For DATA type: get the data saved under the ret_stack word at @offset */
276 static inline void *get_data_type_data(struct task_struct *t, int offset)
277 {
278 unsigned long val = t->ret_stack[offset];
279
280 if (__get_type(val) != FGRAPH_TYPE_DATA)
281 return NULL;
282 offset -= __get_data_size(val);
283 return (void *)&t->ret_stack[offset];
284 }
285
286 /* Create the ret_stack word for a DATA type */
287 static inline unsigned long make_data_type_val(int idx, int size, int offset)
288 {
289 return (idx << FGRAPH_DATA_INDEX_SHIFT) |
290 ((size - 1) << FGRAPH_DATA_SHIFT) |
291 (FGRAPH_TYPE_DATA << FGRAPH_TYPE_SHIFT) | offset;
292 }
293
294 /* ftrace_graph_entry set to this to tell some archs to run function graph */
295 static int entry_run(struct ftrace_graph_ent *trace, struct fgraph_ops *ops,
296 struct ftrace_regs *fregs)
297 {
298 return 0;
299 }
300
301 /* ftrace_graph_return set to this to tell some archs to run function graph */
302 static void return_run(struct ftrace_graph_ret *trace, struct fgraph_ops *ops,
303 struct ftrace_regs *fregs)
304 {
305 }
306
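/* Set the task specific state variable of fgraph_ops @idx on @t's shadow stack */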
307 static void ret_stack_set_task_var(struct task_struct *t, int idx, long val)
308 {
309 unsigned long *gvals = SHADOW_STACK_TASK_VARS(t->ret_stack);
310
311 gvals[idx] = val;
312 }
313
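/* Return a pointer to the task specific state variable of fgraph_ops @idx */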
314 static unsigned long *
315 ret_stack_get_task_var(struct task_struct *t, int idx)
316 {
317 unsigned long *gvals = SHADOW_STACK_TASK_VARS(t->ret_stack);
318
319 return &gvals[idx];
320 }
321
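/* Clear all the task specific state variables at the top of @ret_stack */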
322 static void ret_stack_init_task_vars(unsigned long *ret_stack)
323 {
324 unsigned long *gvals = SHADOW_STACK_TASK_VARS(ret_stack);
325
326 memset(gvals, 0, sizeof(*gvals) * FGRAPH_ARRAY_SIZE);
327 }
328
329 /**
330 * fgraph_reserve_data - Reserve storage on the task's ret_stack
331 * @idx: The index of fgraph_array
332 * @size_bytes: The size in bytes to reserve
333 *
334 * Reserves space of up to FGRAPH_MAX_DATA_SIZE bytes on the
335 * task's ret_stack shadow stack, for a given fgraph_ops during
336 * the entryfunc() call. If entryfunc() returns zero, the storage
337 * is discarded. An entryfunc() can only call this once per iteration.
338 * The fgraph_ops retfunc() can retrieve this stored data with
339 * fgraph_retrieve_data().
340 *
341 * Returns: On success, a pointer to the data on the stack.
342 * Otherwise, NULL if there's not enough space left on the
343 * ret_stack for the data, or if fgraph_reserve_data() was called
344 * more than once for a single entryfunc() call.
345 */
346 void *fgraph_reserve_data(int idx, int size_bytes)
347 {
348 unsigned long val;
349 void *data;
350 int curr_ret_stack = current->curr_ret_stack;
351 int data_size;
352
353 if (size_bytes > FGRAPH_MAX_DATA_SIZE)
354 return NULL;
355
356 /* Convert the data size to number of longs. */
357 data_size = (size_bytes + sizeof(long) - 1) >> (sizeof(long) == 4 ? 2 : 3);
358
359 val = get_fgraph_entry(current, curr_ret_stack - 1);
360 data = &current->ret_stack[curr_ret_stack];
361
362 curr_ret_stack += data_size + 1;
363 if (unlikely(curr_ret_stack >= SHADOW_STACK_MAX_OFFSET))
364 return NULL;
365
366 val = make_data_type_val(idx, data_size, __get_offset(val) + data_size + 1);
367
368 /* Set the last word to be reserved */
369 current->ret_stack[curr_ret_stack - 1] = val;
370
371 /* Make sure interrupts see this */
372 barrier();
373 current->curr_ret_stack = curr_ret_stack;
374 /* Again sync with interrupts, and reset reserve */
375 current->ret_stack[curr_ret_stack - 1] = val;
376
377 return data;
378 }
379
380 /**
381 * fgraph_retrieve_data - Retrieve stored data from fgraph_reserve_data()
382 * @idx: the index of fgraph_array (fgraph_ops::idx)
383 * @size_bytes: pointer to retrieved data size.
384 *
385 * This is to be called by a fgraph_ops retfunc(), to retrieve data that
386 * was stored by the fgraph_ops entryfunc() on the function entry.
387 * That is, this will retrieve the data that was reserved on the
388 * entry of the function that corresponds to the exit of the function
389 * that the fgraph_ops retfunc() is called on.
390 *
391 * Returns: The stored data from fgraph_reserve_data() called by the
392 * matching entryfunc() for the retfunc() this is called from.
393 * Or NULL if there was nothing stored.
394 */
395 void *fgraph_retrieve_data(int idx, int *size_bytes)
396 {
397 return fgraph_retrieve_parent_data(idx, size_bytes, 0);
398 }
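
/*
 * Illustrative sketch only (not an in-tree user): an fgraph_ops entryfunc()
 * could stash a timestamp that its retfunc() reads back on function return.
 * The names my_entry() and my_return() are hypothetical.
 *
 *	static int my_entry(struct ftrace_graph_ent *trace,
 *			    struct fgraph_ops *gops, struct ftrace_regs *fregs)
 *	{
 *		u64 *ts = fgraph_reserve_data(gops->idx, sizeof(*ts));
 *
 *		if (!ts)
 *			return 0;	// no room: do not trace this return
 *		*ts = trace_clock_local();
 *		return 1;
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace,
 *			      struct fgraph_ops *gops, struct ftrace_regs *fregs)
 *	{
 *		int size;
 *		u64 *ts = fgraph_retrieve_data(gops->idx, &size);
 *
 *		if (ts)
 *			pr_info("%ps took %llu ns\n", (void *)trace->func,
 *				trace_clock_local() - *ts);
 *	}
 */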
399
400 /**
401 * fgraph_get_task_var - retrieve a task specific state variable
402 * @gops: The ftrace_ops that owns the task specific variable
403 *
404 * Every registered fgraph_ops has a task state variable
405 * reserved on the task's ret_stack. This function returns the
406 * address to that variable.
407 *
408 * Returns the address of the fgraph_ops @gops task specific
409 * unsigned long variable.
410 */
411 unsigned long *fgraph_get_task_var(struct fgraph_ops *gops)
412 {
413 return ret_stack_get_task_var(current, gops->idx);
414 }
415
416 /*
417 * @offset: The offset into @t->ret_stack to find the ret_stack entry
418 * @frame_offset: Where to place the offset into @t->ret_stack of that entry
419 *
420 * Returns a pointer to the previous ret_stack below @offset or NULL
421 * when it reaches the bottom of the stack.
422 *
423 * Calling this with:
424 *
425 * offset = task->curr_ret_stack;
426 * do {
427 * ret_stack = get_ret_stack(task, offset, &offset);
428 * } while (ret_stack);
429 *
430 * Will iterate through all the ret_stack entries from curr_ret_stack
431 * down to the first one.
432 */
433 static inline struct ftrace_ret_stack *
434 get_ret_stack(struct task_struct *t, int offset, int *frame_offset)
435 {
436 int offs;
437
438 BUILD_BUG_ON(FGRAPH_FRAME_SIZE % sizeof(long));
439
440 if (unlikely(offset <= 0))
441 return NULL;
442
443 offs = get_frame_offset(t, --offset);
444 if (WARN_ON_ONCE(offs <= 0 || offs > offset))
445 return NULL;
446
447 offset -= offs;
448
449 *frame_offset = offset;
450 return RET_STACK(t, offset);
451 }
452
453 /**
454 * fgraph_retrieve_parent_data - get data from a parent function
455 * @idx: The index into the fgraph_array (fgraph_ops::idx)
456 * @size_bytes: A pointer to retrieved data size
457 * @depth: The depth to find the parent (0 is the current function)
458 *
459 * This is similar to fgraph_retrieve_data() but can be used to retrieve
460 * data from a parent caller function.
461 *
462 * Return: a pointer to the specified parent data or NULL if not found
463 */
464 void *fgraph_retrieve_parent_data(int idx, int *size_bytes, int depth)
465 {
466 struct ftrace_ret_stack *ret_stack = NULL;
467 int offset = current->curr_ret_stack;
468 unsigned long val;
469
470 if (offset <= 0)
471 return NULL;
472
473 for (;;) {
474 int next_offset;
475
476 ret_stack = get_ret_stack(current, offset, &next_offset);
477 if (!ret_stack || --depth < 0)
478 break;
479 offset = next_offset;
480 }
481
482 if (!ret_stack)
483 return NULL;
484
485 offset--;
486
487 val = get_fgraph_entry(current, offset);
488 while (__get_type(val) == FGRAPH_TYPE_DATA) {
489 if (__get_data_index(val) == idx)
490 goto found;
491 offset -= __get_data_size(val) + 1;
492 val = get_fgraph_entry(current, offset);
493 }
494 return NULL;
495 found:
496 if (size_bytes)
497 *size_bytes = __get_data_size(val) * sizeof(long);
498 return get_data_type_data(current, offset);
499 }
500
501 #ifdef CONFIG_DYNAMIC_FTRACE
502 /*
503 * archs can override this function if they must do something
504 * to enable the hook for the graph tracer.
505 */
506 int __weak ftrace_enable_ftrace_graph_caller(void)
507 {
508 return 0;
509 }
510
511 /*
512 * archs can override this function if they must do something
513 * to disable the hook for the graph tracer.
514 */
515 int __weak ftrace_disable_ftrace_graph_caller(void)
516 {
517 return 0;
518 }
519 #endif
520
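/* Stub callbacks used when no fgraph_ops callback should be run */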
521 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace,
522 struct fgraph_ops *gops,
523 struct ftrace_regs *fregs)
524 {
525 return 0;
526 }
527
528 static void ftrace_graph_ret_stub(struct ftrace_graph_ret *trace,
529 struct fgraph_ops *gops,
530 struct ftrace_regs *fregs)
531 {
532 }
533
534 static struct fgraph_ops fgraph_stub = {
535 .entryfunc = ftrace_graph_entry_stub,
536 .retfunc = ftrace_graph_ret_stub,
537 };
538
539 static struct fgraph_ops *fgraph_direct_gops = &fgraph_stub;
540 DEFINE_STATIC_CALL(fgraph_func, ftrace_graph_entry_stub);
541 DEFINE_STATIC_CALL(fgraph_retfunc, ftrace_graph_ret_stub);
542 static DEFINE_STATIC_KEY_TRUE(fgraph_do_direct);
543
544 /**
545 * ftrace_graph_stop - set to permanently disable function graph tracing
546 *
547 * In case of an error in function graph tracing, this is called
548 * to try to keep function graph tracing from causing any more harm.
549 * Usually this is pretty severe and this is called to try to at least
550 * get a warning out to the user.
551 */
552 void ftrace_graph_stop(void)
553 {
554 static_branch_enable(&kill_ftrace_graph);
555 }
556
557 /* Add a function return address to the trace stack on thread info. */
558 static int
559 ftrace_push_return_trace(unsigned long ret, unsigned long func,
560 unsigned long frame_pointer, unsigned long *retp,
561 int fgraph_idx)
562 {
563 struct ftrace_ret_stack *ret_stack;
564 unsigned long val;
565 int offset;
566
567 if (unlikely(ftrace_graph_is_dead()))
568 return -EBUSY;
569
570 if (!current->ret_stack)
571 return -EBUSY;
572
573 BUILD_BUG_ON(SHADOW_STACK_SIZE % sizeof(long));
574
575 /* Set val to "reserved" with the delta to the new fgraph frame */
576 val = (FGRAPH_TYPE_RESERVED << FGRAPH_TYPE_SHIFT) | FGRAPH_FRAME_OFFSET;
577
578 /*
579 * We must make sure the ret_stack is tested before we read
580 * anything else.
581 */
582 smp_rmb();
583
584 /*
585 * Check if there's room on the shadow stack to fit an fgraph frame
586 * and a bitmap word.
587 */
588 if (current->curr_ret_stack + FGRAPH_FRAME_OFFSET + 1 >= SHADOW_STACK_MAX_OFFSET) {
589 atomic_inc(&current->trace_overrun);
590 return -EBUSY;
591 }
592
593 offset = READ_ONCE(current->curr_ret_stack);
594 ret_stack = RET_STACK(current, offset);
595 offset += FGRAPH_FRAME_OFFSET;
596
597 /* ret offset = FGRAPH_FRAME_OFFSET ; type = reserved */
598 current->ret_stack[offset] = val;
599 ret_stack->ret = ret;
600 /*
601 * The unwinders expect curr_ret_stack to point to either zero
602 * or an offset where to find the next ret_stack. Even though the
603 * ret stack might be bogus, we want to write the ret and the
604 * offset to find the ret_stack before we increment the stack pointer.
605 * If an interrupt comes in now before we increment the curr_ret_stack
606 * it may blow away what we wrote. But that's fine, because the
607 * offset will still be correct (even though the 'ret' won't be).
608 * What we worry about is the offset being correct after we increment
609 * the curr_ret_stack and before we update that offset, as if an
610 * interrupt comes in and does an unwind stack dump, it will need
611 * at least a correct offset!
612 */
613 barrier();
614 WRITE_ONCE(current->curr_ret_stack, offset + 1);
615 /*
616 * This next barrier is to ensure that an interrupt coming in
617 * will not corrupt what we are about to write.
618 */
619 barrier();
620
621 /* Still keep it reserved even if an interrupt came in */
622 current->ret_stack[offset] = val;
623
624 ret_stack->ret = ret;
625 ret_stack->func = func;
626 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
627 ret_stack->fp = frame_pointer;
628 #endif
629 ret_stack->retp = retp;
630 return offset;
631 }
632
633 /*
634 * Not all archs define MCOUNT_INSN_SIZE which is used to look for direct
635 * functions. But those archs currently don't support direct functions
636 * anyway, and ftrace_find_rec_direct() is just a stub for them.
637 * Define MCOUNT_INSN_SIZE to keep those archs compiling.
638 */
639 #ifndef MCOUNT_INSN_SIZE
640 /* Make sure this only works without direct calls */
641 # ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
642 # error MCOUNT_INSN_SIZE not defined with direct calls enabled
643 # endif
644 # define MCOUNT_INSN_SIZE 0
645 #endif
646
647 /* If the caller does not use ftrace, call this function. */
648 int function_graph_enter_regs(unsigned long ret, unsigned long func,
649 unsigned long frame_pointer, unsigned long *retp,
650 struct ftrace_regs *fregs)
651 {
652 struct ftrace_graph_ent trace;
653 unsigned long bitmap = 0;
654 int offset;
655 int bit;
656 int i;
657
658 bit = ftrace_test_recursion_trylock(func, ret);
659 if (bit < 0)
660 return -EBUSY;
661
662 trace.func = func;
663 trace.depth = ++current->curr_ret_depth;
664
665 offset = ftrace_push_return_trace(ret, func, frame_pointer, retp, 0);
666 if (offset < 0)
667 goto out;
668
669 #ifdef CONFIG_HAVE_STATIC_CALL
670 if (static_branch_likely(&fgraph_do_direct)) {
671 int save_curr_ret_stack = current->curr_ret_stack;
672
673 if (static_call(fgraph_func)(&trace, fgraph_direct_gops, fregs))
674 bitmap |= BIT(fgraph_direct_gops->idx);
675 else
676 /* Clear out any saved storage */
677 current->curr_ret_stack = save_curr_ret_stack;
678 } else
679 #endif
680 {
681 for_each_set_bit(i, &fgraph_array_bitmask,
682 sizeof(fgraph_array_bitmask) * BITS_PER_BYTE) {
683 struct fgraph_ops *gops = READ_ONCE(fgraph_array[i]);
684 int save_curr_ret_stack;
685
686 if (gops == &fgraph_stub)
687 continue;
688
689 save_curr_ret_stack = current->curr_ret_stack;
690 if (ftrace_ops_test(&gops->ops, func, NULL) &&
691 gops->entryfunc(&trace, gops, fregs))
692 bitmap |= BIT(i);
693 else
694 /* Clear out any saved storage */
695 current->curr_ret_stack = save_curr_ret_stack;
696 }
697 }
698
699 if (!bitmap)
700 goto out_ret;
701
702 /*
703 * Since this function uses fgraph_idx = 0 as a tail-call checking
704 * flag, set that bit always.
705 */
706 set_bitmap(current, offset, bitmap | BIT(0));
707 ftrace_test_recursion_unlock(bit);
708 return 0;
709 out_ret:
710 current->curr_ret_stack -= FGRAPH_FRAME_OFFSET + 1;
711 out:
712 current->curr_ret_depth--;
713 ftrace_test_recursion_unlock(bit);
714 return -EBUSY;
715 }
716
717 /* Retrieve a function return address from the trace stack on thread info. */
718 static struct ftrace_ret_stack *
719 ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
720 unsigned long frame_pointer, int *offset)
721 {
722 struct ftrace_ret_stack *ret_stack;
723
724 ret_stack = get_ret_stack(current, current->curr_ret_stack, offset);
725
726 if (unlikely(!ret_stack)) {
727 ftrace_graph_stop();
728 WARN(1, "Bad function graph ret_stack pointer: %d",
729 current->curr_ret_stack);
730 /* Might as well panic, otherwise we have nowhere to go */
731 *ret = (unsigned long)panic;
732 return NULL;
733 }
734
735 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
736 /*
737 * The arch may choose to record the frame pointer used
738 * and check it here to make sure that it is what we expect it
739 * to be. If gcc does not set the place holder of the return
740 * address in the frame pointer, and does a copy instead, then
741 * the function graph trace will fail. This test detects this
742 * case.
743 *
744 * Currently, x86_32 with optimize for size (-Os) makes the latest
745 * gcc do the above.
746 *
747 * Note, -mfentry does not use frame pointers, and this test
748 * is not needed if CC_USING_FENTRY is set.
749 */
750 if (unlikely(ret_stack->fp != frame_pointer)) {
751 ftrace_graph_stop();
752 WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
753 " from func %ps return to %lx\n",
754 ret_stack->fp,
755 frame_pointer,
756 (void *)ret_stack->func,
757 ret_stack->ret);
758 *ret = (unsigned long)panic;
759 return NULL;
760 }
761 #endif
762
763 *offset += FGRAPH_FRAME_OFFSET;
764 *ret = ret_stack->ret;
765 trace->func = ret_stack->func;
766 trace->overrun = atomic_read(&current->trace_overrun);
767 trace->depth = current->curr_ret_depth;
768 /*
769 * We still want to trace interrupts coming in if
770 * max_depth is set to 1. Make sure the decrement is
771 * seen before ftrace_graph_return.
772 */
773 barrier();
774
775 return ret_stack;
776 }
777
778 /*
779 * Hibernation protection.
780 * The state of the current task is too unstable during
781 * suspend/restore to disk. We want to protect against that.
782 */
783 static int
784 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
785 void *unused)
786 {
787 switch (state) {
788 case PM_HIBERNATION_PREPARE:
789 pause_graph_tracing();
790 break;
791
792 case PM_POST_HIBERNATION:
793 unpause_graph_tracing();
794 break;
795 }
796 return NOTIFY_DONE;
797 }
798
799 static struct notifier_block ftrace_suspend_notifier = {
800 .notifier_call = ftrace_suspend_notifier_call,
801 };
802
803 /*
804 * Send the trace to the ring-buffer.
805 * @return the original return address.
806 */
807 static inline unsigned long
808 __ftrace_return_to_handler(struct ftrace_regs *fregs, unsigned long frame_pointer)
809 {
810 struct ftrace_ret_stack *ret_stack;
811 struct ftrace_graph_ret trace;
812 unsigned long bitmap;
813 unsigned long ret;
814 int offset;
815 int bit;
816 int i;
817
818 ret_stack = ftrace_pop_return_trace(&trace, &ret, frame_pointer, &offset);
819
820 if (unlikely(!ret_stack)) {
821 ftrace_graph_stop();
822 WARN_ON(1);
823 /* Might as well panic. What else to do? */
824 return (unsigned long)panic;
825 }
826
827 if (fregs)
828 ftrace_regs_set_instruction_pointer(fregs, ret);
829
830 bit = ftrace_test_recursion_trylock(trace.func, ret);
831 /*
832 * This can fail because ftrace_test_recursion_trylock() allows one nest
833 * call. If we are already in a nested call, then we don't probe this and
834 * just return the original return address.
835 */
836 if (unlikely(bit < 0))
837 goto out;
838
839 #ifdef CONFIG_FUNCTION_GRAPH_RETVAL
840 trace.retval = ftrace_regs_get_return_value(fregs);
841 #endif
842
843 bitmap = get_bitmap_bits(current, offset);
844
845 #ifdef CONFIG_HAVE_STATIC_CALL
846 if (static_branch_likely(&fgraph_do_direct)) {
847 if (test_bit(fgraph_direct_gops->idx, &bitmap))
848 static_call(fgraph_retfunc)(&trace, fgraph_direct_gops, fregs);
849 } else
850 #endif
851 {
852 for_each_set_bit(i, &bitmap, sizeof(bitmap) * BITS_PER_BYTE) {
853 struct fgraph_ops *gops = READ_ONCE(fgraph_array[i]);
854
855 if (gops == &fgraph_stub)
856 continue;
857
858 gops->retfunc(&trace, gops, fregs);
859 }
860 }
861
862 ftrace_test_recursion_unlock(bit);
863 out:
864 /*
865 * The ftrace_graph_return() may still access the current
866 * ret_stack structure, we need to make sure the update of
867 * curr_ret_stack is after that.
868 */
869 barrier();
870 current->curr_ret_stack = offset - FGRAPH_FRAME_OFFSET;
871
872 current->curr_ret_depth--;
873 return ret;
874 }
875
876 /*
877 * After all architectures have selected HAVE_FUNCTION_GRAPH_FREGS, we can
878 * leave only ftrace_return_to_handler(fregs).
879 */
880 #ifdef CONFIG_HAVE_FUNCTION_GRAPH_FREGS
881 unsigned long ftrace_return_to_handler(struct ftrace_regs *fregs)
882 {
883 return __ftrace_return_to_handler(fregs,
884 ftrace_regs_get_frame_pointer(fregs));
885 }
886 #else
887 unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
888 {
889 return __ftrace_return_to_handler(NULL, frame_pointer);
890 }
891 #endif
892
893 /**
894 * ftrace_graph_get_ret_stack - return the entry of the shadow stack
895 * @task: The task to read the shadow stack from.
896 * @idx: Index down the shadow stack
897 *
898 * Return the ftrace_ret_stack entry on the shadow stack of the @task at the
899 * call graph at @idx starting with zero. If @idx is zero, it
900 * will return the last saved ret_stack entry. If it is greater than
901 * zero, it will return the corresponding ret_stack for the depth
902 * of saved return addresses.
903 */
904 struct ftrace_ret_stack *
905 ftrace_graph_get_ret_stack(struct task_struct *task, int idx)
906 {
907 struct ftrace_ret_stack *ret_stack = NULL;
908 int offset = task->curr_ret_stack;
909
910 if (offset < 0)
911 return NULL;
912
913 do {
914 ret_stack = get_ret_stack(task, offset, &offset);
915 } while (ret_stack && --idx >= 0);
916
917 return ret_stack;
918 }
919
920 /**
921 * ftrace_graph_top_ret_addr - return the top return address in the shadow stack
922 * @task: The task to read the shadow stack from.
923 *
924 * Return the first return address on the shadow stack of the @task, which is
925 * not the fgraph's return_to_handler.
926 */
927 unsigned long ftrace_graph_top_ret_addr(struct task_struct *task)
928 {
929 unsigned long return_handler = (unsigned long)dereference_kernel_function_descriptor(return_to_handler);
930 struct ftrace_ret_stack *ret_stack = NULL;
931 int offset = task->curr_ret_stack;
932
933 if (offset < 0)
934 return 0;
935
936 do {
937 ret_stack = get_ret_stack(task, offset, &offset);
938 } while (ret_stack && ret_stack->ret == return_handler);
939
940 return ret_stack ? ret_stack->ret : 0;
941 }
942
943 /**
944 * ftrace_graph_ret_addr - return the original value of the return address
945 * @task: The task the unwinder is being executed on
946 * @idx: An initialized pointer to the next stack index to use
947 * @ret: The current return address (likely pointing to return_handler)
948 * @retp: The address on the stack of the current return location
949 *
950 * This function can be called by stack unwinding code to convert a found stack
951 * return address (@ret) to its original value, in case the function graph
952 * tracer has modified it to be 'return_to_handler'. If the address hasn't
953 * been modified, the unchanged value of @ret is returned.
954 *
955 * @idx holds the last index used to know where to start from. It should be
956 * initialized to zero for the first iteration as that will mean to start
957 * at the top of the shadow stack. If the location is found, this pointer
958 * will be assigned that location so that if called again, it will continue
959 * where it left off.
960 *
961 * @retp is a pointer to the return address on the stack.
962 */
963 unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
964 unsigned long ret, unsigned long *retp)
965 {
966 struct ftrace_ret_stack *ret_stack;
967 unsigned long return_handler = (unsigned long)dereference_kernel_function_descriptor(return_to_handler);
968 int i;
969
970 if (ret != return_handler)
971 return ret;
972
973 if (!idx)
974 return ret;
975
976 i = *idx ? : task->curr_ret_stack;
977 while (i > 0) {
978 ret_stack = get_ret_stack(task, i, &i);
979 if (!ret_stack)
980 break;
981 /*
982 * For a tail-call, there would be 2 or more ftrace_ret_stacks on
983 * the ret_stack, which record "return_to_handler" as the return
984 * address except for the last one.
985 * But on the real stack, there should be 1 entry because a tail-call
986 * reuses the return address on the stack and jumps to the next function.
987 * Thus we will continue to search for the real return address.
988 */
989 if (ret_stack->retp == retp &&
990 ret_stack->ret != return_handler) {
991 *idx = i;
992 return ret_stack->ret;
993 }
994 }
995
996 return ret;
997 }
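
/*
 * Illustrative sketch only: an arch unwinder typically keeps one graph
 * index per unwind and passes every return address it finds through
 * ftrace_graph_ret_addr(). The frame/addr/retp variables below are
 * hypothetical placeholders for whatever the arch unwinder provides.
 *
 *	int graph_idx = 0;
 *
 *	for each stack frame found by the arch unwinder:
 *		addr = ftrace_graph_ret_addr(task, &graph_idx, addr, retp);
 *		// addr now holds the original return address, even if the
 *		// on-stack value had been rewritten to return_to_handler.
 */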
998
999 static struct ftrace_ops graph_ops = {
1000 .func = ftrace_graph_func,
1001 .flags = FTRACE_OPS_GRAPH_STUB,
1002 #ifdef FTRACE_GRAPH_TRAMP_ADDR
1003 .trampoline = FTRACE_GRAPH_TRAMP_ADDR,
1004 /* trampoline_size is only needed for dynamically allocated tramps */
1005 #endif
1006 };
1007
1008 void fgraph_init_ops(struct ftrace_ops *dst_ops,
1009 struct ftrace_ops *src_ops)
1010 {
1011 dst_ops->flags = FTRACE_OPS_FL_PID | FTRACE_OPS_GRAPH_STUB;
1012
1013 #ifdef CONFIG_DYNAMIC_FTRACE
1014 if (src_ops) {
1015 dst_ops->func_hash = &src_ops->local_hash;
1016 mutex_init(&dst_ops->local_hash.regex_lock);
1017 INIT_LIST_HEAD(&dst_ops->subop_list);
1018 dst_ops->flags |= FTRACE_OPS_FL_INITIALIZED;
1019 dst_ops->private = src_ops->private;
1020 }
1021 #endif
1022 }
1023
1024 /*
1025 * Simply points to ftrace_stub, but with the proper protocol.
1026 * Defined by the linker script in linux/vmlinux.lds.h
1027 */
1028 void ftrace_stub_graph(struct ftrace_graph_ret *trace, struct fgraph_ops *gops,
1029 struct ftrace_regs *fregs);
1030
1031 /* The callbacks that hook a function */
1032 trace_func_graph_ret_t ftrace_graph_return = ftrace_stub_graph;
1033 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
1034
1035 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
1036 static int alloc_retstack_tasklist(unsigned long **ret_stack_list)
1037 {
1038 int i;
1039 int ret = 0;
1040 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
1041 struct task_struct *g, *t;
1042
1043 if (WARN_ON_ONCE(!fgraph_stack_cachep))
1044 return -ENOMEM;
1045
1046 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
1047 ret_stack_list[i] = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
1048 if (!ret_stack_list[i]) {
1049 start = 0;
1050 end = i;
1051 ret = -ENOMEM;
1052 goto free;
1053 }
1054 }
1055
1056 rcu_read_lock();
1057 for_each_process_thread(g, t) {
1058 if (start == end) {
1059 ret = -EAGAIN;
1060 goto unlock;
1061 }
1062
1063 if (t->ret_stack == NULL) {
1064 atomic_set(&t->trace_overrun, 0);
1065 ret_stack_init_task_vars(ret_stack_list[start]);
1066 t->curr_ret_stack = 0;
1067 t->curr_ret_depth = -1;
1068 /* Make sure the tasks see the 0 first: */
1069 smp_wmb();
1070 t->ret_stack = ret_stack_list[start++];
1071 }
1072 }
1073
1074 unlock:
1075 rcu_read_unlock();
1076 free:
1077 for (i = start; i < end; i++)
1078 kmem_cache_free(fgraph_stack_cachep, ret_stack_list[i]);
1079 return ret;
1080 }
1081
1082 static void
1083 ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
1084 struct task_struct *prev,
1085 struct task_struct *next,
1086 unsigned int prev_state)
1087 {
1088 unsigned long long timestamp;
1089
1090 /*
1091 * If the user wants to count the time a function was asleep,
1092 * do not update the time stamps.
1093 */
1094 if (!fgraph_no_sleep_time)
1095 return;
1096
1097 timestamp = trace_clock_local();
1098
1099 prev->ftrace_timestamp = timestamp;
1100
1101 /* only process tasks that we timestamped */
1102 if (!next->ftrace_timestamp)
1103 return;
1104
1105 next->ftrace_sleeptime += timestamp - next->ftrace_timestamp;
1106 }
1107
1108 static DEFINE_PER_CPU(unsigned long *, idle_ret_stack);
1109
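/* Initialize the fgraph state of @t and publish @ret_stack as its shadow stack */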
1110 static void
1111 graph_init_task(struct task_struct *t, unsigned long *ret_stack)
1112 {
1113 atomic_set(&t->trace_overrun, 0);
1114 ret_stack_init_task_vars(ret_stack);
1115 t->ftrace_timestamp = 0;
1116 t->curr_ret_stack = 0;
1117 t->curr_ret_depth = -1;
1118 /* make curr_ret_stack visible before we add the ret_stack */
1119 smp_wmb();
1120 t->ret_stack = ret_stack;
1121 }
1122
1123 /*
1124 * Allocate a return stack for the idle task. May be the first
1125 * time through, or it may be done by CPU hotplug online.
1126 */
1127 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
1128 {
1129 t->curr_ret_stack = 0;
1130 t->curr_ret_depth = -1;
1131 /*
1132 * The idle task has no parent, it either has its own
1133 * stack or no stack at all.
1134 */
1135 if (t->ret_stack)
1136 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
1137
1138 if (ftrace_graph_active) {
1139 unsigned long *ret_stack;
1140
1141 if (WARN_ON_ONCE(!fgraph_stack_cachep))
1142 return;
1143
1144 ret_stack = per_cpu(idle_ret_stack, cpu);
1145 if (!ret_stack) {
1146 ret_stack = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
1147 if (!ret_stack)
1148 return;
1149 per_cpu(idle_ret_stack, cpu) = ret_stack;
1150 }
1151 graph_init_task(t, ret_stack);
1152 }
1153 }
1154
1155 /* Allocate a return stack for newly created task */
1156 void ftrace_graph_init_task(struct task_struct *t)
1157 {
1158 /* Make sure we do not use the parent ret_stack */
1159 t->ret_stack = NULL;
1160 t->curr_ret_stack = 0;
1161 t->curr_ret_depth = -1;
1162
1163 if (ftrace_graph_active) {
1164 unsigned long *ret_stack;
1165
1166 if (WARN_ON_ONCE(!fgraph_stack_cachep))
1167 return;
1168
1169 ret_stack = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
1170 if (!ret_stack)
1171 return;
1172 graph_init_task(t, ret_stack);
1173 }
1174 }
1175
1176 void ftrace_graph_exit_task(struct task_struct *t)
1177 {
1178 unsigned long *ret_stack = t->ret_stack;
1179
1180 t->ret_stack = NULL;
1181 /* NULL must become visible to IRQs before we free it: */
1182 barrier();
1183
1184 if (ret_stack) {
1185 if (WARN_ON_ONCE(!fgraph_stack_cachep))
1186 return;
1187 kmem_cache_free(fgraph_stack_cachep, ret_stack);
1188 }
1189 }
1190
1191 #ifdef CONFIG_DYNAMIC_FTRACE
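/* Entry callback used when PID filtering is enabled; forwards to gops->saved_func only for traced PIDs */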
1192 static int fgraph_pid_func(struct ftrace_graph_ent *trace,
1193 struct fgraph_ops *gops,
1194 struct ftrace_regs *fregs)
1195 {
1196 struct trace_array *tr = gops->ops.private;
1197 int pid;
1198
1199 if (tr) {
1200 pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
1201 if (pid == FTRACE_PID_IGNORE)
1202 return 0;
1203 if (pid != FTRACE_PID_TRACE &&
1204 pid != current->pid)
1205 return 0;
1206 }
1207
1208 return gops->saved_func(trace, gops, fregs);
1209 }
1210
1211 void fgraph_update_pid_func(void)
1212 {
1213 struct fgraph_ops *gops;
1214 struct ftrace_ops *op;
1215
1216 if (!(graph_ops.flags & FTRACE_OPS_FL_INITIALIZED))
1217 return;
1218
1219 list_for_each_entry(op, &graph_ops.subop_list, list) {
1220 if (op->flags & FTRACE_OPS_FL_PID) {
1221 gops = container_of(op, struct fgraph_ops, ops);
1222 gops->entryfunc = ftrace_pids_enabled(op) ?
1223 fgraph_pid_func : gops->saved_func;
1224 if (ftrace_graph_active == 1)
1225 static_call_update(fgraph_func, gops->entryfunc);
1226 }
1227 }
1228 }
1229 #endif
1230
1231 /* Allocate a return stack for each task */
1232 static int start_graph_tracing(void)
1233 {
1234 unsigned long **ret_stack_list;
1235 int ret, cpu;
1236
1237 ret_stack_list = kcalloc(FTRACE_RETSTACK_ALLOC_SIZE,
1238 sizeof(*ret_stack_list), GFP_KERNEL);
1239
1240 if (!ret_stack_list)
1241 return -ENOMEM;
1242
1243 /* The cpu_boot init_task->ret_stack will never be freed */
1244 for_each_online_cpu(cpu) {
1245 if (!idle_task(cpu)->ret_stack)
1246 ftrace_graph_init_idle_task(idle_task(cpu), cpu);
1247 }
1248
1249 do {
1250 ret = alloc_retstack_tasklist(ret_stack_list);
1251 } while (ret == -EAGAIN);
1252
1253 if (!ret) {
1254 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
1255 if (ret)
1256 pr_info("ftrace_graph: Couldn't activate tracepoint"
1257 " probe to kernel_sched_switch\n");
1258 }
1259
1260 kfree(ret_stack_list);
1261 return ret;
1262 }
1263
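/* Clear the fgraph_ops @idx task variable on every task that has a shadow stack */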
1264 static void init_task_vars(int idx)
1265 {
1266 struct task_struct *g, *t;
1267 int cpu;
1268
1269 for_each_online_cpu(cpu) {
1270 if (idle_task(cpu)->ret_stack)
1271 ret_stack_set_task_var(idle_task(cpu), idx, 0);
1272 }
1273
1274 read_lock(&tasklist_lock);
1275 for_each_process_thread(g, t) {
1276 if (t->ret_stack)
1277 ret_stack_set_task_var(t, idx, 0);
1278 }
1279 read_unlock(&tasklist_lock);
1280 }
1281
1282 static void ftrace_graph_enable_direct(bool enable_branch, struct fgraph_ops *gops)
1283 {
1284 trace_func_graph_ent_t func = NULL;
1285 trace_func_graph_ret_t retfunc = NULL;
1286 int i;
1287
1288 if (gops) {
1289 func = gops->entryfunc;
1290 retfunc = gops->retfunc;
1291 fgraph_direct_gops = gops;
1292 } else {
1293 for_each_set_bit(i, &fgraph_array_bitmask,
1294 sizeof(fgraph_array_bitmask) * BITS_PER_BYTE) {
1295 func = fgraph_array[i]->entryfunc;
1296 retfunc = fgraph_array[i]->retfunc;
1297 fgraph_direct_gops = fgraph_array[i];
1298 }
1299 }
1300 if (WARN_ON_ONCE(!func))
1301 return;
1302
1303 static_call_update(fgraph_func, func);
1304 static_call_update(fgraph_retfunc, retfunc);
1305 if (enable_branch)
1306 static_branch_enable(&fgraph_do_direct);
1307 }
1308
1309 static void ftrace_graph_disable_direct(bool disable_branch)
1310 {
1311 if (disable_branch)
1312 static_branch_disable(&fgraph_do_direct);
1313 static_call_update(fgraph_func, ftrace_graph_entry_stub);
1314 static_call_update(fgraph_retfunc, ftrace_graph_ret_stub);
1315 fgraph_direct_gops = &fgraph_stub;
1316 }
1317
1318 /* The cpu_boot init_task->ret_stack will never be freed */
1319 static int fgraph_cpu_init(unsigned int cpu)
1320 {
1321 if (!idle_task(cpu)->ret_stack)
1322 ftrace_graph_init_idle_task(idle_task(cpu), cpu);
1323 return 0;
1324 }
1325
1326 int register_ftrace_graph(struct fgraph_ops *gops)
1327 {
1328 static bool fgraph_initialized;
1329 int command = 0;
1330 int ret = 0;
1331 int i = -1;
1332
1333 if (WARN_ONCE(gops->ops.flags & FTRACE_OPS_FL_GRAPH,
1334 "function graph ops registered again"))
1335 return -EBUSY;
1336
1337 guard(mutex)(&ftrace_lock);
1338
1339 if (!fgraph_stack_cachep) {
1340 fgraph_stack_cachep = kmem_cache_create("fgraph_stack",
1341 SHADOW_STACK_SIZE,
1342 SHADOW_STACK_SIZE, 0, NULL);
1343 if (!fgraph_stack_cachep)
1344 return -ENOMEM;
1345 }
1346
1347 if (!fgraph_initialized) {
1348 ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "fgraph:online",
1349 fgraph_cpu_init, NULL);
1350 if (ret < 0) {
1351 pr_warn("fgraph: Error to init cpu hotplug support\n");
1352 return ret;
1353 }
1354 fgraph_initialized = true;
1355 ret = 0;
1356 }
1357
1358 if (!fgraph_array[0]) {
1359 /* The array must always have real data on it */
1360 for (i = 0; i < FGRAPH_ARRAY_SIZE; i++)
1361 fgraph_array[i] = &fgraph_stub;
1362 fgraph_lru_init();
1363 }
1364
1365 i = fgraph_lru_alloc_index();
1366 if (i < 0 || WARN_ON_ONCE(fgraph_array[i] != &fgraph_stub))
1367 return -ENOSPC;
1368 gops->idx = i;
1369
1370 ftrace_graph_active++;
1371
1372 /* Always save the function, and reset at unregistering */
1373 gops->saved_func = gops->entryfunc;
1374 #ifdef CONFIG_DYNAMIC_FTRACE
1375 if (ftrace_pids_enabled(&gops->ops))
1376 gops->entryfunc = fgraph_pid_func;
1377 #endif
1378
1379 if (ftrace_graph_active == 2)
1380 ftrace_graph_disable_direct(true);
1381
1382 if (ftrace_graph_active == 1) {
1383 ftrace_graph_enable_direct(false, gops);
1384 register_pm_notifier(&ftrace_suspend_notifier);
1385 ret = start_graph_tracing();
1386 if (ret)
1387 goto error;
1388 /*
1389 * Some archs just test to see if these are not
1390 * the default function
1391 */
1392 ftrace_graph_return = return_run;
1393 ftrace_graph_entry = entry_run;
1394 command = FTRACE_START_FUNC_RET;
1395 } else {
1396 init_task_vars(gops->idx);
1397 }
1398
1399 gops->ops.flags |= FTRACE_OPS_FL_GRAPH;
1400
1401 ret = ftrace_startup_subops(&graph_ops, &gops->ops, command);
1402 if (!ret)
1403 fgraph_array[i] = gops;
1404
1405 error:
1406 if (ret) {
1407 ftrace_graph_active--;
1408 gops->saved_func = NULL;
1409 fgraph_lru_release_index(i);
1410 if (!ftrace_graph_active)
1411 unregister_pm_notifier(&ftrace_suspend_notifier);
1412 }
1413 return ret;
1414 }
1415
1416 void unregister_ftrace_graph(struct fgraph_ops *gops)
1417 {
1418 int command = 0;
1419
1420 if (WARN_ONCE(!(gops->ops.flags & FTRACE_OPS_FL_GRAPH),
1421 "function graph ops unregistered without registering"))
1422 return;
1423
1424 guard(mutex)(&ftrace_lock);
1425
1426 if (unlikely(!ftrace_graph_active))
1427 goto out;
1428
1429 if (unlikely(gops->idx < 0 || gops->idx >= FGRAPH_ARRAY_SIZE ||
1430 fgraph_array[gops->idx] != gops))
1431 goto out;
1432
1433 if (fgraph_lru_release_index(gops->idx) < 0)
1434 goto out;
1435
1436 fgraph_array[gops->idx] = &fgraph_stub;
1437
1438 ftrace_graph_active--;
1439
1440 if (!ftrace_graph_active)
1441 command = FTRACE_STOP_FUNC_RET;
1442
1443 ftrace_shutdown_subops(&graph_ops, &gops->ops, command);
1444
1445 if (ftrace_graph_active == 1)
1446 ftrace_graph_enable_direct(true, NULL);
1447 else if (!ftrace_graph_active)
1448 ftrace_graph_disable_direct(false);
1449
1450 if (!ftrace_graph_active) {
1451 ftrace_graph_return = ftrace_stub_graph;
1452 ftrace_graph_entry = ftrace_graph_entry_stub;
1453 unregister_pm_notifier(&ftrace_suspend_notifier);
1454 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
1455 }
1456 gops->saved_func = NULL;
1457 out:
1458 gops->ops.flags &= ~FTRACE_OPS_FL_GRAPH;
1459 }
1460