Lines Matching +full:cpu +full:-offset (matched lines are excerpted from kernel/trace/fgraph.c)

1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
34 * bits: 0 - 9 offset in words from the previous ftrace_ret_stack
36 * bits: 10 - 11 Type of storage
37 * 0 - reserved
38 * 1 - bitmap of fgraph_array index
39 * 2 - reserved data
42 * bits: 12 - 27 The bitmap of fgraph_ops fgraph_array index
43 * That is, it's a bitmask of 0-15 (16 bits)
53 * bits: 12 - 17 The size in words that is stored
54 * bits: 18 - 23 The index of fgraph_array, which shows who is stored
68 * |                                            | <- task->curr_ret_stack
69 * +--------------------------------------------+
73 * | ((2 - 1) << FGRAPH_DATA_SHIFT)| \          | The data size is 2 words.
75 * | (offset2:FGRAPH_FRAME_OFFSET+3)            | <- the offset2 is from here
76 * +--------------------------------------------+ ( It is 4 words from the ret_stack)
79 * +--------------------------------------------+
84 * | (offset1:FGRAPH_FRAME_OFFSET)              | <- the offset1 is from here
85 * +--------------------------------------------+
87 * |   (stores the saved ret pointer)           | <- the offset points here
88 * +--------------------------------------------+
95 * fetched, then it looks at the task's curr_ret_stack offset, if it
97 * the value by FGRAPH_FRAME_OFFSET_MASK to get the offset of the
104 * FGRAPH_FRAME_OFFSET (0-9) holds the offset delta to the fgraph frame
105 * FGRAPH_TYPE (10-11) holds the type of word this is.
109 #define FGRAPH_FRAME_OFFSET_MASK GENMASK(FGRAPH_FRAME_OFFSET_BITS - 1, 0)
112 #define FGRAPH_TYPE_MASK GENMASK(FGRAPH_TYPE_BITS - 1, 0)
123 * FGRAPH_INDEX (12-27) bits holding the gops index wanting return callback called
126 #define FGRAPH_INDEX_MASK GENMASK(FGRAPH_INDEX_BITS - 1, 0)
131 * FGRAPH_DATA (12-17) bits hold the size of data (in words)
132 * FGRAPH_INDEX (18-23) bits hold the index for which gops->idx the data is for
135 * data_size == 0 means 1 word, and 31 (=2^5 - 1) means 32 words.
138 #define FGRAPH_DATA_MASK GENMASK(FGRAPH_DATA_BITS - 1, 0)
143 #define FGRAPH_DATA_INDEX_MASK GENMASK(FGRAPH_DATA_INDEX_BITS - 1, 0)
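
Putting these masks together, decoding one shadow-stack tag word could look like the sketch below. This is an illustrative fragment, not part of the file; it assumes the matching *_SHIFT macros (FGRAPH_TYPE_SHIFT, FGRAPH_INDEX_SHIFT, FGRAPH_DATA_SHIFT, FGRAPH_DATA_INDEX_SHIFT) and the FGRAPH_TYPE_BITMAP/FGRAPH_TYPE_DATA type values described in the comments above.

        /* Illustrative decode of one ret_stack tag word (not from the file) */
        static void decode_fgraph_word(unsigned long val)
        {
                int offset = val & FGRAPH_FRAME_OFFSET_MASK;                 /* bits 0-9   */
                int type   = (val >> FGRAPH_TYPE_SHIFT) & FGRAPH_TYPE_MASK;  /* bits 10-11 */

                if (type == FGRAPH_TYPE_BITMAP) {
                        /* bits 12-27: one bit per fgraph_ops index wanting a return callback */
                        unsigned long bitmap = (val >> FGRAPH_INDEX_SHIFT) & FGRAPH_INDEX_MASK;

                        pr_info("frame %d words back, gops bitmap %#lx\n", offset, bitmap);
                } else if (type == FGRAPH_TYPE_DATA) {
                        /* stored size is "words - 1", so add 1 back when decoding */
                        int size = ((val >> FGRAPH_DATA_SHIFT) & FGRAPH_DATA_MASK) + 1;
                        int idx  = (val >> FGRAPH_DATA_INDEX_SHIFT) & FGRAPH_DATA_INDEX_MASK;

                        pr_info("%d data words for gops %d, frame %d words back\n",
                                size, idx, offset);
                }
        }
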
154 * SHADOW_STACK_MAX_OFFSET: The max offset of the stack for a new frame to be added
160 (SHADOW_STACK_OFFSET - (FGRAPH_FRAME_OFFSET + 1 + FGRAPH_ARRAY_SIZE))
162 /* RET_STACK(): Return the frame from a given @offset from task @t */
163 #define RET_STACK(t, offset) ((struct ftrace_ret_stack *)(&(t)->ret_stack[offset])) argument
170 ((unsigned long *)(&(ret_stack)[SHADOW_STACK_OFFSET - FGRAPH_ARRAY_SIZE]))
198 WARN_ON_ONCE(fgraph_lru_table[fgraph_lru_last] != -1)) in fgraph_lru_release_index()
199 return -1; in fgraph_lru_release_index()
214 if (idx == -1) in fgraph_lru_alloc_index()
215 return -1; in fgraph_lru_alloc_index()
217 fgraph_lru_table[fgraph_lru_next] = -1; in fgraph_lru_alloc_index()
224 /* Get the offset to the fgraph frame from a ret_stack value */
248 /* Get the word from the ret_stack at @offset */
249 static inline unsigned long get_fgraph_entry(struct task_struct *t, int offset) in get_fgraph_entry() argument
251 return t->ret_stack[offset]; in get_fgraph_entry()
254 /* Get the FRAME_OFFSET from the word from the @offset on ret_stack */
255 static inline int get_frame_offset(struct task_struct *t, int offset) in get_frame_offset() argument
257 return __get_offset(t->ret_stack[offset]); in get_frame_offset()
260 /* For BITMAP type: get the bitmask from the @offset at ret_stack */
262 get_bitmap_bits(struct task_struct *t, int offset) in get_bitmap_bits() argument
264 return (t->ret_stack[offset] >> FGRAPH_INDEX_SHIFT) & FGRAPH_INDEX_MASK; in get_bitmap_bits()
267 /* Write the bitmap to the ret_stack at @offset (does index, offset and bitmask) */
269 set_bitmap(struct task_struct *t, int offset, unsigned long bitmap) in set_bitmap() argument
271 t->ret_stack[offset] = (bitmap << FGRAPH_INDEX_SHIFT) | in set_bitmap()
275 /* For DATA type: get the data saved under the ret_stack word at @offset */
276 static inline void *get_data_type_data(struct task_struct *t, int offset) in get_data_type_data() argument
278 unsigned long val = t->ret_stack[offset]; in get_data_type_data()
282 offset -= __get_data_size(val); in get_data_type_data()
283 return (void *)&t->ret_stack[offset]; in get_data_type_data()
287 static inline unsigned long make_data_type_val(int idx, int size, int offset) in make_data_type_val() argument
290 ((size - 1) << FGRAPH_DATA_SHIFT) | in make_data_type_val()
291 (FGRAPH_TYPE_DATA << FGRAPH_TYPE_SHIFT) | offset; in make_data_type_val()
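
As a worked example (hypothetical values mirroring the diagram near the top of the file, and assuming the idx term contributed by the first, unmatched line of make_data_type_val()): reserving 2 words for fgraph_ops index 3, tagged at offset FGRAPH_FRAME_OFFSET + 3, would produce a word of roughly:

        unsigned long val;

        val = (3UL << FGRAPH_DATA_INDEX_SHIFT) |        /* data belongs to gops index 3 */
              ((2 - 1) << FGRAPH_DATA_SHIFT) |          /* 2 words stored (size - 1)    */
              (FGRAPH_TYPE_DATA << FGRAPH_TYPE_SHIFT) |
              (FGRAPH_FRAME_OFFSET + 3);                /* distance back to the frame   */
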
309 unsigned long *gvals = SHADOW_STACK_TASK_VARS(t->ret_stack); in ret_stack_set_task_var()
317 unsigned long *gvals = SHADOW_STACK_TASK_VARS(t->ret_stack); in ret_stack_get_task_var()
330 * fgraph_reserve_data - Reserve storage on the task's ret_stack
350 int curr_ret_stack = current->curr_ret_stack; in fgraph_reserve_data()
357 data_size = (size_bytes + sizeof(long) - 1) >> (sizeof(long) == 4 ? 2 : 3); in fgraph_reserve_data()
359 val = get_fgraph_entry(current, curr_ret_stack - 1); in fgraph_reserve_data()
360 data = &current->ret_stack[curr_ret_stack]; in fgraph_reserve_data()
369 current->ret_stack[curr_ret_stack - 1] = val; in fgraph_reserve_data()
373 current->curr_ret_stack = curr_ret_stack; in fgraph_reserve_data()
375 current->ret_stack[curr_ret_stack - 1] = val; in fgraph_reserve_data()
381 * fgraph_retrieve_data - Retrieve stored data from fgraph_reserve_data()
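
A typical pairing of the two calls, sketched below with hypothetical names (my_data, my_entry, my_return) and assuming the usual ftrace and trace_clock headers: the entry handler reserves a few words on the shadow stack, and the return handler reads them back.

        struct my_data {
                u64     entry_time;
        };

        static int my_entry(struct ftrace_graph_ent *trace,
                            struct fgraph_ops *gops, struct ftrace_regs *fregs)
        {
                struct my_data *data;

                data = fgraph_reserve_data(gops->idx, sizeof(*data));
                if (!data)
                        return 0;       /* shadow stack full: skip this function */

                data->entry_time = trace_clock_local();
                return 1;
        }

        static void my_return(struct ftrace_graph_ret *trace,
                              struct fgraph_ops *gops, struct ftrace_regs *fregs)
        {
                struct my_data *data;
                int size;

                data = fgraph_retrieve_data(gops->idx, &size);
                if (data)
                        pr_info("%ps took %llu ns\n", (void *)trace->func,
                                (unsigned long long)(trace_clock_local() - data->entry_time));
        }
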
401 * fgraph_get_task_var - retrieve a task specific state variable
413 return ret_stack_get_task_var(current, gops->idx); in fgraph_get_task_var()
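
fgraph_get_task_var() returns a pointer to the per-task long reserved for this gops->idx in the SHADOW_STACK_TASK_VARS area; a tracer might use it, for instance, as a per-task counter (illustrative sketch, hypothetical name):

        static int count_entry(struct ftrace_graph_ent *trace,
                               struct fgraph_ops *gops, struct ftrace_regs *fregs)
        {
                unsigned long *count = fgraph_get_task_var(gops);

                (*count)++;             /* per-task state, survives across calls */
                return 0;               /* no return callback needed for this use */
        }
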
417 * @offset: The offset into @t->ret_stack to find the ret_stack entry
418 * @frame_offset: Where to place the offset into @t->ret_stack of that entry
420 * Returns a pointer to the previous ret_stack below @offset or NULL
425 * offset = task->curr_ret_stack;
427 * ret_stack = get_ret_stack(task, offset, &offset);
434 get_ret_stack(struct task_struct *t, int offset, int *frame_offset) in get_ret_stack() argument
440 if (unlikely(offset <= 0)) in get_ret_stack()
443 offs = get_frame_offset(t, --offset); in get_ret_stack()
444 if (WARN_ON_ONCE(offs <= 0 || offs > offset)) in get_ret_stack()
447 offset -= offs; in get_ret_stack()
449 *frame_offset = offset; in get_ret_stack()
450 return RET_STACK(t, offset); in get_ret_stack()
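
Completing the walk that the kerneldoc above sketches (an illustration only; the same pattern appears later in ftrace_graph_get_ret_stack() and ftrace_graph_top_ret_addr()):

        int offset = task->curr_ret_stack;
        struct ftrace_ret_stack *ret_stack;

        do {
                ret_stack = get_ret_stack(task, offset, &offset);
                /* ret_stack now points at the next older frame, or NULL at the bottom */
        } while (ret_stack);
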
454 * fgraph_retrieve_parent_data - get data from a parent function
467 int offset = current->curr_ret_stack; in fgraph_retrieve_parent_data() local
470 if (offset <= 0) in fgraph_retrieve_parent_data()
476 ret_stack = get_ret_stack(current, offset, &next_offset); in fgraph_retrieve_parent_data()
477 if (!ret_stack || --depth < 0) in fgraph_retrieve_parent_data()
479 offset = next_offset; in fgraph_retrieve_parent_data()
485 offset--; in fgraph_retrieve_parent_data()
487 val = get_fgraph_entry(current, offset); in fgraph_retrieve_parent_data()
491 offset -= __get_data_size(val) + 1; in fgraph_retrieve_parent_data()
492 val = get_fgraph_entry(current, offset); in fgraph_retrieve_parent_data()
498 return get_data_type_data(current, offset); in fgraph_retrieve_parent_data()
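
A callback could use this to peek at data that a caller's entry handler reserved; for example (hypothetical sketch, assuming the (idx, &size_bytes, depth) parameter order, with depth 1 meaning the immediate parent frame):

        int size;
        void *parent_data = fgraph_retrieve_parent_data(gops->idx, &size, 1);

        if (parent_data)
                pr_info("parent reserved %d bytes for gops %d\n", size, gops->idx);
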
548 * ftrace_graph_stop - set to permanently disable function graph tracing
568 int offset; in ftrace_push_return_trace() local
571 return -EBUSY; in ftrace_push_return_trace()
573 if (!current->ret_stack) in ftrace_push_return_trace()
574 return -EBUSY; in ftrace_push_return_trace()
591 if (current->curr_ret_stack + FGRAPH_FRAME_OFFSET + 1 >= SHADOW_STACK_MAX_OFFSET) { in ftrace_push_return_trace()
592 atomic_inc(&current->trace_overrun); in ftrace_push_return_trace()
593 return -EBUSY; in ftrace_push_return_trace()
596 offset = READ_ONCE(current->curr_ret_stack); in ftrace_push_return_trace()
597 ret_stack = RET_STACK(current, offset); in ftrace_push_return_trace()
598 offset += FGRAPH_FRAME_OFFSET; in ftrace_push_return_trace()
600 /* ret offset = FGRAPH_FRAME_OFFSET ; type = reserved */ in ftrace_push_return_trace()
601 current->ret_stack[offset] = val; in ftrace_push_return_trace()
602 ret_stack->ret = ret; in ftrace_push_return_trace()
605 * or an offset where to find the next ret_stack. Even though the in ftrace_push_return_trace()
607 * offset to find the ret_stack before we increment the stack point. in ftrace_push_return_trace()
610 * offset will still be correct (even though the 'ret' won't be). in ftrace_push_return_trace()
611 * What we worry about is the offset being correct after we increment in ftrace_push_return_trace()
612 * the curr_ret_stack and before we update that offset, as if an in ftrace_push_return_trace()
614 * at least a correct offset! in ftrace_push_return_trace()
617 WRITE_ONCE(current->curr_ret_stack, offset + 1); in ftrace_push_return_trace()
625 current->ret_stack[offset] = val; in ftrace_push_return_trace()
627 ret_stack->ret = ret; in ftrace_push_return_trace()
628 ret_stack->func = func; in ftrace_push_return_trace()
630 ret_stack->fp = frame_pointer; in ftrace_push_return_trace()
632 ret_stack->retp = retp; in ftrace_push_return_trace()
633 return offset; in ftrace_push_return_trace()
657 int offset; in function_graph_enter_regs() local
663 return -EBUSY; in function_graph_enter_regs()
666 trace.depth = ++current->curr_ret_depth; in function_graph_enter_regs()
668 offset = ftrace_push_return_trace(ret, func, frame_pointer, retp, 0); in function_graph_enter_regs()
669 if (offset < 0) in function_graph_enter_regs()
674 int save_curr_ret_stack = current->curr_ret_stack; in function_graph_enter_regs()
677 bitmap |= BIT(fgraph_direct_gops->idx); in function_graph_enter_regs()
680 current->curr_ret_stack = save_curr_ret_stack; in function_graph_enter_regs()
692 save_curr_ret_stack = current->curr_ret_stack; in function_graph_enter_regs()
693 if (ftrace_ops_test(&gops->ops, func, NULL) && in function_graph_enter_regs()
694 gops->entryfunc(&trace, gops, fregs)) in function_graph_enter_regs()
698 current->curr_ret_stack = save_curr_ret_stack; in function_graph_enter_regs()
706 * Since this function uses fgraph_idx = 0 as a tail-call checking in function_graph_enter_regs()
709 set_bitmap(current, offset, bitmap | BIT(0)); in function_graph_enter_regs()
713 current->curr_ret_stack -= FGRAPH_FRAME_OFFSET + 1; in function_graph_enter_regs()
715 current->curr_ret_depth--; in function_graph_enter_regs()
717 return -EBUSY; in function_graph_enter_regs()
723 unsigned long frame_pointer, int *offset) in ftrace_pop_return_trace() argument
727 ret_stack = get_ret_stack(current, current->curr_ret_stack, offset); in ftrace_pop_return_trace()
732 current->curr_ret_stack); in ftrace_pop_return_trace()
747 * Currently, x86_32 with optimize for size (-Os) makes the latest in ftrace_pop_return_trace()
750 * Note, -mfentry does not use frame pointers, and this test in ftrace_pop_return_trace()
753 if (unlikely(ret_stack->fp != frame_pointer)) { in ftrace_pop_return_trace()
757 ret_stack->fp, in ftrace_pop_return_trace()
759 (void *)ret_stack->func, in ftrace_pop_return_trace()
760 ret_stack->ret); in ftrace_pop_return_trace()
766 *offset += FGRAPH_FRAME_OFFSET; in ftrace_pop_return_trace()
767 *ret = ret_stack->ret; in ftrace_pop_return_trace()
768 trace->func = ret_stack->func; in ftrace_pop_return_trace()
769 trace->overrun = atomic_read(&current->trace_overrun); in ftrace_pop_return_trace()
770 trace->depth = current->curr_ret_depth; in ftrace_pop_return_trace()
807 * Send the trace to the ring-buffer.
817 int offset; in __ftrace_return_to_handler() local
820 ret_stack = ftrace_pop_return_trace(&trace, &ret, frame_pointer, &offset); in __ftrace_return_to_handler()
836 bitmap = get_bitmap_bits(current, offset); in __ftrace_return_to_handler()
840 if (test_bit(fgraph_direct_gops->idx, &bitmap)) in __ftrace_return_to_handler()
851 gops->retfunc(&trace, gops, fregs); in __ftrace_return_to_handler()
861 current->curr_ret_stack = offset - FGRAPH_FRAME_OFFSET; in __ftrace_return_to_handler()
863 current->curr_ret_depth--; in __ftrace_return_to_handler()
885 * ftrace_graph_get_ret_stack - return the entry of the shadow stack
899 int offset = task->curr_ret_stack; in ftrace_graph_get_ret_stack() local
901 if (offset < 0) in ftrace_graph_get_ret_stack()
905 ret_stack = get_ret_stack(task, offset, &offset); in ftrace_graph_get_ret_stack()
906 } while (ret_stack && --idx >= 0); in ftrace_graph_get_ret_stack()
912 * ftrace_graph_top_ret_addr - return the top return address in the shadow stack
922 int offset = task->curr_ret_stack; in ftrace_graph_top_ret_addr() local
924 if (offset < 0) in ftrace_graph_top_ret_addr()
928 ret_stack = get_ret_stack(task, offset, &offset); in ftrace_graph_top_ret_addr()
929 } while (ret_stack && ret_stack->ret == return_handler); in ftrace_graph_top_ret_addr()
931 return ret_stack ? ret_stack->ret : 0; in ftrace_graph_top_ret_addr()
935 * ftrace_graph_ret_addr - return the original value of the return address
967 i = *idx ? : task->curr_ret_stack; in ftrace_graph_ret_addr()
973 * For the tail-call, there would be 2 or more ftrace_ret_stacks on in ftrace_graph_ret_addr()
976 * But on the real stack, there should be 1 entry because tail-call in ftrace_graph_ret_addr()
980 if (ret_stack->retp == retp && in ftrace_graph_ret_addr()
981 ret_stack->ret != return_handler) { in ftrace_graph_ret_addr()
983 return ret_stack->ret; in ftrace_graph_ret_addr()
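
Architecture stack unwinders are the main consumer: when an address found on the real stack is the fgraph trampoline, ftrace_graph_ret_addr() swaps in the saved return address (illustrative fragment; task, graph_idx and stack_slot are hypothetical names from an unwinder's state):

        unsigned long addr = *(unsigned long *)stack_slot;
        int graph_idx = 0;

        /* Returns addr unchanged unless it is the fgraph return trampoline */
        addr = ftrace_graph_ret_addr(task, &graph_idx, addr,
                                     (unsigned long *)stack_slot);
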
1002 dst_ops->flags = FTRACE_OPS_FL_PID | FTRACE_OPS_GRAPH_STUB; in fgraph_init_ops()
1006 dst_ops->func_hash = &src_ops->local_hash; in fgraph_init_ops()
1007 mutex_init(&dst_ops->local_hash.regex_lock); in fgraph_init_ops()
1008 INIT_LIST_HEAD(&dst_ops->subop_list); in fgraph_init_ops()
1009 dst_ops->flags |= FTRACE_OPS_FL_INITIALIZED; in fgraph_init_ops()
1039 return -ENOMEM; in alloc_retstack_tasklist()
1046 ret = -ENOMEM; in alloc_retstack_tasklist()
1054 ret = -EAGAIN; in alloc_retstack_tasklist()
1058 if (t->ret_stack == NULL) { in alloc_retstack_tasklist()
1059 atomic_set(&t->trace_overrun, 0); in alloc_retstack_tasklist()
1061 t->curr_ret_stack = 0; in alloc_retstack_tasklist()
1062 t->curr_ret_depth = -1; in alloc_retstack_tasklist()
1065 t->ret_stack = ret_stack_list[start++]; in alloc_retstack_tasklist()
1094 prev->ftrace_timestamp = timestamp; in ftrace_graph_probe_sched_switch()
1097 if (!next->ftrace_timestamp) in ftrace_graph_probe_sched_switch()
1100 next->ftrace_sleeptime += timestamp - next->ftrace_timestamp; in ftrace_graph_probe_sched_switch()
1108 atomic_set(&t->trace_overrun, 0); in graph_init_task()
1110 t->ftrace_timestamp = 0; in graph_init_task()
1111 t->curr_ret_stack = 0; in graph_init_task()
1112 t->curr_ret_depth = -1; in graph_init_task()
1115 t->ret_stack = ret_stack; in graph_init_task()
1120 * time through, or it may be done by CPU hotplug online.
1122 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) in ftrace_graph_init_idle_task() argument
1124 t->curr_ret_stack = 0; in ftrace_graph_init_idle_task()
1125 t->curr_ret_depth = -1; in ftrace_graph_init_idle_task()
1130 if (t->ret_stack) in ftrace_graph_init_idle_task()
1131 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu)); in ftrace_graph_init_idle_task()
1139 ret_stack = per_cpu(idle_ret_stack, cpu); in ftrace_graph_init_idle_task()
1144 per_cpu(idle_ret_stack, cpu) = ret_stack; in ftrace_graph_init_idle_task()
1154 t->ret_stack = NULL; in ftrace_graph_init_task()
1155 t->curr_ret_stack = 0; in ftrace_graph_init_task()
1156 t->curr_ret_depth = -1; in ftrace_graph_init_task()
1173 unsigned long *ret_stack = t->ret_stack; in ftrace_graph_exit_task()
1175 t->ret_stack = NULL; in ftrace_graph_exit_task()
1191 struct trace_array *tr = gops->ops.private; in fgraph_pid_func()
1195 pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid); in fgraph_pid_func()
1199 pid != current->pid) in fgraph_pid_func()
1203 return gops->saved_func(trace, gops, fregs); in fgraph_pid_func()
1215 if (op->flags & FTRACE_OPS_FL_PID) { in fgraph_update_pid_func()
1217 gops->entryfunc = ftrace_pids_enabled(op) ? in fgraph_update_pid_func()
1218 fgraph_pid_func : gops->saved_func; in fgraph_update_pid_func()
1220 static_call_update(fgraph_func, gops->entryfunc); in fgraph_update_pid_func()
1230 int ret, cpu; in start_graph_tracing() local
1236 return -ENOMEM; in start_graph_tracing()
1238 /* The cpu_boot init_task->ret_stack will never be freed */ in start_graph_tracing()
1239 for_each_online_cpu(cpu) { in start_graph_tracing()
1240 if (!idle_task(cpu)->ret_stack) in start_graph_tracing()
1241 ftrace_graph_init_idle_task(idle_task(cpu), cpu); in start_graph_tracing()
1246 } while (ret == -EAGAIN); in start_graph_tracing()
1262 int cpu; in init_task_vars() local
1264 for_each_online_cpu(cpu) { in init_task_vars()
1265 if (idle_task(cpu)->ret_stack) in init_task_vars()
1266 ret_stack_set_task_var(idle_task(cpu), idx, 0); in init_task_vars()
1271 if (t->ret_stack) in init_task_vars()
1284 func = gops->entryfunc; in ftrace_graph_enable_direct()
1285 retfunc = gops->retfunc; in ftrace_graph_enable_direct()
1290 func = fgraph_array[i]->entryfunc; in ftrace_graph_enable_direct()
1291 retfunc = fgraph_array[i]->retfunc; in ftrace_graph_enable_direct()
1313 /* The cpu_boot init_task->ret_stack will never be freed */
1314 static int fgraph_cpu_init(unsigned int cpu) in fgraph_cpu_init() argument
1316 if (!idle_task(cpu)->ret_stack) in fgraph_cpu_init()
1317 ftrace_graph_init_idle_task(idle_task(cpu), cpu); in fgraph_cpu_init()
1326 int i = -1; in register_ftrace_graph()
1335 return -ENOMEM; in register_ftrace_graph()
1342 pr_warn("fgraph: Error to init cpu hotplug support\n"); in register_ftrace_graph()
1358 return -ENOSPC; in register_ftrace_graph()
1359 gops->idx = i; in register_ftrace_graph()
1380 init_task_vars(gops->idx); in register_ftrace_graph()
1383 gops->saved_func = gops->entryfunc; in register_ftrace_graph()
1385 ret = ftrace_startup_subops(&graph_ops, &gops->ops, command); in register_ftrace_graph()
1391 ftrace_graph_active--; in register_ftrace_graph()
1392 gops->saved_func = NULL; in register_ftrace_graph()
1407 if (unlikely(gops->idx < 0 || gops->idx >= FGRAPH_ARRAY_SIZE || in unregister_ftrace_graph()
1408 fgraph_array[gops->idx] != gops)) in unregister_ftrace_graph()
1411 if (fgraph_lru_release_index(gops->idx) < 0) in unregister_ftrace_graph()
1414 fgraph_array[gops->idx] = &fgraph_stub; in unregister_ftrace_graph()
1416 ftrace_graph_active--; in unregister_ftrace_graph()
1421 ftrace_shutdown_subops(&graph_ops, &gops->ops, command); in unregister_ftrace_graph()
1434 gops->saved_func = NULL; in unregister_ftrace_graph()
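
Putting the registration pieces together, a minimal built-in client of this API might look like the sketch below. All names here are hypothetical; the callback signatures follow the calls made by function_graph_enter_regs() and __ftrace_return_to_handler() above, and register_ftrace_graph() fills in gops->idx with the slot it allocates.

        #include <linux/ftrace.h>

        static int sketch_entry(struct ftrace_graph_ent *trace,
                                struct fgraph_ops *gops, struct ftrace_regs *fregs)
        {
                return 1;               /* non-zero: also invoke the return handler */
        }

        static void sketch_return(struct ftrace_graph_ret *trace,
                                  struct fgraph_ops *gops, struct ftrace_regs *fregs)
        {
        }

        static struct fgraph_ops sketch_gops = {
                .entryfunc      = sketch_entry,
                .retfunc        = sketch_return,
        };

        static int sketch_start(void)
        {
                return register_ftrace_graph(&sketch_gops);     /* 0 on success */
        }

        static void sketch_stop(void)
        {
                unregister_ftrace_graph(&sketch_gops);
        }
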