// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure to hook into function calls and returns.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 * Highly modified by Steven Rostedt (VMware).
 */
#include <linux/bits.h>
#include <linux/jump_label.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/static_call.h>
#include <linux/slab.h>

#include <trace/events/sched.h>

#include "ftrace_internal.h"
#include "trace.h"

/*
 * FGRAPH_FRAME_SIZE:   Size in bytes of the meta data on the shadow stack
 * FGRAPH_FRAME_OFFSET: Size in long words of the meta data frame
 */
#define FGRAPH_FRAME_SIZE	sizeof(struct ftrace_ret_stack)
#define FGRAPH_FRAME_OFFSET	DIV_ROUND_UP(FGRAPH_FRAME_SIZE, sizeof(long))

/*
 * On entry to a function (via function_graph_enter()), a new fgraph frame
 * (ftrace_ret_stack) is pushed onto the stack as well as a word that
 * holds a bitmask and a type (called "bitmap"). The bitmap is defined as:
 *
 * bits:  0 -  9	offset in words from the previous ftrace_ret_stack
 *
 * bits: 10 - 11	Type of storage
 *			  0 - reserved
 *			  1 - bitmap of fgraph_array index
 *			  2 - reserved data
 *
 * For type with "bitmap of fgraph_array index" (FGRAPH_TYPE_BITMAP):
 *  bits: 12 - 27	The bitmap of fgraph_ops fgraph_array index
 *			That is, it's a bitmask of 0-15 (16 bits),
 *			where if a corresponding ops in the fgraph_array[]
 *			expects a callback from the return of the function,
 *			its corresponding bit will be set.
 *
 * The top of the ret_stack (when not empty) will always have a reference
 * word that points to the last fgraph frame that was saved.
 *
 * For reserved data (FGRAPH_TYPE_DATA):
 *  bits: 12 - 17	The size in words that is stored
 *  bits: 18 - 23	The index of fgraph_array, which shows who is stored
 *
 * That is, at the end of function_graph_enter(), if the first and fourth
 * fgraph_ops on the fgraph_array[] (index 0 and 3) need their retfunc called
 * on the return of the function being traced, and the fourth fgraph_ops
 * stored two words of data, this is what will be on the task's shadow
 * ret_stack: (the stack grows upward)
 *
 *  ret_stack[SHADOW_STACK_OFFSET]
 * | SHADOW_STACK_TASK_VARS(ret_stack)[15]      |
 * ...
 * | SHADOW_STACK_TASK_VARS(ret_stack)[0]       |
 *  ret_stack[SHADOW_STACK_MAX_OFFSET]
 * ...
 * |                                            | <- task->curr_ret_stack
 * +--------------------------------------------+
 * | (3 << 17) | (1 << 12) | (2 << 10) | offset2|
 * |    *or put another way*                    |
 * | (3 << FGRAPH_DATA_INDEX_SHIFT)| \          | This is for fgraph_ops[3].
 * | ((2 - 1) << FGRAPH_DATA_SHIFT)| \          | The data size is 2 words.
 * | (FGRAPH_TYPE_DATA << FGRAPH_TYPE_SHIFT)| \ |
 * | (offset2:FGRAPH_FRAME_OFFSET+3)            | <- the offset2 is from here
 * +--------------------------------------------+ ( It is 4 words from the ret_stack)
 * |            STORED DATA WORD 2              |
 * |            STORED DATA WORD 1              |
 * +--------------------------------------------+
 * | (9 << 12) | (1 << 10) | FGRAPH_FRAME_OFFSET|
 * |    *or put another way*                    |
 * | (BIT(3)|BIT(0)) << FGRAPH_INDEX_SHIFT | \  |
 * | FGRAPH_TYPE_BITMAP << FGRAPH_TYPE_SHIFT| \ |
 * | (offset1:FGRAPH_FRAME_OFFSET)              | <- the offset1 is from here
 * +--------------------------------------------+
 * | struct ftrace_ret_stack                    |
 * |   (stores the saved ret pointer)           | <- the offset points here
 * +--------------------------------------------+
 * |     (X) | (N)                              | ( N words away from
 * |                                            |   previous ret_stack)
 * ...
 *  ret_stack[0]
 *
 * If a backtrace is required, and the real return pointer needs to be
 * fetched, then it looks at the task's curr_ret_stack offset; if it
 * is greater than zero (reserved, or right before popped), it masks
 * the value with FGRAPH_FRAME_OFFSET_MASK to get the offset of the
 * ftrace_ret_stack structure stored on the shadow stack.
 */
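/*
 * As a worked example of the encoding above (illustrative only; shift values
 * are what the macros below resolve to): the bitmap word
 * (9 << 12) | (1 << 10) | FGRAPH_FRAME_OFFSET from the diagram decodes as
 *
 *	offset = val & FGRAPH_FRAME_OFFSET_MASK	-> FGRAPH_FRAME_OFFSET
 *	type   = (val >> 10) & 0x3		-> 1 (FGRAPH_TYPE_BITMAP)
 *	bitmap = (val >> 12) & 0xffff		-> 9 (BIT(3) | BIT(0))
 *
 * meaning the fgraph frame starts FGRAPH_FRAME_OFFSET words below this word,
 * and fgraph_array[0] and fgraph_array[3] want their retfunc called.
 */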
/*
 * The following is for the top word on the stack:
 *
 *   FGRAPH_FRAME_OFFSET (0-9) holds the offset delta to the fgraph frame
 *   FGRAPH_TYPE (10-11) holds the type of word this is.
 *     (RESERVED or BITMAP)
 */
#define FGRAPH_FRAME_OFFSET_BITS	10
#define FGRAPH_FRAME_OFFSET_MASK	GENMASK(FGRAPH_FRAME_OFFSET_BITS - 1, 0)

#define FGRAPH_TYPE_BITS	2
#define FGRAPH_TYPE_MASK	GENMASK(FGRAPH_TYPE_BITS - 1, 0)
#define FGRAPH_TYPE_SHIFT	FGRAPH_FRAME_OFFSET_BITS

enum {
	FGRAPH_TYPE_RESERVED	= 0,
	FGRAPH_TYPE_BITMAP	= 1,
	FGRAPH_TYPE_DATA	= 2,
};

/*
 * For BITMAP type:
 *   FGRAPH_INDEX (12-27) bits holding the bitmap of fgraph_ops indexes
 *			  that want their return callback called
 */
#define FGRAPH_INDEX_BITS	16
#define FGRAPH_INDEX_MASK	GENMASK(FGRAPH_INDEX_BITS - 1, 0)
#define FGRAPH_INDEX_SHIFT	(FGRAPH_TYPE_SHIFT + FGRAPH_TYPE_BITS)

/*
 * For DATA type:
 *   FGRAPH_DATA (12-17) bits hold the size of data (in words)
 *   FGRAPH_INDEX (18-23) bits hold the index for which gops->idx the data is for
 *
 * Note:
 *   data_size == 0 means 1 word, and 31 (=2^5 - 1) means 32 words.
 */
#define FGRAPH_DATA_BITS	5
#define FGRAPH_DATA_MASK	GENMASK(FGRAPH_DATA_BITS - 1, 0)
#define FGRAPH_DATA_SHIFT	(FGRAPH_TYPE_SHIFT + FGRAPH_TYPE_BITS)
#define FGRAPH_MAX_DATA_SIZE	(sizeof(long) * (1 << FGRAPH_DATA_BITS))

#define FGRAPH_DATA_INDEX_BITS	4
#define FGRAPH_DATA_INDEX_MASK	GENMASK(FGRAPH_DATA_INDEX_BITS - 1, 0)
#define FGRAPH_DATA_INDEX_SHIFT	(FGRAPH_DATA_SHIFT + FGRAPH_DATA_BITS)

#define FGRAPH_MAX_INDEX	\
	((FGRAPH_INDEX_SIZE << FGRAPH_DATA_BITS) + FGRAPH_RET_INDEX)

#define FGRAPH_ARRAY_SIZE	FGRAPH_INDEX_BITS

/*
 * SHADOW_STACK_SIZE:	The size in bytes of the entire shadow stack
 * SHADOW_STACK_OFFSET:	The size in long words of the shadow stack
 * SHADOW_STACK_MAX_OFFSET: The max offset of the stack for a new frame to be added
 */
#define SHADOW_STACK_SIZE	(4096)
#define SHADOW_STACK_OFFSET	(SHADOW_STACK_SIZE / sizeof(long))
/* Leave a buffer at the end */
#define SHADOW_STACK_MAX_OFFSET				\
	(SHADOW_STACK_OFFSET - (FGRAPH_FRAME_OFFSET + 1 + FGRAPH_ARRAY_SIZE))

/* RET_STACK(): Return the frame from a given @offset from task @t */
#define RET_STACK(t, offset) ((struct ftrace_ret_stack *)(&(t)->ret_stack[offset]))

/*
 * Each fgraph_ops has a reserved unsigned long at the end (top) of the
 * ret_stack to store task specific state.
 */
#define SHADOW_STACK_TASK_VARS(ret_stack) \
	((unsigned long *)(&(ret_stack)[SHADOW_STACK_OFFSET - FGRAPH_ARRAY_SIZE]))

DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);
int ftrace_graph_active;

static struct kmem_cache *fgraph_stack_cachep;

static struct fgraph_ops *fgraph_array[FGRAPH_ARRAY_SIZE];
static unsigned long fgraph_array_bitmask;

/* LRU index table for fgraph_array */
static int fgraph_lru_table[FGRAPH_ARRAY_SIZE];
static int fgraph_lru_next;
static int fgraph_lru_last;

/* Initialize fgraph_lru_table with unused indexes */
static void fgraph_lru_init(void)
{
	int i;

	for (i = 0; i < FGRAPH_ARRAY_SIZE; i++)
		fgraph_lru_table[i] = i;
}

/* Release a used index back to the LRU table */
static int fgraph_lru_release_index(int idx)
{
	if (idx < 0 || idx >= FGRAPH_ARRAY_SIZE ||
	    WARN_ON_ONCE(fgraph_lru_table[fgraph_lru_last] != -1))
		return -1;

	fgraph_lru_table[fgraph_lru_last] = idx;
	fgraph_lru_last = (fgraph_lru_last + 1) % FGRAPH_ARRAY_SIZE;

	clear_bit(idx, &fgraph_array_bitmask);
	return 0;
}

/* Allocate a new index from the LRU table */
static int fgraph_lru_alloc_index(void)
{
	int idx = fgraph_lru_table[fgraph_lru_next];

	/* No index is available */
	if (idx == -1)
		return -1;

	fgraph_lru_table[fgraph_lru_next] = -1;
	fgraph_lru_next = (fgraph_lru_next + 1) % FGRAPH_ARRAY_SIZE;

	set_bit(idx, &fgraph_array_bitmask);
	return idx;
}
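/*
 * A minimal sketch of how the two helpers above cycle the table (purely
 * illustrative): with the slots starting as {0, 1, 2, ...},
 * fgraph_lru_alloc_index() hands out 0, then 1, marking each taken slot -1,
 * while fgraph_lru_release_index(0) writes 0 back at fgraph_lru_last.
 * Released indexes are therefore reused in release order, which keeps a
 * just-freed index from being immediately handed to a new fgraph_ops.
 */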
/* Get the offset to the fgraph frame from a ret_stack value */
static inline int __get_offset(unsigned long val)
{
	return val & FGRAPH_FRAME_OFFSET_MASK;
}

/* Get the type of word from a ret_stack value */
static inline int __get_type(unsigned long val)
{
	return (val >> FGRAPH_TYPE_SHIFT) & FGRAPH_TYPE_MASK;
}

/* Get the data_index for a DATA type ret_stack word */
static inline int __get_data_index(unsigned long val)
{
	return (val >> FGRAPH_DATA_INDEX_SHIFT) & FGRAPH_DATA_INDEX_MASK;
}

/* Get the data_size for a DATA type ret_stack word */
static inline int __get_data_size(unsigned long val)
{
	return ((val >> FGRAPH_DATA_SHIFT) & FGRAPH_DATA_MASK) + 1;
}

/* Get the word from the ret_stack at @offset */
static inline unsigned long get_fgraph_entry(struct task_struct *t, int offset)
{
	return t->ret_stack[offset];
}

/* Get the FRAME_OFFSET from the word from the @offset on ret_stack */
static inline int get_frame_offset(struct task_struct *t, int offset)
{
	return __get_offset(t->ret_stack[offset]);
}

/* For BITMAP type: get the bitmask from the @offset at ret_stack */
static inline unsigned long
get_bitmap_bits(struct task_struct *t, int offset)
{
	return (t->ret_stack[offset] >> FGRAPH_INDEX_SHIFT) & FGRAPH_INDEX_MASK;
}

/* Write the bitmap to the ret_stack at @offset (does index, offset and bitmask) */
static inline void
set_bitmap(struct task_struct *t, int offset, unsigned long bitmap)
{
	t->ret_stack[offset] = (bitmap << FGRAPH_INDEX_SHIFT) |
		(FGRAPH_TYPE_BITMAP << FGRAPH_TYPE_SHIFT) | FGRAPH_FRAME_OFFSET;
}

/* For DATA type: get the data saved under the ret_stack word at @offset */
static inline void *get_data_type_data(struct task_struct *t, int offset)
{
	unsigned long val = t->ret_stack[offset];

	if (__get_type(val) != FGRAPH_TYPE_DATA)
		return NULL;
	offset -= __get_data_size(val);
	return (void *)&t->ret_stack[offset];
}

/* Create the ret_stack word for a DATA type */
static inline unsigned long make_data_type_val(int idx, int size, int offset)
{
	return (idx << FGRAPH_DATA_INDEX_SHIFT) |
		((size - 1) << FGRAPH_DATA_SHIFT) |
		(FGRAPH_TYPE_DATA << FGRAPH_TYPE_SHIFT) | offset;
}
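/*
 * Worked round trip through the helpers above (illustrative only): for the
 * DATA word in the layout diagram, make_data_type_val(3, 2, off) encodes
 * (3 << FGRAPH_DATA_INDEX_SHIFT) | (1 << FGRAPH_DATA_SHIFT) |
 * (FGRAPH_TYPE_DATA << FGRAPH_TYPE_SHIFT) | off, after which __get_type()
 * returns FGRAPH_TYPE_DATA, __get_data_index() returns 3, __get_data_size()
 * returns 2 (the stored size is biased by one), and __get_offset()
 * returns off.
 */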
/* ftrace_graph_entry set to this to tell some archs to run function graph */
static int entry_run(struct ftrace_graph_ent *trace, struct fgraph_ops *ops,
		     struct ftrace_regs *fregs)
{
	return 0;
}

/* ftrace_graph_return set to this to tell some archs to run function graph */
static void return_run(struct ftrace_graph_ret *trace, struct fgraph_ops *ops,
		       struct ftrace_regs *fregs)
{
}

static void ret_stack_set_task_var(struct task_struct *t, int idx, long val)
{
	unsigned long *gvals = SHADOW_STACK_TASK_VARS(t->ret_stack);

	gvals[idx] = val;
}

static unsigned long *
ret_stack_get_task_var(struct task_struct *t, int idx)
{
	unsigned long *gvals = SHADOW_STACK_TASK_VARS(t->ret_stack);

	return &gvals[idx];
}

static void ret_stack_init_task_vars(unsigned long *ret_stack)
{
	unsigned long *gvals = SHADOW_STACK_TASK_VARS(ret_stack);

	memset(gvals, 0, sizeof(*gvals) * FGRAPH_ARRAY_SIZE);
}

/**
 * fgraph_reserve_data - Reserve storage on the task's ret_stack
 * @idx:	The index of fgraph_array
 * @size_bytes: The size in bytes to reserve
 *
 * Reserves space of up to FGRAPH_MAX_DATA_SIZE bytes on the
 * task's ret_stack shadow stack, for a given fgraph_ops during
 * the entryfunc() call. If entryfunc() returns zero, the storage
 * is discarded. An entryfunc() can only call this once per iteration.
 * The fgraph_ops retfunc() can retrieve this stored data with
 * fgraph_retrieve_data().
 *
 * Returns: On success, a pointer to the data on the stack.
 *   Otherwise, NULL if there's not enough space left on the
 *   ret_stack for the data, or if fgraph_reserve_data() was called
 *   more than once for a single entryfunc() call.
 */
void *fgraph_reserve_data(int idx, int size_bytes)
{
	unsigned long val;
	void *data;
	int curr_ret_stack = current->curr_ret_stack;
	int data_size;

	if (size_bytes > FGRAPH_MAX_DATA_SIZE)
		return NULL;

	/* Convert the data size to number of longs. */
	data_size = (size_bytes + sizeof(long) - 1) >> (sizeof(long) == 4 ? 2 : 3);

	val = get_fgraph_entry(current, curr_ret_stack - 1);
	data = &current->ret_stack[curr_ret_stack];

	curr_ret_stack += data_size + 1;
	if (unlikely(curr_ret_stack >= SHADOW_STACK_MAX_OFFSET))
		return NULL;

	val = make_data_type_val(idx, data_size, __get_offset(val) + data_size + 1);

	/* Set the last word to be reserved */
	current->ret_stack[curr_ret_stack - 1] = val;

	/* Make sure interrupts see this */
	barrier();
	current->curr_ret_stack = curr_ret_stack;
	/* Again sync with interrupts, and reset reserve */
	current->ret_stack[curr_ret_stack - 1] = val;

	return data;
}

/**
 * fgraph_retrieve_data - Retrieve stored data from fgraph_reserve_data()
 * @idx:	the index of fgraph_array (fgraph_ops::idx)
 * @size_bytes: pointer to retrieved data size.
 *
 * This is to be called by a fgraph_ops retfunc(), to retrieve data that
 * was stored by the fgraph_ops entryfunc() on the function entry.
 * That is, this will retrieve the data that was reserved on the
 * entry of the function that corresponds to the exit of the function
 * that the fgraph_ops retfunc() is called on.
 *
 * Returns: The stored data from fgraph_reserve_data() called by the
 *   matching entryfunc() for the retfunc() this is called from.
 *   Or NULL if there was nothing stored.
 */
void *fgraph_retrieve_data(int idx, int *size_bytes)
{
	return fgraph_retrieve_parent_data(idx, size_bytes, 0);
}

/**
 * fgraph_get_task_var - retrieve a task specific state variable
 * @gops: The ftrace_ops that owns the task specific variable
 *
 * Every registered fgraph_ops has a task state variable
 * reserved on the task's ret_stack. This function returns the
 * address to that variable.
 *
 * Returns the address to the fgraph_ops @gops task specific
 * unsigned long variable.
 */
unsigned long *fgraph_get_task_var(struct fgraph_ops *gops)
{
	return ret_stack_get_task_var(current, gops->idx);
}
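/*
 * A minimal usage sketch of the two calls above (illustrative only; the
 * callback names are hypothetical): an entryfunc() stashes a timestamp
 * that the matching retfunc() picks back up on function exit.
 *
 *	static int my_entry(struct ftrace_graph_ent *trace,
 *			    struct fgraph_ops *gops, struct ftrace_regs *fregs)
 *	{
 *		u64 *ts = fgraph_reserve_data(gops->idx, sizeof(*ts));
 *
 *		if (!ts)
 *			return 0;
 *		*ts = trace_clock_local();
 *		return 1;
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace,
 *			      struct fgraph_ops *gops, struct ftrace_regs *fregs)
 *	{
 *		int size;
 *		u64 *ts = fgraph_retrieve_data(gops->idx, &size);
 *
 *		if (ts)
 *			pr_info("%ps took %llu ns\n", (void *)trace->func,
 *				trace_clock_local() - *ts);
 *	}
 *
 * Returning 0 from my_entry() discards the reservation; returning non-zero
 * sets this gops' bit in the frame bitmap so my_return() gets called.
 */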
/*
 * @offset: The offset into @t->ret_stack to find the ret_stack entry
 * @frame_offset: Where to place the offset into @t->ret_stack of that entry
 *
 * Returns a pointer to the previous ret_stack below @offset or NULL
 * when it reaches the bottom of the stack.
 *
 * Calling this with:
 *
 *	offset = task->curr_ret_stack;
 *	do {
 *		ret_stack = get_ret_stack(task, offset, &offset);
 *	} while (ret_stack);
 *
 * Will iterate through all the ret_stack entries from curr_ret_stack
 * down to the first one.
 */
static inline struct ftrace_ret_stack *
get_ret_stack(struct task_struct *t, int offset, int *frame_offset)
{
	int offs;

	BUILD_BUG_ON(FGRAPH_FRAME_SIZE % sizeof(long));

	if (unlikely(offset <= 0))
		return NULL;

	offs = get_frame_offset(t, --offset);
	if (WARN_ON_ONCE(offs <= 0 || offs > offset))
		return NULL;

	offset -= offs;

	*frame_offset = offset;
	return RET_STACK(t, offset);
}

/**
 * fgraph_retrieve_parent_data - get data from a parent function
 * @idx:	The index into the fgraph_array (fgraph_ops::idx)
 * @size_bytes: A pointer to retrieved data size
 * @depth:	The depth to find the parent (0 is the current function)
 *
 * This is similar to fgraph_retrieve_data() but can be used to retrieve
 * data from a parent caller function.
 *
 * Return: a pointer to the specified parent data or NULL if not found
 */
void *fgraph_retrieve_parent_data(int idx, int *size_bytes, int depth)
{
	struct ftrace_ret_stack *ret_stack = NULL;
	int offset = current->curr_ret_stack;
	unsigned long val;

	if (offset <= 0)
		return NULL;

	for (;;) {
		int next_offset;

		ret_stack = get_ret_stack(current, offset, &next_offset);
		if (!ret_stack || --depth < 0)
			break;
		offset = next_offset;
	}

	if (!ret_stack)
		return NULL;

	offset--;

	val = get_fgraph_entry(current, offset);
	while (__get_type(val) == FGRAPH_TYPE_DATA) {
		if (__get_data_index(val) == idx)
			goto found;
		offset -= __get_data_size(val) + 1;
		val = get_fgraph_entry(current, offset);
	}
	return NULL;
found:
	if (size_bytes)
		*size_bytes = __get_data_size(val) * sizeof(long);
	return get_data_type_data(current, offset);
}
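/*
 * Depth semantics, as a quick illustration: from a retfunc() running for
 * function C in a call chain A -> B -> C, fgraph_retrieve_parent_data(idx,
 * &size, 0) returns the data reserved on entry of C itself (what
 * fgraph_retrieve_data() does), depth 1 returns B's data, and depth 2
 * returns A's, provided the same fgraph_ops reserved data at those entries.
 */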
#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * archs can override this function if they must do something
 * to enable hook for graph tracer.
 */
int __weak ftrace_enable_ftrace_graph_caller(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * to disable hook for graph tracer.
 */
int __weak ftrace_disable_ftrace_graph_caller(void)
{
	return 0;
}
#endif

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace,
			    struct fgraph_ops *gops,
			    struct ftrace_regs *fregs)
{
	return 0;
}

static void ftrace_graph_ret_stub(struct ftrace_graph_ret *trace,
				  struct fgraph_ops *gops,
				  struct ftrace_regs *fregs)
{
}

static struct fgraph_ops fgraph_stub = {
	.entryfunc = ftrace_graph_entry_stub,
	.retfunc = ftrace_graph_ret_stub,
};

static struct fgraph_ops *fgraph_direct_gops = &fgraph_stub;
DEFINE_STATIC_CALL(fgraph_func, ftrace_graph_entry_stub);
DEFINE_STATIC_CALL(fgraph_retfunc, ftrace_graph_ret_stub);
#if FGRAPH_NO_DIRECT
static DEFINE_STATIC_KEY_FALSE(fgraph_do_direct);
#else
static DEFINE_STATIC_KEY_TRUE(fgraph_do_direct);
#endif

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	static_branch_enable(&kill_ftrace_graph);
}

/* Add a function return address to the trace stack on thread info. */
static int
ftrace_push_return_trace(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp,
			 int fgraph_idx)
{
	struct ftrace_ret_stack *ret_stack;
	unsigned long val;
	int offset;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	BUILD_BUG_ON(SHADOW_STACK_SIZE % sizeof(long));

	/* Set val to "reserved" with the delta to the new fgraph frame */
	val = (FGRAPH_TYPE_RESERVED << FGRAPH_TYPE_SHIFT) | FGRAPH_FRAME_OFFSET;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/*
	 * Check if there's room on the shadow stack to fit an fgraph frame
	 * and a bitmap word.
	 */
	if (current->curr_ret_stack + FGRAPH_FRAME_OFFSET + 1 >= SHADOW_STACK_MAX_OFFSET) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	offset = READ_ONCE(current->curr_ret_stack);
	ret_stack = RET_STACK(current, offset);
	offset += FGRAPH_FRAME_OFFSET;

	/* ret offset = FGRAPH_FRAME_OFFSET ; type = reserved */
	current->ret_stack[offset] = val;
	ret_stack->ret = ret;
	/*
	 * The unwinders expect curr_ret_stack to point to either zero
	 * or an offset where to find the next ret_stack. Even though the
	 * ret stack might be bogus, we want to write the ret and the
	 * offset to find the ret_stack before we increment the stack pointer.
	 * If an interrupt comes in now before we increment the curr_ret_stack
	 * it may blow away what we wrote. But that's fine, because the
	 * offset will still be correct (even though the 'ret' won't be).
	 * What we worry about is the offset being correct after we increment
	 * the curr_ret_stack and before we update that offset, as if an
	 * interrupt comes in and does an unwind stack dump, it will need
	 * at least a correct offset!
	 */
	barrier();
	WRITE_ONCE(current->curr_ret_stack, offset + 1);
	/*
	 * This next barrier is to ensure that an interrupt coming in
	 * will not corrupt what we are about to write.
	 */
	barrier();

	/* Still keep it reserved even if an interrupt came in */
	current->ret_stack[offset] = val;

	ret_stack->ret = ret;
	ret_stack->func = func;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	ret_stack->fp = frame_pointer;
#endif
	ret_stack->retp = retp;
	return offset;
}

/*
 * Not all archs define MCOUNT_INSN_SIZE which is used to look for direct
 * functions. But those archs currently don't support direct functions
 * anyway, and ftrace_find_rec_direct() is just a stub for them.
 * Define MCOUNT_INSN_SIZE to keep those archs compiling.
 */
#ifndef MCOUNT_INSN_SIZE
/* Make sure this only works without direct calls */
# ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
#  error MCOUNT_INSN_SIZE not defined with direct calls enabled
# endif
# define MCOUNT_INSN_SIZE 0
#endif
/* If the caller does not use ftrace, call this function. */
int function_graph_enter_regs(unsigned long ret, unsigned long func,
			      unsigned long frame_pointer, unsigned long *retp,
			      struct ftrace_regs *fregs)
{
	struct ftrace_graph_ent trace;
	unsigned long bitmap = 0;
	int offset;
	int bit;
	int i;

	bit = ftrace_test_recursion_trylock(func, ret);
	if (bit < 0)
		return -EBUSY;

	trace.func = func;
	trace.depth = ++current->curr_ret_depth;

	offset = ftrace_push_return_trace(ret, func, frame_pointer, retp, 0);
	if (offset < 0)
		goto out;

#ifdef CONFIG_HAVE_STATIC_CALL
	if (static_branch_likely(&fgraph_do_direct)) {
		int save_curr_ret_stack = current->curr_ret_stack;

		if (static_call(fgraph_func)(&trace, fgraph_direct_gops, fregs))
			bitmap |= BIT(fgraph_direct_gops->idx);
		else
			/* Clear out any saved storage */
			current->curr_ret_stack = save_curr_ret_stack;
	} else
#endif
	{
		for_each_set_bit(i, &fgraph_array_bitmask,
				 sizeof(fgraph_array_bitmask) * BITS_PER_BYTE) {
			struct fgraph_ops *gops = READ_ONCE(fgraph_array[i]);
			int save_curr_ret_stack;

			if (gops == &fgraph_stub)
				continue;

			save_curr_ret_stack = current->curr_ret_stack;
			if (ftrace_ops_test(&gops->ops, func, NULL) &&
			    gops->entryfunc(&trace, gops, fregs))
				bitmap |= BIT(i);
			else
				/* Clear out any saved storage */
				current->curr_ret_stack = save_curr_ret_stack;
		}
	}

	if (!bitmap)
		goto out_ret;

	/*
	 * Since this function uses fgraph_idx = 0 as a tail-call checking
	 * flag, set that bit always.
	 */
	set_bitmap(current, offset, bitmap | BIT(0));
	ftrace_test_recursion_unlock(bit);
	return 0;
 out_ret:
	current->curr_ret_stack -= FGRAPH_FRAME_OFFSET + 1;
 out:
	current->curr_ret_depth--;
	ftrace_test_recursion_unlock(bit);
	return -EBUSY;
}
/* Retrieve a function return address from the trace stack on thread info. */
static struct ftrace_ret_stack *
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer, int *offset)
{
	struct ftrace_ret_stack *ret_stack;

	ret_stack = get_ret_stack(current, current->curr_ret_stack, offset);

	if (unlikely(!ret_stack)) {
		ftrace_graph_stop();
		WARN(1, "Bad function graph ret_stack pointer: %d",
		     current->curr_ret_stack);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return NULL;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(ret_stack->fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     ret_stack->fp,
		     frame_pointer,
		     (void *)ret_stack->func,
		     ret_stack->ret);
		*ret = (unsigned long)panic;
		return NULL;
	}
#endif

	*offset += FGRAPH_FRAME_OFFSET;
	*ret = ret_stack->ret;
	trace->func = ret_stack->func;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = current->curr_ret_depth;
	/*
	 * We still want to trace interrupts coming in if
	 * max_depth is set to 1. Make sure the decrement is
	 * seen before ftrace_graph_return.
	 */
	barrier();

	return ret_stack;
}

/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
			     void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block ftrace_suspend_notifier = {
	.notifier_call = ftrace_suspend_notifier_call,
};

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
static inline unsigned long
__ftrace_return_to_handler(struct ftrace_regs *fregs, unsigned long frame_pointer)
{
	struct ftrace_ret_stack *ret_stack;
	struct ftrace_graph_ret trace;
	unsigned long bitmap;
	unsigned long ret;
	int offset;
	int bit;
	int i;

	ret_stack = ftrace_pop_return_trace(&trace, &ret, frame_pointer, &offset);

	if (unlikely(!ret_stack)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		return (unsigned long)panic;
	}

	if (fregs)
		ftrace_regs_set_instruction_pointer(fregs, ret);

	bit = ftrace_test_recursion_trylock(trace.func, ret);
	/*
	 * This can fail because ftrace_test_recursion_trylock() allows one nest
	 * call. If we are already in a nested call, then we don't probe this and
	 * just return the original return address.
	 */
	if (unlikely(bit < 0))
		goto out;

#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
	trace.retval = ftrace_regs_get_return_value(fregs);
#endif

	bitmap = get_bitmap_bits(current, offset);

#ifdef CONFIG_HAVE_STATIC_CALL
	if (!FGRAPH_NO_DIRECT && static_branch_likely(&fgraph_do_direct)) {
		if (test_bit(fgraph_direct_gops->idx, &bitmap))
			static_call(fgraph_retfunc)(&trace, fgraph_direct_gops, fregs);
	} else
#endif
	{
		for_each_set_bit(i, &bitmap, sizeof(bitmap) * BITS_PER_BYTE) {
			struct fgraph_ops *gops = READ_ONCE(fgraph_array[i]);

			if (gops == &fgraph_stub)
				continue;

			gops->retfunc(&trace, gops, fregs);
		}
	}

	ftrace_test_recursion_unlock(bit);
out:
	/*
	 * The ftrace_graph_return() may still access the current
	 * ret_stack structure, we need to make sure the update of
	 * curr_ret_stack is after that.
	 */
	barrier();
	current->curr_ret_stack = offset - FGRAPH_FRAME_OFFSET;

	current->curr_ret_depth--;
	return ret;
}
/*
 * After all architectures have selected HAVE_FUNCTION_GRAPH_FREGS, we can
 * leave only ftrace_return_to_handler(fregs).
 */
#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FREGS
unsigned long ftrace_return_to_handler(struct ftrace_regs *fregs)
{
	return __ftrace_return_to_handler(fregs,
				ftrace_regs_get_frame_pointer(fregs));
}
#else
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	return __ftrace_return_to_handler(NULL, frame_pointer);
}
#endif

/**
 * ftrace_graph_get_ret_stack - return the entry of the shadow stack
 * @task: The task to read the shadow stack from.
 * @idx: Index down the shadow stack
 *
 * Return the ftrace_ret_stack on the shadow stack of the @task at the
 * call graph at @idx starting with zero. If @idx is zero, it
 * will return the last saved ret_stack entry. If it is greater than
 * zero, it will return the corresponding ret_stack for the depth
 * of saved return addresses.
 */
struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx)
{
	struct ftrace_ret_stack *ret_stack = NULL;
	int offset = task->curr_ret_stack;

	if (offset < 0)
		return NULL;

	do {
		ret_stack = get_ret_stack(task, offset, &offset);
	} while (ret_stack && --idx >= 0);

	return ret_stack;
}

/**
 * ftrace_graph_top_ret_addr - return the top return address in the shadow stack
 * @task: The task to read the shadow stack from.
 *
 * Return the first return address on the shadow stack of the @task that is
 * not the fgraph's return_to_handler.
 */
unsigned long ftrace_graph_top_ret_addr(struct task_struct *task)
{
	unsigned long return_handler = (unsigned long)dereference_kernel_function_descriptor(return_to_handler);
	struct ftrace_ret_stack *ret_stack = NULL;
	int offset = task->curr_ret_stack;

	if (offset < 0)
		return 0;

	do {
		ret_stack = get_ret_stack(task, offset, &offset);
	} while (ret_stack && ret_stack->ret == return_handler);

	return ret_stack ? ret_stack->ret : 0;
}

/**
 * ftrace_graph_ret_addr - return the original value of the return address
 * @task: The task the unwinder is being executed on
 * @idx: An initialized pointer to the next stack index to use
 * @ret: The current return address (likely pointing to return_handler)
 * @retp: The address on the stack of the current return location
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address (@ret) to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'. If the address hasn't
 * been modified, the unchanged value of @ret is returned.
 *
 * @idx holds the last index used to know where to start from. It should be
 * initialized to zero for the first iteration as that will mean to start
 * at the top of the shadow stack. If the location is found, this pointer
 * will be assigned that location so that if called again, it will continue
 * where it left off.
 *
 * @retp is a pointer to the return address on the stack.
 */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	struct ftrace_ret_stack *ret_stack;
	unsigned long return_handler = (unsigned long)dereference_kernel_function_descriptor(return_to_handler);
	int i;

	if (ret != return_handler)
		return ret;

	if (!idx)
		return ret;

	i = *idx ? : task->curr_ret_stack;
	while (i > 0) {
		ret_stack = get_ret_stack(task, i, &i);
		if (!ret_stack)
			break;
		/*
		 * For a tail-call, there would be 2 or more ftrace_ret_stacks
		 * on the ret_stack, which record "return_to_handler" as the
		 * return address except for the last one.
		 * But on the real stack, there should be 1 entry because the
		 * tail-call reuses the return address on the stack and jumps
		 * to the next function.
		 * Thus we will continue searching for the real return address.
		 */
		if (ret_stack->retp == retp &&
		    ret_stack->ret != return_handler) {
			*idx = i;
			return ret_stack->ret;
		}
	}

	return ret;
}
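/*
 * Typical use from an unwinder, as a sketch (the loop shape and frame field
 * names are hypothetical; each arch unwinder has its own frame walk): keep
 * one cursor per walk and run every candidate return address through
 * ftrace_graph_ret_addr() so any 'return_to_handler' is mapped back to the
 * real caller.
 *
 *	int state = 0;
 *	unsigned long addr;
 *
 *	for each frame:
 *		addr = ftrace_graph_ret_addr(task, &state, frame->ret,
 *					     frame->ret_addr_ptr);
 *		// addr is now the original return address
 */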
static struct ftrace_ops graph_ops = {
	.func			= ftrace_graph_func,
	.flags			= FTRACE_OPS_GRAPH_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
	/* trampoline_size is only needed for dynamically allocated tramps */
#endif
};

void fgraph_init_ops(struct ftrace_ops *dst_ops,
		     struct ftrace_ops *src_ops)
{
	dst_ops->flags = FTRACE_OPS_FL_PID | FTRACE_OPS_GRAPH_STUB;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (src_ops) {
		dst_ops->func_hash = &src_ops->local_hash;
		mutex_init(&dst_ops->local_hash.regex_lock);
		INIT_LIST_HEAD(&dst_ops->subop_list);
		dst_ops->flags |= FTRACE_OPS_FL_INITIALIZED;
		dst_ops->private = src_ops->private;
	}
#endif
}

/*
 * Simply points to ftrace_stub, but with the proper protocol.
 * Defined by the linker script in linux/vmlinux.lds.h
 */
void ftrace_stub_graph(struct ftrace_graph_ret *trace, struct fgraph_ops *gops,
		       struct ftrace_regs *fregs);

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return = ftrace_stub_graph;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array to FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(unsigned long **ret_stack_list)
{
	int i;
	int ret = 0;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	if (WARN_ON_ONCE(!fgraph_stack_cachep))
		return -ENOMEM;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	rcu_read_lock();
	for_each_process_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->trace_overrun, 0);
			ret_stack_init_task_vars(ret_stack_list[start]);
			t->curr_ret_stack = 0;
			t->curr_ret_depth = -1;
			/* Make sure the tasks see the 0 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	}

unlock:
	rcu_read_unlock();
free:
	for (i = start; i < end; i++)
		kmem_cache_free(fgraph_stack_cachep, ret_stack_list[i]);
	return ret;
}
static void
ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned int prev_state)
{
	unsigned long long timestamp;

	/*
	 * Does the user want to count the time a function was asleep?
	 * If so, do not update the time stamps.
	 */
	if (!fgraph_no_sleep_time)
		return;

	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	next->ftrace_sleeptime += timestamp - next->ftrace_timestamp;
}

static DEFINE_PER_CPU(unsigned long *, idle_ret_stack);

static void
graph_init_task(struct task_struct *t, unsigned long *ret_stack)
{
	atomic_set(&t->trace_overrun, 0);
	ret_stack_init_task_vars(ret_stack);
	t->ftrace_timestamp = 0;
	t->curr_ret_stack = 0;
	t->curr_ret_depth = -1;
	/* make curr_ret_stack visible before we add the ret_stack */
	smp_wmb();
	t->ret_stack = ret_stack;
}

/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
	t->curr_ret_stack = 0;
	t->curr_ret_depth = -1;
	/*
	 * The idle task has no parent, it either has its own
	 * stack or no stack at all.
	 */
	if (t->ret_stack)
		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));

	if (ftrace_graph_active) {
		unsigned long *ret_stack;

		if (WARN_ON_ONCE(!fgraph_stack_cachep))
			return;

		ret_stack = per_cpu(idle_ret_stack, cpu);
		if (!ret_stack) {
			ret_stack = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
			if (!ret_stack)
				return;
			per_cpu(idle_ret_stack, cpu) = ret_stack;
		}
		graph_init_task(t, ret_stack);
	}
}

/* Allocate a return stack for a newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;
	t->curr_ret_stack = 0;
	t->curr_ret_depth = -1;

	if (ftrace_graph_active) {
		unsigned long *ret_stack;

		if (WARN_ON_ONCE(!fgraph_stack_cachep))
			return;

		ret_stack = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
		if (!ret_stack)
			return;
		graph_init_task(t, ret_stack);
	}
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	unsigned long *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	if (ret_stack) {
		if (WARN_ON_ONCE(!fgraph_stack_cachep))
			return;
		kmem_cache_free(fgraph_stack_cachep, ret_stack);
	}
}

#ifdef CONFIG_DYNAMIC_FTRACE
static int fgraph_pid_func(struct ftrace_graph_ent *trace,
			   struct fgraph_ops *gops,
			   struct ftrace_regs *fregs)
{
	struct trace_array *tr = gops->ops.private;
	int pid;

	if (tr) {
		pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
		if (pid == FTRACE_PID_IGNORE)
			return 0;
		if (pid != FTRACE_PID_TRACE &&
		    pid != current->pid)
			return 0;
	}

	return gops->saved_func(trace, gops, fregs);
}
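/*
 * A brief scenario for the PID filtering above (illustrative): when a user
 * writes a PID to the tracefs set_ftrace_pid file, fgraph_update_pid_func()
 * below swaps the registered entryfunc for fgraph_pid_func(), which consults
 * the per-CPU ftrace_ignore_pid value and only forwards to the original
 * gops->saved_func() when the current task matches the filter.
 */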
void fgraph_update_pid_func(void)
{
	struct fgraph_ops *gops;
	struct ftrace_ops *op;

	if (!(graph_ops.flags & FTRACE_OPS_FL_INITIALIZED))
		return;

	list_for_each_entry(op, &graph_ops.subop_list, list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			gops = container_of(op, struct fgraph_ops, ops);
			gops->entryfunc = ftrace_pids_enabled(op) ?
				fgraph_pid_func : gops->saved_func;
			if (ftrace_graph_active == 1)
				static_call_update(fgraph_func, gops->entryfunc);
		}
	}
}
#endif

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	unsigned long **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kcalloc(FTRACE_RETSTACK_ALLOC_SIZE,
				 sizeof(*ret_stack_list), GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	}

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}

static void init_task_vars(int idx)
{
	struct task_struct *g, *t;
	int cpu;

	for_each_online_cpu(cpu) {
		if (idle_task(cpu)->ret_stack)
			ret_stack_set_task_var(idle_task(cpu), idx, 0);
	}

	read_lock(&tasklist_lock);
	for_each_process_thread(g, t) {
		if (t->ret_stack)
			ret_stack_set_task_var(t, idx, 0);
	}
	read_unlock(&tasklist_lock);
}

static void ftrace_graph_enable_direct(bool enable_branch, struct fgraph_ops *gops)
{
	trace_func_graph_ent_t func = NULL;
	trace_func_graph_ret_t retfunc = NULL;
	int i;

	if (FGRAPH_NO_DIRECT)
		return;

	if (gops) {
		func = gops->entryfunc;
		retfunc = gops->retfunc;
		fgraph_direct_gops = gops;
	} else {
		for_each_set_bit(i, &fgraph_array_bitmask,
				 sizeof(fgraph_array_bitmask) * BITS_PER_BYTE) {
			func = fgraph_array[i]->entryfunc;
			retfunc = fgraph_array[i]->retfunc;
			fgraph_direct_gops = fgraph_array[i];
		}
	}
	if (WARN_ON_ONCE(!func))
		return;

	static_call_update(fgraph_func, func);
	static_call_update(fgraph_retfunc, retfunc);
	if (enable_branch)
		static_branch_enable(&fgraph_do_direct);
}

static void ftrace_graph_disable_direct(bool disable_branch)
{
	if (FGRAPH_NO_DIRECT)
		return;

	if (disable_branch)
		static_branch_disable(&fgraph_do_direct);
	static_call_update(fgraph_func, ftrace_graph_entry_stub);
	static_call_update(fgraph_retfunc, ftrace_graph_ret_stub);
	fgraph_direct_gops = &fgraph_stub;
}

/* The cpu_boot init_task->ret_stack will never be freed */
static int fgraph_cpu_init(unsigned int cpu)
{
	if (!idle_task(cpu)->ret_stack)
		ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	return 0;
}
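/*
 * Registration bookkeeping below, summarized (a sketch of the state changes,
 * derived from the code): the first register_ftrace_graph() call brings
 * ftrace_graph_active to 1, which enables the static-call fast path via
 * ftrace_graph_enable_direct(), allocates the per-task shadow stacks and
 * issues FTRACE_START_FUNC_RET; a second registration (active == 2) drops
 * back to iterating fgraph_array[]. unregister_ftrace_graph() reverses this,
 * re-enabling the direct path when exactly one user remains.
 */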
"fgraph:online", 1359 fgraph_cpu_init, NULL); 1360 if (ret < 0) { 1361 pr_warn("fgraph: Error to init cpu hotplug support\n"); 1362 return ret; 1363 } 1364 fgraph_initialized = true; 1365 ret = 0; 1366 } 1367 1368 if (!fgraph_array[0]) { 1369 /* The array must always have real data on it */ 1370 for (i = 0; i < FGRAPH_ARRAY_SIZE; i++) 1371 fgraph_array[i] = &fgraph_stub; 1372 fgraph_lru_init(); 1373 } 1374 1375 i = fgraph_lru_alloc_index(); 1376 if (i < 0 || WARN_ON_ONCE(fgraph_array[i] != &fgraph_stub)) 1377 return -ENOSPC; 1378 gops->idx = i; 1379 1380 ftrace_graph_active++; 1381 1382 /* Always save the function, and reset at unregistering */ 1383 gops->saved_func = gops->entryfunc; 1384 #ifdef CONFIG_DYNAMIC_FTRACE 1385 if (ftrace_pids_enabled(&gops->ops)) 1386 gops->entryfunc = fgraph_pid_func; 1387 #endif 1388 1389 if (ftrace_graph_active == 2) 1390 ftrace_graph_disable_direct(true); 1391 1392 if (ftrace_graph_active == 1) { 1393 ftrace_graph_enable_direct(false, gops); 1394 register_pm_notifier(&ftrace_suspend_notifier); 1395 ret = start_graph_tracing(); 1396 if (ret) 1397 goto error; 1398 /* 1399 * Some archs just test to see if these are not 1400 * the default function 1401 */ 1402 ftrace_graph_return = return_run; 1403 ftrace_graph_entry = entry_run; 1404 command = FTRACE_START_FUNC_RET; 1405 } else { 1406 init_task_vars(gops->idx); 1407 } 1408 1409 gops->ops.flags |= FTRACE_OPS_FL_GRAPH; 1410 1411 ret = ftrace_startup_subops(&graph_ops, &gops->ops, command); 1412 if (!ret) 1413 fgraph_array[i] = gops; 1414 1415 error: 1416 if (ret) { 1417 ftrace_graph_active--; 1418 gops->saved_func = NULL; 1419 fgraph_lru_release_index(i); 1420 if (!ftrace_graph_active) 1421 unregister_pm_notifier(&ftrace_suspend_notifier); 1422 } 1423 return ret; 1424 } 1425 1426 void unregister_ftrace_graph(struct fgraph_ops *gops) 1427 { 1428 int command = 0; 1429 1430 if (WARN_ONCE(!(gops->ops.flags & FTRACE_OPS_FL_GRAPH), 1431 "function graph ops unregistered without registering")) 1432 return; 1433 1434 guard(mutex)(&ftrace_lock); 1435 1436 if (unlikely(!ftrace_graph_active)) 1437 goto out; 1438 1439 if (unlikely(gops->idx < 0 || gops->idx >= FGRAPH_ARRAY_SIZE || 1440 fgraph_array[gops->idx] != gops)) 1441 goto out; 1442 1443 if (fgraph_lru_release_index(gops->idx) < 0) 1444 goto out; 1445 1446 fgraph_array[gops->idx] = &fgraph_stub; 1447 1448 ftrace_graph_active--; 1449 1450 if (!ftrace_graph_active) 1451 command = FTRACE_STOP_FUNC_RET; 1452 1453 ftrace_shutdown_subops(&graph_ops, &gops->ops, command); 1454 1455 if (ftrace_graph_active == 1) 1456 ftrace_graph_enable_direct(true, NULL); 1457 else if (!ftrace_graph_active) 1458 ftrace_graph_disable_direct(false); 1459 1460 if (!ftrace_graph_active) { 1461 ftrace_graph_return = ftrace_stub_graph; 1462 ftrace_graph_entry = ftrace_graph_entry_stub; 1463 unregister_pm_notifier(&ftrace_suspend_notifier); 1464 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); 1465 } 1466 gops->saved_func = NULL; 1467 out: 1468 gops->ops.flags &= ~FTRACE_OPS_FL_GRAPH; 1469 } 1470