#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);

static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type, void *data);
static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data);

static struct list_head *
syscall_get_enter_fields(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	return &entry->enter_fields;
}

extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/*
	 * Only compare after the "sys" prefix. Archs that use
	 * syscall wrappers may have syscall symbol aliases prefixed
	 * with ".SyS" or ".sys" instead of "sys", leading to an
	 * unwanted mismatch.
	 */
	return !strcmp(sym + 3, name + 3);
}
#endif
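/*
 * Illustrative example (editorial, not in the original source): with
 * syscall wrappers, the symbol backing sys_read() may resolve via
 * kallsyms to "SyS_read". Comparing past the three-byte prefix makes
 * the wrapper alias and the metadata name compare equal:
 *
 *	arch_syscall_match_sym_name("SyS_read", "sys_read")
 *	  == !strcmp("_read", "_read") == true
 */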
#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
/*
 * Some architectures that allow for 32bit applications
 * to run on a 64bit kernel do not map the syscalls for
 * the 32bit tasks the same as they do for 64bit tasks.
 *
 *     *cough*x86*cough*
 *
 * In such a case, instead of reporting the wrong syscalls,
 * simply ignore them.
 *
 * For an arch to ignore the compat syscalls it needs to
 * define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as
 * define the function arch_trace_is_compat_syscall() to let
 * the tracing system know that it should ignore it.
 */
static int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	if (unlikely(arch_trace_is_compat_syscall(regs)))
		return -1;

	return syscall_get_nr(task, regs);
}
#else
static inline int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	return syscall_get_nr(task, regs);
}
#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */

static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata **start;
	struct syscall_metadata **stop;
	char str[KSYM_SYMBOL_LEN];

	start = __start_syscalls_metadata;
	stop = __stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
		return NULL;

	for ( ; start < stop; start++) {
		if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
			return *start;
	}
	return NULL;
}

static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

static enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	trace_seq_printf(s, "%s(", entry->name);

	for (i = 0; i < entry->nb_args; i++) {

		if (trace_seq_has_overflowed(s))
			goto end;

		/* parameter types */
		if (trace_flags & TRACE_ITER_VERBOSE)
			trace_seq_printf(s, "%s ", entry->types[i]);

		/* parameter values */
		trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				 trace->args[i],
				 i == entry->nb_args - 1 ? "" : ", ");
	}

	trace_seq_putc(s, ')');
end:
	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}

static enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_putc(s, '\n');
		goto out;
	}

	if (entry->exit_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
			 trace->ret);

out:
	return trace_handle_return(s);
}

extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)					\
	sizeof(type) != sizeof(trace.name) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), name),		\
		sizeof(trace.name), is_signed_type(type)
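/*
 * Illustrative expansion (editorial, not in the original source): with
 * a local "struct syscall_trace_enter trace;" in scope,
 * SYSCALL_FIELD(int, nr) expands to the argument list
 *
 *	"int", "nr", offsetof(typeof(trace), nr),
 *	sizeof(trace.nr), is_signed_type(int)
 *
 * The sizeof comparison is a compile-time guard: if the declared type
 * ever disagrees with the struct field, the ternary selects a call to
 * the deliberately undefined __bad_type_size() and the build fails at
 * link time.
 */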
static int __init
__set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
	int i;
	int pos = 0;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
				entry->args[i], sizeof(unsigned long),
				i == entry->nb_args - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", ((unsigned long)(REC->%s))", entry->args[i]);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

static int __init set_syscall_print_fmt(struct ftrace_event_call *call)
{
	char *print_fmt;
	int len;
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event != call) {
		call->print_fmt = "\"0x%lx\", REC->ret";
		return 0;
	}

	/* First: called with 0 length to calculate the needed length */
	len = __set_enter_print_fmt(entry, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_enter_print_fmt(entry, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}

static void __init free_syscall_print_fmt(struct ftrace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event == call)
		kfree(call->print_fmt);
}

static int __init syscall_enter_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

static int __init syscall_exit_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
				 FILTER_OTHER);

	return ret;
}
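/*
 * Illustrative result (editorial, not in the original source): on a
 * 64-bit kernel (sizeof(unsigned long) == 8), the two-pass
 * __set_enter_print_fmt() for sys_read(fd, buf, count) produces
 *
 *	"fd: 0x%08lx, buf: 0x%08lx, count: 0x%08lx",
 *	((unsigned long)(REC->fd)), ((unsigned long)(REC->buf)),
 *	((unsigned long)(REC->count))
 *
 * The first pass (len == 0) only measures; the second fills the
 * kmalloc()ed buffer of exactly that length plus the NUL terminator.
 */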
static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
{
	struct trace_array *tr = data;
	struct ftrace_event_file *ftrace_file;
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	unsigned long irq_flags;
	int pc;
	int syscall_nr;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;

	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
	ftrace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
	if (!ftrace_file)
		return;

	if (ftrace_trigger_soft_disabled(ftrace_file))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	local_save_flags(irq_flags);
	pc = preempt_count();

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer,
			sys_data->enter_event->event.type, size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	event_trigger_unlock_commit(ftrace_file, buffer, event, entry,
				    irq_flags, pc);
}

static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
{
	struct trace_array *tr = data;
	struct ftrace_event_file *ftrace_file;
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	unsigned long irq_flags;
	int pc;
	int syscall_nr;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;

	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
	ftrace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
	if (!ftrace_file)
		return;

	if (ftrace_trigger_soft_disabled(ftrace_file))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer,
			sys_data->exit_event->event.type, sizeof(*entry),
			irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	event_trigger_unlock_commit(ftrace_file, buffer, event, entry,
				    irq_flags, pc);
}

static int reg_event_syscall_enter(struct ftrace_event_file *file,
				   struct ftrace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!tr->sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
	if (!ret) {
		rcu_assign_pointer(tr->enter_syscall_files[num], file);
		tr->sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void unreg_event_syscall_enter(struct ftrace_event_file *file,
				      struct ftrace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_enter--;
	RCU_INIT_POINTER(tr->enter_syscall_files[num], NULL);
	if (!tr->sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
	mutex_unlock(&syscall_trace_lock);
}

static int reg_event_syscall_exit(struct ftrace_event_file *file,
				  struct ftrace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!tr->sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
	if (!ret) {
		rcu_assign_pointer(tr->exit_syscall_files[num], file);
		tr->sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void unreg_event_syscall_exit(struct ftrace_event_file *file,
				     struct ftrace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_exit--;
	RCU_INIT_POINTER(tr->exit_syscall_files[num], NULL);
	if (!tr->sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
	mutex_unlock(&syscall_trace_lock);
}
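/*
 * Editorial note on the pattern above (not in the original source): a
 * single sys_enter/sys_exit tracepoint probe is shared by every syscall
 * event in a trace_array, so the probe is registered only on the
 * 0 -> 1 refcount transition and unregistered on 1 -> 0. Per-syscall
 * enablement is the per-number file pointer, published with
 * rcu_assign_pointer() so that the probe, which runs inside the
 * tracepoint's rcu_read_lock_sched() section, observes either a fully
 * initialized ftrace_event_file or NULL.
 */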
static int __init init_syscall_trace(struct ftrace_event_call *call)
{
	int id;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls) {
		pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
				((struct syscall_metadata *)call->data)->name);
		return -ENOSYS;
	}

	if (set_syscall_print_fmt(call) < 0)
		return -ENOMEM;

	id = trace_event_raw_init(call);

	if (id < 0) {
		free_syscall_print_fmt(call);
		return id;
	}

	return id;
}

struct trace_event_functions enter_syscall_print_funcs = {
	.trace		= print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
	.trace		= print_syscall_exit,
};

struct ftrace_event_class __refdata event_class_syscall_enter = {
	.system		= "syscalls",
	.reg		= syscall_enter_register,
	.define_fields	= syscall_enter_define_fields,
	.get_fields	= syscall_get_enter_fields,
	.raw_init	= init_syscall_trace,
};

struct ftrace_event_class __refdata event_class_syscall_exit = {
	.system		= "syscalls",
	.reg		= syscall_exit_register,
	.define_fields	= syscall_exit_define_fields,
	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
	.raw_init	= init_syscall_trace,
};

unsigned long __init __weak arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}

void __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
				    GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;
		syscalls_metadata[i] = meta;
	}
}

#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;

static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	head = this_cpu_ptr(sys_data->enter_event->perf_events);
	if (hlist_empty(head))
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
				sys_data->enter_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			      (unsigned long *)&rec->args);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}
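/*
 * Editorial note (not in the original source): perf stores each record
 * behind a u32 size header, and header plus payload must end on a u64
 * boundary. Hence the size dance above: add sizeof(u32), round up to
 * sizeof(u64), then subtract sizeof(u32) again, so that
 * (size + sizeof(u32)) % sizeof(u64) == 0 holds for the reserved
 * buffer.
 */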
static int perf_sysenter_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_enter)
		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall entry trace point\n");
	} else {
		set_bit(num, enabled_perf_enter_syscalls);
		sys_perf_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysenter_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_enter--;
	clear_bit(num, enabled_perf_enter_syscalls);
	if (!sys_perf_refcount_enter)
		unregister_trace_sys_enter(perf_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;
	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	head = this_cpu_ptr(sys_data->exit_event->perf_events);
	if (hlist_empty(head))
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
				sys_data->exit_event->event.type, regs, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}

static int perf_sysexit_enable(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_exit)
		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall exit trace point\n");
	} else {
		set_bit(num, enabled_perf_exit_syscalls);
		sys_perf_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysexit_disable(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_exit--;
	clear_bit(num, enabled_perf_exit_syscalls);
	if (!sys_perf_refcount_exit)
		unregister_trace_sys_exit(perf_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */
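/*
 * Editorial note (not in the original source): the two functions below
 * are the event class ->reg() callbacks. The event core hands them a
 * TRACE_REG_* opcode: REGISTER/UNREGISTER toggle the ftrace ring-buffer
 * path for one trace instance, while the PERF_* opcodes drive the
 * perf-event path guarded by CONFIG_PERF_EVENTS.
 */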
static int syscall_enter_register(struct ftrace_event_call *event,
				  enum trace_reg type, void *data)
{
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_enter(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_enter(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysenter_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysenter_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data)
{
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_exit(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_exit(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysexit_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysexit_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
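/*
 * Usage sketch (editorial, not in the original source), assuming
 * tracefs is mounted at /sys/kernel/debug/tracing:
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/syscalls/sys_enter_openat/enable
 *	cat /sys/kernel/debug/tracing/trace_pipe
 *
 * Enabling the event ends up in syscall_enter_register() with
 * TRACE_REG_REGISTER and, from there, in reg_event_syscall_enter()
 * above.
 */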