core.c: changes between commit 7b47c66cfa203288c43851260edeeb0fae56f692 (before) and commit c895f6f703ad7dd2f99e751d9884b0aa5d0eea25 (after)
--- core.c (7b47c66cfa203288c43851260edeeb0fae56f692)
+++ core.c (c895f6f703ad7dd2f99e751d9884b0aa5d0eea25)
 /*
  * Performance events core code:
  *
  * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
  * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
  * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  *
[... 7973 unchanged lines hidden ...]

 #ifdef CONFIG_BPF_SYSCALL
 static void bpf_overflow_handler(struct perf_event *event,
 				 struct perf_sample_data *data,
 				 struct pt_regs *regs)
 {
 	struct bpf_perf_event_data_kern ctx = {
 		.data = data,
-		.regs = regs,
 		.event = event,
 	};
 	int ret = 0;

+	ctx.regs = perf_arch_bpf_user_pt_regs(regs);
 	preempt_disable();
 	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
 		goto out;
 	rcu_read_lock();
 	ret = BPF_PROG_RUN(event->prog, &ctx);
 	rcu_read_unlock();
 out:
 	__this_cpu_dec(bpf_prog_active);
[... 3220 unchanged lines hidden ...]
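The hunk above stops placing the raw kernel struct pt_regs pointer directly into the BPF program context initializer and instead assigns it through perf_arch_bpf_user_pt_regs(), presumably so an architecture whose kernel register layout differs from the user-visible one can hand the BPF program the latter. The snippet below is a minimal, hypothetical sketch of that hook pattern, not kernel code: the struct names, the gpr0 field, and the identity-macro default are stand-ins invented for illustration, assuming the hook falls back to returning the raw registers when an architecture provides no override.

#include <stdio.h>

/* Stand-in register type; the real kernel type is arch-specific. */
struct pt_regs { long gpr0; };

/*
 * Assumed default: when an architecture does not define its own
 * perf_arch_bpf_user_pt_regs(), hand back the raw pt_regs unchanged.
 */
#ifndef perf_arch_bpf_user_pt_regs
# define perf_arch_bpf_user_pt_regs(regs) (regs)
#endif

/* Simplified stand-in for struct bpf_perf_event_data_kern. */
struct sample_ctx {
	struct pt_regs *regs;
};

int main(void)
{
	struct pt_regs regs = { .gpr0 = 42 };
	struct sample_ctx ctx = { 0 };

	/* Mirrors the new assignment in bpf_overflow_handler(). */
	ctx.regs = perf_arch_bpf_user_pt_regs(&regs);

	printf("gpr0 as seen through ctx.regs: %ld\n", ctx.regs->gpr0);
	return 0;
}

With the default identity macro this behaves exactly like the old initializer-based assignment; an architecture override would only change which register view ctx.regs points at, leaving the handler's control flow untouched.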