// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/bpf.h>
#include <asm/unistd.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"

char _license[] SEC("license") = "GPL";

#define CPU_MASK 255
#define MAX_CPUS (CPU_MASK + 1) /* should match MAX_BUCKETS in benchs/bench_trigger.c */

/* matches struct counter in bench.h */
struct counter {
	long value;
} __attribute__((aligned(128))); /* cache-line aligned to avoid false sharing between CPUs */

/* Per-CPU hit counters, read by user space to compute trigger throughput. */
struct counter hits[MAX_CPUS];

/*
 * Bump this CPU's hit counter. The atomic add is for safety; the & CPU_MASK
 * bounds the index in case the CPU id exceeds MAX_CPUS - 1.
 */
static __always_inline void inc_counter(void)
{
	int cpu = bpf_get_smp_processor_id();

	__sync_add_and_fetch(&hits[cpu & CPU_MASK].value, 1);
}

/* Set from user space before load; enables stack capture in do_stacktrace(). */
volatile const int stacktrace;

typedef __u64 stack_trace_t[128];

/* Single-slot per-CPU scratch buffer used as the bpf_get_stack() destination. */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, stack_trace_t);
} stack_heap SEC(".maps");

/*
 * Optionally capture a kernel stack trace into the per-CPU scratch buffer,
 * to benchmark the added cost of stack collection. No-op unless the
 * 'stacktrace' rodata knob was set before program load.
 */
static __always_inline void do_stacktrace(void *ctx)
{
	if (!stacktrace)
		return;

	__u64 *ptr = bpf_map_lookup_elem(&stack_heap, &(__u32){0});

	if (ptr)
		bpf_get_stack(ctx, ptr, sizeof(stack_trace_t), 0);
}

/* Common body for all benchmark programs: count the hit, maybe grab a stack. */
static __always_inline void handle(void *ctx)
{
	inc_counter();
	do_stacktrace(ctx);
}

/* Triggered from user space; measures single-shot uprobe attach overhead. */
SEC("?uprobe")
int bench_trigger_uprobe(void *ctx)
{
	inc_counter();
	return 0;
}

/* Same as above but attached via the uprobe.multi link type. */
SEC("?uprobe.multi")
int bench_trigger_uprobe_multi(void *ctx)
{
	inc_counter();
	return 0;
}

/* Iterations per raw_tp invocation; set from user space before load. */
const volatile int batch_iters = 0;

/*
 * Baseline: count batch_iters hits per invocation while calling the same
 * helper the kprobe/fentry benchmarks attach to, so results are comparable.
 */
SEC("?raw_tp")
int trigger_kernel_count(void *ctx)
{
	int i;

	for (i = 0; i < batch_iters; i++) {
		inc_counter();
		bpf_get_numa_node_id();
	}

	return 0;
}

/*
 * Driver program: repeatedly calls bpf_get_numa_node_id(), which the
 * kprobe/kretprobe/fentry/fexit benchmark programs below attach to.
 */
SEC("?raw_tp")
int trigger_driver(void *ctx)
{
	int i;

	for (i = 0; i < batch_iters; i++)
		(void)bpf_get_numa_node_id(); /* attach point for benchmarking */

	return 0;
}

/* Test kfunc used as the attach point for the fmod_ret benchmark. */
extern int bpf_modify_return_test_tp(int nonce) __ksym __weak;

/*
 * Driver program for the fmod_ret and tracepoint benchmarks: repeatedly
 * calls the bpf_modify_return_test_tp() kfunc as the attach point.
 */
SEC("?raw_tp")
int trigger_driver_kfunc(void *ctx)
{
	int i;

	for (i = 0; i < batch_iters; i++)
		(void)bpf_modify_return_test_tp(0); /* attach point for benchmarking */

	return 0;
}

/* Measures kprobe entry overhead on the driver's helper call. */
SEC("?kprobe/bpf_get_numa_node_id")
int bench_trigger_kprobe(void *ctx)
{
	handle(ctx);
	return 0;
}

/* Measures kretprobe (function return) overhead. */
SEC("?kretprobe/bpf_get_numa_node_id")
int bench_trigger_kretprobe(void *ctx)
{
	handle(ctx);
	return 0;
}

/* Measures kprobe.multi attach-mode overhead. */
SEC("?kprobe.multi/bpf_get_numa_node_id")
int bench_trigger_kprobe_multi(void *ctx)
{
	handle(ctx);
	return 0;
}

/* Empty body on purpose: isolates pure kprobe.multi dispatch cost. */
SEC("?kprobe.multi/bpf_get_numa_node_id")
int bench_kprobe_multi_empty(void *ctx)
{
	return 0;
}

/* Measures kretprobe.multi attach-mode overhead. */
SEC("?kretprobe.multi/bpf_get_numa_node_id")
int bench_trigger_kretprobe_multi(void *ctx)
{
	handle(ctx);
	return 0;
}

/* Empty body on purpose: isolates pure kretprobe.multi dispatch cost. */
SEC("?kretprobe.multi/bpf_get_numa_node_id")
int bench_kretprobe_multi_empty(void *ctx)
{
	return 0;
}

/* Measures fentry (BPF trampoline entry) overhead. */
SEC("?fentry/bpf_get_numa_node_id")
int bench_trigger_fentry(void *ctx)
{
	handle(ctx);
	return 0;
}

/* Measures fexit (BPF trampoline exit) overhead. */
SEC("?fexit/bpf_get_numa_node_id")
int bench_trigger_fexit(void *ctx)
{
	handle(ctx);
	return 0;
}

/* Measures fmod_ret overhead; returns -EINVAL to override the kfunc's return. */
SEC("?fmod_ret/bpf_modify_return_test_tp")
int bench_trigger_fmodret(void *ctx)
{
	handle(ctx);
	return -22;
}

/* Measures classic tracepoint overhead on the tp fired by the kfunc driver. */
SEC("?tp/bpf_test_run/bpf_trigger_tp")
int bench_trigger_tp(void *ctx)
{
	handle(ctx);
	return 0;
}

/* Measures raw tracepoint overhead on the same trigger tracepoint. */
SEC("?raw_tp/bpf_trigger_tp")
int bench_trigger_rawtp(void *ctx)
{
	handle(ctx);
	return 0;
}