// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_tracing.h>
#include <stdbool.h>
#include "timerlat_bpf.h"

#define nosubprog __always_inline
#define MAX_ENTRIES_DEFAULT 4096

char LICENSE[] SEC("license") = "GPL";

struct trace_event_raw_timerlat_sample {
	unsigned long long timer_latency;
	int context;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, MAX_ENTRIES_DEFAULT);
	__type(key, unsigned int);
	__type(value, unsigned long long);
} hist_irq SEC(".maps"), hist_thread SEC(".maps"), hist_user SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, SUMMARY_FIELD_N);
	__type(key, unsigned int);
	__type(value, unsigned long long);
} summary_irq SEC(".maps"), summary_thread SEC(".maps"), summary_user SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, unsigned int);
	__type(value, unsigned long long);
} stop_tracing SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 1);
} signal_stop_tracing SEC(".maps");

/* Params to be set by rtla */
const volatile int bucket_size = 1;
const volatile int output_divisor = 1000;
const volatile int entries = 256;
const volatile int irq_threshold;
const volatile int thread_threshold;
const volatile bool aa_only;

nosubprog unsigned long long map_get(void *map,
				     unsigned int key)
{
	unsigned long long *value_ptr;

	value_ptr = bpf_map_lookup_elem(map, &key);

	return !value_ptr ? 0 : *value_ptr;
}

nosubprog void map_set(void *map,
		       unsigned int key,
		       unsigned long long value)
{
	bpf_map_update_elem(map, &key, &value, BPF_ANY);
}

nosubprog void map_increment(void *map,
			     unsigned int key)
{
	map_set(map, key, map_get(map, key) + 1);
}

nosubprog void update_main_hist(void *map,
				int bucket)
{
	if (entries == 0)
		/* No histogram */
		return;

	if (bucket >= entries)
		/* Overflow */
		return;

	map_increment(map, bucket);
}

nosubprog void update_summary(void *map,
			      unsigned long long latency,
			      int bucket)
{
	if (aa_only)
		/* Auto-analysis only, nothing to be done here */
		return;

	map_set(map, SUMMARY_CURRENT, latency);

	if (bucket >= entries)
		/* Overflow */
		map_increment(map, SUMMARY_OVERFLOW);

	if (latency > map_get(map, SUMMARY_MAX))
		map_set(map, SUMMARY_MAX, latency);

	if (latency < map_get(map, SUMMARY_MIN) || map_get(map, SUMMARY_COUNT) == 0)
		map_set(map, SUMMARY_MIN, latency);

	map_increment(map, SUMMARY_COUNT);
	map_set(map, SUMMARY_SUM, map_get(map, SUMMARY_SUM) + latency);
}

nosubprog void set_stop_tracing(void)
{
	int value = 0;

	/* Suppress further sample processing */
	map_set(&stop_tracing, 0, 1);

	/* Signal to userspace */
	bpf_ringbuf_output(&signal_stop_tracing, &value, sizeof(value), 0);
}

SEC("tp/osnoise/timerlat_sample")
int handle_timerlat_sample(struct trace_event_raw_timerlat_sample *tp_args)
{
	unsigned long long latency, latency_us;
	int bucket;

	if (map_get(&stop_tracing, 0))
		return 0;

	latency = tp_args->timer_latency / output_divisor;
	latency_us = tp_args->timer_latency / 1000;
	bucket = latency / bucket_size;

	if (tp_args->context == 0) {
		update_main_hist(&hist_irq, bucket);
		update_summary(&summary_irq, latency, bucket);

		if (irq_threshold != 0 && latency_us >= irq_threshold)
			set_stop_tracing();
	} else if (tp_args->context == 1) {
		update_main_hist(&hist_thread, bucket);
		update_summary(&summary_thread, latency, bucket);

		if (thread_threshold != 0 && latency_us >= thread_threshold)
			set_stop_tracing();
	} else {
		update_main_hist(&hist_user, bucket);
		update_summary(&summary_user, latency, bucket);
	}

	return 0;
}
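
/*
 * Illustrative user-space sketch (not part of the BPF object above): one
 * way a loader such as rtla could drive this program through a libbpf
 * skeleton.  The skeleton header name, the struct timerlat_bpf type and
 * the timerlat_bpf__*() helpers are assumed to come from "bpftool gen
 * skeleton" output; the real rtla wiring may differ.  Kept inside a
 * comment so it does not end up in the BPF object.
 *
 *	#include "timerlat.bpf.skel.h"
 *
 *	static int stop_cb(void *ctx, void *data, size_t size)
 *	{
 *		// Any record on signal_stop_tracing means a threshold fired.
 *		return 0;
 *	}
 *
 *	static int example_run(void)
 *	{
 *		struct timerlat_bpf *obj;
 *		struct ring_buffer *rb;
 *
 *		obj = timerlat_bpf__open();
 *		if (!obj)
 *			return -1;
 *
 *		// "Params to be set by rtla" are const volatile, i.e. frozen
 *		// .rodata, so they must be set before load.  Values here are
 *		// examples only.
 *		obj->rodata->bucket_size = 1;
 *		obj->rodata->output_divisor = 1000;	// report in microseconds
 *		obj->rodata->entries = 256;
 *		obj->rodata->thread_threshold = 100;	// us
 *
 *		if (timerlat_bpf__load(obj) || timerlat_bpf__attach(obj)) {
 *			timerlat_bpf__destroy(obj);
 *			return -1;
 *		}
 *
 *		rb = ring_buffer__new(bpf_map__fd(obj->maps.signal_stop_tracing),
 *				      stop_cb, NULL, NULL);
 *		if (!rb) {
 *			timerlat_bpf__destroy(obj);
 *			return -1;
 *		}
 *
 *		// Block until set_stop_tracing() posts to the ring buffer,
 *		// then read the hist and summary per-CPU maps for the report.
 *		ring_buffer__poll(rb, -1);
 *
 *		ring_buffer__free(rb);
 *		timerlat_bpf__destroy(obj);
 *		return 0;
 *	}
 */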