1 // SPDX-License-Identifier: GPL-2.0
2 #ifndef PERF_LOCK_CONTENTION_H
3 #define PERF_LOCK_CONTENTION_H
4
5 #include <linux/list.h>
6 #include <linux/rbtree.h>
7
/*
 * User-supplied filters restricting which locks are tracked/reported.
 * Each nr_* field gives the number of entries in the matching array.
 */
struct lock_filter {
	int		nr_types;	/* entries in types[] */
	int		nr_addrs;	/* entries in addrs[] */
	int		nr_syms;	/* entries in syms[] */
	int		nr_cgrps;	/* entries in cgrps[] */
	int		nr_slabs;	/* entries in slabs[] */
	unsigned int	*types;		/* lock type flags to match (presumably LCB_F_* — confirm) */
	unsigned long	*addrs;		/* lock instance addresses to match */
	char		**syms;		/* lock symbol names to match */
	u64		*cgrps;		/* cgroup ids to match */
	char		**slabs;	/* slab cache names to match */
};
20
/*
 * One per-lock delay entry, identifying a lock by symbol and/or address.
 * NOTE(review): presumably used to inject an artificial delay on the
 * matched lock (see lock_contention.delays/nr_delays) — confirm at users.
 */
struct lock_delay {
	char		*sym;	/* lock symbol name */
	unsigned long	addr;	/* lock instance address */
	unsigned long	time;	/* delay amount; presumably nsec — confirm */
};
26
/*
 * Accumulated statistics for a single lock.  Entries are kept in
 * lockhash_table via hash_entry (keyed by addr) and linked into an
 * rbtree via rb when sorted for output.
 */
struct lock_stat {
	struct hlist_node	hash_entry;	/* node in lockhash_table */
	struct rb_node		rb;		/* used for sorting */

	u64			addr;		/* address of lockdep_map, used as ID */
	char			*name;		/* for strcpy(), we cannot use const */
	u64			*callstack;	/* saved stack trace entries, if collected */

	/* event counts for this lock */
	unsigned int		nr_acquire;
	unsigned int		nr_acquired;
	unsigned int		nr_contended;
	unsigned int		nr_release;

	union {
		unsigned int	nr_readlock;	/* read-lock acquisitions */
		unsigned int	flags;		/* presumably LCB_F_* flags in contention mode — confirm */
	};
	unsigned int		nr_trylock;	/* trylock attempts */

	/* these times are in nano sec. */
	u64			avg_wait_time;
	u64			wait_time_total;
	u64			wait_time_min;
	u64			wait_time_max;

	int			broken;		/* flag of blacklist */
	int			combined;	/* presumably set when merged with another entry — confirm */
};
55
56 /*
57 * States of lock_seq_stat
58 *
 * UNINITIALIZED is required for detecting the first event of a lock.
 * Due to the nature of lock events, there is no guarantee that the
 * first observed event for a lock is acquire; it can be acquired,
 * contended or release.
63 */
/* states of the per-sequence lock state machine (struct lock_seq_stat) */
#define SEQ_STATE_UNINITIALIZED      0	       /* initial state */
#define SEQ_STATE_RELEASED	1	/* lock was released */
#define SEQ_STATE_ACQUIRING	2	/* acquire seen, not yet acquired */
#define SEQ_STATE_ACQUIRED	3	/* lock is held */
#define SEQ_STATE_READ_ACQUIRED	4	/* held for read (nesting tracked by read_count) */
#define SEQ_STATE_CONTENDED	5	/* contended while acquiring */
70
71 /*
72 * MAX_LOCK_DEPTH
73 * Imported from include/linux/sched.h.
74 * Should this be synchronized?
75 */
76 #define MAX_LOCK_DEPTH 48
77
78 /* based on kernel/lockdep.c */
79 #define LOCKHASH_BITS 12
80 #define LOCKHASH_SIZE (1UL << LOCKHASH_BITS)
81
82 extern struct hlist_head *lockhash_table;
83
84 /*
85 * struct lock_seq_stat:
86 * Place to put on state of one lock sequence
87 * 1) acquire -> acquired -> release
88 * 2) acquire -> contended -> acquired -> release
89 * 3) acquire (with read or try) -> release
90 * 4) Are there other patterns?
91 */
struct lock_seq_stat {
	struct list_head	list;		/* node in thread_stat->seq_list */
	int			state;		/* one of SEQ_STATE_* */
	u64			prev_event_time; /* timestamp of the previous event in this sequence */
	u64			addr;		/* lock address this sequence refers to */

	int			read_count;	/* read-lock acquisition count (presumably for nested readers — confirm) */
};
100
/* Per-thread state: the in-flight lock sequences of one thread. */
struct thread_stat {
	struct rb_node		rb;		/* node in a thread tree (presumably keyed by tid — confirm) */

	u32			tid;		/* thread id */
	struct list_head	seq_list;	/* list of struct lock_seq_stat */
};
107
108 /*
109 * CONTENTION_STACK_DEPTH
110 * Number of stack trace entries to find callers
111 */
112 #define CONTENTION_STACK_DEPTH 8
113
114 /*
115 * CONTENTION_STACK_SKIP
116 * Number of stack trace entries to skip when finding callers.
117 * The first few entries belong to the locking implementation itself.
118 */
119 #define CONTENTION_STACK_SKIP 4
120
121 /*
122 * flags for lock:contention_begin
123 * Imported from include/trace/events/lock.h.
124 */
#define LCB_F_SPIN	(1U << 0)	/* spinning (spinlock-like) lock */
#define LCB_F_READ	(1U << 1)	/* shared/read acquisition */
#define LCB_F_WRITE	(1U << 2)	/* exclusive/write acquisition */
#define LCB_F_RT	(1U << 3)	/* realtime (RT) lock variant */
#define LCB_F_PERCPU	(1U << 4)	/* per-cpu lock */
#define LCB_F_MUTEX	(1U << 5)	/* mutex */
131
132 struct evlist;
133 struct machine;
134 struct target;
135
/*
 * Counters of contention records that could not be processed, broken
 * down by failure reason.  NOTE(review): exact increment sites live in
 * the BPF reader — confirm meanings against lock_contention_read().
 */
struct lock_contention_fails {
	int	task;	/* task/thread lookup failed — presumably */
	int	stack;	/* callstack missing or unusable — presumably */
	int	time;	/* timing data problem — presumably */
	int	data;	/* other data retrieval failure — presumably */
};
142
/*
 * Top-level state for lock contention analysis; shared with the BPF
 * skeleton helpers declared below.
 */
struct lock_contention {
	struct evlist			*evlist;	/* perf events in use */
	struct target			*target;	/* profiling target (pid/tid/cpu/...) */
	struct machine			*machine;	/* symbol/thread resolution context */
	struct hlist_head		*result;	/* hash table of struct lock_stat results */
	struct lock_filter		*filters;	/* user filters (types/addrs/syms/cgroups/slabs) */
	struct lock_delay		*delays;	/* array of nr_delays delay entries */
	struct lock_contention_fails	fails;		/* counts of unprocessable records */
	struct rb_root			cgroups;	/* cgroup tree (presumably for cgroup aggregation — confirm) */
	void				*btf;		/* BTF handle (presumably struct btf * — confirm) */
	unsigned long			map_nr_entries;	/* presumably max entries for the BPF maps — confirm */
	int				max_stack;	/* max callstack depth to collect */
	int				stack_skip;	/* entries to skip, cf. CONTENTION_STACK_SKIP */
	int				aggr_mode;	/* aggregation mode */
	int				owner;		/* presumably a flag enabling owner tracking — confirm */
	int				nr_filtered;	/* presumably records dropped by filters — confirm */
	int				nr_delays;	/* entries in delays[] */
	bool				save_callstack;	/* keep callstacks in lock_stat */
};
162
struct option;
/* Option parsing callback for a call-stack related option (parse_options() convention). */
int parse_call_stack(const struct option *opt, const char *str, int unset);
/* Return true if the current configuration requires callstack collection. */
bool needs_callstack(void);

/* Look up an existing lock_stat by lock address (presumably NULL if absent — confirm). */
struct lock_stat *lock_stat_find(u64 addr);
/* Like lock_stat_find() but creates the entry with name/flags when missing. */
struct lock_stat *lock_stat_findnew(u64 addr, const char *name, int flags);

/* Check a collected callstack against the user's callstack filters. */
bool match_callstack_filter(struct machine *machine, u64 *callstack, int max_stack_depth);
171
172
173 #ifdef HAVE_BPF_SKEL
/*
 * BPF-based contention collection (real implementations come from the
 * BPF skeleton).  Lifecycle inferred from names — confirm:
 * prepare -> start -> ... -> stop -> read -> finish.
 */
int lock_contention_prepare(struct lock_contention *con);
int lock_contention_start(void);
int lock_contention_stop(void);
int lock_contention_read(struct lock_contention *con);
int lock_contention_finish(struct lock_contention *con);

/* Pop one collected owner stack trace (presumably NULL when exhausted — confirm). */
struct lock_stat *pop_owner_stack_trace(struct lock_contention *con);
181
182 #else /* !HAVE_BPF_SKEL */
183
/* Without BPF skeleton support there is nothing to set up; succeed as a no-op. */
static inline int lock_contention_prepare(struct lock_contention *con __maybe_unused) { return 0; }
188
/* No-op start when built without BPF skeleton support. */
static inline int lock_contention_start(void)
{
	return 0;
}
/* No-op stop when built without BPF skeleton support. */
static inline int lock_contention_stop(void)
{
	return 0;
}
/* Nothing to tear down without BPF skeleton support; succeed as a no-op. */
static inline int lock_contention_finish(struct lock_contention *con __maybe_unused) { return 0; }
195
/* No BPF data to read; report success with nothing collected. */
static inline int lock_contention_read(struct lock_contention *con __maybe_unused) { return 0; }
200
/* No owner stack traces are collected without BPF skeleton support. */
static inline struct lock_stat *pop_owner_stack_trace(struct lock_contention *con __maybe_unused) { return NULL; }
205
206 #endif /* HAVE_BPF_SKEL */
207
208 #endif /* PERF_LOCK_CONTENTION_H */
209