xref: /linux/tools/perf/bench/futex-wake-parallel.c (revision a4eb44a6435d6d8f9e642407a4a06f65eb90ca04)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Davidlohr Bueso.
 *
 * Block a bunch of threads and let parallel waker threads wake up an
 * equal number of them. The program output reflects the avg latency
 * for each individual waker thread to service its share of work. Ultimately
 * it can be used to measure futex_wake() changes.
 */
#include "bench.h"
#include <linux/compiler.h>
#include "../util/debug.h"

#ifndef HAVE_PTHREAD_BARRIER
int bench_futex_wake_parallel(int argc __maybe_unused, const char **argv __maybe_unused)
{
	pr_err("%s: pthread_barrier_t unavailable, disabling this test...\n", __func__);
	return 0;
}
#else /* HAVE_PTHREAD_BARRIER */
/* for memset() */
#include <string.h>
#include <pthread.h>

#include <signal.h>
#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include <linux/kernel.h>
#include <linux/time64.h>
#include <errno.h>
#include "futex.h"
#include <perf/cpumap.h>

#include <err.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/mman.h>

struct thread_data {
	pthread_t worker;
	unsigned int nwoken;
	struct timeval runtime;
};

static unsigned int nwakes = 1;

/* all threads will block on the same futex -- hash bucket chaos ;) */
static u_int32_t futex = 0;

static pthread_t *blocked_worker;
static bool done = false;
static pthread_mutex_t thread_lock;
static pthread_cond_t thread_parent, thread_worker;
static pthread_barrier_t barrier;
static struct stats waketime_stats, wakeup_stats;
static unsigned int threads_starting;
static int futex_flag = 0;

static struct bench_futex_parameters params;

static const struct option options[] = {
	OPT_UINTEGER('t', "threads", &params.nthreads, "Specify number of blocking threads"),
	OPT_UINTEGER('w', "nwakers", &params.nwakes, "Specify number of waking threads"),
	OPT_BOOLEAN( 's', "silent",  &params.silent, "Silent mode: do not display data/details"),
	OPT_BOOLEAN( 'S', "shared",  &params.fshared, "Use shared futexes instead of private ones"),
	OPT_BOOLEAN( 'm', "mlockall", &params.mlockall, "Lock all current and future memory"),

	OPT_END()
};

static const char * const bench_futex_wake_parallel_usage[] = {
	"perf bench futex wake-parallel <options>",
	NULL
};

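/*
 * A waker thread: wait on the shared barrier so all wakers are released
 * together, then wake up its share (nwakes) of blocked tasks with a single
 * futex_wake() call, timing how long that takes.
 */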
static void *waking_workerfn(void *arg)
{
	struct thread_data *waker = (struct thread_data *) arg;
	struct timeval start, end;

	pthread_barrier_wait(&barrier);

	gettimeofday(&start, NULL);

	waker->nwoken = futex_wake(&futex, nwakes, futex_flag);
	if (waker->nwoken != nwakes)
		warnx("couldn't wake up all tasks (%d/%d)",
		      waker->nwoken, nwakes);

	gettimeofday(&end, NULL);
	timersub(&end, &start, &waker->runtime);

	pthread_exit(NULL);
	return NULL;
}

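/*
 * Create all waker threads, release them simultaneously through the barrier
 * and wait for each of them to finish its share of wakeups.
 */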
static void wakeup_threads(struct thread_data *td, pthread_attr_t thread_attr)
{
	unsigned int i;

	pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);

	pthread_barrier_init(&barrier, NULL, params.nwakes + 1);

	/* create all waker threads; they block on the barrier until we join it */
	for (i = 0; i < params.nwakes; i++) {
		/*
		 * Thread creation order will impact per-thread latency
		 * as it affects the order in which the hb spinlock is
		 * acquired. For now let the scheduler decide.
		 */
		if (pthread_create(&td[i].worker, &thread_attr,
				   waking_workerfn, (void *)&td[i]))
			err(EXIT_FAILURE, "pthread_create");
	}

	pthread_barrier_wait(&barrier);

	for (i = 0; i < params.nwakes; i++)
		if (pthread_join(td[i].worker, NULL))
			err(EXIT_FAILURE, "pthread_join");

	pthread_barrier_destroy(&barrier);
}

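/*
 * A blocked thread: signal the parent once it is up (condvar handshake),
 * wait for the go-ahead, then block on the futex until a waker releases it.
 */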
static void *blocked_workerfn(void *arg __maybe_unused)
{
	pthread_mutex_lock(&thread_lock);
	threads_starting--;
	if (!threads_starting)
		pthread_cond_signal(&thread_parent);
	pthread_cond_wait(&thread_worker, &thread_lock);
	pthread_mutex_unlock(&thread_lock);

	while (1) { /* retry the wait if it was interrupted by a signal */
		if (futex_wait(&futex, 0, NULL, futex_flag) == 0 || errno != EINTR)
			break;
	}

	pthread_exit(NULL);
	return NULL;
}

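/*
 * Create the blocking threads, pinning each one to a CPU from the online
 * CPU map in round-robin fashion.
 */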
static void block_threads(pthread_t *w, pthread_attr_t thread_attr,
			  struct perf_cpu_map *cpu)
{
	cpu_set_t cpuset;
	unsigned int i;

	threads_starting = params.nthreads;

	/* create and block all threads */
	for (i = 0; i < params.nthreads; i++) {
		CPU_ZERO(&cpuset);
		CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);

		if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
			err(EXIT_FAILURE, "pthread_attr_setaffinity_np");

		if (pthread_create(&w[i], &thread_attr, blocked_workerfn, NULL))
			err(EXIT_FAILURE, "pthread_create");
	}
}

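/*
 * Print statistics for a single run: the average number of tasks each waker
 * woke and the average time spent in futex_wake(), with relative stddev.
 */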
static void print_run(struct thread_data *waking_worker, unsigned int run_num)
{
	unsigned int i, wakeup_avg;
	double waketime_avg, waketime_stddev;
	struct stats __waketime_stats, __wakeup_stats;

	init_stats(&__wakeup_stats);
	init_stats(&__waketime_stats);

	for (i = 0; i < params.nwakes; i++) {
		update_stats(&__waketime_stats, waking_worker[i].runtime.tv_usec);
		update_stats(&__wakeup_stats, waking_worker[i].nwoken);
	}

	waketime_avg = avg_stats(&__waketime_stats);
	waketime_stddev = stddev_stats(&__waketime_stats);
	wakeup_avg = avg_stats(&__wakeup_stats);

	printf("[Run %d]: Avg per-thread latency (waking %d/%d threads) "
	       "in %.4f ms (+-%.2f%%)\n", run_num + 1, wakeup_avg,
	       params.nthreads, waketime_avg / USEC_PER_MSEC,
	       rel_stddev_stats(waketime_stddev, waketime_avg));
}

static void print_summary(void)
{
	unsigned int wakeup_avg;
	double waketime_avg, waketime_stddev;

	waketime_avg = avg_stats(&waketime_stats);
	waketime_stddev = stddev_stats(&waketime_stats);
	wakeup_avg = avg_stats(&wakeup_stats);

	printf("Avg per-thread latency (waking %d/%d threads) in %.4f ms (+-%.2f%%)\n",
	       wakeup_avg,
	       params.nthreads,
	       waketime_avg / USEC_PER_MSEC,
	       rel_stddev_stats(waketime_stddev, waketime_avg));
}

static void do_run_stats(struct thread_data *waking_worker)
{
	unsigned int i;

	for (i = 0; i < params.nwakes; i++) {
		update_stats(&waketime_stats, waking_worker[i].runtime.tv_usec);
		update_stats(&wakeup_stats, waking_worker[i].nwoken);
	}
}

static void toggle_done(int sig __maybe_unused,
			siginfo_t *info __maybe_unused,
			void *uc __maybe_unused)
{
	done = true;
}

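/*
 * Benchmark entry point: parse options, then for each of bench_repeat runs
 * block nthreads tasks on the futex, wake them with nwakers waker threads
 * and collect the per-run wakeup statistics.
 */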
int bench_futex_wake_parallel(int argc, const char **argv)
{
	int ret = 0;
	unsigned int i, j;
	struct sigaction act;
	pthread_attr_t thread_attr;
	struct thread_data *waking_worker;
	struct perf_cpu_map *cpu;

	argc = parse_options(argc, argv, options,
			     bench_futex_wake_parallel_usage, 0);
	if (argc) {
		usage_with_options(bench_futex_wake_parallel_usage, options);
		exit(EXIT_FAILURE);
	}

	memset(&act, 0, sizeof(act));
	sigfillset(&act.sa_mask);
	act.sa_sigaction = toggle_done;
	sigaction(SIGINT, &act, NULL);

	if (params.mlockall) {
		if (mlockall(MCL_CURRENT | MCL_FUTURE))
			err(EXIT_FAILURE, "mlockall");
	}

	cpu = perf_cpu_map__new(NULL);
	if (!cpu)
		err(EXIT_FAILURE, "perf_cpu_map__new");

	if (!params.nthreads)
		params.nthreads = perf_cpu_map__nr(cpu);

	/* some sanity checks */
	if (params.nwakes > params.nthreads ||
	    !params.nwakes)
		params.nwakes = params.nthreads;

	if (params.nthreads % params.nwakes)
		errx(EXIT_FAILURE, "Number of threads must be a multiple of the number of wakers");
	/*
	 * Each waker thread will wake up nwakes tasks in
	 * a single futex_wake() call.
	 */
	nwakes = params.nthreads/params.nwakes;

	blocked_worker = calloc(params.nthreads, sizeof(*blocked_worker));
	if (!blocked_worker)
		err(EXIT_FAILURE, "calloc");

	if (!params.fshared)
		futex_flag = FUTEX_PRIVATE_FLAG;

	printf("Run summary [PID %d]: blocking on %d threads (at [%s] "
	       "futex %p), %d threads waking up %d at a time.\n\n",
	       getpid(), params.nthreads, params.fshared ? "shared":"private",
	       &futex, params.nwakes, nwakes);

	init_stats(&wakeup_stats);
	init_stats(&waketime_stats);

	pthread_attr_init(&thread_attr);
	pthread_mutex_init(&thread_lock, NULL);
	pthread_cond_init(&thread_parent, NULL);
	pthread_cond_init(&thread_worker, NULL);

	for (j = 0; j < bench_repeat && !done; j++) {
		waking_worker = calloc(params.nwakes, sizeof(*waking_worker));
		if (!waking_worker)
			err(EXIT_FAILURE, "calloc");

		/* create, launch & block all threads */
		block_threads(blocked_worker, thread_attr, cpu);

		/* make sure all threads are already blocked */
		pthread_mutex_lock(&thread_lock);
		while (threads_starting)
			pthread_cond_wait(&thread_parent, &thread_lock);
		pthread_cond_broadcast(&thread_worker);
		pthread_mutex_unlock(&thread_lock);

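		/* grace period: let the released workers actually reach futex_wait() */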
		usleep(100000);

		/* Ok, all threads are patiently blocked, start waking folks up */
		wakeup_threads(waking_worker, thread_attr);

		for (i = 0; i < params.nthreads; i++) {
			ret = pthread_join(blocked_worker[i], NULL);
			if (ret)
				err(EXIT_FAILURE, "pthread_join");
		}

		do_run_stats(waking_worker);
		if (!params.silent)
			print_run(waking_worker, j);

		free(waking_worker);
	}

	/* cleanup & report results */
	pthread_cond_destroy(&thread_parent);
	pthread_cond_destroy(&thread_worker);
	pthread_mutex_destroy(&thread_lock);
	pthread_attr_destroy(&thread_attr);

	print_summary();

	free(blocked_worker);
	perf_cpu_map__put(cpu);
	return ret;
}
#endif /* HAVE_PTHREAD_BARRIER */