xref: /linux/tools/perf/bench/futex-hash.c (revision 3f0a50f345f78183f6e9b39c2f45ca5dcaa511ca)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013  Davidlohr Bueso <davidlohr@hp.com>
 *
 * futex-hash: Stress the hell out of the Linux kernel futex uaddr hashing.
 *
 * This program is particularly useful for measuring the kernel's futex hash
 * table/function implementation. In order for it to make sense, use with as
 * many threads and futexes as possible.
 */
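/*
 * Example invocation (the options are defined below; the numbers here are
 * only illustrative, not tuned recommendations):
 *
 *	perf bench futex hash -t 8 -f 1024 -r 10
 */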

/* For the CLR_() macros */
#include <string.h>
#include <pthread.h>

#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <perf/cpumap.h>

#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include "bench.h"
#include "futex.h"

#include <err.h>

static bool done = false;
static int futex_flag = 0;

struct timeval bench__start, bench__end, bench__runtime;
static pthread_mutex_t thread_lock;
static unsigned int threads_starting;
static struct stats throughput_stats;
static pthread_cond_t thread_parent, thread_worker;

struct worker {
	int tid;
	u_int32_t *futex;
	pthread_t thread;
	unsigned long ops;
};

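/*
 * Tunable defaults: 1024 futexes per thread and a 10 second run. nthreads
 * is left at 0 and falls back to the number of CPUs at runtime.
 */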
static struct bench_futex_parameters params = {
	.nfutexes = 1024,
	.runtime  = 10,
};

static const struct option options[] = {
	OPT_UINTEGER('t', "threads", &params.nthreads, "Specify number of threads"),
	OPT_UINTEGER('r', "runtime", &params.runtime, "Specify runtime (in seconds)"),
	OPT_UINTEGER('f', "futexes", &params.nfutexes, "Specify number of futexes per thread"),
	OPT_BOOLEAN( 's', "silent",  &params.silent, "Silent mode: do not display data/details"),
	OPT_BOOLEAN( 'S', "shared",  &params.fshared, "Use shared futexes instead of private ones"),
	OPT_BOOLEAN( 'm', "mlockall", &params.mlockall, "Lock all current and future memory"),
	OPT_END()
};

static const char * const bench_futex_hash_usage[] = {
	"perf bench futex hash <options>",
	NULL
};

static void *workerfn(void *arg)
{
	int ret;
	struct worker *w = (struct worker *) arg;
	unsigned int i;
	unsigned long ops = w->ops; /* avoid cacheline bouncing */

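	/*
	 * Rendezvous with the main thread: report in, then block until all
	 * workers have been created and the start signal is broadcast.
	 */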
	pthread_mutex_lock(&thread_lock);
	threads_starting--;
	if (!threads_starting)
		pthread_cond_signal(&thread_parent);
	pthread_cond_wait(&thread_worker, &thread_lock);
	pthread_mutex_unlock(&thread_lock);

	do {
		for (i = 0; i < params.nfutexes; i++, ops++) {
			/*
			 * We want the futex calls to fail in order to stress
			 * the hashing of uaddr and not measure other steps,
			 * such as internal waitqueue handling, thus enlarging
			 * the critical region protected by hb->lock.
			 */
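			/*
			 * The futex words are calloc()'d to zero and never
			 * match the expected value 1234, so each call is
			 * expected to fail immediately with EAGAIN.
			 */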
			ret = futex_wait(&w->futex[i], 1234, NULL, futex_flag);
			if (!params.silent &&
			    (!ret || (errno != EAGAIN && errno != EWOULDBLOCK)))
				warn("Unexpected futex return");
		}
	} while (!done);

	w->ops = ops;
	return NULL;
}

static void toggle_done(int sig __maybe_unused,
			siginfo_t *info __maybe_unused,
			void *uc __maybe_unused)
{
	/* inform all threads that we're done for the day */
	done = true;
	gettimeofday(&bench__end, NULL);
	timersub(&bench__end, &bench__start, &bench__runtime);
}

static void print_summary(void)
{
	unsigned long avg = avg_stats(&throughput_stats);
	double stddev = stddev_stats(&throughput_stats);

	printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
	       !params.silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
	       (int)bench__runtime.tv_sec);
}

int bench_futex_hash(int argc, const char **argv)
{
	int ret = 0;
	cpu_set_t cpuset;
	struct sigaction act;
	unsigned int i;
	pthread_attr_t thread_attr;
	struct worker *worker = NULL;
	struct perf_cpu_map *cpu;

	argc = parse_options(argc, argv, options, bench_futex_hash_usage, 0);
	if (argc) {
		usage_with_options(bench_futex_hash_usage, options);
		exit(EXIT_FAILURE);
	}

	cpu = perf_cpu_map__new(NULL);
	if (!cpu)
		goto errmem;

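	/*
	 * SIGINT (Ctrl-C) ends the run early: toggle_done() stops the
	 * workers and records the elapsed time used for the summary.
	 */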
	memset(&act, 0, sizeof(act));
	sigfillset(&act.sa_mask);
	act.sa_sigaction = toggle_done;
	sigaction(SIGINT, &act, NULL);

	if (params.mlockall) {
		if (mlockall(MCL_CURRENT | MCL_FUTURE))
			err(EXIT_FAILURE, "mlockall");
	}

	if (!params.nthreads) /* default to the number of CPUs */
		params.nthreads = perf_cpu_map__nr(cpu);

	worker = calloc(params.nthreads, sizeof(*worker));
	if (!worker)
		goto errmem;

	if (!params.fshared)
		futex_flag = FUTEX_PRIVATE_FLAG;

	printf("Run summary [PID %d]: %u threads, each operating on %u [%s] futexes for %u secs.\n\n",
	       getpid(), params.nthreads, params.nfutexes, params.fshared ? "shared":"private", params.runtime);

	init_stats(&throughput_stats);
	pthread_mutex_init(&thread_lock, NULL);
	pthread_cond_init(&thread_parent, NULL);
	pthread_cond_init(&thread_worker, NULL);

	threads_starting = params.nthreads;
	pthread_attr_init(&thread_attr);
	gettimeofday(&bench__start, NULL);
	for (i = 0; i < params.nthreads; i++) {
		worker[i].tid = i;
		worker[i].futex = calloc(params.nfutexes, sizeof(*worker[i].futex));
		if (!worker[i].futex)
			goto errmem;

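		/* Pin each worker to a CPU, round-robin over the CPU map. */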
		CPU_ZERO(&cpuset);
		CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);

		ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset);
		if (ret)
			err(EXIT_FAILURE, "pthread_attr_setaffinity_np");

		ret = pthread_create(&worker[i].thread, &thread_attr, workerfn,
				     (void *)&worker[i]);
		if (ret)
			err(EXIT_FAILURE, "pthread_create");

	}
	pthread_attr_destroy(&thread_attr);

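	/*
	 * Wait until every worker has checked in, then release them all at
	 * once so the measurement window starts together.
	 */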
	pthread_mutex_lock(&thread_lock);
	while (threads_starting)
		pthread_cond_wait(&thread_parent, &thread_lock);
	pthread_cond_broadcast(&thread_worker);
	pthread_mutex_unlock(&thread_lock);

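	/* Run for the requested time, then stop the workers as SIGINT would. */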
	sleep(params.runtime);
	toggle_done(0, NULL, NULL);

	for (i = 0; i < params.nthreads; i++) {
		ret = pthread_join(worker[i].thread, NULL);
		if (ret)
			err(EXIT_FAILURE, "pthread_join");
	}

	/* cleanup & report results */
	pthread_cond_destroy(&thread_parent);
	pthread_cond_destroy(&thread_worker);
	pthread_mutex_destroy(&thread_lock);

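	/*
	 * Per-thread throughput is ops completed over the measured elapsed
	 * seconds; each value feeds the summary statistics.
	 */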
	for (i = 0; i < params.nthreads; i++) {
		unsigned long t = bench__runtime.tv_sec > 0 ?
			worker[i].ops / bench__runtime.tv_sec : 0;
		update_stats(&throughput_stats, t);
		if (!params.silent) {
			if (params.nfutexes == 1)
				printf("[thread %2d] futex: %p [ %ld ops/sec ]\n",
				       worker[i].tid, &worker[i].futex[0], t);
			else
				printf("[thread %2d] futexes: %p ... %p [ %ld ops/sec ]\n",
				       worker[i].tid, &worker[i].futex[0],
				       &worker[i].futex[params.nfutexes-1], t);
		}

		zfree(&worker[i].futex);
	}

	print_summary();

	free(worker);
	perf_cpu_map__put(cpu);
	return ret;
errmem:
	err(EXIT_FAILURE, "calloc");
}
239