// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Davidlohr Bueso <davidlohr@hp.com>
 *
 * futex-hash: Stress the hell out of the Linux kernel futex uaddr hashing.
 *
 * This program is particularly useful for measuring the kernel's futex hash
 * table/function implementation. In order for it to make sense, use with as
 * many threads and futexes as possible.
 */
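
/*
 * An illustrative invocation (flag values chosen arbitrarily; see the option
 * table below):
 *
 *	perf bench futex hash -t 8 -f 1024 -r 10
 *
 * runs 8 worker threads, each issuing futex_wait() calls on its own array of
 * 1024 private futexes, for 10 seconds.
 */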

/* For the CLR_() macros */
#include <string.h>
#include <pthread.h>

#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <perf/cpumap.h>

#include "../util/mutex.h"
#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include "bench.h"
#include "futex.h"

#include <err.h>

static bool done = false;
static int futex_flag = 0;

struct timeval bench__start, bench__end, bench__runtime;
static struct mutex thread_lock;
static unsigned int threads_starting;
static struct stats throughput_stats;
static struct cond thread_parent, thread_worker;

struct worker {
	int tid;
	u_int32_t *futex;
	pthread_t thread;
	unsigned long ops;
};

static struct bench_futex_parameters params = {
	.nfutexes = 1024,
	.runtime = 10,
	.nbuckets = -1,
};

static const struct option options[] = {
	OPT_INTEGER( 'b', "buckets", &params.nbuckets, "Specify amount of hash buckets"),
	OPT_BOOLEAN( 'I', "immutable", &params.buckets_immutable, "Make the hash buckets immutable"),
	OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
	OPT_UINTEGER('r', "runtime", &params.runtime, "Specify runtime (in seconds)"),
	OPT_UINTEGER('f', "futexes", &params.nfutexes, "Specify amount of futexes per threads"),
	OPT_BOOLEAN( 's', "silent", &params.silent, "Silent mode: do not display data/details"),
	OPT_BOOLEAN( 'S', "shared", &params.fshared, "Use shared futexes instead of private ones"),
	OPT_BOOLEAN( 'm', "mlockall", &params.mlockall, "Lock all current and future memory"),
	OPT_END()
};

static const char * const bench_futex_hash_usage[] = {
	"perf bench futex hash <options>",
	NULL
};

static void *workerfn(void *arg)
{
	int ret;
	struct worker *w = (struct worker *) arg;
	unsigned int i;
	unsigned long ops = w->ops; /* avoid cacheline bouncing */

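	/*
	 * Startup rendezvous: the last worker to arrive wakes the parent,
	 * then all workers wait for the parent's broadcast so the measured
	 * loop starts on every thread at roughly the same time.
	 */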
	mutex_lock(&thread_lock);
	threads_starting--;
	if (!threads_starting)
		cond_signal(&thread_parent);
	cond_wait(&thread_worker, &thread_lock);
	mutex_unlock(&thread_lock);

	do {
		for (i = 0; i < params.nfutexes; i++, ops++) {
			/*
			 * We want the futex calls to fail in order to stress
			 * the hashing of uaddr and not measure other steps,
			 * such as internal waitqueue handling, thus enlarging
			 * the critical region protected by hb->lock.
			 */
			ret = futex_wait(&w->futex[i], 1234, NULL, futex_flag);
			if (!params.silent &&
			    (!ret || errno != EAGAIN || errno != EWOULDBLOCK))
				warn("Non-expected futex return call");
		}
	} while (!done);

	w->ops = ops;
	return NULL;
}

static void toggle_done(int sig __maybe_unused,
			siginfo_t *info __maybe_unused,
			void *uc __maybe_unused)
{
	/* inform all threads that we're done for the day */
	done = true;
	gettimeofday(&bench__end, NULL);
	timersub(&bench__end, &bench__start, &bench__runtime);
}

static void print_summary(void)
{
	unsigned long avg = avg_stats(&throughput_stats);
	double stddev = stddev_stats(&throughput_stats);

	printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
	       !params.silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
	       (int)bench__runtime.tv_sec);
	futex_print_nbuckets(&params);
}

int bench_futex_hash(int argc, const char **argv)
{
	int ret = 0;
	cpu_set_t *cpuset;
	struct sigaction act;
	unsigned int i;
	pthread_attr_t thread_attr;
	struct worker *worker = NULL;
	struct perf_cpu_map *cpu;
	int nrcpus;
	size_t size;

	argc = parse_options(argc, argv, options, bench_futex_hash_usage, 0);
	if (argc) {
		usage_with_options(bench_futex_hash_usage, options);
		exit(EXIT_FAILURE);
	}

	cpu = perf_cpu_map__new_online_cpus();
	if (!cpu)
		goto errmem;

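	/* Ctrl-C (SIGINT) ends the run early by firing toggle_done(). */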
	memset(&act, 0, sizeof(act));
	sigfillset(&act.sa_mask);
	act.sa_sigaction = toggle_done;
	sigaction(SIGINT, &act, NULL);

	if (params.mlockall) {
		if (mlockall(MCL_CURRENT | MCL_FUTURE))
			err(EXIT_FAILURE, "mlockall");
	}

	if (!params.nthreads) /* default to the number of CPUs */
		params.nthreads = perf_cpu_map__nr(cpu);

	worker = calloc(params.nthreads, sizeof(*worker));
	if (!worker)
		goto errmem;

	if (!params.fshared)
		futex_flag = FUTEX_PRIVATE_FLAG;
	futex_set_nbuckets_param(&params);

	printf("Run summary [PID %d]: %d threads, each operating on %d [%s] futexes for %d secs.\n\n",
	       getpid(), params.nthreads, params.nfutexes, params.fshared ? "shared":"private", params.runtime);

	init_stats(&throughput_stats);
	mutex_init(&thread_lock);
	cond_init(&thread_parent);
	cond_init(&thread_worker);

	threads_starting = params.nthreads;
	pthread_attr_init(&thread_attr);
	gettimeofday(&bench__start, NULL);

	nrcpus = cpu__max_cpu().cpu;
	cpuset = CPU_ALLOC(nrcpus);
	BUG_ON(!cpuset);
	size = CPU_ALLOC_SIZE(nrcpus);

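	/*
	 * Create the workers, pinning each one to a CPU from the online CPU
	 * map in round-robin order.
	 */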
	for (i = 0; i < params.nthreads; i++) {
		worker[i].tid = i;
		worker[i].futex = calloc(params.nfutexes, sizeof(*worker[i].futex));
		if (!worker[i].futex)
			goto errmem;

		CPU_ZERO_S(size, cpuset);

		CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
		ret = pthread_attr_setaffinity_np(&thread_attr, size, cpuset);
		if (ret) {
			CPU_FREE(cpuset);
			err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
		}
		ret = pthread_create(&worker[i].thread, &thread_attr, workerfn,
				     (void *)(struct worker *) &worker[i]);
		if (ret) {
			CPU_FREE(cpuset);
			err(EXIT_FAILURE, "pthread_create");
		}

	}
	CPU_FREE(cpuset);
	pthread_attr_destroy(&thread_attr);

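	/*
	 * Wait until every worker has checked in, then release them all at
	 * once so the measurement window covers all threads running.
	 */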
	mutex_lock(&thread_lock);
	while (threads_starting)
		cond_wait(&thread_parent, &thread_lock);
	cond_broadcast(&thread_worker);
	mutex_unlock(&thread_lock);

	sleep(params.runtime);
	toggle_done(0, NULL, NULL);

	for (i = 0; i < params.nthreads; i++) {
		ret = pthread_join(worker[i].thread, NULL);
		if (ret)
			err(EXIT_FAILURE, "pthread_join");
	}

	/* cleanup & report results */
	cond_destroy(&thread_parent);
	cond_destroy(&thread_worker);
	mutex_destroy(&thread_lock);

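	/*
	 * Per-thread throughput: operations completed divided by the elapsed
	 * runtime, accumulated into the stats reported by print_summary().
	 */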
	for (i = 0; i < params.nthreads; i++) {
		unsigned long t = bench__runtime.tv_sec > 0 ?
			worker[i].ops / bench__runtime.tv_sec : 0;
		update_stats(&throughput_stats, t);
		if (!params.silent) {
			if (params.nfutexes == 1)
				printf("[thread %2d] futex: %p [ %ld ops/sec ]\n",
				       worker[i].tid, &worker[i].futex[0], t);
			else
				printf("[thread %2d] futexes: %p ... %p [ %ld ops/sec ]\n",
				       worker[i].tid, &worker[i].futex[0],
				       &worker[i].futex[params.nfutexes-1], t);
		}

		zfree(&worker[i].futex);
	}

	print_summary();

	free(worker);
	free(cpu);
	return ret;
errmem:
	err(EXIT_FAILURE, "calloc");
}