// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Davidlohr Bueso <davidlohr@hp.com>
 *
 * futex-hash: Stress the hell out of the Linux kernel futex uaddr hashing.
 *
 * This program is particularly useful for measuring the kernel's futex hash
 * table/function implementation. In order for it to make sense, use with as
 * many threads and futexes as possible.
 */

/* For the CLR_() macros */
#include <string.h>
#include <pthread.h>

#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/prctl.h>
#include <linux/zalloc.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <perf/cpumap.h>

#include "../util/mutex.h"
#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include "bench.h"
#include "futex.h"

#include <err.h>

static bool done = false;
static int futex_flag = 0;

struct timeval bench__start, bench__end, bench__runtime;
static struct mutex thread_lock;
static unsigned int threads_starting;
static struct stats throughput_stats;
static struct cond thread_parent, thread_worker;

struct worker {
	int tid;
	u_int32_t *futex;
	pthread_t thread;
	unsigned long ops;
};

static struct bench_futex_parameters params = {
	.nfutexes = 1024,
	.runtime  = 10,
	.nbuckets = -1,
};

static const struct option options[] = {
	OPT_INTEGER( 'b', "buckets",  &params.nbuckets, "Specify amount of hash buckets"),
	OPT_BOOLEAN( 'I', "immutable", &params.buckets_immutable, "Make the hash buckets immutable"),
	OPT_UINTEGER('t', "threads",  &params.nthreads, "Specify amount of threads"),
	OPT_UINTEGER('r', "runtime",  &params.runtime, "Specify runtime (in seconds)"),
	OPT_UINTEGER('f', "futexes",  &params.nfutexes, "Specify amount of futexes per thread"),
	OPT_BOOLEAN( 's', "silent",   &params.silent, "Silent mode: do not display data/details"),
	OPT_BOOLEAN( 'S', "shared",   &params.fshared, "Use shared futexes instead of private ones"),
	OPT_BOOLEAN( 'm', "mlockall", &params.mlockall, "Lock all current and future memory"),
	OPT_END()
};

static const char * const bench_futex_hash_usage[] = {
	"perf bench futex hash <options>",
	NULL
};

static void *workerfn(void *arg)
{
	int ret;
	struct worker *w = (struct worker *) arg;
	unsigned int i;
	unsigned long ops = w->ops; /* avoid cacheline bouncing */

	mutex_lock(&thread_lock);
	threads_starting--;
	if (!threads_starting)
		cond_signal(&thread_parent);
	cond_wait(&thread_worker, &thread_lock);
	mutex_unlock(&thread_lock);

	do {
		for (i = 0; i < params.nfutexes; i++, ops++) {
			/*
			 * We want the futex calls to fail in order to stress
			 * the hashing of uaddr and not measure other steps,
			 * such as internal waitqueue handling, thus enlarging
			 * the critical region protected by hb->lock.
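			 *
			 * EAGAIN (== EWOULDBLOCK on Linux) is therefore the
			 * expected errno: the futex words are zeroed by
			 * calloc() and never hold the value (1234) we wait on.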
			 */
			ret = futex_wait(&w->futex[i], 1234, NULL, futex_flag);
			if (!params.silent &&
			    (!ret || (errno != EAGAIN && errno != EWOULDBLOCK)))
				warn("Unexpected futex return call");
		}
	} while (!done);

	w->ops = ops;
	return NULL;
}

static void toggle_done(int sig __maybe_unused,
			siginfo_t *info __maybe_unused,
			void *uc __maybe_unused)
{
	/* inform all threads that we're done for the day */
	done = true;
	gettimeofday(&bench__end, NULL);
	timersub(&bench__end, &bench__start, &bench__runtime);
}

static void print_summary(void)
{
	unsigned long avg = avg_stats(&throughput_stats);
	double stddev = stddev_stats(&throughput_stats);

	printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
	       !params.silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
	       (int)bench__runtime.tv_sec);
	futex_print_nbuckets(&params);
}

int bench_futex_hash(int argc, const char **argv)
{
	int ret = 0;
	cpu_set_t *cpuset;
	struct sigaction act;
	unsigned int i;
	pthread_attr_t thread_attr;
	struct worker *worker = NULL;
	struct perf_cpu_map *cpu;
	int nrcpus;
	size_t size;

	argc = parse_options(argc, argv, options, bench_futex_hash_usage, 0);
	if (argc) {
		usage_with_options(bench_futex_hash_usage, options);
		exit(EXIT_FAILURE);
	}

	cpu = perf_cpu_map__new_online_cpus();
	if (!cpu)
		goto errmem;

	memset(&act, 0, sizeof(act));
	sigfillset(&act.sa_mask);
	act.sa_sigaction = toggle_done;
	sigaction(SIGINT, &act, NULL);

	if (params.mlockall) {
		if (mlockall(MCL_CURRENT | MCL_FUTURE))
			err(EXIT_FAILURE, "mlockall");
	}

	if (!params.nthreads) /* default to the number of CPUs */
		params.nthreads = perf_cpu_map__nr(cpu);

	worker = calloc(params.nthreads, sizeof(*worker));
	if (!worker)
		goto errmem;

	if (!params.fshared)
		futex_flag = FUTEX_PRIVATE_FLAG;
	futex_set_nbuckets_param(&params);

	printf("Run summary [PID %d]: %d threads, each operating on %d [%s] futexes for %d secs.\n\n",
	       getpid(), params.nthreads, params.nfutexes, params.fshared ?
"shared":"private", params.runtime); 174 175 init_stats(&throughput_stats); 176 mutex_init(&thread_lock); 177 cond_init(&thread_parent); 178 cond_init(&thread_worker); 179 180 threads_starting = params.nthreads; 181 pthread_attr_init(&thread_attr); 182 gettimeofday(&bench__start, NULL); 183 184 nrcpus = cpu__max_cpu().cpu; 185 cpuset = CPU_ALLOC(nrcpus); 186 BUG_ON(!cpuset); 187 size = CPU_ALLOC_SIZE(nrcpus); 188 189 for (i = 0; i < params.nthreads; i++) { 190 worker[i].tid = i; 191 worker[i].futex = calloc(params.nfutexes, sizeof(*worker[i].futex)); 192 if (!worker[i].futex) 193 goto errmem; 194 195 CPU_ZERO_S(size, cpuset); 196 197 CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset); 198 ret = pthread_attr_setaffinity_np(&thread_attr, size, cpuset); 199 if (ret) { 200 CPU_FREE(cpuset); 201 err(EXIT_FAILURE, "pthread_attr_setaffinity_np"); 202 } 203 ret = pthread_create(&worker[i].thread, &thread_attr, workerfn, 204 (void *)(struct worker *) &worker[i]); 205 if (ret) { 206 CPU_FREE(cpuset); 207 err(EXIT_FAILURE, "pthread_create"); 208 } 209 210 } 211 CPU_FREE(cpuset); 212 pthread_attr_destroy(&thread_attr); 213 214 mutex_lock(&thread_lock); 215 while (threads_starting) 216 cond_wait(&thread_parent, &thread_lock); 217 cond_broadcast(&thread_worker); 218 mutex_unlock(&thread_lock); 219 220 sleep(params.runtime); 221 toggle_done(0, NULL, NULL); 222 223 for (i = 0; i < params.nthreads; i++) { 224 ret = pthread_join(worker[i].thread, NULL); 225 if (ret) 226 err(EXIT_FAILURE, "pthread_join"); 227 } 228 229 /* cleanup & report results */ 230 cond_destroy(&thread_parent); 231 cond_destroy(&thread_worker); 232 mutex_destroy(&thread_lock); 233 234 for (i = 0; i < params.nthreads; i++) { 235 unsigned long t = bench__runtime.tv_sec > 0 ? 236 worker[i].ops / bench__runtime.tv_sec : 0; 237 update_stats(&throughput_stats, t); 238 if (!params.silent) { 239 if (params.nfutexes == 1) 240 printf("[thread %2d] futex: %p [ %ld ops/sec ]\n", 241 worker[i].tid, &worker[i].futex[0], t); 242 else 243 printf("[thread %2d] futexes: %p ... %p [ %ld ops/sec ]\n", 244 worker[i].tid, &worker[i].futex[0], 245 &worker[i].futex[params.nfutexes-1], t); 246 } 247 248 zfree(&worker[i].futex); 249 } 250 251 print_summary(); 252 253 free(worker); 254 free(cpu); 255 return ret; 256 errmem: 257 err(EXIT_FAILURE, "calloc"); 258 } 259