#!/bin/sh

[ `uname -p` != "amd64" ] && exit 0
[ `id -u` -ne 0 ] && echo "Must be root!" && exit 1
[ "`sysctl -in kern.features.kasan`" != "1" ] && exit 0

. ../default.cfg
prog=$(basename "$0" .sh)
cat > /tmp/$prog.c <<EOF
// https://syzkaller.appspot.com/bug?id=749aa1fdb67018e9c0179373a60d523511bff02c
// autogenerated by syzkaller (https://github.com/google/syzkaller)
// Reported-by: syzbot+5cb51285603332d9be11@syzkaller.appspotmail.com

#define _GNU_SOURCE

#include <sys/types.h>

#include <errno.h>
#include <pthread.h>
#include <pwd.h>
#include <setjmp.h>
#include <signal.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/endian.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>

static __thread int clone_ongoing;
static __thread int skip_segv;
static __thread jmp_buf segv_env;

static void segv_handler(int sig, siginfo_t* info, void* ctx __unused)
{
  if (__atomic_load_n(&clone_ongoing, __ATOMIC_RELAXED) != 0) {
    exit(sig);
  }
  uintptr_t addr = (uintptr_t)info->si_addr;
  const uintptr_t prog_start = 1 << 20;
  const uintptr_t prog_end = 100 << 20;
  int skip = __atomic_load_n(&skip_segv, __ATOMIC_RELAXED) != 0;
  int valid = addr < prog_start || addr > prog_end;
  if (sig == SIGBUS)
    valid = 1;
  if (skip && valid) {
    _longjmp(segv_env, 1);
  }
  exit(sig);
}

static void install_segv_handler(void)
{
  struct sigaction sa;
  memset(&sa, 0, sizeof(sa));
  sa.sa_sigaction = segv_handler;
  sa.sa_flags = SA_NODEFER | SA_SIGINFO;
  sigaction(SIGSEGV, &sa, NULL);
  sigaction(SIGBUS, &sa, NULL);
}

#define NONFAILING(...)                                                        \
  ({                                                                           \
    int ok = 1;                                                                \
    __atomic_fetch_add(&skip_segv, 1, __ATOMIC_SEQ_CST);                       \
    if (_setjmp(segv_env) == 0) {                                              \
      __VA_ARGS__;                                                             \
    } else                                                                     \
      ok = 0;                                                                  \
    __atomic_fetch_sub(&skip_segv, 1, __ATOMIC_SEQ_CST);                       \
    ok;                                                                        \
  })

static void kill_and_wait(int pid, int* status)
{
  kill(pid, SIGKILL);
  while (waitpid(-1, status, 0) != pid) {
  }
}

static void sleep_ms(uint64_t ms)
{
  usleep(ms * 1000);
}

static uint64_t current_time_ms(void)
{
  struct timespec ts;
  if (clock_gettime(CLOCK_MONOTONIC, &ts))
    exit(1);
  return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
}

static void thread_start(void* (*fn)(void*), void* arg)
{
  pthread_t th;
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setstacksize(&attr, 128 << 10);
  int i = 0;
  for (; i < 100; i++) {
    if (pthread_create(&th, &attr, fn, arg) == 0) {
      pthread_attr_destroy(&attr);
      return;
    }
    if (errno == EAGAIN) {
      usleep(50);
      continue;
    }
    break;
  }
  exit(1);
}

typedef struct {
  pthread_mutex_t mu;
  pthread_cond_t cv;
  int state;
} event_t;

static void event_init(event_t* ev)
{
  if (pthread_mutex_init(&ev->mu, 0))
    exit(1);
  if (pthread_cond_init(&ev->cv, 0))
    exit(1);
  ev->state = 0;
}

static void event_reset(event_t* ev)
{
  ev->state = 0;
}

static void event_set(event_t* ev)
{
  pthread_mutex_lock(&ev->mu);
  if (ev->state)
    exit(1);
  ev->state = 1;
  pthread_mutex_unlock(&ev->mu);
  pthread_cond_broadcast(&ev->cv);
}

static void event_wait(event_t* ev)
{
  pthread_mutex_lock(&ev->mu);
  while (!ev->state)
    pthread_cond_wait(&ev->cv, &ev->mu);
  pthread_mutex_unlock(&ev->mu);
}

static int event_isset(event_t* ev)
{
  pthread_mutex_lock(&ev->mu);
  int res = ev->state;
  pthread_mutex_unlock(&ev->mu);
  return res;
}

static int event_timedwait(event_t* ev, uint64_t timeout)
{
  uint64_t start = current_time_ms();
  uint64_t now = start;
  pthread_mutex_lock(&ev->mu);
  for (;;) {
    if (ev->state)
      break;
    uint64_t remain = timeout - (now - start);
    struct timespec ts;
    ts.tv_sec = remain / 1000;
    ts.tv_nsec = (remain % 1000) * 1000 * 1000;
    pthread_cond_timedwait(&ev->cv, &ev->mu, &ts);
    now = current_time_ms();
    if (now - start > timeout)
      break;
  }
  int res = ev->state;
  pthread_mutex_unlock(&ev->mu);
  return res;
}

static void sandbox_common()
{
  struct rlimit rlim;
  rlim.rlim_cur = rlim.rlim_max = 128 << 20;
  setrlimit(RLIMIT_AS, &rlim);
  rlim.rlim_cur = rlim.rlim_max = 8 << 20;
  setrlimit(RLIMIT_MEMLOCK, &rlim);
  rlim.rlim_cur = rlim.rlim_max = 1 << 20;
  setrlimit(RLIMIT_FSIZE, &rlim);
  rlim.rlim_cur = rlim.rlim_max = 1 << 20;
  setrlimit(RLIMIT_STACK, &rlim);
  rlim.rlim_cur = rlim.rlim_max = 0;
  setrlimit(RLIMIT_CORE, &rlim);
  rlim.rlim_cur = rlim.rlim_max = 256;
  setrlimit(RLIMIT_NOFILE, &rlim);
}

static void loop();

static int do_sandbox_none(void)
{
  sandbox_common();
  loop();
  return 0;
}

struct thread_t {
  int created, call;
  event_t ready, done;
};

static struct thread_t threads[16];
static void execute_call(int call);
static int running;

static void* thr(void* arg)
{
  struct thread_t* th = (struct thread_t*)arg;
  for (;;) {
    event_wait(&th->ready);
    event_reset(&th->ready);
    execute_call(th->call);
    __atomic_fetch_sub(&running, 1, __ATOMIC_RELAXED);
    event_set(&th->done);
  }
  return 0;
}

static void execute_one(void)
{
  int i, call, thread;
  for (call = 0; call < 5; call++) {
    for (thread = 0; thread < (int)(sizeof(threads) / sizeof(threads[0]));
         thread++) {
      struct thread_t* th = &threads[thread];
      if (!th->created) {
        th->created = 1;
        event_init(&th->ready);
        event_init(&th->done);
        event_set(&th->done);
        thread_start(thr, th);
      }
      if (!event_isset(&th->done))
        continue;
      event_reset(&th->done);
      th->call = call;
      __atomic_fetch_add(&running, 1, __ATOMIC_RELAXED);
      event_set(&th->ready);
      event_timedwait(&th->done, 50);
      break;
    }
  }
  for (i = 0; i < 100 && __atomic_load_n(&running, __ATOMIC_RELAXED); i++)
    sleep_ms(1);
}

static void execute_one(void);

#define WAIT_FLAGS 0

static void loop(void)
{
  int iter = 0;
  for (;; iter++) {
    int pid = fork();
    if (pid < 0)
      exit(1);
    if (pid == 0) {
      execute_one();
      exit(0);
    }
    int status = 0;
    uint64_t start = current_time_ms();
    for (;;) {
      if (waitpid(-1, &status, WNOHANG | WAIT_FLAGS) == pid)
        break;
      sleep_ms(1);
      if (current_time_ms() - start < 5000)
        continue;
      kill_and_wait(pid, &status);
      break;
    }
  }
}

void execute_call(int call)
{
  switch (call) {
  case 0:
    syscall(SYS_thr_new, 0ul, 0ul);
    break;
  case 1:
    syscall(SYS_setloginclass, 0ul);
    break;
  case 2:
    syscall(SYS_vfork);
    break;
  case 3:
    NONFAILING(*(uint32_t*)0x20001880 = 4);
    syscall(SYS_sysarch, 8ul, 0x20001880ul);
    break;
  case 4:
    syscall(SYS_getsid, 0);
    break;
  }
}
int main(void)
{
  syscall(SYS_mmap, 0x20000000ul, 0x1000000ul, 7ul, 0x1012ul, -1, 0ul);
  install_segv_handler();
  do_sandbox_none();
  return 0;
}
EOF
mycc -o /tmp/$prog -Wall -Wextra -O0 /tmp/$prog.c -lpthread || exit 1

(cd /tmp; timeout 2m ./$prog)

rm -rf /tmp/$prog /tmp/$prog.c /tmp/syzkaller.*
exit 0