// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
 *
 * Test code for seccomp bpf.
 */

#define _GNU_SOURCE
#include <sys/types.h>

/*
 * glibc 2.26 and later have SIGSYS in siginfo_t. Before that,
 * we need to use the kernel's siginfo.h file and trick glibc
 * into accepting it.
 */
#if !__GLIBC_PREREQ(2, 26)
# include <asm/siginfo.h>
# define __have_siginfo_t 1
# define __have_sigval_t 1
# define __have_sigevent_t 1
#endif

#include <errno.h>
#include <linux/filter.h>
#include <sys/prctl.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <linux/prctl.h>
#include <linux/ptrace.h>
#include <linux/seccomp.h>
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <stddef.h>
#include <stdbool.h>
#include <string.h>
#include <time.h>
#include <linux/elf.h>
#include <sys/uio.h>
#include <sys/utsname.h>
#include <sys/fcntl.h>
#include <sys/mman.h>
#include <sys/times.h>
#include <sys/socket.h>
#include <sys/ioctl.h>

#include <unistd.h>
#include <sys/syscall.h>
#include <poll.h>

#include "../kselftest_harness.h"

#ifndef PR_SET_PTRACER
# define PR_SET_PTRACER 0x59616d61
#endif

#ifndef PR_SET_NO_NEW_PRIVS
#define PR_SET_NO_NEW_PRIVS 38
#define PR_GET_NO_NEW_PRIVS 39
#endif

#ifndef PR_SECCOMP_EXT
#define PR_SECCOMP_EXT 43
#endif

#ifndef SECCOMP_EXT_ACT
#define SECCOMP_EXT_ACT 1
#endif

#ifndef SECCOMP_EXT_ACT_TSYNC
#define SECCOMP_EXT_ACT_TSYNC 1
#endif

#ifndef SECCOMP_MODE_STRICT
#define SECCOMP_MODE_STRICT 1
#endif

#ifndef SECCOMP_MODE_FILTER
#define SECCOMP_MODE_FILTER 2
#endif

#ifndef SECCOMP_RET_ALLOW
struct seccomp_data {
	int nr;
	__u32 arch;
	__u64 instruction_pointer;
	__u64 args[6];
};
#endif

#ifndef SECCOMP_RET_KILL_PROCESS
#define SECCOMP_RET_KILL_PROCESS 0x80000000U /* kill the process */
#define SECCOMP_RET_KILL_THREAD	 0x00000000U /* kill the thread */
#endif
#ifndef SECCOMP_RET_KILL
#define SECCOMP_RET_KILL	SECCOMP_RET_KILL_THREAD
#define SECCOMP_RET_TRAP	0x00030000U /* disallow and force a SIGSYS */
#define SECCOMP_RET_ERRNO	0x00050000U /* returns an errno */
#define SECCOMP_RET_TRACE	0x7ff00000U /* pass to a tracer or disallow */
#define SECCOMP_RET_ALLOW	0x7fff0000U /* allow */
#endif
#ifndef SECCOMP_RET_LOG
#define SECCOMP_RET_LOG		0x7ffc0000U /* allow after logging */
#endif

#ifndef __NR_seccomp
# if defined(__i386__)
#  define __NR_seccomp 354
# elif defined(__x86_64__)
#  define __NR_seccomp 317
# elif defined(__arm__)
#  define __NR_seccomp 383
# elif defined(__aarch64__)
#  define __NR_seccomp 277
# elif defined(__hppa__)
#  define __NR_seccomp 338
# elif defined(__powerpc__)
#  define __NR_seccomp 358
# elif defined(__s390__)
#  define __NR_seccomp 348
# else
#  warning "seccomp syscall number unknown for this architecture"
#  define __NR_seccomp 0xffff
# endif
#endif

#ifndef SECCOMP_SET_MODE_STRICT
#define SECCOMP_SET_MODE_STRICT 0
#endif

#ifndef SECCOMP_SET_MODE_FILTER
#define SECCOMP_SET_MODE_FILTER 1
#endif

#ifndef SECCOMP_GET_ACTION_AVAIL
#define SECCOMP_GET_ACTION_AVAIL 2
#endif

#ifndef SECCOMP_GET_NOTIF_SIZES
#define SECCOMP_GET_NOTIF_SIZES 3
#endif

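/*
 * The SECCOMP_FILTER_FLAG_*, seccomp metadata, and notification definitions
 * below mirror the kernel UAPI headers so this test still builds against
 * older headers that predate them.
 */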
#ifndef SECCOMP_FILTER_FLAG_TSYNC
#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
#endif

#ifndef SECCOMP_FILTER_FLAG_LOG
#define SECCOMP_FILTER_FLAG_LOG (1UL << 1)
#endif

#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
#endif

#ifndef PTRACE_SECCOMP_GET_METADATA
#define PTRACE_SECCOMP_GET_METADATA 0x420d

struct seccomp_metadata {
	__u64 filter_off;	/* Input: which filter */
	__u64 flags;		/* Output: filter's flags */
};
#endif

#ifndef SECCOMP_FILTER_FLAG_NEW_LISTENER
#define SECCOMP_FILTER_FLAG_NEW_LISTENER (1UL << 3)

#define SECCOMP_RET_USER_NOTIF 0x7fc00000U

#define SECCOMP_IOC_MAGIC		'!'
#define SECCOMP_IO(nr)			_IO(SECCOMP_IOC_MAGIC, nr)
#define SECCOMP_IOR(nr, type)		_IOR(SECCOMP_IOC_MAGIC, nr, type)
#define SECCOMP_IOW(nr, type)		_IOW(SECCOMP_IOC_MAGIC, nr, type)
#define SECCOMP_IOWR(nr, type)		_IOWR(SECCOMP_IOC_MAGIC, nr, type)

/* Flags for seccomp notification fd ioctl. */
#define SECCOMP_IOCTL_NOTIF_RECV	SECCOMP_IOWR(0, struct seccomp_notif)
#define SECCOMP_IOCTL_NOTIF_SEND	SECCOMP_IOWR(1, \
						struct seccomp_notif_resp)
#define SECCOMP_IOCTL_NOTIF_ID_VALID	SECCOMP_IOR(2, __u64)

struct seccomp_notif {
	__u64 id;
	__u32 pid;
	__u32 flags;
	struct seccomp_data data;
};

struct seccomp_notif_resp {
	__u64 id;
	__s64 val;
	__s32 error;
	__u32 flags;
};

struct seccomp_notif_sizes {
	__u16 seccomp_notif;
	__u16 seccomp_notif_resp;
	__u16 seccomp_data;
};
#endif

#ifndef seccomp
int seccomp(unsigned int op, unsigned int flags, void *args)
{
	errno = 0;
	return syscall(__NR_seccomp, op, flags, args);
}
#endif

#if __BYTE_ORDER == __LITTLE_ENDIAN
#define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]))
#elif __BYTE_ORDER == __BIG_ENDIAN
#define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]) + sizeof(__u32))
#else
#error "wut? Unknown __BYTE_ORDER?!"
#endif

#define SIBLING_EXIT_UNKILLED	0xbadbeef
#define SIBLING_EXIT_FAILURE	0xbadface
#define SIBLING_EXIT_NEWPRIVS	0xbadfeed

TEST(mode_strict_support)
{
	long ret;

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, NULL, NULL);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support CONFIG_SECCOMP");
	}
	syscall(__NR_exit, 0);
}

TEST_SIGNAL(mode_strict_cannot_call_prctl, SIGKILL)
{
	long ret;

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, NULL, NULL);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support CONFIG_SECCOMP");
	}
	syscall(__NR_prctl, PR_SET_SECCOMP, SECCOMP_MODE_FILTER,
		NULL, NULL, NULL);
	EXPECT_FALSE(true) {
		TH_LOG("Unreachable!");
	}
}

/* Note! This doesn't test no new privs behavior */
TEST(no_new_privs_support)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	EXPECT_EQ(0, ret) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}
}
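/*
 * Most filter-mode tests below follow the same install sequence: first
 * prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0), then PR_SET_SECCOMP (or the
 * seccomp() syscall) with SECCOMP_MODE_FILTER. Without no_new_privs,
 * installing a filter requires CAP_SYS_ADMIN, which is what
 * mode_filter_without_nnp exercises.
 */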
/* Tests kernel support by checking for a copy_from_user() fault on NULL. */
TEST(mode_filter_support)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, NULL, NULL, NULL);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EFAULT, errno) {
		TH_LOG("Kernel does not support CONFIG_SECCOMP_FILTER!");
	}
}

TEST(mode_filter_without_nnp)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_GET_NO_NEW_PRIVS, 0, NULL, 0, 0);
	ASSERT_LE(0, ret) {
		TH_LOG("Expected 0 or unsupported for NO_NEW_PRIVS");
	}
	errno = 0;
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	/* Succeeds with CAP_SYS_ADMIN, fails without */
	/* TODO(wad) check caps not euid */
	if (geteuid()) {
		EXPECT_EQ(-1, ret);
		EXPECT_EQ(EACCES, errno);
	} else {
		EXPECT_EQ(0, ret);
	}
}

#define MAX_INSNS_PER_PATH 32768

TEST(filter_size_limits)
{
	int i;
	int count = BPF_MAXINSNS + 1;
	struct sock_filter allow[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_filter *filter;
	struct sock_fprog prog = { };
	long ret;

	filter = calloc(count, sizeof(*filter));
	ASSERT_NE(NULL, filter);

	for (i = 0; i < count; i++)
		filter[i] = allow[0];

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	prog.filter = filter;
	prog.len = count;

	/* Too many filter instructions in a single filter. */
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_NE(0, ret) {
		TH_LOG("Installing %d insn filter was allowed", prog.len);
	}

	/* One less is okay, though. */
	prog.len -= 1;
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_EQ(0, ret) {
		TH_LOG("Installing %d insn filter wasn't allowed", prog.len);
	}
}

TEST(filter_chain_limits)
{
	int i;
	int count = BPF_MAXINSNS;
	struct sock_filter allow[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_filter *filter;
	struct sock_fprog prog = { };
	long ret;

	filter = calloc(count, sizeof(*filter));
	ASSERT_NE(NULL, filter);

	for (i = 0; i < count; i++)
		filter[i] = allow[0];

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	prog.filter = filter;
	prog.len = 1;

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_EQ(0, ret);

	prog.len = count;

	/* Too many total filter instructions. */
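	/*
	 * Each attached filter is charged its length plus a small per-filter
	 * penalty (roughly 4 insns, per the log message below) against
	 * MAX_INSNS_PER_PATH, so this loop is expected to fail eventually.
	 */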
	for (i = 0; i < MAX_INSNS_PER_PATH; i++) {
		ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
		if (ret != 0)
			break;
	}
	ASSERT_NE(0, ret) {
		TH_LOG("Allowed %d %d-insn filters (total with penalties:%d)",
		       i, count, i * (count + 4));
	}
}

TEST(mode_filter_cannot_move_to_strict)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, 0, 0);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EINVAL, errno);
}


TEST(mode_filter_get_seccomp)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
	EXPECT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
	EXPECT_EQ(2, ret);
}


TEST(ALLOW_all)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
}

TEST(empty_prog)
{
	struct sock_filter filter[] = {
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EINVAL, errno);
}

TEST(log_all)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_LOG),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);

	/* getppid() should succeed and be logged (no check for logging) */
	EXPECT_EQ(parent, syscall(__NR_getppid));
}

TEST_SIGNAL(unknown_ret_is_kill_inside, SIGSYS)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, 0x10000000U),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
	EXPECT_EQ(0, syscall(__NR_getpid)) {
		TH_LOG("getpid() shouldn't ever return");
	}
}

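/*
 * Filter return values carry an action in the high bits and data in the low
 * 16 bits; action values the kernel does not recognize are treated as kill,
 * which is what the two unknown_ret_is_kill tests rely on.
 */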
/* return code >= 0x80000000 is unused. */
TEST_SIGNAL(unknown_ret_is_kill_above_allow, SIGSYS)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, 0x90000000U),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
	EXPECT_EQ(0, syscall(__NR_getpid)) {
		TH_LOG("getpid() shouldn't ever return");
	}
}

TEST_SIGNAL(KILL_all, SIGSYS)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
}

TEST_SIGNAL(KILL_one, SIGSYS)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* getpid() should never return. */
	EXPECT_EQ(0, syscall(__NR_getpid));
}

TEST_SIGNAL(KILL_one_arg_one, SIGSYS)
{
	void *fatal_address;
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_times, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		/* Only bother with the lower 32 bits for now. */
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(0)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K,
			(unsigned long)&fatal_address, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();
	struct tms timebuf;
	clock_t clock = times(&timebuf);

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_LE(clock, syscall(__NR_times, &timebuf));
	/* times() should never return. */
	EXPECT_EQ(0, syscall(__NR_times, &fatal_address));
}

TEST_SIGNAL(KILL_one_arg_six, SIGSYS)
{
#ifndef __NR_mmap2
	int sysno = __NR_mmap;
#else
	int sysno = __NR_mmap2;
#endif
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, sysno, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		/* Only bother with the lower 32 bits for now. */
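		/* syscall_arg(5) is the sixth argument, i.e. the mmap offset tested below. */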
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(5)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0C0FFEE, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();
	int fd;
	void *map1, *map2;
	int page_size = sysconf(_SC_PAGESIZE);

	ASSERT_LT(0, page_size);

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);

	fd = open("/dev/zero", O_RDONLY);
	ASSERT_NE(-1, fd);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	map1 = (void *)syscall(sysno,
		NULL, page_size, PROT_READ, MAP_PRIVATE, fd, page_size);
	EXPECT_NE(MAP_FAILED, map1);
	/* mmap2() should never return. */
	map2 = (void *)syscall(sysno,
		NULL, page_size, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE);
	EXPECT_EQ(MAP_FAILED, map2);

	/* The test failed, so clean up the resources. */
	munmap(map1, page_size);
	munmap(map2, page_size);
	close(fd);
}

/* This is a thread task to die via seccomp filter violation. */
void *kill_thread(void *data)
{
	bool die = (bool)data;

	if (die) {
		prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
		return (void *)SIBLING_EXIT_FAILURE;
	}

	return (void *)SIBLING_EXIT_UNKILLED;
}

/* Prepare a thread that will kill itself or both of us. */
void kill_thread_or_group(struct __test_metadata *_metadata, bool kill_process)
{
	pthread_t thread;
	void *status;
	/* Kill only when calling __NR_prctl. */
	struct sock_filter filter_thread[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL_THREAD),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog_thread = {
		.len = (unsigned short)ARRAY_SIZE(filter_thread),
		.filter = filter_thread,
	};
	struct sock_filter filter_process[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL_PROCESS),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog_process = {
		.len = (unsigned short)ARRAY_SIZE(filter_process),
		.filter = filter_process,
	};

	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0,
			     kill_process ? &prog_process : &prog_thread));

	/*
	 * Add the KILL_THREAD rule again to make sure that the KILL_PROCESS
	 * flag cannot be downgraded by a new filter.
	 */
	ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog_thread));

	/* Start a thread that will exit immediately. */
	ASSERT_EQ(0, pthread_create(&thread, NULL, kill_thread, (void *)false));
	ASSERT_EQ(0, pthread_join(thread, &status));
	ASSERT_EQ(SIBLING_EXIT_UNKILLED, (unsigned long)status);

	/* Start a thread that will die immediately. */
	ASSERT_EQ(0, pthread_create(&thread, NULL, kill_thread, (void *)true));
	ASSERT_EQ(0, pthread_join(thread, &status));
	ASSERT_NE(SIBLING_EXIT_FAILURE, (unsigned long)status);

	/*
	 * If we get here, only the spawned thread died. Let the parent know
	 * the whole process didn't die (i.e. this thread, the spawner,
	 * stayed running).
	 */
	exit(42);
}

TEST(KILL_thread)
{
	int status;
	pid_t child_pid;

	child_pid = fork();
	ASSERT_LE(0, child_pid);
	if (child_pid == 0) {
		kill_thread_or_group(_metadata, false);
		_exit(38);
	}

	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));

	/* If only the thread was killed, we'll see exit 42. */
	ASSERT_TRUE(WIFEXITED(status));
	ASSERT_EQ(42, WEXITSTATUS(status));
}

TEST(KILL_process)
{
	int status;
	pid_t child_pid;

	child_pid = fork();
	ASSERT_LE(0, child_pid);
	if (child_pid == 0) {
		kill_thread_or_group(_metadata, true);
		_exit(38);
	}

	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));

	/* If the entire process was killed, we'll see SIGSYS. */
	ASSERT_TRUE(WIFSIGNALED(status));
	ASSERT_EQ(SIGSYS, WTERMSIG(status));
}

/* TODO(wad) add 64-bit versus 32-bit arg tests. */
TEST(arg_out_of_range)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(6)),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EINVAL, errno);
}

#define ERRNO_FILTER(name, errno) \
	struct sock_filter _read_filter_##name[] = { \
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS, \
			offsetof(struct seccomp_data, nr)), \
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1), \
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | errno), \
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), \
	}; \
	struct sock_fprog prog_##name = { \
		.len = (unsigned short)ARRAY_SIZE(_read_filter_##name), \
		.filter = _read_filter_##name, \
	}

/* Make sure basic errno values are correctly passed through a filter. */
TEST(ERRNO_valid)
{
	ERRNO_FILTER(valid, E2BIG);
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_valid);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_EQ(-1, read(0, NULL, 0));
	EXPECT_EQ(E2BIG, errno);
}

/* Make sure an errno of zero is correctly handled by the arch code. */
TEST(ERRNO_zero)
{
	ERRNO_FILTER(zero, 0);
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_zero);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* "errno" of 0 is ok. */
	EXPECT_EQ(0, read(0, NULL, 0));
}

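/*
 * In the ERRNO_FILTER() programs above, the errno value is OR'd directly
 * into the SECCOMP_RET_DATA portion of the SECCOMP_RET_ERRNO return value,
 * which is how it reaches userspace as the failed syscall's errno.
 */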
/*
 * The SECCOMP_RET_DATA mask is 16 bits wide, but errno is smaller.
 * This tests that the errno value gets capped correctly, fixed by
 * 580c57f10768 ("seccomp: cap SECCOMP_RET_ERRNO data to MAX_ERRNO").
 */
TEST(ERRNO_capped)
{
	ERRNO_FILTER(capped, 4096);
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_capped);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_EQ(-1, read(0, NULL, 0));
	EXPECT_EQ(4095, errno);
}

/*
 * Filters are processed in reverse order: last applied is executed first.
 * Since only the SECCOMP_RET_ACTION mask is tested for return values, the
 * SECCOMP_RET_DATA mask results will follow the most recently applied
 * matching filter return (and not the lowest or highest value).
 */
TEST(ERRNO_order)
{
	ERRNO_FILTER(first, 11);
	ERRNO_FILTER(second, 13);
	ERRNO_FILTER(third, 12);
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_first);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_second);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_third);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_EQ(-1, read(0, NULL, 0));
	EXPECT_EQ(12, errno);
}

FIXTURE_DATA(TRAP) {
	struct sock_fprog prog;
};

FIXTURE_SETUP(TRAP)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRAP),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};

	memset(&self->prog, 0, sizeof(self->prog));
	self->prog.filter = malloc(sizeof(filter));
	ASSERT_NE(NULL, self->prog.filter);
	memcpy(self->prog.filter, filter, sizeof(filter));
	self->prog.len = (unsigned short)ARRAY_SIZE(filter);
}

FIXTURE_TEARDOWN(TRAP)
{
	if (self->prog.filter)
		free(self->prog.filter);
}

TEST_F_SIGNAL(TRAP, dfl, SIGSYS)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
	ASSERT_EQ(0, ret);
	syscall(__NR_getpid);
}

/* Ensure that SIGSYS overrides SIG_IGN */
TEST_F_SIGNAL(TRAP, ign, SIGSYS)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	signal(SIGSYS, SIG_IGN);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
	ASSERT_EQ(0, ret);
	syscall(__NR_getpid);
}

static siginfo_t TRAP_info;
static volatile int TRAP_nr;
static void TRAP_action(int nr, siginfo_t *info, void *void_context)
{
	memcpy(&TRAP_info, info, sizeof(TRAP_info));
	TRAP_nr = nr;
}

TEST_F(TRAP, handler)
{
	int ret, test;
	struct sigaction act;
	sigset_t mask;

	memset(&act, 0, sizeof(act));
	sigemptyset(&mask);
	sigaddset(&mask, SIGSYS);

	act.sa_sigaction = &TRAP_action;
	act.sa_flags = SA_SIGINFO;
	ret = sigaction(SIGSYS, &act, NULL);
	ASSERT_EQ(0, ret) {
		TH_LOG("sigaction failed");
	}
	ret = sigprocmask(SIG_UNBLOCK, &mask, NULL);
	ASSERT_EQ(0, ret) {
		TH_LOG("sigprocmask failed");
	}

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
	ASSERT_EQ(0, ret);
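	/* Trigger the TRAP filter and inspect the siginfo delivered with SIGSYS. */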
	TRAP_nr = 0;
	memset(&TRAP_info, 0, sizeof(TRAP_info));
	/* Expect the registers to be rolled back. (nr = error) may vary
	 * based on arch. */
	ret = syscall(__NR_getpid);
	/* Silence gcc warning about volatile. */
	test = TRAP_nr;
	EXPECT_EQ(SIGSYS, test);
	struct local_sigsys {
		void *_call_addr;	/* calling user insn */
		int _syscall;		/* triggering system call number */
		unsigned int _arch;	/* AUDIT_ARCH_* of syscall */
	} *sigsys = (struct local_sigsys *)
#ifdef si_syscall
		&(TRAP_info.si_call_addr);
#else
		&TRAP_info.si_pid;
#endif
	EXPECT_EQ(__NR_getpid, sigsys->_syscall);
	/* Make sure arch is non-zero. */
	EXPECT_NE(0, sigsys->_arch);
	EXPECT_NE(0, (unsigned long)sigsys->_call_addr);
}

FIXTURE_DATA(precedence) {
	struct sock_fprog allow;
	struct sock_fprog log;
	struct sock_fprog trace;
	struct sock_fprog error;
	struct sock_fprog trap;
	struct sock_fprog kill;
};

FIXTURE_SETUP(precedence)
{
	struct sock_filter allow_insns[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_filter log_insns[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_LOG),
	};
	struct sock_filter trace_insns[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE),
	};
	struct sock_filter error_insns[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO),
	};
	struct sock_filter trap_insns[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRAP),
	};
	struct sock_filter kill_insns[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
	};

	memset(self, 0, sizeof(*self));
#define FILTER_ALLOC(_x) \
	self->_x.filter = malloc(sizeof(_x##_insns)); \
	ASSERT_NE(NULL, self->_x.filter); \
	memcpy(self->_x.filter, &_x##_insns, sizeof(_x##_insns)); \
	self->_x.len = (unsigned short)ARRAY_SIZE(_x##_insns)
	FILTER_ALLOC(allow);
	FILTER_ALLOC(log);
	FILTER_ALLOC(trace);
	FILTER_ALLOC(error);
	FILTER_ALLOC(trap);
	FILTER_ALLOC(kill);
}

FIXTURE_TEARDOWN(precedence)
{
#define FILTER_FREE(_x) if (self->_x.filter) free(self->_x.filter)
	FILTER_FREE(allow);
	FILTER_FREE(log);
	FILTER_FREE(trace);
	FILTER_FREE(error);
	FILTER_FREE(trap);
	FILTER_FREE(kill);
}

TEST_F(precedence, allow_ok)
{
	pid_t parent, res = 0;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	res = syscall(__NR_getppid);
	EXPECT_EQ(parent, res);
}

TEST_F_SIGNAL(precedence, kill_is_highest, SIGSYS)
{
	pid_t parent, res = 0;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	res = syscall(__NR_getppid);
	EXPECT_EQ(parent, res);
	/* getpid() should never return. */
	res = syscall(__NR_getpid);
	EXPECT_EQ(0, res);
}

TEST_F_SIGNAL(precedence, kill_is_highest_in_any_order, SIGSYS)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* getpid() should never return. */
	EXPECT_EQ(0, syscall(__NR_getpid));
}

TEST_F_SIGNAL(precedence, trap_is_second, SIGSYS)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* getpid() should never return. */
	EXPECT_EQ(0, syscall(__NR_getpid));
}

TEST_F_SIGNAL(precedence, trap_is_second_in_any_order, SIGSYS)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* getpid() should never return. */
	EXPECT_EQ(0, syscall(__NR_getpid));
}

TEST_F(precedence, errno_is_third)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_EQ(0, syscall(__NR_getpid));
}

TEST_F(precedence, errno_is_third_in_any_order)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_EQ(0, syscall(__NR_getpid));
}

TEST_F(precedence, trace_is_fourth)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* No ptracer */
	EXPECT_EQ(-1, syscall(__NR_getpid));
}

TEST_F(precedence, trace_is_fourth_in_any_order)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* No ptracer */
	EXPECT_EQ(-1, syscall(__NR_getpid));
}

TEST_F(precedence, log_is_fifth)
{
	pid_t mypid, parent;
	long ret;

	mypid = getpid();
	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* Should also work just fine */
	EXPECT_EQ(mypid, syscall(__NR_getpid));
}

TEST_F(precedence, log_is_fifth_in_any_order)
{
	pid_t mypid, parent;
	long ret;

	mypid = getpid();
	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* Should also work just fine */
	EXPECT_EQ(mypid, syscall(__NR_getpid));
}

#ifndef PTRACE_O_TRACESECCOMP
#define PTRACE_O_TRACESECCOMP	0x00000080
#endif

/* Catch the Ubuntu 12.04 value error. */
#if PTRACE_EVENT_SECCOMP != 7
#undef PTRACE_EVENT_SECCOMP
#endif

#ifndef PTRACE_EVENT_SECCOMP
#define PTRACE_EVENT_SECCOMP 7
#endif

#define IS_SECCOMP_EVENT(status) ((status >> 16) == PTRACE_EVENT_SECCOMP)
bool tracer_running;
void tracer_stop(int sig)
{
	tracer_running = false;
}

typedef void tracer_func_t(struct __test_metadata *_metadata,
			   pid_t tracee, int status, void *args);

void start_tracer(struct __test_metadata *_metadata, int fd, pid_t tracee,
		  tracer_func_t tracer_func, void *args, bool ptrace_syscall)
{
	int ret = -1;
	struct sigaction action = {
		.sa_handler = tracer_stop,
	};

	/* Allow external shutdown. */
	tracer_running = true;
	ASSERT_EQ(0, sigaction(SIGUSR1, &action, NULL));

	errno = 0;
	while (ret == -1 && errno != EINVAL)
		ret = ptrace(PTRACE_ATTACH, tracee, NULL, 0);
	ASSERT_EQ(0, ret) {
		kill(tracee, SIGKILL);
	}
	/* Wait for attach stop */
	wait(NULL);

	ret = ptrace(PTRACE_SETOPTIONS, tracee, NULL, ptrace_syscall ?
						      PTRACE_O_TRACESYSGOOD :
						      PTRACE_O_TRACESECCOMP);
	ASSERT_EQ(0, ret) {
		TH_LOG("Failed to set PTRACE_O_TRACESECCOMP");
		kill(tracee, SIGKILL);
	}
	ret = ptrace(ptrace_syscall ? PTRACE_SYSCALL : PTRACE_CONT,
		     tracee, NULL, 0);
	ASSERT_EQ(0, ret);

	/* Unblock the tracee */
	ASSERT_EQ(1, write(fd, "A", 1));
	ASSERT_EQ(0, close(fd));

	/* Run until we're shut down. Must assert to stop execution. */
	while (tracer_running) {
		int status;

		if (wait(&status) != tracee)
			continue;
		if (WIFSIGNALED(status) || WIFEXITED(status))
			/* Child is dead. Time to go. */
			return;

		/* Check if this is a seccomp event. */
		ASSERT_EQ(!ptrace_syscall, IS_SECCOMP_EVENT(status));

		tracer_func(_metadata, tracee, status, args);

		ret = ptrace(ptrace_syscall ? PTRACE_SYSCALL : PTRACE_CONT,
			     tracee, NULL, 0);
		ASSERT_EQ(0, ret);
	}
	/* Directly report the status of our test harness results. */
	syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
}

/* Common tracer setup/teardown functions. */
void cont_handler(int num)
{ }
pid_t setup_trace_fixture(struct __test_metadata *_metadata,
			  tracer_func_t func, void *args, bool ptrace_syscall)
{
	char sync;
	int pipefd[2];
	pid_t tracer_pid;
	pid_t tracee = getpid();

	/* Setup a pipe for clean synchronization. */
	ASSERT_EQ(0, pipe(pipefd));

	/* Fork a child which we'll promote to tracer */
	tracer_pid = fork();
	ASSERT_LE(0, tracer_pid);
	signal(SIGALRM, cont_handler);
	if (tracer_pid == 0) {
		close(pipefd[0]);
		start_tracer(_metadata, pipefd[1], tracee, func, args,
			     ptrace_syscall);
		syscall(__NR_exit, 0);
	}
	close(pipefd[1]);
	prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0);
	read(pipefd[0], &sync, 1);
	close(pipefd[0]);

	return tracer_pid;
}
void teardown_trace_fixture(struct __test_metadata *_metadata,
			    pid_t tracer)
{
	if (tracer) {
		int status;
		/*
		 * Extract the exit code from the other process and
		 * adopt it for ourselves in case its asserts failed.
		 */
		ASSERT_EQ(0, kill(tracer, SIGUSR1));
		ASSERT_EQ(tracer, waitpid(tracer, &status, 0));
		if (WEXITSTATUS(status))
			_metadata->passed = 0;
	}
}

/* "poke" tracer arguments and function. */
struct tracer_args_poke_t {
	unsigned long poke_addr;
};

void tracer_poke(struct __test_metadata *_metadata, pid_t tracee, int status,
		 void *args)
{
	int ret;
	unsigned long msg;
	struct tracer_args_poke_t *info = (struct tracer_args_poke_t *)args;

	ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
	EXPECT_EQ(0, ret);
	/* If this fails, don't try to recover. */
	ASSERT_EQ(0x1001, msg) {
		kill(tracee, SIGKILL);
	}
	/*
	 * Poke in the message.
	 * Registers are not touched to try to keep this relatively arch
	 * agnostic.
	 */
	ret = ptrace(PTRACE_POKEDATA, tracee, info->poke_addr, 0x1001);
	EXPECT_EQ(0, ret);
}

FIXTURE_DATA(TRACE_poke) {
	struct sock_fprog prog;
	pid_t tracer;
	long poked;
	struct tracer_args_poke_t tracer_args;
};

FIXTURE_SETUP(TRACE_poke)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1001),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};

	self->poked = 0;
	memset(&self->prog, 0, sizeof(self->prog));
	self->prog.filter = malloc(sizeof(filter));
	ASSERT_NE(NULL, self->prog.filter);
	memcpy(self->prog.filter, filter, sizeof(filter));
	self->prog.len = (unsigned short)ARRAY_SIZE(filter);

	/* Set up tracer args. */
	self->tracer_args.poke_addr = (unsigned long)&self->poked;

	/* Launch tracer. */
	self->tracer = setup_trace_fixture(_metadata, tracer_poke,
					   &self->tracer_args, false);
}

FIXTURE_TEARDOWN(TRACE_poke)
{
	teardown_trace_fixture(_metadata, self->tracer);
	if (self->prog.filter)
		free(self->prog.filter);
}

TEST_F(TRACE_poke, read_has_side_effects)
{
	ssize_t ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(0, self->poked);
	ret = read(-1, NULL, 0);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(0x1001, self->poked);
}

TEST_F(TRACE_poke, getpid_runs_normally)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(0, self->poked);
	EXPECT_NE(0, syscall(__NR_getpid));
	EXPECT_EQ(0, self->poked);
}

#if defined(__x86_64__)
# define ARCH_REGS	struct user_regs_struct
# define SYSCALL_NUM	orig_rax
# define SYSCALL_RET	rax
#elif defined(__i386__)
# define ARCH_REGS	struct user_regs_struct
# define SYSCALL_NUM	orig_eax
# define SYSCALL_RET	eax
#elif defined(__arm__)
# define ARCH_REGS	struct pt_regs
# define SYSCALL_NUM	ARM_r7
# define SYSCALL_RET	ARM_r0
#elif defined(__aarch64__)
# define ARCH_REGS	struct user_pt_regs
# define SYSCALL_NUM	regs[8]
# define SYSCALL_RET	regs[0]
#elif defined(__hppa__)
# define ARCH_REGS	struct user_regs_struct
# define SYSCALL_NUM	gr[20]
# define SYSCALL_RET	gr[28]
#elif defined(__powerpc__)
# define ARCH_REGS	struct pt_regs
# define SYSCALL_NUM	gpr[0]
# define SYSCALL_RET	gpr[3]
#elif defined(__s390__)
# define ARCH_REGS	s390_regs
# define SYSCALL_NUM	gprs[2]
# define SYSCALL_RET	gprs[2]
#elif defined(__mips__)
# define ARCH_REGS	struct pt_regs
# define SYSCALL_NUM	regs[2]
# define SYSCALL_SYSCALL_NUM	regs[4]
# define SYSCALL_RET	regs[2]
# define SYSCALL_NUM_RET_SHARE_REG
#else
# error "Do not know how to find your architecture's registers and syscalls"
#endif

/* When the syscall return can't be changed, stub out the tests for it. */
#ifdef SYSCALL_NUM_RET_SHARE_REG
# define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(-1, action)
#else
# define EXPECT_SYSCALL_RETURN(val, action) \
	do { \
		errno = 0; \
		if (val < 0) { \
			EXPECT_EQ(-1, action); \
			EXPECT_EQ(-(val), errno); \
		} else { \
			EXPECT_EQ(val, action); \
		} \
	} while (0)
#endif

/* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for
 * architectures without HAVE_ARCH_TRACEHOOK (e.g. User-mode Linux).
 */
#if defined(__x86_64__) || defined(__i386__) || defined(__mips__)
#define HAVE_GETREGS
#endif

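/*
 * The helpers below read and rewrite the tracee's registers (via
 * PTRACE_GETREGS/SETREGS or PTRACE_GETREGSET/SETREGSET with NT_PRSTATUS)
 * so the SECCOMP_RET_TRACE and PTRACE_SYSCALL tracers can inspect,
 * redirect, or skip syscalls.
 */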
/* Architecture-specific syscall fetching routine. */
int get_syscall(struct __test_metadata *_metadata, pid_t tracee)
{
	ARCH_REGS regs;
#ifdef HAVE_GETREGS
	EXPECT_EQ(0, ptrace(PTRACE_GETREGS, tracee, 0, &regs)) {
		TH_LOG("PTRACE_GETREGS failed");
		return -1;
	}
#else
	struct iovec iov;

	iov.iov_base = &regs;
	iov.iov_len = sizeof(regs);
	EXPECT_EQ(0, ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov)) {
		TH_LOG("PTRACE_GETREGSET failed");
		return -1;
	}
#endif

#if defined(__mips__)
	if (regs.SYSCALL_NUM == __NR_O32_Linux)
		return regs.SYSCALL_SYSCALL_NUM;
#endif
	return regs.SYSCALL_NUM;
}

/* Architecture-specific syscall changing routine. */
void change_syscall(struct __test_metadata *_metadata,
		    pid_t tracee, int syscall, int result)
{
	int ret;
	ARCH_REGS regs;
#ifdef HAVE_GETREGS
	ret = ptrace(PTRACE_GETREGS, tracee, 0, &regs);
#else
	struct iovec iov;
	iov.iov_base = &regs;
	iov.iov_len = sizeof(regs);
	ret = ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov);
#endif
	EXPECT_EQ(0, ret) {}

#if defined(__x86_64__) || defined(__i386__) || defined(__powerpc__) || \
    defined(__s390__) || defined(__hppa__)
	{
		regs.SYSCALL_NUM = syscall;
	}
#elif defined(__mips__)
	{
		if (regs.SYSCALL_NUM == __NR_O32_Linux)
			regs.SYSCALL_SYSCALL_NUM = syscall;
		else
			regs.SYSCALL_NUM = syscall;
	}

#elif defined(__arm__)
# ifndef PTRACE_SET_SYSCALL
#  define PTRACE_SET_SYSCALL 23
# endif
	{
		ret = ptrace(PTRACE_SET_SYSCALL, tracee, NULL, syscall);
		EXPECT_EQ(0, ret);
	}

#elif defined(__aarch64__)
# ifndef NT_ARM_SYSTEM_CALL
#  define NT_ARM_SYSTEM_CALL 0x404
# endif
	{
		iov.iov_base = &syscall;
		iov.iov_len = sizeof(syscall);
		ret = ptrace(PTRACE_SETREGSET, tracee, NT_ARM_SYSTEM_CALL,
			     &iov);
		EXPECT_EQ(0, ret);
	}

#else
	ASSERT_EQ(1, 0) {
		TH_LOG("How is the syscall changed on this architecture?");
	}
#endif

	/* If syscall is skipped, change return value. */
	if (syscall == -1)
#ifdef SYSCALL_NUM_RET_SHARE_REG
		TH_LOG("Can't modify syscall return on this architecture");
#else
		regs.SYSCALL_RET = result;
#endif

#ifdef HAVE_GETREGS
	ret = ptrace(PTRACE_SETREGS, tracee, 0, &regs);
#else
	iov.iov_base = &regs;
	iov.iov_len = sizeof(regs);
	ret = ptrace(PTRACE_SETREGSET, tracee, NT_PRSTATUS, &iov);
#endif
	EXPECT_EQ(0, ret);
}

void tracer_syscall(struct __test_metadata *_metadata, pid_t tracee,
		    int status, void *args)
{
	int ret;
	unsigned long msg;

	/* Make sure we got the right message. */
	ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
	EXPECT_EQ(0, ret);

	/* Validate and take action on expected syscalls. */
	switch (msg) {
	case 0x1002:
		/* change getpid to getppid. */
		EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee));
		change_syscall(_metadata, tracee, __NR_getppid, 0);
		break;
	case 0x1003:
		/* skip gettid with valid return code. */
		EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee));
		change_syscall(_metadata, tracee, -1, 45000);
		break;
	case 0x1004:
		/* skip openat with error. */
		EXPECT_EQ(__NR_openat, get_syscall(_metadata, tracee));
		change_syscall(_metadata, tracee, -1, -ESRCH);
		break;
	case 0x1005:
		/* do nothing (allow getppid) */
		EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee));
		break;
	default:
		EXPECT_EQ(0, msg) {
			TH_LOG("Unknown PTRACE_GETEVENTMSG: 0x%lx", msg);
			kill(tracee, SIGKILL);
		}
	}

}

void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee,
		   int status, void *args)
{
	int ret, nr;
	unsigned long msg;
	static bool entry;

	/*
	 * The traditional way to tell PTRACE_SYSCALL entry/exit
	 * is by counting.
	 */
	entry = !entry;

	/* Make sure we got an appropriate message. */
	ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(entry ? PTRACE_EVENTMSG_SYSCALL_ENTRY
			: PTRACE_EVENTMSG_SYSCALL_EXIT, msg);

	if (!entry)
		return;

	nr = get_syscall(_metadata, tracee);

	if (nr == __NR_getpid)
		change_syscall(_metadata, tracee, __NR_getppid, 0);
	if (nr == __NR_gettid)
		change_syscall(_metadata, tracee, -1, 45000);
	if (nr == __NR_openat)
		change_syscall(_metadata, tracee, -1, -ESRCH);
}

FIXTURE_DATA(TRACE_syscall) {
	struct sock_fprog prog;
	pid_t tracer, mytid, mypid, parent;
};

FIXTURE_SETUP(TRACE_syscall)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_openat, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1005),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};

	memset(&self->prog, 0, sizeof(self->prog));
	self->prog.filter = malloc(sizeof(filter));
	ASSERT_NE(NULL, self->prog.filter);
	memcpy(self->prog.filter, filter, sizeof(filter));
	self->prog.len = (unsigned short)ARRAY_SIZE(filter);

	/* Prepare some testable syscall results. */
	self->mytid = syscall(__NR_gettid);
	ASSERT_GT(self->mytid, 0);
	ASSERT_NE(self->mytid, 1) {
		TH_LOG("Running this test as init is not supported. :)");
	}

	self->mypid = getpid();
	ASSERT_GT(self->mypid, 0);
	ASSERT_EQ(self->mytid, self->mypid);

	self->parent = getppid();
	ASSERT_GT(self->parent, 0);
	ASSERT_NE(self->parent, self->mypid);

	/* Launch tracer. */
	self->tracer = setup_trace_fixture(_metadata, tracer_syscall, NULL,
					   false);
}

FIXTURE_TEARDOWN(TRACE_syscall)
{
	teardown_trace_fixture(_metadata, self->tracer);
	if (self->prog.filter)
		free(self->prog.filter);
}

TEST_F(TRACE_syscall, ptrace_syscall_redirected)
{
	/* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
	teardown_trace_fixture(_metadata, self->tracer);
	self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
					   true);

	/* Tracer will redirect getpid to getppid. */
	EXPECT_NE(self->mypid, syscall(__NR_getpid));
}

TEST_F(TRACE_syscall, ptrace_syscall_errno)
{
	/* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
	teardown_trace_fixture(_metadata, self->tracer);
	self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
					   true);

	/* Tracer should skip the openat syscall, resulting in ESRCH. */
	EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
}

TEST_F(TRACE_syscall, ptrace_syscall_faked)
{
	/* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
	teardown_trace_fixture(_metadata, self->tracer);
	self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
					   true);

	/* Tracer should skip the gettid syscall, resulting in a fake pid. */
	EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
}

TEST_F(TRACE_syscall, syscall_allowed)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
	ASSERT_EQ(0, ret);

	/* getppid works as expected (no changes). */
	EXPECT_EQ(self->parent, syscall(__NR_getppid));
	EXPECT_NE(self->mypid, syscall(__NR_getppid));
}

TEST_F(TRACE_syscall, syscall_redirected)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
	ASSERT_EQ(0, ret);

	/* getpid has been redirected to getppid as expected. */
	EXPECT_EQ(self->parent, syscall(__NR_getpid));
	EXPECT_NE(self->mypid, syscall(__NR_getpid));
}

TEST_F(TRACE_syscall, syscall_errno)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
	ASSERT_EQ(0, ret);

	/* openat has been skipped and an errno returned. */
	EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
}

TEST_F(TRACE_syscall, syscall_faked)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
	ASSERT_EQ(0, ret);

	/* gettid has been skipped and an altered return value stored. */
	EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
}

TEST_F(TRACE_syscall, skip_after_RET_TRACE)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EPERM),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	/* Install fixture filter. */
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
	ASSERT_EQ(0, ret);

	/* Install "errno on getppid" filter. */
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_EQ(0, ret);

	/* Tracer will redirect getpid to getppid, and we should see EPERM. */
*/ 1975 errno = 0; 1976 EXPECT_EQ(-1, syscall(__NR_getpid)); 1977 EXPECT_EQ(EPERM, errno); 1978 } 1979 1980 TEST_F_SIGNAL(TRACE_syscall, kill_after_RET_TRACE, SIGSYS) 1981 { 1982 struct sock_filter filter[] = { 1983 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 1984 offsetof(struct seccomp_data, nr)), 1985 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), 1986 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), 1987 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 1988 }; 1989 struct sock_fprog prog = { 1990 .len = (unsigned short)ARRAY_SIZE(filter), 1991 .filter = filter, 1992 }; 1993 long ret; 1994 1995 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 1996 ASSERT_EQ(0, ret); 1997 1998 /* Install fixture filter. */ 1999 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0); 2000 ASSERT_EQ(0, ret); 2001 2002 /* Install "death on getppid" filter. */ 2003 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); 2004 ASSERT_EQ(0, ret); 2005 2006 /* Tracer will redirect getpid to getppid, and we should die. */ 2007 EXPECT_NE(self->mypid, syscall(__NR_getpid)); 2008 } 2009 2010 TEST_F(TRACE_syscall, skip_after_ptrace) 2011 { 2012 struct sock_filter filter[] = { 2013 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 2014 offsetof(struct seccomp_data, nr)), 2015 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), 2016 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EPERM), 2017 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2018 }; 2019 struct sock_fprog prog = { 2020 .len = (unsigned short)ARRAY_SIZE(filter), 2021 .filter = filter, 2022 }; 2023 long ret; 2024 2025 /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */ 2026 teardown_trace_fixture(_metadata, self->tracer); 2027 self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL, 2028 true); 2029 2030 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 2031 ASSERT_EQ(0, ret); 2032 2033 /* Install "errno on getppid" filter. */ 2034 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); 2035 ASSERT_EQ(0, ret); 2036 2037 /* Tracer will redirect getpid to getppid, and we should see EPERM. */ 2038 EXPECT_EQ(-1, syscall(__NR_getpid)); 2039 EXPECT_EQ(EPERM, errno); 2040 } 2041 2042 TEST_F_SIGNAL(TRACE_syscall, kill_after_ptrace, SIGSYS) 2043 { 2044 struct sock_filter filter[] = { 2045 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 2046 offsetof(struct seccomp_data, nr)), 2047 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), 2048 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), 2049 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2050 }; 2051 struct sock_fprog prog = { 2052 .len = (unsigned short)ARRAY_SIZE(filter), 2053 .filter = filter, 2054 }; 2055 long ret; 2056 2057 /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */ 2058 teardown_trace_fixture(_metadata, self->tracer); 2059 self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL, 2060 true); 2061 2062 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 2063 ASSERT_EQ(0, ret); 2064 2065 /* Install "death on getppid" filter. */ 2066 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); 2067 ASSERT_EQ(0, ret); 2068 2069 /* Tracer will redirect getpid to getppid, and we should die. 
*/ 2070 EXPECT_NE(self->mypid, syscall(__NR_getpid)); 2071 } 2072 2073 TEST(seccomp_syscall) 2074 { 2075 struct sock_filter filter[] = { 2076 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2077 }; 2078 struct sock_fprog prog = { 2079 .len = (unsigned short)ARRAY_SIZE(filter), 2080 .filter = filter, 2081 }; 2082 long ret; 2083 2084 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 2085 ASSERT_EQ(0, ret) { 2086 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2087 } 2088 2089 /* Reject insane operation. */ 2090 ret = seccomp(-1, 0, &prog); 2091 ASSERT_NE(ENOSYS, errno) { 2092 TH_LOG("Kernel does not support seccomp syscall!"); 2093 } 2094 EXPECT_EQ(EINVAL, errno) { 2095 TH_LOG("Did not reject crazy op value!"); 2096 } 2097 2098 /* Reject strict with flags or pointer. */ 2099 ret = seccomp(SECCOMP_SET_MODE_STRICT, -1, NULL); 2100 EXPECT_EQ(EINVAL, errno) { 2101 TH_LOG("Did not reject mode strict with flags!"); 2102 } 2103 ret = seccomp(SECCOMP_SET_MODE_STRICT, 0, &prog); 2104 EXPECT_EQ(EINVAL, errno) { 2105 TH_LOG("Did not reject mode strict with uargs!"); 2106 } 2107 2108 /* Reject insane args for filter. */ 2109 ret = seccomp(SECCOMP_SET_MODE_FILTER, -1, &prog); 2110 EXPECT_EQ(EINVAL, errno) { 2111 TH_LOG("Did not reject crazy filter flags!"); 2112 } 2113 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, NULL); 2114 EXPECT_EQ(EFAULT, errno) { 2115 TH_LOG("Did not reject NULL filter!"); 2116 } 2117 2118 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog); 2119 EXPECT_EQ(0, errno) { 2120 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER: %s", 2121 strerror(errno)); 2122 } 2123 } 2124 2125 TEST(seccomp_syscall_mode_lock) 2126 { 2127 struct sock_filter filter[] = { 2128 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2129 }; 2130 struct sock_fprog prog = { 2131 .len = (unsigned short)ARRAY_SIZE(filter), 2132 .filter = filter, 2133 }; 2134 long ret; 2135 2136 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0); 2137 ASSERT_EQ(0, ret) { 2138 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2139 } 2140 2141 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog); 2142 ASSERT_NE(ENOSYS, errno) { 2143 TH_LOG("Kernel does not support seccomp syscall!"); 2144 } 2145 EXPECT_EQ(0, ret) { 2146 TH_LOG("Could not install filter!"); 2147 } 2148 2149 /* Make sure neither entry point will switch to strict. */ 2150 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0); 2151 EXPECT_EQ(EINVAL, errno) { 2152 TH_LOG("Switched to mode strict!"); 2153 } 2154 2155 ret = seccomp(SECCOMP_SET_MODE_STRICT, 0, NULL); 2156 EXPECT_EQ(EINVAL, errno) { 2157 TH_LOG("Switched to mode strict!"); 2158 } 2159 } 2160 2161 /* 2162 * Test detection of known and unknown filter flags. Userspace needs to be able 2163 * to check if a filter flag is supported by the current kernel and a good way 2164 * of doing that is by attempting to enter filter mode, with the flag bit in 2165 * question set, and a NULL pointer for the _args_ parameter. EFAULT indicates 2166 * that the flag is valid and EINVAL indicates that the flag is invalid. 
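 * A minimal sketch of such a probe helper follows; it is illustrative only,
 * is not called by the tests, and the helper name is hypothetical.
 */

static int filter_flag_supported(unsigned int flag)
{
	/*
	 * Illustrative sketch: probe a single filter flag with a NULL args
	 * pointer. EFAULT means the flag was accepted and the kernel went on
	 * to fault on the NULL uargs; EINVAL means the flag is unknown to
	 * this kernel.
	 */
	if (seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL) == 0)
		return 0;	/* should not happen: NULL args cannot succeed */
	return errno == EFAULT;
}

/* The original flag-detection test follows.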
2167 */ 2168 TEST(detect_seccomp_filter_flags) 2169 { 2170 unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC, 2171 SECCOMP_FILTER_FLAG_LOG, 2172 SECCOMP_FILTER_FLAG_SPEC_ALLOW, 2173 SECCOMP_FILTER_FLAG_NEW_LISTENER }; 2174 unsigned int exclusive[] = { 2175 SECCOMP_FILTER_FLAG_TSYNC, 2176 SECCOMP_FILTER_FLAG_NEW_LISTENER }; 2177 unsigned int flag, all_flags, exclusive_mask; 2178 int i; 2179 long ret; 2180 2181 /* Test detection of individual known-good filter flags */ 2182 for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) { 2183 int bits = 0; 2184 2185 flag = flags[i]; 2186 /* Make sure the flag is a single bit! */ 2187 while (flag) { 2188 if (flag & 0x1) 2189 bits++; 2190 flag >>= 1; 2191 } 2192 ASSERT_EQ(1, bits); 2193 flag = flags[i]; 2194 2195 ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL); 2196 ASSERT_NE(ENOSYS, errno) { 2197 TH_LOG("Kernel does not support seccomp syscall!"); 2198 } 2199 EXPECT_EQ(-1, ret); 2200 EXPECT_EQ(EFAULT, errno) { 2201 TH_LOG("Failed to detect that a known-good filter flag (0x%X) is supported!", 2202 flag); 2203 } 2204 2205 all_flags |= flag; 2206 } 2207 2208 /* 2209 * Test detection of all known-good filter flags combined. But 2210 * for the exclusive flags we need to mask them out and try them 2211 * individually for the "all flags" testing. 2212 */ 2213 exclusive_mask = 0; 2214 for (i = 0; i < ARRAY_SIZE(exclusive); i++) 2215 exclusive_mask |= exclusive[i]; 2216 for (i = 0; i < ARRAY_SIZE(exclusive); i++) { 2217 flag = all_flags & ~exclusive_mask; 2218 flag |= exclusive[i]; 2219 2220 ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL); 2221 EXPECT_EQ(-1, ret); 2222 EXPECT_EQ(EFAULT, errno) { 2223 TH_LOG("Failed to detect that all known-good filter flags (0x%X) are supported!", 2224 flag); 2225 } 2226 } 2227 2228 /* Test detection of an unknown filter flag, without exclusives. */ 2229 flag = -1; 2230 flag &= ~exclusive_mask; 2231 ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL); 2232 EXPECT_EQ(-1, ret); 2233 EXPECT_EQ(EINVAL, errno) { 2234 TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported!", 2235 flag); 2236 } 2237 2238 /* 2239 * Test detection of an unknown filter flag that may simply need to be 2240 * added to this test 2241 */ 2242 flag = flags[ARRAY_SIZE(flags) - 1] << 1; 2243 ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL); 2244 EXPECT_EQ(-1, ret); 2245 EXPECT_EQ(EINVAL, errno) { 2246 TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported!
Does a new flag need to be added to this test?", 2247 flag); 2248 } 2249 } 2250 2251 TEST(TSYNC_first) 2252 { 2253 struct sock_filter filter[] = { 2254 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2255 }; 2256 struct sock_fprog prog = { 2257 .len = (unsigned short)ARRAY_SIZE(filter), 2258 .filter = filter, 2259 }; 2260 long ret; 2261 2262 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0); 2263 ASSERT_EQ(0, ret) { 2264 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2265 } 2266 2267 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, 2268 &prog); 2269 ASSERT_NE(ENOSYS, errno) { 2270 TH_LOG("Kernel does not support seccomp syscall!"); 2271 } 2272 EXPECT_EQ(0, ret) { 2273 TH_LOG("Could not install initial filter with TSYNC!"); 2274 } 2275 } 2276 2277 #define TSYNC_SIBLINGS 2 2278 struct tsync_sibling { 2279 pthread_t tid; 2280 pid_t system_tid; 2281 sem_t *started; 2282 pthread_cond_t *cond; 2283 pthread_mutex_t *mutex; 2284 int diverge; 2285 int num_waits; 2286 struct sock_fprog *prog; 2287 struct __test_metadata *metadata; 2288 }; 2289 2290 /* 2291 * To avoid joining joined threads (which is not allowed by Bionic), 2292 * make sure we both successfully join and clear the tid to skip a 2293 * later join attempt during fixture teardown. Any remaining threads 2294 * will be directly killed during teardown. 2295 */ 2296 #define PTHREAD_JOIN(tid, status) \ 2297 do { \ 2298 int _rc = pthread_join(tid, status); \ 2299 if (_rc) { \ 2300 TH_LOG("pthread_join of tid %u failed: %d\n", \ 2301 (unsigned int)tid, _rc); \ 2302 } else { \ 2303 tid = 0; \ 2304 } \ 2305 } while (0) 2306 2307 FIXTURE_DATA(TSYNC) { 2308 struct sock_fprog root_prog, apply_prog; 2309 struct tsync_sibling sibling[TSYNC_SIBLINGS]; 2310 sem_t started; 2311 pthread_cond_t cond; 2312 pthread_mutex_t mutex; 2313 int sibling_count; 2314 }; 2315 2316 FIXTURE_SETUP(TSYNC) 2317 { 2318 struct sock_filter root_filter[] = { 2319 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2320 }; 2321 struct sock_filter apply_filter[] = { 2322 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 2323 offsetof(struct seccomp_data, nr)), 2324 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1), 2325 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), 2326 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2327 }; 2328 2329 memset(&self->root_prog, 0, sizeof(self->root_prog)); 2330 memset(&self->apply_prog, 0, sizeof(self->apply_prog)); 2331 memset(&self->sibling, 0, sizeof(self->sibling)); 2332 self->root_prog.filter = malloc(sizeof(root_filter)); 2333 ASSERT_NE(NULL, self->root_prog.filter); 2334 memcpy(self->root_prog.filter, &root_filter, sizeof(root_filter)); 2335 self->root_prog.len = (unsigned short)ARRAY_SIZE(root_filter); 2336 2337 self->apply_prog.filter = malloc(sizeof(apply_filter)); 2338 ASSERT_NE(NULL, self->apply_prog.filter); 2339 memcpy(self->apply_prog.filter, &apply_filter, sizeof(apply_filter)); 2340 self->apply_prog.len = (unsigned short)ARRAY_SIZE(apply_filter); 2341 2342 self->sibling_count = 0; 2343 pthread_mutex_init(&self->mutex, NULL); 2344 pthread_cond_init(&self->cond, NULL); 2345 sem_init(&self->started, 0, 0); 2346 self->sibling[0].tid = 0; 2347 self->sibling[0].cond = &self->cond; 2348 self->sibling[0].started = &self->started; 2349 self->sibling[0].mutex = &self->mutex; 2350 self->sibling[0].diverge = 0; 2351 self->sibling[0].num_waits = 1; 2352 self->sibling[0].prog = &self->root_prog; 2353 self->sibling[0].metadata = _metadata; 2354 self->sibling[1].tid = 0; 2355 self->sibling[1].cond = &self->cond; 2356 self->sibling[1].started = &self->started; 
2357 self->sibling[1].mutex = &self->mutex; 2358 self->sibling[1].diverge = 0; 2359 self->sibling[1].prog = &self->root_prog; 2360 self->sibling[1].num_waits = 1; 2361 self->sibling[1].metadata = _metadata; 2362 } 2363 2364 FIXTURE_TEARDOWN(TSYNC) 2365 { 2366 int sib = 0; 2367 2368 if (self->root_prog.filter) 2369 free(self->root_prog.filter); 2370 if (self->apply_prog.filter) 2371 free(self->apply_prog.filter); 2372 2373 for ( ; sib < self->sibling_count; ++sib) { 2374 struct tsync_sibling *s = &self->sibling[sib]; 2375 2376 if (!s->tid) 2377 continue; 2378 /* 2379 * If a thread is still running, it may be stuck, so hit 2380 * it over the head really hard. 2381 */ 2382 pthread_kill(s->tid, 9); 2383 } 2384 pthread_mutex_destroy(&self->mutex); 2385 pthread_cond_destroy(&self->cond); 2386 sem_destroy(&self->started); 2387 } 2388 2389 void *tsync_sibling(void *data) 2390 { 2391 long ret = 0; 2392 struct tsync_sibling *me = data; 2393 2394 me->system_tid = syscall(__NR_gettid); 2395 2396 pthread_mutex_lock(me->mutex); 2397 if (me->diverge) { 2398 /* Just re-apply the root prog to fork the tree */ 2399 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, 2400 me->prog, 0, 0); 2401 } 2402 sem_post(me->started); 2403 /* Return outside of started so parent notices failures. */ 2404 if (ret) { 2405 pthread_mutex_unlock(me->mutex); 2406 return (void *)SIBLING_EXIT_FAILURE; 2407 } 2408 do { 2409 pthread_cond_wait(me->cond, me->mutex); 2410 me->num_waits = me->num_waits - 1; 2411 } while (me->num_waits); 2412 pthread_mutex_unlock(me->mutex); 2413 2414 ret = prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0); 2415 if (!ret) 2416 return (void *)SIBLING_EXIT_NEWPRIVS; 2417 read(0, NULL, 0); 2418 return (void *)SIBLING_EXIT_UNKILLED; 2419 } 2420 2421 void tsync_start_sibling(struct tsync_sibling *sibling) 2422 { 2423 pthread_create(&sibling->tid, NULL, tsync_sibling, (void *)sibling); 2424 } 2425 2426 TEST_F(TSYNC, siblings_fail_prctl) 2427 { 2428 long ret; 2429 void *status; 2430 struct sock_filter filter[] = { 2431 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 2432 offsetof(struct seccomp_data, nr)), 2433 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1), 2434 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EINVAL), 2435 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2436 }; 2437 struct sock_fprog prog = { 2438 .len = (unsigned short)ARRAY_SIZE(filter), 2439 .filter = filter, 2440 }; 2441 2442 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { 2443 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2444 } 2445 2446 /* Check prctl failure detection by requesting sib 0 diverge. */ 2447 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog); 2448 ASSERT_NE(ENOSYS, errno) { 2449 TH_LOG("Kernel does not support seccomp syscall!"); 2450 } 2451 ASSERT_EQ(0, ret) { 2452 TH_LOG("setting filter failed"); 2453 } 2454 2455 self->sibling[0].diverge = 1; 2456 tsync_start_sibling(&self->sibling[0]); 2457 tsync_start_sibling(&self->sibling[1]); 2458 2459 while (self->sibling_count < TSYNC_SIBLINGS) { 2460 sem_wait(&self->started); 2461 self->sibling_count++; 2462 } 2463 2464 /* Signal the threads to clean up*/ 2465 pthread_mutex_lock(&self->mutex); 2466 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2467 TH_LOG("cond broadcast non-zero"); 2468 } 2469 pthread_mutex_unlock(&self->mutex); 2470 2471 /* Ensure diverging sibling failed to call prctl. 
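 * (Sibling 0 was started with ->diverge set, so tsync_sibling() re-applied
 * the root filter via prctl(); the filter installed above answers prctl
 * with SECCOMP_RET_ERRNO | EINVAL, so that call fails and the sibling
 * returns SIBLING_EXIT_FAILURE.)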
*/ 2472 PTHREAD_JOIN(self->sibling[0].tid, &status); 2473 EXPECT_EQ(SIBLING_EXIT_FAILURE, (long)status); 2474 PTHREAD_JOIN(self->sibling[1].tid, &status); 2475 EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); 2476 } 2477 2478 TEST_F(TSYNC, two_siblings_with_ancestor) 2479 { 2480 long ret; 2481 void *status; 2482 2483 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { 2484 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2485 } 2486 2487 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); 2488 ASSERT_NE(ENOSYS, errno) { 2489 TH_LOG("Kernel does not support seccomp syscall!"); 2490 } 2491 ASSERT_EQ(0, ret) { 2492 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); 2493 } 2494 tsync_start_sibling(&self->sibling[0]); 2495 tsync_start_sibling(&self->sibling[1]); 2496 2497 while (self->sibling_count < TSYNC_SIBLINGS) { 2498 sem_wait(&self->started); 2499 self->sibling_count++; 2500 } 2501 2502 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, 2503 &self->apply_prog); 2504 ASSERT_EQ(0, ret) { 2505 TH_LOG("Could not install filter on all threads!"); 2506 } 2507 /* Tell the siblings to test the policy */ 2508 pthread_mutex_lock(&self->mutex); 2509 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2510 TH_LOG("cond broadcast non-zero"); 2511 } 2512 pthread_mutex_unlock(&self->mutex); 2513 /* Ensure they are both killed and don't exit cleanly. */ 2514 PTHREAD_JOIN(self->sibling[0].tid, &status); 2515 EXPECT_EQ(0x0, (long)status); 2516 PTHREAD_JOIN(self->sibling[1].tid, &status); 2517 EXPECT_EQ(0x0, (long)status); 2518 } 2519 2520 TEST_F(TSYNC, two_sibling_want_nnp) 2521 { 2522 void *status; 2523 2524 /* start siblings before any prctl() operations */ 2525 tsync_start_sibling(&self->sibling[0]); 2526 tsync_start_sibling(&self->sibling[1]); 2527 while (self->sibling_count < TSYNC_SIBLINGS) { 2528 sem_wait(&self->started); 2529 self->sibling_count++; 2530 } 2531 2532 /* Tell the siblings to test no policy */ 2533 pthread_mutex_lock(&self->mutex); 2534 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2535 TH_LOG("cond broadcast non-zero"); 2536 } 2537 pthread_mutex_unlock(&self->mutex); 2538 2539 /* Ensure they are both upset about lacking nnp. */ 2540 PTHREAD_JOIN(self->sibling[0].tid, &status); 2541 EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status); 2542 PTHREAD_JOIN(self->sibling[1].tid, &status); 2543 EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status); 2544 } 2545 2546 TEST_F(TSYNC, two_siblings_with_no_filter) 2547 { 2548 long ret; 2549 void *status; 2550 2551 /* start siblings before any prctl() operations */ 2552 tsync_start_sibling(&self->sibling[0]); 2553 tsync_start_sibling(&self->sibling[1]); 2554 while (self->sibling_count < TSYNC_SIBLINGS) { 2555 sem_wait(&self->started); 2556 self->sibling_count++; 2557 } 2558 2559 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { 2560 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2561 } 2562 2563 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, 2564 &self->apply_prog); 2565 ASSERT_NE(ENOSYS, errno) { 2566 TH_LOG("Kernel does not support seccomp syscall!"); 2567 } 2568 ASSERT_EQ(0, ret) { 2569 TH_LOG("Could not install filter on all threads!"); 2570 } 2571 2572 /* Tell the siblings to test the policy */ 2573 pthread_mutex_lock(&self->mutex); 2574 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2575 TH_LOG("cond broadcast non-zero"); 2576 } 2577 pthread_mutex_unlock(&self->mutex); 2578 2579 /* Ensure they are both killed and don't exit cleanly.
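 * (The TSYNC'd apply_prog kills on __NR_read, and tsync_sibling() calls
 * read() before returning, so both join statuses are 0 instead of
 * SIBLING_EXIT_UNKILLED.)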
*/ 2580 PTHREAD_JOIN(self->sibling[0].tid, &status); 2581 EXPECT_EQ(0x0, (long)status); 2582 PTHREAD_JOIN(self->sibling[1].tid, &status); 2583 EXPECT_EQ(0x0, (long)status); 2584 } 2585 2586 TEST_F(TSYNC, two_siblings_with_one_divergence) 2587 { 2588 long ret; 2589 void *status; 2590 2591 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { 2592 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2593 } 2594 2595 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); 2596 ASSERT_NE(ENOSYS, errno) { 2597 TH_LOG("Kernel does not support seccomp syscall!"); 2598 } 2599 ASSERT_EQ(0, ret) { 2600 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); 2601 } 2602 self->sibling[0].diverge = 1; 2603 tsync_start_sibling(&self->sibling[0]); 2604 tsync_start_sibling(&self->sibling[1]); 2605 2606 while (self->sibling_count < TSYNC_SIBLINGS) { 2607 sem_wait(&self->started); 2608 self->sibling_count++; 2609 } 2610 2611 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, 2612 &self->apply_prog); 2613 ASSERT_EQ(self->sibling[0].system_tid, ret) { 2614 TH_LOG("Did not fail on diverged sibling."); 2615 } 2616 2617 /* Wake the threads */ 2618 pthread_mutex_lock(&self->mutex); 2619 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2620 TH_LOG("cond broadcast non-zero"); 2621 } 2622 pthread_mutex_unlock(&self->mutex); 2623 2624 /* Ensure they are both unkilled. */ 2625 PTHREAD_JOIN(self->sibling[0].tid, &status); 2626 EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); 2627 PTHREAD_JOIN(self->sibling[1].tid, &status); 2628 EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); 2629 } 2630 2631 TEST_F(TSYNC, two_siblings_not_under_filter) 2632 { 2633 long ret, sib; 2634 void *status; 2635 struct timespec delay = { .tv_nsec = 100000000 }; 2636 2637 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { 2638 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2639 } 2640 2641 /* 2642 * Sibling 0 will have its own seccomp policy 2643 * and Sibling 1 will not be under seccomp at 2644 * all. Sibling 1 will enter seccomp and 0 2645 * will cause failure. 2646 */ 2647 self->sibling[0].diverge = 1; 2648 tsync_start_sibling(&self->sibling[0]); 2649 tsync_start_sibling(&self->sibling[1]); 2650 2651 while (self->sibling_count < TSYNC_SIBLINGS) { 2652 sem_wait(&self->started); 2653 self->sibling_count++; 2654 } 2655 2656 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); 2657 ASSERT_NE(ENOSYS, errno) { 2658 TH_LOG("Kernel does not support seccomp syscall!"); 2659 } 2660 ASSERT_EQ(0, ret) { 2661 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); 2662 } 2663 2664 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, 2665 &self->apply_prog); 2666 ASSERT_EQ(ret, self->sibling[0].system_tid) { 2667 TH_LOG("Did not fail on diverged sibling."); 2668 } 2669 sib = 1; 2670 if (ret == self->sibling[0].system_tid) 2671 sib = 0; 2672 2673 pthread_mutex_lock(&self->mutex); 2674 2675 /* Increment the other siblings num_waits so we can clean up 2676 * the one we just saw. 2677 */ 2678 self->sibling[!sib].num_waits += 1; 2679 2680 /* Signal the thread to clean up*/ 2681 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2682 TH_LOG("cond broadcast non-zero"); 2683 } 2684 pthread_mutex_unlock(&self->mutex); 2685 PTHREAD_JOIN(self->sibling[sib].tid, &status); 2686 EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); 2687 /* Poll for actual task death. pthread_join doesn't guarantee it. 
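 * (Signal 0 delivers nothing and only checks that the thread still exists,
 * so the loop spins until kill() starts failing with ESRCH.)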
*/ 2688 while (!kill(self->sibling[sib].system_tid, 0)) 2689 nanosleep(&delay, NULL); 2690 /* Switch to the remaining sibling */ 2691 sib = !sib; 2692 2693 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, 2694 &self->apply_prog); 2695 ASSERT_EQ(0, ret) { 2696 TH_LOG("Expected the remaining sibling to sync"); 2697 }; 2698 2699 pthread_mutex_lock(&self->mutex); 2700 2701 /* If remaining sibling didn't have a chance to wake up during 2702 * the first broadcast, manually reduce the num_waits now. 2703 */ 2704 if (self->sibling[sib].num_waits > 1) 2705 self->sibling[sib].num_waits = 1; 2706 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2707 TH_LOG("cond broadcast non-zero"); 2708 } 2709 pthread_mutex_unlock(&self->mutex); 2710 PTHREAD_JOIN(self->sibling[sib].tid, &status); 2711 EXPECT_EQ(0, (long)status); 2712 /* Poll for actual task death. pthread_join doesn't guarantee it. */ 2713 while (!kill(self->sibling[sib].system_tid, 0)) 2714 nanosleep(&delay, NULL); 2715 2716 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, 2717 &self->apply_prog); 2718 ASSERT_EQ(0, ret); /* just us chickens */ 2719 } 2720 2721 /* Make sure restarted syscalls are seen directly as "restart_syscall". */ 2722 TEST(syscall_restart) 2723 { 2724 long ret; 2725 unsigned long msg; 2726 pid_t child_pid; 2727 int pipefd[2]; 2728 int status; 2729 siginfo_t info = { }; 2730 struct sock_filter filter[] = { 2731 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 2732 offsetof(struct seccomp_data, nr)), 2733 2734 #ifdef __NR_sigreturn 2735 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_sigreturn, 6, 0), 2736 #endif 2737 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 5, 0), 2738 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_exit, 4, 0), 2739 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_rt_sigreturn, 3, 0), 2740 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_nanosleep, 4, 0), 2741 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_restart_syscall, 4, 0), 2742 2743 /* Allow __NR_write for easy logging. */ 2744 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_write, 0, 1), 2745 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2746 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), 2747 /* The nanosleep jump target. */ 2748 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE|0x100), 2749 /* The restart_syscall jump target. */ 2750 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE|0x200), 2751 }; 2752 struct sock_fprog prog = { 2753 .len = (unsigned short)ARRAY_SIZE(filter), 2754 .filter = filter, 2755 }; 2756 #if defined(__arm__) 2757 struct utsname utsbuf; 2758 #endif 2759 2760 ASSERT_EQ(0, pipe(pipefd)); 2761 2762 child_pid = fork(); 2763 ASSERT_LE(0, child_pid); 2764 if (child_pid == 0) { 2765 /* Child uses EXPECT not ASSERT to deliver status correctly. */ 2766 char buf = ' '; 2767 struct timespec timeout = { }; 2768 2769 /* Attach parent as tracer and stop. */ 2770 EXPECT_EQ(0, ptrace(PTRACE_TRACEME)); 2771 EXPECT_EQ(0, raise(SIGSTOP)); 2772 2773 EXPECT_EQ(0, close(pipefd[1])); 2774 2775 EXPECT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { 2776 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2777 } 2778 2779 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); 2780 EXPECT_EQ(0, ret) { 2781 TH_LOG("Failed to install filter!"); 2782 } 2783 2784 EXPECT_EQ(1, read(pipefd[0], &buf, 1)) { 2785 TH_LOG("Failed to read() sync from parent"); 2786 } 2787 EXPECT_EQ('.', buf) { 2788 TH_LOG("Failed to get sync data from read()"); 2789 } 2790 2791 /* Start nanosleep to be interrupted. 
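 * (The filter installed above returns SECCOMP_RET_TRACE | 0x100 for
 * nanosleep, so the parent sees a PTRACE_EVENT_SECCOMP stop with msg 0x100
 * before the sleep begins and can then interrupt it with SIGSTOP.)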
*/ 2792 timeout.tv_sec = 1; 2793 errno = 0; 2794 EXPECT_EQ(0, nanosleep(&timeout, NULL)) { 2795 TH_LOG("Call to nanosleep() failed (errno %d)", errno); 2796 } 2797 2798 /* Read final sync from parent. */ 2799 EXPECT_EQ(1, read(pipefd[0], &buf, 1)) { 2800 TH_LOG("Failed final read() from parent"); 2801 } 2802 EXPECT_EQ('!', buf) { 2803 TH_LOG("Failed to get final data from read()"); 2804 } 2805 2806 /* Directly report the status of our test harness results. */ 2807 syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS 2808 : EXIT_FAILURE); 2809 } 2810 EXPECT_EQ(0, close(pipefd[0])); 2811 2812 /* Attach to child, setup options, and release. */ 2813 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); 2814 ASSERT_EQ(true, WIFSTOPPED(status)); 2815 ASSERT_EQ(0, ptrace(PTRACE_SETOPTIONS, child_pid, NULL, 2816 PTRACE_O_TRACESECCOMP)); 2817 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); 2818 ASSERT_EQ(1, write(pipefd[1], ".", 1)); 2819 2820 /* Wait for nanosleep() to start. */ 2821 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); 2822 ASSERT_EQ(true, WIFSTOPPED(status)); 2823 ASSERT_EQ(SIGTRAP, WSTOPSIG(status)); 2824 ASSERT_EQ(PTRACE_EVENT_SECCOMP, (status >> 16)); 2825 ASSERT_EQ(0, ptrace(PTRACE_GETEVENTMSG, child_pid, NULL, &msg)); 2826 ASSERT_EQ(0x100, msg); 2827 EXPECT_EQ(__NR_nanosleep, get_syscall(_metadata, child_pid)); 2828 2829 /* Might as well check siginfo for sanity while we're here. */ 2830 ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info)); 2831 ASSERT_EQ(SIGTRAP, info.si_signo); 2832 ASSERT_EQ(SIGTRAP | (PTRACE_EVENT_SECCOMP << 8), info.si_code); 2833 EXPECT_EQ(0, info.si_errno); 2834 EXPECT_EQ(getuid(), info.si_uid); 2835 /* Verify signal delivery came from child (seccomp-triggered). */ 2836 EXPECT_EQ(child_pid, info.si_pid); 2837 2838 /* Interrupt nanosleep with SIGSTOP (which we'll need to handle). */ 2839 ASSERT_EQ(0, kill(child_pid, SIGSTOP)); 2840 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); 2841 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); 2842 ASSERT_EQ(true, WIFSTOPPED(status)); 2843 ASSERT_EQ(SIGSTOP, WSTOPSIG(status)); 2844 ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info)); 2845 /* 2846 * There is no siginfo on SIGSTOP any more, so we can't verify 2847 * signal delivery came from parent now (getpid() == info.si_pid). 2848 * https://lkml.kernel.org/r/CAGXu5jJaZAOzP1qFz66tYrtbuywqb+UN2SOA1VLHpCCOiYvYeg@mail.gmail.com 2849 * At least verify the SIGSTOP via PTRACE_GETSIGINFO. 2850 */ 2851 EXPECT_EQ(SIGSTOP, info.si_signo); 2852 2853 /* Restart nanosleep with SIGCONT, which triggers restart_syscall. */ 2854 ASSERT_EQ(0, kill(child_pid, SIGCONT)); 2855 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); 2856 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); 2857 ASSERT_EQ(true, WIFSTOPPED(status)); 2858 ASSERT_EQ(SIGCONT, WSTOPSIG(status)); 2859 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); 2860 2861 /* Wait for restart_syscall() to start. */ 2862 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); 2863 ASSERT_EQ(true, WIFSTOPPED(status)); 2864 ASSERT_EQ(SIGTRAP, WSTOPSIG(status)); 2865 ASSERT_EQ(PTRACE_EVENT_SECCOMP, (status >> 16)); 2866 ASSERT_EQ(0, ptrace(PTRACE_GETEVENTMSG, child_pid, NULL, &msg)); 2867 2868 ASSERT_EQ(0x200, msg); 2869 ret = get_syscall(_metadata, child_pid); 2870 #if defined(__arm__) 2871 /* 2872 * FIXME: 2873 * - native ARM registers do NOT expose true syscall. 2874 * - compat ARM registers on ARM64 DO expose true syscall. 
2875 */ 2876 ASSERT_EQ(0, uname(&utsbuf)); 2877 if (strncmp(utsbuf.machine, "arm", 3) == 0) { 2878 EXPECT_EQ(__NR_nanosleep, ret); 2879 } else 2880 #endif 2881 { 2882 EXPECT_EQ(__NR_restart_syscall, ret); 2883 } 2884 2885 /* Write again to end test. */ 2886 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); 2887 ASSERT_EQ(1, write(pipefd[1], "!", 1)); 2888 EXPECT_EQ(0, close(pipefd[1])); 2889 2890 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); 2891 if (WIFSIGNALED(status) || WEXITSTATUS(status)) 2892 _metadata->passed = 0; 2893 } 2894 2895 TEST_SIGNAL(filter_flag_log, SIGSYS) 2896 { 2897 struct sock_filter allow_filter[] = { 2898 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2899 }; 2900 struct sock_filter kill_filter[] = { 2901 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 2902 offsetof(struct seccomp_data, nr)), 2903 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1), 2904 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), 2905 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2906 }; 2907 struct sock_fprog allow_prog = { 2908 .len = (unsigned short)ARRAY_SIZE(allow_filter), 2909 .filter = allow_filter, 2910 }; 2911 struct sock_fprog kill_prog = { 2912 .len = (unsigned short)ARRAY_SIZE(kill_filter), 2913 .filter = kill_filter, 2914 }; 2915 long ret; 2916 pid_t parent = getppid(); 2917 2918 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 2919 ASSERT_EQ(0, ret); 2920 2921 /* Verify that the FILTER_FLAG_LOG flag isn't accepted in strict mode */ 2922 ret = seccomp(SECCOMP_SET_MODE_STRICT, SECCOMP_FILTER_FLAG_LOG, 2923 &allow_prog); 2924 ASSERT_NE(ENOSYS, errno) { 2925 TH_LOG("Kernel does not support seccomp syscall!"); 2926 } 2927 EXPECT_NE(0, ret) { 2928 TH_LOG("Kernel accepted FILTER_FLAG_LOG flag in strict mode!"); 2929 } 2930 EXPECT_EQ(EINVAL, errno) { 2931 TH_LOG("Kernel returned unexpected errno for FILTER_FLAG_LOG flag in strict mode!"); 2932 } 2933 2934 /* Verify that a simple, permissive filter can be added with no flags */ 2935 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &allow_prog); 2936 EXPECT_EQ(0, ret); 2937 2938 /* See if the same filter can be added with the FILTER_FLAG_LOG flag */ 2939 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_LOG, 2940 &allow_prog); 2941 ASSERT_NE(EINVAL, errno) { 2942 TH_LOG("Kernel does not support the FILTER_FLAG_LOG flag!"); 2943 } 2944 EXPECT_EQ(0, ret); 2945 2946 /* Ensure that the kill filter works with the FILTER_FLAG_LOG flag */ 2947 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_LOG, 2948 &kill_prog); 2949 EXPECT_EQ(0, ret); 2950 2951 EXPECT_EQ(parent, syscall(__NR_getppid)); 2952 /* getpid() should never return. 
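 * (kill_prog terminates the thread with SIGSYS on getpid(), which is the
 * signal this TEST_SIGNAL() expects.)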
*/ 2953 EXPECT_EQ(0, syscall(__NR_getpid)); 2954 } 2955 2956 TEST(get_action_avail) 2957 { 2958 __u32 actions[] = { SECCOMP_RET_KILL_THREAD, SECCOMP_RET_TRAP, 2959 SECCOMP_RET_ERRNO, SECCOMP_RET_TRACE, 2960 SECCOMP_RET_LOG, SECCOMP_RET_ALLOW }; 2961 __u32 unknown_action = 0x10000000U; 2962 int i; 2963 long ret; 2964 2965 ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &actions[0]); 2966 ASSERT_NE(ENOSYS, errno) { 2967 TH_LOG("Kernel does not support seccomp syscall!"); 2968 } 2969 ASSERT_NE(EINVAL, errno) { 2970 TH_LOG("Kernel does not support SECCOMP_GET_ACTION_AVAIL operation!"); 2971 } 2972 EXPECT_EQ(ret, 0); 2973 2974 for (i = 0; i < ARRAY_SIZE(actions); i++) { 2975 ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &actions[i]); 2976 EXPECT_EQ(ret, 0) { 2977 TH_LOG("Expected action (0x%X) not available!", 2978 actions[i]); 2979 } 2980 } 2981 2982 /* Check that an unknown action is handled properly (EOPNOTSUPP) */ 2983 ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &unknown_action); 2984 EXPECT_EQ(ret, -1); 2985 EXPECT_EQ(errno, EOPNOTSUPP); 2986 } 2987 2988 TEST(get_metadata) 2989 { 2990 pid_t pid; 2991 int pipefd[2]; 2992 char buf; 2993 struct seccomp_metadata md; 2994 long ret; 2995 2996 /* Only real root can get metadata. */ 2997 if (geteuid()) { 2998 XFAIL(return, "get_metadata requires real root"); 2999 return; 3000 } 3001 3002 ASSERT_EQ(0, pipe(pipefd)); 3003 3004 pid = fork(); 3005 ASSERT_GE(pid, 0); 3006 if (pid == 0) { 3007 struct sock_filter filter[] = { 3008 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 3009 }; 3010 struct sock_fprog prog = { 3011 .len = (unsigned short)ARRAY_SIZE(filter), 3012 .filter = filter, 3013 }; 3014 3015 /* one with log, one without */ 3016 EXPECT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 3017 SECCOMP_FILTER_FLAG_LOG, &prog)); 3018 EXPECT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog)); 3019 3020 EXPECT_EQ(0, close(pipefd[0])); 3021 ASSERT_EQ(1, write(pipefd[1], "1", 1)); 3022 ASSERT_EQ(0, close(pipefd[1])); 3023 3024 while (1) 3025 sleep(100); 3026 } 3027 3028 ASSERT_EQ(0, close(pipefd[1])); 3029 ASSERT_EQ(1, read(pipefd[0], &buf, 1)); 3030 3031 ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid)); 3032 ASSERT_EQ(pid, waitpid(pid, NULL, 0)); 3033 3034 /* Past here must not use ASSERT or child process is never killed. 
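 * (A failed ASSERT_* returns from the test immediately, so the
 * kill(pid, SIGKILL) at the end would never run and the forked child would
 * be left sleeping forever.)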
*/ 3035 3036 md.filter_off = 0; 3037 errno = 0; 3038 ret = ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md); 3039 EXPECT_EQ(sizeof(md), ret) { 3040 if (errno == EINVAL) 3041 XFAIL(goto skip, "Kernel does not support PTRACE_SECCOMP_GET_METADATA (missing CONFIG_CHECKPOINT_RESTORE?)"); 3042 } 3043 3044 EXPECT_EQ(md.flags, SECCOMP_FILTER_FLAG_LOG); 3045 EXPECT_EQ(md.filter_off, 0); 3046 3047 md.filter_off = 1; 3048 ret = ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md); 3049 EXPECT_EQ(sizeof(md), ret); 3050 EXPECT_EQ(md.flags, 0); 3051 EXPECT_EQ(md.filter_off, 1); 3052 3053 skip: 3054 ASSERT_EQ(0, kill(pid, SIGKILL)); 3055 } 3056 3057 static int user_trap_syscall(int nr, unsigned int flags) 3058 { 3059 struct sock_filter filter[] = { 3060 BPF_STMT(BPF_LD+BPF_W+BPF_ABS, 3061 offsetof(struct seccomp_data, nr)), 3062 BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, nr, 0, 1), 3063 BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_USER_NOTIF), 3064 BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW), 3065 }; 3066 3067 struct sock_fprog prog = { 3068 .len = (unsigned short)ARRAY_SIZE(filter), 3069 .filter = filter, 3070 }; 3071 3072 return seccomp(SECCOMP_SET_MODE_FILTER, flags, &prog); 3073 } 3074 3075 #define USER_NOTIF_MAGIC 116983961184613L 3076 TEST(user_notification_basic) 3077 { 3078 pid_t pid; 3079 long ret; 3080 int status, listener; 3081 struct seccomp_notif req = {}; 3082 struct seccomp_notif_resp resp = {}; 3083 struct pollfd pollfd; 3084 3085 struct sock_filter filter[] = { 3086 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 3087 }; 3088 struct sock_fprog prog = { 3089 .len = (unsigned short)ARRAY_SIZE(filter), 3090 .filter = filter, 3091 }; 3092 3093 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 3094 ASSERT_EQ(0, ret) { 3095 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 3096 } 3097 3098 pid = fork(); 3099 ASSERT_GE(pid, 0); 3100 3101 /* Check that we get -ENOSYS with no listener attached */ 3102 if (pid == 0) { 3103 if (user_trap_syscall(__NR_getppid, 0) < 0) 3104 exit(1); 3105 ret = syscall(__NR_getppid); 3106 exit(ret >= 0 || errno != ENOSYS); 3107 } 3108 3109 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3110 EXPECT_EQ(true, WIFEXITED(status)); 3111 EXPECT_EQ(0, WEXITSTATUS(status)); 3112 3113 /* Add some no-op filters for grins. 
*/ 3114 EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0); 3115 EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0); 3116 EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0); 3117 EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0); 3118 3119 /* Check that the basic notification machinery works */ 3120 listener = user_trap_syscall(__NR_getppid, 3121 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3122 ASSERT_GE(listener, 0); 3123 3124 /* Installing a second listener in the chain should EBUSY */ 3125 EXPECT_EQ(user_trap_syscall(__NR_getppid, 3126 SECCOMP_FILTER_FLAG_NEW_LISTENER), 3127 -1); 3128 EXPECT_EQ(errno, EBUSY); 3129 3130 pid = fork(); 3131 ASSERT_GE(pid, 0); 3132 3133 if (pid == 0) { 3134 ret = syscall(__NR_getppid); 3135 exit(ret != USER_NOTIF_MAGIC); 3136 } 3137 3138 pollfd.fd = listener; 3139 pollfd.events = POLLIN | POLLOUT; 3140 3141 EXPECT_GT(poll(&pollfd, 1, -1), 0); 3142 EXPECT_EQ(pollfd.revents, POLLIN); 3143 3144 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3145 3146 pollfd.fd = listener; 3147 pollfd.events = POLLIN | POLLOUT; 3148 3149 EXPECT_GT(poll(&pollfd, 1, -1), 0); 3150 EXPECT_EQ(pollfd.revents, POLLOUT); 3151 3152 EXPECT_EQ(req.data.nr, __NR_getppid); 3153 3154 resp.id = req.id; 3155 resp.error = 0; 3156 resp.val = USER_NOTIF_MAGIC; 3157 3158 /* check that we make sure flags == 0 */ 3159 resp.flags = 1; 3160 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1); 3161 EXPECT_EQ(errno, EINVAL); 3162 3163 resp.flags = 0; 3164 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); 3165 3166 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3167 EXPECT_EQ(true, WIFEXITED(status)); 3168 EXPECT_EQ(0, WEXITSTATUS(status)); 3169 } 3170 3171 TEST(user_notification_kill_in_middle) 3172 { 3173 pid_t pid; 3174 long ret; 3175 int listener; 3176 struct seccomp_notif req = {}; 3177 struct seccomp_notif_resp resp = {}; 3178 3179 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 3180 ASSERT_EQ(0, ret) { 3181 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 3182 } 3183 3184 listener = user_trap_syscall(__NR_getppid, 3185 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3186 ASSERT_GE(listener, 0); 3187 3188 /* 3189 * Check that nothing bad happens when we kill the task in the middle 3190 * of a syscall. 
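 * (i.e. while it is blocked in the kernel waiting for a response to its
 * user notification).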
3191 */ 3192 pid = fork(); 3193 ASSERT_GE(pid, 0); 3194 3195 if (pid == 0) { 3196 ret = syscall(__NR_getppid); 3197 exit(ret != USER_NOTIF_MAGIC); 3198 } 3199 3200 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3201 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ID_VALID, &req.id), 0); 3202 3203 EXPECT_EQ(kill(pid, SIGKILL), 0); 3204 EXPECT_EQ(waitpid(pid, NULL, 0), pid); 3205 3206 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ID_VALID, &req.id), -1); 3207 3208 resp.id = req.id; 3209 ret = ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp); 3210 EXPECT_EQ(ret, -1); 3211 EXPECT_EQ(errno, ENOENT); 3212 } 3213 3214 static int handled = -1; 3215 3216 static void signal_handler(int signal) 3217 { 3218 if (write(handled, "c", 1) != 1) 3219 perror("write from signal"); 3220 } 3221 3222 TEST(user_notification_signal) 3223 { 3224 pid_t pid; 3225 long ret; 3226 int status, listener, sk_pair[2]; 3227 struct seccomp_notif req = {}; 3228 struct seccomp_notif_resp resp = {}; 3229 char c; 3230 3231 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 3232 ASSERT_EQ(0, ret) { 3233 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 3234 } 3235 3236 ASSERT_EQ(socketpair(PF_LOCAL, SOCK_SEQPACKET, 0, sk_pair), 0); 3237 3238 listener = user_trap_syscall(__NR_gettid, 3239 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3240 ASSERT_GE(listener, 0); 3241 3242 pid = fork(); 3243 ASSERT_GE(pid, 0); 3244 3245 if (pid == 0) { 3246 close(sk_pair[0]); 3247 handled = sk_pair[1]; 3248 if (signal(SIGUSR1, signal_handler) == SIG_ERR) { 3249 perror("signal"); 3250 exit(1); 3251 } 3252 /* 3253 * ERESTARTSYS behavior is a bit hard to test, because we need 3254 * to rely on a signal that has not yet been handled. Let's at 3255 * least check that the error code gets propagated through, and 3256 * hope that it doesn't break when there is actually a signal :) 3257 */ 3258 ret = syscall(__NR_gettid); 3259 exit(!(ret == -1 && errno == 512)); 3260 } 3261 3262 close(sk_pair[1]); 3263 3264 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3265 3266 EXPECT_EQ(kill(pid, SIGUSR1), 0); 3267 3268 /* 3269 * Make sure the signal really is delivered, which means we're not 3270 * stuck in the user notification code any more and the notification 3271 * should be dead. 3272 */ 3273 EXPECT_EQ(read(sk_pair[0], &c, 1), 1); 3274 3275 resp.id = req.id; 3276 resp.error = -EPERM; 3277 resp.val = 0; 3278 3279 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1); 3280 EXPECT_EQ(errno, ENOENT); 3281 3282 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3283 3284 resp.id = req.id; 3285 resp.error = -512; /* -ERESTARTSYS */ 3286 resp.val = 0; 3287 3288 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); 3289 3290 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3291 EXPECT_EQ(true, WIFEXITED(status)); 3292 EXPECT_EQ(0, WEXITSTATUS(status)); 3293 } 3294 3295 TEST(user_notification_closed_listener) 3296 { 3297 pid_t pid; 3298 long ret; 3299 int status, listener; 3300 3301 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 3302 ASSERT_EQ(0, ret) { 3303 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 3304 } 3305 3306 listener = user_trap_syscall(__NR_getppid, 3307 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3308 ASSERT_GE(listener, 0); 3309 3310 /* 3311 * Check that we get an ENOSYS when the listener is closed. 
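 * With no listener left to deliver the notification to, the filter's
 * SECCOMP_RET_USER_NOTIF action falls back to failing the syscall with
 * ENOSYS.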
3312 */ 3313 pid = fork(); 3314 ASSERT_GE(pid, 0); 3315 if (pid == 0) { 3316 close(listener); 3317 ret = syscall(__NR_getppid); 3318 exit(ret != -1 && errno != ENOSYS); 3319 } 3320 3321 close(listener); 3322 3323 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3324 EXPECT_EQ(true, WIFEXITED(status)); 3325 EXPECT_EQ(0, WEXITSTATUS(status)); 3326 } 3327 3328 /* 3329 * Check that a pid in a child namespace still shows up as valid in ours. 3330 */ 3331 TEST(user_notification_child_pid_ns) 3332 { 3333 pid_t pid; 3334 int status, listener; 3335 struct seccomp_notif req = {}; 3336 struct seccomp_notif_resp resp = {}; 3337 3338 ASSERT_EQ(unshare(CLONE_NEWUSER | CLONE_NEWPID), 0); 3339 3340 listener = user_trap_syscall(__NR_getppid, 3341 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3342 ASSERT_GE(listener, 0); 3343 3344 pid = fork(); 3345 ASSERT_GE(pid, 0); 3346 3347 if (pid == 0) 3348 exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC); 3349 3350 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3351 EXPECT_EQ(req.pid, pid); 3352 3353 resp.id = req.id; 3354 resp.error = 0; 3355 resp.val = USER_NOTIF_MAGIC; 3356 3357 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); 3358 3359 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3360 EXPECT_EQ(true, WIFEXITED(status)); 3361 EXPECT_EQ(0, WEXITSTATUS(status)); 3362 close(listener); 3363 } 3364 3365 /* 3366 * Check that a pid in a sibling (i.e. unrelated) namespace shows up as 0, i.e. 3367 * invalid. 3368 */ 3369 TEST(user_notification_sibling_pid_ns) 3370 { 3371 pid_t pid, pid2; 3372 int status, listener; 3373 struct seccomp_notif req = {}; 3374 struct seccomp_notif_resp resp = {}; 3375 3376 ASSERT_EQ(prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0), 0) { 3377 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 3378 } 3379 3380 listener = user_trap_syscall(__NR_getppid, 3381 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3382 ASSERT_GE(listener, 0); 3383 3384 pid = fork(); 3385 ASSERT_GE(pid, 0); 3386 3387 if (pid == 0) { 3388 ASSERT_EQ(unshare(CLONE_NEWPID), 0); 3389 3390 pid2 = fork(); 3391 ASSERT_GE(pid2, 0); 3392 3393 if (pid2 == 0) 3394 exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC); 3395 3396 EXPECT_EQ(waitpid(pid2, &status, 0), pid2); 3397 EXPECT_EQ(true, WIFEXITED(status)); 3398 EXPECT_EQ(0, WEXITSTATUS(status)); 3399 exit(WEXITSTATUS(status)); 3400 } 3401 3402 /* Create the sibling ns, and sibling in it. */ 3403 ASSERT_EQ(unshare(CLONE_NEWPID), 0); 3404 ASSERT_EQ(errno, 0); 3405 3406 pid2 = fork(); 3407 ASSERT_GE(pid2, 0); 3408 3409 if (pid2 == 0) { 3410 ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3411 /* 3412 * The pid should be 0, i.e. the task is in some namespace that 3413 * we can't "see". 
3414 */ 3415 EXPECT_EQ(req.pid, 0); 3416 3417 resp.id = req.id; 3418 resp.error = 0; 3419 resp.val = USER_NOTIF_MAGIC; 3420 3421 ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); 3422 exit(0); 3423 } 3424 3425 close(listener); 3426 3427 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3428 EXPECT_EQ(true, WIFEXITED(status)); 3429 EXPECT_EQ(0, WEXITSTATUS(status)); 3430 3431 EXPECT_EQ(waitpid(pid2, &status, 0), pid2); 3432 EXPECT_EQ(true, WIFEXITED(status)); 3433 EXPECT_EQ(0, WEXITSTATUS(status)); 3434 } 3435 3436 TEST(user_notification_fault_recv) 3437 { 3438 pid_t pid; 3439 int status, listener; 3440 struct seccomp_notif req = {}; 3441 struct seccomp_notif_resp resp = {}; 3442 3443 ASSERT_EQ(unshare(CLONE_NEWUSER), 0); 3444 3445 listener = user_trap_syscall(__NR_getppid, 3446 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3447 ASSERT_GE(listener, 0); 3448 3449 pid = fork(); 3450 ASSERT_GE(pid, 0); 3451 3452 if (pid == 0) 3453 exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC); 3454 3455 /* Do a bad recv() */ 3456 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, NULL), -1); 3457 EXPECT_EQ(errno, EFAULT); 3458 3459 /* We should still be able to receive this notification, though. */ 3460 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3461 EXPECT_EQ(req.pid, pid); 3462 3463 resp.id = req.id; 3464 resp.error = 0; 3465 resp.val = USER_NOTIF_MAGIC; 3466 3467 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); 3468 3469 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3470 EXPECT_EQ(true, WIFEXITED(status)); 3471 EXPECT_EQ(0, WEXITSTATUS(status)); 3472 } 3473 3474 TEST(seccomp_get_notif_sizes) 3475 { 3476 struct seccomp_notif_sizes sizes; 3477 3478 ASSERT_EQ(seccomp(SECCOMP_GET_NOTIF_SIZES, 0, &sizes), 0); 3479 EXPECT_EQ(sizes.seccomp_notif, sizeof(struct seccomp_notif)); 3480 EXPECT_EQ(sizes.seccomp_notif_resp, sizeof(struct seccomp_notif_resp)); 3481 } 3482 3483 /* 3484 * TODO: 3485 * - add microbenchmarks 3486 * - expand NNP testing 3487 * - better arch-specific TRACE and TRAP handlers. 3488 * - endianness checking when appropriate 3489 * - 64-bit arg prodding 3490 * - arch value testing (x86 modes especially) 3491 * - verify that FILTER_FLAG_LOG filters generate log messages 3492 * - verify that RET_LOG generates log messages 3493 * - ... 3494 */ 3495 3496 TEST_HARNESS_MAIN 3497
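
/*
 * Illustrative sketch, not part of the test suite above: the bare
 * supervisor loop that the user_notification_* tests exercise piece by
 * piece. It relies only on the seccomp_notif structures and ioctls defined
 * (or fallback-defined) earlier in this file and answers every notification
 * with USER_NOTIF_MAGIC. The function name is hypothetical and nothing
 * calls it.
 */
static void example_user_notif_loop(int listener)
{
	struct seccomp_notif req;
	struct seccomp_notif_resp resp;

	for (;;) {
		memset(&req, 0, sizeof(req));
		if (ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req))
			break;

		memset(&resp, 0, sizeof(resp));
		resp.id = req.id;	/* must echo the id of the request */
		resp.error = 0;		/* 0 plus a val "fakes" success */
		resp.val = USER_NOTIF_MAGIC;
		resp.flags = 0;		/* the kernel rejects non-zero flags */

		if (ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp))
			break;
	}
}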