1 /*- 2 * Copyright (c) 2004 Robert N. M. Watson 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 * $FreeBSD$ 27 */ 28 29 /* 30 * Regression test to do some very basic AIO exercising on several types of 31 * file descriptors. Currently, the tests consist of initializing a fixed 32 * size buffer with pseudo-random data, writing it to one fd using AIO, then 33 * reading it from a second descriptor using AIO. For some targets, the same 34 * fd is used for write and read (i.e., file, md device), but for others the 35 * operation is performed on a peer (pty, socket, fifo, etc). For each file 36 * descriptor type, several completion methods are tested. This test program 37 * does not attempt to exercise error cases or more subtle asynchronous 38 * behavior, just make sure that the basic operations work on some basic object 39 * types. 40 */ 41 42 #include <sys/param.h> 43 #include <sys/module.h> 44 #include <sys/resource.h> 45 #include <sys/socket.h> 46 #include <sys/stat.h> 47 #include <sys/mdioctl.h> 48 49 #include <aio.h> 50 #include <err.h> 51 #include <errno.h> 52 #include <fcntl.h> 53 #include <libutil.h> 54 #include <limits.h> 55 #include <semaphore.h> 56 #include <stdint.h> 57 #include <stdio.h> 58 #include <stdlib.h> 59 #include <string.h> 60 #include <termios.h> 61 #include <unistd.h> 62 63 #include <atf-c.h> 64 65 #include "freebsd_test_suite/macros.h" 66 #include "local.h" 67 68 /* 69 * GLOBAL_MAX sets the largest usable buffer size to be read and written, as 70 * it sizes ac_buffer in the aio_context structure. It is also the default 71 * size for file I/O. For other types, we use smaller blocks or we risk 72 * blocking (and we run in a single process/thread so that would be bad). 73 */ 74 #define GLOBAL_MAX 16384 75 76 #define BUFFER_MAX GLOBAL_MAX 77 78 /* 79 * A completion function will block until the aio has completed, then return 80 * the result of the aio. errno will be set appropriately. 81 */ 82 typedef ssize_t (*completion)(struct aiocb*); 83 84 struct aio_context { 85 int ac_read_fd, ac_write_fd; 86 long ac_seed; 87 char ac_buffer[GLOBAL_MAX]; 88 int ac_buflen; 89 int ac_seconds; 90 }; 91 92 static sem_t completions; 93 94 95 /* 96 * Fill a buffer given a seed that can be fed into srandom() to initialize 97 * the PRNG in a repeatable manner. 98 */ 99 static void 100 aio_fill_buffer(char *buffer, int len, long seed) 101 { 102 char ch; 103 int i; 104 105 srandom(seed); 106 for (i = 0; i < len; i++) { 107 ch = random() & 0xff; 108 buffer[i] = ch; 109 } 110 } 111 112 /* 113 * Test that a buffer matches a given seed. See aio_fill_buffer(). Return 114 * (1) on a match, (0) on a mismatch. 115 */ 116 static int 117 aio_test_buffer(char *buffer, int len, long seed) 118 { 119 char ch; 120 int i; 121 122 srandom(seed); 123 for (i = 0; i < len; i++) { 124 ch = random() & 0xff; 125 if (buffer[i] != ch) 126 return (0); 127 } 128 return (1); 129 } 130 131 /* 132 * Initialize a testing context given the file descriptors provided by the 133 * test setup. 134 */ 135 static void 136 aio_context_init(struct aio_context *ac, int read_fd, 137 int write_fd, int buflen) 138 { 139 140 ATF_REQUIRE_MSG(buflen <= BUFFER_MAX, 141 "aio_context_init: buffer too large (%d > %d)", 142 buflen, BUFFER_MAX); 143 bzero(ac, sizeof(*ac)); 144 ac->ac_read_fd = read_fd; 145 ac->ac_write_fd = write_fd; 146 ac->ac_buflen = buflen; 147 srandomdev(); 148 ac->ac_seed = random(); 149 aio_fill_buffer(ac->ac_buffer, buflen, ac->ac_seed); 150 ATF_REQUIRE_MSG(aio_test_buffer(ac->ac_buffer, buflen, 151 ac->ac_seed) != 0, "aio_test_buffer: internal error"); 152 } 153 154 static ssize_t 155 poll(struct aiocb *aio) 156 { 157 int error; 158 159 while ((error = aio_error(aio)) == EINPROGRESS) 160 usleep(25000); 161 if (error) 162 return (error); 163 else 164 return (aio_return(aio)); 165 } 166 167 static void 168 sigusr1_handler(int sig __unused) 169 { 170 ATF_REQUIRE_EQ(0, sem_post(&completions)); 171 } 172 173 static void 174 thr_handler(union sigval sv __unused) 175 { 176 ATF_REQUIRE_EQ(0, sem_post(&completions)); 177 } 178 179 static ssize_t 180 poll_signaled(struct aiocb *aio) 181 { 182 int error; 183 184 ATF_REQUIRE_EQ(0, sem_wait(&completions)); 185 error = aio_error(aio); 186 switch (error) { 187 case EINPROGRESS: 188 errno = EINTR; 189 return (-1); 190 case 0: 191 return (aio_return(aio)); 192 default: 193 return (error); 194 } 195 } 196 197 /* 198 * Setup a signal handler for signal delivery tests 199 * This isn't thread safe, but it's ok since ATF runs each testcase in a 200 * separate process 201 */ 202 static struct sigevent* 203 setup_signal(void) 204 { 205 static struct sigevent sev; 206 207 ATF_REQUIRE_EQ(0, sem_init(&completions, false, 0)); 208 sev.sigev_notify = SIGEV_SIGNAL; 209 sev.sigev_signo = SIGUSR1; 210 ATF_REQUIRE(SIG_ERR != signal(SIGUSR1, sigusr1_handler)); 211 return (&sev); 212 } 213 214 /* 215 * Setup a thread for thread delivery tests 216 * This isn't thread safe, but it's ok since ATF runs each testcase in a 217 * separate process 218 */ 219 static struct sigevent* 220 setup_thread(void) 221 { 222 static struct sigevent sev; 223 224 ATF_REQUIRE_EQ(0, sem_init(&completions, false, 0)); 225 sev.sigev_notify = SIGEV_THREAD; 226 sev.sigev_notify_function = thr_handler; 227 sev.sigev_notify_attributes = NULL; 228 return (&sev); 229 } 230 231 static ssize_t 232 suspend(struct aiocb *aio) 233 { 234 const struct aiocb *const iocbs[] = {aio}; 235 int error; 236 237 error = aio_suspend(iocbs, 1, NULL); 238 if (error == 0) 239 return (aio_return(aio)); 240 else 241 return (error); 242 } 243 244 static ssize_t 245 waitcomplete(struct aiocb *aio) 246 { 247 struct aiocb *aiop; 248 ssize_t ret; 249 250 ret = aio_waitcomplete(&aiop, NULL); 251 ATF_REQUIRE_EQ(aio, aiop); 252 return (ret); 253 } 254 255 /* 256 * Perform a simple write test of our initialized data buffer to the provided 257 * file descriptor. 258 */ 259 static void 260 aio_write_test(struct aio_context *ac, completion comp, struct sigevent *sev) 261 { 262 struct aiocb aio; 263 ssize_t len; 264 265 bzero(&aio, sizeof(aio)); 266 aio.aio_buf = ac->ac_buffer; 267 aio.aio_nbytes = ac->ac_buflen; 268 aio.aio_fildes = ac->ac_write_fd; 269 aio.aio_offset = 0; 270 if (sev) 271 aio.aio_sigevent = *sev; 272 273 if (aio_write(&aio) < 0) 274 atf_tc_fail("aio_write failed: %s", strerror(errno)); 275 276 len = comp(&aio); 277 if (len < 0) 278 atf_tc_fail("aio failed: %s", strerror(errno)); 279 280 if (len != ac->ac_buflen) 281 atf_tc_fail("aio short write (%jd)", (intmax_t)len); 282 } 283 284 /* 285 * Perform a vectored I/O test of our initialized data buffer to the provided 286 * file descriptor. 287 * 288 * To vectorize the linear buffer, chop it up into two pieces of dissimilar 289 * size, and swap their offsets. 290 */ 291 static void 292 aio_writev_test(struct aio_context *ac, completion comp, struct sigevent *sev) 293 { 294 struct aiocb aio; 295 struct iovec iov[2]; 296 size_t len0, len1; 297 ssize_t len; 298 299 bzero(&aio, sizeof(aio)); 300 301 aio.aio_fildes = ac->ac_write_fd; 302 aio.aio_offset = 0; 303 len0 = ac->ac_buflen * 3 / 4; 304 len1 = ac->ac_buflen / 4; 305 iov[0].iov_base = ac->ac_buffer + len1; 306 iov[0].iov_len = len0; 307 iov[1].iov_base = ac->ac_buffer; 308 iov[1].iov_len = len1; 309 aio.aio_iov = iov; 310 aio.aio_iovcnt = 2; 311 if (sev) 312 aio.aio_sigevent = *sev; 313 314 if (aio_writev(&aio) < 0) 315 atf_tc_fail("aio_writev failed: %s", strerror(errno)); 316 317 len = comp(&aio); 318 if (len < 0) 319 atf_tc_fail("aio failed: %s", strerror(errno)); 320 321 if (len != ac->ac_buflen) 322 atf_tc_fail("aio short write (%jd)", (intmax_t)len); 323 } 324 325 /* 326 * Perform a simple read test of our initialized data buffer from the 327 * provided file descriptor. 328 */ 329 static void 330 aio_read_test(struct aio_context *ac, completion comp, struct sigevent *sev) 331 { 332 struct aiocb aio; 333 ssize_t len; 334 335 bzero(ac->ac_buffer, ac->ac_buflen); 336 bzero(&aio, sizeof(aio)); 337 aio.aio_buf = ac->ac_buffer; 338 aio.aio_nbytes = ac->ac_buflen; 339 aio.aio_fildes = ac->ac_read_fd; 340 aio.aio_offset = 0; 341 if (sev) 342 aio.aio_sigevent = *sev; 343 344 if (aio_read(&aio) < 0) 345 atf_tc_fail("aio_read failed: %s", strerror(errno)); 346 347 len = comp(&aio); 348 if (len < 0) 349 atf_tc_fail("aio failed: %s", strerror(errno)); 350 351 ATF_REQUIRE_EQ_MSG(len, ac->ac_buflen, 352 "aio short read (%jd)", (intmax_t)len); 353 354 if (aio_test_buffer(ac->ac_buffer, ac->ac_buflen, ac->ac_seed) == 0) 355 atf_tc_fail("buffer mismatched"); 356 } 357 358 static void 359 aio_readv_test(struct aio_context *ac, completion comp, struct sigevent *sev) 360 { 361 struct aiocb aio; 362 struct iovec iov[2]; 363 size_t len0, len1; 364 ssize_t len; 365 366 bzero(ac->ac_buffer, ac->ac_buflen); 367 bzero(&aio, sizeof(aio)); 368 aio.aio_fildes = ac->ac_read_fd; 369 aio.aio_offset = 0; 370 len0 = ac->ac_buflen * 3 / 4; 371 len1 = ac->ac_buflen / 4; 372 iov[0].iov_base = ac->ac_buffer + len1; 373 iov[0].iov_len = len0; 374 iov[1].iov_base = ac->ac_buffer; 375 iov[1].iov_len = len1; 376 aio.aio_iov = iov; 377 aio.aio_iovcnt = 2; 378 if (sev) 379 aio.aio_sigevent = *sev; 380 381 if (aio_readv(&aio) < 0) 382 atf_tc_fail("aio_read failed: %s", strerror(errno)); 383 384 len = comp(&aio); 385 if (len < 0) 386 atf_tc_fail("aio failed: %s", strerror(errno)); 387 388 ATF_REQUIRE_EQ_MSG(len, ac->ac_buflen, 389 "aio short read (%jd)", (intmax_t)len); 390 391 if (aio_test_buffer(ac->ac_buffer, ac->ac_buflen, ac->ac_seed) == 0) 392 atf_tc_fail("buffer mismatched"); 393 } 394 395 /* 396 * Series of type-specific tests for AIO. For now, we just make sure we can 397 * issue a write and then a read to each type. We assume that once a write 398 * is issued, a read can follow. 399 */ 400 401 /* 402 * Test with a classic file. Assumes we can create a moderate size temporary 403 * file. 404 */ 405 #define FILE_LEN GLOBAL_MAX 406 #define FILE_PATHNAME "testfile" 407 408 static void 409 aio_file_test(completion comp, struct sigevent *sev, bool vectored) 410 { 411 struct aio_context ac; 412 int fd; 413 414 ATF_REQUIRE_KERNEL_MODULE("aio"); 415 ATF_REQUIRE_UNSAFE_AIO(); 416 417 fd = open(FILE_PATHNAME, O_RDWR | O_CREAT, 0600); 418 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno)); 419 420 aio_context_init(&ac, fd, fd, FILE_LEN); 421 if (vectored) { 422 aio_writev_test(&ac, comp, sev); 423 aio_readv_test(&ac, comp, sev); 424 } else { 425 aio_write_test(&ac, comp, sev); 426 aio_read_test(&ac, comp, sev); 427 } 428 close(fd); 429 } 430 431 ATF_TC_WITHOUT_HEAD(file_poll); 432 ATF_TC_BODY(file_poll, tc) 433 { 434 aio_file_test(poll, NULL, false); 435 } 436 437 ATF_TC_WITHOUT_HEAD(file_signal); 438 ATF_TC_BODY(file_signal, tc) 439 { 440 aio_file_test(poll_signaled, setup_signal(), false); 441 } 442 443 ATF_TC_WITHOUT_HEAD(file_suspend); 444 ATF_TC_BODY(file_suspend, tc) 445 { 446 aio_file_test(suspend, NULL, false); 447 } 448 449 ATF_TC_WITHOUT_HEAD(file_thread); 450 ATF_TC_BODY(file_thread, tc) 451 { 452 aio_file_test(poll_signaled, setup_thread(), false); 453 } 454 455 ATF_TC_WITHOUT_HEAD(file_waitcomplete); 456 ATF_TC_BODY(file_waitcomplete, tc) 457 { 458 aio_file_test(waitcomplete, NULL, false); 459 } 460 461 #define FIFO_LEN 256 462 #define FIFO_PATHNAME "testfifo" 463 464 static void 465 aio_fifo_test(completion comp, struct sigevent *sev) 466 { 467 int error, read_fd = -1, write_fd = -1; 468 struct aio_context ac; 469 470 ATF_REQUIRE_KERNEL_MODULE("aio"); 471 ATF_REQUIRE_UNSAFE_AIO(); 472 473 ATF_REQUIRE_MSG(mkfifo(FIFO_PATHNAME, 0600) != -1, 474 "mkfifo failed: %s", strerror(errno)); 475 476 read_fd = open(FIFO_PATHNAME, O_RDONLY | O_NONBLOCK); 477 if (read_fd == -1) { 478 error = errno; 479 errno = error; 480 atf_tc_fail("read_fd open failed: %s", 481 strerror(errno)); 482 } 483 484 write_fd = open(FIFO_PATHNAME, O_WRONLY); 485 if (write_fd == -1) { 486 error = errno; 487 errno = error; 488 atf_tc_fail("write_fd open failed: %s", 489 strerror(errno)); 490 } 491 492 aio_context_init(&ac, read_fd, write_fd, FIFO_LEN); 493 aio_write_test(&ac, comp, sev); 494 aio_read_test(&ac, comp, sev); 495 496 close(read_fd); 497 close(write_fd); 498 } 499 500 ATF_TC_WITHOUT_HEAD(fifo_poll); 501 ATF_TC_BODY(fifo_poll, tc) 502 { 503 aio_fifo_test(poll, NULL); 504 } 505 506 ATF_TC_WITHOUT_HEAD(fifo_signal); 507 ATF_TC_BODY(fifo_signal, tc) 508 { 509 aio_fifo_test(poll_signaled, setup_signal()); 510 } 511 512 ATF_TC_WITHOUT_HEAD(fifo_suspend); 513 ATF_TC_BODY(fifo_suspend, tc) 514 { 515 aio_fifo_test(suspend, NULL); 516 } 517 518 ATF_TC_WITHOUT_HEAD(fifo_thread); 519 ATF_TC_BODY(fifo_thread, tc) 520 { 521 aio_fifo_test(poll_signaled, setup_thread()); 522 } 523 524 ATF_TC_WITHOUT_HEAD(fifo_waitcomplete); 525 ATF_TC_BODY(fifo_waitcomplete, tc) 526 { 527 aio_fifo_test(waitcomplete, NULL); 528 } 529 530 #define UNIX_SOCKETPAIR_LEN 256 531 static void 532 aio_unix_socketpair_test(completion comp, struct sigevent *sev, bool vectored) 533 { 534 struct aio_context ac; 535 struct rusage ru_before, ru_after; 536 int sockets[2]; 537 538 ATF_REQUIRE_KERNEL_MODULE("aio"); 539 540 ATF_REQUIRE_MSG(socketpair(PF_UNIX, SOCK_STREAM, 0, sockets) != -1, 541 "socketpair failed: %s", strerror(errno)); 542 543 aio_context_init(&ac, sockets[0], sockets[1], UNIX_SOCKETPAIR_LEN); 544 ATF_REQUIRE_MSG(getrusage(RUSAGE_SELF, &ru_before) != -1, 545 "getrusage failed: %s", strerror(errno)); 546 if (vectored) { 547 aio_writev_test(&ac, comp, sev); 548 aio_readv_test(&ac, comp, sev); 549 } else { 550 aio_write_test(&ac, comp, sev); 551 aio_read_test(&ac, comp, sev); 552 } 553 ATF_REQUIRE_MSG(getrusage(RUSAGE_SELF, &ru_after) != -1, 554 "getrusage failed: %s", strerror(errno)); 555 ATF_REQUIRE(ru_after.ru_msgsnd == ru_before.ru_msgsnd + 1); 556 ATF_REQUIRE(ru_after.ru_msgrcv == ru_before.ru_msgrcv + 1); 557 558 close(sockets[0]); 559 close(sockets[1]); 560 } 561 562 ATF_TC_WITHOUT_HEAD(socket_poll); 563 ATF_TC_BODY(socket_poll, tc) 564 { 565 aio_unix_socketpair_test(poll, NULL, false); 566 } 567 568 ATF_TC_WITHOUT_HEAD(socket_signal); 569 ATF_TC_BODY(socket_signal, tc) 570 { 571 aio_unix_socketpair_test(poll_signaled, setup_signal(), false); 572 } 573 574 ATF_TC_WITHOUT_HEAD(socket_suspend); 575 ATF_TC_BODY(socket_suspend, tc) 576 { 577 aio_unix_socketpair_test(suspend, NULL, false); 578 } 579 580 ATF_TC_WITHOUT_HEAD(socket_thread); 581 ATF_TC_BODY(socket_thread, tc) 582 { 583 aio_unix_socketpair_test(poll_signaled, setup_thread(), false); 584 } 585 586 ATF_TC_WITHOUT_HEAD(socket_waitcomplete); 587 ATF_TC_BODY(socket_waitcomplete, tc) 588 { 589 aio_unix_socketpair_test(waitcomplete, NULL, false); 590 } 591 592 struct aio_pty_arg { 593 int apa_read_fd; 594 int apa_write_fd; 595 }; 596 597 #define PTY_LEN 256 598 static void 599 aio_pty_test(completion comp, struct sigevent *sev) 600 { 601 struct aio_context ac; 602 int read_fd, write_fd; 603 struct termios ts; 604 int error; 605 606 ATF_REQUIRE_KERNEL_MODULE("aio"); 607 ATF_REQUIRE_UNSAFE_AIO(); 608 609 ATF_REQUIRE_MSG(openpty(&read_fd, &write_fd, NULL, NULL, NULL) == 0, 610 "openpty failed: %s", strerror(errno)); 611 612 613 if (tcgetattr(write_fd, &ts) < 0) { 614 error = errno; 615 errno = error; 616 atf_tc_fail("tcgetattr failed: %s", strerror(errno)); 617 } 618 cfmakeraw(&ts); 619 if (tcsetattr(write_fd, TCSANOW, &ts) < 0) { 620 error = errno; 621 errno = error; 622 atf_tc_fail("tcsetattr failed: %s", strerror(errno)); 623 } 624 aio_context_init(&ac, read_fd, write_fd, PTY_LEN); 625 626 aio_write_test(&ac, comp, sev); 627 aio_read_test(&ac, comp, sev); 628 629 close(read_fd); 630 close(write_fd); 631 } 632 633 ATF_TC_WITHOUT_HEAD(pty_poll); 634 ATF_TC_BODY(pty_poll, tc) 635 { 636 aio_pty_test(poll, NULL); 637 } 638 639 ATF_TC_WITHOUT_HEAD(pty_signal); 640 ATF_TC_BODY(pty_signal, tc) 641 { 642 aio_pty_test(poll_signaled, setup_signal()); 643 } 644 645 ATF_TC_WITHOUT_HEAD(pty_suspend); 646 ATF_TC_BODY(pty_suspend, tc) 647 { 648 aio_pty_test(suspend, NULL); 649 } 650 651 ATF_TC_WITHOUT_HEAD(pty_thread); 652 ATF_TC_BODY(pty_thread, tc) 653 { 654 aio_pty_test(poll_signaled, setup_thread()); 655 } 656 657 ATF_TC_WITHOUT_HEAD(pty_waitcomplete); 658 ATF_TC_BODY(pty_waitcomplete, tc) 659 { 660 aio_pty_test(waitcomplete, NULL); 661 } 662 663 #define PIPE_LEN 256 664 static void 665 aio_pipe_test(completion comp, struct sigevent *sev) 666 { 667 struct aio_context ac; 668 int pipes[2]; 669 670 ATF_REQUIRE_KERNEL_MODULE("aio"); 671 ATF_REQUIRE_UNSAFE_AIO(); 672 673 ATF_REQUIRE_MSG(pipe(pipes) != -1, 674 "pipe failed: %s", strerror(errno)); 675 676 aio_context_init(&ac, pipes[0], pipes[1], PIPE_LEN); 677 aio_write_test(&ac, comp, sev); 678 aio_read_test(&ac, comp, sev); 679 680 close(pipes[0]); 681 close(pipes[1]); 682 } 683 684 ATF_TC_WITHOUT_HEAD(pipe_poll); 685 ATF_TC_BODY(pipe_poll, tc) 686 { 687 aio_pipe_test(poll, NULL); 688 } 689 690 ATF_TC_WITHOUT_HEAD(pipe_signal); 691 ATF_TC_BODY(pipe_signal, tc) 692 { 693 aio_pipe_test(poll_signaled, setup_signal()); 694 } 695 696 ATF_TC_WITHOUT_HEAD(pipe_suspend); 697 ATF_TC_BODY(pipe_suspend, tc) 698 { 699 aio_pipe_test(suspend, NULL); 700 } 701 702 ATF_TC_WITHOUT_HEAD(pipe_thread); 703 ATF_TC_BODY(pipe_thread, tc) 704 { 705 aio_pipe_test(poll_signaled, setup_thread()); 706 } 707 708 ATF_TC_WITHOUT_HEAD(pipe_waitcomplete); 709 ATF_TC_BODY(pipe_waitcomplete, tc) 710 { 711 aio_pipe_test(waitcomplete, NULL); 712 } 713 714 #define MD_LEN GLOBAL_MAX 715 #define MDUNIT_LINK "mdunit_link" 716 717 static int 718 aio_md_setup(void) 719 { 720 int error, fd, mdctl_fd, unit; 721 char pathname[PATH_MAX]; 722 struct md_ioctl mdio; 723 char buf[80]; 724 725 ATF_REQUIRE_KERNEL_MODULE("aio"); 726 727 mdctl_fd = open("/dev/" MDCTL_NAME, O_RDWR, 0); 728 ATF_REQUIRE_MSG(mdctl_fd != -1, 729 "opening /dev/%s failed: %s", MDCTL_NAME, strerror(errno)); 730 731 bzero(&mdio, sizeof(mdio)); 732 mdio.md_version = MDIOVERSION; 733 mdio.md_type = MD_MALLOC; 734 mdio.md_options = MD_AUTOUNIT | MD_COMPRESS; 735 mdio.md_mediasize = GLOBAL_MAX; 736 mdio.md_sectorsize = 512; 737 strlcpy(buf, __func__, sizeof(buf)); 738 mdio.md_label = buf; 739 740 if (ioctl(mdctl_fd, MDIOCATTACH, &mdio) < 0) { 741 error = errno; 742 errno = error; 743 atf_tc_fail("ioctl MDIOCATTACH failed: %s", strerror(errno)); 744 } 745 close(mdctl_fd); 746 747 /* Store the md unit number in a symlink for future cleanup */ 748 unit = mdio.md_unit; 749 snprintf(buf, sizeof(buf), "%d", unit); 750 ATF_REQUIRE_EQ(0, symlink(buf, MDUNIT_LINK)); 751 snprintf(pathname, PATH_MAX, "/dev/md%d", unit); 752 fd = open(pathname, O_RDWR); 753 ATF_REQUIRE_MSG(fd != -1, 754 "opening %s failed: %s", pathname, strerror(errno)); 755 756 return (fd); 757 } 758 759 static void 760 aio_md_cleanup(void) 761 { 762 struct md_ioctl mdio; 763 int mdctl_fd, n, unit; 764 char buf[80]; 765 766 mdctl_fd = open("/dev/" MDCTL_NAME, O_RDWR, 0); 767 if (mdctl_fd < 0) { 768 fprintf(stderr, "opening /dev/%s failed: %s\n", MDCTL_NAME, 769 strerror(errno)); 770 return; 771 } 772 n = readlink(MDUNIT_LINK, buf, sizeof(buf) - 1); 773 if (n > 0) { 774 buf[n] = '\0'; 775 if (sscanf(buf, "%d", &unit) == 1 && unit >= 0) { 776 bzero(&mdio, sizeof(mdio)); 777 mdio.md_version = MDIOVERSION; 778 mdio.md_unit = unit; 779 if (ioctl(mdctl_fd, MDIOCDETACH, &mdio) == -1) { 780 fprintf(stderr, 781 "ioctl MDIOCDETACH unit %d failed: %s\n", 782 unit, strerror(errno)); 783 } 784 } 785 } 786 787 close(mdctl_fd); 788 } 789 790 static void 791 aio_md_test(completion comp, struct sigevent *sev, bool vectored) 792 { 793 struct aio_context ac; 794 int fd; 795 796 fd = aio_md_setup(); 797 aio_context_init(&ac, fd, fd, MD_LEN); 798 if (vectored) { 799 aio_writev_test(&ac, comp, sev); 800 aio_readv_test(&ac, comp, sev); 801 } else { 802 aio_write_test(&ac, comp, sev); 803 aio_read_test(&ac, comp, sev); 804 } 805 806 close(fd); 807 } 808 809 ATF_TC_WITH_CLEANUP(md_poll); 810 ATF_TC_HEAD(md_poll, tc) 811 { 812 813 atf_tc_set_md_var(tc, "require.user", "root"); 814 } 815 ATF_TC_BODY(md_poll, tc) 816 { 817 aio_md_test(poll, NULL, false); 818 } 819 ATF_TC_CLEANUP(md_poll, tc) 820 { 821 aio_md_cleanup(); 822 } 823 824 ATF_TC_WITH_CLEANUP(md_signal); 825 ATF_TC_HEAD(md_signal, tc) 826 { 827 828 atf_tc_set_md_var(tc, "require.user", "root"); 829 } 830 ATF_TC_BODY(md_signal, tc) 831 { 832 aio_md_test(poll_signaled, setup_signal(), false); 833 } 834 ATF_TC_CLEANUP(md_signal, tc) 835 { 836 aio_md_cleanup(); 837 } 838 839 ATF_TC_WITH_CLEANUP(md_suspend); 840 ATF_TC_HEAD(md_suspend, tc) 841 { 842 843 atf_tc_set_md_var(tc, "require.user", "root"); 844 } 845 ATF_TC_BODY(md_suspend, tc) 846 { 847 aio_md_test(suspend, NULL, false); 848 } 849 ATF_TC_CLEANUP(md_suspend, tc) 850 { 851 aio_md_cleanup(); 852 } 853 854 ATF_TC_WITH_CLEANUP(md_thread); 855 ATF_TC_HEAD(md_thread, tc) 856 { 857 858 atf_tc_set_md_var(tc, "require.user", "root"); 859 } 860 ATF_TC_BODY(md_thread, tc) 861 { 862 aio_md_test(poll_signaled, setup_thread(), false); 863 } 864 ATF_TC_CLEANUP(md_thread, tc) 865 { 866 aio_md_cleanup(); 867 } 868 869 ATF_TC_WITH_CLEANUP(md_waitcomplete); 870 ATF_TC_HEAD(md_waitcomplete, tc) 871 { 872 873 atf_tc_set_md_var(tc, "require.user", "root"); 874 } 875 ATF_TC_BODY(md_waitcomplete, tc) 876 { 877 aio_md_test(waitcomplete, NULL, false); 878 } 879 ATF_TC_CLEANUP(md_waitcomplete, tc) 880 { 881 aio_md_cleanup(); 882 } 883 884 #define ZVOL_VDEV_PATHNAME "test_vdev" 885 #define POOL_SIZE (1 << 28) /* 256 MB */ 886 #define ZVOL_SIZE "64m" 887 #define POOL_NAME "aio_testpool" 888 #define ZVOL_NAME "aio_testvol" 889 890 static int 891 aio_zvol_setup(void) 892 { 893 FILE *pidfile; 894 int fd; 895 pid_t pid; 896 char pool_name[80]; 897 char cmd[160]; 898 char zvol_name[160]; 899 char devname[160]; 900 901 ATF_REQUIRE_KERNEL_MODULE("aio"); 902 ATF_REQUIRE_KERNEL_MODULE("zfs"); 903 904 fd = open(ZVOL_VDEV_PATHNAME, O_RDWR | O_CREAT, 0600); 905 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno)); 906 ATF_REQUIRE_EQ_MSG(0, 907 ftruncate(fd, POOL_SIZE), "ftruncate failed: %s", strerror(errno)); 908 close(fd); 909 910 pid = getpid(); 911 pidfile = fopen("pidfile", "w"); 912 ATF_REQUIRE_MSG(NULL != pidfile, "fopen: %s", strerror(errno)); 913 fprintf(pidfile, "%d", pid); 914 fclose(pidfile); 915 916 snprintf(pool_name, sizeof(pool_name), POOL_NAME ".%d", pid); 917 snprintf(zvol_name, sizeof(zvol_name), "%s/" ZVOL_NAME, pool_name); 918 snprintf(cmd, sizeof(cmd), "zpool create %s $PWD/" ZVOL_VDEV_PATHNAME, 919 pool_name); 920 ATF_REQUIRE_EQ_MSG(0, system(cmd), 921 "zpool create failed: %s", strerror(errno)); 922 snprintf(cmd, sizeof(cmd), 923 "zfs create -o volblocksize=8192 -o volmode=dev -V " 924 ZVOL_SIZE " %s", zvol_name); 925 ATF_REQUIRE_EQ_MSG(0, system(cmd), 926 "zfs create failed: %s", strerror(errno)); 927 /* 928 * XXX Due to bug 251828, we need an extra "zfs set" here 929 * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=251828 930 */ 931 snprintf(cmd, sizeof(cmd), "zfs set volmode=dev %s", zvol_name); 932 ATF_REQUIRE_EQ_MSG(0, system(cmd), 933 "zfs set failed: %s", strerror(errno)); 934 935 snprintf(devname, sizeof(devname), "/dev/zvol/%s", zvol_name); 936 do { 937 fd = open(devname, O_RDWR); 938 } while (fd == -1 && errno == EINTR) ; 939 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno)); 940 return (fd); 941 } 942 943 static void 944 aio_zvol_cleanup(void) 945 { 946 FILE *pidfile; 947 pid_t testpid; 948 char cmd[160]; 949 950 pidfile = fopen("pidfile", "r"); 951 if (pidfile == NULL && errno == ENOENT) { 952 /* Setup probably failed */ 953 return; 954 } 955 ATF_REQUIRE_MSG(NULL != pidfile, "fopen: %s", strerror(errno)); 956 ATF_REQUIRE_EQ(1, fscanf(pidfile, "%d", &testpid)); 957 fclose(pidfile); 958 959 snprintf(cmd, sizeof(cmd), "zpool destroy " POOL_NAME ".%d", testpid); 960 system(cmd); 961 } 962 963 964 ATF_TC_WITHOUT_HEAD(aio_large_read_test); 965 ATF_TC_BODY(aio_large_read_test, tc) 966 { 967 struct aiocb cb, *cbp; 968 ssize_t nread; 969 size_t len; 970 int fd; 971 #ifdef __LP64__ 972 int clamped; 973 #endif 974 975 ATF_REQUIRE_KERNEL_MODULE("aio"); 976 ATF_REQUIRE_UNSAFE_AIO(); 977 978 #ifdef __LP64__ 979 len = sizeof(clamped); 980 if (sysctlbyname("debug.iosize_max_clamp", &clamped, &len, NULL, 0) == 981 -1) 982 atf_libc_error(errno, "Failed to read debug.iosize_max_clamp"); 983 #endif 984 985 /* Determine the maximum supported read(2) size. */ 986 len = SSIZE_MAX; 987 #ifdef __LP64__ 988 if (clamped) 989 len = INT_MAX; 990 #endif 991 992 fd = open(FILE_PATHNAME, O_RDWR | O_CREAT, 0600); 993 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno)); 994 995 unlink(FILE_PATHNAME); 996 997 memset(&cb, 0, sizeof(cb)); 998 cb.aio_nbytes = len; 999 cb.aio_fildes = fd; 1000 cb.aio_buf = NULL; 1001 if (aio_read(&cb) == -1) 1002 atf_tc_fail("aio_read() of maximum read size failed: %s", 1003 strerror(errno)); 1004 1005 nread = aio_waitcomplete(&cbp, NULL); 1006 if (nread == -1) 1007 atf_tc_fail("aio_waitcomplete() failed: %s", strerror(errno)); 1008 if (nread != 0) 1009 atf_tc_fail("aio_read() from empty file returned data: %zd", 1010 nread); 1011 1012 memset(&cb, 0, sizeof(cb)); 1013 cb.aio_nbytes = len + 1; 1014 cb.aio_fildes = fd; 1015 cb.aio_buf = NULL; 1016 if (aio_read(&cb) == -1) { 1017 if (errno == EINVAL) 1018 goto finished; 1019 atf_tc_fail("aio_read() of too large read size failed: %s", 1020 strerror(errno)); 1021 } 1022 1023 nread = aio_waitcomplete(&cbp, NULL); 1024 if (nread == -1) { 1025 if (errno == EINVAL) 1026 goto finished; 1027 atf_tc_fail("aio_waitcomplete() failed: %s", strerror(errno)); 1028 } 1029 atf_tc_fail("aio_read() of too large read size returned: %zd", nread); 1030 1031 finished: 1032 close(fd); 1033 } 1034 1035 /* 1036 * This tests for a bug where arriving socket data can wakeup multiple 1037 * AIO read requests resulting in an uncancellable request. 1038 */ 1039 ATF_TC_WITHOUT_HEAD(aio_socket_two_reads); 1040 ATF_TC_BODY(aio_socket_two_reads, tc) 1041 { 1042 struct ioreq { 1043 struct aiocb iocb; 1044 char buffer[1024]; 1045 } ioreq[2]; 1046 struct aiocb *iocb; 1047 unsigned i; 1048 int s[2]; 1049 char c; 1050 1051 ATF_REQUIRE_KERNEL_MODULE("aio"); 1052 #if __FreeBSD_version < 1100101 1053 aft_tc_skip("kernel version %d is too old (%d required)", 1054 __FreeBSD_version, 1100101); 1055 #endif 1056 1057 ATF_REQUIRE(socketpair(PF_UNIX, SOCK_STREAM, 0, s) != -1); 1058 1059 /* Queue two read requests. */ 1060 memset(&ioreq, 0, sizeof(ioreq)); 1061 for (i = 0; i < nitems(ioreq); i++) { 1062 ioreq[i].iocb.aio_nbytes = sizeof(ioreq[i].buffer); 1063 ioreq[i].iocb.aio_fildes = s[0]; 1064 ioreq[i].iocb.aio_buf = ioreq[i].buffer; 1065 ATF_REQUIRE(aio_read(&ioreq[i].iocb) == 0); 1066 } 1067 1068 /* Send a single byte. This should complete one request. */ 1069 c = 0xc3; 1070 ATF_REQUIRE(write(s[1], &c, sizeof(c)) == 1); 1071 1072 ATF_REQUIRE(aio_waitcomplete(&iocb, NULL) == 1); 1073 1074 /* Determine which request completed and verify the data was read. */ 1075 if (iocb == &ioreq[0].iocb) 1076 i = 0; 1077 else 1078 i = 1; 1079 ATF_REQUIRE(ioreq[i].buffer[0] == c); 1080 1081 i ^= 1; 1082 1083 /* 1084 * Try to cancel the other request. On broken systems this 1085 * will fail and the process will hang on exit. 1086 */ 1087 ATF_REQUIRE(aio_error(&ioreq[i].iocb) == EINPROGRESS); 1088 ATF_REQUIRE(aio_cancel(s[0], &ioreq[i].iocb) == AIO_CANCELED); 1089 1090 close(s[1]); 1091 close(s[0]); 1092 } 1093 1094 static void 1095 aio_socket_blocking_short_write_test(bool vectored) 1096 { 1097 struct aiocb iocb, *iocbp; 1098 struct iovec iov[2]; 1099 char *buffer[2]; 1100 ssize_t done, r; 1101 int buffer_size, sb_size; 1102 socklen_t len; 1103 int s[2]; 1104 1105 ATF_REQUIRE_KERNEL_MODULE("aio"); 1106 1107 ATF_REQUIRE(socketpair(PF_UNIX, SOCK_STREAM, 0, s) != -1); 1108 1109 len = sizeof(sb_size); 1110 ATF_REQUIRE(getsockopt(s[0], SOL_SOCKET, SO_RCVBUF, &sb_size, &len) != 1111 -1); 1112 ATF_REQUIRE(len == sizeof(sb_size)); 1113 buffer_size = sb_size; 1114 1115 ATF_REQUIRE(getsockopt(s[1], SOL_SOCKET, SO_SNDBUF, &sb_size, &len) != 1116 -1); 1117 ATF_REQUIRE(len == sizeof(sb_size)); 1118 if (sb_size > buffer_size) 1119 buffer_size = sb_size; 1120 1121 /* 1122 * Use twice the size of the MAX(receive buffer, send buffer) 1123 * to ensure that the write is split up into multiple writes 1124 * internally. 1125 */ 1126 buffer_size *= 2; 1127 1128 buffer[0] = malloc(buffer_size); 1129 ATF_REQUIRE(buffer[0] != NULL); 1130 buffer[1] = malloc(buffer_size); 1131 ATF_REQUIRE(buffer[1] != NULL); 1132 1133 srandomdev(); 1134 aio_fill_buffer(buffer[1], buffer_size, random()); 1135 1136 memset(&iocb, 0, sizeof(iocb)); 1137 iocb.aio_fildes = s[1]; 1138 if (vectored) { 1139 iov[0].iov_base = buffer[1]; 1140 iov[0].iov_len = buffer_size / 2 + 1; 1141 iov[1].iov_base = buffer[1] + buffer_size / 2 + 1; 1142 iov[1].iov_len = buffer_size / 2 - 1; 1143 iocb.aio_iov = iov; 1144 iocb.aio_iovcnt = 2; 1145 r = aio_writev(&iocb); 1146 ATF_CHECK_EQ_MSG(0, r, "aio_writev returned %zd", r); 1147 } else { 1148 iocb.aio_buf = buffer[1]; 1149 iocb.aio_nbytes = buffer_size; 1150 r = aio_write(&iocb); 1151 ATF_CHECK_EQ_MSG(0, r, "aio_writev returned %zd", r); 1152 } 1153 1154 done = recv(s[0], buffer[0], buffer_size, MSG_WAITALL); 1155 ATF_REQUIRE(done == buffer_size); 1156 1157 done = aio_waitcomplete(&iocbp, NULL); 1158 ATF_REQUIRE(iocbp == &iocb); 1159 ATF_REQUIRE(done == buffer_size); 1160 1161 ATF_REQUIRE(memcmp(buffer[0], buffer[1], buffer_size) == 0); 1162 1163 close(s[1]); 1164 close(s[0]); 1165 } 1166 1167 /* 1168 * This test ensures that aio_write() on a blocking socket of a "large" 1169 * buffer does not return a short completion. 1170 */ 1171 ATF_TC_WITHOUT_HEAD(aio_socket_blocking_short_write); 1172 ATF_TC_BODY(aio_socket_blocking_short_write, tc) 1173 { 1174 aio_socket_blocking_short_write_test(false); 1175 } 1176 1177 /* 1178 * Like aio_socket_blocking_short_write, but also tests that partially 1179 * completed vectored sends can be retried correctly. 1180 */ 1181 ATF_TC_WITHOUT_HEAD(aio_socket_blocking_short_write_vectored); 1182 ATF_TC_BODY(aio_socket_blocking_short_write_vectored, tc) 1183 { 1184 aio_socket_blocking_short_write_test(true); 1185 } 1186 1187 /* 1188 * This test verifies that cancelling a partially completed socket write 1189 * returns a short write rather than ECANCELED. 1190 */ 1191 ATF_TC_WITHOUT_HEAD(aio_socket_short_write_cancel); 1192 ATF_TC_BODY(aio_socket_short_write_cancel, tc) 1193 { 1194 struct aiocb iocb, *iocbp; 1195 char *buffer[2]; 1196 ssize_t done; 1197 int buffer_size, sb_size; 1198 socklen_t len; 1199 int s[2]; 1200 1201 ATF_REQUIRE_KERNEL_MODULE("aio"); 1202 1203 ATF_REQUIRE(socketpair(PF_UNIX, SOCK_STREAM, 0, s) != -1); 1204 1205 len = sizeof(sb_size); 1206 ATF_REQUIRE(getsockopt(s[0], SOL_SOCKET, SO_RCVBUF, &sb_size, &len) != 1207 -1); 1208 ATF_REQUIRE(len == sizeof(sb_size)); 1209 buffer_size = sb_size; 1210 1211 ATF_REQUIRE(getsockopt(s[1], SOL_SOCKET, SO_SNDBUF, &sb_size, &len) != 1212 -1); 1213 ATF_REQUIRE(len == sizeof(sb_size)); 1214 if (sb_size > buffer_size) 1215 buffer_size = sb_size; 1216 1217 /* 1218 * Use three times the size of the MAX(receive buffer, send 1219 * buffer) for the write to ensure that the write is split up 1220 * into multiple writes internally. The recv() ensures that 1221 * the write has partially completed, but a remaining size of 1222 * two buffers should ensure that the write has not completed 1223 * fully when it is cancelled. 1224 */ 1225 buffer[0] = malloc(buffer_size); 1226 ATF_REQUIRE(buffer[0] != NULL); 1227 buffer[1] = malloc(buffer_size * 3); 1228 ATF_REQUIRE(buffer[1] != NULL); 1229 1230 srandomdev(); 1231 aio_fill_buffer(buffer[1], buffer_size * 3, random()); 1232 1233 memset(&iocb, 0, sizeof(iocb)); 1234 iocb.aio_fildes = s[1]; 1235 iocb.aio_buf = buffer[1]; 1236 iocb.aio_nbytes = buffer_size * 3; 1237 ATF_REQUIRE(aio_write(&iocb) == 0); 1238 1239 done = recv(s[0], buffer[0], buffer_size, MSG_WAITALL); 1240 ATF_REQUIRE(done == buffer_size); 1241 1242 ATF_REQUIRE(aio_error(&iocb) == EINPROGRESS); 1243 ATF_REQUIRE(aio_cancel(s[1], &iocb) == AIO_NOTCANCELED); 1244 1245 done = aio_waitcomplete(&iocbp, NULL); 1246 ATF_REQUIRE(iocbp == &iocb); 1247 ATF_REQUIRE(done >= buffer_size && done <= buffer_size * 2); 1248 1249 ATF_REQUIRE(memcmp(buffer[0], buffer[1], buffer_size) == 0); 1250 1251 close(s[1]); 1252 close(s[0]); 1253 } 1254 1255 /* 1256 * test aio_fsync's behavior with bad inputs 1257 */ 1258 ATF_TC_WITHOUT_HEAD(aio_fsync_errors); 1259 ATF_TC_BODY(aio_fsync_errors, tc) 1260 { 1261 int fd; 1262 struct aiocb iocb; 1263 1264 ATF_REQUIRE_KERNEL_MODULE("aio"); 1265 ATF_REQUIRE_UNSAFE_AIO(); 1266 1267 fd = open(FILE_PATHNAME, O_RDWR | O_CREAT, 0600); 1268 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno)); 1269 unlink(FILE_PATHNAME); 1270 1271 /* aio_fsync should return EINVAL unless op is O_SYNC or O_DSYNC */ 1272 memset(&iocb, 0, sizeof(iocb)); 1273 iocb.aio_fildes = fd; 1274 ATF_CHECK_EQ(-1, aio_fsync(666, &iocb)); 1275 ATF_CHECK_EQ(EINVAL, errno); 1276 1277 /* aio_fsync should return EBADF if fd is not a valid descriptor */ 1278 memset(&iocb, 0, sizeof(iocb)); 1279 iocb.aio_fildes = 666; 1280 ATF_CHECK_EQ(-1, aio_fsync(O_SYNC, &iocb)); 1281 ATF_CHECK_EQ(EBADF, errno); 1282 1283 /* aio_fsync should return EINVAL if sigev_notify is invalid */ 1284 memset(&iocb, 0, sizeof(iocb)); 1285 iocb.aio_fildes = fd; 1286 iocb.aio_sigevent.sigev_notify = 666; 1287 ATF_CHECK_EQ(-1, aio_fsync(666, &iocb)); 1288 ATF_CHECK_EQ(EINVAL, errno); 1289 } 1290 1291 /* 1292 * This test just performs a basic test of aio_fsync(). 1293 */ 1294 static void 1295 aio_fsync_test(int op) 1296 { 1297 struct aiocb synccb, *iocbp; 1298 struct { 1299 struct aiocb iocb; 1300 bool done; 1301 char *buffer; 1302 } buffers[16]; 1303 struct stat sb; 1304 ssize_t rval; 1305 unsigned i; 1306 int fd; 1307 1308 ATF_REQUIRE_KERNEL_MODULE("aio"); 1309 ATF_REQUIRE_UNSAFE_AIO(); 1310 1311 fd = open(FILE_PATHNAME, O_RDWR | O_CREAT, 0600); 1312 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno)); 1313 unlink(FILE_PATHNAME); 1314 1315 ATF_REQUIRE(fstat(fd, &sb) == 0); 1316 ATF_REQUIRE(sb.st_blksize != 0); 1317 ATF_REQUIRE(ftruncate(fd, sb.st_blksize * nitems(buffers)) == 0); 1318 1319 /* 1320 * Queue several asynchronous write requests. Hopefully this 1321 * forces the aio_fsync() request to be deferred. There is no 1322 * reliable way to guarantee that however. 1323 */ 1324 srandomdev(); 1325 for (i = 0; i < nitems(buffers); i++) { 1326 buffers[i].done = false; 1327 memset(&buffers[i].iocb, 0, sizeof(buffers[i].iocb)); 1328 buffers[i].buffer = malloc(sb.st_blksize); 1329 aio_fill_buffer(buffers[i].buffer, sb.st_blksize, random()); 1330 buffers[i].iocb.aio_fildes = fd; 1331 buffers[i].iocb.aio_buf = buffers[i].buffer; 1332 buffers[i].iocb.aio_nbytes = sb.st_blksize; 1333 buffers[i].iocb.aio_offset = sb.st_blksize * i; 1334 ATF_REQUIRE(aio_write(&buffers[i].iocb) == 0); 1335 } 1336 1337 /* Queue the aio_fsync request. */ 1338 memset(&synccb, 0, sizeof(synccb)); 1339 synccb.aio_fildes = fd; 1340 ATF_REQUIRE(aio_fsync(op, &synccb) == 0); 1341 1342 /* Wait for requests to complete. */ 1343 for (;;) { 1344 next: 1345 rval = aio_waitcomplete(&iocbp, NULL); 1346 ATF_REQUIRE(iocbp != NULL); 1347 if (iocbp == &synccb) { 1348 ATF_REQUIRE(rval == 0); 1349 break; 1350 } 1351 1352 for (i = 0; i < nitems(buffers); i++) { 1353 if (iocbp == &buffers[i].iocb) { 1354 ATF_REQUIRE(buffers[i].done == false); 1355 ATF_REQUIRE(rval == sb.st_blksize); 1356 buffers[i].done = true; 1357 goto next; 1358 } 1359 } 1360 1361 ATF_REQUIRE_MSG(false, "unmatched AIO request"); 1362 } 1363 1364 for (i = 0; i < nitems(buffers); i++) 1365 ATF_REQUIRE_MSG(buffers[i].done, 1366 "AIO request %u did not complete", i); 1367 1368 close(fd); 1369 } 1370 1371 ATF_TC_WITHOUT_HEAD(aio_fsync_sync_test); 1372 ATF_TC_BODY(aio_fsync_sync_test, tc) 1373 { 1374 aio_fsync_test(O_SYNC); 1375 } 1376 1377 ATF_TC_WITHOUT_HEAD(aio_fsync_dsync_test); 1378 ATF_TC_BODY(aio_fsync_dsync_test, tc) 1379 { 1380 aio_fsync_test(O_DSYNC); 1381 } 1382 1383 /* 1384 * We shouldn't be able to DoS the system by setting iov_len to an insane 1385 * value 1386 */ 1387 ATF_TC_WITHOUT_HEAD(aio_writev_dos_iov_len); 1388 ATF_TC_BODY(aio_writev_dos_iov_len, tc) 1389 { 1390 struct aiocb aio; 1391 const struct aiocb *const iocbs[] = {&aio}; 1392 const char *wbuf = "Hello, world!"; 1393 struct iovec iov[1]; 1394 ssize_t len, r; 1395 int fd; 1396 1397 ATF_REQUIRE_KERNEL_MODULE("aio"); 1398 ATF_REQUIRE_UNSAFE_AIO(); 1399 1400 fd = open("testfile", O_RDWR | O_CREAT, 0600); 1401 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno)); 1402 1403 len = strlen(wbuf); 1404 iov[0].iov_base = __DECONST(void*, wbuf); 1405 iov[0].iov_len = 1 << 30; 1406 bzero(&aio, sizeof(aio)); 1407 aio.aio_fildes = fd; 1408 aio.aio_offset = 0; 1409 aio.aio_iov = iov; 1410 aio.aio_iovcnt = 1; 1411 1412 r = aio_writev(&aio); 1413 ATF_CHECK_EQ_MSG(0, r, "aio_writev returned %zd", r); 1414 ATF_REQUIRE_EQ(0, aio_suspend(iocbs, 1, NULL)); 1415 r = aio_return(&aio); 1416 ATF_CHECK_EQ_MSG(-1, r, "aio_return returned %zd", r); 1417 ATF_CHECK_MSG(errno == EFAULT || errno == EINVAL, 1418 "aio_writev: %s", strerror(errno)); 1419 1420 close(fd); 1421 } 1422 1423 /* 1424 * We shouldn't be able to DoS the system by setting aio_iovcnt to an insane 1425 * value 1426 */ 1427 ATF_TC_WITHOUT_HEAD(aio_writev_dos_iovcnt); 1428 ATF_TC_BODY(aio_writev_dos_iovcnt, tc) 1429 { 1430 struct aiocb aio; 1431 const char *wbuf = "Hello, world!"; 1432 struct iovec iov[1]; 1433 ssize_t len; 1434 int fd; 1435 1436 ATF_REQUIRE_KERNEL_MODULE("aio"); 1437 ATF_REQUIRE_UNSAFE_AIO(); 1438 1439 fd = open("testfile", O_RDWR | O_CREAT, 0600); 1440 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno)); 1441 1442 len = strlen(wbuf); 1443 iov[0].iov_base = __DECONST(void*, wbuf); 1444 iov[0].iov_len = len; 1445 bzero(&aio, sizeof(aio)); 1446 aio.aio_fildes = fd; 1447 aio.aio_offset = 0; 1448 aio.aio_iov = iov; 1449 aio.aio_iovcnt = 1 << 30; 1450 1451 ATF_REQUIRE_EQ(-1, aio_writev(&aio)); 1452 ATF_CHECK_EQ(EINVAL, errno); 1453 1454 close(fd); 1455 } 1456 1457 ATF_TC_WITH_CLEANUP(aio_writev_efault); 1458 ATF_TC_HEAD(aio_writev_efault, tc) 1459 { 1460 atf_tc_set_md_var(tc, "descr", 1461 "Vectored AIO should gracefully handle invalid addresses"); 1462 atf_tc_set_md_var(tc, "require.user", "root"); 1463 } 1464 ATF_TC_BODY(aio_writev_efault, tc) 1465 { 1466 struct aiocb aio; 1467 ssize_t buflen; 1468 char *buffer; 1469 struct iovec iov[2]; 1470 long seed; 1471 int fd; 1472 1473 ATF_REQUIRE_KERNEL_MODULE("aio"); 1474 ATF_REQUIRE_UNSAFE_AIO(); 1475 1476 fd = aio_md_setup(); 1477 1478 seed = random(); 1479 buflen = 4096; 1480 buffer = malloc(buflen); 1481 aio_fill_buffer(buffer, buflen, seed); 1482 iov[0].iov_base = buffer; 1483 iov[0].iov_len = buflen; 1484 iov[1].iov_base = (void*)-1; /* Invalid! */ 1485 iov[1].iov_len = buflen; 1486 bzero(&aio, sizeof(aio)); 1487 aio.aio_fildes = fd; 1488 aio.aio_offset = 0; 1489 aio.aio_iov = iov; 1490 aio.aio_iovcnt = nitems(iov); 1491 1492 ATF_REQUIRE_EQ(-1, aio_writev(&aio)); 1493 ATF_CHECK_EQ(EFAULT, errno); 1494 1495 close(fd); 1496 } 1497 ATF_TC_CLEANUP(aio_writev_efault, tc) 1498 { 1499 aio_md_cleanup(); 1500 } 1501 1502 ATF_TC_WITHOUT_HEAD(aio_writev_empty_file_poll); 1503 ATF_TC_BODY(aio_writev_empty_file_poll, tc) 1504 { 1505 struct aiocb aio; 1506 int fd; 1507 1508 ATF_REQUIRE_KERNEL_MODULE("aio"); 1509 ATF_REQUIRE_UNSAFE_AIO(); 1510 1511 fd = open("testfile", O_RDWR | O_CREAT, 0600); 1512 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno)); 1513 1514 bzero(&aio, sizeof(aio)); 1515 aio.aio_fildes = fd; 1516 aio.aio_offset = 0; 1517 aio.aio_iovcnt = 0; 1518 1519 ATF_REQUIRE_EQ(0, aio_writev(&aio)); 1520 ATF_REQUIRE_EQ(0, suspend(&aio)); 1521 1522 close(fd); 1523 } 1524 1525 ATF_TC_WITHOUT_HEAD(aio_writev_empty_file_signal); 1526 ATF_TC_BODY(aio_writev_empty_file_signal, tc) 1527 { 1528 struct aiocb aio; 1529 int fd; 1530 1531 ATF_REQUIRE_KERNEL_MODULE("aio"); 1532 ATF_REQUIRE_UNSAFE_AIO(); 1533 1534 fd = open("testfile", O_RDWR | O_CREAT, 0600); 1535 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno)); 1536 1537 bzero(&aio, sizeof(aio)); 1538 aio.aio_fildes = fd; 1539 aio.aio_offset = 0; 1540 aio.aio_iovcnt = 0; 1541 aio.aio_sigevent = *setup_signal(); 1542 1543 ATF_REQUIRE_EQ(0, aio_writev(&aio)); 1544 ATF_REQUIRE_EQ(0, poll_signaled(&aio)); 1545 1546 close(fd); 1547 } 1548 1549 // aio_writev and aio_readv should still work even if the iovcnt is greater 1550 // than the number of buffered AIO operations permitted per process. 1551 ATF_TC_WITH_CLEANUP(vectored_big_iovcnt); 1552 ATF_TC_HEAD(vectored_big_iovcnt, tc) 1553 { 1554 atf_tc_set_md_var(tc, "descr", 1555 "Vectored AIO should still work even if the iovcnt is greater than " 1556 "the number of buffered AIO operations permitted by the process"); 1557 atf_tc_set_md_var(tc, "require.user", "root"); 1558 } 1559 ATF_TC_BODY(vectored_big_iovcnt, tc) 1560 { 1561 struct aiocb aio; 1562 struct iovec *iov; 1563 ssize_t len, buflen; 1564 char *buffer; 1565 const char *oid = "vfs.aio.max_buf_aio"; 1566 long seed; 1567 int max_buf_aio; 1568 int fd, i; 1569 ssize_t sysctl_len = sizeof(max_buf_aio); 1570 1571 ATF_REQUIRE_KERNEL_MODULE("aio"); 1572 ATF_REQUIRE_UNSAFE_AIO(); 1573 1574 if (sysctlbyname(oid, &max_buf_aio, &sysctl_len, NULL, 0) == -1) 1575 atf_libc_error(errno, "Failed to read %s", oid); 1576 1577 seed = random(); 1578 buflen = 512 * (max_buf_aio + 1); 1579 buffer = malloc(buflen); 1580 aio_fill_buffer(buffer, buflen, seed); 1581 iov = calloc(max_buf_aio + 1, sizeof(struct iovec)); 1582 1583 fd = aio_md_setup(); 1584 1585 bzero(&aio, sizeof(aio)); 1586 aio.aio_fildes = fd; 1587 aio.aio_offset = 0; 1588 for (i = 0; i < max_buf_aio + 1; i++) { 1589 iov[i].iov_base = &buffer[i * 512]; 1590 iov[i].iov_len = 512; 1591 } 1592 aio.aio_iov = iov; 1593 aio.aio_iovcnt = max_buf_aio + 1; 1594 1595 if (aio_writev(&aio) < 0) 1596 atf_tc_fail("aio_writev failed: %s", strerror(errno)); 1597 1598 len = poll(&aio); 1599 if (len < 0) 1600 atf_tc_fail("aio failed: %s", strerror(errno)); 1601 1602 if (len != buflen) 1603 atf_tc_fail("aio short write (%jd)", (intmax_t)len); 1604 1605 bzero(&aio, sizeof(aio)); 1606 aio.aio_fildes = fd; 1607 aio.aio_offset = 0; 1608 aio.aio_iov = iov; 1609 aio.aio_iovcnt = max_buf_aio + 1; 1610 1611 if (aio_readv(&aio) < 0) 1612 atf_tc_fail("aio_readv failed: %s", strerror(errno)); 1613 1614 len = poll(&aio); 1615 if (len < 0) 1616 atf_tc_fail("aio failed: %s", strerror(errno)); 1617 1618 if (len != buflen) 1619 atf_tc_fail("aio short read (%jd)", (intmax_t)len); 1620 1621 if (aio_test_buffer(buffer, buflen, seed) == 0) 1622 atf_tc_fail("buffer mismatched"); 1623 1624 close(fd); 1625 } 1626 ATF_TC_CLEANUP(vectored_big_iovcnt, tc) 1627 { 1628 aio_md_cleanup(); 1629 } 1630 1631 ATF_TC_WITHOUT_HEAD(vectored_file_poll); 1632 ATF_TC_BODY(vectored_file_poll, tc) 1633 { 1634 aio_file_test(poll, NULL, true); 1635 } 1636 1637 ATF_TC_WITH_CLEANUP(vectored_md_poll); 1638 ATF_TC_HEAD(vectored_md_poll, tc) 1639 { 1640 atf_tc_set_md_var(tc, "require.user", "root"); 1641 } 1642 ATF_TC_BODY(vectored_md_poll, tc) 1643 { 1644 aio_md_test(poll, NULL, true); 1645 } 1646 ATF_TC_CLEANUP(vectored_md_poll, tc) 1647 { 1648 aio_md_cleanup(); 1649 } 1650 1651 ATF_TC_WITHOUT_HEAD(vectored_socket_poll); 1652 ATF_TC_BODY(vectored_socket_poll, tc) 1653 { 1654 aio_unix_socketpair_test(poll, NULL, true); 1655 } 1656 1657 // aio_writev and aio_readv should still work even if the iov contains elements 1658 // that aren't a multiple of the device's sector size, and even if the total 1659 // amount if I/O _is_ a multiple of the device's sector size. 1660 ATF_TC_WITH_CLEANUP(vectored_unaligned); 1661 ATF_TC_HEAD(vectored_unaligned, tc) 1662 { 1663 atf_tc_set_md_var(tc, "descr", 1664 "Vectored AIO should still work even if the iov contains elements " 1665 "that aren't a multiple of the sector size."); 1666 atf_tc_set_md_var(tc, "require.user", "root"); 1667 } 1668 ATF_TC_BODY(vectored_unaligned, tc) 1669 { 1670 struct aio_context ac; 1671 struct aiocb aio; 1672 struct iovec iov[3]; 1673 ssize_t len, total_len; 1674 int fd; 1675 1676 ATF_REQUIRE_KERNEL_MODULE("aio"); 1677 ATF_REQUIRE_UNSAFE_AIO(); 1678 1679 /* 1680 * Use a zvol with volmode=dev, so it will allow .d_write with 1681 * unaligned uio. geom devices use physio, which doesn't allow that. 1682 */ 1683 fd = aio_zvol_setup(); 1684 aio_context_init(&ac, fd, fd, FILE_LEN); 1685 1686 /* Break the buffer into 3 parts: 1687 * * A 4kB part, aligned to 4kB 1688 * * Two other parts that add up to 4kB: 1689 * - 256B 1690 * - 4kB - 256B 1691 */ 1692 iov[0].iov_base = ac.ac_buffer; 1693 iov[0].iov_len = 4096; 1694 iov[1].iov_base = (void*)((uintptr_t)iov[0].iov_base + iov[0].iov_len); 1695 iov[1].iov_len = 256; 1696 iov[2].iov_base = (void*)((uintptr_t)iov[1].iov_base + iov[1].iov_len); 1697 iov[2].iov_len = 4096 - iov[1].iov_len; 1698 total_len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len; 1699 bzero(&aio, sizeof(aio)); 1700 aio.aio_fildes = ac.ac_write_fd; 1701 aio.aio_offset = 0; 1702 aio.aio_iov = iov; 1703 aio.aio_iovcnt = 3; 1704 1705 if (aio_writev(&aio) < 0) 1706 atf_tc_fail("aio_writev failed: %s", strerror(errno)); 1707 1708 len = poll(&aio); 1709 if (len < 0) 1710 atf_tc_fail("aio failed: %s", strerror(errno)); 1711 1712 if (len != total_len) 1713 atf_tc_fail("aio short write (%jd)", (intmax_t)len); 1714 1715 bzero(&aio, sizeof(aio)); 1716 aio.aio_fildes = ac.ac_read_fd; 1717 aio.aio_offset = 0; 1718 aio.aio_iov = iov; 1719 aio.aio_iovcnt = 3; 1720 1721 if (aio_readv(&aio) < 0) 1722 atf_tc_fail("aio_readv failed: %s", strerror(errno)); 1723 len = poll(&aio); 1724 1725 ATF_REQUIRE_MSG(aio_test_buffer(ac.ac_buffer, total_len, 1726 ac.ac_seed) != 0, "aio_test_buffer: internal error"); 1727 1728 close(fd); 1729 } 1730 ATF_TC_CLEANUP(vectored_unaligned, tc) 1731 { 1732 aio_zvol_cleanup(); 1733 } 1734 1735 static void 1736 aio_zvol_test(completion comp, struct sigevent *sev, bool vectored) 1737 { 1738 struct aio_context ac; 1739 int fd; 1740 1741 fd = aio_zvol_setup(); 1742 aio_context_init(&ac, fd, fd, MD_LEN); 1743 if (vectored) { 1744 aio_writev_test(&ac, comp, sev); 1745 aio_readv_test(&ac, comp, sev); 1746 } else { 1747 aio_write_test(&ac, comp, sev); 1748 aio_read_test(&ac, comp, sev); 1749 } 1750 1751 close(fd); 1752 } 1753 1754 /* 1755 * Note that unlike md, the zvol is not a geom device, does not allow unmapped 1756 * buffers, and does not use physio. 1757 */ 1758 ATF_TC_WITH_CLEANUP(vectored_zvol_poll); 1759 ATF_TC_HEAD(vectored_zvol_poll, tc) 1760 { 1761 atf_tc_set_md_var(tc, "require.user", "root"); 1762 } 1763 ATF_TC_BODY(vectored_zvol_poll, tc) 1764 { 1765 aio_zvol_test(poll, NULL, true); 1766 } 1767 ATF_TC_CLEANUP(vectored_zvol_poll, tc) 1768 { 1769 aio_zvol_cleanup(); 1770 } 1771 1772 ATF_TP_ADD_TCS(tp) 1773 { 1774 1775 ATF_TP_ADD_TC(tp, file_poll); 1776 ATF_TP_ADD_TC(tp, file_signal); 1777 ATF_TP_ADD_TC(tp, file_suspend); 1778 ATF_TP_ADD_TC(tp, file_thread); 1779 ATF_TP_ADD_TC(tp, file_waitcomplete); 1780 ATF_TP_ADD_TC(tp, fifo_poll); 1781 ATF_TP_ADD_TC(tp, fifo_signal); 1782 ATF_TP_ADD_TC(tp, fifo_suspend); 1783 ATF_TP_ADD_TC(tp, fifo_thread); 1784 ATF_TP_ADD_TC(tp, fifo_waitcomplete); 1785 ATF_TP_ADD_TC(tp, socket_poll); 1786 ATF_TP_ADD_TC(tp, socket_signal); 1787 ATF_TP_ADD_TC(tp, socket_suspend); 1788 ATF_TP_ADD_TC(tp, socket_thread); 1789 ATF_TP_ADD_TC(tp, socket_waitcomplete); 1790 ATF_TP_ADD_TC(tp, pty_poll); 1791 ATF_TP_ADD_TC(tp, pty_signal); 1792 ATF_TP_ADD_TC(tp, pty_suspend); 1793 ATF_TP_ADD_TC(tp, pty_thread); 1794 ATF_TP_ADD_TC(tp, pty_waitcomplete); 1795 ATF_TP_ADD_TC(tp, pipe_poll); 1796 ATF_TP_ADD_TC(tp, pipe_signal); 1797 ATF_TP_ADD_TC(tp, pipe_suspend); 1798 ATF_TP_ADD_TC(tp, pipe_thread); 1799 ATF_TP_ADD_TC(tp, pipe_waitcomplete); 1800 ATF_TP_ADD_TC(tp, md_poll); 1801 ATF_TP_ADD_TC(tp, md_signal); 1802 ATF_TP_ADD_TC(tp, md_suspend); 1803 ATF_TP_ADD_TC(tp, md_thread); 1804 ATF_TP_ADD_TC(tp, md_waitcomplete); 1805 ATF_TP_ADD_TC(tp, aio_fsync_errors); 1806 ATF_TP_ADD_TC(tp, aio_fsync_sync_test); 1807 ATF_TP_ADD_TC(tp, aio_fsync_dsync_test); 1808 ATF_TP_ADD_TC(tp, aio_large_read_test); 1809 ATF_TP_ADD_TC(tp, aio_socket_two_reads); 1810 ATF_TP_ADD_TC(tp, aio_socket_blocking_short_write); 1811 ATF_TP_ADD_TC(tp, aio_socket_blocking_short_write_vectored); 1812 ATF_TP_ADD_TC(tp, aio_socket_short_write_cancel); 1813 ATF_TP_ADD_TC(tp, aio_writev_dos_iov_len); 1814 ATF_TP_ADD_TC(tp, aio_writev_dos_iovcnt); 1815 ATF_TP_ADD_TC(tp, aio_writev_efault); 1816 ATF_TP_ADD_TC(tp, aio_writev_empty_file_poll); 1817 ATF_TP_ADD_TC(tp, aio_writev_empty_file_signal); 1818 ATF_TP_ADD_TC(tp, vectored_big_iovcnt); 1819 ATF_TP_ADD_TC(tp, vectored_file_poll); 1820 ATF_TP_ADD_TC(tp, vectored_md_poll); 1821 ATF_TP_ADD_TC(tp, vectored_zvol_poll); 1822 ATF_TP_ADD_TC(tp, vectored_unaligned); 1823 ATF_TP_ADD_TC(tp, vectored_socket_poll); 1824 1825 return (atf_no_error()); 1826 } 1827