/*-
 * Copyright (c) 2004 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Regression test to do some very basic AIO exercising on several types of
 * file descriptors.  Currently, the tests consist of initializing a
 * fixed-size buffer with pseudo-random data, writing it to one fd using AIO,
 * then reading it from a second descriptor using AIO.  For some targets, the
 * same fd is used for write and read (i.e., file, md device), but for others
 * the operation is performed on a peer (pty, socket, fifo, etc.).  For each
 * file descriptor type, several completion methods are tested.  This test
 * program does not attempt to exercise error cases or more subtle
 * asynchronous behavior; it just makes sure that the basic operations work
 * on some basic object types.
 */

#include <sys/param.h>
#include <sys/event.h>
#include <sys/mdioctl.h>
#include <sys/module.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/un.h>

#include <aio.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libutil.h>
#include <limits.h>
#include <semaphore.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <termios.h>
#include <unistd.h>

#include <atf-c.h>

#include "freebsd_test_suite/macros.h"
#include "local.h"

/*
 * GLOBAL_MAX sets the largest usable buffer size to be read and written, as
 * it sizes ac_buffer in the aio_context structure.  It is also the default
 * size for file I/O.  For other types, we use smaller blocks or we risk
 * blocking (and we run in a single process/thread so that would be bad).
 */
#define	GLOBAL_MAX	16384

#define	BUFFER_MAX	GLOBAL_MAX

/*
 * A completion function will block until the aio has completed, then return
 * the result of the aio.  errno will be set appropriately.
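 *
 * For example (an illustrative sketch, not code from the test suite), a
 * caller that has queued a request with aio_write() would reap it through
 * whichever completion function the test case selected:
 *
 *	ssize_t len;
 *	struct aiocb job;		(already filled in and submitted)
 *
 *	len = comp(&job);		(comp: poll, suspend, waitcomplete, ...)
 *	if (len < 0)
 *		err(1, "aio request failed");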
84 */ 85 typedef ssize_t (*completion)(struct aiocb*); 86 87 struct aio_context { 88 int ac_read_fd, ac_write_fd; 89 long ac_seed; 90 char ac_buffer[GLOBAL_MAX]; 91 int ac_buflen; 92 int ac_seconds; 93 }; 94 95 static sem_t completions; 96 97 /* 98 * Fill a buffer given a seed that can be fed into srandom() to initialize 99 * the PRNG in a repeatable manner. 100 */ 101 static void 102 aio_fill_buffer(char *buffer, int len, long seed) 103 { 104 char ch; 105 int i; 106 107 srandom(seed); 108 for (i = 0; i < len; i++) { 109 ch = random() & 0xff; 110 buffer[i] = ch; 111 } 112 } 113 114 /* 115 * Test that a buffer matches a given seed. See aio_fill_buffer(). Return 116 * (1) on a match, (0) on a mismatch. 117 */ 118 static int 119 aio_test_buffer(char *buffer, int len, long seed) 120 { 121 char ch; 122 int i; 123 124 srandom(seed); 125 for (i = 0; i < len; i++) { 126 ch = random() & 0xff; 127 if (buffer[i] != ch) 128 return (0); 129 } 130 return (1); 131 } 132 133 /* 134 * Initialize a testing context given the file descriptors provided by the 135 * test setup. 136 */ 137 static void 138 aio_context_init(struct aio_context *ac, int read_fd, 139 int write_fd, int buflen) 140 { 141 142 ATF_REQUIRE_MSG(buflen <= BUFFER_MAX, 143 "aio_context_init: buffer too large (%d > %d)", 144 buflen, BUFFER_MAX); 145 bzero(ac, sizeof(*ac)); 146 ac->ac_read_fd = read_fd; 147 ac->ac_write_fd = write_fd; 148 ac->ac_buflen = buflen; 149 srandomdev(); 150 ac->ac_seed = random(); 151 aio_fill_buffer(ac->ac_buffer, buflen, ac->ac_seed); 152 ATF_REQUIRE_MSG(aio_test_buffer(ac->ac_buffer, buflen, 153 ac->ac_seed) != 0, "aio_test_buffer: internal error"); 154 } 155 156 static ssize_t 157 poll(struct aiocb *aio) 158 { 159 int error; 160 161 while ((error = aio_error(aio)) == EINPROGRESS) 162 usleep(25000); 163 if (error) 164 return (error); 165 else 166 return (aio_return(aio)); 167 } 168 169 static void 170 sigusr1_handler(int sig __unused) 171 { 172 ATF_REQUIRE_EQ(0, sem_post(&completions)); 173 } 174 175 static void 176 thr_handler(union sigval sv __unused) 177 { 178 ATF_REQUIRE_EQ(0, sem_post(&completions)); 179 } 180 181 static ssize_t 182 poll_signaled(struct aiocb *aio) 183 { 184 int error; 185 186 ATF_REQUIRE_EQ(0, sem_wait(&completions)); 187 error = aio_error(aio); 188 switch (error) { 189 case EINPROGRESS: 190 errno = EINTR; 191 return (-1); 192 case 0: 193 return (aio_return(aio)); 194 default: 195 return (error); 196 } 197 } 198 199 /* 200 * Setup a signal handler for signal delivery tests 201 * This isn't thread safe, but it's ok since ATF runs each testcase in a 202 * separate process 203 */ 204 static struct sigevent* 205 setup_signal(void) 206 { 207 static struct sigevent sev; 208 209 ATF_REQUIRE_EQ(0, sem_init(&completions, false, 0)); 210 sev.sigev_notify = SIGEV_SIGNAL; 211 sev.sigev_signo = SIGUSR1; 212 ATF_REQUIRE(SIG_ERR != signal(SIGUSR1, sigusr1_handler)); 213 return (&sev); 214 } 215 216 /* 217 * Setup a thread for thread delivery tests 218 * This isn't thread safe, but it's ok since ATF runs each testcase in a 219 * separate process 220 */ 221 static struct sigevent* 222 setup_thread(void) 223 { 224 static struct sigevent sev; 225 226 ATF_REQUIRE_EQ(0, sem_init(&completions, false, 0)); 227 sev.sigev_notify = SIGEV_THREAD; 228 sev.sigev_notify_function = thr_handler; 229 sev.sigev_notify_attributes = NULL; 230 return (&sev); 231 } 232 233 static ssize_t 234 suspend(struct aiocb *aio) 235 { 236 const struct aiocb *const iocbs[] = {aio}; 237 int error; 238 239 error = aio_suspend(iocbs, 1, NULL); 
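	/*
	 * aio_suspend() blocks until at least one request in the list has
	 * completed, a signal is delivered, or the optional timeout expires;
	 * passing NULL, as above, waits indefinitely.  A bounded wait would
	 * look roughly like this (sketch only):
	 *
	 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
	 *	if (aio_suspend(iocbs, 1, &ts) == -1 && errno == EAGAIN)
	 *		printf("request still in progress after 5 seconds\n");
	 */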
240 if (error == 0) 241 return (aio_return(aio)); 242 else 243 return (error); 244 } 245 246 static ssize_t 247 waitcomplete(struct aiocb *aio) 248 { 249 struct aiocb *aiop; 250 ssize_t ret; 251 252 ret = aio_waitcomplete(&aiop, NULL); 253 ATF_REQUIRE_EQ(aio, aiop); 254 return (ret); 255 } 256 257 /* 258 * Setup an iocb for kqueue notification. This isn't thread 259 * safe, but it's ok because ATF runs every test case in a separate process. 260 */ 261 static struct sigevent* 262 setup_kqueue(void) 263 { 264 static struct sigevent sev; 265 static int kq; 266 267 kq = kqueue(); 268 ATF_REQUIRE(kq >= 0); 269 270 memset(&sev, 0, sizeof(sev)); 271 sev.sigev_notify_kqueue = kq; 272 sev.sigev_value.sival_ptr = (void*)0xdeadbeef; 273 sev.sigev_notify = SIGEV_KEVENT; 274 275 return (&sev); 276 } 277 278 static ssize_t 279 poll_kqueue(struct aiocb *aio) 280 { 281 int kq, nevents; 282 struct kevent events[1]; 283 284 kq = aio->aio_sigevent.sigev_notify_kqueue; 285 286 nevents = kevent(kq, NULL, 0, events, 1, NULL); 287 ATF_CHECK_EQ(1, nevents); 288 ATF_CHECK_EQ(events[0].ident, (uintptr_t) aio); 289 ATF_CHECK_EQ(events[0].filter, EVFILT_AIO); 290 ATF_CHECK_EQ(events[0].flags, EV_EOF); 291 ATF_CHECK_EQ(events[0].fflags, 0); 292 ATF_CHECK_EQ(events[0].data, 0); 293 ATF_CHECK_EQ((uintptr_t)events[0].udata, 0xdeadbeef); 294 295 return (aio_return(aio)); 296 } 297 298 /* 299 * Perform a simple write test of our initialized data buffer to the provided 300 * file descriptor. 301 */ 302 static void 303 aio_write_test(struct aio_context *ac, completion comp, struct sigevent *sev) 304 { 305 struct aiocb aio; 306 ssize_t len; 307 308 bzero(&aio, sizeof(aio)); 309 aio.aio_buf = ac->ac_buffer; 310 aio.aio_nbytes = ac->ac_buflen; 311 aio.aio_fildes = ac->ac_write_fd; 312 aio.aio_offset = 0; 313 if (sev) 314 aio.aio_sigevent = *sev; 315 316 if (aio_write(&aio) < 0) 317 atf_tc_fail("aio_write failed: %s", strerror(errno)); 318 319 len = comp(&aio); 320 if (len < 0) 321 atf_tc_fail("aio failed: %s", strerror(errno)); 322 323 if (len != ac->ac_buflen) 324 atf_tc_fail("aio short write (%jd)", (intmax_t)len); 325 } 326 327 /* 328 * Perform a vectored I/O test of our initialized data buffer to the provided 329 * file descriptor. 330 * 331 * To vectorize the linear buffer, chop it up into two pieces of dissimilar 332 * size, and swap their offsets. 333 */ 334 static void 335 aio_writev_test(struct aio_context *ac, completion comp, struct sigevent *sev) 336 { 337 struct aiocb aio; 338 struct iovec iov[2]; 339 size_t len0, len1; 340 ssize_t len; 341 342 bzero(&aio, sizeof(aio)); 343 344 aio.aio_fildes = ac->ac_write_fd; 345 aio.aio_offset = 0; 346 len0 = ac->ac_buflen * 3 / 4; 347 len1 = ac->ac_buflen / 4; 348 iov[0].iov_base = ac->ac_buffer + len1; 349 iov[0].iov_len = len0; 350 iov[1].iov_base = ac->ac_buffer; 351 iov[1].iov_len = len1; 352 aio.aio_iov = iov; 353 aio.aio_iovcnt = 2; 354 if (sev) 355 aio.aio_sigevent = *sev; 356 357 if (aio_writev(&aio) < 0) 358 atf_tc_fail("aio_writev failed: %s", strerror(errno)); 359 360 len = comp(&aio); 361 if (len < 0) 362 atf_tc_fail("aio failed: %s", strerror(errno)); 363 364 if (len != ac->ac_buflen) 365 atf_tc_fail("aio short write (%jd)", (intmax_t)len); 366 } 367 368 /* 369 * Perform a simple read test of our initialized data buffer from the 370 * provided file descriptor. 
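 *
 * The buffer is cleared before the read is queued so that data left over
 * from the preceding write test cannot make a failed or short read look
 * successful.  Stripped of the completion-function machinery, the same
 * round trip could be polled by hand (sketch only; fd and buf assumed):
 *
 *	struct aiocb job = { .aio_fildes = fd, .aio_buf = buf,
 *	    .aio_nbytes = sizeof(buf) };
 *	if (aio_read(&job) == 0) {
 *		while (aio_error(&job) == EINPROGRESS)
 *			usleep(1000);
 *		printf("read %zd bytes\n", aio_return(&job));
 *	}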
371 */ 372 static void 373 aio_read_test(struct aio_context *ac, completion comp, struct sigevent *sev) 374 { 375 struct aiocb aio; 376 ssize_t len; 377 378 bzero(ac->ac_buffer, ac->ac_buflen); 379 bzero(&aio, sizeof(aio)); 380 aio.aio_buf = ac->ac_buffer; 381 aio.aio_nbytes = ac->ac_buflen; 382 aio.aio_fildes = ac->ac_read_fd; 383 aio.aio_offset = 0; 384 if (sev) 385 aio.aio_sigevent = *sev; 386 387 if (aio_read(&aio) < 0) 388 atf_tc_fail("aio_read failed: %s", strerror(errno)); 389 390 len = comp(&aio); 391 if (len < 0) 392 atf_tc_fail("aio failed: %s", strerror(errno)); 393 394 ATF_REQUIRE_EQ_MSG(len, ac->ac_buflen, 395 "aio short read (%jd)", (intmax_t)len); 396 397 if (aio_test_buffer(ac->ac_buffer, ac->ac_buflen, ac->ac_seed) == 0) 398 atf_tc_fail("buffer mismatched"); 399 } 400 401 static void 402 aio_readv_test(struct aio_context *ac, completion comp, struct sigevent *sev) 403 { 404 struct aiocb aio; 405 struct iovec iov[2]; 406 size_t len0, len1; 407 ssize_t len; 408 409 bzero(ac->ac_buffer, ac->ac_buflen); 410 bzero(&aio, sizeof(aio)); 411 aio.aio_fildes = ac->ac_read_fd; 412 aio.aio_offset = 0; 413 len0 = ac->ac_buflen * 3 / 4; 414 len1 = ac->ac_buflen / 4; 415 iov[0].iov_base = ac->ac_buffer + len1; 416 iov[0].iov_len = len0; 417 iov[1].iov_base = ac->ac_buffer; 418 iov[1].iov_len = len1; 419 aio.aio_iov = iov; 420 aio.aio_iovcnt = 2; 421 if (sev) 422 aio.aio_sigevent = *sev; 423 424 if (aio_readv(&aio) < 0) 425 atf_tc_fail("aio_read failed: %s", strerror(errno)); 426 427 len = comp(&aio); 428 if (len < 0) 429 atf_tc_fail("aio failed: %s", strerror(errno)); 430 431 ATF_REQUIRE_EQ_MSG(len, ac->ac_buflen, 432 "aio short read (%jd)", (intmax_t)len); 433 434 if (aio_test_buffer(ac->ac_buffer, ac->ac_buflen, ac->ac_seed) == 0) 435 atf_tc_fail("buffer mismatched"); 436 } 437 438 /* 439 * Series of type-specific tests for AIO. For now, we just make sure we can 440 * issue a write and then a read to each type. We assume that once a write 441 * is issued, a read can follow. 442 */ 443 444 /* 445 * Test with a classic file. Assumes we can create a moderate size temporary 446 * file. 
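 * ATF runs each test case in its own scratch directory, so the relative
 * pathname below lands in (and is discarded with) that directory.  Unlike
 * the fifo, pty, socket, and pipe cases later in the file, the same
 * descriptor is used for both directions: the buffer is written at offset 0
 * and then read back from offset 0 through the identical fd.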
447 */ 448 #define FILE_LEN GLOBAL_MAX 449 #define FILE_PATHNAME "testfile" 450 451 static void 452 aio_file_test(completion comp, struct sigevent *sev, bool vectored) 453 { 454 struct aio_context ac; 455 int fd; 456 457 ATF_REQUIRE_KERNEL_MODULE("aio"); 458 ATF_REQUIRE_UNSAFE_AIO(); 459 460 fd = open(FILE_PATHNAME, O_RDWR | O_CREAT, 0600); 461 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno)); 462 463 aio_context_init(&ac, fd, fd, FILE_LEN); 464 if (vectored) { 465 aio_writev_test(&ac, comp, sev); 466 aio_readv_test(&ac, comp, sev); 467 } else { 468 aio_write_test(&ac, comp, sev); 469 aio_read_test(&ac, comp, sev); 470 } 471 close(fd); 472 } 473 474 ATF_TC_WITHOUT_HEAD(file_kq); 475 ATF_TC_BODY(file_kq, tc) 476 { 477 aio_file_test(poll_kqueue, setup_kqueue(), false); 478 } 479 480 ATF_TC_WITHOUT_HEAD(file_poll); 481 ATF_TC_BODY(file_poll, tc) 482 { 483 aio_file_test(poll, NULL, false); 484 } 485 486 ATF_TC_WITHOUT_HEAD(file_signal); 487 ATF_TC_BODY(file_signal, tc) 488 { 489 aio_file_test(poll_signaled, setup_signal(), false); 490 } 491 492 ATF_TC_WITHOUT_HEAD(file_suspend); 493 ATF_TC_BODY(file_suspend, tc) 494 { 495 aio_file_test(suspend, NULL, false); 496 } 497 498 ATF_TC_WITHOUT_HEAD(file_thread); 499 ATF_TC_BODY(file_thread, tc) 500 { 501 aio_file_test(poll_signaled, setup_thread(), false); 502 } 503 504 ATF_TC_WITHOUT_HEAD(file_waitcomplete); 505 ATF_TC_BODY(file_waitcomplete, tc) 506 { 507 aio_file_test(waitcomplete, NULL, false); 508 } 509 510 #define FIFO_LEN 256 511 #define FIFO_PATHNAME "testfifo" 512 513 static void 514 aio_fifo_test(completion comp, struct sigevent *sev) 515 { 516 int error, read_fd = -1, write_fd = -1; 517 struct aio_context ac; 518 519 ATF_REQUIRE_KERNEL_MODULE("aio"); 520 ATF_REQUIRE_UNSAFE_AIO(); 521 522 ATF_REQUIRE_MSG(mkfifo(FIFO_PATHNAME, 0600) != -1, 523 "mkfifo failed: %s", strerror(errno)); 524 525 read_fd = open(FIFO_PATHNAME, O_RDONLY | O_NONBLOCK); 526 if (read_fd == -1) { 527 error = errno; 528 errno = error; 529 atf_tc_fail("read_fd open failed: %s", 530 strerror(errno)); 531 } 532 533 write_fd = open(FIFO_PATHNAME, O_WRONLY); 534 if (write_fd == -1) { 535 error = errno; 536 errno = error; 537 atf_tc_fail("write_fd open failed: %s", 538 strerror(errno)); 539 } 540 541 aio_context_init(&ac, read_fd, write_fd, FIFO_LEN); 542 aio_write_test(&ac, comp, sev); 543 aio_read_test(&ac, comp, sev); 544 545 close(read_fd); 546 close(write_fd); 547 } 548 549 ATF_TC_WITHOUT_HEAD(fifo_kq); 550 ATF_TC_BODY(fifo_kq, tc) 551 { 552 aio_fifo_test(poll_kqueue, setup_kqueue()); 553 } 554 555 ATF_TC_WITHOUT_HEAD(fifo_poll); 556 ATF_TC_BODY(fifo_poll, tc) 557 { 558 aio_fifo_test(poll, NULL); 559 } 560 561 ATF_TC_WITHOUT_HEAD(fifo_signal); 562 ATF_TC_BODY(fifo_signal, tc) 563 { 564 aio_fifo_test(poll_signaled, setup_signal()); 565 } 566 567 ATF_TC_WITHOUT_HEAD(fifo_suspend); 568 ATF_TC_BODY(fifo_suspend, tc) 569 { 570 aio_fifo_test(suspend, NULL); 571 } 572 573 ATF_TC_WITHOUT_HEAD(fifo_thread); 574 ATF_TC_BODY(fifo_thread, tc) 575 { 576 aio_fifo_test(poll_signaled, setup_thread()); 577 } 578 579 ATF_TC_WITHOUT_HEAD(fifo_waitcomplete); 580 ATF_TC_BODY(fifo_waitcomplete, tc) 581 { 582 aio_fifo_test(waitcomplete, NULL); 583 } 584 585 #define UNIX_SOCKETPAIR_LEN 256 586 static void 587 aio_unix_socketpair_test(completion comp, struct sigevent *sev, bool vectored) 588 { 589 struct aio_context ac; 590 struct rusage ru_before, ru_after; 591 int sockets[2]; 592 593 ATF_REQUIRE_KERNEL_MODULE("aio"); 594 595 ATF_REQUIRE_MSG(socketpair(PF_UNIX, SOCK_STREAM, 0, 
sockets) != -1, 596 "socketpair failed: %s", strerror(errno)); 597 598 aio_context_init(&ac, sockets[0], sockets[1], UNIX_SOCKETPAIR_LEN); 599 ATF_REQUIRE_MSG(getrusage(RUSAGE_SELF, &ru_before) != -1, 600 "getrusage failed: %s", strerror(errno)); 601 if (vectored) { 602 aio_writev_test(&ac, comp, sev); 603 aio_readv_test(&ac, comp, sev); 604 } else { 605 aio_write_test(&ac, comp, sev); 606 aio_read_test(&ac, comp, sev); 607 } 608 ATF_REQUIRE_MSG(getrusage(RUSAGE_SELF, &ru_after) != -1, 609 "getrusage failed: %s", strerror(errno)); 610 ATF_REQUIRE(ru_after.ru_msgsnd == ru_before.ru_msgsnd + 1); 611 ATF_REQUIRE(ru_after.ru_msgrcv == ru_before.ru_msgrcv + 1); 612 613 close(sockets[0]); 614 close(sockets[1]); 615 } 616 617 ATF_TC_WITHOUT_HEAD(socket_kq); 618 ATF_TC_BODY(socket_kq, tc) 619 { 620 aio_unix_socketpair_test(poll_kqueue, setup_kqueue(), false); 621 } 622 623 ATF_TC_WITHOUT_HEAD(socket_poll); 624 ATF_TC_BODY(socket_poll, tc) 625 { 626 aio_unix_socketpair_test(poll, NULL, false); 627 } 628 629 ATF_TC_WITHOUT_HEAD(socket_signal); 630 ATF_TC_BODY(socket_signal, tc) 631 { 632 aio_unix_socketpair_test(poll_signaled, setup_signal(), false); 633 } 634 635 ATF_TC_WITHOUT_HEAD(socket_suspend); 636 ATF_TC_BODY(socket_suspend, tc) 637 { 638 aio_unix_socketpair_test(suspend, NULL, false); 639 } 640 641 ATF_TC_WITHOUT_HEAD(socket_thread); 642 ATF_TC_BODY(socket_thread, tc) 643 { 644 aio_unix_socketpair_test(poll_signaled, setup_thread(), false); 645 } 646 647 ATF_TC_WITHOUT_HEAD(socket_waitcomplete); 648 ATF_TC_BODY(socket_waitcomplete, tc) 649 { 650 aio_unix_socketpair_test(waitcomplete, NULL, false); 651 } 652 653 struct aio_pty_arg { 654 int apa_read_fd; 655 int apa_write_fd; 656 }; 657 658 #define PTY_LEN 256 659 static void 660 aio_pty_test(completion comp, struct sigevent *sev) 661 { 662 struct aio_context ac; 663 int read_fd, write_fd; 664 struct termios ts; 665 int error; 666 667 ATF_REQUIRE_KERNEL_MODULE("aio"); 668 ATF_REQUIRE_UNSAFE_AIO(); 669 670 ATF_REQUIRE_MSG(openpty(&read_fd, &write_fd, NULL, NULL, NULL) == 0, 671 "openpty failed: %s", strerror(errno)); 672 673 674 if (tcgetattr(write_fd, &ts) < 0) { 675 error = errno; 676 errno = error; 677 atf_tc_fail("tcgetattr failed: %s", strerror(errno)); 678 } 679 cfmakeraw(&ts); 680 if (tcsetattr(write_fd, TCSANOW, &ts) < 0) { 681 error = errno; 682 errno = error; 683 atf_tc_fail("tcsetattr failed: %s", strerror(errno)); 684 } 685 aio_context_init(&ac, read_fd, write_fd, PTY_LEN); 686 687 aio_write_test(&ac, comp, sev); 688 aio_read_test(&ac, comp, sev); 689 690 close(read_fd); 691 close(write_fd); 692 } 693 694 ATF_TC_WITHOUT_HEAD(pty_kq); 695 ATF_TC_BODY(pty_kq, tc) 696 { 697 aio_pty_test(poll_kqueue, setup_kqueue()); 698 } 699 700 ATF_TC_WITHOUT_HEAD(pty_poll); 701 ATF_TC_BODY(pty_poll, tc) 702 { 703 aio_pty_test(poll, NULL); 704 } 705 706 ATF_TC_WITHOUT_HEAD(pty_signal); 707 ATF_TC_BODY(pty_signal, tc) 708 { 709 aio_pty_test(poll_signaled, setup_signal()); 710 } 711 712 ATF_TC_WITHOUT_HEAD(pty_suspend); 713 ATF_TC_BODY(pty_suspend, tc) 714 { 715 aio_pty_test(suspend, NULL); 716 } 717 718 ATF_TC_WITHOUT_HEAD(pty_thread); 719 ATF_TC_BODY(pty_thread, tc) 720 { 721 aio_pty_test(poll_signaled, setup_thread()); 722 } 723 724 ATF_TC_WITHOUT_HEAD(pty_waitcomplete); 725 ATF_TC_BODY(pty_waitcomplete, tc) 726 { 727 aio_pty_test(waitcomplete, NULL); 728 } 729 730 #define PIPE_LEN 256 731 static void 732 aio_pipe_test(completion comp, struct sigevent *sev) 733 { 734 struct aio_context ac; 735 int pipes[2]; 736 737 ATF_REQUIRE_KERNEL_MODULE("aio"); 
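	/*
	 * AIO on a pipe can block an AIO daemon indefinitely, so the kernel
	 * refuses it unless "unsafe" AIO has been enabled; the macro below
	 * skips the test case otherwise.  When running these cases by hand,
	 * the switch is (illustrative, requires root):
	 *
	 *	sysctl vfs.aio.enable_unsafe=1
	 */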
738 ATF_REQUIRE_UNSAFE_AIO(); 739 740 ATF_REQUIRE_MSG(pipe(pipes) != -1, 741 "pipe failed: %s", strerror(errno)); 742 743 aio_context_init(&ac, pipes[0], pipes[1], PIPE_LEN); 744 aio_write_test(&ac, comp, sev); 745 aio_read_test(&ac, comp, sev); 746 747 close(pipes[0]); 748 close(pipes[1]); 749 } 750 751 ATF_TC_WITHOUT_HEAD(pipe_kq); 752 ATF_TC_BODY(pipe_kq, tc) 753 { 754 aio_pipe_test(poll_kqueue, setup_kqueue()); 755 } 756 757 ATF_TC_WITHOUT_HEAD(pipe_poll); 758 ATF_TC_BODY(pipe_poll, tc) 759 { 760 aio_pipe_test(poll, NULL); 761 } 762 763 ATF_TC_WITHOUT_HEAD(pipe_signal); 764 ATF_TC_BODY(pipe_signal, tc) 765 { 766 aio_pipe_test(poll_signaled, setup_signal()); 767 } 768 769 ATF_TC_WITHOUT_HEAD(pipe_suspend); 770 ATF_TC_BODY(pipe_suspend, tc) 771 { 772 aio_pipe_test(suspend, NULL); 773 } 774 775 ATF_TC_WITHOUT_HEAD(pipe_thread); 776 ATF_TC_BODY(pipe_thread, tc) 777 { 778 aio_pipe_test(poll_signaled, setup_thread()); 779 } 780 781 ATF_TC_WITHOUT_HEAD(pipe_waitcomplete); 782 ATF_TC_BODY(pipe_waitcomplete, tc) 783 { 784 aio_pipe_test(waitcomplete, NULL); 785 } 786 787 #define MD_LEN GLOBAL_MAX 788 #define MDUNIT_LINK "mdunit_link" 789 790 static int 791 aio_md_setup(void) 792 { 793 int error, fd, mdctl_fd, unit; 794 char pathname[PATH_MAX]; 795 struct md_ioctl mdio; 796 char buf[80]; 797 798 ATF_REQUIRE_KERNEL_MODULE("aio"); 799 800 mdctl_fd = open("/dev/" MDCTL_NAME, O_RDWR, 0); 801 ATF_REQUIRE_MSG(mdctl_fd != -1, 802 "opening /dev/%s failed: %s", MDCTL_NAME, strerror(errno)); 803 804 bzero(&mdio, sizeof(mdio)); 805 mdio.md_version = MDIOVERSION; 806 mdio.md_type = MD_MALLOC; 807 mdio.md_options = MD_AUTOUNIT | MD_COMPRESS; 808 mdio.md_mediasize = GLOBAL_MAX; 809 mdio.md_sectorsize = 512; 810 strlcpy(buf, __func__, sizeof(buf)); 811 mdio.md_label = buf; 812 813 if (ioctl(mdctl_fd, MDIOCATTACH, &mdio) < 0) { 814 error = errno; 815 errno = error; 816 atf_tc_fail("ioctl MDIOCATTACH failed: %s", strerror(errno)); 817 } 818 close(mdctl_fd); 819 820 /* Store the md unit number in a symlink for future cleanup */ 821 unit = mdio.md_unit; 822 snprintf(buf, sizeof(buf), "%d", unit); 823 ATF_REQUIRE_EQ(0, symlink(buf, MDUNIT_LINK)); 824 snprintf(pathname, PATH_MAX, "/dev/md%d", unit); 825 fd = open(pathname, O_RDWR); 826 ATF_REQUIRE_MSG(fd != -1, 827 "opening %s failed: %s", pathname, strerror(errno)); 828 829 return (fd); 830 } 831 832 static void 833 aio_md_cleanup(void) 834 { 835 struct md_ioctl mdio; 836 int mdctl_fd, n, unit; 837 char buf[80]; 838 839 mdctl_fd = open("/dev/" MDCTL_NAME, O_RDWR, 0); 840 if (mdctl_fd < 0) { 841 fprintf(stderr, "opening /dev/%s failed: %s\n", MDCTL_NAME, 842 strerror(errno)); 843 return; 844 } 845 n = readlink(MDUNIT_LINK, buf, sizeof(buf) - 1); 846 if (n > 0) { 847 buf[n] = '\0'; 848 if (sscanf(buf, "%d", &unit) == 1 && unit >= 0) { 849 bzero(&mdio, sizeof(mdio)); 850 mdio.md_version = MDIOVERSION; 851 mdio.md_unit = unit; 852 if (ioctl(mdctl_fd, MDIOCDETACH, &mdio) == -1) { 853 fprintf(stderr, 854 "ioctl MDIOCDETACH unit %d failed: %s\n", 855 unit, strerror(errno)); 856 } 857 } 858 } 859 860 close(mdctl_fd); 861 } 862 863 static void 864 aio_md_test(completion comp, struct sigevent *sev, bool vectored) 865 { 866 struct aio_context ac; 867 int fd; 868 869 fd = aio_md_setup(); 870 aio_context_init(&ac, fd, fd, MD_LEN); 871 if (vectored) { 872 aio_writev_test(&ac, comp, sev); 873 aio_readv_test(&ac, comp, sev); 874 } else { 875 aio_write_test(&ac, comp, sev); 876 aio_read_test(&ac, comp, sev); 877 } 878 879 close(fd); 880 } 881 882 ATF_TC_WITH_CLEANUP(md_kq); 883 
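/*
 * The md(4) cases all follow the same ATF cleanup pattern: the HEAD marks
 * the case as requiring root (attaching a memory disk needs privilege), the
 * BODY runs one completion method against the md device, and the CLEANUP
 * detaches whatever unit aio_md_setup() recorded in MDUNIT_LINK, even if the
 * body failed partway through.  The setup is roughly the ioctl equivalent of
 * (sketch only):
 *
 *	mdconfig -a -t malloc -s 16k
 */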
ATF_TC_HEAD(md_kq, tc) 884 { 885 886 atf_tc_set_md_var(tc, "require.user", "root"); 887 } 888 ATF_TC_BODY(md_kq, tc) 889 { 890 aio_md_test(poll_kqueue, setup_kqueue(), false); 891 } 892 ATF_TC_CLEANUP(md_kq, tc) 893 { 894 aio_md_cleanup(); 895 } 896 897 ATF_TC_WITH_CLEANUP(md_poll); 898 ATF_TC_HEAD(md_poll, tc) 899 { 900 901 atf_tc_set_md_var(tc, "require.user", "root"); 902 } 903 ATF_TC_BODY(md_poll, tc) 904 { 905 aio_md_test(poll, NULL, false); 906 } 907 ATF_TC_CLEANUP(md_poll, tc) 908 { 909 aio_md_cleanup(); 910 } 911 912 ATF_TC_WITH_CLEANUP(md_signal); 913 ATF_TC_HEAD(md_signal, tc) 914 { 915 916 atf_tc_set_md_var(tc, "require.user", "root"); 917 } 918 ATF_TC_BODY(md_signal, tc) 919 { 920 aio_md_test(poll_signaled, setup_signal(), false); 921 } 922 ATF_TC_CLEANUP(md_signal, tc) 923 { 924 aio_md_cleanup(); 925 } 926 927 ATF_TC_WITH_CLEANUP(md_suspend); 928 ATF_TC_HEAD(md_suspend, tc) 929 { 930 931 atf_tc_set_md_var(tc, "require.user", "root"); 932 } 933 ATF_TC_BODY(md_suspend, tc) 934 { 935 aio_md_test(suspend, NULL, false); 936 } 937 ATF_TC_CLEANUP(md_suspend, tc) 938 { 939 aio_md_cleanup(); 940 } 941 942 ATF_TC_WITH_CLEANUP(md_thread); 943 ATF_TC_HEAD(md_thread, tc) 944 { 945 946 atf_tc_set_md_var(tc, "require.user", "root"); 947 } 948 ATF_TC_BODY(md_thread, tc) 949 { 950 aio_md_test(poll_signaled, setup_thread(), false); 951 } 952 ATF_TC_CLEANUP(md_thread, tc) 953 { 954 aio_md_cleanup(); 955 } 956 957 ATF_TC_WITH_CLEANUP(md_waitcomplete); 958 ATF_TC_HEAD(md_waitcomplete, tc) 959 { 960 961 atf_tc_set_md_var(tc, "require.user", "root"); 962 } 963 ATF_TC_BODY(md_waitcomplete, tc) 964 { 965 aio_md_test(waitcomplete, NULL, false); 966 } 967 ATF_TC_CLEANUP(md_waitcomplete, tc) 968 { 969 aio_md_cleanup(); 970 } 971 972 #define ZVOL_VDEV_PATHNAME "test_vdev" 973 #define POOL_SIZE (1 << 28) /* 256 MB */ 974 #define ZVOL_SIZE "64m" 975 #define POOL_NAME "aio_testpool" 976 #define ZVOL_NAME "aio_testvol" 977 978 static int 979 aio_zvol_setup(const char *unique) 980 { 981 FILE *pidfile; 982 int fd; 983 pid_t pid; 984 char vdev_name[160]; 985 char pool_name[80]; 986 char cmd[160]; 987 char zvol_name[160]; 988 char devname[160]; 989 990 ATF_REQUIRE_KERNEL_MODULE("aio"); 991 ATF_REQUIRE_KERNEL_MODULE("zfs"); 992 993 pid = getpid(); 994 snprintf(vdev_name, sizeof(vdev_name), "%s", ZVOL_VDEV_PATHNAME); 995 snprintf(pool_name, sizeof(pool_name), "%s_%s.%d", POOL_NAME, unique, 996 pid); 997 snprintf(zvol_name, sizeof(zvol_name), "%s/%s_%s", pool_name, ZVOL_NAME, 998 unique); 999 1000 fd = open(vdev_name, O_RDWR | O_CREAT, 0600); 1001 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno)); 1002 ATF_REQUIRE_EQ_MSG(0, 1003 ftruncate(fd, POOL_SIZE), "ftruncate failed: %s", strerror(errno)); 1004 close(fd); 1005 1006 pidfile = fopen("pidfile", "w"); 1007 ATF_REQUIRE_MSG(NULL != pidfile, "fopen: %s", strerror(errno)); 1008 fprintf(pidfile, "%d", pid); 1009 fclose(pidfile); 1010 1011 snprintf(cmd, sizeof(cmd), "zpool create %s $PWD/%s", pool_name, 1012 vdev_name); 1013 ATF_REQUIRE_EQ_MSG(0, system(cmd), 1014 "zpool create failed: %s", strerror(errno)); 1015 snprintf(cmd, sizeof(cmd), 1016 "zfs create -o volblocksize=8192 -o volmode=dev -V %s %s", 1017 ZVOL_SIZE, zvol_name); 1018 ATF_REQUIRE_EQ_MSG(0, system(cmd), 1019 "zfs create failed: %s", strerror(errno)); 1020 1021 snprintf(devname, sizeof(devname), "/dev/zvol/%s", zvol_name); 1022 do { 1023 fd = open(devname, O_RDWR); 1024 } while (fd == -1 && errno == EINTR); 1025 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno)); 1026 return (fd); 
}

static void
aio_zvol_cleanup(const char *unique)
{
	FILE *pidfile;
	pid_t testpid;
	char cmd[160];

	pidfile = fopen("pidfile", "r");
	if (pidfile == NULL && errno == ENOENT) {
		/* Setup probably failed */
		return;
	}
	ATF_REQUIRE_MSG(NULL != pidfile, "fopen: %s", strerror(errno));
	ATF_REQUIRE_EQ(1, fscanf(pidfile, "%d", &testpid));
	fclose(pidfile);

	snprintf(cmd, sizeof(cmd), "zpool destroy %s_%s.%d", POOL_NAME, unique,
	    testpid);
	system(cmd);
}

ATF_TC_WITHOUT_HEAD(aio_large_read_test);
ATF_TC_BODY(aio_large_read_test, tc)
{
	struct aiocb cb, *cbp;
	ssize_t nread;
	size_t len;
	int fd;
#ifdef __LP64__
	int clamped;
#endif

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

#ifdef __LP64__
	len = sizeof(clamped);
	if (sysctlbyname("debug.iosize_max_clamp", &clamped, &len, NULL, 0) ==
	    -1)
		atf_libc_error(errno, "Failed to read debug.iosize_max_clamp");
#endif

	/* Determine the maximum supported read(2) size. */
	len = SSIZE_MAX;
#ifdef __LP64__
	if (clamped)
		len = INT_MAX;
#endif

	fd = open(FILE_PATHNAME, O_RDWR | O_CREAT, 0600);
	ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));

	unlink(FILE_PATHNAME);

	memset(&cb, 0, sizeof(cb));
	cb.aio_nbytes = len;
	cb.aio_fildes = fd;
	cb.aio_buf = NULL;
	if (aio_read(&cb) == -1)
		atf_tc_fail("aio_read() of maximum read size failed: %s",
		    strerror(errno));

	nread = aio_waitcomplete(&cbp, NULL);
	if (nread == -1)
		atf_tc_fail("aio_waitcomplete() failed: %s", strerror(errno));
	if (nread != 0)
		atf_tc_fail("aio_read() from empty file returned data: %zd",
		    nread);

	memset(&cb, 0, sizeof(cb));
	cb.aio_nbytes = len + 1;
	cb.aio_fildes = fd;
	cb.aio_buf = NULL;
	if (aio_read(&cb) == -1) {
		if (errno == EINVAL)
			goto finished;
		atf_tc_fail("aio_read() of too large read size failed: %s",
		    strerror(errno));
	}

	nread = aio_waitcomplete(&cbp, NULL);
	if (nread == -1) {
		if (errno == EINVAL)
			goto finished;
		atf_tc_fail("aio_waitcomplete() failed: %s", strerror(errno));
	}
	atf_tc_fail("aio_read() of too large read size returned: %zd", nread);

finished:
	close(fd);
}

/*
 * This tests for a bug where arriving socket data can wake up multiple
 * AIO read requests, resulting in an uncancellable request.
 */
ATF_TC_WITHOUT_HEAD(aio_socket_two_reads);
ATF_TC_BODY(aio_socket_two_reads, tc)
{
	struct ioreq {
		struct aiocb iocb;
		char buffer[1024];
	} ioreq[2];
	struct aiocb *iocb;
	unsigned i;
	int s[2];
	char c;

	ATF_REQUIRE_KERNEL_MODULE("aio");
#if __FreeBSD_version < 1100101
	atf_tc_skip("kernel version %d is too old (%d required)",
	    __FreeBSD_version, 1100101);
#endif

	ATF_REQUIRE(socketpair(PF_UNIX, SOCK_STREAM, 0, s) != -1);

	/* Queue two read requests. */
	memset(&ioreq, 0, sizeof(ioreq));
	for (i = 0; i < nitems(ioreq); i++) {
		ioreq[i].iocb.aio_nbytes = sizeof(ioreq[i].buffer);
		ioreq[i].iocb.aio_fildes = s[0];
		ioreq[i].iocb.aio_buf = ioreq[i].buffer;
		ATF_REQUIRE(aio_read(&ioreq[i].iocb) == 0);
	}

	/* Send a single byte.  This should complete one request.
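	 * Which of the two queued reads gets the byte is up to the kernel,
	 * which is why the code below checks the returned aiocb pointer
	 * before inspecting the data.  The interesting job is the other,
	 * still-pending one: on the broken kernels this test was written
	 * for, that request could no longer be cancelled, so the
	 * aio_cancel() call below would not report AIO_CANCELED and the
	 * process would hang on exit.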
*/ 1156 c = 0xc3; 1157 ATF_REQUIRE(write(s[1], &c, sizeof(c)) == 1); 1158 1159 ATF_REQUIRE(aio_waitcomplete(&iocb, NULL) == 1); 1160 1161 /* Determine which request completed and verify the data was read. */ 1162 if (iocb == &ioreq[0].iocb) 1163 i = 0; 1164 else 1165 i = 1; 1166 ATF_REQUIRE(ioreq[i].buffer[0] == c); 1167 1168 i ^= 1; 1169 1170 /* 1171 * Try to cancel the other request. On broken systems this 1172 * will fail and the process will hang on exit. 1173 */ 1174 ATF_REQUIRE(aio_error(&ioreq[i].iocb) == EINPROGRESS); 1175 ATF_REQUIRE(aio_cancel(s[0], &ioreq[i].iocb) == AIO_CANCELED); 1176 1177 close(s[1]); 1178 close(s[0]); 1179 } 1180 1181 static void 1182 aio_socket_blocking_short_write_test(bool vectored) 1183 { 1184 struct aiocb iocb, *iocbp; 1185 struct iovec iov[2]; 1186 char *buffer[2]; 1187 ssize_t done, r; 1188 int buffer_size, sb_size; 1189 socklen_t len; 1190 int s[2]; 1191 1192 ATF_REQUIRE_KERNEL_MODULE("aio"); 1193 1194 ATF_REQUIRE(socketpair(PF_UNIX, SOCK_STREAM, 0, s) != -1); 1195 1196 len = sizeof(sb_size); 1197 ATF_REQUIRE(getsockopt(s[0], SOL_SOCKET, SO_RCVBUF, &sb_size, &len) != 1198 -1); 1199 ATF_REQUIRE(len == sizeof(sb_size)); 1200 buffer_size = sb_size; 1201 1202 ATF_REQUIRE(getsockopt(s[1], SOL_SOCKET, SO_SNDBUF, &sb_size, &len) != 1203 -1); 1204 ATF_REQUIRE(len == sizeof(sb_size)); 1205 if (sb_size > buffer_size) 1206 buffer_size = sb_size; 1207 1208 /* 1209 * Use twice the size of the MAX(receive buffer, send buffer) 1210 * to ensure that the write is split up into multiple writes 1211 * internally. 1212 */ 1213 buffer_size *= 2; 1214 1215 buffer[0] = malloc(buffer_size); 1216 ATF_REQUIRE(buffer[0] != NULL); 1217 buffer[1] = malloc(buffer_size); 1218 ATF_REQUIRE(buffer[1] != NULL); 1219 1220 srandomdev(); 1221 aio_fill_buffer(buffer[1], buffer_size, random()); 1222 1223 memset(&iocb, 0, sizeof(iocb)); 1224 iocb.aio_fildes = s[1]; 1225 if (vectored) { 1226 iov[0].iov_base = buffer[1]; 1227 iov[0].iov_len = buffer_size / 2 + 1; 1228 iov[1].iov_base = buffer[1] + buffer_size / 2 + 1; 1229 iov[1].iov_len = buffer_size / 2 - 1; 1230 iocb.aio_iov = iov; 1231 iocb.aio_iovcnt = 2; 1232 r = aio_writev(&iocb); 1233 ATF_CHECK_EQ_MSG(0, r, "aio_writev returned %zd", r); 1234 } else { 1235 iocb.aio_buf = buffer[1]; 1236 iocb.aio_nbytes = buffer_size; 1237 r = aio_write(&iocb); 1238 ATF_CHECK_EQ_MSG(0, r, "aio_writev returned %zd", r); 1239 } 1240 1241 done = recv(s[0], buffer[0], buffer_size, MSG_WAITALL); 1242 ATF_REQUIRE(done == buffer_size); 1243 1244 done = aio_waitcomplete(&iocbp, NULL); 1245 ATF_REQUIRE(iocbp == &iocb); 1246 ATF_REQUIRE(done == buffer_size); 1247 1248 ATF_REQUIRE(memcmp(buffer[0], buffer[1], buffer_size) == 0); 1249 1250 close(s[1]); 1251 close(s[0]); 1252 } 1253 1254 /* 1255 * This test ensures that aio_write() on a blocking socket of a "large" 1256 * buffer does not return a short completion. 1257 */ 1258 ATF_TC_WITHOUT_HEAD(aio_socket_blocking_short_write); 1259 ATF_TC_BODY(aio_socket_blocking_short_write, tc) 1260 { 1261 aio_socket_blocking_short_write_test(false); 1262 } 1263 1264 /* 1265 * Like aio_socket_blocking_short_write, but also tests that partially 1266 * completed vectored sends can be retried correctly. 1267 */ 1268 ATF_TC_WITHOUT_HEAD(aio_socket_blocking_short_write_vectored); 1269 ATF_TC_BODY(aio_socket_blocking_short_write_vectored, tc) 1270 { 1271 aio_socket_blocking_short_write_test(true); 1272 } 1273 1274 /* 1275 * Verify that AIO requests fail when applied to a listening socket. 
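 * Once listen(2) has been called, the socket exists only to accept
 * connections; it no longer transfers data of its own, so both aio_read()
 * and aio_write() are expected to be rejected up front with EINVAL rather
 * than being queued.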
1276 */ 1277 ATF_TC_WITHOUT_HEAD(aio_socket_listen_fail); 1278 ATF_TC_BODY(aio_socket_listen_fail, tc) 1279 { 1280 struct aiocb iocb; 1281 struct sockaddr_un sun; 1282 char buf[16]; 1283 int s; 1284 1285 s = socket(AF_LOCAL, SOCK_STREAM, 0); 1286 ATF_REQUIRE(s != -1); 1287 1288 memset(&sun, 0, sizeof(sun)); 1289 snprintf(sun.sun_path, sizeof(sun.sun_path), "%s", "listen.XXXXXX"); 1290 mktemp(sun.sun_path); 1291 sun.sun_family = AF_LOCAL; 1292 sun.sun_len = SUN_LEN(&sun); 1293 1294 ATF_REQUIRE(bind(s, (struct sockaddr *)&sun, SUN_LEN(&sun)) == 0); 1295 ATF_REQUIRE(listen(s, 5) == 0); 1296 1297 memset(buf, 0, sizeof(buf)); 1298 memset(&iocb, 0, sizeof(iocb)); 1299 iocb.aio_fildes = s; 1300 iocb.aio_buf = buf; 1301 iocb.aio_nbytes = sizeof(buf); 1302 1303 ATF_REQUIRE_ERRNO(EINVAL, aio_read(&iocb) == -1); 1304 ATF_REQUIRE_ERRNO(EINVAL, aio_write(&iocb) == -1); 1305 1306 ATF_REQUIRE(unlink(sun.sun_path) == 0); 1307 close(s); 1308 } 1309 1310 /* 1311 * Verify that listen(2) fails if a socket has pending AIO requests. 1312 */ 1313 ATF_TC_WITHOUT_HEAD(aio_socket_listen_pending); 1314 ATF_TC_BODY(aio_socket_listen_pending, tc) 1315 { 1316 struct aiocb iocb; 1317 struct sockaddr_un sun; 1318 char buf[16]; 1319 int s; 1320 1321 s = socket(AF_LOCAL, SOCK_STREAM, 0); 1322 ATF_REQUIRE(s != -1); 1323 1324 memset(&sun, 0, sizeof(sun)); 1325 snprintf(sun.sun_path, sizeof(sun.sun_path), "%s", "listen.XXXXXX"); 1326 mktemp(sun.sun_path); 1327 sun.sun_family = AF_LOCAL; 1328 sun.sun_len = SUN_LEN(&sun); 1329 1330 ATF_REQUIRE(bind(s, (struct sockaddr *)&sun, SUN_LEN(&sun)) == 0); 1331 1332 memset(buf, 0, sizeof(buf)); 1333 memset(&iocb, 0, sizeof(iocb)); 1334 iocb.aio_fildes = s; 1335 iocb.aio_buf = buf; 1336 iocb.aio_nbytes = sizeof(buf); 1337 ATF_REQUIRE(aio_read(&iocb) == 0); 1338 1339 ATF_REQUIRE_ERRNO(EINVAL, listen(s, 5) == -1); 1340 1341 ATF_REQUIRE(aio_cancel(s, &iocb) != -1); 1342 1343 ATF_REQUIRE(unlink(sun.sun_path) == 0); 1344 close(s); 1345 } 1346 1347 /* 1348 * This test verifies that cancelling a partially completed socket write 1349 * returns a short write rather than ECANCELED. 1350 */ 1351 ATF_TC_WITHOUT_HEAD(aio_socket_short_write_cancel); 1352 ATF_TC_BODY(aio_socket_short_write_cancel, tc) 1353 { 1354 struct aiocb iocb, *iocbp; 1355 char *buffer[2]; 1356 ssize_t done; 1357 int buffer_size, sb_size; 1358 socklen_t len; 1359 int s[2]; 1360 1361 ATF_REQUIRE_KERNEL_MODULE("aio"); 1362 1363 ATF_REQUIRE(socketpair(PF_UNIX, SOCK_STREAM, 0, s) != -1); 1364 1365 len = sizeof(sb_size); 1366 ATF_REQUIRE(getsockopt(s[0], SOL_SOCKET, SO_RCVBUF, &sb_size, &len) != 1367 -1); 1368 ATF_REQUIRE(len == sizeof(sb_size)); 1369 buffer_size = sb_size; 1370 1371 ATF_REQUIRE(getsockopt(s[1], SOL_SOCKET, SO_SNDBUF, &sb_size, &len) != 1372 -1); 1373 ATF_REQUIRE(len == sizeof(sb_size)); 1374 if (sb_size > buffer_size) 1375 buffer_size = sb_size; 1376 1377 /* 1378 * Use three times the size of the MAX(receive buffer, send 1379 * buffer) for the write to ensure that the write is split up 1380 * into multiple writes internally. The recv() ensures that 1381 * the write has partially completed, but a remaining size of 1382 * two buffers should ensure that the write has not completed 1383 * fully when it is cancelled. 
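	 * Concretely: the job writes 3 * buffer_size bytes and the recv()
	 * drains exactly buffer_size of them, so when aio_cancel() runs the
	 * amount already transferred should land somewhere between 1x and
	 * 2x buffer_size; that is precisely the range asserted on "done"
	 * after the aio_waitcomplete() below.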
1384 */ 1385 buffer[0] = malloc(buffer_size); 1386 ATF_REQUIRE(buffer[0] != NULL); 1387 buffer[1] = malloc(buffer_size * 3); 1388 ATF_REQUIRE(buffer[1] != NULL); 1389 1390 srandomdev(); 1391 aio_fill_buffer(buffer[1], buffer_size * 3, random()); 1392 1393 memset(&iocb, 0, sizeof(iocb)); 1394 iocb.aio_fildes = s[1]; 1395 iocb.aio_buf = buffer[1]; 1396 iocb.aio_nbytes = buffer_size * 3; 1397 ATF_REQUIRE(aio_write(&iocb) == 0); 1398 1399 done = recv(s[0], buffer[0], buffer_size, MSG_WAITALL); 1400 ATF_REQUIRE(done == buffer_size); 1401 1402 ATF_REQUIRE(aio_error(&iocb) == EINPROGRESS); 1403 ATF_REQUIRE(aio_cancel(s[1], &iocb) == AIO_NOTCANCELED); 1404 1405 done = aio_waitcomplete(&iocbp, NULL); 1406 ATF_REQUIRE(iocbp == &iocb); 1407 ATF_REQUIRE(done >= buffer_size && done <= buffer_size * 2); 1408 1409 ATF_REQUIRE(memcmp(buffer[0], buffer[1], buffer_size) == 0); 1410 1411 close(s[1]); 1412 close(s[0]); 1413 } 1414 1415 /* 1416 * Test handling of aio_read() and aio_write() on shut-down sockets. 1417 */ 1418 ATF_TC_WITHOUT_HEAD(aio_socket_shutdown); 1419 ATF_TC_BODY(aio_socket_shutdown, tc) 1420 { 1421 struct aiocb iocb; 1422 sigset_t set; 1423 char *buffer; 1424 ssize_t len; 1425 size_t bsz; 1426 int error, s[2]; 1427 1428 ATF_REQUIRE_KERNEL_MODULE("aio"); 1429 1430 ATF_REQUIRE(socketpair(PF_UNIX, SOCK_STREAM, 0, s) != -1); 1431 1432 bsz = 1024; 1433 buffer = malloc(bsz); 1434 memset(buffer, 0, bsz); 1435 1436 /* Put some data in s[0]'s recv buffer. */ 1437 ATF_REQUIRE(send(s[1], buffer, bsz, 0) == (ssize_t)bsz); 1438 1439 /* No more reading from s[0]. */ 1440 ATF_REQUIRE(shutdown(s[0], SHUT_RD) != -1); 1441 1442 ATF_REQUIRE(buffer != NULL); 1443 1444 memset(&iocb, 0, sizeof(iocb)); 1445 iocb.aio_fildes = s[0]; 1446 iocb.aio_buf = buffer; 1447 iocb.aio_nbytes = bsz; 1448 ATF_REQUIRE(aio_read(&iocb) == 0); 1449 1450 /* Expect to see zero bytes, analogous to recv(2). */ 1451 while ((error = aio_error(&iocb)) == EINPROGRESS) 1452 usleep(25000); 1453 ATF_REQUIRE_MSG(error == 0, "aio_error() returned %d", error); 1454 len = aio_return(&iocb); 1455 ATF_REQUIRE_MSG(len == 0, "read job returned %zd bytes", len); 1456 1457 /* No more writing to s[1]. */ 1458 ATF_REQUIRE(shutdown(s[1], SHUT_WR) != -1); 1459 1460 /* Block SIGPIPE so that we can detect the error in-band. */ 1461 sigemptyset(&set); 1462 sigaddset(&set, SIGPIPE); 1463 ATF_REQUIRE(sigprocmask(SIG_BLOCK, &set, NULL) == 0); 1464 1465 memset(&iocb, 0, sizeof(iocb)); 1466 iocb.aio_fildes = s[1]; 1467 iocb.aio_buf = buffer; 1468 iocb.aio_nbytes = bsz; 1469 ATF_REQUIRE(aio_write(&iocb) == 0); 1470 1471 /* Expect an error, analogous to send(2). 
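	 * A write on a socket that has been shut down for writing is
	 * accepted by aio_write() but completes with EPIPE.  SIGPIPE was
	 * blocked above so the failure is observed in-band here, much as a
	 * synchronous writer could do (sketch only):
	 *
	 *	if (send(s[1], buffer, bsz, MSG_NOSIGNAL) == -1 &&
	 *	    errno == EPIPE)
	 *		warnx("write side already shut down");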
*/ 1472 while ((error = aio_error(&iocb)) == EINPROGRESS) 1473 usleep(25000); 1474 ATF_REQUIRE_MSG(error == EPIPE, "aio_error() returned %d", error); 1475 1476 ATF_REQUIRE(close(s[0]) != -1); 1477 ATF_REQUIRE(close(s[1]) != -1); 1478 free(buffer); 1479 } 1480 1481 /* 1482 * test aio_fsync's behavior with bad inputs 1483 */ 1484 ATF_TC_WITHOUT_HEAD(aio_fsync_errors); 1485 ATF_TC_BODY(aio_fsync_errors, tc) 1486 { 1487 int fd; 1488 struct aiocb iocb; 1489 1490 ATF_REQUIRE_KERNEL_MODULE("aio"); 1491 ATF_REQUIRE_UNSAFE_AIO(); 1492 1493 fd = open(FILE_PATHNAME, O_RDWR | O_CREAT, 0600); 1494 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno)); 1495 unlink(FILE_PATHNAME); 1496 1497 /* aio_fsync should return EINVAL unless op is O_SYNC or O_DSYNC */ 1498 memset(&iocb, 0, sizeof(iocb)); 1499 iocb.aio_fildes = fd; 1500 ATF_CHECK_EQ(-1, aio_fsync(666, &iocb)); 1501 ATF_CHECK_EQ(EINVAL, errno); 1502 1503 /* aio_fsync should return EBADF if fd is not a valid descriptor */ 1504 memset(&iocb, 0, sizeof(iocb)); 1505 iocb.aio_fildes = 666; 1506 ATF_CHECK_EQ(-1, aio_fsync(O_SYNC, &iocb)); 1507 ATF_CHECK_EQ(EBADF, errno); 1508 1509 /* aio_fsync should return EINVAL if sigev_notify is invalid */ 1510 memset(&iocb, 0, sizeof(iocb)); 1511 iocb.aio_fildes = fd; 1512 iocb.aio_sigevent.sigev_notify = 666; 1513 ATF_CHECK_EQ(-1, aio_fsync(666, &iocb)); 1514 ATF_CHECK_EQ(EINVAL, errno); 1515 } 1516 1517 /* 1518 * This test just performs a basic test of aio_fsync(). 1519 */ 1520 static void 1521 aio_fsync_test(int op) 1522 { 1523 struct aiocb synccb, *iocbp; 1524 struct { 1525 struct aiocb iocb; 1526 bool done; 1527 char *buffer; 1528 } buffers[16]; 1529 struct stat sb; 1530 ssize_t rval; 1531 unsigned i; 1532 int fd; 1533 1534 ATF_REQUIRE_KERNEL_MODULE("aio"); 1535 ATF_REQUIRE_UNSAFE_AIO(); 1536 1537 fd = open(FILE_PATHNAME, O_RDWR | O_CREAT, 0600); 1538 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno)); 1539 unlink(FILE_PATHNAME); 1540 1541 ATF_REQUIRE(fstat(fd, &sb) == 0); 1542 ATF_REQUIRE(sb.st_blksize != 0); 1543 ATF_REQUIRE(ftruncate(fd, sb.st_blksize * nitems(buffers)) == 0); 1544 1545 /* 1546 * Queue several asynchronous write requests. Hopefully this 1547 * forces the aio_fsync() request to be deferred. There is no 1548 * reliable way to guarantee that however. 1549 */ 1550 srandomdev(); 1551 for (i = 0; i < nitems(buffers); i++) { 1552 buffers[i].done = false; 1553 memset(&buffers[i].iocb, 0, sizeof(buffers[i].iocb)); 1554 buffers[i].buffer = malloc(sb.st_blksize); 1555 aio_fill_buffer(buffers[i].buffer, sb.st_blksize, random()); 1556 buffers[i].iocb.aio_fildes = fd; 1557 buffers[i].iocb.aio_buf = buffers[i].buffer; 1558 buffers[i].iocb.aio_nbytes = sb.st_blksize; 1559 buffers[i].iocb.aio_offset = sb.st_blksize * i; 1560 ATF_REQUIRE(aio_write(&buffers[i].iocb) == 0); 1561 } 1562 1563 /* Queue the aio_fsync request. */ 1564 memset(&synccb, 0, sizeof(synccb)); 1565 synccb.aio_fildes = fd; 1566 ATF_REQUIRE(aio_fsync(op, &synccb) == 0); 1567 1568 /* Wait for requests to complete. 
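	 * aio_waitcomplete() hands back whichever request finishes next, so
	 * the loop matches each returned aiocb pointer against the
	 * outstanding jobs.  Since aio_fsync() should not be reported as
	 * complete until the writes queued ahead of it have been written,
	 * seeing synccb complete is treated as the signal that every write
	 * job must already have been reaped.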
*/ 1569 for (;;) { 1570 next: 1571 rval = aio_waitcomplete(&iocbp, NULL); 1572 ATF_REQUIRE(iocbp != NULL); 1573 if (iocbp == &synccb) { 1574 ATF_REQUIRE(rval == 0); 1575 break; 1576 } 1577 1578 for (i = 0; i < nitems(buffers); i++) { 1579 if (iocbp == &buffers[i].iocb) { 1580 ATF_REQUIRE(buffers[i].done == false); 1581 ATF_REQUIRE(rval == sb.st_blksize); 1582 buffers[i].done = true; 1583 goto next; 1584 } 1585 } 1586 1587 ATF_REQUIRE_MSG(false, "unmatched AIO request"); 1588 } 1589 1590 for (i = 0; i < nitems(buffers); i++) 1591 ATF_REQUIRE_MSG(buffers[i].done, 1592 "AIO request %u did not complete", i); 1593 1594 close(fd); 1595 } 1596 1597 ATF_TC_WITHOUT_HEAD(aio_fsync_sync_test); 1598 ATF_TC_BODY(aio_fsync_sync_test, tc) 1599 { 1600 aio_fsync_test(O_SYNC); 1601 } 1602 1603 ATF_TC_WITHOUT_HEAD(aio_fsync_dsync_test); 1604 ATF_TC_BODY(aio_fsync_dsync_test, tc) 1605 { 1606 aio_fsync_test(O_DSYNC); 1607 } 1608 1609 /* 1610 * We shouldn't be able to DoS the system by setting iov_len to an insane 1611 * value 1612 */ 1613 ATF_TC_WITHOUT_HEAD(aio_writev_dos_iov_len); 1614 ATF_TC_BODY(aio_writev_dos_iov_len, tc) 1615 { 1616 struct aiocb aio; 1617 const struct aiocb *const iocbs[] = {&aio}; 1618 const char *wbuf = "Hello, world!"; 1619 struct iovec iov[1]; 1620 ssize_t r; 1621 int fd; 1622 1623 ATF_REQUIRE_KERNEL_MODULE("aio"); 1624 ATF_REQUIRE_UNSAFE_AIO(); 1625 1626 fd = open("testfile", O_RDWR | O_CREAT, 0600); 1627 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno)); 1628 1629 iov[0].iov_base = __DECONST(void*, wbuf); 1630 iov[0].iov_len = 1 << 30; 1631 bzero(&aio, sizeof(aio)); 1632 aio.aio_fildes = fd; 1633 aio.aio_offset = 0; 1634 aio.aio_iov = iov; 1635 aio.aio_iovcnt = 1; 1636 1637 r = aio_writev(&aio); 1638 ATF_CHECK_EQ_MSG(0, r, "aio_writev returned %zd", r); 1639 ATF_REQUIRE_EQ(0, aio_suspend(iocbs, 1, NULL)); 1640 r = aio_return(&aio); 1641 ATF_CHECK_EQ_MSG(-1, r, "aio_return returned %zd", r); 1642 ATF_CHECK_MSG(errno == EFAULT || errno == EINVAL, 1643 "aio_writev: %s", strerror(errno)); 1644 1645 close(fd); 1646 } 1647 1648 /* 1649 * We shouldn't be able to DoS the system by setting aio_iovcnt to an insane 1650 * value 1651 */ 1652 ATF_TC_WITHOUT_HEAD(aio_writev_dos_iovcnt); 1653 ATF_TC_BODY(aio_writev_dos_iovcnt, tc) 1654 { 1655 struct aiocb aio; 1656 const char *wbuf = "Hello, world!"; 1657 struct iovec iov[1]; 1658 ssize_t len; 1659 int fd; 1660 1661 ATF_REQUIRE_KERNEL_MODULE("aio"); 1662 ATF_REQUIRE_UNSAFE_AIO(); 1663 1664 fd = open("testfile", O_RDWR | O_CREAT, 0600); 1665 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno)); 1666 1667 len = strlen(wbuf); 1668 iov[0].iov_base = __DECONST(void*, wbuf); 1669 iov[0].iov_len = len; 1670 bzero(&aio, sizeof(aio)); 1671 aio.aio_fildes = fd; 1672 aio.aio_offset = 0; 1673 aio.aio_iov = iov; 1674 aio.aio_iovcnt = 1 << 30; 1675 1676 ATF_REQUIRE_EQ(-1, aio_writev(&aio)); 1677 ATF_CHECK_EQ(EINVAL, errno); 1678 1679 close(fd); 1680 } 1681 1682 ATF_TC_WITH_CLEANUP(aio_writev_efault); 1683 ATF_TC_HEAD(aio_writev_efault, tc) 1684 { 1685 atf_tc_set_md_var(tc, "descr", 1686 "Vectored AIO should gracefully handle invalid addresses"); 1687 atf_tc_set_md_var(tc, "require.user", "root"); 1688 } 1689 ATF_TC_BODY(aio_writev_efault, tc) 1690 { 1691 struct aiocb aio; 1692 ssize_t buflen; 1693 char *buffer; 1694 struct iovec iov[2]; 1695 long seed; 1696 int fd; 1697 1698 ATF_REQUIRE_KERNEL_MODULE("aio"); 1699 ATF_REQUIRE_UNSAFE_AIO(); 1700 1701 fd = aio_md_setup(); 1702 1703 seed = random(); 1704 buflen = 4096; 1705 buffer = 
malloc(buflen); 1706 aio_fill_buffer(buffer, buflen, seed); 1707 iov[0].iov_base = buffer; 1708 iov[0].iov_len = buflen; 1709 iov[1].iov_base = (void*)-1; /* Invalid! */ 1710 iov[1].iov_len = buflen; 1711 bzero(&aio, sizeof(aio)); 1712 aio.aio_fildes = fd; 1713 aio.aio_offset = 0; 1714 aio.aio_iov = iov; 1715 aio.aio_iovcnt = nitems(iov); 1716 1717 ATF_REQUIRE_EQ(-1, aio_writev(&aio)); 1718 ATF_CHECK_EQ(EFAULT, errno); 1719 1720 close(fd); 1721 } 1722 ATF_TC_CLEANUP(aio_writev_efault, tc) 1723 { 1724 aio_md_cleanup(); 1725 } 1726 1727 ATF_TC_WITHOUT_HEAD(aio_writev_empty_file_poll); 1728 ATF_TC_BODY(aio_writev_empty_file_poll, tc) 1729 { 1730 struct aiocb aio; 1731 int fd; 1732 1733 ATF_REQUIRE_KERNEL_MODULE("aio"); 1734 ATF_REQUIRE_UNSAFE_AIO(); 1735 1736 fd = open("testfile", O_RDWR | O_CREAT, 0600); 1737 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno)); 1738 1739 bzero(&aio, sizeof(aio)); 1740 aio.aio_fildes = fd; 1741 aio.aio_offset = 0; 1742 aio.aio_iovcnt = 0; 1743 1744 ATF_REQUIRE_EQ(0, aio_writev(&aio)); 1745 ATF_REQUIRE_EQ(0, suspend(&aio)); 1746 1747 close(fd); 1748 } 1749 1750 ATF_TC_WITHOUT_HEAD(aio_writev_empty_file_signal); 1751 ATF_TC_BODY(aio_writev_empty_file_signal, tc) 1752 { 1753 struct aiocb aio; 1754 int fd; 1755 1756 ATF_REQUIRE_KERNEL_MODULE("aio"); 1757 ATF_REQUIRE_UNSAFE_AIO(); 1758 1759 fd = open("testfile", O_RDWR | O_CREAT, 0600); 1760 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno)); 1761 1762 bzero(&aio, sizeof(aio)); 1763 aio.aio_fildes = fd; 1764 aio.aio_offset = 0; 1765 aio.aio_iovcnt = 0; 1766 aio.aio_sigevent = *setup_signal(); 1767 1768 ATF_REQUIRE_EQ(0, aio_writev(&aio)); 1769 ATF_REQUIRE_EQ(0, poll_signaled(&aio)); 1770 1771 close(fd); 1772 } 1773 1774 /* 1775 * Use an aiocb with kqueue and EV_ONESHOT. kqueue should deliver the event 1776 * only once, even if the user doesn't promptly call aio_return. 1777 */ 1778 ATF_TC_WITHOUT_HEAD(ev_oneshot); 1779 ATF_TC_BODY(ev_oneshot, tc) 1780 { 1781 int fd, kq, nevents; 1782 struct aiocb iocb; 1783 struct kevent events[1]; 1784 struct timespec timeout; 1785 1786 ATF_REQUIRE_KERNEL_MODULE("aio"); 1787 1788 kq = kqueue(); 1789 ATF_REQUIRE(kq >= 0); 1790 1791 fd = open(FILE_PATHNAME, O_RDWR | O_CREAT, 0600); 1792 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno)); 1793 1794 memset(&iocb, 0, sizeof(iocb)); 1795 iocb.aio_fildes = fd; 1796 iocb.aio_sigevent.sigev_notify_kqueue = kq; 1797 iocb.aio_sigevent.sigev_value.sival_ptr = (void*)0xdeadbeef; 1798 iocb.aio_sigevent.sigev_notify_kevent_flags = EV_ONESHOT; 1799 iocb.aio_sigevent.sigev_notify = SIGEV_KEVENT; 1800 1801 ATF_CHECK_EQ(0, aio_fsync(O_SYNC, &iocb)); 1802 1803 nevents = kevent(kq, NULL, 0, events, 1, NULL); 1804 ATF_CHECK_EQ(1, nevents); 1805 ATF_CHECK_EQ(events[0].ident, (uintptr_t) &iocb); 1806 ATF_CHECK_EQ(events[0].filter, EVFILT_AIO); 1807 ATF_CHECK_EQ(events[0].flags, EV_EOF | EV_ONESHOT); 1808 ATF_CHECK_EQ(events[0].fflags, 0); 1809 ATF_CHECK_EQ(events[0].data, 0); 1810 ATF_CHECK_EQ((uintptr_t)events[0].udata, 0xdeadbeef); 1811 1812 /* 1813 * Even though we haven't called aio_return, kevent will not return the 1814 * event again due to EV_ONESHOT. 
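	 * The flag reaches the kernel through sigev_notify_kevent_flags,
	 * set when the fsync request was queued above.  The second kevent()
	 * call below therefore uses a short 100 ms timeout only to give a
	 * spurious re-delivery a chance to show up; the expected result is
	 * zero events.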
1815 */ 1816 timeout.tv_sec = 0; 1817 timeout.tv_nsec = 100000000; 1818 nevents = kevent(kq, NULL, 0, events, 1, &timeout); 1819 ATF_CHECK_EQ(0, nevents); 1820 1821 ATF_CHECK_EQ(0, aio_return(&iocb)); 1822 close(fd); 1823 close(kq); 1824 } 1825 1826 1827 // aio_writev and aio_readv should still work even if the iovcnt is greater 1828 // than the number of buffered AIO operations permitted per process. 1829 ATF_TC_WITH_CLEANUP(vectored_big_iovcnt); 1830 ATF_TC_HEAD(vectored_big_iovcnt, tc) 1831 { 1832 atf_tc_set_md_var(tc, "descr", 1833 "Vectored AIO should still work even if the iovcnt is greater than " 1834 "the number of buffered AIO operations permitted by the process"); 1835 atf_tc_set_md_var(tc, "require.user", "root"); 1836 } 1837 ATF_TC_BODY(vectored_big_iovcnt, tc) 1838 { 1839 struct aiocb aio; 1840 struct iovec *iov; 1841 ssize_t len, buflen; 1842 char *buffer; 1843 const char *oid = "vfs.aio.max_buf_aio"; 1844 long seed; 1845 int max_buf_aio; 1846 int fd, i; 1847 ssize_t sysctl_len = sizeof(max_buf_aio); 1848 1849 ATF_REQUIRE_KERNEL_MODULE("aio"); 1850 ATF_REQUIRE_UNSAFE_AIO(); 1851 1852 if (sysctlbyname(oid, &max_buf_aio, &sysctl_len, NULL, 0) == -1) 1853 atf_libc_error(errno, "Failed to read %s", oid); 1854 1855 seed = random(); 1856 buflen = 512 * (max_buf_aio + 1); 1857 buffer = malloc(buflen); 1858 aio_fill_buffer(buffer, buflen, seed); 1859 iov = calloc(max_buf_aio + 1, sizeof(struct iovec)); 1860 1861 fd = aio_md_setup(); 1862 1863 bzero(&aio, sizeof(aio)); 1864 aio.aio_fildes = fd; 1865 aio.aio_offset = 0; 1866 for (i = 0; i < max_buf_aio + 1; i++) { 1867 iov[i].iov_base = &buffer[i * 512]; 1868 iov[i].iov_len = 512; 1869 } 1870 aio.aio_iov = iov; 1871 aio.aio_iovcnt = max_buf_aio + 1; 1872 1873 if (aio_writev(&aio) < 0) 1874 atf_tc_fail("aio_writev failed: %s", strerror(errno)); 1875 1876 len = poll(&aio); 1877 if (len < 0) 1878 atf_tc_fail("aio failed: %s", strerror(errno)); 1879 1880 if (len != buflen) 1881 atf_tc_fail("aio short write (%jd)", (intmax_t)len); 1882 1883 bzero(&aio, sizeof(aio)); 1884 aio.aio_fildes = fd; 1885 aio.aio_offset = 0; 1886 aio.aio_iov = iov; 1887 aio.aio_iovcnt = max_buf_aio + 1; 1888 1889 if (aio_readv(&aio) < 0) 1890 atf_tc_fail("aio_readv failed: %s", strerror(errno)); 1891 1892 len = poll(&aio); 1893 if (len < 0) 1894 atf_tc_fail("aio failed: %s", strerror(errno)); 1895 1896 if (len != buflen) 1897 atf_tc_fail("aio short read (%jd)", (intmax_t)len); 1898 1899 if (aio_test_buffer(buffer, buflen, seed) == 0) 1900 atf_tc_fail("buffer mismatched"); 1901 1902 close(fd); 1903 } 1904 ATF_TC_CLEANUP(vectored_big_iovcnt, tc) 1905 { 1906 aio_md_cleanup(); 1907 } 1908 1909 ATF_TC_WITHOUT_HEAD(vectored_file_poll); 1910 ATF_TC_BODY(vectored_file_poll, tc) 1911 { 1912 aio_file_test(poll, NULL, true); 1913 } 1914 1915 ATF_TC_WITHOUT_HEAD(vectored_thread); 1916 ATF_TC_BODY(vectored_thread, tc) 1917 { 1918 aio_file_test(poll_signaled, setup_thread(), true); 1919 } 1920 1921 ATF_TC_WITH_CLEANUP(vectored_md_poll); 1922 ATF_TC_HEAD(vectored_md_poll, tc) 1923 { 1924 atf_tc_set_md_var(tc, "require.user", "root"); 1925 } 1926 ATF_TC_BODY(vectored_md_poll, tc) 1927 { 1928 aio_md_test(poll, NULL, true); 1929 } 1930 ATF_TC_CLEANUP(vectored_md_poll, tc) 1931 { 1932 aio_md_cleanup(); 1933 } 1934 1935 ATF_TC_WITHOUT_HEAD(vectored_socket_poll); 1936 ATF_TC_BODY(vectored_socket_poll, tc) 1937 { 1938 aio_unix_socketpair_test(poll, NULL, true); 1939 } 1940 1941 // aio_writev and aio_readv should still work even if the iov contains elements 1942 // that aren't a multiple 
// of the device's sector size, and even if the total amount of I/O _is_ a
// multiple of the device's sector size.
ATF_TC_WITH_CLEANUP(vectored_unaligned);
ATF_TC_HEAD(vectored_unaligned, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "Vectored AIO should still work even if the iov contains elements "
	    "that aren't a multiple of the sector size.");
	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(vectored_unaligned, tc)
{
	struct aio_context ac;
	struct aiocb aio;
	struct iovec iov[3];
	ssize_t len, total_len;
	int fd;

	if (atf_tc_get_config_var_as_bool_wd(tc, "ci", false))
		atf_tc_skip("https://bugs.freebsd.org/258766");

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	/*
	 * Use a zvol with volmode=dev, so it will allow .d_write with
	 * unaligned uio.  geom devices use physio, which doesn't allow that.
	 */
	fd = aio_zvol_setup(atf_tc_get_ident(tc));
	aio_context_init(&ac, fd, fd, FILE_LEN);

	/* Break the buffer into 3 parts:
	 * * A 4kB part, aligned to 4kB
	 * * Two other parts that add up to 4kB:
	 *   - 256B
	 *   - 4kB - 256B
	 */
	iov[0].iov_base = ac.ac_buffer;
	iov[0].iov_len = 4096;
	iov[1].iov_base = (void*)((uintptr_t)iov[0].iov_base + iov[0].iov_len);
	iov[1].iov_len = 256;
	iov[2].iov_base = (void*)((uintptr_t)iov[1].iov_base + iov[1].iov_len);
	iov[2].iov_len = 4096 - iov[1].iov_len;
	total_len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
	bzero(&aio, sizeof(aio));
	aio.aio_fildes = ac.ac_write_fd;
	aio.aio_offset = 0;
	aio.aio_iov = iov;
	aio.aio_iovcnt = 3;

	if (aio_writev(&aio) < 0)
		atf_tc_fail("aio_writev failed: %s", strerror(errno));

	len = poll(&aio);
	if (len < 0)
		atf_tc_fail("aio failed: %s", strerror(errno));

	if (len != total_len)
		atf_tc_fail("aio short write (%jd)", (intmax_t)len);

	bzero(&aio, sizeof(aio));
	aio.aio_fildes = ac.ac_read_fd;
	aio.aio_offset = 0;
	aio.aio_iov = iov;
	aio.aio_iovcnt = 3;

	if (aio_readv(&aio) < 0)
		atf_tc_fail("aio_readv failed: %s", strerror(errno));
	len = poll(&aio);

	ATF_REQUIRE_MSG(aio_test_buffer(ac.ac_buffer, total_len,
	    ac.ac_seed) != 0, "aio_test_buffer: internal error");

	close(fd);
}
ATF_TC_CLEANUP(vectored_unaligned, tc)
{
	aio_zvol_cleanup(atf_tc_get_ident(tc));
}

static void
aio_zvol_test(completion comp, struct sigevent *sev, bool vectored,
    const char *unique)
{
	struct aio_context ac;
	int fd;

	fd = aio_zvol_setup(unique);
	aio_context_init(&ac, fd, fd, MD_LEN);
	if (vectored) {
		aio_writev_test(&ac, comp, sev);
		aio_readv_test(&ac, comp, sev);
	} else {
		aio_write_test(&ac, comp, sev);
		aio_read_test(&ac, comp, sev);
	}

	close(fd);
}

/*
 * Note that unlike md, the zvol is not a geom device, does not allow unmapped
 * buffers, and does not use physio.
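 * It therefore exercises a different kernel I/O path than the md cases
 * above; as noted in vectored_unaligned, the volmode=dev zvol accepts
 * unaligned and vectored uio layouts that physio would reject.  Setting up
 * an equivalent device by hand would look roughly like (sketch only):
 *
 *	truncate -s 256m test_vdev
 *	zpool create aio_testpool $PWD/test_vdev
 *	zfs create -o volmode=dev -V 64m aio_testpool/aio_testvol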
2045 */ 2046 ATF_TC_WITH_CLEANUP(vectored_zvol_poll); 2047 ATF_TC_HEAD(vectored_zvol_poll, tc) 2048 { 2049 atf_tc_set_md_var(tc, "require.user", "root"); 2050 } 2051 ATF_TC_BODY(vectored_zvol_poll, tc) 2052 { 2053 if (atf_tc_get_config_var_as_bool_wd(tc, "ci", false)) 2054 atf_tc_skip("https://bugs.freebsd.org/258766"); 2055 aio_zvol_test(poll, NULL, true, atf_tc_get_ident(tc)); 2056 } 2057 ATF_TC_CLEANUP(vectored_zvol_poll, tc) 2058 { 2059 aio_zvol_cleanup(atf_tc_get_ident(tc)); 2060 } 2061 2062 ATF_TP_ADD_TCS(tp) 2063 { 2064 2065 /* Test every file type with every completion method */ 2066 ATF_TP_ADD_TC(tp, file_kq); 2067 ATF_TP_ADD_TC(tp, file_poll); 2068 ATF_TP_ADD_TC(tp, file_signal); 2069 ATF_TP_ADD_TC(tp, file_suspend); 2070 ATF_TP_ADD_TC(tp, file_thread); 2071 ATF_TP_ADD_TC(tp, file_waitcomplete); 2072 ATF_TP_ADD_TC(tp, fifo_kq); 2073 ATF_TP_ADD_TC(tp, fifo_poll); 2074 ATF_TP_ADD_TC(tp, fifo_signal); 2075 ATF_TP_ADD_TC(tp, fifo_suspend); 2076 ATF_TP_ADD_TC(tp, fifo_thread); 2077 ATF_TP_ADD_TC(tp, fifo_waitcomplete); 2078 ATF_TP_ADD_TC(tp, socket_kq); 2079 ATF_TP_ADD_TC(tp, socket_poll); 2080 ATF_TP_ADD_TC(tp, socket_signal); 2081 ATF_TP_ADD_TC(tp, socket_suspend); 2082 ATF_TP_ADD_TC(tp, socket_thread); 2083 ATF_TP_ADD_TC(tp, socket_waitcomplete); 2084 ATF_TP_ADD_TC(tp, pty_kq); 2085 ATF_TP_ADD_TC(tp, pty_poll); 2086 ATF_TP_ADD_TC(tp, pty_signal); 2087 ATF_TP_ADD_TC(tp, pty_suspend); 2088 ATF_TP_ADD_TC(tp, pty_thread); 2089 ATF_TP_ADD_TC(tp, pty_waitcomplete); 2090 ATF_TP_ADD_TC(tp, pipe_kq); 2091 ATF_TP_ADD_TC(tp, pipe_poll); 2092 ATF_TP_ADD_TC(tp, pipe_signal); 2093 ATF_TP_ADD_TC(tp, pipe_suspend); 2094 ATF_TP_ADD_TC(tp, pipe_thread); 2095 ATF_TP_ADD_TC(tp, pipe_waitcomplete); 2096 ATF_TP_ADD_TC(tp, md_kq); 2097 ATF_TP_ADD_TC(tp, md_poll); 2098 ATF_TP_ADD_TC(tp, md_signal); 2099 ATF_TP_ADD_TC(tp, md_suspend); 2100 ATF_TP_ADD_TC(tp, md_thread); 2101 ATF_TP_ADD_TC(tp, md_waitcomplete); 2102 2103 /* Various special cases */ 2104 ATF_TP_ADD_TC(tp, aio_fsync_errors); 2105 ATF_TP_ADD_TC(tp, aio_fsync_sync_test); 2106 ATF_TP_ADD_TC(tp, aio_fsync_dsync_test); 2107 ATF_TP_ADD_TC(tp, aio_large_read_test); 2108 ATF_TP_ADD_TC(tp, aio_socket_two_reads); 2109 ATF_TP_ADD_TC(tp, aio_socket_blocking_short_write); 2110 ATF_TP_ADD_TC(tp, aio_socket_blocking_short_write_vectored); 2111 ATF_TP_ADD_TC(tp, aio_socket_listen_fail); 2112 ATF_TP_ADD_TC(tp, aio_socket_listen_pending); 2113 ATF_TP_ADD_TC(tp, aio_socket_short_write_cancel); 2114 ATF_TP_ADD_TC(tp, aio_socket_shutdown); 2115 ATF_TP_ADD_TC(tp, aio_writev_dos_iov_len); 2116 ATF_TP_ADD_TC(tp, aio_writev_dos_iovcnt); 2117 ATF_TP_ADD_TC(tp, aio_writev_efault); 2118 ATF_TP_ADD_TC(tp, aio_writev_empty_file_poll); 2119 ATF_TP_ADD_TC(tp, aio_writev_empty_file_signal); 2120 ATF_TP_ADD_TC(tp, ev_oneshot); 2121 ATF_TP_ADD_TC(tp, vectored_big_iovcnt); 2122 ATF_TP_ADD_TC(tp, vectored_file_poll); 2123 ATF_TP_ADD_TC(tp, vectored_md_poll); 2124 ATF_TP_ADD_TC(tp, vectored_zvol_poll); 2125 ATF_TP_ADD_TC(tp, vectored_unaligned); 2126 ATF_TP_ADD_TC(tp, vectored_socket_poll); 2127 ATF_TP_ADD_TC(tp, vectored_thread); 2128 2129 return (atf_no_error()); 2130 } 2131
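
/*
 * These cases are normally driven by kyua(1) as part of the FreeBSD test
 * suite.  An illustrative invocation (install paths can differ):
 *
 *	cd /usr/tests/sys/aio && kyua test
 */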