/*-
 * Copyright (c) 2004 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Regression test to do some very basic AIO exercising on several types of
 * file descriptors.  Currently, the tests consist of initializing a fixed
 * size buffer with pseudo-random data, writing it to one fd using AIO, then
 * reading it from a second descriptor using AIO.  For some targets, the same
 * fd is used for write and read (e.g., file, md device), but for others the
 * operation is performed on a peer (pty, socket, fifo, etc).  For each file
 * descriptor type, several completion methods are tested.  This test program
 * does not attempt to exercise error cases or more subtle asynchronous
 * behavior; it just makes sure that the basic operations work on some basic
 * object types.
 */

#include <sys/param.h>
#include <sys/event.h>
#include <sys/mdioctl.h>
#include <sys/module.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/un.h>

#include <aio.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libutil.h>
#include <limits.h>
#include <semaphore.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <termios.h>
#include <unistd.h>

#include <atf-c.h>

#include "freebsd_test_suite/macros.h"
#include "local.h"

/*
 * GLOBAL_MAX sets the largest usable buffer size to be read and written, as
 * it sizes ac_buffer in the aio_context structure.  It is also the default
 * size for file I/O.  For other types, we use smaller blocks or we risk
 * blocking (and we run in a single process/thread so that would be bad).
 */
#define	GLOBAL_MAX	16384

#define	BUFFER_MAX	GLOBAL_MAX

/*
 * A completion function will block until the aio has completed, then return
 * the result of the aio.  errno will be set appropriately.
 */
typedef ssize_t (*completion)(struct aiocb*);

struct aio_context {
	int	ac_read_fd, ac_write_fd;
	long	ac_seed;
	char	ac_buffer[GLOBAL_MAX];
	int	ac_buflen;
	int	ac_seconds;
};

static sem_t	completions;

/*
 * Fill a buffer given a seed that can be fed into srandom() to initialize
 * the PRNG in a repeatable manner.
 */
static void
aio_fill_buffer(char *buffer, int len, long seed)
{
	char ch;
	int i;

	srandom(seed);
	for (i = 0; i < len; i++) {
		ch = random() & 0xff;
		buffer[i] = ch;
	}
}

/*
 * Test that a buffer matches a given seed.  See aio_fill_buffer().  Return
 * (1) on a match, (0) on a mismatch.
 */
static int
aio_test_buffer(char *buffer, int len, long seed)
{
	char ch;
	int i;

	srandom(seed);
	for (i = 0; i < len; i++) {
		ch = random() & 0xff;
		if (buffer[i] != ch)
			return (0);
	}
	return (1);
}

/*
 * Initialize a testing context given the file descriptors provided by the
 * test setup.
 */
static void
aio_context_init(struct aio_context *ac, int read_fd,
    int write_fd, int buflen)
{

	ATF_REQUIRE_MSG(buflen <= BUFFER_MAX,
	    "aio_context_init: buffer too large (%d > %d)",
	    buflen, BUFFER_MAX);
	bzero(ac, sizeof(*ac));
	ac->ac_read_fd = read_fd;
	ac->ac_write_fd = write_fd;
	ac->ac_buflen = buflen;
	srandomdev();
	ac->ac_seed = random();
	aio_fill_buffer(ac->ac_buffer, buflen, ac->ac_seed);
	ATF_REQUIRE_MSG(aio_test_buffer(ac->ac_buffer, buflen,
	    ac->ac_seed) != 0, "aio_test_buffer: internal error");
}

/*
 * Completion method that busy-waits, polling aio_error() with a short sleep
 * until the request is no longer in progress.
 */
static ssize_t
poll(struct aiocb *aio)
{
	int error;

	while ((error = aio_error(aio)) == EINPROGRESS)
		usleep(25000);
	if (error)
		return (error);
	else
		return (aio_return(aio));
}

static void
sigusr1_handler(int sig __unused)
{
	ATF_REQUIRE_EQ(0, sem_post(&completions));
}

static void
thr_handler(union sigval sv __unused)
{
	ATF_REQUIRE_EQ(0, sem_post(&completions));
}

static ssize_t
poll_signaled(struct aiocb *aio)
{
	int error;

	ATF_REQUIRE_EQ(0, sem_wait(&completions));
	error = aio_error(aio);
	switch (error) {
	case EINPROGRESS:
		errno = EINTR;
		return (-1);
	case 0:
		return (aio_return(aio));
	default:
		return (error);
	}
}

/*
 * Setup a signal handler for signal delivery tests.
 * This isn't thread safe, but it's ok since ATF runs each testcase in a
 * separate process.
 */
static struct sigevent*
setup_signal(void)
{
	static struct sigevent sev;

	ATF_REQUIRE_EQ(0, sem_init(&completions, false, 0));
	sev.sigev_notify = SIGEV_SIGNAL;
	sev.sigev_signo = SIGUSR1;
	ATF_REQUIRE(SIG_ERR != signal(SIGUSR1, sigusr1_handler));
	return (&sev);
}

/*
 * Setup a thread for thread delivery tests.
 * This isn't thread safe, but it's ok since ATF runs each testcase in a
 * separate process.
 */
static struct sigevent*
setup_thread(void)
{
	static struct sigevent sev;

	ATF_REQUIRE_EQ(0, sem_init(&completions, false, 0));
	sev.sigev_notify = SIGEV_THREAD;
	sev.sigev_notify_function = thr_handler;
	sev.sigev_notify_attributes = NULL;
	return (&sev);
}

static ssize_t
suspend(struct aiocb *aio)
{
	const struct aiocb *const iocbs[] = {aio};
	int error;

	error = aio_suspend(iocbs, 1, NULL);
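	/*
	 * aio_suspend() blocks until at least one request in the list has
	 * completed; a zero return means the request finished and its final
	 * status can be collected with aio_return() below.
	 */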
	if (error == 0)
		return (aio_return(aio));
	else
		return (error);
}

static ssize_t
waitcomplete(struct aiocb *aio)
{
	struct aiocb *aiop;
	ssize_t ret;

	ret = aio_waitcomplete(&aiop, NULL);
	ATF_REQUIRE_EQ(aio, aiop);
	return (ret);
}

/*
 * Set up a sigevent for kqueue notification.  This isn't thread
 * safe, but it's ok because ATF runs every test case in a separate process.
 */
static struct sigevent*
setup_kqueue(void)
{
	static struct sigevent sev;
	static int kq;

	kq = kqueue();
	ATF_REQUIRE(kq >= 0);

	memset(&sev, 0, sizeof(sev));
	sev.sigev_notify_kqueue = kq;
	sev.sigev_value.sival_ptr = (void*)0xdeadbeef;
	sev.sigev_notify = SIGEV_KEVENT;

	return (&sev);
}

static ssize_t
poll_kqueue(struct aiocb *aio)
{
	int kq, nevents;
	struct kevent events[1];

	kq = aio->aio_sigevent.sigev_notify_kqueue;

	nevents = kevent(kq, NULL, 0, events, 1, NULL);
	ATF_CHECK_EQ(1, nevents);
	ATF_CHECK_EQ(events[0].ident, (uintptr_t) aio);
	ATF_CHECK_EQ(events[0].filter, EVFILT_AIO);
	ATF_CHECK_EQ(events[0].flags, EV_EOF);
	ATF_CHECK_EQ(events[0].fflags, 0);
	ATF_CHECK_EQ(events[0].data, 0);
	ATF_CHECK_EQ((uintptr_t)events[0].udata, 0xdeadbeef);

	return (aio_return(aio));
}

/*
 * Perform a simple write test of our initialized data buffer to the provided
 * file descriptor.
 */
static void
aio_write_test(struct aio_context *ac, completion comp, struct sigevent *sev)
{
	struct aiocb aio;
	ssize_t len;

	bzero(&aio, sizeof(aio));
	aio.aio_buf = ac->ac_buffer;
	aio.aio_nbytes = ac->ac_buflen;
	aio.aio_fildes = ac->ac_write_fd;
	aio.aio_offset = 0;
	if (sev)
		aio.aio_sigevent = *sev;

	if (aio_write(&aio) < 0)
		atf_tc_fail("aio_write failed: %s", strerror(errno));

	len = comp(&aio);
	if (len < 0)
		atf_tc_fail("aio failed: %s", strerror(errno));

	if (len != ac->ac_buflen)
		atf_tc_fail("aio short write (%jd)", (intmax_t)len);
}

/*
 * Perform a vectored I/O test of our initialized data buffer to the provided
 * file descriptor.
 *
 * To vectorize the linear buffer, chop it up into two pieces of dissimilar
 * size, and swap their offsets.
 */
static void
aio_writev_test(struct aio_context *ac, completion comp, struct sigevent *sev)
{
	struct aiocb aio;
	struct iovec iov[2];
	size_t len0, len1;
	ssize_t len;

	bzero(&aio, sizeof(aio));

	aio.aio_fildes = ac->ac_write_fd;
	aio.aio_offset = 0;
	len0 = ac->ac_buflen * 3 / 4;
	len1 = ac->ac_buflen / 4;
	iov[0].iov_base = ac->ac_buffer + len1;
	iov[0].iov_len = len0;
	iov[1].iov_base = ac->ac_buffer;
	iov[1].iov_len = len1;
	aio.aio_iov = iov;
	aio.aio_iovcnt = 2;
	if (sev)
		aio.aio_sigevent = *sev;

	if (aio_writev(&aio) < 0)
		atf_tc_fail("aio_writev failed: %s", strerror(errno));

	len = comp(&aio);
	if (len < 0)
		atf_tc_fail("aio failed: %s", strerror(errno));

	if (len != ac->ac_buflen)
		atf_tc_fail("aio short write (%jd)", (intmax_t)len);
}

/*
 * Perform a simple read test of our initialized data buffer from the
 * provided file descriptor.
 */
static void
aio_read_test(struct aio_context *ac, completion comp, struct sigevent *sev)
{
	struct aiocb aio;
	ssize_t len;

	bzero(ac->ac_buffer, ac->ac_buflen);
	bzero(&aio, sizeof(aio));
	aio.aio_buf = ac->ac_buffer;
	aio.aio_nbytes = ac->ac_buflen;
	aio.aio_fildes = ac->ac_read_fd;
	aio.aio_offset = 0;
	if (sev)
		aio.aio_sigevent = *sev;

	if (aio_read(&aio) < 0)
		atf_tc_fail("aio_read failed: %s", strerror(errno));

	len = comp(&aio);
	if (len < 0)
		atf_tc_fail("aio failed: %s", strerror(errno));

	ATF_REQUIRE_EQ_MSG(len, ac->ac_buflen,
	    "aio short read (%jd)", (intmax_t)len);

	if (aio_test_buffer(ac->ac_buffer, ac->ac_buflen, ac->ac_seed) == 0)
		atf_tc_fail("buffer mismatched");
}

/*
 * Perform a vectored read test of our initialized data buffer from the
 * provided file descriptor, using the same split-and-swapped iovec layout
 * as aio_writev_test().
 */
static void
aio_readv_test(struct aio_context *ac, completion comp, struct sigevent *sev)
{
	struct aiocb aio;
	struct iovec iov[2];
	size_t len0, len1;
	ssize_t len;

	bzero(ac->ac_buffer, ac->ac_buflen);
	bzero(&aio, sizeof(aio));
	aio.aio_fildes = ac->ac_read_fd;
	aio.aio_offset = 0;
	len0 = ac->ac_buflen * 3 / 4;
	len1 = ac->ac_buflen / 4;
	iov[0].iov_base = ac->ac_buffer + len1;
	iov[0].iov_len = len0;
	iov[1].iov_base = ac->ac_buffer;
	iov[1].iov_len = len1;
	aio.aio_iov = iov;
	aio.aio_iovcnt = 2;
	if (sev)
		aio.aio_sigevent = *sev;

	if (aio_readv(&aio) < 0)
		atf_tc_fail("aio_readv failed: %s", strerror(errno));

	len = comp(&aio);
	if (len < 0)
		atf_tc_fail("aio failed: %s", strerror(errno));

	ATF_REQUIRE_EQ_MSG(len, ac->ac_buflen,
	    "aio short read (%jd)", (intmax_t)len);

	if (aio_test_buffer(ac->ac_buffer, ac->ac_buflen, ac->ac_seed) == 0)
		atf_tc_fail("buffer mismatched");
}

/*
 * Series of type-specific tests for AIO.  For now, we just make sure we can
 * issue a write and then a read to each type.  We assume that once a write
 * is issued, a read can follow.
 */

/*
 * Test with a classic file.  Assumes we can create a moderate size temporary
 * file.
 */
#define	FILE_LEN	GLOBAL_MAX
#define	FILE_PATHNAME	"testfile"

static void
aio_file_test(completion comp, struct sigevent *sev, bool vectored)
{
	struct aio_context ac;
	int fd;

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	fd = open(FILE_PATHNAME, O_RDWR | O_CREAT, 0600);
	ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));

	aio_context_init(&ac, fd, fd, FILE_LEN);
	if (vectored) {
		aio_writev_test(&ac, comp, sev);
		aio_readv_test(&ac, comp, sev);
	} else {
		aio_write_test(&ac, comp, sev);
		aio_read_test(&ac, comp, sev);
	}
	close(fd);
}

ATF_TC_WITHOUT_HEAD(file_kq);
ATF_TC_BODY(file_kq, tc)
{
	aio_file_test(poll_kqueue, setup_kqueue(), false);
}

ATF_TC_WITHOUT_HEAD(file_poll);
ATF_TC_BODY(file_poll, tc)
{
	aio_file_test(poll, NULL, false);
}

ATF_TC_WITHOUT_HEAD(file_signal);
ATF_TC_BODY(file_signal, tc)
{
	aio_file_test(poll_signaled, setup_signal(), false);
}

ATF_TC_WITHOUT_HEAD(file_suspend);
ATF_TC_BODY(file_suspend, tc)
{
	aio_file_test(suspend, NULL, false);
}

ATF_TC_WITHOUT_HEAD(file_thread);
ATF_TC_BODY(file_thread, tc)
{
	aio_file_test(poll_signaled, setup_thread(), false);
}

ATF_TC_WITHOUT_HEAD(file_waitcomplete);
ATF_TC_BODY(file_waitcomplete, tc)
{
	aio_file_test(waitcomplete, NULL, false);
}

#define	FIFO_LEN	256
#define	FIFO_PATHNAME	"testfifo"

static void
aio_fifo_test(completion comp, struct sigevent *sev)
{
	int error, read_fd = -1, write_fd = -1;
	struct aio_context ac;

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	ATF_REQUIRE_MSG(mkfifo(FIFO_PATHNAME, 0600) != -1,
	    "mkfifo failed: %s", strerror(errno));

	read_fd = open(FIFO_PATHNAME, O_RDONLY | O_NONBLOCK);
	if (read_fd == -1) {
		error = errno;
		atf_tc_fail("read_fd open failed: %s",
		    strerror(error));
	}

	write_fd = open(FIFO_PATHNAME, O_WRONLY);
	if (write_fd == -1) {
		error = errno;
		atf_tc_fail("write_fd open failed: %s",
		    strerror(error));
	}

	aio_context_init(&ac, read_fd, write_fd, FIFO_LEN);
	aio_write_test(&ac, comp, sev);
	aio_read_test(&ac, comp, sev);

	close(read_fd);
	close(write_fd);
}

ATF_TC_WITHOUT_HEAD(fifo_kq);
ATF_TC_BODY(fifo_kq, tc)
{
	aio_fifo_test(poll_kqueue, setup_kqueue());
}

ATF_TC_WITHOUT_HEAD(fifo_poll);
ATF_TC_BODY(fifo_poll, tc)
{
	aio_fifo_test(poll, NULL);
}

ATF_TC_WITHOUT_HEAD(fifo_signal);
ATF_TC_BODY(fifo_signal, tc)
{
	aio_fifo_test(poll_signaled, setup_signal());
}

ATF_TC_WITHOUT_HEAD(fifo_suspend);
ATF_TC_BODY(fifo_suspend, tc)
{
	aio_fifo_test(suspend, NULL);
}

ATF_TC_WITHOUT_HEAD(fifo_thread);
ATF_TC_BODY(fifo_thread, tc)
{
	aio_fifo_test(poll_signaled, setup_thread());
}

ATF_TC_WITHOUT_HEAD(fifo_waitcomplete);
ATF_TC_BODY(fifo_waitcomplete, tc)
{
	aio_fifo_test(waitcomplete, NULL);
}

#define	UNIX_SOCKETPAIR_LEN	256
static void
aio_unix_socketpair_test(completion comp, struct sigevent *sev, bool vectored)
{
	struct aio_context ac;
	struct rusage ru_before, ru_after;
	int sockets[2];

	ATF_REQUIRE_KERNEL_MODULE("aio");

	ATF_REQUIRE_MSG(socketpair(PF_UNIX, SOCK_STREAM, 0, sockets) != -1,
	    "socketpair failed: %s", strerror(errno));

	aio_context_init(&ac, sockets[0], sockets[1], UNIX_SOCKETPAIR_LEN);
	ATF_REQUIRE_MSG(getrusage(RUSAGE_SELF, &ru_before) != -1,
	    "getrusage failed: %s", strerror(errno));
	if (vectored) {
		aio_writev_test(&ac, comp, sev);
		aio_readv_test(&ac, comp, sev);
	} else {
		aio_write_test(&ac, comp, sev);
		aio_read_test(&ac, comp, sev);
	}
	ATF_REQUIRE_MSG(getrusage(RUSAGE_SELF, &ru_after) != -1,
	    "getrusage failed: %s", strerror(errno));
	ATF_REQUIRE(ru_after.ru_msgsnd == ru_before.ru_msgsnd + 1);
	ATF_REQUIRE(ru_after.ru_msgrcv == ru_before.ru_msgrcv + 1);

	close(sockets[0]);
	close(sockets[1]);
}

ATF_TC_WITHOUT_HEAD(socket_kq);
ATF_TC_BODY(socket_kq, tc)
{
	aio_unix_socketpair_test(poll_kqueue, setup_kqueue(), false);
}

ATF_TC_WITHOUT_HEAD(socket_poll);
ATF_TC_BODY(socket_poll, tc)
{
	aio_unix_socketpair_test(poll, NULL, false);
}

ATF_TC_WITHOUT_HEAD(socket_signal);
ATF_TC_BODY(socket_signal, tc)
{
	aio_unix_socketpair_test(poll_signaled, setup_signal(), false);
}

ATF_TC_WITHOUT_HEAD(socket_suspend);
ATF_TC_BODY(socket_suspend, tc)
{
	aio_unix_socketpair_test(suspend, NULL, false);
}

ATF_TC_WITHOUT_HEAD(socket_thread);
ATF_TC_BODY(socket_thread, tc)
{
	aio_unix_socketpair_test(poll_signaled, setup_thread(), false);
}

ATF_TC_WITHOUT_HEAD(socket_waitcomplete);
ATF_TC_BODY(socket_waitcomplete, tc)
{
	aio_unix_socketpair_test(waitcomplete, NULL, false);
}

struct aio_pty_arg {
	int	apa_read_fd;
	int	apa_write_fd;
};

#define	PTY_LEN		256
static void
aio_pty_test(completion comp, struct sigevent *sev)
{
	struct aio_context ac;
	int read_fd, write_fd;
	struct termios ts;
	int error;

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	ATF_REQUIRE_MSG(openpty(&read_fd, &write_fd, NULL, NULL, NULL) == 0,
	    "openpty failed: %s", strerror(errno));

	if (tcgetattr(write_fd, &ts) < 0) {
		error = errno;
		atf_tc_fail("tcgetattr failed: %s", strerror(error));
	}
	cfmakeraw(&ts);
	if (tcsetattr(write_fd, TCSANOW, &ts) < 0) {
		error = errno;
		atf_tc_fail("tcsetattr failed: %s", strerror(error));
	}
	aio_context_init(&ac, read_fd, write_fd, PTY_LEN);

	aio_write_test(&ac, comp, sev);
	aio_read_test(&ac, comp, sev);

	close(read_fd);
	close(write_fd);
}

ATF_TC_WITHOUT_HEAD(pty_kq);
ATF_TC_BODY(pty_kq, tc)
{
	aio_pty_test(poll_kqueue, setup_kqueue());
}

ATF_TC_WITHOUT_HEAD(pty_poll);
ATF_TC_BODY(pty_poll, tc)
{
	aio_pty_test(poll, NULL);
}

ATF_TC_WITHOUT_HEAD(pty_signal);
ATF_TC_BODY(pty_signal, tc)
{
	aio_pty_test(poll_signaled, setup_signal());
}

ATF_TC_WITHOUT_HEAD(pty_suspend);
ATF_TC_BODY(pty_suspend, tc)
{
	aio_pty_test(suspend, NULL);
}

ATF_TC_WITHOUT_HEAD(pty_thread);
ATF_TC_BODY(pty_thread, tc)
{
	aio_pty_test(poll_signaled, setup_thread());
}

ATF_TC_WITHOUT_HEAD(pty_waitcomplete);
ATF_TC_BODY(pty_waitcomplete, tc)
{
	aio_pty_test(waitcomplete, NULL);
}

#define	PIPE_LEN	256
static void
aio_pipe_test(completion comp, struct sigevent *sev)
{
	struct aio_context ac;
	int pipes[2];

	ATF_REQUIRE_KERNEL_MODULE("aio");
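	/*
	 * Like the other fifo/pty/pipe cases, this test is skipped unless
	 * unsafe AIO has been enabled for the test run; see
	 * ATF_REQUIRE_UNSAFE_AIO() in freebsd_test_suite/macros.h.
	 */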
	ATF_REQUIRE_UNSAFE_AIO();

	ATF_REQUIRE_MSG(pipe(pipes) != -1,
	    "pipe failed: %s", strerror(errno));

	aio_context_init(&ac, pipes[0], pipes[1], PIPE_LEN);
	aio_write_test(&ac, comp, sev);
	aio_read_test(&ac, comp, sev);

	close(pipes[0]);
	close(pipes[1]);
}

ATF_TC_WITHOUT_HEAD(pipe_kq);
ATF_TC_BODY(pipe_kq, tc)
{
	aio_pipe_test(poll_kqueue, setup_kqueue());
}

ATF_TC_WITHOUT_HEAD(pipe_poll);
ATF_TC_BODY(pipe_poll, tc)
{
	aio_pipe_test(poll, NULL);
}

ATF_TC_WITHOUT_HEAD(pipe_signal);
ATF_TC_BODY(pipe_signal, tc)
{
	aio_pipe_test(poll_signaled, setup_signal());
}

ATF_TC_WITHOUT_HEAD(pipe_suspend);
ATF_TC_BODY(pipe_suspend, tc)
{
	aio_pipe_test(suspend, NULL);
}

ATF_TC_WITHOUT_HEAD(pipe_thread);
ATF_TC_BODY(pipe_thread, tc)
{
	aio_pipe_test(poll_signaled, setup_thread());
}

ATF_TC_WITHOUT_HEAD(pipe_waitcomplete);
ATF_TC_BODY(pipe_waitcomplete, tc)
{
	aio_pipe_test(waitcomplete, NULL);
}

#define	MD_LEN		GLOBAL_MAX
#define	MDUNIT_LINK	"mdunit_link"

/*
 * Attach a malloc-backed md(4) device to test against, and record its unit
 * number in a symlink so that the cleanup routine can find it later.
 */
static int
aio_md_setup(void)
{
	int error, fd, mdctl_fd, unit;
	char pathname[PATH_MAX];
	struct md_ioctl mdio;
	char buf[80];

	ATF_REQUIRE_KERNEL_MODULE("aio");

	mdctl_fd = open("/dev/" MDCTL_NAME, O_RDWR, 0);
	ATF_REQUIRE_MSG(mdctl_fd != -1,
	    "opening /dev/%s failed: %s", MDCTL_NAME, strerror(errno));

	bzero(&mdio, sizeof(mdio));
	mdio.md_version = MDIOVERSION;
	mdio.md_type = MD_MALLOC;
	mdio.md_options = MD_AUTOUNIT | MD_COMPRESS;
	mdio.md_mediasize = GLOBAL_MAX;
	mdio.md_sectorsize = 512;
	strlcpy(buf, __func__, sizeof(buf));
	mdio.md_label = buf;

	if (ioctl(mdctl_fd, MDIOCATTACH, &mdio) < 0) {
		error = errno;
		atf_tc_fail("ioctl MDIOCATTACH failed: %s", strerror(error));
	}
	close(mdctl_fd);

	/* Store the md unit number in a symlink for future cleanup. */
	unit = mdio.md_unit;
	snprintf(buf, sizeof(buf), "%d", unit);
	ATF_REQUIRE_EQ(0, symlink(buf, MDUNIT_LINK));
	snprintf(pathname, PATH_MAX, "/dev/md%d", unit);
	fd = open(pathname, O_RDWR);
	ATF_REQUIRE_MSG(fd != -1,
	    "opening %s failed: %s", pathname, strerror(errno));

	return (fd);
}

/*
 * Detach the md(4) device created by aio_md_setup(), if any.
 */
static void
aio_md_cleanup(void)
{
	struct md_ioctl mdio;
	int mdctl_fd, n, unit;
	char buf[80];

	mdctl_fd = open("/dev/" MDCTL_NAME, O_RDWR, 0);
	if (mdctl_fd < 0) {
		fprintf(stderr, "opening /dev/%s failed: %s\n", MDCTL_NAME,
		    strerror(errno));
		return;
	}
	n = readlink(MDUNIT_LINK, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		if (sscanf(buf, "%d", &unit) == 1 && unit >= 0) {
			bzero(&mdio, sizeof(mdio));
			mdio.md_version = MDIOVERSION;
			mdio.md_unit = unit;
			if (ioctl(mdctl_fd, MDIOCDETACH, &mdio) == -1) {
				fprintf(stderr,
				    "ioctl MDIOCDETACH unit %d failed: %s\n",
				    unit, strerror(errno));
			}
		}
	}

	close(mdctl_fd);
}

static void
aio_md_test(completion comp, struct sigevent *sev, bool vectored)
{
	struct aio_context ac;
	int fd;

	fd = aio_md_setup();
	aio_context_init(&ac, fd, fd, MD_LEN);
	if (vectored) {
		aio_writev_test(&ac, comp, sev);
		aio_readv_test(&ac, comp, sev);
	} else {
		aio_write_test(&ac, comp, sev);
		aio_read_test(&ac, comp, sev);
	}

	close(fd);
}

ATF_TC_WITH_CLEANUP(md_kq);
ATF_TC_HEAD(md_kq, tc)
{

	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(md_kq, tc)
{
	aio_md_test(poll_kqueue, setup_kqueue(), false);
}
ATF_TC_CLEANUP(md_kq, tc)
{
	aio_md_cleanup();
}

ATF_TC_WITH_CLEANUP(md_poll);
ATF_TC_HEAD(md_poll, tc)
{

	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(md_poll, tc)
{
	aio_md_test(poll, NULL, false);
}
ATF_TC_CLEANUP(md_poll, tc)
{
	aio_md_cleanup();
}

ATF_TC_WITH_CLEANUP(md_signal);
ATF_TC_HEAD(md_signal, tc)
{

	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(md_signal, tc)
{
	aio_md_test(poll_signaled, setup_signal(), false);
}
ATF_TC_CLEANUP(md_signal, tc)
{
	aio_md_cleanup();
}

ATF_TC_WITH_CLEANUP(md_suspend);
ATF_TC_HEAD(md_suspend, tc)
{

	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(md_suspend, tc)
{
	aio_md_test(suspend, NULL, false);
}
ATF_TC_CLEANUP(md_suspend, tc)
{
	aio_md_cleanup();
}

ATF_TC_WITH_CLEANUP(md_thread);
ATF_TC_HEAD(md_thread, tc)
{

	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(md_thread, tc)
{
	aio_md_test(poll_signaled, setup_thread(), false);
}
ATF_TC_CLEANUP(md_thread, tc)
{
	aio_md_cleanup();
}

ATF_TC_WITH_CLEANUP(md_waitcomplete);
ATF_TC_HEAD(md_waitcomplete, tc)
{

	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(md_waitcomplete, tc)
{
	aio_md_test(waitcomplete, NULL, false);
}
ATF_TC_CLEANUP(md_waitcomplete, tc)
{
	aio_md_cleanup();
}

#define	ZVOL_VDEV_PATHNAME	"test_vdev"
#define	POOL_SIZE		(1 << 28)	/* 256 MB */
#define	ZVOL_SIZE		"64m"
#define	POOL_NAME		"aio_testpool"
#define	ZVOL_NAME		"aio_testvol"

/*
 * Create a file-backed ZFS pool containing a single zvol and return an open
 * descriptor for the zvol's device node.
 */
static int
aio_zvol_setup(void)
{
	FILE *pidfile;
	int fd;
	pid_t pid;
	char pool_name[80];
	char cmd[160];
	char zvol_name[160];
	char devname[160];

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_KERNEL_MODULE("zfs");

	fd = open(ZVOL_VDEV_PATHNAME, O_RDWR | O_CREAT, 0600);
	ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));
	ATF_REQUIRE_EQ_MSG(0,
	    ftruncate(fd, POOL_SIZE), "ftruncate failed: %s", strerror(errno));
	close(fd);

	pid = getpid();
	pidfile = fopen("pidfile", "w");
	ATF_REQUIRE_MSG(NULL != pidfile, "fopen: %s", strerror(errno));
	fprintf(pidfile, "%d", pid);
	fclose(pidfile);

	snprintf(pool_name, sizeof(pool_name), POOL_NAME ".%d", pid);
	snprintf(zvol_name, sizeof(zvol_name), "%s/" ZVOL_NAME, pool_name);
	snprintf(cmd, sizeof(cmd), "zpool create %s $PWD/" ZVOL_VDEV_PATHNAME,
	    pool_name);
	ATF_REQUIRE_EQ_MSG(0, system(cmd),
	    "zpool create failed: %s", strerror(errno));
	snprintf(cmd, sizeof(cmd),
	    "zfs create -o volblocksize=8192 -o volmode=dev -V "
	    ZVOL_SIZE " %s", zvol_name);
	ATF_REQUIRE_EQ_MSG(0, system(cmd),
	    "zfs create failed: %s", strerror(errno));

	snprintf(devname, sizeof(devname), "/dev/zvol/%s", zvol_name);
	do {
		fd = open(devname, O_RDWR);
	} while (fd == -1 && errno == EINTR);
	ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));
	return (fd);
}

static void
aio_zvol_cleanup(void)
{
	FILE *pidfile;
	pid_t testpid;
	char cmd[160];

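	/*
	 * aio_zvol_setup() records the creating process's pid in "pidfile"
	 * and embeds it in the pool name; read it back so the matching pool
	 * can be destroyed.
	 */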
	pidfile = fopen("pidfile", "r");
	if (pidfile == NULL && errno == ENOENT) {
		/* Setup probably failed. */
		return;
	}
	ATF_REQUIRE_MSG(NULL != pidfile, "fopen: %s", strerror(errno));
	ATF_REQUIRE_EQ(1, fscanf(pidfile, "%d", &testpid));
	fclose(pidfile);

	snprintf(cmd, sizeof(cmd), "zpool destroy " POOL_NAME ".%d", testpid);
	system(cmd);
}

ATF_TC_WITHOUT_HEAD(aio_large_read_test);
ATF_TC_BODY(aio_large_read_test, tc)
{
	struct aiocb cb, *cbp;
	ssize_t nread;
	size_t len;
	int fd;
#ifdef __LP64__
	int clamped;
#endif

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

#ifdef __LP64__
	len = sizeof(clamped);
	if (sysctlbyname("debug.iosize_max_clamp", &clamped, &len, NULL, 0) ==
	    -1)
		atf_libc_error(errno, "Failed to read debug.iosize_max_clamp");
#endif

	/* Determine the maximum supported read(2) size. */
	len = SSIZE_MAX;
#ifdef __LP64__
	if (clamped)
		len = INT_MAX;
#endif

	fd = open(FILE_PATHNAME, O_RDWR | O_CREAT, 0600);
	ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));

	unlink(FILE_PATHNAME);

	memset(&cb, 0, sizeof(cb));
	cb.aio_nbytes = len;
	cb.aio_fildes = fd;
	cb.aio_buf = NULL;
	if (aio_read(&cb) == -1)
		atf_tc_fail("aio_read() of maximum read size failed: %s",
		    strerror(errno));

	nread = aio_waitcomplete(&cbp, NULL);
	if (nread == -1)
		atf_tc_fail("aio_waitcomplete() failed: %s", strerror(errno));
	if (nread != 0)
		atf_tc_fail("aio_read() from empty file returned data: %zd",
		    nread);

	memset(&cb, 0, sizeof(cb));
	cb.aio_nbytes = len + 1;
	cb.aio_fildes = fd;
	cb.aio_buf = NULL;
	if (aio_read(&cb) == -1) {
		if (errno == EINVAL)
			goto finished;
		atf_tc_fail("aio_read() of too large read size failed: %s",
		    strerror(errno));
	}

	nread = aio_waitcomplete(&cbp, NULL);
	if (nread == -1) {
		if (errno == EINVAL)
			goto finished;
		atf_tc_fail("aio_waitcomplete() failed: %s", strerror(errno));
	}
	atf_tc_fail("aio_read() of too large read size returned: %zd", nread);

finished:
	close(fd);
}

/*
 * This tests for a bug where arriving socket data can wake up multiple
 * AIO read requests, resulting in an uncancellable request.
 */
ATF_TC_WITHOUT_HEAD(aio_socket_two_reads);
ATF_TC_BODY(aio_socket_two_reads, tc)
{
	struct ioreq {
		struct aiocb iocb;
		char buffer[1024];
	} ioreq[2];
	struct aiocb *iocb;
	unsigned i;
	int s[2];
	char c;

	ATF_REQUIRE_KERNEL_MODULE("aio");
#if __FreeBSD_version < 1100101
	atf_tc_skip("kernel version %d is too old (%d required)",
	    __FreeBSD_version, 1100101);
#endif

	ATF_REQUIRE(socketpair(PF_UNIX, SOCK_STREAM, 0, s) != -1);

	/* Queue two read requests. */
	memset(&ioreq, 0, sizeof(ioreq));
	for (i = 0; i < nitems(ioreq); i++) {
		ioreq[i].iocb.aio_nbytes = sizeof(ioreq[i].buffer);
		ioreq[i].iocb.aio_fildes = s[0];
		ioreq[i].iocb.aio_buf = ioreq[i].buffer;
		ATF_REQUIRE(aio_read(&ioreq[i].iocb) == 0);
	}

	/* Send a single byte.  This should complete one request. */
	c = 0xc3;
	ATF_REQUIRE(write(s[1], &c, sizeof(c)) == 1);

	ATF_REQUIRE(aio_waitcomplete(&iocb, NULL) == 1);

	/* Determine which request completed and verify the data was read. */
	if (iocb == &ioreq[0].iocb)
		i = 0;
	else
		i = 1;
	ATF_REQUIRE(ioreq[i].buffer[0] == c);

	i ^= 1;

	/*
	 * Try to cancel the other request.  On broken systems this
	 * will fail and the process will hang on exit.
	 */
	ATF_REQUIRE(aio_error(&ioreq[i].iocb) == EINPROGRESS);
	ATF_REQUIRE(aio_cancel(s[0], &ioreq[i].iocb) == AIO_CANCELED);

	close(s[1]);
	close(s[0]);
}

static void
aio_socket_blocking_short_write_test(bool vectored)
{
	struct aiocb iocb, *iocbp;
	struct iovec iov[2];
	char *buffer[2];
	ssize_t done, r;
	int buffer_size, sb_size;
	socklen_t len;
	int s[2];

	ATF_REQUIRE_KERNEL_MODULE("aio");

	ATF_REQUIRE(socketpair(PF_UNIX, SOCK_STREAM, 0, s) != -1);

	len = sizeof(sb_size);
	ATF_REQUIRE(getsockopt(s[0], SOL_SOCKET, SO_RCVBUF, &sb_size, &len) !=
	    -1);
	ATF_REQUIRE(len == sizeof(sb_size));
	buffer_size = sb_size;

	ATF_REQUIRE(getsockopt(s[1], SOL_SOCKET, SO_SNDBUF, &sb_size, &len) !=
	    -1);
	ATF_REQUIRE(len == sizeof(sb_size));
	if (sb_size > buffer_size)
		buffer_size = sb_size;

	/*
	 * Use twice the size of the MAX(receive buffer, send buffer)
	 * to ensure that the write is split up into multiple writes
	 * internally.
	 */
	buffer_size *= 2;

	buffer[0] = malloc(buffer_size);
	ATF_REQUIRE(buffer[0] != NULL);
	buffer[1] = malloc(buffer_size);
	ATF_REQUIRE(buffer[1] != NULL);

	srandomdev();
	aio_fill_buffer(buffer[1], buffer_size, random());

	memset(&iocb, 0, sizeof(iocb));
	iocb.aio_fildes = s[1];
	if (vectored) {
		iov[0].iov_base = buffer[1];
		iov[0].iov_len = buffer_size / 2 + 1;
		iov[1].iov_base = buffer[1] + buffer_size / 2 + 1;
		iov[1].iov_len = buffer_size / 2 - 1;
		iocb.aio_iov = iov;
		iocb.aio_iovcnt = 2;
		r = aio_writev(&iocb);
		ATF_CHECK_EQ_MSG(0, r, "aio_writev returned %zd", r);
	} else {
		iocb.aio_buf = buffer[1];
		iocb.aio_nbytes = buffer_size;
		r = aio_write(&iocb);
		ATF_CHECK_EQ_MSG(0, r, "aio_write returned %zd", r);
	}

	done = recv(s[0], buffer[0], buffer_size, MSG_WAITALL);
	ATF_REQUIRE(done == buffer_size);

	done = aio_waitcomplete(&iocbp, NULL);
	ATF_REQUIRE(iocbp == &iocb);
	ATF_REQUIRE(done == buffer_size);

	ATF_REQUIRE(memcmp(buffer[0], buffer[1], buffer_size) == 0);

	close(s[1]);
	close(s[0]);
}

/*
 * This test ensures that aio_write() on a blocking socket of a "large"
 * buffer does not return a short completion.
 */
ATF_TC_WITHOUT_HEAD(aio_socket_blocking_short_write);
ATF_TC_BODY(aio_socket_blocking_short_write, tc)
{
	aio_socket_blocking_short_write_test(false);
}

/*
 * Like aio_socket_blocking_short_write, but also tests that partially
 * completed vectored sends can be retried correctly.
 */
ATF_TC_WITHOUT_HEAD(aio_socket_blocking_short_write_vectored);
ATF_TC_BODY(aio_socket_blocking_short_write_vectored, tc)
{
	aio_socket_blocking_short_write_test(true);
}

/*
 * Verify that AIO requests fail when applied to a listening socket.
 */
ATF_TC_WITHOUT_HEAD(aio_socket_listen_fail);
ATF_TC_BODY(aio_socket_listen_fail, tc)
{
	struct aiocb iocb;
	struct sockaddr_un sun;
	char buf[16];
	int s;

	s = socket(AF_LOCAL, SOCK_STREAM, 0);
	ATF_REQUIRE(s != -1);

	memset(&sun, 0, sizeof(sun));
	snprintf(sun.sun_path, sizeof(sun.sun_path), "%s", "listen.XXXXXX");
	mktemp(sun.sun_path);
	sun.sun_family = AF_LOCAL;
	sun.sun_len = SUN_LEN(&sun);

	ATF_REQUIRE(bind(s, (struct sockaddr *)&sun, SUN_LEN(&sun)) == 0);
	ATF_REQUIRE(listen(s, 5) == 0);

	memset(buf, 0, sizeof(buf));
	memset(&iocb, 0, sizeof(iocb));
	iocb.aio_fildes = s;
	iocb.aio_buf = buf;
	iocb.aio_nbytes = sizeof(buf);

	ATF_REQUIRE_ERRNO(EINVAL, aio_read(&iocb) == -1);
	ATF_REQUIRE_ERRNO(EINVAL, aio_write(&iocb) == -1);

	ATF_REQUIRE(unlink(sun.sun_path) == 0);
	close(s);
}

/*
 * Verify that listen(2) fails if a socket has pending AIO requests.
 */
ATF_TC_WITHOUT_HEAD(aio_socket_listen_pending);
ATF_TC_BODY(aio_socket_listen_pending, tc)
{
	struct aiocb iocb;
	struct sockaddr_un sun;
	char buf[16];
	int s;

	s = socket(AF_LOCAL, SOCK_STREAM, 0);
	ATF_REQUIRE(s != -1);

	memset(&sun, 0, sizeof(sun));
	snprintf(sun.sun_path, sizeof(sun.sun_path), "%s", "listen.XXXXXX");
	mktemp(sun.sun_path);
	sun.sun_family = AF_LOCAL;
	sun.sun_len = SUN_LEN(&sun);

	ATF_REQUIRE(bind(s, (struct sockaddr *)&sun, SUN_LEN(&sun)) == 0);

	memset(buf, 0, sizeof(buf));
	memset(&iocb, 0, sizeof(iocb));
	iocb.aio_fildes = s;
	iocb.aio_buf = buf;
	iocb.aio_nbytes = sizeof(buf);
	ATF_REQUIRE(aio_read(&iocb) == 0);

	ATF_REQUIRE_ERRNO(EINVAL, listen(s, 5) == -1);

	ATF_REQUIRE(aio_cancel(s, &iocb) != -1);

	ATF_REQUIRE(unlink(sun.sun_path) == 0);
	close(s);
}

/*
 * This test verifies that cancelling a partially completed socket write
 * returns a short write rather than ECANCELED.
 */
ATF_TC_WITHOUT_HEAD(aio_socket_short_write_cancel);
ATF_TC_BODY(aio_socket_short_write_cancel, tc)
{
	struct aiocb iocb, *iocbp;
	char *buffer[2];
	ssize_t done;
	int buffer_size, sb_size;
	socklen_t len;
	int s[2];

	ATF_REQUIRE_KERNEL_MODULE("aio");

	ATF_REQUIRE(socketpair(PF_UNIX, SOCK_STREAM, 0, s) != -1);

	len = sizeof(sb_size);
	ATF_REQUIRE(getsockopt(s[0], SOL_SOCKET, SO_RCVBUF, &sb_size, &len) !=
	    -1);
	ATF_REQUIRE(len == sizeof(sb_size));
	buffer_size = sb_size;

	ATF_REQUIRE(getsockopt(s[1], SOL_SOCKET, SO_SNDBUF, &sb_size, &len) !=
	    -1);
	ATF_REQUIRE(len == sizeof(sb_size));
	if (sb_size > buffer_size)
		buffer_size = sb_size;

	/*
	 * Use three times the size of the MAX(receive buffer, send
	 * buffer) for the write to ensure that the write is split up
	 * into multiple writes internally.  The recv() ensures that
	 * the write has partially completed, but a remaining size of
	 * two buffers should ensure that the write has not completed
	 * fully when it is cancelled.
	 */
	buffer[0] = malloc(buffer_size);
	ATF_REQUIRE(buffer[0] != NULL);
	buffer[1] = malloc(buffer_size * 3);
	ATF_REQUIRE(buffer[1] != NULL);

	srandomdev();
	aio_fill_buffer(buffer[1], buffer_size * 3, random());

	memset(&iocb, 0, sizeof(iocb));
	iocb.aio_fildes = s[1];
	iocb.aio_buf = buffer[1];
	iocb.aio_nbytes = buffer_size * 3;
	ATF_REQUIRE(aio_write(&iocb) == 0);

	done = recv(s[0], buffer[0], buffer_size, MSG_WAITALL);
	ATF_REQUIRE(done == buffer_size);

	ATF_REQUIRE(aio_error(&iocb) == EINPROGRESS);
	ATF_REQUIRE(aio_cancel(s[1], &iocb) == AIO_NOTCANCELED);

	done = aio_waitcomplete(&iocbp, NULL);
	ATF_REQUIRE(iocbp == &iocb);
	ATF_REQUIRE(done >= buffer_size && done <= buffer_size * 2);

	ATF_REQUIRE(memcmp(buffer[0], buffer[1], buffer_size) == 0);

	close(s[1]);
	close(s[0]);
}

/*
 * Test handling of aio_read() and aio_write() on shut-down sockets.
 */
ATF_TC_WITHOUT_HEAD(aio_socket_shutdown);
ATF_TC_BODY(aio_socket_shutdown, tc)
{
	struct aiocb iocb;
	sigset_t set;
	char *buffer;
	ssize_t len;
	size_t bsz;
	int error, s[2];

	ATF_REQUIRE_KERNEL_MODULE("aio");

	ATF_REQUIRE(socketpair(PF_UNIX, SOCK_STREAM, 0, s) != -1);

	bsz = 1024;
	buffer = malloc(bsz);
	ATF_REQUIRE(buffer != NULL);
	memset(buffer, 0, bsz);

	/* Put some data in s[0]'s recv buffer. */
	ATF_REQUIRE(send(s[1], buffer, bsz, 0) == (ssize_t)bsz);

	/* No more reading from s[0]. */
	ATF_REQUIRE(shutdown(s[0], SHUT_RD) != -1);

	memset(&iocb, 0, sizeof(iocb));
	iocb.aio_fildes = s[0];
	iocb.aio_buf = buffer;
	iocb.aio_nbytes = bsz;
	ATF_REQUIRE(aio_read(&iocb) == 0);

	/* Expect to see zero bytes, analogous to recv(2). */
	while ((error = aio_error(&iocb)) == EINPROGRESS)
		usleep(25000);
	ATF_REQUIRE_MSG(error == 0, "aio_error() returned %d", error);
	len = aio_return(&iocb);
	ATF_REQUIRE_MSG(len == 0, "read job returned %zd bytes", len);

	/* No more writing to s[1]. */
	ATF_REQUIRE(shutdown(s[1], SHUT_WR) != -1);

	/* Block SIGPIPE so that we can detect the error in-band. */
	sigemptyset(&set);
	sigaddset(&set, SIGPIPE);
	ATF_REQUIRE(sigprocmask(SIG_BLOCK, &set, NULL) == 0);

	memset(&iocb, 0, sizeof(iocb));
	iocb.aio_fildes = s[1];
	iocb.aio_buf = buffer;
	iocb.aio_nbytes = bsz;
	ATF_REQUIRE(aio_write(&iocb) == 0);

	/* Expect an error, analogous to send(2). */
	while ((error = aio_error(&iocb)) == EINPROGRESS)
		usleep(25000);
	ATF_REQUIRE_MSG(error == EPIPE, "aio_error() returned %d", error);

	ATF_REQUIRE(close(s[0]) != -1);
	ATF_REQUIRE(close(s[1]) != -1);
	free(buffer);
}

/*
 * Test aio_fsync's behavior with bad inputs.
 */
ATF_TC_WITHOUT_HEAD(aio_fsync_errors);
ATF_TC_BODY(aio_fsync_errors, tc)
{
	int fd;
	struct aiocb iocb;

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	fd = open(FILE_PATHNAME, O_RDWR | O_CREAT, 0600);
	ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));
	unlink(FILE_PATHNAME);

	/* aio_fsync should return EINVAL unless op is O_SYNC or O_DSYNC. */
	memset(&iocb, 0, sizeof(iocb));
	iocb.aio_fildes = fd;
	ATF_CHECK_EQ(-1, aio_fsync(666, &iocb));
	ATF_CHECK_EQ(EINVAL, errno);

	/* aio_fsync should return EBADF if fd is not a valid descriptor. */
	memset(&iocb, 0, sizeof(iocb));
	iocb.aio_fildes = 666;
	ATF_CHECK_EQ(-1, aio_fsync(O_SYNC, &iocb));
	ATF_CHECK_EQ(EBADF, errno);

	/* aio_fsync should return EINVAL if sigev_notify is invalid. */
	memset(&iocb, 0, sizeof(iocb));
	iocb.aio_fildes = fd;
	iocb.aio_sigevent.sigev_notify = 666;
	ATF_CHECK_EQ(-1, aio_fsync(666, &iocb));
	ATF_CHECK_EQ(EINVAL, errno);
}

/*
 * This test just performs a basic test of aio_fsync().
 */
static void
aio_fsync_test(int op)
{
	struct aiocb synccb, *iocbp;
	struct {
		struct aiocb iocb;
		bool done;
		char *buffer;
	} buffers[16];
	struct stat sb;
	ssize_t rval;
	unsigned i;
	int fd;

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	fd = open(FILE_PATHNAME, O_RDWR | O_CREAT, 0600);
	ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));
	unlink(FILE_PATHNAME);

	ATF_REQUIRE(fstat(fd, &sb) == 0);
	ATF_REQUIRE(sb.st_blksize != 0);
	ATF_REQUIRE(ftruncate(fd, sb.st_blksize * nitems(buffers)) == 0);

	/*
	 * Queue several asynchronous write requests.  Hopefully this
	 * forces the aio_fsync() request to be deferred.  There is no
	 * reliable way to guarantee that however.
	 */
	srandomdev();
	for (i = 0; i < nitems(buffers); i++) {
		buffers[i].done = false;
		memset(&buffers[i].iocb, 0, sizeof(buffers[i].iocb));
		buffers[i].buffer = malloc(sb.st_blksize);
		aio_fill_buffer(buffers[i].buffer, sb.st_blksize, random());
		buffers[i].iocb.aio_fildes = fd;
		buffers[i].iocb.aio_buf = buffers[i].buffer;
		buffers[i].iocb.aio_nbytes = sb.st_blksize;
		buffers[i].iocb.aio_offset = sb.st_blksize * i;
		ATF_REQUIRE(aio_write(&buffers[i].iocb) == 0);
	}

	/* Queue the aio_fsync request. */
	memset(&synccb, 0, sizeof(synccb));
	synccb.aio_fildes = fd;
	ATF_REQUIRE(aio_fsync(op, &synccb) == 0);

	/* Wait for requests to complete. */
	for (;;) {
next:
		rval = aio_waitcomplete(&iocbp, NULL);
		ATF_REQUIRE(iocbp != NULL);
		if (iocbp == &synccb) {
			ATF_REQUIRE(rval == 0);
			break;
		}

		for (i = 0; i < nitems(buffers); i++) {
			if (iocbp == &buffers[i].iocb) {
				ATF_REQUIRE(buffers[i].done == false);
				ATF_REQUIRE(rval == sb.st_blksize);
				buffers[i].done = true;
				goto next;
			}
		}

		ATF_REQUIRE_MSG(false, "unmatched AIO request");
	}

	for (i = 0; i < nitems(buffers); i++)
		ATF_REQUIRE_MSG(buffers[i].done,
		    "AIO request %u did not complete", i);

	close(fd);
}

ATF_TC_WITHOUT_HEAD(aio_fsync_sync_test);
ATF_TC_BODY(aio_fsync_sync_test, tc)
{
	aio_fsync_test(O_SYNC);
}

ATF_TC_WITHOUT_HEAD(aio_fsync_dsync_test);
ATF_TC_BODY(aio_fsync_dsync_test, tc)
{
	aio_fsync_test(O_DSYNC);
}

/*
 * We shouldn't be able to DoS the system by setting iov_len to an insane
 * value.
 */
ATF_TC_WITHOUT_HEAD(aio_writev_dos_iov_len);
ATF_TC_BODY(aio_writev_dos_iov_len, tc)
{
	struct aiocb aio;
	const struct aiocb *const iocbs[] = {&aio};
	const char *wbuf = "Hello, world!";
	struct iovec iov[1];
	ssize_t r;
	int fd;

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	fd = open("testfile", O_RDWR | O_CREAT, 0600);
	ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));

	iov[0].iov_base = __DECONST(void*, wbuf);
	iov[0].iov_len = 1 << 30;
	bzero(&aio, sizeof(aio));
	aio.aio_fildes = fd;
	aio.aio_offset = 0;
	aio.aio_iov = iov;
	aio.aio_iovcnt = 1;

	r = aio_writev(&aio);
	ATF_CHECK_EQ_MSG(0, r, "aio_writev returned %zd", r);
	ATF_REQUIRE_EQ(0, aio_suspend(iocbs, 1, NULL));
	r = aio_return(&aio);
	ATF_CHECK_EQ_MSG(-1, r, "aio_return returned %zd", r);
	ATF_CHECK_MSG(errno == EFAULT || errno == EINVAL,
	    "aio_writev: %s", strerror(errno));

	close(fd);
}

/*
 * We shouldn't be able to DoS the system by setting aio_iovcnt to an insane
 * value.
 */
ATF_TC_WITHOUT_HEAD(aio_writev_dos_iovcnt);
ATF_TC_BODY(aio_writev_dos_iovcnt, tc)
{
	struct aiocb aio;
	const char *wbuf = "Hello, world!";
	struct iovec iov[1];
	ssize_t len;
	int fd;

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	fd = open("testfile", O_RDWR | O_CREAT, 0600);
	ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));

	len = strlen(wbuf);
	iov[0].iov_base = __DECONST(void*, wbuf);
	iov[0].iov_len = len;
	bzero(&aio, sizeof(aio));
	aio.aio_fildes = fd;
	aio.aio_offset = 0;
	aio.aio_iov = iov;
	aio.aio_iovcnt = 1 << 30;

	ATF_REQUIRE_EQ(-1, aio_writev(&aio));
	ATF_CHECK_EQ(EINVAL, errno);

	close(fd);
}

ATF_TC_WITH_CLEANUP(aio_writev_efault);
ATF_TC_HEAD(aio_writev_efault, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "Vectored AIO should gracefully handle invalid addresses");
	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(aio_writev_efault, tc)
{
	struct aiocb aio;
	ssize_t buflen;
	char *buffer;
	struct iovec iov[2];
	long seed;
	int fd;

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	fd = aio_md_setup();

	seed = random();
	buflen = 4096;
	buffer = malloc(buflen);
	aio_fill_buffer(buffer, buflen, seed);
	iov[0].iov_base = buffer;
	iov[0].iov_len = buflen;
	iov[1].iov_base = (void*)-1;	/* Invalid! */
	iov[1].iov_len = buflen;
	bzero(&aio, sizeof(aio));
	aio.aio_fildes = fd;
	aio.aio_offset = 0;
	aio.aio_iov = iov;
	aio.aio_iovcnt = nitems(iov);

	ATF_REQUIRE_EQ(-1, aio_writev(&aio));
	ATF_CHECK_EQ(EFAULT, errno);

	close(fd);
}
ATF_TC_CLEANUP(aio_writev_efault, tc)
{
	aio_md_cleanup();
}

ATF_TC_WITHOUT_HEAD(aio_writev_empty_file_poll);
ATF_TC_BODY(aio_writev_empty_file_poll, tc)
{
	struct aiocb aio;
	int fd;

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	fd = open("testfile", O_RDWR | O_CREAT, 0600);
	ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));

	bzero(&aio, sizeof(aio));
	aio.aio_fildes = fd;
	aio.aio_offset = 0;
	aio.aio_iovcnt = 0;

	ATF_REQUIRE_EQ(0, aio_writev(&aio));
	ATF_REQUIRE_EQ(0, suspend(&aio));

	close(fd);
}

ATF_TC_WITHOUT_HEAD(aio_writev_empty_file_signal);
ATF_TC_BODY(aio_writev_empty_file_signal, tc)
{
	struct aiocb aio;
	int fd;

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	fd = open("testfile", O_RDWR | O_CREAT, 0600);
	ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));

	bzero(&aio, sizeof(aio));
	aio.aio_fildes = fd;
	aio.aio_offset = 0;
	aio.aio_iovcnt = 0;
	aio.aio_sigevent = *setup_signal();

	ATF_REQUIRE_EQ(0, aio_writev(&aio));
	ATF_REQUIRE_EQ(0, poll_signaled(&aio));

	close(fd);
}

/*
 * Use an aiocb with kqueue and EV_ONESHOT.  kqueue should deliver the event
 * only once, even if the user doesn't promptly call aio_return.
 */
ATF_TC_WITHOUT_HEAD(ev_oneshot);
ATF_TC_BODY(ev_oneshot, tc)
{
	int fd, kq, nevents;
	struct aiocb iocb;
	struct kevent events[1];
	struct timespec timeout;

	ATF_REQUIRE_KERNEL_MODULE("aio");

	kq = kqueue();
	ATF_REQUIRE(kq >= 0);

	fd = open(FILE_PATHNAME, O_RDWR | O_CREAT, 0600);
	ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));

	memset(&iocb, 0, sizeof(iocb));
	iocb.aio_fildes = fd;
	iocb.aio_sigevent.sigev_notify_kqueue = kq;
	iocb.aio_sigevent.sigev_value.sival_ptr = (void*)0xdeadbeef;
	iocb.aio_sigevent.sigev_notify_kevent_flags = EV_ONESHOT;
	iocb.aio_sigevent.sigev_notify = SIGEV_KEVENT;

	ATF_CHECK_EQ(0, aio_fsync(O_SYNC, &iocb));

	nevents = kevent(kq, NULL, 0, events, 1, NULL);
	ATF_CHECK_EQ(1, nevents);
	ATF_CHECK_EQ(events[0].ident, (uintptr_t) &iocb);
	ATF_CHECK_EQ(events[0].filter, EVFILT_AIO);
	ATF_CHECK_EQ(events[0].flags, EV_EOF | EV_ONESHOT);
	ATF_CHECK_EQ(events[0].fflags, 0);
	ATF_CHECK_EQ(events[0].data, 0);
	ATF_CHECK_EQ((uintptr_t)events[0].udata, 0xdeadbeef);

	/*
	 * Even though we haven't called aio_return, kevent will not return the
	 * event again due to EV_ONESHOT.
	 */
	timeout.tv_sec = 0;
	timeout.tv_nsec = 100000000;
	nevents = kevent(kq, NULL, 0, events, 1, &timeout);
	ATF_CHECK_EQ(0, nevents);

	ATF_CHECK_EQ(0, aio_return(&iocb));
	close(fd);
	close(kq);
}

/*
 * aio_writev and aio_readv should still work even if the iovcnt is greater
 * than the number of buffered AIO operations permitted per process.
 */
ATF_TC_WITH_CLEANUP(vectored_big_iovcnt);
ATF_TC_HEAD(vectored_big_iovcnt, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "Vectored AIO should still work even if the iovcnt is greater than "
	    "the number of buffered AIO operations permitted by the process");
	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(vectored_big_iovcnt, tc)
{
	struct aiocb aio;
	struct iovec *iov;
	ssize_t len, buflen;
	char *buffer;
	const char *oid = "vfs.aio.max_buf_aio";
	long seed;
	int max_buf_aio;
	int fd, i;
	size_t sysctl_len = sizeof(max_buf_aio);

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	if (sysctlbyname(oid, &max_buf_aio, &sysctl_len, NULL, 0) == -1)
		atf_libc_error(errno, "Failed to read %s", oid);

	seed = random();
	buflen = 512 * (max_buf_aio + 1);
	buffer = malloc(buflen);
	aio_fill_buffer(buffer, buflen, seed);
	iov = calloc(max_buf_aio + 1, sizeof(struct iovec));

	fd = aio_md_setup();

	bzero(&aio, sizeof(aio));
	aio.aio_fildes = fd;
	aio.aio_offset = 0;
	for (i = 0; i < max_buf_aio + 1; i++) {
		iov[i].iov_base = &buffer[i * 512];
		iov[i].iov_len = 512;
	}
	aio.aio_iov = iov;
	aio.aio_iovcnt = max_buf_aio + 1;

	if (aio_writev(&aio) < 0)
		atf_tc_fail("aio_writev failed: %s", strerror(errno));

	len = poll(&aio);
	if (len < 0)
		atf_tc_fail("aio failed: %s", strerror(errno));

	if (len != buflen)
		atf_tc_fail("aio short write (%jd)", (intmax_t)len);

	bzero(&aio, sizeof(aio));
	aio.aio_fildes = fd;
	aio.aio_offset = 0;
	aio.aio_iov = iov;
	aio.aio_iovcnt = max_buf_aio + 1;

	if (aio_readv(&aio) < 0)
		atf_tc_fail("aio_readv failed: %s", strerror(errno));

	len = poll(&aio);
	if (len < 0)
		atf_tc_fail("aio failed: %s", strerror(errno));

	if (len != buflen)
		atf_tc_fail("aio short read (%jd)", (intmax_t)len);

	if (aio_test_buffer(buffer, buflen, seed) == 0)
		atf_tc_fail("buffer mismatched");

	close(fd);
}
ATF_TC_CLEANUP(vectored_big_iovcnt, tc)
{
	aio_md_cleanup();
}

ATF_TC_WITHOUT_HEAD(vectored_file_poll);
ATF_TC_BODY(vectored_file_poll, tc)
{
	aio_file_test(poll, NULL, true);
}

ATF_TC_WITHOUT_HEAD(vectored_thread);
ATF_TC_BODY(vectored_thread, tc)
{
	aio_file_test(poll_signaled, setup_thread(), true);
}

ATF_TC_WITH_CLEANUP(vectored_md_poll);
ATF_TC_HEAD(vectored_md_poll, tc)
{
	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(vectored_md_poll, tc)
{
	aio_md_test(poll, NULL, true);
}
ATF_TC_CLEANUP(vectored_md_poll, tc)
{
	aio_md_cleanup();
}

ATF_TC_WITHOUT_HEAD(vectored_socket_poll);
ATF_TC_BODY(vectored_socket_poll, tc)
{
	aio_unix_socketpair_test(poll, NULL, true);
}

/*
 * aio_writev and aio_readv should still work even if the iov contains
 * elements that aren't a multiple of the device's sector size, and even if
 * the total amount of I/O _is_ a multiple of the device's sector size.
 */
ATF_TC_WITH_CLEANUP(vectored_unaligned);
ATF_TC_HEAD(vectored_unaligned, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "Vectored AIO should still work even if the iov contains elements "
	    "that aren't a multiple of the sector size.");
	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(vectored_unaligned, tc)
{
	struct aio_context ac;
	struct aiocb aio;
	struct iovec iov[3];
	ssize_t len, total_len;
	int fd;

	if (atf_tc_get_config_var_as_bool_wd(tc, "ci", false))
		atf_tc_skip("https://bugs.freebsd.org/258766");

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	/*
	 * Use a zvol with volmode=dev, so it will allow .d_write with
	 * unaligned uio.  geom devices use physio, which doesn't allow that.
	 */
	fd = aio_zvol_setup();
	aio_context_init(&ac, fd, fd, FILE_LEN);

	/* Break the buffer into 3 parts:
	 * * A 4kB part, aligned to 4kB
	 * * Two other parts that add up to 4kB:
	 *   - 256B
	 *   - 4kB - 256B
	 */
	iov[0].iov_base = ac.ac_buffer;
	iov[0].iov_len = 4096;
	iov[1].iov_base = (void*)((uintptr_t)iov[0].iov_base + iov[0].iov_len);
	iov[1].iov_len = 256;
	iov[2].iov_base = (void*)((uintptr_t)iov[1].iov_base + iov[1].iov_len);
	iov[2].iov_len = 4096 - iov[1].iov_len;
	total_len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
	bzero(&aio, sizeof(aio));
	aio.aio_fildes = ac.ac_write_fd;
	aio.aio_offset = 0;
	aio.aio_iov = iov;
	aio.aio_iovcnt = 3;

	if (aio_writev(&aio) < 0)
		atf_tc_fail("aio_writev failed: %s", strerror(errno));

	len = poll(&aio);
	if (len < 0)
		atf_tc_fail("aio failed: %s", strerror(errno));

	if (len != total_len)
		atf_tc_fail("aio short write (%jd)", (intmax_t)len);

	bzero(&aio, sizeof(aio));
	aio.aio_fildes = ac.ac_read_fd;
	aio.aio_offset = 0;
	aio.aio_iov = iov;
	aio.aio_iovcnt = 3;

	if (aio_readv(&aio) < 0)
		atf_tc_fail("aio_readv failed: %s", strerror(errno));
	len = poll(&aio);

	ATF_REQUIRE_MSG(aio_test_buffer(ac.ac_buffer, total_len,
	    ac.ac_seed) != 0, "aio_test_buffer: internal error");

	close(fd);
}
ATF_TC_CLEANUP(vectored_unaligned, tc)
{
	aio_zvol_cleanup();
}

static void
aio_zvol_test(completion comp, struct sigevent *sev, bool vectored)
{
	struct aio_context ac;
	int fd;

	fd = aio_zvol_setup();
	aio_context_init(&ac, fd, fd, MD_LEN);
	if (vectored) {
		aio_writev_test(&ac, comp, sev);
		aio_readv_test(&ac, comp, sev);
	} else {
		aio_write_test(&ac, comp, sev);
		aio_read_test(&ac, comp, sev);
	}

	close(fd);
}

/*
 * Note that unlike md, the zvol is not a geom device, does not allow unmapped
 * buffers, and does not use physio.
 */
ATF_TC_WITH_CLEANUP(vectored_zvol_poll);
ATF_TC_HEAD(vectored_zvol_poll, tc)
{
	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(vectored_zvol_poll, tc)
{
	if (atf_tc_get_config_var_as_bool_wd(tc, "ci", false))
		atf_tc_skip("https://bugs.freebsd.org/258766");
	aio_zvol_test(poll, NULL, true);
}
ATF_TC_CLEANUP(vectored_zvol_poll, tc)
{
	aio_zvol_cleanup();
}

ATF_TP_ADD_TCS(tp)
{

	/* Test every file type with every completion method. */
	ATF_TP_ADD_TC(tp, file_kq);
	ATF_TP_ADD_TC(tp, file_poll);
	ATF_TP_ADD_TC(tp, file_signal);
	ATF_TP_ADD_TC(tp, file_suspend);
	ATF_TP_ADD_TC(tp, file_thread);
	ATF_TP_ADD_TC(tp, file_waitcomplete);
	ATF_TP_ADD_TC(tp, fifo_kq);
	ATF_TP_ADD_TC(tp, fifo_poll);
	ATF_TP_ADD_TC(tp, fifo_signal);
	ATF_TP_ADD_TC(tp, fifo_suspend);
	ATF_TP_ADD_TC(tp, fifo_thread);
	ATF_TP_ADD_TC(tp, fifo_waitcomplete);
	ATF_TP_ADD_TC(tp, socket_kq);
	ATF_TP_ADD_TC(tp, socket_poll);
	ATF_TP_ADD_TC(tp, socket_signal);
	ATF_TP_ADD_TC(tp, socket_suspend);
	ATF_TP_ADD_TC(tp, socket_thread);
	ATF_TP_ADD_TC(tp, socket_waitcomplete);
	ATF_TP_ADD_TC(tp, pty_kq);
	ATF_TP_ADD_TC(tp, pty_poll);
	ATF_TP_ADD_TC(tp, pty_signal);
	ATF_TP_ADD_TC(tp, pty_suspend);
	ATF_TP_ADD_TC(tp, pty_thread);
	ATF_TP_ADD_TC(tp, pty_waitcomplete);
	ATF_TP_ADD_TC(tp, pipe_kq);
	ATF_TP_ADD_TC(tp, pipe_poll);
	ATF_TP_ADD_TC(tp, pipe_signal);
	ATF_TP_ADD_TC(tp, pipe_suspend);
	ATF_TP_ADD_TC(tp, pipe_thread);
	ATF_TP_ADD_TC(tp, pipe_waitcomplete);
	ATF_TP_ADD_TC(tp, md_kq);
	ATF_TP_ADD_TC(tp, md_poll);
	ATF_TP_ADD_TC(tp, md_signal);
	ATF_TP_ADD_TC(tp, md_suspend);
	ATF_TP_ADD_TC(tp, md_thread);
	ATF_TP_ADD_TC(tp, md_waitcomplete);

	/* Various special cases. */
	ATF_TP_ADD_TC(tp, aio_fsync_errors);
	ATF_TP_ADD_TC(tp, aio_fsync_sync_test);
	ATF_TP_ADD_TC(tp, aio_fsync_dsync_test);
	ATF_TP_ADD_TC(tp, aio_large_read_test);
	ATF_TP_ADD_TC(tp, aio_socket_two_reads);
	ATF_TP_ADD_TC(tp, aio_socket_blocking_short_write);
	ATF_TP_ADD_TC(tp, aio_socket_blocking_short_write_vectored);
	ATF_TP_ADD_TC(tp, aio_socket_listen_fail);
	ATF_TP_ADD_TC(tp, aio_socket_listen_pending);
	ATF_TP_ADD_TC(tp, aio_socket_short_write_cancel);
	ATF_TP_ADD_TC(tp, aio_socket_shutdown);
	ATF_TP_ADD_TC(tp, aio_writev_dos_iov_len);
	ATF_TP_ADD_TC(tp, aio_writev_dos_iovcnt);
	ATF_TP_ADD_TC(tp, aio_writev_efault);
	ATF_TP_ADD_TC(tp, aio_writev_empty_file_poll);
	ATF_TP_ADD_TC(tp, aio_writev_empty_file_signal);
	ATF_TP_ADD_TC(tp, ev_oneshot);
	ATF_TP_ADD_TC(tp, vectored_big_iovcnt);
	ATF_TP_ADD_TC(tp, vectored_file_poll);
	ATF_TP_ADD_TC(tp, vectored_md_poll);
	ATF_TP_ADD_TC(tp, vectored_zvol_poll);
	ATF_TP_ADD_TC(tp, vectored_unaligned);
	ATF_TP_ADD_TC(tp, vectored_socket_poll);
	ATF_TP_ADD_TC(tp, vectored_thread);

	return (atf_no_error());
}