/*-
 * Copyright (c) 2004 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Regression test to do some very basic AIO exercising on several types of
 * file descriptors.  Currently, the tests consist of initializing a fixed
 * size buffer with pseudo-random data, writing it to one fd using AIO, then
 * reading it from a second descriptor using AIO.  For some targets, the same
 * fd is used for write and read (i.e., file, md device), but for others the
 * operation is performed on a peer (pty, socket, fifo, etc).  A timeout is
 * initiated to detect undue blocking.  This test does not attempt to exercise
 * error cases or more subtle asynchronous behavior, just make sure that the
 * basic operations work on some basic object types.
 */

#include <sys/param.h>
#include <sys/module.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/mdioctl.h>

#include <aio.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libutil.h>
#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <termios.h>
#include <unistd.h>

#include <atf-c.h>

#include "freebsd_test_suite/macros.h"
#include "local.h"

#define	PATH_TEMPLATE	"aio.XXXXXXXXXX"

/*
 * GLOBAL_MAX sets the largest usable buffer size to be read and written, as
 * it sizes ac_buffer in the aio_context structure.  It is also the default
 * size for file I/O.  For other types, we use smaller blocks or we risk
 * blocking (and we run in a single process/thread so that would be bad).
 */
#define	GLOBAL_MAX	16384

#define	BUFFER_MAX	GLOBAL_MAX
struct aio_context {
	int	 ac_read_fd, ac_write_fd;
	long	 ac_seed;
	char	 ac_buffer[GLOBAL_MAX];
	int	 ac_buflen;
	int	 ac_seconds;
	void	(*ac_cleanup)(void *arg);
	void	*ac_cleanup_arg;
};

static int	aio_timedout;

/*
 * Each test run specifies a timeout in seconds.  Use the somewhat obsoleted
 * signal(3) and alarm(3) APIs to set this up.
 */
static void
aio_timeout_signal(int sig __unused)
{

	aio_timedout = 1;
}

static void
aio_timeout_start(int seconds)
{

	aio_timedout = 0;
	ATF_REQUIRE_MSG(signal(SIGALRM, aio_timeout_signal) != SIG_ERR,
	    "failed to set SIGALRM handler: %s", strerror(errno));
	alarm(seconds);
}

static void
aio_timeout_stop(void)
{

	ATF_REQUIRE_MSG(signal(SIGALRM, NULL) != SIG_ERR,
	    "failed to reset SIGALRM handler to default: %s", strerror(errno));
	alarm(0);
}

/*
 * Fill a buffer given a seed that can be fed into srandom() to initialize
 * the PRNG in a repeatable manner.
 */
static void
aio_fill_buffer(char *buffer, int len, long seed)
{
	char ch;
	int i;

	srandom(seed);
	for (i = 0; i < len; i++) {
		ch = random() & 0xff;
		buffer[i] = ch;
	}
}

/*
 * Test that a buffer matches a given seed.  See aio_fill_buffer().  Return
 * (1) on a match, (0) on a mismatch.
 */
static int
aio_test_buffer(char *buffer, int len, long seed)
{
	char ch;
	int i;

	srandom(seed);
	for (i = 0; i < len; i++) {
		ch = random() & 0xff;
		if (buffer[i] != ch)
			return (0);
	}
	return (1);
}

/*
 * Initialize a testing context given the file descriptors provided by the
 * test setup.
 */
static void
aio_context_init(struct aio_context *ac, int read_fd,
    int write_fd, int buflen, int seconds, void (*cleanup)(void *),
    void *cleanup_arg)
{

	ATF_REQUIRE_MSG(buflen <= BUFFER_MAX,
	    "aio_context_init: buffer too large (%d > %d)",
	    buflen, BUFFER_MAX);
	bzero(ac, sizeof(*ac));
	ac->ac_read_fd = read_fd;
	ac->ac_write_fd = write_fd;
	ac->ac_buflen = buflen;
	srandomdev();
	ac->ac_seed = random();
	aio_fill_buffer(ac->ac_buffer, buflen, ac->ac_seed);
	ATF_REQUIRE_MSG(aio_test_buffer(ac->ac_buffer, buflen,
	    ac->ac_seed) != 0, "aio_test_buffer: internal error");
	ac->ac_seconds = seconds;
	ac->ac_cleanup = cleanup;
	ac->ac_cleanup_arg = cleanup_arg;
}

/*
 * Each tester can register a callback to clean up in the event the test
 * fails.  Preserve the value of errno so that subsequent calls to errx()
 * work properly.
 */
static void
aio_cleanup(struct aio_context *ac)
{
	int error;

	if (ac->ac_cleanup == NULL)
		return;
	error = errno;
	(ac->ac_cleanup)(ac->ac_cleanup_arg);
	errno = error;
}

/*
 * Perform a simple write test of our initialized data buffer to the provided
 * file descriptor.
 */
static void
aio_write_test(struct aio_context *ac)
{
	struct aiocb aio, *aiop;
	ssize_t len;

	ATF_REQUIRE_KERNEL_MODULE("aio");

	bzero(&aio, sizeof(aio));
	aio.aio_buf = ac->ac_buffer;
	aio.aio_nbytes = ac->ac_buflen;
	aio.aio_fildes = ac->ac_write_fd;
	aio.aio_offset = 0;

	aio_timeout_start(ac->ac_seconds);

	if (aio_write(&aio) < 0) {
		if (errno == EINTR) {
			if (aio_timedout) {
				aio_cleanup(ac);
				atf_tc_fail("aio_write timed out");
			}
		}
		aio_cleanup(ac);
		atf_tc_fail("aio_write failed: %s", strerror(errno));
	}

	len = aio_waitcomplete(&aiop, NULL);
	if (len < 0) {
		if (errno == EINTR) {
			if (aio_timedout) {
				aio_cleanup(ac);
				atf_tc_fail("aio_waitcomplete timed out");
			}
		}
		aio_cleanup(ac);
		atf_tc_fail("aio_waitcomplete failed: %s", strerror(errno));
	}

	aio_timeout_stop();

	if (len != ac->ac_buflen) {
		aio_cleanup(ac);
		atf_tc_fail("aio_waitcomplete short write (%jd)",
		    (intmax_t)len);
	}
}

/*
 * Perform a simple read test of our initialized data buffer from the
 * provided file descriptor.
 */
static void
aio_read_test(struct aio_context *ac)
{
	struct aiocb aio, *aiop;
	ssize_t len;

	ATF_REQUIRE_KERNEL_MODULE("aio");

	bzero(ac->ac_buffer, ac->ac_buflen);
	bzero(&aio, sizeof(aio));
	aio.aio_buf = ac->ac_buffer;
	aio.aio_nbytes = ac->ac_buflen;
	aio.aio_fildes = ac->ac_read_fd;
	aio.aio_offset = 0;

	aio_timeout_start(ac->ac_seconds);

	if (aio_read(&aio) < 0) {
		if (errno == EINTR) {
			if (aio_timedout) {
				aio_cleanup(ac);
				atf_tc_fail("aio_read timed out");
			}
		}
		aio_cleanup(ac);
		atf_tc_fail("aio_read failed: %s", strerror(errno));
	}

	len = aio_waitcomplete(&aiop, NULL);
	if (len < 0) {
		if (errno == EINTR) {
			if (aio_timedout) {
				aio_cleanup(ac);
				atf_tc_fail("aio_waitcomplete timed out");
			}
		}
		aio_cleanup(ac);
		atf_tc_fail("aio_waitcomplete failed: %s", strerror(errno));
	}

	aio_timeout_stop();

	if (len != ac->ac_buflen) {
		aio_cleanup(ac);
		atf_tc_fail("aio_waitcomplete short read (%jd)",
		    (intmax_t)len);
	}

	if (aio_test_buffer(ac->ac_buffer, ac->ac_buflen, ac->ac_seed) == 0) {
		aio_cleanup(ac);
		atf_tc_fail("buffer mismatched");
	}
}

/*
 * Series of type-specific tests for AIO.  For now, we just make sure we can
 * issue a write and then a read to each type.  We assume that once a write
 * is issued, a read can follow.
 */

/*
 * Test with a classic file.  Assumes we can create a moderate size temporary
 * file.
 */
struct aio_file_arg {
	int	 afa_fd;
	char	*afa_pathname;
};

static void
aio_file_cleanup(void *arg)
{
	struct aio_file_arg *afa;

	afa = arg;
	close(afa->afa_fd);
	unlink(afa->afa_pathname);
}

#define	FILE_LEN	GLOBAL_MAX
#define	FILE_TIMEOUT	30
ATF_TC_WITHOUT_HEAD(aio_file_test);
ATF_TC_BODY(aio_file_test, tc)
{
	char pathname[PATH_MAX];
	struct aio_file_arg arg;
	struct aio_context ac;
	int fd;

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	strcpy(pathname, PATH_TEMPLATE);
	fd = mkstemp(pathname);
	ATF_REQUIRE_MSG(fd != -1, "mkstemp failed: %s", strerror(errno));

	arg.afa_fd = fd;
	arg.afa_pathname = pathname;

	aio_context_init(&ac, fd, fd, FILE_LEN,
	    FILE_TIMEOUT, aio_file_cleanup, &arg);
	aio_write_test(&ac);
	aio_read_test(&ac);

	aio_file_cleanup(&arg);
}

struct aio_fifo_arg {
	int	 afa_read_fd;
	int	 afa_write_fd;
	char	*afa_pathname;
};

static void
aio_fifo_cleanup(void *arg)
{
	struct aio_fifo_arg *afa;

	afa = arg;
	if (afa->afa_read_fd != -1)
		close(afa->afa_read_fd);
	if (afa->afa_write_fd != -1)
		close(afa->afa_write_fd);
	unlink(afa->afa_pathname);
}

#define	FIFO_LEN	256
#define	FIFO_TIMEOUT	30
ATF_TC_WITHOUT_HEAD(aio_fifo_test);
ATF_TC_BODY(aio_fifo_test, tc)
{
	int error, read_fd = -1, write_fd = -1;
	struct aio_fifo_arg arg;
	char pathname[PATH_MAX];
	struct aio_context ac;

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	/*
	 * In theory, mkstemp() can return a name that is then collided with.
	 * Because this is a regression test, we treat that as a test failure
	 * rather than retrying.
	 */
	strcpy(pathname, PATH_TEMPLATE);
	ATF_REQUIRE_MSG(mkstemp(pathname) != -1,
	    "mkstemp failed: %s", strerror(errno));
	ATF_REQUIRE_MSG(unlink(pathname) == 0,
	    "unlink failed: %s", strerror(errno));
	ATF_REQUIRE_MSG(mkfifo(pathname, 0600) != -1,
	    "mkfifo failed: %s", strerror(errno));
	arg.afa_pathname = pathname;
	arg.afa_read_fd = -1;
	arg.afa_write_fd = -1;

	read_fd = open(pathname, O_RDONLY | O_NONBLOCK);
	if (read_fd == -1) {
		error = errno;
		aio_fifo_cleanup(&arg);
		errno = error;
		atf_tc_fail("read_fd open failed: %s",
		    strerror(errno));
	}
	arg.afa_read_fd = read_fd;

	write_fd = open(pathname, O_WRONLY);
	if (write_fd == -1) {
		error = errno;
		aio_fifo_cleanup(&arg);
		errno = error;
		atf_tc_fail("write_fd open failed: %s",
		    strerror(errno));
	}
	arg.afa_write_fd = write_fd;

	aio_context_init(&ac, read_fd, write_fd, FIFO_LEN,
	    FIFO_TIMEOUT, aio_fifo_cleanup, &arg);
	aio_write_test(&ac);
	aio_read_test(&ac);

	aio_fifo_cleanup(&arg);
}

struct aio_unix_socketpair_arg {
	int	asa_sockets[2];
};

static void
aio_unix_socketpair_cleanup(void *arg)
{
	struct aio_unix_socketpair_arg *asa;

	asa = arg;
	close(asa->asa_sockets[0]);
	close(asa->asa_sockets[1]);
}

#define	UNIX_SOCKETPAIR_LEN	256
#define	UNIX_SOCKETPAIR_TIMEOUT	30
ATF_TC_WITHOUT_HEAD(aio_unix_socketpair_test);
ATF_TC_BODY(aio_unix_socketpair_test, tc)
{
	struct aio_unix_socketpair_arg arg;
	struct aio_context ac;
	struct rusage ru_before, ru_after;
	int sockets[2];

	ATF_REQUIRE_KERNEL_MODULE("aio");

	ATF_REQUIRE_MSG(socketpair(PF_UNIX, SOCK_STREAM, 0, sockets) != -1,
	    "socketpair failed: %s", strerror(errno));

	arg.asa_sockets[0] = sockets[0];
	arg.asa_sockets[1] = sockets[1];
	aio_context_init(&ac, sockets[0],
	    sockets[1], UNIX_SOCKETPAIR_LEN, UNIX_SOCKETPAIR_TIMEOUT,
	    aio_unix_socketpair_cleanup, &arg);
	ATF_REQUIRE_MSG(getrusage(RUSAGE_SELF, &ru_before) != -1,
	    "getrusage failed: %s", strerror(errno));
	aio_write_test(&ac);
	ATF_REQUIRE_MSG(getrusage(RUSAGE_SELF, &ru_after) != -1,
	    "getrusage failed: %s", strerror(errno));
	ATF_REQUIRE(ru_after.ru_msgsnd == ru_before.ru_msgsnd + 1);
	ru_before = ru_after;
	aio_read_test(&ac);
	ATF_REQUIRE_MSG(getrusage(RUSAGE_SELF, &ru_after) != -1,
	    "getrusage failed: %s", strerror(errno));
	ATF_REQUIRE(ru_after.ru_msgrcv == ru_before.ru_msgrcv + 1);

	aio_unix_socketpair_cleanup(&arg);
}

struct aio_pty_arg {
	int	apa_read_fd;
	int	apa_write_fd;
};

static void
aio_pty_cleanup(void *arg)
{
	struct aio_pty_arg *apa;

	apa = arg;
	close(apa->apa_read_fd);
	close(apa->apa_write_fd);
}

#define	PTY_LEN		256
#define	PTY_TIMEOUT	30
ATF_TC_WITHOUT_HEAD(aio_pty_test);
ATF_TC_BODY(aio_pty_test, tc)
{
	struct aio_pty_arg arg;
	struct aio_context ac;
	int read_fd, write_fd;
	struct termios ts;
	int error;

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	ATF_REQUIRE_MSG(openpty(&read_fd, &write_fd, NULL, NULL, NULL) == 0,
	    "openpty failed: %s", strerror(errno));

	arg.apa_read_fd = read_fd;
	arg.apa_write_fd = write_fd;

	if (tcgetattr(write_fd, &ts) < 0) {
		error = errno;
		aio_pty_cleanup(&arg);
		errno = error;
		atf_tc_fail("tcgetattr failed: %s", strerror(errno));
	}
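	/*
	 * Put the pty into raw mode below so that the tty layer passes the
	 * pseudo-random test data through unmodified; the default line
	 * discipline processing (CR/NL translation, special characters)
	 * would otherwise alter the byte stream and break the buffer
	 * comparison in aio_read_test().
	 */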
	cfmakeraw(&ts);
	if (tcsetattr(write_fd, TCSANOW, &ts) < 0) {
		error = errno;
		aio_pty_cleanup(&arg);
		errno = error;
		atf_tc_fail("tcsetattr failed: %s", strerror(errno));
	}
	aio_context_init(&ac, read_fd, write_fd, PTY_LEN,
	    PTY_TIMEOUT, aio_pty_cleanup, &arg);

	aio_write_test(&ac);
	aio_read_test(&ac);

	aio_pty_cleanup(&arg);
}

static void
aio_pipe_cleanup(void *arg)
{
	int *pipes = arg;

	close(pipes[0]);
	close(pipes[1]);
}

#define	PIPE_LEN	256
#define	PIPE_TIMEOUT	30
ATF_TC_WITHOUT_HEAD(aio_pipe_test);
ATF_TC_BODY(aio_pipe_test, tc)
{
	struct aio_context ac;
	int pipes[2];

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	ATF_REQUIRE_MSG(pipe(pipes) != -1,
	    "pipe failed: %s", strerror(errno));

	aio_context_init(&ac, pipes[0], pipes[1], PIPE_LEN,
	    PIPE_TIMEOUT, aio_pipe_cleanup, pipes);
	aio_write_test(&ac);
	aio_read_test(&ac);

	aio_pipe_cleanup(pipes);
}

struct aio_md_arg {
	int	ama_mdctl_fd;
	int	ama_unit;
	int	ama_fd;
};

static void
aio_md_cleanup(void *arg)
{
	struct aio_md_arg *ama;
	struct md_ioctl mdio;
	int error;

	ama = arg;

	if (ama->ama_fd != -1)
		close(ama->ama_fd);

	if (ama->ama_unit != -1) {
		bzero(&mdio, sizeof(mdio));
		mdio.md_version = MDIOVERSION;
		mdio.md_unit = ama->ama_unit;
		if (ioctl(ama->ama_mdctl_fd, MDIOCDETACH, &mdio) == -1) {
			error = errno;
			close(ama->ama_mdctl_fd);
			errno = error;
			atf_tc_fail("ioctl MDIOCDETACH failed: %s",
			    strerror(errno));
		}
	}

	close(ama->ama_mdctl_fd);
}

#define	MD_LEN		GLOBAL_MAX
#define	MD_TIMEOUT	30
ATF_TC(aio_md_test);
ATF_TC_HEAD(aio_md_test, tc)
{

	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(aio_md_test, tc)
{
	int error, fd, mdctl_fd, unit;
	char pathname[PATH_MAX];
	struct aio_md_arg arg;
	struct aio_context ac;
	struct md_ioctl mdio;

	ATF_REQUIRE_KERNEL_MODULE("aio");

	mdctl_fd = open("/dev/" MDCTL_NAME, O_RDWR, 0);
	ATF_REQUIRE_MSG(mdctl_fd != -1,
	    "opening /dev/%s failed: %s", MDCTL_NAME, strerror(errno));

	bzero(&mdio, sizeof(mdio));
	mdio.md_version = MDIOVERSION;
	mdio.md_type = MD_MALLOC;
	mdio.md_options = MD_AUTOUNIT | MD_COMPRESS;
	mdio.md_mediasize = GLOBAL_MAX;
	mdio.md_sectorsize = 512;

	arg.ama_mdctl_fd = mdctl_fd;
	arg.ama_unit = -1;
	arg.ama_fd = -1;
	if (ioctl(mdctl_fd, MDIOCATTACH, &mdio) < 0) {
		error = errno;
		aio_md_cleanup(&arg);
		errno = error;
		atf_tc_fail("ioctl MDIOCATTACH failed: %s", strerror(errno));
	}

	arg.ama_unit = unit = mdio.md_unit;
	snprintf(pathname, PATH_MAX, "/dev/md%d", unit);
	fd = open(pathname, O_RDWR);
	ATF_REQUIRE_MSG(fd != -1,
	    "opening %s failed: %s", pathname, strerror(errno));
	arg.ama_fd = fd;

	aio_context_init(&ac, fd, fd, MD_LEN, MD_TIMEOUT,
	    aio_md_cleanup, &arg);
	aio_write_test(&ac);
	aio_read_test(&ac);

	aio_md_cleanup(&arg);
}

ATF_TC_WITHOUT_HEAD(aio_large_read_test);
ATF_TC_BODY(aio_large_read_test, tc)
{
	char pathname[PATH_MAX];
	struct aiocb cb, *cbp;
	ssize_t nread;
	size_t len;
	int fd;
#ifdef __LP64__
	int clamped;
#endif

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

#ifdef __LP64__
	len = sizeof(clamped);
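	/*
	 * debug.iosize_max_clamp indicates whether the kernel clamps the
	 * size of a single read(2)/write(2) request to INT_MAX on 64-bit
	 * platforms; it selects which limit the "maximum read size" probe
	 * below exercises.
	 */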
	if (sysctlbyname("debug.iosize_max_clamp", &clamped, &len, NULL, 0) ==
	    -1)
		atf_libc_error(errno, "Failed to read debug.iosize_max_clamp");
#endif

	/* Determine the maximum supported read(2) size. */
	len = SSIZE_MAX;
#ifdef __LP64__
	if (clamped)
		len = INT_MAX;
#endif

	strcpy(pathname, PATH_TEMPLATE);
	fd = mkstemp(pathname);
	ATF_REQUIRE_MSG(fd != -1, "mkstemp failed: %s", strerror(errno));

	unlink(pathname);

	memset(&cb, 0, sizeof(cb));
	cb.aio_nbytes = len;
	cb.aio_fildes = fd;
	cb.aio_buf = NULL;
	if (aio_read(&cb) == -1)
		atf_tc_fail("aio_read() of maximum read size failed: %s",
		    strerror(errno));

	nread = aio_waitcomplete(&cbp, NULL);
	if (nread == -1)
		atf_tc_fail("aio_waitcomplete() failed: %s", strerror(errno));
	if (nread != 0)
		atf_tc_fail("aio_read() from empty file returned data: %zd",
		    nread);

	memset(&cb, 0, sizeof(cb));
	cb.aio_nbytes = len + 1;
	cb.aio_fildes = fd;
	cb.aio_buf = NULL;
	if (aio_read(&cb) == -1) {
		if (errno == EINVAL)
			goto finished;
		atf_tc_fail("aio_read() of too large read size failed: %s",
		    strerror(errno));
	}

	nread = aio_waitcomplete(&cbp, NULL);
	if (nread == -1) {
		if (errno == EINVAL)
			goto finished;
		atf_tc_fail("aio_waitcomplete() failed: %s", strerror(errno));
	}
	atf_tc_fail("aio_read() of too large read size returned: %zd", nread);

finished:
	close(fd);
}

/*
 * This tests for a bug where arriving socket data can wakeup multiple
 * AIO read requests resulting in an uncancellable request.
 */
ATF_TC_WITHOUT_HEAD(aio_socket_two_reads);
ATF_TC_BODY(aio_socket_two_reads, tc)
{
	struct ioreq {
		struct aiocb iocb;
		char buffer[1024];
	} ioreq[2];
	struct aiocb *iocb;
	unsigned i;
	int s[2];
	char c;

	ATF_REQUIRE_KERNEL_MODULE("aio");
#if __FreeBSD_version < 1100101
	atf_tc_skip("kernel version %d is too old (%d required)",
	    __FreeBSD_version, 1100101);
#endif

	ATF_REQUIRE(socketpair(PF_UNIX, SOCK_STREAM, 0, s) != -1);

	/* Queue two read requests. */
	memset(&ioreq, 0, sizeof(ioreq));
	for (i = 0; i < nitems(ioreq); i++) {
		ioreq[i].iocb.aio_nbytes = sizeof(ioreq[i].buffer);
		ioreq[i].iocb.aio_fildes = s[0];
		ioreq[i].iocb.aio_buf = ioreq[i].buffer;
		ATF_REQUIRE(aio_read(&ioreq[i].iocb) == 0);
	}

	/* Send a single byte.  This should complete one request. */
	c = 0xc3;
	ATF_REQUIRE(write(s[1], &c, sizeof(c)) == 1);

	ATF_REQUIRE(aio_waitcomplete(&iocb, NULL) == 1);

	/* Determine which request completed and verify the data was read. */
	if (iocb == &ioreq[0].iocb)
		i = 0;
	else
		i = 1;
	ATF_REQUIRE(ioreq[i].buffer[0] == c);

	i ^= 1;

	/*
	 * Try to cancel the other request.  On broken systems this
	 * will fail and the process will hang on exit.
	 */
	ATF_REQUIRE(aio_error(&ioreq[i].iocb) == EINPROGRESS);
	ATF_REQUIRE(aio_cancel(s[0], &ioreq[i].iocb) == AIO_CANCELED);

	close(s[1]);
	close(s[0]);
}

/*
 * This test ensures that aio_write() on a blocking socket of a "large"
 * buffer does not return a short completion.
 */
ATF_TC_WITHOUT_HEAD(aio_socket_blocking_short_write);
ATF_TC_BODY(aio_socket_blocking_short_write, tc)
{
	struct aiocb iocb, *iocbp;
	char *buffer[2];
	ssize_t done;
	int buffer_size, sb_size;
	socklen_t len;
	int s[2];

	ATF_REQUIRE_KERNEL_MODULE("aio");

	ATF_REQUIRE(socketpair(PF_UNIX, SOCK_STREAM, 0, s) != -1);

	len = sizeof(sb_size);
	ATF_REQUIRE(getsockopt(s[0], SOL_SOCKET, SO_RCVBUF, &sb_size, &len) !=
	    -1);
	ATF_REQUIRE(len == sizeof(sb_size));
	buffer_size = sb_size;

	ATF_REQUIRE(getsockopt(s[1], SOL_SOCKET, SO_SNDBUF, &sb_size, &len) !=
	    -1);
	ATF_REQUIRE(len == sizeof(sb_size));
	if (sb_size > buffer_size)
		buffer_size = sb_size;

	/*
	 * Use twice the size of the MAX(receive buffer, send buffer)
	 * to ensure that the write is split up into multiple writes
	 * internally.
	 */
	buffer_size *= 2;

	buffer[0] = malloc(buffer_size);
	ATF_REQUIRE(buffer[0] != NULL);
	buffer[1] = malloc(buffer_size);
	ATF_REQUIRE(buffer[1] != NULL);

	srandomdev();
	aio_fill_buffer(buffer[1], buffer_size, random());

	memset(&iocb, 0, sizeof(iocb));
	iocb.aio_fildes = s[1];
	iocb.aio_buf = buffer[1];
	iocb.aio_nbytes = buffer_size;
	ATF_REQUIRE(aio_write(&iocb) == 0);

	done = recv(s[0], buffer[0], buffer_size, MSG_WAITALL);
	ATF_REQUIRE(done == buffer_size);

	done = aio_waitcomplete(&iocbp, NULL);
	ATF_REQUIRE(iocbp == &iocb);
	ATF_REQUIRE(done == buffer_size);

	ATF_REQUIRE(memcmp(buffer[0], buffer[1], buffer_size) == 0);

	close(s[1]);
	close(s[0]);
}

/*
 * This test verifies that cancelling a partially completed socket write
 * returns a short write rather than ECANCELED.
 */
ATF_TC_WITHOUT_HEAD(aio_socket_short_write_cancel);
ATF_TC_BODY(aio_socket_short_write_cancel, tc)
{
	struct aiocb iocb, *iocbp;
	char *buffer[2];
	ssize_t done;
	int buffer_size, sb_size;
	socklen_t len;
	int s[2];

	ATF_REQUIRE_KERNEL_MODULE("aio");

	ATF_REQUIRE(socketpair(PF_UNIX, SOCK_STREAM, 0, s) != -1);

	len = sizeof(sb_size);
	ATF_REQUIRE(getsockopt(s[0], SOL_SOCKET, SO_RCVBUF, &sb_size, &len) !=
	    -1);
	ATF_REQUIRE(len == sizeof(sb_size));
	buffer_size = sb_size;

	ATF_REQUIRE(getsockopt(s[1], SOL_SOCKET, SO_SNDBUF, &sb_size, &len) !=
	    -1);
	ATF_REQUIRE(len == sizeof(sb_size));
	if (sb_size > buffer_size)
		buffer_size = sb_size;

	/*
	 * Use three times the size of the MAX(receive buffer, send
	 * buffer) for the write to ensure that the write is split up
	 * into multiple writes internally.  The recv() ensures that
	 * the write has partially completed, but a remaining size of
	 * two buffers should ensure that the write has not completed
	 * fully when it is cancelled.
	 */
	buffer[0] = malloc(buffer_size);
	ATF_REQUIRE(buffer[0] != NULL);
	buffer[1] = malloc(buffer_size * 3);
	ATF_REQUIRE(buffer[1] != NULL);

	srandomdev();
	aio_fill_buffer(buffer[1], buffer_size * 3, random());

	memset(&iocb, 0, sizeof(iocb));
	iocb.aio_fildes = s[1];
	iocb.aio_buf = buffer[1];
	iocb.aio_nbytes = buffer_size * 3;
	ATF_REQUIRE(aio_write(&iocb) == 0);

	done = recv(s[0], buffer[0], buffer_size, MSG_WAITALL);
	ATF_REQUIRE(done == buffer_size);

	ATF_REQUIRE(aio_error(&iocb) == EINPROGRESS);
	ATF_REQUIRE(aio_cancel(s[1], &iocb) == AIO_NOTCANCELED);

	done = aio_waitcomplete(&iocbp, NULL);
	ATF_REQUIRE(iocbp == &iocb);
	ATF_REQUIRE(done >= buffer_size && done <= buffer_size * 2);

	ATF_REQUIRE(memcmp(buffer[0], buffer[1], buffer_size) == 0);

	close(s[1]);
	close(s[0]);
}

/*
 * This test just performs a basic test of aio_fsync().
 */
ATF_TC_WITHOUT_HEAD(aio_fsync_test);
ATF_TC_BODY(aio_fsync_test, tc)
{
	struct aiocb synccb, *iocbp;
	struct {
		struct aiocb	iocb;
		bool		done;
		char		*buffer;
	} buffers[16];
	struct stat sb;
	char pathname[PATH_MAX];
	ssize_t rval;
	unsigned i;
	int fd;

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	strcpy(pathname, PATH_TEMPLATE);
	fd = mkstemp(pathname);
	ATF_REQUIRE_MSG(fd != -1, "mkstemp failed: %s", strerror(errno));
	unlink(pathname);

	ATF_REQUIRE(fstat(fd, &sb) == 0);
	ATF_REQUIRE(sb.st_blksize != 0);
	ATF_REQUIRE(ftruncate(fd, sb.st_blksize * nitems(buffers)) == 0);

	/*
	 * Queue several asynchronous write requests.  Hopefully this
	 * forces the aio_fsync() request to be deferred.  There is no
	 * reliable way to guarantee that however.
	 */
	srandomdev();
	for (i = 0; i < nitems(buffers); i++) {
		buffers[i].done = false;
		memset(&buffers[i].iocb, 0, sizeof(buffers[i].iocb));
		buffers[i].buffer = malloc(sb.st_blksize);
		aio_fill_buffer(buffers[i].buffer, sb.st_blksize, random());
		buffers[i].iocb.aio_fildes = fd;
		buffers[i].iocb.aio_buf = buffers[i].buffer;
		buffers[i].iocb.aio_nbytes = sb.st_blksize;
		buffers[i].iocb.aio_offset = sb.st_blksize * i;
		ATF_REQUIRE(aio_write(&buffers[i].iocb) == 0);
	}

	/* Queue the aio_fsync request. */
	memset(&synccb, 0, sizeof(synccb));
	synccb.aio_fildes = fd;
	ATF_REQUIRE(aio_fsync(O_SYNC, &synccb) == 0);

	/* Wait for requests to complete. */
	for (;;) {
	next:
		rval = aio_waitcomplete(&iocbp, NULL);
		ATF_REQUIRE(iocbp != NULL);
		if (iocbp == &synccb) {
			ATF_REQUIRE(rval == 0);
			break;
		}

		for (i = 0; i < nitems(buffers); i++) {
			if (iocbp == &buffers[i].iocb) {
				ATF_REQUIRE(buffers[i].done == false);
				ATF_REQUIRE(rval == sb.st_blksize);
				buffers[i].done = true;
				goto next;
			}
		}

		ATF_REQUIRE_MSG(false, "unmatched AIO request");
	}

	for (i = 0; i < nitems(buffers); i++)
		ATF_REQUIRE_MSG(buffers[i].done,
		    "AIO request %u did not complete", i);

	close(fd);
}

ATF_TP_ADD_TCS(tp)
{

	ATF_TP_ADD_TC(tp, aio_file_test);
	ATF_TP_ADD_TC(tp, aio_fifo_test);
	ATF_TP_ADD_TC(tp, aio_unix_socketpair_test);
	ATF_TP_ADD_TC(tp, aio_pty_test);
	ATF_TP_ADD_TC(tp, aio_pipe_test);
	ATF_TP_ADD_TC(tp, aio_md_test);
	ATF_TP_ADD_TC(tp, aio_large_read_test);
	ATF_TP_ADD_TC(tp, aio_socket_two_reads);
	ATF_TP_ADD_TC(tp, aio_socket_blocking_short_write);
	ATF_TP_ADD_TC(tp, aio_socket_short_write_cancel);
	ATF_TP_ADD_TC(tp, aio_fsync_test);

	return (atf_no_error());
}