1 /*-
2 * Copyright (c) 2004 Robert N. M. Watson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27 /*
28 * Regression test to do some very basic AIO exercising on several types of
29 * file descriptors. Currently, the tests consist of initializing a fixed
30 * size buffer with pseudo-random data, writing it to one fd using AIO, then
31 * reading it from a second descriptor using AIO. For some targets, the same
32 * fd is used for write and read (i.e., file, md device), but for others the
33 * operation is performed on a peer (pty, socket, fifo, etc). For each file
34 * descriptor type, several completion methods are tested. This test program
35 * does not attempt to exercise error cases or more subtle asynchronous
36 * behavior; it just makes sure that the basic operations work on some basic
37 * object types.
38 */
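
/*
 * A note on running these tests: they are normally driven by kyua(1).
 * Assuming the stock FreeBSD test layout, something like
 *
 *	kyua test -k /usr/tests/sys/aio/Kyuafile
 *
 * runs every case registered below; a single case can be selected by
 * appending its name, e.g. "aio_test:file_poll".
 */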
39
40 #include <sys/param.h>
41 #include <sys/event.h>
42 #include <sys/mdioctl.h>
43 #include <sys/module.h>
44 #include <sys/resource.h>
45 #include <sys/socket.h>
46 #include <sys/stat.h>
#include <sys/sysctl.h>
47 #include <sys/un.h>
48
49 #include <aio.h>
50 #include <err.h>
51 #include <errno.h>
52 #include <fcntl.h>
53 #include <libutil.h>
54 #include <limits.h>
55 #include <semaphore.h>
56 #include <signal.h>
57 #include <stdint.h>
58 #include <stdio.h>
59 #include <stdlib.h>
60 #include <string.h>
61 #include <termios.h>
62 #include <unistd.h>
63
64 #include <atf-c.h>
65
66 #include "local.h"
67
68 /*
69 * GLOBAL_MAX sets the largest usable buffer size to be read and written, as
70 * it sizes ac_buffer in the aio_context structure. It is also the default
71 * size for file I/O. For other types, we use smaller blocks or we risk
72 * blocking (and we run in a single process/thread so that would be bad).
73 */
74 #define GLOBAL_MAX 16384
75
76 #define BUFFER_MAX GLOBAL_MAX
77
78 /*
79 * A completion function will block until the aio has completed, then return
80 * the result of the aio. errno will be set appropriately.
81 */
82 typedef ssize_t (*completion)(struct aiocb*);
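
/*
 * For illustration only, a test pairs a completion function with a submitted
 * request roughly as follows (a minimal sketch; the helpers further down add
 * buffer verification and optional sigevent setup):
 *
 *	struct aiocb aio = { .aio_fildes = fd, .aio_buf = buf,
 *	    .aio_nbytes = len };
 *	ATF_REQUIRE(aio_write(&aio) == 0);
 *	ATF_REQUIRE(comp(&aio) == (ssize_t)len);
 */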
83
84 struct aio_context {
85 int ac_read_fd, ac_write_fd;
86 long ac_seed;
87 char ac_buffer[GLOBAL_MAX];
88 int ac_buflen;
89 int ac_seconds;
90 };
91
92 static sem_t completions;
93
94 /*
95 * Fill a buffer given a seed that can be fed into srandom() to initialize
96 * the PRNG in a repeatable manner.
97 */
98 static void
99 aio_fill_buffer(char *buffer, int len, long seed)
100 {
101 char ch;
102 int i;
103
104 srandom(seed);
105 for (i = 0; i < len; i++) {
106 ch = random() & 0xff;
107 buffer[i] = ch;
108 }
109 }
110
111 /*
112 * Test that a buffer matches a given seed. See aio_fill_buffer(). Return
113 * (1) on a match, (0) on a mismatch.
114 */
115 static int
116 aio_test_buffer(char *buffer, int len, long seed)
117 {
118 char ch;
119 int i;
120
121 srandom(seed);
122 for (i = 0; i < len; i++) {
123 ch = random() & 0xff;
124 if (buffer[i] != ch)
125 return (0);
126 }
127 return (1);
128 }
129
130 /*
131 * Initialize a testing context given the file descriptors provided by the
132 * test setup.
133 */
134 static void
135 aio_context_init(struct aio_context *ac, int read_fd,
136 int write_fd, int buflen)
137 {
138
139 ATF_REQUIRE_MSG(buflen <= BUFFER_MAX,
140 "aio_context_init: buffer too large (%d > %d)",
141 buflen, BUFFER_MAX);
142 bzero(ac, sizeof(*ac));
143 ac->ac_read_fd = read_fd;
144 ac->ac_write_fd = write_fd;
145 ac->ac_buflen = buflen;
146 srandomdev();
147 ac->ac_seed = random();
148 aio_fill_buffer(ac->ac_buffer, buflen, ac->ac_seed);
149 ATF_REQUIRE_MSG(aio_test_buffer(ac->ac_buffer, buflen,
150 ac->ac_seed) != 0, "aio_test_buffer: internal error");
151 }
152
153 static ssize_t
154 poll(struct aiocb *aio)
155 {
156 int error;
157
158 while ((error = aio_error(aio)) == EINPROGRESS)
159 usleep(25000);
160 if (error)
161 return (error);
162 else
163 return (aio_return(aio));
164 }
165
166 static void
167 sigusr1_handler(int sig __unused)
168 {
169 ATF_REQUIRE_EQ(0, sem_post(&completions));
170 }
171
172 static void
173 thr_handler(union sigval sv __unused)
174 {
175 ATF_REQUIRE_EQ(0, sem_post(&completions));
176 }
177
178 static ssize_t
179 poll_signaled(struct aiocb *aio)
180 {
181 int error;
182
183 ATF_REQUIRE_EQ(0, sem_wait(&completions));
184 error = aio_error(aio);
185 switch (error) {
186 case EINPROGRESS:
187 errno = EINTR;
188 return (-1);
189 case 0:
190 return (aio_return(aio));
191 default:
192 return (error);
193 }
194 }
195
196 /*
197 * Set up a signal handler for the signal-delivery tests. This isn't
198 * thread safe, but that's OK since ATF runs each test case in a separate
199 * process.
200 */
201 static struct sigevent*
202 setup_signal(void)
203 {
204 static struct sigevent sev;
205
206 ATF_REQUIRE_EQ(0, sem_init(&completions, false, 0));
207 sev.sigev_notify = SIGEV_SIGNAL;
208 sev.sigev_signo = SIGUSR1;
209 ATF_REQUIRE(SIG_ERR != signal(SIGUSR1, sigusr1_handler));
210 return (&sev);
211 }
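
/*
 * The SIGUSR1 handler above only posts the semaphore; poll_signaled() does
 * the matching sem_wait() and then collects the result with aio_error() and
 * aio_return(). A signal-delivery test is therefore wired up as, e.g.:
 *
 *	aio_file_test(poll_signaled, setup_signal(), false);
 */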
212
213 /*
214 * Set up a sigevent for the thread-delivery tests. This isn't thread
215 * safe, but that's OK since ATF runs each test case in a separate
216 * process.
217 */
218 static struct sigevent*
219 setup_thread(void)
220 {
221 static struct sigevent sev;
222
223 ATF_REQUIRE_EQ(0, sem_init(&completions, false, 0));
224 sev.sigev_notify = SIGEV_THREAD;
225 sev.sigev_notify_function = thr_handler;
226 sev.sigev_notify_attributes = NULL;
227 return (&sev);
228 }
229
230 static ssize_t
231 suspend(struct aiocb *aio)
232 {
233 const struct aiocb *const iocbs[] = {aio};
234 int error;
235
236 error = aio_suspend(iocbs, 1, NULL);
237 if (error == 0)
238 return (aio_return(aio));
239 else
240 return (error);
241 }
242
243 static ssize_t
244 waitcomplete(struct aiocb *aio)
245 {
246 struct aiocb *aiop;
247 ssize_t ret;
248
249 ret = aio_waitcomplete(&aiop, NULL);
250 ATF_REQUIRE_EQ(aio, aiop);
251 return (ret);
252 }
253
254 /*
255 * Set up a sigevent for kqueue notification. This isn't thread safe, but
256 * that's OK because ATF runs every test case in a separate process.
257 */
258 static struct sigevent*
259 setup_kqueue(void)
260 {
261 static struct sigevent sev;
262 static int kq;
263
264 kq = kqueue();
265 ATF_REQUIRE(kq >= 0);
266
267 memset(&sev, 0, sizeof(sev));
268 sev.sigev_notify_kqueue = kq;
269 sev.sigev_value.sival_ptr = (void*)0xdeadbeef;
270 sev.sigev_notify = SIGEV_KEVENT;
271
272 return (&sev);
273 }
274
275 static ssize_t
276 poll_kqueue(struct aiocb *aio)
277 {
278 int kq, nevents;
279 struct kevent events[1];
280
281 kq = aio->aio_sigevent.sigev_notify_kqueue;
282
283 nevents = kevent(kq, NULL, 0, events, 1, NULL);
284 ATF_CHECK_EQ(1, nevents);
285 ATF_CHECK_EQ(events[0].ident, (uintptr_t) aio);
286 ATF_CHECK_EQ(events[0].filter, EVFILT_AIO);
287 ATF_CHECK_EQ(events[0].flags, EV_EOF);
288 ATF_CHECK_EQ(events[0].fflags, 0);
289 ATF_CHECK_EQ(events[0].data, 0);
290 ATF_CHECK_EQ((uintptr_t)events[0].udata, 0xdeadbeef);
291
292 return (aio_return(aio));
293 }
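
/*
 * Taken together, setup_kqueue() and poll_kqueue() form the kqueue-based
 * completion path: the aiocb pointer comes back in the kevent's ident field
 * and the sigev_value cookie in udata. A kqueue test is wired up as, e.g.:
 *
 *	aio_file_test(poll_kqueue, setup_kqueue(), false);
 */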
294
295 /*
296 * Perform a simple write test of our initialized data buffer to the provided
297 * file descriptor.
298 */
299 static void
300 aio_write_test(struct aio_context *ac, completion comp, struct sigevent *sev)
301 {
302 struct aiocb aio;
303 ssize_t len;
304
305 bzero(&aio, sizeof(aio));
306 aio.aio_buf = ac->ac_buffer;
307 aio.aio_nbytes = ac->ac_buflen;
308 aio.aio_fildes = ac->ac_write_fd;
309 aio.aio_offset = 0;
310 if (sev)
311 aio.aio_sigevent = *sev;
312
313 if (aio_write(&aio) < 0)
314 atf_tc_fail("aio_write failed: %s", strerror(errno));
315
316 len = comp(&aio);
317 if (len < 0)
318 atf_tc_fail("aio failed: %s", strerror(errno));
319
320 if (len != ac->ac_buflen)
321 atf_tc_fail("aio short write (%jd)", (intmax_t)len);
322 }
323
324 /*
325 * Perform a vectored I/O test of our initialized data buffer to the provided
326 * file descriptor.
327 *
328 * To vectorize the linear buffer, chop it up into two pieces of dissimilar
329 * size, and swap their offsets.
330 */
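/*
 * For example, with a 16384-byte buffer (the file and md cases) the split
 * below gives len0 == 12288 and len1 == 4096: iov[0] covers
 * ac_buffer[4096..16383] and iov[1] covers ac_buffer[0..4095], so the tail
 * of the buffer is transferred first; aio_readv_test() uses the same layout
 * on the way back in, which undoes the swap before the buffer is verified.
 */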
331 static void
332 aio_writev_test(struct aio_context *ac, completion comp, struct sigevent *sev)
333 {
334 struct aiocb aio;
335 struct iovec iov[2];
336 size_t len0, len1;
337 ssize_t len;
338
339 bzero(&aio, sizeof(aio));
340
341 aio.aio_fildes = ac->ac_write_fd;
342 aio.aio_offset = 0;
343 len0 = ac->ac_buflen * 3 / 4;
344 len1 = ac->ac_buflen / 4;
345 iov[0].iov_base = ac->ac_buffer + len1;
346 iov[0].iov_len = len0;
347 iov[1].iov_base = ac->ac_buffer;
348 iov[1].iov_len = len1;
349 aio.aio_iov = iov;
350 aio.aio_iovcnt = 2;
351 if (sev)
352 aio.aio_sigevent = *sev;
353
354 if (aio_writev(&aio) < 0)
355 atf_tc_fail("aio_writev failed: %s", strerror(errno));
356
357 len = comp(&aio);
358 if (len < 0)
359 atf_tc_fail("aio failed: %s", strerror(errno));
360
361 if (len != ac->ac_buflen)
362 atf_tc_fail("aio short write (%jd)", (intmax_t)len);
363 }
364
365 /*
366 * Perform a simple read test of our initialized data buffer from the
367 * provided file descriptor.
368 */
369 static void
370 aio_read_test(struct aio_context *ac, completion comp, struct sigevent *sev)
371 {
372 struct aiocb aio;
373 ssize_t len;
374
375 bzero(ac->ac_buffer, ac->ac_buflen);
376 bzero(&aio, sizeof(aio));
377 aio.aio_buf = ac->ac_buffer;
378 aio.aio_nbytes = ac->ac_buflen;
379 aio.aio_fildes = ac->ac_read_fd;
380 aio.aio_offset = 0;
381 if (sev)
382 aio.aio_sigevent = *sev;
383
384 if (aio_read(&aio) < 0)
385 atf_tc_fail("aio_read failed: %s", strerror(errno));
386
387 len = comp(&aio);
388 if (len < 0)
389 atf_tc_fail("aio failed: %s", strerror(errno));
390
391 ATF_REQUIRE_EQ_MSG(len, ac->ac_buflen,
392 "aio short read (%jd)", (intmax_t)len);
393
394 if (aio_test_buffer(ac->ac_buffer, ac->ac_buflen, ac->ac_seed) == 0)
395 atf_tc_fail("buffer mismatched");
396 }
397
398 static void
399 aio_readv_test(struct aio_context *ac, completion comp, struct sigevent *sev)
400 {
401 struct aiocb aio;
402 struct iovec iov[2];
403 size_t len0, len1;
404 ssize_t len;
405
406 bzero(ac->ac_buffer, ac->ac_buflen);
407 bzero(&aio, sizeof(aio));
408 aio.aio_fildes = ac->ac_read_fd;
409 aio.aio_offset = 0;
410 len0 = ac->ac_buflen * 3 / 4;
411 len1 = ac->ac_buflen / 4;
412 iov[0].iov_base = ac->ac_buffer + len1;
413 iov[0].iov_len = len0;
414 iov[1].iov_base = ac->ac_buffer;
415 iov[1].iov_len = len1;
416 aio.aio_iov = iov;
417 aio.aio_iovcnt = 2;
418 if (sev)
419 aio.aio_sigevent = *sev;
420
421 if (aio_readv(&aio) < 0)
422 atf_tc_fail("aio_read failed: %s", strerror(errno));
423
424 len = comp(&aio);
425 if (len < 0)
426 atf_tc_fail("aio failed: %s", strerror(errno));
427
428 ATF_REQUIRE_EQ_MSG(len, ac->ac_buflen,
429 "aio short read (%jd)", (intmax_t)len);
430
431 if (aio_test_buffer(ac->ac_buffer, ac->ac_buflen, ac->ac_seed) == 0)
432 atf_tc_fail("buffer mismatched");
433 }
434
435 /*
436 * Series of type-specific tests for AIO. For now, we just make sure we can
437 * issue a write and then a read to each type. We assume that once a write
438 * is issued, a read can follow.
439 */
440
441 /*
442 * Test with a classic file. Assumes we can create a moderate size temporary
443 * file.
444 */
445 #define FILE_LEN GLOBAL_MAX
446 #define FILE_PATHNAME "testfile"
447
448 static void
449 aio_file_test(completion comp, struct sigevent *sev, bool vectored)
450 {
451 struct aio_context ac;
452 int fd;
453
454 ATF_REQUIRE_UNSAFE_AIO();
455
456 fd = open(FILE_PATHNAME, O_RDWR | O_CREAT, 0600);
457 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));
458
459 aio_context_init(&ac, fd, fd, FILE_LEN);
460 if (vectored) {
461 aio_writev_test(&ac, comp, sev);
462 aio_readv_test(&ac, comp, sev);
463 } else {
464 aio_write_test(&ac, comp, sev);
465 aio_read_test(&ac, comp, sev);
466 }
467 close(fd);
468 }
469
470 ATF_TC_WITHOUT_HEAD(file_kq);
471 ATF_TC_BODY(file_kq, tc)
472 {
473 aio_file_test(poll_kqueue, setup_kqueue(), false);
474 }
475
476 ATF_TC_WITHOUT_HEAD(file_poll);
477 ATF_TC_BODY(file_poll, tc)
478 {
479 aio_file_test(poll, NULL, false);
480 }
481
482 ATF_TC_WITHOUT_HEAD(file_signal);
483 ATF_TC_BODY(file_signal, tc)
484 {
485 aio_file_test(poll_signaled, setup_signal(), false);
486 }
487
488 ATF_TC_WITHOUT_HEAD(file_suspend);
489 ATF_TC_BODY(file_suspend, tc)
490 {
491 aio_file_test(suspend, NULL, false);
492 }
493
494 ATF_TC_WITHOUT_HEAD(file_thread);
495 ATF_TC_BODY(file_thread, tc)
496 {
497 aio_file_test(poll_signaled, setup_thread(), false);
498 }
499
500 ATF_TC_WITHOUT_HEAD(file_waitcomplete);
501 ATF_TC_BODY(file_waitcomplete, tc)
502 {
503 aio_file_test(waitcomplete, NULL, false);
504 }
505
506 #define FIFO_LEN 256
507 #define FIFO_PATHNAME "testfifo"
508
509 static void
510 aio_fifo_test(completion comp, struct sigevent *sev)
511 {
512 int error, read_fd = -1, write_fd = -1;
513 struct aio_context ac;
514
515 ATF_REQUIRE_UNSAFE_AIO();
516
517 ATF_REQUIRE_MSG(mkfifo(FIFO_PATHNAME, 0600) != -1,
518 "mkfifo failed: %s", strerror(errno));
519
520 read_fd = open(FIFO_PATHNAME, O_RDONLY | O_NONBLOCK);
521 if (read_fd == -1) {
522 error = errno;
523 errno = error;
524 atf_tc_fail("read_fd open failed: %s",
525 strerror(errno));
526 }
527
528 write_fd = open(FIFO_PATHNAME, O_WRONLY);
529 if (write_fd == -1) {
530 error = errno;
531 errno = error;
532 atf_tc_fail("write_fd open failed: %s",
533 strerror(errno));
534 }
535
536 aio_context_init(&ac, read_fd, write_fd, FIFO_LEN);
537 aio_write_test(&ac, comp, sev);
538 aio_read_test(&ac, comp, sev);
539
540 close(read_fd);
541 close(write_fd);
542 }
543
544 ATF_TC_WITHOUT_HEAD(fifo_kq);
545 ATF_TC_BODY(fifo_kq, tc)
546 {
547 aio_fifo_test(poll_kqueue, setup_kqueue());
548 }
549
550 ATF_TC_WITHOUT_HEAD(fifo_poll);
551 ATF_TC_BODY(fifo_poll, tc)
552 {
553 aio_fifo_test(poll, NULL);
554 }
555
556 ATF_TC_WITHOUT_HEAD(fifo_signal);
557 ATF_TC_BODY(fifo_signal, tc)
558 {
559 aio_fifo_test(poll_signaled, setup_signal());
560 }
561
562 ATF_TC_WITHOUT_HEAD(fifo_suspend);
563 ATF_TC_BODY(fifo_suspend, tc)
564 {
565 aio_fifo_test(suspend, NULL);
566 }
567
568 ATF_TC_WITHOUT_HEAD(fifo_thread);
569 ATF_TC_BODY(fifo_thread, tc)
570 {
571 aio_fifo_test(poll_signaled, setup_thread());
572 }
573
574 ATF_TC_WITHOUT_HEAD(fifo_waitcomplete);
575 ATF_TC_BODY(fifo_waitcomplete, tc)
576 {
577 aio_fifo_test(waitcomplete, NULL);
578 }
579
580 #define UNIX_SOCKETPAIR_LEN 256
581 static void
582 aio_unix_socketpair_test(completion comp, struct sigevent *sev, bool vectored)
583 {
584 struct aio_context ac;
585 struct rusage ru_before, ru_after;
586 int sockets[2];
587
588 ATF_REQUIRE_MSG(socketpair(PF_UNIX, SOCK_STREAM, 0, sockets) != -1,
589 "socketpair failed: %s", strerror(errno));
590
591 aio_context_init(&ac, sockets[0], sockets[1], UNIX_SOCKETPAIR_LEN);
592 ATF_REQUIRE_MSG(getrusage(RUSAGE_SELF, &ru_before) != -1,
593 "getrusage failed: %s", strerror(errno));
594 if (vectored) {
595 aio_writev_test(&ac, comp, sev);
596 aio_readv_test(&ac, comp, sev);
597 } else {
598 aio_write_test(&ac, comp, sev);
599 aio_read_test(&ac, comp, sev);
600 }
601 ATF_REQUIRE_MSG(getrusage(RUSAGE_SELF, &ru_after) != -1,
602 "getrusage failed: %s", strerror(errno));
603 ATF_REQUIRE(ru_after.ru_msgsnd == ru_before.ru_msgsnd + 1);
604 ATF_REQUIRE(ru_after.ru_msgrcv == ru_before.ru_msgrcv + 1);
605
606 close(sockets[0]);
607 close(sockets[1]);
608 }
609
610 ATF_TC_WITHOUT_HEAD(socket_kq);
611 ATF_TC_BODY(socket_kq, tc)
612 {
613 aio_unix_socketpair_test(poll_kqueue, setup_kqueue(), false);
614 }
615
616 ATF_TC_WITHOUT_HEAD(socket_poll);
617 ATF_TC_BODY(socket_poll, tc)
618 {
619 aio_unix_socketpair_test(poll, NULL, false);
620 }
621
622 ATF_TC_WITHOUT_HEAD(socket_signal);
623 ATF_TC_BODY(socket_signal, tc)
624 {
625 aio_unix_socketpair_test(poll_signaled, setup_signal(), false);
626 }
627
628 ATF_TC_WITHOUT_HEAD(socket_suspend);
629 ATF_TC_BODY(socket_suspend, tc)
630 {
631 aio_unix_socketpair_test(suspend, NULL, false);
632 }
633
634 ATF_TC_WITHOUT_HEAD(socket_thread);
635 ATF_TC_BODY(socket_thread, tc)
636 {
637 aio_unix_socketpair_test(poll_signaled, setup_thread(), false);
638 }
639
640 ATF_TC_WITHOUT_HEAD(socket_waitcomplete);
641 ATF_TC_BODY(socket_waitcomplete, tc)
642 {
643 aio_unix_socketpair_test(waitcomplete, NULL, false);
644 }
645
646 struct aio_pty_arg {
647 int apa_read_fd;
648 int apa_write_fd;
649 };
650
651 #define PTY_LEN 256
652 static void
653 aio_pty_test(completion comp, struct sigevent *sev)
654 {
655 struct aio_context ac;
656 int read_fd, write_fd;
657 struct termios ts;
658 int error;
659
660 ATF_REQUIRE_UNSAFE_AIO();
661
662 ATF_REQUIRE_MSG(openpty(&read_fd, &write_fd, NULL, NULL, NULL) == 0,
663 "openpty failed: %s", strerror(errno));
664
665
666 if (tcgetattr(write_fd, &ts) < 0) {
667 error = errno;
668 errno = error;
669 atf_tc_fail("tcgetattr failed: %s", strerror(errno));
670 }
671 cfmakeraw(&ts);
672 if (tcsetattr(write_fd, TCSANOW, &ts) < 0) {
673 error = errno;
674 errno = error;
675 atf_tc_fail("tcsetattr failed: %s", strerror(errno));
676 }
677 aio_context_init(&ac, read_fd, write_fd, PTY_LEN);
678
679 aio_write_test(&ac, comp, sev);
680 aio_read_test(&ac, comp, sev);
681
682 close(read_fd);
683 close(write_fd);
684 }
685
686 ATF_TC_WITHOUT_HEAD(pty_kq);
687 ATF_TC_BODY(pty_kq, tc)
688 {
689 aio_pty_test(poll_kqueue, setup_kqueue());
690 }
691
692 ATF_TC_WITHOUT_HEAD(pty_poll);
693 ATF_TC_BODY(pty_poll, tc)
694 {
695 aio_pty_test(poll, NULL);
696 }
697
698 ATF_TC_WITHOUT_HEAD(pty_signal);
699 ATF_TC_BODY(pty_signal, tc)
700 {
701 aio_pty_test(poll_signaled, setup_signal());
702 }
703
704 ATF_TC_WITHOUT_HEAD(pty_suspend);
705 ATF_TC_BODY(pty_suspend, tc)
706 {
707 aio_pty_test(suspend, NULL);
708 }
709
710 ATF_TC_WITHOUT_HEAD(pty_thread);
711 ATF_TC_BODY(pty_thread, tc)
712 {
713 aio_pty_test(poll_signaled, setup_thread());
714 }
715
716 ATF_TC_WITHOUT_HEAD(pty_waitcomplete);
717 ATF_TC_BODY(pty_waitcomplete, tc)
718 {
719 aio_pty_test(waitcomplete, NULL);
720 }
721
722 #define PIPE_LEN 256
723 static void
724 aio_pipe_test(completion comp, struct sigevent *sev)
725 {
726 struct aio_context ac;
727 int pipes[2];
728
729 ATF_REQUIRE_UNSAFE_AIO();
730
731 ATF_REQUIRE_MSG(pipe(pipes) != -1,
732 "pipe failed: %s", strerror(errno));
733
734 aio_context_init(&ac, pipes[0], pipes[1], PIPE_LEN);
735 aio_write_test(&ac, comp, sev);
736 aio_read_test(&ac, comp, sev);
737
738 close(pipes[0]);
739 close(pipes[1]);
740 }
741
742 ATF_TC_WITHOUT_HEAD(pipe_kq);
743 ATF_TC_BODY(pipe_kq, tc)
744 {
745 aio_pipe_test(poll_kqueue, setup_kqueue());
746 }
747
748 ATF_TC_WITHOUT_HEAD(pipe_poll);
749 ATF_TC_BODY(pipe_poll, tc)
750 {
751 aio_pipe_test(poll, NULL);
752 }
753
754 ATF_TC_WITHOUT_HEAD(pipe_signal);
755 ATF_TC_BODY(pipe_signal, tc)
756 {
757 aio_pipe_test(poll_signaled, setup_signal());
758 }
759
760 ATF_TC_WITHOUT_HEAD(pipe_suspend);
761 ATF_TC_BODY(pipe_suspend, tc)
762 {
763 aio_pipe_test(suspend, NULL);
764 }
765
766 ATF_TC_WITHOUT_HEAD(pipe_thread);
767 ATF_TC_BODY(pipe_thread, tc)
768 {
769 aio_pipe_test(poll_signaled, setup_thread());
770 }
771
772 ATF_TC_WITHOUT_HEAD(pipe_waitcomplete);
773 ATF_TC_BODY(pipe_waitcomplete, tc)
774 {
775 aio_pipe_test(waitcomplete, NULL);
776 }
777
778 #define MD_LEN GLOBAL_MAX
779 #define MDUNIT_LINK "mdunit_link"
780
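/*
 * aio_md_setup() below does roughly what "mdconfig -a -t malloc -s 16k"
 * would do from the shell, and aio_md_cleanup() the matching
 * "mdconfig -d -u <unit>"; doing it via MDIOCATTACH/MDIOCDETACH directly
 * lets the test record the unit number for cleanup.
 */
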
781 static int
782 aio_md_setup(void)
783 {
784 int error, fd, mdctl_fd, unit;
785 char pathname[PATH_MAX];
786 struct md_ioctl mdio;
787 char buf[80];
788
789 mdctl_fd = open("/dev/" MDCTL_NAME, O_RDWR, 0);
790 ATF_REQUIRE_MSG(mdctl_fd != -1,
791 "opening /dev/%s failed: %s", MDCTL_NAME, strerror(errno));
792
793 bzero(&mdio, sizeof(mdio));
794 mdio.md_version = MDIOVERSION;
795 mdio.md_type = MD_MALLOC;
796 mdio.md_options = MD_AUTOUNIT | MD_COMPRESS;
797 mdio.md_mediasize = GLOBAL_MAX;
798 mdio.md_sectorsize = 512;
799 strlcpy(buf, __func__, sizeof(buf));
800 mdio.md_label = buf;
801
802 if (ioctl(mdctl_fd, MDIOCATTACH, &mdio) < 0) {
803 error = errno;
804 errno = error;
805 atf_tc_fail("ioctl MDIOCATTACH failed: %s", strerror(errno));
806 }
807 close(mdctl_fd);
808
809 /* Store the md unit number in a symlink for future cleanup */
810 unit = mdio.md_unit;
811 snprintf(buf, sizeof(buf), "%d", unit);
812 ATF_REQUIRE_EQ(0, symlink(buf, MDUNIT_LINK));
813 snprintf(pathname, PATH_MAX, "/dev/md%d", unit);
814 fd = open(pathname, O_RDWR);
815 ATF_REQUIRE_MSG(fd != -1,
816 "opening %s failed: %s", pathname, strerror(errno));
817
818 return (fd);
819 }
820
821 static void
822 aio_md_cleanup(void)
823 {
824 struct md_ioctl mdio;
825 int mdctl_fd, n, unit;
826 char buf[80];
827
828 mdctl_fd = open("/dev/" MDCTL_NAME, O_RDWR, 0);
829 if (mdctl_fd < 0) {
830 fprintf(stderr, "opening /dev/%s failed: %s\n", MDCTL_NAME,
831 strerror(errno));
832 return;
833 }
834 n = readlink(MDUNIT_LINK, buf, sizeof(buf) - 1);
835 if (n > 0) {
836 buf[n] = '\0';
837 if (sscanf(buf, "%d", &unit) == 1 && unit >= 0) {
838 bzero(&mdio, sizeof(mdio));
839 mdio.md_version = MDIOVERSION;
840 mdio.md_unit = unit;
841 if (ioctl(mdctl_fd, MDIOCDETACH, &mdio) == -1) {
842 fprintf(stderr,
843 "ioctl MDIOCDETACH unit %d failed: %s\n",
844 unit, strerror(errno));
845 }
846 }
847 }
848
849 close(mdctl_fd);
850 }
851
852 static void
853 aio_md_test(completion comp, struct sigevent *sev, bool vectored)
854 {
855 struct aio_context ac;
856 int fd;
857
858 fd = aio_md_setup();
859 aio_context_init(&ac, fd, fd, MD_LEN);
860 if (vectored) {
861 aio_writev_test(&ac, comp, sev);
862 aio_readv_test(&ac, comp, sev);
863 } else {
864 aio_write_test(&ac, comp, sev);
865 aio_read_test(&ac, comp, sev);
866 }
867
868 close(fd);
869 }
870
871 ATF_TC_WITH_CLEANUP(md_kq);
872 ATF_TC_HEAD(md_kq, tc)
873 {
874
875 atf_tc_set_md_var(tc, "require.user", "root");
876 }
877 ATF_TC_BODY(md_kq, tc)
878 {
879 aio_md_test(poll_kqueue, setup_kqueue(), false);
880 }
881 ATF_TC_CLEANUP(md_kq, tc)
882 {
883 aio_md_cleanup();
884 }
885
886 ATF_TC_WITH_CLEANUP(md_poll);
887 ATF_TC_HEAD(md_poll, tc)
888 {
889
890 atf_tc_set_md_var(tc, "require.user", "root");
891 }
892 ATF_TC_BODY(md_poll, tc)
893 {
894 aio_md_test(poll, NULL, false);
895 }
896 ATF_TC_CLEANUP(md_poll, tc)
897 {
898 aio_md_cleanup();
899 }
900
901 ATF_TC_WITH_CLEANUP(md_signal);
902 ATF_TC_HEAD(md_signal, tc)
903 {
904
905 atf_tc_set_md_var(tc, "require.user", "root");
906 }
907 ATF_TC_BODY(md_signal, tc)
908 {
909 aio_md_test(poll_signaled, setup_signal(), false);
910 }
911 ATF_TC_CLEANUP(md_signal, tc)
912 {
913 aio_md_cleanup();
914 }
915
916 ATF_TC_WITH_CLEANUP(md_suspend);
917 ATF_TC_HEAD(md_suspend, tc)
918 {
919
920 atf_tc_set_md_var(tc, "require.user", "root");
921 }
922 ATF_TC_BODY(md_suspend, tc)
923 {
924 aio_md_test(suspend, NULL, false);
925 }
926 ATF_TC_CLEANUP(md_suspend, tc)
927 {
928 aio_md_cleanup();
929 }
930
931 ATF_TC_WITH_CLEANUP(md_thread);
932 ATF_TC_HEAD(md_thread, tc)
933 {
934
935 atf_tc_set_md_var(tc, "require.user", "root");
936 }
937 ATF_TC_BODY(md_thread, tc)
938 {
939 aio_md_test(poll_signaled, setup_thread(), false);
940 }
941 ATF_TC_CLEANUP(md_thread, tc)
942 {
943 aio_md_cleanup();
944 }
945
946 ATF_TC_WITH_CLEANUP(md_waitcomplete);
947 ATF_TC_HEAD(md_waitcomplete, tc)
948 {
949
950 atf_tc_set_md_var(tc, "require.user", "root");
951 }
952 ATF_TC_BODY(md_waitcomplete, tc)
953 {
954 aio_md_test(waitcomplete, NULL, false);
955 }
956 ATF_TC_CLEANUP(md_waitcomplete, tc)
957 {
958 aio_md_cleanup();
959 }
960
961 #define ZVOL_VDEV_PATHNAME "test_vdev"
962 #define POOL_SIZE (1 << 28) /* 256 MB */
963 #define ZVOL_SIZE "64m"
964 #define POOL_NAME "aio_testpool"
965 #define ZVOL_NAME "aio_testvol"
966
967 static int
968 aio_zvol_setup(const char *unique)
969 {
970 FILE *pidfile;
971 int fd;
972 pid_t pid;
973 char vdev_name[160];
974 char pool_name[80];
975 char cmd[160];
976 char zvol_name[160];
977 char devname[160];
978
979 pid = getpid();
980 snprintf(vdev_name, sizeof(vdev_name), "%s", ZVOL_VDEV_PATHNAME);
981 snprintf(pool_name, sizeof(pool_name), "%s_%s.%d", POOL_NAME, unique,
982 pid);
983 snprintf(zvol_name, sizeof(zvol_name), "%s/%s_%s", pool_name, ZVOL_NAME,
984 unique);
985
986 fd = open(vdev_name, O_RDWR | O_CREAT, 0600);
987 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));
988 ATF_REQUIRE_EQ_MSG(0,
989 ftruncate(fd, POOL_SIZE), "ftruncate failed: %s", strerror(errno));
990 close(fd);
991
992 pidfile = fopen("pidfile", "w");
993 ATF_REQUIRE_MSG(NULL != pidfile, "fopen: %s", strerror(errno));
994 fprintf(pidfile, "%d", pid);
995 fclose(pidfile);
996
997 snprintf(cmd, sizeof(cmd), "zpool create %s $PWD/%s", pool_name,
998 vdev_name);
999 ATF_REQUIRE_EQ_MSG(0, system(cmd),
1000 "zpool create failed: %s", strerror(errno));
1001 snprintf(cmd, sizeof(cmd),
1002 "zfs create -o volblocksize=8192 -o volmode=dev -V %s %s",
1003 ZVOL_SIZE, zvol_name);
1004 ATF_REQUIRE_EQ_MSG(0, system(cmd),
1005 "zfs create failed: %s", strerror(errno));
1006
1007 snprintf(devname, sizeof(devname), "/dev/zvol/%s", zvol_name);
1008 do {
1009 fd = open(devname, O_RDWR);
1010 } while (fd == -1 && errno == EINTR);
1011 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));
1012 return (fd);
1013 }
1014
1015 static void
1016 aio_zvol_cleanup(const char *unique)
1017 {
1018 FILE *pidfile;
1019 pid_t testpid;
1020 char cmd[160];
1021
1022 pidfile = fopen("pidfile", "r");
1023 if (pidfile == NULL && errno == ENOENT) {
1024 /* Setup probably failed */
1025 return;
1026 }
1027 ATF_REQUIRE_MSG(NULL != pidfile, "fopen: %s", strerror(errno));
1028 ATF_REQUIRE_EQ(1, fscanf(pidfile, "%d", &testpid));
1029 fclose(pidfile);
1030
1031 snprintf(cmd, sizeof(cmd), "zpool destroy %s_%s.%d", POOL_NAME, unique,
1032 testpid);
1033 system(cmd);
1034 }
1035
1036
1037 ATF_TC_WITHOUT_HEAD(aio_large_read_test);
1038 ATF_TC_BODY(aio_large_read_test, tc)
1039 {
1040 struct aiocb cb, *cbp;
1041 ssize_t nread;
1042 size_t len;
1043 int fd;
1044 #ifdef __LP64__
1045 int clamped;
1046 #endif
1047
1048 ATF_REQUIRE_UNSAFE_AIO();
1049
1050 #ifdef __LP64__
1051 len = sizeof(clamped);
1052 if (sysctlbyname("debug.iosize_max_clamp", &clamped, &len, NULL, 0) ==
1053 -1)
1054 atf_libc_error(errno, "Failed to read debug.iosize_max_clamp");
1055 #endif
1056
1057 /* Determine the maximum supported read(2) size. */
1058 len = SSIZE_MAX;
1059 #ifdef __LP64__
1060 if (clamped)
1061 len = INT_MAX;
1062 #endif
1063
1064 fd = open(FILE_PATHNAME, O_RDWR | O_CREAT, 0600);
1065 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));
1066
1067 unlink(FILE_PATHNAME);
1068
1069 memset(&cb, 0, sizeof(cb));
1070 cb.aio_nbytes = len;
1071 cb.aio_fildes = fd;
1072 cb.aio_buf = NULL;
1073 if (aio_read(&cb) == -1)
1074 atf_tc_fail("aio_read() of maximum read size failed: %s",
1075 strerror(errno));
1076
1077 nread = aio_waitcomplete(&cbp, NULL);
1078 if (nread == -1)
1079 atf_tc_fail("aio_waitcomplete() failed: %s", strerror(errno));
1080 if (nread != 0)
1081 atf_tc_fail("aio_read() from empty file returned data: %zd",
1082 nread);
1083
1084 memset(&cb, 0, sizeof(cb));
1085 cb.aio_nbytes = len + 1;
1086 cb.aio_fildes = fd;
1087 cb.aio_buf = NULL;
1088 if (aio_read(&cb) == -1) {
1089 if (errno == EINVAL)
1090 goto finished;
1091 atf_tc_fail("aio_read() of too large read size failed: %s",
1092 strerror(errno));
1093 }
1094
1095 nread = aio_waitcomplete(&cbp, NULL);
1096 if (nread == -1) {
1097 if (errno == EINVAL)
1098 goto finished;
1099 atf_tc_fail("aio_waitcomplete() failed: %s", strerror(errno));
1100 }
1101 atf_tc_fail("aio_read() of too large read size returned: %zd", nread);
1102
1103 finished:
1104 close(fd);
1105 }
1106
1107 /*
1108 * This tests for a bug where arriving socket data can wake up multiple
1109 * AIO read requests, resulting in an uncancellable request.
1110 */
1111 ATF_TC_WITHOUT_HEAD(aio_socket_two_reads);
1112 ATF_TC_BODY(aio_socket_two_reads, tc)
1113 {
1114 struct ioreq {
1115 struct aiocb iocb;
1116 char buffer[1024];
1117 } ioreq[2];
1118 struct aiocb *iocb;
1119 unsigned i;
1120 int s[2];
1121 char c;
1122
1123 #if __FreeBSD_version < 1100101
1124 atf_tc_skip("kernel version %d is too old (%d required)",
1125 __FreeBSD_version, 1100101);
1126 #endif
1127
1128 ATF_REQUIRE(socketpair(PF_UNIX, SOCK_STREAM, 0, s) != -1);
1129
1130 /* Queue two read requests. */
1131 memset(&ioreq, 0, sizeof(ioreq));
1132 for (i = 0; i < nitems(ioreq); i++) {
1133 ioreq[i].iocb.aio_nbytes = sizeof(ioreq[i].buffer);
1134 ioreq[i].iocb.aio_fildes = s[0];
1135 ioreq[i].iocb.aio_buf = ioreq[i].buffer;
1136 ATF_REQUIRE(aio_read(&ioreq[i].iocb) == 0);
1137 }
1138
1139 /* Send a single byte. This should complete one request. */
1140 c = 0xc3;
1141 ATF_REQUIRE(write(s[1], &c, sizeof(c)) == 1);
1142
1143 ATF_REQUIRE(aio_waitcomplete(&iocb, NULL) == 1);
1144
1145 /* Determine which request completed and verify the data was read. */
1146 if (iocb == &ioreq[0].iocb)
1147 i = 0;
1148 else
1149 i = 1;
1150 ATF_REQUIRE(ioreq[i].buffer[0] == c);
1151
1152 i ^= 1;
1153
1154 /*
1155 * Try to cancel the other request. On broken systems this
1156 * will fail and the process will hang on exit.
1157 */
1158 ATF_REQUIRE(aio_error(&ioreq[i].iocb) == EINPROGRESS);
1159 ATF_REQUIRE(aio_cancel(s[0], &ioreq[i].iocb) == AIO_CANCELED);
1160
1161 close(s[1]);
1162 close(s[0]);
1163 }
1164
1165 static void
1166 aio_socket_blocking_short_write_test(bool vectored)
1167 {
1168 struct aiocb iocb, *iocbp;
1169 struct iovec iov[2];
1170 char *buffer[2];
1171 ssize_t done, r;
1172 int buffer_size, sb_size;
1173 socklen_t len;
1174 int s[2];
1175
1176 ATF_REQUIRE(socketpair(PF_UNIX, SOCK_STREAM, 0, s) != -1);
1177
1178 len = sizeof(sb_size);
1179 ATF_REQUIRE(getsockopt(s[0], SOL_SOCKET, SO_RCVBUF, &sb_size, &len) !=
1180 -1);
1181 ATF_REQUIRE(len == sizeof(sb_size));
1182 buffer_size = sb_size;
1183
1184 ATF_REQUIRE(getsockopt(s[1], SOL_SOCKET, SO_SNDBUF, &sb_size, &len) !=
1185 -1);
1186 ATF_REQUIRE(len == sizeof(sb_size));
1187 if (sb_size > buffer_size)
1188 buffer_size = sb_size;
1189
1190 /*
1191 * Use twice the size of the MAX(receive buffer, send buffer)
1192 * to ensure that the write is split up into multiple writes
1193 * internally.
1194 */
1195 buffer_size *= 2;
1196
1197 buffer[0] = malloc(buffer_size);
1198 ATF_REQUIRE(buffer[0] != NULL);
1199 buffer[1] = malloc(buffer_size);
1200 ATF_REQUIRE(buffer[1] != NULL);
1201
1202 srandomdev();
1203 aio_fill_buffer(buffer[1], buffer_size, random());
1204
1205 memset(&iocb, 0, sizeof(iocb));
1206 iocb.aio_fildes = s[1];
1207 if (vectored) {
1208 iov[0].iov_base = buffer[1];
1209 iov[0].iov_len = buffer_size / 2 + 1;
1210 iov[1].iov_base = buffer[1] + buffer_size / 2 + 1;
1211 iov[1].iov_len = buffer_size / 2 - 1;
1212 iocb.aio_iov = iov;
1213 iocb.aio_iovcnt = 2;
1214 r = aio_writev(&iocb);
1215 ATF_CHECK_EQ_MSG(0, r, "aio_writev returned %zd", r);
1216 } else {
1217 iocb.aio_buf = buffer[1];
1218 iocb.aio_nbytes = buffer_size;
1219 r = aio_write(&iocb);
1220 ATF_CHECK_EQ_MSG(0, r, "aio_write returned %zd", r);
1221 }
1222
1223 done = recv(s[0], buffer[0], buffer_size, MSG_WAITALL);
1224 ATF_REQUIRE(done == buffer_size);
1225
1226 done = aio_waitcomplete(&iocbp, NULL);
1227 ATF_REQUIRE(iocbp == &iocb);
1228 ATF_REQUIRE(done == buffer_size);
1229
1230 ATF_REQUIRE(memcmp(buffer[0], buffer[1], buffer_size) == 0);
1231
1232 close(s[1]);
1233 close(s[0]);
1234 }
1235
1236 /*
1237 * This test ensures that aio_write() on a blocking socket of a "large"
1238 * buffer does not return a short completion.
1239 */
1240 ATF_TC_WITHOUT_HEAD(aio_socket_blocking_short_write);
1241 ATF_TC_BODY(aio_socket_blocking_short_write, tc)
1242 {
1243 aio_socket_blocking_short_write_test(false);
1244 }
1245
1246 /*
1247 * Like aio_socket_blocking_short_write, but also tests that partially
1248 * completed vectored sends can be retried correctly.
1249 */
1250 ATF_TC_WITHOUT_HEAD(aio_socket_blocking_short_write_vectored);
1251 ATF_TC_BODY(aio_socket_blocking_short_write_vectored, tc)
1252 {
1253 aio_socket_blocking_short_write_test(true);
1254 }
1255
1256 /*
1257 * Verify that AIO requests fail when applied to a listening socket.
1258 */
1259 ATF_TC_WITHOUT_HEAD(aio_socket_listen_fail);
1260 ATF_TC_BODY(aio_socket_listen_fail, tc)
1261 {
1262 struct aiocb iocb;
1263 struct sockaddr_un sun;
1264 char buf[16];
1265 int s;
1266
1267 s = socket(AF_LOCAL, SOCK_STREAM, 0);
1268 ATF_REQUIRE(s != -1);
1269
1270 memset(&sun, 0, sizeof(sun));
1271 snprintf(sun.sun_path, sizeof(sun.sun_path), "%s", "listen.XXXXXX");
1272 mktemp(sun.sun_path);
1273 sun.sun_family = AF_LOCAL;
1274 sun.sun_len = SUN_LEN(&sun);
1275
1276 ATF_REQUIRE(bind(s, (struct sockaddr *)&sun, SUN_LEN(&sun)) == 0);
1277 ATF_REQUIRE(listen(s, 5) == 0);
1278
1279 memset(buf, 0, sizeof(buf));
1280 memset(&iocb, 0, sizeof(iocb));
1281 iocb.aio_fildes = s;
1282 iocb.aio_buf = buf;
1283 iocb.aio_nbytes = sizeof(buf);
1284
1285 ATF_REQUIRE_ERRNO(EINVAL, aio_read(&iocb) == -1);
1286 ATF_REQUIRE_ERRNO(EINVAL, aio_write(&iocb) == -1);
1287
1288 ATF_REQUIRE(unlink(sun.sun_path) == 0);
1289 close(s);
1290 }
1291
1292 /*
1293 * Verify that listen(2) fails if a socket has pending AIO requests.
1294 */
1295 ATF_TC_WITHOUT_HEAD(aio_socket_listen_pending);
1296 ATF_TC_BODY(aio_socket_listen_pending, tc)
1297 {
1298 struct aiocb iocb;
1299 struct sockaddr_un sun;
1300 char buf[16];
1301 int s;
1302
1303 s = socket(AF_LOCAL, SOCK_STREAM, 0);
1304 ATF_REQUIRE(s != -1);
1305
1306 memset(&sun, 0, sizeof(sun));
1307 snprintf(sun.sun_path, sizeof(sun.sun_path), "%s", "listen.XXXXXX");
1308 mktemp(sun.sun_path);
1309 sun.sun_family = AF_LOCAL;
1310 sun.sun_len = SUN_LEN(&sun);
1311
1312 ATF_REQUIRE(bind(s, (struct sockaddr *)&sun, SUN_LEN(&sun)) == 0);
1313
1314 memset(buf, 0, sizeof(buf));
1315 memset(&iocb, 0, sizeof(iocb));
1316 iocb.aio_fildes = s;
1317 iocb.aio_buf = buf;
1318 iocb.aio_nbytes = sizeof(buf);
1319 ATF_REQUIRE(aio_read(&iocb) == 0);
1320
1321 ATF_REQUIRE_ERRNO(EINVAL, listen(s, 5) == -1);
1322
1323 ATF_REQUIRE(aio_cancel(s, &iocb) != -1);
1324
1325 ATF_REQUIRE(unlink(sun.sun_path) == 0);
1326 close(s);
1327 }
1328
1329 /*
1330 * This test verifies that cancelling a partially completed socket write
1331 * returns a short write rather than ECANCELED.
1332 */
1333 ATF_TC_WITHOUT_HEAD(aio_socket_short_write_cancel);
1334 ATF_TC_BODY(aio_socket_short_write_cancel, tc)
1335 {
1336 struct aiocb iocb, *iocbp;
1337 char *buffer[2];
1338 ssize_t done;
1339 int buffer_size, sb_size;
1340 socklen_t len;
1341 int s[2];
1342
1343 ATF_REQUIRE(socketpair(PF_UNIX, SOCK_STREAM, 0, s) != -1);
1344
1345 len = sizeof(sb_size);
1346 ATF_REQUIRE(getsockopt(s[0], SOL_SOCKET, SO_RCVBUF, &sb_size, &len) !=
1347 -1);
1348 ATF_REQUIRE(len == sizeof(sb_size));
1349 buffer_size = sb_size;
1350
1351 ATF_REQUIRE(getsockopt(s[1], SOL_SOCKET, SO_SNDBUF, &sb_size, &len) !=
1352 -1);
1353 ATF_REQUIRE(len == sizeof(sb_size));
1354 if (sb_size > buffer_size)
1355 buffer_size = sb_size;
1356
1357 /*
1358 * Use three times the size of the MAX(receive buffer, send
1359 * buffer) for the write to ensure that the write is split up
1360 * into multiple writes internally. The recv() ensures that
1361 * the write has partially completed, but a remaining size of
1362 * two buffers should ensure that the write has not completed
1363 * fully when it is cancelled.
1364 */
1365 buffer[0] = malloc(buffer_size);
1366 ATF_REQUIRE(buffer[0] != NULL);
1367 buffer[1] = malloc(buffer_size * 3);
1368 ATF_REQUIRE(buffer[1] != NULL);
1369
1370 srandomdev();
1371 aio_fill_buffer(buffer[1], buffer_size * 3, random());
1372
1373 memset(&iocb, 0, sizeof(iocb));
1374 iocb.aio_fildes = s[1];
1375 iocb.aio_buf = buffer[1];
1376 iocb.aio_nbytes = buffer_size * 3;
1377 ATF_REQUIRE(aio_write(&iocb) == 0);
1378
1379 done = recv(s[0], buffer[0], buffer_size, MSG_WAITALL);
1380 ATF_REQUIRE(done == buffer_size);
1381
1382 ATF_REQUIRE(aio_error(&iocb) == EINPROGRESS);
1383 ATF_REQUIRE(aio_cancel(s[1], &iocb) == AIO_NOTCANCELED);
1384
1385 done = aio_waitcomplete(&iocbp, NULL);
1386 ATF_REQUIRE(iocbp == &iocb);
1387 ATF_REQUIRE(done >= buffer_size && done <= buffer_size * 2);
1388
1389 ATF_REQUIRE(memcmp(buffer[0], buffer[1], buffer_size) == 0);
1390
1391 close(s[1]);
1392 close(s[0]);
1393 }
1394
1395 /*
1396 * Test handling of aio_read() and aio_write() on shut-down sockets.
1397 */
1398 ATF_TC_WITHOUT_HEAD(aio_socket_shutdown);
1399 ATF_TC_BODY(aio_socket_shutdown, tc)
1400 {
1401 struct aiocb iocb;
1402 sigset_t set;
1403 char *buffer;
1404 ssize_t len;
1405 size_t bsz;
1406 int error, s[2];
1407
1408 ATF_REQUIRE(socketpair(PF_UNIX, SOCK_STREAM, 0, s) != -1);
1409
1410 bsz = 1024;
1411 buffer = malloc(bsz);
ATF_REQUIRE(buffer != NULL);
1412 memset(buffer, 0, bsz);
1413
1414 /* Put some data in s[0]'s recv buffer. */
1415 ATF_REQUIRE(send(s[1], buffer, bsz, 0) == (ssize_t)bsz);
1416
1417 /* No more reading from s[0]. */
1418 ATF_REQUIRE(shutdown(s[0], SHUT_RD) != -1);
1419
1422 memset(&iocb, 0, sizeof(iocb));
1423 iocb.aio_fildes = s[0];
1424 iocb.aio_buf = buffer;
1425 iocb.aio_nbytes = bsz;
1426 ATF_REQUIRE(aio_read(&iocb) == 0);
1427
1428 /* Expect to see zero bytes, analogous to recv(2). */
1429 while ((error = aio_error(&iocb)) == EINPROGRESS)
1430 usleep(25000);
1431 ATF_REQUIRE_MSG(error == 0, "aio_error() returned %d", error);
1432 len = aio_return(&iocb);
1433 ATF_REQUIRE_MSG(len == 0, "read job returned %zd bytes", len);
1434
1435 /* No more writing to s[1]. */
1436 ATF_REQUIRE(shutdown(s[1], SHUT_WR) != -1);
1437
1438 /* Block SIGPIPE so that we can detect the error in-band. */
1439 sigemptyset(&set);
1440 sigaddset(&set, SIGPIPE);
1441 ATF_REQUIRE(sigprocmask(SIG_BLOCK, &set, NULL) == 0);
1442
1443 memset(&iocb, 0, sizeof(iocb));
1444 iocb.aio_fildes = s[1];
1445 iocb.aio_buf = buffer;
1446 iocb.aio_nbytes = bsz;
1447 ATF_REQUIRE(aio_write(&iocb) == 0);
1448
1449 /* Expect an error, analogous to send(2). */
1450 while ((error = aio_error(&iocb)) == EINPROGRESS)
1451 usleep(25000);
1452 ATF_REQUIRE_MSG(error == EPIPE, "aio_error() returned %d", error);
1453
1454 ATF_REQUIRE(close(s[0]) != -1);
1455 ATF_REQUIRE(close(s[1]) != -1);
1456 free(buffer);
1457 }
1458
1459 /*
1460 * Test aio_fsync's behavior with bad inputs.
1461 */
1462 ATF_TC_WITHOUT_HEAD(aio_fsync_errors);
1463 ATF_TC_BODY(aio_fsync_errors, tc)
1464 {
1465 int fd;
1466 struct aiocb iocb;
1467
1468 ATF_REQUIRE_UNSAFE_AIO();
1469
1470 fd = open(FILE_PATHNAME, O_RDWR | O_CREAT, 0600);
1471 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));
1472 unlink(FILE_PATHNAME);
1473
1474 /* aio_fsync should return EINVAL unless op is O_SYNC or O_DSYNC */
1475 memset(&iocb, 0, sizeof(iocb));
1476 iocb.aio_fildes = fd;
1477 ATF_CHECK_EQ(-1, aio_fsync(666, &iocb));
1478 ATF_CHECK_EQ(EINVAL, errno);
1479
1480 /* aio_fsync should return EBADF if fd is not a valid descriptor */
1481 memset(&iocb, 0, sizeof(iocb));
1482 iocb.aio_fildes = 666;
1483 ATF_CHECK_EQ(-1, aio_fsync(O_SYNC, &iocb));
1484 ATF_CHECK_EQ(EBADF, errno);
1485
1486 /* aio_fsync should return EINVAL if sigev_notify is invalid */
1487 memset(&iocb, 0, sizeof(iocb));
1488 iocb.aio_fildes = fd;
1489 iocb.aio_sigevent.sigev_notify = 666;
1490 ATF_CHECK_EQ(-1, aio_fsync(666, &iocb));
1491 ATF_CHECK_EQ(EINVAL, errno);
1492 }
1493
1494 /*
1495 * This test just performs a basic test of aio_fsync().
1496 */
1497 static void
1498 aio_fsync_test(int op)
1499 {
1500 struct aiocb synccb, *iocbp;
1501 struct {
1502 struct aiocb iocb;
1503 bool done;
1504 char *buffer;
1505 } buffers[16];
1506 struct stat sb;
1507 ssize_t rval;
1508 unsigned i;
1509 int fd;
1510
1511 ATF_REQUIRE_UNSAFE_AIO();
1512
1513 fd = open(FILE_PATHNAME, O_RDWR | O_CREAT, 0600);
1514 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));
1515 unlink(FILE_PATHNAME);
1516
1517 ATF_REQUIRE(fstat(fd, &sb) == 0);
1518 ATF_REQUIRE(sb.st_blksize != 0);
1519 ATF_REQUIRE(ftruncate(fd, sb.st_blksize * nitems(buffers)) == 0);
1520
1521 /*
1522 * Queue several asynchronous write requests. Hopefully this
1523 * forces the aio_fsync() request to be deferred. There is no
1524 * reliable way to guarantee that, however.
1525 */
1526 srandomdev();
1527 for (i = 0; i < nitems(buffers); i++) {
1528 buffers[i].done = false;
1529 memset(&buffers[i].iocb, 0, sizeof(buffers[i].iocb));
1530 buffers[i].buffer = malloc(sb.st_blksize);
1531 aio_fill_buffer(buffers[i].buffer, sb.st_blksize, random());
1532 buffers[i].iocb.aio_fildes = fd;
1533 buffers[i].iocb.aio_buf = buffers[i].buffer;
1534 buffers[i].iocb.aio_nbytes = sb.st_blksize;
1535 buffers[i].iocb.aio_offset = sb.st_blksize * i;
1536 ATF_REQUIRE(aio_write(&buffers[i].iocb) == 0);
1537 }
1538
1539 /* Queue the aio_fsync request. */
1540 memset(&synccb, 0, sizeof(synccb));
1541 synccb.aio_fildes = fd;
1542 ATF_REQUIRE(aio_fsync(op, &synccb) == 0);
1543
1544 /* Wait for requests to complete. */
1545 for (;;) {
1546 next:
1547 rval = aio_waitcomplete(&iocbp, NULL);
1548 ATF_REQUIRE(iocbp != NULL);
1549 if (iocbp == &synccb) {
1550 ATF_REQUIRE(rval == 0);
1551 break;
1552 }
1553
1554 for (i = 0; i < nitems(buffers); i++) {
1555 if (iocbp == &buffers[i].iocb) {
1556 ATF_REQUIRE(buffers[i].done == false);
1557 ATF_REQUIRE(rval == sb.st_blksize);
1558 buffers[i].done = true;
1559 goto next;
1560 }
1561 }
1562
1563 ATF_REQUIRE_MSG(false, "unmatched AIO request");
1564 }
1565
1566 for (i = 0; i < nitems(buffers); i++)
1567 ATF_REQUIRE_MSG(buffers[i].done,
1568 "AIO request %u did not complete", i);
1569
1570 close(fd);
1571 }
1572
1573 ATF_TC_WITHOUT_HEAD(aio_fsync_sync_test);
1574 ATF_TC_BODY(aio_fsync_sync_test, tc)
1575 {
1576 aio_fsync_test(O_SYNC);
1577 }
1578
1579 ATF_TC_WITHOUT_HEAD(aio_fsync_dsync_test);
1580 ATF_TC_BODY(aio_fsync_dsync_test, tc)
1581 {
1582 aio_fsync_test(O_DSYNC);
1583 }
1584
1585 /*
1586 * We shouldn't be able to DoS the system by setting iov_len to an insane
1587 * value
1588 */
1589 ATF_TC_WITHOUT_HEAD(aio_writev_dos_iov_len);
1590 ATF_TC_BODY(aio_writev_dos_iov_len, tc)
1591 {
1592 struct aiocb aio;
1593 const struct aiocb *const iocbs[] = {&aio};
1594 const char *wbuf = "Hello, world!";
1595 struct iovec iov[1];
1596 ssize_t r;
1597 int fd;
1598
1599 ATF_REQUIRE_UNSAFE_AIO();
1600
1601 fd = open("testfile", O_RDWR | O_CREAT, 0600);
1602 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));
1603
1604 iov[0].iov_base = __DECONST(void*, wbuf);
1605 iov[0].iov_len = 1 << 30;
1606 bzero(&aio, sizeof(aio));
1607 aio.aio_fildes = fd;
1608 aio.aio_offset = 0;
1609 aio.aio_iov = iov;
1610 aio.aio_iovcnt = 1;
1611
1612 r = aio_writev(&aio);
1613 ATF_CHECK_EQ_MSG(0, r, "aio_writev returned %zd", r);
1614 ATF_REQUIRE_EQ(0, aio_suspend(iocbs, 1, NULL));
1615 r = aio_return(&aio);
1616 ATF_CHECK_EQ_MSG(-1, r, "aio_return returned %zd", r);
1617 ATF_CHECK_MSG(errno == EFAULT || errno == EINVAL,
1618 "aio_writev: %s", strerror(errno));
1619
1620 close(fd);
1621 }
1622
1623 /*
1624 * We shouldn't be able to DoS the system by setting aio_iovcnt to an insane
1625 * value
1626 */
1627 ATF_TC_WITHOUT_HEAD(aio_writev_dos_iovcnt);
1628 ATF_TC_BODY(aio_writev_dos_iovcnt, tc)
1629 {
1630 struct aiocb aio;
1631 const char *wbuf = "Hello, world!";
1632 struct iovec iov[1];
1633 ssize_t len;
1634 int fd;
1635
1636 ATF_REQUIRE_UNSAFE_AIO();
1637
1638 fd = open("testfile", O_RDWR | O_CREAT, 0600);
1639 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));
1640
1641 len = strlen(wbuf);
1642 iov[0].iov_base = __DECONST(void*, wbuf);
1643 iov[0].iov_len = len;
1644 bzero(&aio, sizeof(aio));
1645 aio.aio_fildes = fd;
1646 aio.aio_offset = 0;
1647 aio.aio_iov = iov;
1648 aio.aio_iovcnt = 1 << 30;
1649
1650 ATF_REQUIRE_EQ(-1, aio_writev(&aio));
1651 ATF_CHECK_EQ(EINVAL, errno);
1652
1653 close(fd);
1654 }
1655
1656 ATF_TC_WITH_CLEANUP(aio_writev_efault);
1657 ATF_TC_HEAD(aio_writev_efault, tc)
1658 {
1659 atf_tc_set_md_var(tc, "descr",
1660 "Vectored AIO should gracefully handle invalid addresses");
1661 atf_tc_set_md_var(tc, "require.user", "root");
1662 }
1663 ATF_TC_BODY(aio_writev_efault, tc)
1664 {
1665 struct aiocb aio;
1666 ssize_t buflen;
1667 char *buffer;
1668 struct iovec iov[2];
1669 long seed;
1670 int fd;
1671
1672 ATF_REQUIRE_UNSAFE_AIO();
1673
1674 fd = aio_md_setup();
1675
1676 seed = random();
1677 buflen = 4096;
1678 buffer = malloc(buflen);
1679 aio_fill_buffer(buffer, buflen, seed);
1680 iov[0].iov_base = buffer;
1681 iov[0].iov_len = buflen;
1682 iov[1].iov_base = (void*)-1; /* Invalid! */
1683 iov[1].iov_len = buflen;
1684 bzero(&aio, sizeof(aio));
1685 aio.aio_fildes = fd;
1686 aio.aio_offset = 0;
1687 aio.aio_iov = iov;
1688 aio.aio_iovcnt = nitems(iov);
1689
1690 ATF_REQUIRE_EQ(-1, aio_writev(&aio));
1691 ATF_CHECK_EQ(EFAULT, errno);
1692
1693 close(fd);
1694 }
1695 ATF_TC_CLEANUP(aio_writev_efault, tc)
1696 {
1697 aio_md_cleanup();
1698 }
1699
1700 ATF_TC_WITHOUT_HEAD(aio_writev_empty_file_poll);
1701 ATF_TC_BODY(aio_writev_empty_file_poll, tc)
1702 {
1703 struct aiocb aio;
1704 int fd;
1705
1706 ATF_REQUIRE_UNSAFE_AIO();
1707
1708 fd = open("testfile", O_RDWR | O_CREAT, 0600);
1709 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));
1710
1711 bzero(&aio, sizeof(aio));
1712 aio.aio_fildes = fd;
1713 aio.aio_offset = 0;
1714 aio.aio_iovcnt = 0;
1715
1716 ATF_REQUIRE_EQ(0, aio_writev(&aio));
1717 ATF_REQUIRE_EQ(0, suspend(&aio));
1718
1719 close(fd);
1720 }
1721
1722 ATF_TC_WITHOUT_HEAD(aio_writev_empty_file_signal);
1723 ATF_TC_BODY(aio_writev_empty_file_signal, tc)
1724 {
1725 struct aiocb aio;
1726 int fd;
1727
1728 ATF_REQUIRE_UNSAFE_AIO();
1729
1730 fd = open("testfile", O_RDWR | O_CREAT, 0600);
1731 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));
1732
1733 bzero(&aio, sizeof(aio));
1734 aio.aio_fildes = fd;
1735 aio.aio_offset = 0;
1736 aio.aio_iovcnt = 0;
1737 aio.aio_sigevent = *setup_signal();
1738
1739 ATF_REQUIRE_EQ(0, aio_writev(&aio));
1740 ATF_REQUIRE_EQ(0, poll_signaled(&aio));
1741
1742 close(fd);
1743 }
1744
1745 /*
1746 * Use an aiocb with kqueue and EV_ONESHOT. kqueue should deliver the event
1747 * only once, even if the user doesn't promptly call aio_return.
1748 */
1749 ATF_TC_WITHOUT_HEAD(ev_oneshot);
1750 ATF_TC_BODY(ev_oneshot, tc)
1751 {
1752 int fd, kq, nevents;
1753 struct aiocb iocb;
1754 struct kevent events[1];
1755 struct timespec timeout;
1756
1757 kq = kqueue();
1758 ATF_REQUIRE(kq >= 0);
1759
1760 fd = open(FILE_PATHNAME, O_RDWR | O_CREAT, 0600);
1761 ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));
1762
1763 memset(&iocb, 0, sizeof(iocb));
1764 iocb.aio_fildes = fd;
1765 iocb.aio_sigevent.sigev_notify_kqueue = kq;
1766 iocb.aio_sigevent.sigev_value.sival_ptr = (void*)0xdeadbeef;
1767 iocb.aio_sigevent.sigev_notify_kevent_flags = EV_ONESHOT;
1768 iocb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
1769
1770 ATF_CHECK_EQ(0, aio_fsync(O_SYNC, &iocb));
1771
1772 nevents = kevent(kq, NULL, 0, events, 1, NULL);
1773 ATF_CHECK_EQ(1, nevents);
1774 ATF_CHECK_EQ(events[0].ident, (uintptr_t) &iocb);
1775 ATF_CHECK_EQ(events[0].filter, EVFILT_AIO);
1776 ATF_CHECK_EQ(events[0].flags, EV_EOF | EV_ONESHOT);
1777 ATF_CHECK_EQ(events[0].fflags, 0);
1778 ATF_CHECK_EQ(events[0].data, 0);
1779 ATF_CHECK_EQ((uintptr_t)events[0].udata, 0xdeadbeef);
1780
1781 /*
1782 * Even though we haven't called aio_return, kevent will not return the
1783 * event again due to EV_ONESHOT.
1784 */
1785 timeout.tv_sec = 0;
1786 timeout.tv_nsec = 100000000;
1787 nevents = kevent(kq, NULL, 0, events, 1, &timeout);
1788 ATF_CHECK_EQ(0, nevents);
1789
1790 ATF_CHECK_EQ(0, aio_return(&iocb));
1791 close(fd);
1792 close(kq);
1793 }
1794
1795
1796 // aio_writev and aio_readv should still work even if the iovcnt is greater
1797 // than the number of buffered AIO operations permitted per process.
1798 ATF_TC_WITH_CLEANUP(vectored_big_iovcnt);
1799 ATF_TC_HEAD(vectored_big_iovcnt, tc)
1800 {
1801 atf_tc_set_md_var(tc, "descr",
1802 "Vectored AIO should still work even if the iovcnt is greater than "
1803 "the number of buffered AIO operations permitted by the process");
1804 atf_tc_set_md_var(tc, "require.user", "root");
1805 }
1806 ATF_TC_BODY(vectored_big_iovcnt, tc)
1807 {
1808 struct aiocb aio;
1809 struct iovec *iov;
1810 ssize_t len, buflen;
1811 char *buffer;
1812 const char *oid = "vfs.aio.max_buf_aio";
1813 long seed;
1814 int max_buf_aio;
1815 int fd, i;
1816 size_t sysctl_len = sizeof(max_buf_aio);
1817
1818 ATF_REQUIRE_UNSAFE_AIO();
1819
1820 if (sysctlbyname(oid, &max_buf_aio, &sysctl_len, NULL, 0) == -1)
1821 atf_libc_error(errno, "Failed to read %s", oid);
1822
1823 seed = random();
1824 buflen = 512 * (max_buf_aio + 1);
1825 buffer = malloc(buflen);
1826 aio_fill_buffer(buffer, buflen, seed);
1827 iov = calloc(max_buf_aio + 1, sizeof(struct iovec));
1828
1829 fd = aio_md_setup();
1830
1831 bzero(&aio, sizeof(aio));
1832 aio.aio_fildes = fd;
1833 aio.aio_offset = 0;
1834 for (i = 0; i < max_buf_aio + 1; i++) {
1835 iov[i].iov_base = &buffer[i * 512];
1836 iov[i].iov_len = 512;
1837 }
1838 aio.aio_iov = iov;
1839 aio.aio_iovcnt = max_buf_aio + 1;
1840
1841 if (aio_writev(&aio) < 0)
1842 atf_tc_fail("aio_writev failed: %s", strerror(errno));
1843
1844 len = poll(&aio);
1845 if (len < 0)
1846 atf_tc_fail("aio failed: %s", strerror(errno));
1847
1848 if (len != buflen)
1849 atf_tc_fail("aio short write (%jd)", (intmax_t)len);
1850
1851 bzero(&aio, sizeof(aio));
1852 aio.aio_fildes = fd;
1853 aio.aio_offset = 0;
1854 aio.aio_iov = iov;
1855 aio.aio_iovcnt = max_buf_aio + 1;
1856
1857 if (aio_readv(&aio) < 0)
1858 atf_tc_fail("aio_readv failed: %s", strerror(errno));
1859
1860 len = poll(&aio);
1861 if (len < 0)
1862 atf_tc_fail("aio failed: %s", strerror(errno));
1863
1864 if (len != buflen)
1865 atf_tc_fail("aio short read (%jd)", (intmax_t)len);
1866
1867 if (aio_test_buffer(buffer, buflen, seed) == 0)
1868 atf_tc_fail("buffer mismatched");
1869
1870 close(fd);
1871 }
1872 ATF_TC_CLEANUP(vectored_big_iovcnt, tc)
1873 {
1874 aio_md_cleanup();
1875 }
1876
1877 ATF_TC_WITHOUT_HEAD(vectored_file_poll);
1878 ATF_TC_BODY(vectored_file_poll, tc)
1879 {
1880 aio_file_test(poll, NULL, true);
1881 }
1882
1883 ATF_TC_WITHOUT_HEAD(vectored_thread);
1884 ATF_TC_BODY(vectored_thread, tc)
1885 {
1886 aio_file_test(poll_signaled, setup_thread(), true);
1887 }
1888
1889 ATF_TC_WITH_CLEANUP(vectored_md_poll);
1890 ATF_TC_HEAD(vectored_md_poll, tc)
1891 {
1892 atf_tc_set_md_var(tc, "require.user", "root");
1893 }
1894 ATF_TC_BODY(vectored_md_poll, tc)
1895 {
1896 aio_md_test(poll, NULL, true);
1897 }
1898 ATF_TC_CLEANUP(vectored_md_poll, tc)
1899 {
1900 aio_md_cleanup();
1901 }
1902
1903 ATF_TC_WITHOUT_HEAD(vectored_socket_poll);
1904 ATF_TC_BODY(vectored_socket_poll, tc)
1905 {
1906 aio_unix_socketpair_test(poll, NULL, true);
1907 }
1908
1909 // aio_writev and aio_readv should still work even if the iov contains elements
1910 // that aren't a multiple of the device's sector size, and even if the total
1911 // amount of I/O _is_ a multiple of the device's sector size.
1912 ATF_TC_WITH_CLEANUP(vectored_unaligned);
1913 ATF_TC_HEAD(vectored_unaligned, tc)
1914 {
1915 atf_tc_set_md_var(tc, "descr",
1916 "Vectored AIO should still work even if the iov contains elements "
1917 "that aren't a multiple of the sector size.");
1918 atf_tc_set_md_var(tc, "require.user", "root");
1919 atf_tc_set_md_var(tc, "require.kmods", "zfs");
1920 }
1921 ATF_TC_BODY(vectored_unaligned, tc)
1922 {
1923 struct aio_context ac;
1924 struct aiocb aio;
1925 struct iovec iov[3];
1926 ssize_t len, total_len;
1927 int fd;
1928
1929 if (atf_tc_get_config_var_as_bool_wd(tc, "ci", false))
1930 atf_tc_skip("https://bugs.freebsd.org/258766");
1931
1932 ATF_REQUIRE_UNSAFE_AIO();
1933
1934 /*
1935 * Use a zvol with volmode=dev, so it will allow .d_write with
1936 * unaligned uio. geom devices use physio, which doesn't allow that.
1937 */
1938 fd = aio_zvol_setup(atf_tc_get_ident(tc));
1939 aio_context_init(&ac, fd, fd, FILE_LEN);
1940
1941 /* Break the buffer into 3 parts:
1942 * * A 4kB part, aligned to 4kB
1943 * * Two other parts that add up to 4kB:
1944 * - 256B
1945 * - 4kB - 256B
1946 */
1947 iov[0].iov_base = ac.ac_buffer;
1948 iov[0].iov_len = 4096;
1949 iov[1].iov_base = (void*)((uintptr_t)iov[0].iov_base + iov[0].iov_len);
1950 iov[1].iov_len = 256;
1951 iov[2].iov_base = (void*)((uintptr_t)iov[1].iov_base + iov[1].iov_len);
1952 iov[2].iov_len = 4096 - iov[1].iov_len;
1953 total_len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
1954 bzero(&aio, sizeof(aio));
1955 aio.aio_fildes = ac.ac_write_fd;
1956 aio.aio_offset = 0;
1957 aio.aio_iov = iov;
1958 aio.aio_iovcnt = 3;
1959
1960 if (aio_writev(&aio) < 0)
1961 atf_tc_fail("aio_writev failed: %s", strerror(errno));
1962
1963 len = poll(&aio);
1964 if (len < 0)
1965 atf_tc_fail("aio failed: %s", strerror(errno));
1966
1967 if (len != total_len)
1968 atf_tc_fail("aio short write (%jd)", (intmax_t)len);
1969
1970 bzero(&aio, sizeof(aio));
1971 aio.aio_fildes = ac.ac_read_fd;
1972 aio.aio_offset = 0;
1973 aio.aio_iov = iov;
1974 aio.aio_iovcnt = 3;
1975
1976 if (aio_readv(&aio) < 0)
1977 atf_tc_fail("aio_readv failed: %s", strerror(errno));
1978 len = poll(&aio);
1979
1980 ATF_REQUIRE_MSG(aio_test_buffer(ac.ac_buffer, total_len,
1981 ac.ac_seed) != 0, "aio_test_buffer: internal error");
1982
1983 close(fd);
1984 }
1985 ATF_TC_CLEANUP(vectored_unaligned, tc)
1986 {
1987 aio_zvol_cleanup(atf_tc_get_ident(tc));
1988 }
1989
1990 static void
1991 aio_zvol_test(completion comp, struct sigevent *sev, bool vectored,
1992 const char *unique)
1993 {
1994 struct aio_context ac;
1995 int fd;
1996
1997 fd = aio_zvol_setup(unique);
1998 aio_context_init(&ac, fd, fd, MD_LEN);
1999 if (vectored) {
2000 aio_writev_test(&ac, comp, sev);
2001 aio_readv_test(&ac, comp, sev);
2002 } else {
2003 aio_write_test(&ac, comp, sev);
2004 aio_read_test(&ac, comp, sev);
2005 }
2006
2007 close(fd);
2008 }
2009
2010 /*
2011 * Note that unlike md, the zvol is not a geom device, does not allow unmapped
2012 * buffers, and does not use physio.
2013 */
2014 ATF_TC_WITH_CLEANUP(vectored_zvol_poll);
2015 ATF_TC_HEAD(vectored_zvol_poll, tc)
2016 {
2017 atf_tc_set_md_var(tc, "require.user", "root");
2018 atf_tc_set_md_var(tc, "require.kmods", "zfs");
2019 }
2020 ATF_TC_BODY(vectored_zvol_poll, tc)
2021 {
2022 if (atf_tc_get_config_var_as_bool_wd(tc, "ci", false))
2023 atf_tc_skip("https://bugs.freebsd.org/258766");
2024 aio_zvol_test(poll, NULL, true, atf_tc_get_ident(tc));
2025 }
2026 ATF_TC_CLEANUP(vectored_zvol_poll, tc)
2027 {
2028 aio_zvol_cleanup(atf_tc_get_ident(tc));
2029 }
2030
2031 ATF_TP_ADD_TCS(tp)
2032 {
2033
2034 /* Test every file type with every completion method */
2035 ATF_TP_ADD_TC(tp, file_kq);
2036 ATF_TP_ADD_TC(tp, file_poll);
2037 ATF_TP_ADD_TC(tp, file_signal);
2038 ATF_TP_ADD_TC(tp, file_suspend);
2039 ATF_TP_ADD_TC(tp, file_thread);
2040 ATF_TP_ADD_TC(tp, file_waitcomplete);
2041 ATF_TP_ADD_TC(tp, fifo_kq);
2042 ATF_TP_ADD_TC(tp, fifo_poll);
2043 ATF_TP_ADD_TC(tp, fifo_signal);
2044 ATF_TP_ADD_TC(tp, fifo_suspend);
2045 ATF_TP_ADD_TC(tp, fifo_thread);
2046 ATF_TP_ADD_TC(tp, fifo_waitcomplete);
2047 ATF_TP_ADD_TC(tp, socket_kq);
2048 ATF_TP_ADD_TC(tp, socket_poll);
2049 ATF_TP_ADD_TC(tp, socket_signal);
2050 ATF_TP_ADD_TC(tp, socket_suspend);
2051 ATF_TP_ADD_TC(tp, socket_thread);
2052 ATF_TP_ADD_TC(tp, socket_waitcomplete);
2053 ATF_TP_ADD_TC(tp, pty_kq);
2054 ATF_TP_ADD_TC(tp, pty_poll);
2055 ATF_TP_ADD_TC(tp, pty_signal);
2056 ATF_TP_ADD_TC(tp, pty_suspend);
2057 ATF_TP_ADD_TC(tp, pty_thread);
2058 ATF_TP_ADD_TC(tp, pty_waitcomplete);
2059 ATF_TP_ADD_TC(tp, pipe_kq);
2060 ATF_TP_ADD_TC(tp, pipe_poll);
2061 ATF_TP_ADD_TC(tp, pipe_signal);
2062 ATF_TP_ADD_TC(tp, pipe_suspend);
2063 ATF_TP_ADD_TC(tp, pipe_thread);
2064 ATF_TP_ADD_TC(tp, pipe_waitcomplete);
2065 ATF_TP_ADD_TC(tp, md_kq);
2066 ATF_TP_ADD_TC(tp, md_poll);
2067 ATF_TP_ADD_TC(tp, md_signal);
2068 ATF_TP_ADD_TC(tp, md_suspend);
2069 ATF_TP_ADD_TC(tp, md_thread);
2070 ATF_TP_ADD_TC(tp, md_waitcomplete);
2071
2072 /* Various special cases */
2073 ATF_TP_ADD_TC(tp, aio_fsync_errors);
2074 ATF_TP_ADD_TC(tp, aio_fsync_sync_test);
2075 ATF_TP_ADD_TC(tp, aio_fsync_dsync_test);
2076 ATF_TP_ADD_TC(tp, aio_large_read_test);
2077 ATF_TP_ADD_TC(tp, aio_socket_two_reads);
2078 ATF_TP_ADD_TC(tp, aio_socket_blocking_short_write);
2079 ATF_TP_ADD_TC(tp, aio_socket_blocking_short_write_vectored);
2080 ATF_TP_ADD_TC(tp, aio_socket_listen_fail);
2081 ATF_TP_ADD_TC(tp, aio_socket_listen_pending);
2082 ATF_TP_ADD_TC(tp, aio_socket_short_write_cancel);
2083 ATF_TP_ADD_TC(tp, aio_socket_shutdown);
2084 ATF_TP_ADD_TC(tp, aio_writev_dos_iov_len);
2085 ATF_TP_ADD_TC(tp, aio_writev_dos_iovcnt);
2086 ATF_TP_ADD_TC(tp, aio_writev_efault);
2087 ATF_TP_ADD_TC(tp, aio_writev_empty_file_poll);
2088 ATF_TP_ADD_TC(tp, aio_writev_empty_file_signal);
2089 ATF_TP_ADD_TC(tp, ev_oneshot);
2090 ATF_TP_ADD_TC(tp, vectored_big_iovcnt);
2091 ATF_TP_ADD_TC(tp, vectored_file_poll);
2092 ATF_TP_ADD_TC(tp, vectored_md_poll);
2093 ATF_TP_ADD_TC(tp, vectored_zvol_poll);
2094 ATF_TP_ADD_TC(tp, vectored_unaligned);
2095 ATF_TP_ADD_TC(tp, vectored_socket_poll);
2096 ATF_TP_ADD_TC(tp, vectored_thread);
2097
2098 return (atf_no_error());
2099 }
2100