xref: /freebsd/tests/sys/fs/fusefs/read.cc (revision f74b33d9dbdc1235d409ab098d71277d33b8b8e1)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2019 The FreeBSD Foundation
5  *
6  * This software was developed by BFF Storage Systems, LLC under sponsorship
7  * from the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 extern "C" {
32 #include <sys/param.h>
33 #include <sys/mman.h>
34 #include <sys/socket.h>
35 #include <sys/sysctl.h>
36 #include <sys/uio.h>
37 
38 #include <aio.h>
39 #include <fcntl.h>
40 #include <semaphore.h>
41 #include <unistd.h>
42 }
43 
44 #include "mockfs.hh"
45 #include "utils.hh"
46 
47 using namespace testing;
48 
/* Base fixture for read tests: wraps FuseTest with a lookup helper. */
class Read: public FuseTest {

public:
/*
 * Expect a FUSE_LOOKUP of relpath resolving to a regular file (mode 0644)
 * with the given inode and size, valid for one use.
 */
void expect_lookup(const char *relpath, uint64_t ino, uint64_t size)
{
	FuseTest::expect_lookup(relpath, ino, S_IFREG | 0644, size, 1);
}
};
57 
/* Like Read, but mounts with FUSE protocol version 7.8 */
class Read_7_8: public FuseTest {
public:
virtual void SetUp() {
	/* Force the mount to negotiate protocol 7.8 */
	m_kernel_minor_version = 8;
	FuseTest::SetUp();
}

/* Same as Read::expect_lookup, but using the 7.8-format entry response */
void expect_lookup(const char *relpath, uint64_t ino, uint64_t size)
{
	FuseTest::expect_lookup_7_8(relpath, ino, S_IFREG | 0644, size, 1);
}
};
70 
/* Fixture for tests that use POSIX AIO against the fuse mount */
class AioRead: public Read {
public:
virtual void SetUp() {
	const char *node = "vfs.aio.enable_unsafe";
	int val = 0;
	size_t size = sizeof(val);

	FuseTest::SetUp();

	/* Skip unless unsafe AIO has been administratively enabled */
	ASSERT_EQ(0, sysctlbyname(node, &val, &size, NULL, 0))
		<< strerror(errno);
	if (!val)
		GTEST_SKIP() <<
			"vfs.aio.enable_unsafe must be set for this test";
}
};
87 
/* Like AioRead, but the daemon negotiates FUSE_ASYNC_READ at init time */
class AsyncRead: public AioRead {
	virtual void SetUp() {
		m_init_flags = FUSE_ASYNC_READ;
		AioRead::SetUp();
	}
};
94 
/*
 * Parameterized fixture for readahead tests.
 * Param 0: whether to mount with read clustering disabled (noclusterr).
 * Param 1: readahead size, in units of vfs.maxbcachebuf blocks.
 */
class ReadAhead: public Read,
		 public WithParamInterface<tuple<bool, int>>
{
	virtual void SetUp() {
		int val;
		const char *node = "vfs.maxbcachebuf";
		size_t size = sizeof(val);
		/* Scale the readahead setting by the cache block size */
		ASSERT_EQ(0, sysctlbyname(node, &val, &size, NULL, 0))
			<< strerror(errno);

		m_maxreadahead = val * get<1>(GetParam());
		m_noclusterr = get<0>(GetParam());
		Read::SetUp();
	}
};
110 
111 /* AIO reads need to set the header's pid field correctly */
112 /* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236379 */
113 TEST_F(AioRead, aio_read)
114 {
115 	const char FULLPATH[] = "mountpoint/some_file.txt";
116 	const char RELPATH[] = "some_file.txt";
117 	const char *CONTENTS = "abcdefgh";
118 	uint64_t ino = 42;
119 	int fd;
120 	ssize_t bufsize = strlen(CONTENTS);
121 	char buf[bufsize];
122 	struct aiocb iocb, *piocb;
123 
124 	expect_lookup(RELPATH, ino, bufsize);
125 	expect_open(ino, 0, 1);
126 	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
127 
128 	fd = open(FULLPATH, O_RDONLY);
129 	ASSERT_LE(0, fd) << strerror(errno);
130 
131 	iocb.aio_nbytes = bufsize;
132 	iocb.aio_fildes = fd;
133 	iocb.aio_buf = buf;
134 	iocb.aio_offset = 0;
135 	iocb.aio_sigevent.sigev_notify = SIGEV_NONE;
136 	ASSERT_EQ(0, aio_read(&iocb)) << strerror(errno);
137 	ASSERT_EQ(bufsize, aio_waitcomplete(&piocb, NULL)) << strerror(errno);
138 	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
139 
140 	leak(fd);
141 }
142 
143 /*
144  * Without the FUSE_ASYNC_READ mount option, fuse(4) should ensure that there
145  * is at most one outstanding read operation per file handle
146  */
TEST_F(AioRead, async_read_disabled)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = 50;
	char buf0[bufsize], buf1[bufsize];
	off_t off0 = 0;
	/* The second read targets a different cache block than the first */
	off_t off1 = m_maxbcachebuf;
	struct aiocb iocb0, iocb1;
	volatile sig_atomic_t read_count = 0;

	expect_lookup(RELPATH, ino, 131072);
	expect_open(ino, 0, 1);
	/* Count FUSE_READs at off0, but deliberately never respond */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == FH &&
				in.body.read.offset == (uint64_t)off0);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke([&](auto in __unused, auto &out __unused) {
		read_count++;
		/* Filesystem is slow to respond */
	}));
	/* Likewise count (and stall) any FUSE_READ at off1 */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == FH &&
				in.body.read.offset == (uint64_t)off1);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke([&](auto in __unused, auto &out __unused) {
		read_count++;
		/* Filesystem is slow to respond */
	}));

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/*
	 * Submit two AIO read requests, and respond to neither.  If the
	 * filesystem ever gets the second read request, then we failed to
	 * limit outstanding reads.
	 */
	iocb0.aio_nbytes = bufsize;
	iocb0.aio_fildes = fd;
	iocb0.aio_buf = buf0;
	iocb0.aio_offset = off0;
	iocb0.aio_sigevent.sigev_notify = SIGEV_NONE;
	ASSERT_EQ(0, aio_read(&iocb0)) << strerror(errno);

	iocb1.aio_nbytes = bufsize;
	iocb1.aio_fildes = fd;
	iocb1.aio_buf = buf1;
	iocb1.aio_offset = off1;
	iocb1.aio_sigevent.sigev_notify = SIGEV_NONE;
	ASSERT_EQ(0, aio_read(&iocb1)) << strerror(errno);

	/*
	 * Sleep for awhile to make sure the kernel has had a chance to issue
	 * the second read, even though the first has not yet returned
	 */
	nap();
	/* Only the first read should have reached the daemon */
	EXPECT_EQ(read_count, 1);

	m_mock->kill_daemon();
	/* Wait for AIO activity to complete, but ignore errors */
	(void)aio_waitcomplete(NULL, NULL);

	leak(fd);
}
222 
223 /*
224  * With the FUSE_ASYNC_READ mount option, fuse(4) may issue multiple
225  * simultaneous read requests on the same file handle.
226  */
TEST_F(AsyncRead, async_read)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = 50;
	char buf0[bufsize], buf1[bufsize];
	off_t off0 = 0;
	/* The two reads land in different cache blocks */
	off_t off1 = m_maxbcachebuf;
	off_t fsize = 2 * m_maxbcachebuf;
	struct aiocb iocb0, iocb1;
	sem_t sem;

	ASSERT_EQ(0, sem_init(&sem, 0, 0)) << strerror(errno);

	expect_lookup(RELPATH, ino, fsize);
	expect_open(ino, 0, 1);
	/* Post the semaphore when the read at off0 arrives; never respond */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == FH &&
				in.body.read.offset == (uint64_t)off0);
		}, Eq(true)),
		_)
	).WillOnce(Invoke([&](auto in __unused, auto &out __unused) {
		sem_post(&sem);
		/* Filesystem is slow to respond */
	}));
	/* Likewise for the read at off1 */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == FH &&
				in.body.read.offset == (uint64_t)off1);
		}, Eq(true)),
		_)
	).WillOnce(Invoke([&](auto in __unused, auto &out __unused) {
		sem_post(&sem);
		/* Filesystem is slow to respond */
	}));

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/*
	 * Submit two AIO read requests, but respond to neither.  Ensure that
	 * we received both.
	 */
	iocb0.aio_nbytes = bufsize;
	iocb0.aio_fildes = fd;
	iocb0.aio_buf = buf0;
	iocb0.aio_offset = off0;
	iocb0.aio_sigevent.sigev_notify = SIGEV_NONE;
	ASSERT_EQ(0, aio_read(&iocb0)) << strerror(errno);

	iocb1.aio_nbytes = bufsize;
	iocb1.aio_fildes = fd;
	iocb1.aio_buf = buf1;
	iocb1.aio_offset = off1;
	iocb1.aio_sigevent.sigev_notify = SIGEV_NONE;
	ASSERT_EQ(0, aio_read(&iocb1)) << strerror(errno);

	/* Wait until both reads have reached the daemon */
	ASSERT_EQ(0, sem_wait(&sem)) << strerror(errno);
	ASSERT_EQ(0, sem_wait(&sem)) << strerror(errno);

	m_mock->kill_daemon();
	/* Wait for AIO activity to complete, but ignore errors */
	(void)aio_waitcomplete(NULL, NULL);

	leak(fd);
}
301 
302 /* 0-length reads shouldn't cause any confusion */
303 TEST_F(Read, direct_io_read_nothing)
304 {
305 	const char FULLPATH[] = "mountpoint/some_file.txt";
306 	const char RELPATH[] = "some_file.txt";
307 	uint64_t ino = 42;
308 	int fd;
309 	uint64_t offset = 100;
310 	char buf[80];
311 
312 	expect_lookup(RELPATH, ino, offset + 1000);
313 	expect_open(ino, FOPEN_DIRECT_IO, 1);
314 
315 	fd = open(FULLPATH, O_RDONLY);
316 	ASSERT_LE(0, fd) << strerror(errno);
317 
318 	ASSERT_EQ(0, pread(fd, buf, 0, offset)) << strerror(errno);
319 	leak(fd);
320 }
321 
322 /*
323  * With direct_io, reads should not fill the cache.  They should go straight to
324  * the daemon
325  */
TEST_F(Read, direct_io_pread)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	uint64_t offset = 100;
	ssize_t bufsize = strlen(CONTENTS);
	char buf[bufsize];

	expect_lookup(RELPATH, ino, offset + bufsize);
	expect_open(ino, FOPEN_DIRECT_IO, 1);
	/* Expect the read at exactly the requested offset and size */
	expect_read(ino, offset, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, offset)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));

	// With FOPEN_DIRECT_IO, the cache should be bypassed.  The server will
	// get a 2nd read request.
	expect_read(ino, offset, bufsize, bufsize, CONTENTS);
	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, offset)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
	leak(fd);
}
354 
355 /*
356  * With direct_io, filesystems are allowed to return less data than is
357  * requested.  fuse(4) should return a short read to userland.
358  */
TEST_F(Read, direct_io_short_read)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	uint64_t offset = 100;
	ssize_t bufsize = strlen(CONTENTS);
	ssize_t halfbufsize = bufsize / 2;
	char buf[bufsize];

	expect_lookup(RELPATH, ino, offset + bufsize);
	expect_open(ino, FOPEN_DIRECT_IO, 1);
	/* The server returns only half the requested bytes */
	expect_read(ino, offset, bufsize, halfbufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* The short count should be passed straight through to userland */
	ASSERT_EQ(halfbufsize, pread(fd, buf, bufsize, offset))
		<< strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, halfbufsize));
	leak(fd);
}
383 
/* A read error from the daemon should be returned to the caller as errno */
TEST_F(Read, eio)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	char buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	/* Answer the FUSE_READ with an EIO error */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnErrno(EIO)));

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* read(2) should fail with the daemon's error code */
	ASSERT_EQ(-1, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(EIO, errno);
	leak(fd);
}
410 
411 /*
412  * If the server returns a short read when direct io is not in use, that
413  * indicates EOF, because of a server-side truncation.  We should invalidate
414  * all cached attributes.  We may update the file size,
415  */
TEST_F(Read, eof)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	uint64_t offset = 100;
	ssize_t bufsize = strlen(CONTENTS);
	ssize_t partbufsize = 3 * bufsize / 4;
	ssize_t r;
	char buf[bufsize];
	struct stat sb;

	expect_lookup(RELPATH, ino, offset + bufsize);
	expect_open(ino, 0, 1);
	/* Return fewer bytes than requested: server-side truncation */
	expect_read(ino, 0, offset + bufsize, offset + partbufsize, CONTENTS);
	/* Expect a getattr reporting the smaller size after the short read */
	expect_getattr(ino, offset + partbufsize);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	r = pread(fd, buf, bufsize, offset);
	ASSERT_LE(0, r) << strerror(errno);
	/* The read should stop at the new EOF */
	EXPECT_EQ(partbufsize, r) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb));
	/* fstat should observe the reduced file size */
	EXPECT_EQ((off_t)(offset + partbufsize), sb.st_size);
	leak(fd);
}
445 
446 /* Like Read.eof, but causes an entire buffer to be invalidated */
TEST_F(Read, eof_of_whole_buffer)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	/* File originally spans two full cache blocks plus a tail */
	off_t old_filesize = m_maxbcachebuf * 2 + bufsize;
	char buf[bufsize];
	struct stat sb;

	expect_lookup(RELPATH, ino, old_filesize);
	expect_open(ino, 0, 1);
	expect_read(ino, 2 * m_maxbcachebuf, bufsize, bufsize, CONTENTS);
	/* The read of the 2nd block returns 0 bytes: EOF at m_maxbcachebuf */
	expect_read(ino, m_maxbcachebuf, m_maxbcachebuf, 0, CONTENTS);
	expect_getattr(ino, m_maxbcachebuf);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Cache the third block */
	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, m_maxbcachebuf * 2))
		<< strerror(errno);
	/* Try to read the 2nd block, but it's past EOF */
	ASSERT_EQ(0, pread(fd, buf, bufsize, m_maxbcachebuf))
		<< strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb));
	EXPECT_EQ((off_t)(m_maxbcachebuf), sb.st_size);
	leak(fd);
}
478 
479 /*
480  * With the keep_cache option, the kernel may keep its read cache across
481  * multiple open(2)s.
482  */
TEST_F(Read, keep_cache)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd0, fd1;
	ssize_t bufsize = strlen(CONTENTS);
	char buf[bufsize];

	/* Two lookups and two opens: one for each open(2) call below */
	FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, bufsize, 2);
	expect_open(ino, FOPEN_KEEP_CACHE, 2);
	/* Only a single FUSE_READ is expected, for both descriptors */
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd0 = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd0) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd0, buf, bufsize)) << strerror(errno);

	fd1 = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd1) << strerror(errno);

	/*
	 * This read should be serviced by cache, even though it's on the other
	 * file descriptor
	 */
	ASSERT_EQ(bufsize, read(fd1, buf, bufsize)) << strerror(errno);

	leak(fd0);
	leak(fd1);
}
513 
514 /*
515  * Without the keep_cache option, the kernel should drop its read caches on
516  * every open
517  */
TEST_F(Read, keep_cache_disabled)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd0, fd1;
	ssize_t bufsize = strlen(CONTENTS);
	char buf[bufsize];

	/* Two lookups and two opens: one for each open(2) call below */
	FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, bufsize, 2);
	/* Note: FOPEN_KEEP_CACHE is not set here */
	expect_open(ino, 0, 2);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd0 = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd0) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd0, buf, bufsize)) << strerror(errno);

	fd1 = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd1) << strerror(errno);

	/*
	 * This read should not be serviced by cache, even though it's on the
	 * original file descriptor
	 */
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
	ASSERT_EQ(0, lseek(fd0, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd0, buf, bufsize)) << strerror(errno);

	leak(fd0);
	leak(fd1);
}
550 
/* A read through an mmap'd page should fetch the file's contents */
TEST_F(Read, mmap)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t len;
	size_t bufsize = strlen(CONTENTS);
	void *p;

	/* Map a full page even though the file is only bufsize bytes long */
	len = getpagesize();

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	/* The FUSE_READ must be sized to the file, not to the page */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == Read::FH &&
				in.body.read.offset == 0 &&
				in.body.read.size == bufsize);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		out.header.len = sizeof(struct fuse_out_header) + bufsize;
		memmove(out.body.bytes, CONTENTS, bufsize);
	})));

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
	ASSERT_NE(MAP_FAILED, p) << strerror(errno);

	ASSERT_EQ(0, memcmp(p, CONTENTS, bufsize));

	ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
	leak(fd);
}
591 
592 /*
593  * A read via mmap comes up short, indicating that the file was truncated
594  * server-side.
595  */
TEST_F(Read, mmap_eof)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t len;
	size_t bufsize = strlen(CONTENTS);
	struct stat sb;
	void *p;

	len = getpagesize();

	/* The file initially claims to be a full cache block long */
	expect_lookup(RELPATH, ino, m_maxbcachebuf);
	expect_open(ino, 0, 1);
	/* Reply with only bufsize bytes to the block-sized read: a short read */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == Read::FH &&
				in.body.read.offset == 0 &&
				in.body.read.size == (uint32_t)m_maxbcachebuf);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		out.header.len = sizeof(struct fuse_out_header) + bufsize;
		memmove(out.body.bytes, CONTENTS, bufsize);
	})));
	/* Expect a getattr reporting the smaller size */
	expect_getattr(ino, bufsize);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
	ASSERT_NE(MAP_FAILED, p) << strerror(errno);

	/* The file size should be automatically truncated */
	ASSERT_EQ(0, memcmp(p, CONTENTS, bufsize));
	ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno);
	EXPECT_EQ((off_t)bufsize, sb.st_size);

	ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
	leak(fd);
}
641 
642 /*
643  * Just as when FOPEN_DIRECT_IO is used, reads with O_DIRECT should bypass
644  * cache and to straight to the daemon
645  */
TEST_F(Read, o_direct)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	char buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	// Fill the cache
	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));

	// Reads with o_direct should bypass the cache: the daemon must see a
	// second FUSE_READ even though the data was just cached
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
	ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno);
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));

	leak(fd);
}
676 
677 TEST_F(Read, pread)
678 {
679 	const char FULLPATH[] = "mountpoint/some_file.txt";
680 	const char RELPATH[] = "some_file.txt";
681 	const char *CONTENTS = "abcdefgh";
682 	uint64_t ino = 42;
683 	int fd;
684 	/*
685 	 * Set offset to a maxbcachebuf boundary so we'll be sure what offset
686 	 * to read from.  Without this, the read might start at a lower offset.
687 	 */
688 	uint64_t offset = m_maxbcachebuf;
689 	ssize_t bufsize = strlen(CONTENTS);
690 	char buf[bufsize];
691 
692 	expect_lookup(RELPATH, ino, offset + bufsize);
693 	expect_open(ino, 0, 1);
694 	expect_read(ino, offset, bufsize, bufsize, CONTENTS);
695 
696 	fd = open(FULLPATH, O_RDONLY);
697 	ASSERT_LE(0, fd) << strerror(errno);
698 
699 	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, offset)) << strerror(errno);
700 	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
701 	leak(fd);
702 }
703 
704 TEST_F(Read, read)
705 {
706 	const char FULLPATH[] = "mountpoint/some_file.txt";
707 	const char RELPATH[] = "some_file.txt";
708 	const char *CONTENTS = "abcdefgh";
709 	uint64_t ino = 42;
710 	int fd;
711 	ssize_t bufsize = strlen(CONTENTS);
712 	char buf[bufsize];
713 
714 	expect_lookup(RELPATH, ino, bufsize);
715 	expect_open(ino, 0, 1);
716 	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
717 
718 	fd = open(FULLPATH, O_RDONLY);
719 	ASSERT_LE(0, fd) << strerror(errno);
720 
721 	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
722 	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
723 
724 	leak(fd);
725 }
726 
/* Like Read.read, but with the mount negotiated at protocol 7.8 */
TEST_F(Read_7_8, read)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	char buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));

	leak(fd);
}
749 
750 /*
751  * If cacheing is enabled, the kernel should try to read an entire cache block
752  * at a time.
753  */
754 TEST_F(Read, cache_block)
755 {
756 	const char FULLPATH[] = "mountpoint/some_file.txt";
757 	const char RELPATH[] = "some_file.txt";
758 	const char *CONTENTS0 = "abcdefghijklmnop";
759 	uint64_t ino = 42;
760 	int fd;
761 	ssize_t bufsize = 8;
762 	ssize_t filesize = m_maxbcachebuf * 2;
763 	char *contents;
764 	char buf[bufsize];
765 	const char *contents1 = CONTENTS0 + bufsize;
766 
767 	contents = (char*)calloc(1, filesize);
768 	ASSERT_NE(NULL, contents);
769 	memmove(contents, CONTENTS0, strlen(CONTENTS0));
770 
771 	expect_lookup(RELPATH, ino, filesize);
772 	expect_open(ino, 0, 1);
773 	expect_read(ino, 0, m_maxbcachebuf, m_maxbcachebuf,
774 		contents);
775 
776 	fd = open(FULLPATH, O_RDONLY);
777 	ASSERT_LE(0, fd) << strerror(errno);
778 
779 	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
780 	ASSERT_EQ(0, memcmp(buf, CONTENTS0, bufsize));
781 
782 	/* A subsequent read should be serviced by cache */
783 	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
784 	ASSERT_EQ(0, memcmp(buf, contents1, bufsize));
785 	leak(fd);
786 }
787 
788 /* Reading with sendfile should work (though it obviously won't be 0-copy) */
TEST_F(Read, sendfile)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	size_t bufsize = strlen(CONTENTS);
	char buf[bufsize];
	/* A local socketpair serves as the sendfile destination */
	int sp[2];
	off_t sbytes;

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	/* Expect one FUSE_READ for the whole file and return its contents */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == Read::FH &&
				in.body.read.offset == 0 &&
				in.body.read.size == bufsize);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		out.header.len = sizeof(struct fuse_out_header) + bufsize;
		memmove(out.body.bytes, CONTENTS, bufsize);
	})));

	ASSERT_EQ(0, socketpair(PF_LOCAL, SOCK_STREAM, 0, sp))
		<< strerror(errno);
	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(0, sendfile(fd, sp[1], 0, bufsize, NULL, &sbytes, 0))
		<< strerror(errno);
	/* Read the data back from the other end of the socketpair */
	ASSERT_EQ(static_cast<ssize_t>(bufsize), read(sp[0], buf, bufsize))
		<< strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));

	close(sp[1]);
	close(sp[0]);
	leak(fd);
}
832 
833 /* sendfile should fail gracefully if fuse declines the read */
834 /* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236466 */
TEST_F(Read, DISABLED_sendfile_eio)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	int sp[2];
	off_t sbytes;

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	/* The daemon declines the read with EIO */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnErrno(EIO)));

	ASSERT_EQ(0, socketpair(PF_LOCAL, SOCK_STREAM, 0, sp))
		<< strerror(errno);
	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* sendfile should report failure rather than hang or succeed */
	ASSERT_NE(0, sendfile(fd, sp[1], 0, bufsize, NULL, &sbytes, 0));

	close(sp[1]);
	close(sp[0]);
	leak(fd);
}
866 
867 /*
868  * Sequential reads should use readahead.  And if allowed, large reads should
869  * be clustered.
870  */
871 TEST_P(ReadAhead, readahead) {
872 	const char FULLPATH[] = "mountpoint/some_file.txt";
873 	const char RELPATH[] = "some_file.txt";
874 	uint64_t ino = 42;
875 	int fd, maxcontig, clustersize;
876 	ssize_t bufsize = 4 * m_maxbcachebuf;
877 	ssize_t filesize = bufsize;
878 	uint64_t len;
879 	char *rbuf, *contents;
880 	off_t offs;
881 
882 	contents = (char*)malloc(filesize);
883 	ASSERT_NE(NULL, contents);
884 	memset(contents, 'X', filesize);
885 	rbuf = (char*)calloc(1, bufsize);
886 
887 	expect_lookup(RELPATH, ino, filesize);
888 	expect_open(ino, 0, 1);
889 	maxcontig = m_noclusterr ? m_maxbcachebuf :
890 		m_maxbcachebuf + m_maxreadahead;
891 	clustersize = MIN(maxcontig, m_maxphys);
892 	for (offs = 0; offs < bufsize; offs += clustersize) {
893 		len = std::min((size_t)clustersize, (size_t)(filesize - offs));
894 		expect_read(ino, offs, len, len, contents + offs);
895 	}
896 
897 	fd = open(FULLPATH, O_RDONLY);
898 	ASSERT_LE(0, fd) << strerror(errno);
899 
900 	/* Set the internal readahead counter to a "large" value */
901 	ASSERT_EQ(0, fcntl(fd, F_READAHEAD, 1'000'000'000)) << strerror(errno);
902 
903 	ASSERT_EQ(bufsize, read(fd, rbuf, bufsize)) << strerror(errno);
904 	ASSERT_EQ(0, memcmp(rbuf, contents, bufsize));
905 
906 	leak(fd);
907 }
908 
/*
 * Each tuple is (noclusterr, readahead-in-maxbcachebuf-blocks); see
 * ReadAhead::SetUp for how the parameters are applied.
 */
INSTANTIATE_TEST_CASE_P(RA, ReadAhead,
	Values(tuple<bool, int>(false, 0),
	       tuple<bool, int>(false, 1),
	       tuple<bool, int>(false, 2),
	       tuple<bool, int>(false, 3),
	       tuple<bool, int>(true, 0),
	       tuple<bool, int>(true, 1),
	       tuple<bool, int>(true, 2)));
917