/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 The FreeBSD Foundation
 *
 * This software was developed by BFF Storage Systems, LLC under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

extern "C" {
#include <sys/param.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <aio.h>
#include <fcntl.h>
#include <semaphore.h>
#include <setjmp.h>
#include <signal.h>
#include <unistd.h>
}

#include "mockfs.hh"
#include "utils.hh"

using namespace testing;

class Read: public FuseTest {

public:
void expect_lookup(const char *relpath, uint64_t ino, uint64_t size)
{
	FuseTest::expect_lookup(relpath, ino, S_IFREG | 0644, size, 1);
}
};

class RofsRead: public Read {
public:
virtual void SetUp() {
	m_ro = true;
	Read::SetUp();
}
};

class Read_7_8: public FuseTest {
public:
virtual void SetUp() {
	m_kernel_minor_version = 8;
	FuseTest::SetUp();
}

void expect_lookup(const char *relpath, uint64_t ino, uint64_t size)
{
	FuseTest::expect_lookup_7_8(relpath, ino, S_IFREG | 0644, size, 1);
}
};

class AioRead: public Read {
public:
virtual void SetUp() {
	if (!is_unsafe_aio_enabled())
		GTEST_SKIP() <<
			"vfs.aio.enable_unsafe must be set for this test";
	FuseTest::SetUp();
}
};

class AsyncRead: public AioRead {
	virtual void SetUp() {
		m_init_flags = FUSE_ASYNC_READ;
		AioRead::SetUp();
	}
};

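/*
 * Test parameters: whether to disable read clustering, and the readahead
 * size expressed as a multiple of vfs.maxbcachebuf.
 */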
class ReadAhead: public Read,
		 public WithParamInterface<tuple<bool, int>>
{
	virtual void SetUp() {
		int val;
		const char *node = "vfs.maxbcachebuf";
		size_t size = sizeof(val);
		ASSERT_EQ(0, sysctlbyname(node, &val, &size, NULL, 0))
			<< strerror(errno);

		m_maxreadahead = val * get<1>(GetParam());
		m_noclusterr = get<0>(GetParam());
		Read::SetUp();
	}
};

class ReadNoatime: public Read {
	virtual void SetUp() {
		m_noatime = true;
		Read::SetUp();
	}
};

class ReadSigbus: public Read
{
public:
static jmp_buf s_jmpbuf;
static void *s_si_addr;

void TearDown() {
	struct sigaction sa;

	bzero(&sa, sizeof(sa));
	sa.sa_handler = SIG_DFL;
	sigaction(SIGBUS, &sa, NULL);

	FuseTest::TearDown();
}

};

static void
handle_sigbus(int signo __unused, siginfo_t *info, void *uap __unused) {
	ReadSigbus::s_si_addr = info->si_addr;
	longjmp(ReadSigbus::s_jmpbuf, 1);
}

jmp_buf ReadSigbus::s_jmpbuf;
void *ReadSigbus::s_si_addr;

class TimeGran: public Read, public WithParamInterface<unsigned> {
public:
virtual void SetUp() {
	m_time_gran = 1 << GetParam();
	Read::SetUp();
}
};

/* AIO reads need to set the header's pid field correctly */
/* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236379 */
TEST_F(AioRead, aio_read)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];
	struct aiocb iocb, *piocb;

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	iocb.aio_nbytes = bufsize;
	iocb.aio_fildes = fd;
	iocb.aio_buf = buf;
	iocb.aio_offset = 0;
	iocb.aio_sigevent.sigev_notify = SIGEV_NONE;
	ASSERT_EQ(0, aio_read(&iocb)) << strerror(errno);
	ASSERT_EQ(bufsize, aio_waitcomplete(&piocb, NULL)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));

	leak(fd);
}

/*
 * Without the FUSE_ASYNC_READ mount option, fuse(4) should ensure that there
 * is at most one outstanding read operation per file handle
 */
TEST_F(AioRead, async_read_disabled)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = 50;
	char buf0[bufsize], buf1[bufsize];
	off_t off0 = 0;
	off_t off1 = m_maxbcachebuf;
	struct aiocb iocb0, iocb1;
	volatile sig_atomic_t read_count = 0;

	expect_lookup(RELPATH, ino, 131072);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == FH &&
				in.body.read.offset == (uint64_t)off0);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke([&](auto in __unused, auto &out __unused) {
		read_count++;
		/* Filesystem is slow to respond */
	}));
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == FH &&
				in.body.read.offset == (uint64_t)off1);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke([&](auto in __unused, auto &out __unused) {
		read_count++;
		/* Filesystem is slow to respond */
	}));

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/*
	 * Submit two AIO read requests, and respond to neither.  If the
	 * filesystem ever gets the second read request, then we failed to
	 * limit outstanding reads.
	 */
	iocb0.aio_nbytes = bufsize;
	iocb0.aio_fildes = fd;
	iocb0.aio_buf = buf0;
	iocb0.aio_offset = off0;
	iocb0.aio_sigevent.sigev_notify = SIGEV_NONE;
	ASSERT_EQ(0, aio_read(&iocb0)) << strerror(errno);

	iocb1.aio_nbytes = bufsize;
	iocb1.aio_fildes = fd;
	iocb1.aio_buf = buf1;
	iocb1.aio_offset = off1;
	iocb1.aio_sigevent.sigev_notify = SIGEV_NONE;
	ASSERT_EQ(0, aio_read(&iocb1)) << strerror(errno);

	/*
	 * Sleep for a while to make sure the kernel has had a chance to issue
	 * the second read, even though the first has not yet returned.
	 */
	nap();
	EXPECT_EQ(read_count, 1);

	m_mock->kill_daemon();
	/* Wait for AIO activity to complete, but ignore errors */
	(void)aio_waitcomplete(NULL, NULL);

	leak(fd);
}

/*
 * With the FUSE_ASYNC_READ mount option, fuse(4) may issue multiple
 * simultaneous read requests on the same file handle.
 */
TEST_F(AsyncRead, async_read)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = 50;
	char buf0[bufsize], buf1[bufsize];
	off_t off0 = 0;
	off_t off1 = m_maxbcachebuf;
	off_t fsize = 2 * m_maxbcachebuf;
	struct aiocb iocb0, iocb1;
	sem_t sem;

	ASSERT_EQ(0, sem_init(&sem, 0, 0)) << strerror(errno);

	expect_lookup(RELPATH, ino, fsize);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == FH &&
				in.body.read.offset == (uint64_t)off0);
		}, Eq(true)),
		_)
	).WillOnce(Invoke([&](auto in __unused, auto &out __unused) {
		sem_post(&sem);
		/* Filesystem is slow to respond */
	}));
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == FH &&
				in.body.read.offset == (uint64_t)off1);
		}, Eq(true)),
		_)
	).WillOnce(Invoke([&](auto in __unused, auto &out __unused) {
		sem_post(&sem);
		/* Filesystem is slow to respond */
	}));

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/*
	 * Submit two AIO read requests, but respond to neither.  Ensure that
	 * we received both.
	 */
	iocb0.aio_nbytes = bufsize;
	iocb0.aio_fildes = fd;
	iocb0.aio_buf = buf0;
	iocb0.aio_offset = off0;
	iocb0.aio_sigevent.sigev_notify = SIGEV_NONE;
	ASSERT_EQ(0, aio_read(&iocb0)) << strerror(errno);

	iocb1.aio_nbytes = bufsize;
	iocb1.aio_fildes = fd;
	iocb1.aio_buf = buf1;
	iocb1.aio_offset = off1;
	iocb1.aio_sigevent.sigev_notify = SIGEV_NONE;
	ASSERT_EQ(0, aio_read(&iocb1)) << strerror(errno);

	/* Wait until both reads have reached the daemon */
	ASSERT_EQ(0, sem_wait(&sem)) << strerror(errno);
	ASSERT_EQ(0, sem_wait(&sem)) << strerror(errno);

	m_mock->kill_daemon();
	/* Wait for AIO activity to complete, but ignore errors */
	(void)aio_waitcomplete(NULL, NULL);

	leak(fd);
}

/* The kernel should update the cached atime attribute during a read */
TEST_F(Read, atime)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct stat sb1, sb2;
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb1));

	/* Ensure atime will be different than it was during lookup */
	nap();

	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb2));

	/* The kernel should automatically update atime during read */
	EXPECT_TRUE(timespeccmp(&sb1.st_atim, &sb2.st_atim, <));
	EXPECT_TRUE(timespeccmp(&sb1.st_ctim, &sb2.st_ctim, ==));
	EXPECT_TRUE(timespeccmp(&sb1.st_mtim, &sb2.st_mtim, ==));

	leak(fd);
}

/* The kernel should update the cached atime attribute during a cached read */
TEST_F(Read, atime_cached)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct stat sb1, sb2;
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, 0)) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb1));

	/* Ensure atime will be different than it was during the first read */
	nap();

	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, 0)) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb2));

	/* The kernel should automatically update atime during read */
	EXPECT_TRUE(timespeccmp(&sb1.st_atim, &sb2.st_atim, <));
	EXPECT_TRUE(timespeccmp(&sb1.st_ctim, &sb2.st_ctim, ==));
	EXPECT_TRUE(timespeccmp(&sb1.st_mtim, &sb2.st_mtim, ==));

	leak(fd);
}

/* dirty atime values should be flushed during close */
TEST_F(Read, atime_during_close)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct stat sb;
	uint64_t ino = 42;
	const mode_t newmode = 0755;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
	EXPECT_CALL(*m_mock, process(
		ResultOf([&](auto in) {
			uint32_t valid = FATTR_ATIME;
			return (in.header.opcode == FUSE_SETATTR &&
				in.header.nodeid == ino &&
				in.body.setattr.valid == valid &&
				(time_t)in.body.setattr.atime ==
					sb.st_atim.tv_sec &&
				(long)in.body.setattr.atimensec ==
					sb.st_atim.tv_nsec);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;
		out.body.attr.attr.mode = S_IFREG | newmode;
	})));
	expect_flush(ino, 1, ReturnErrno(0));
	expect_release(ino, FuseTest::FH);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Ensure atime will be different than during lookup */
	nap();

	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb));

	close(fd);
}

/*
 * When not using -o default_permissions, the daemon may make its own decisions
 * regarding access permissions, and these may be unpredictable.  If it rejects
 * our attempt to set atime, that should not cause close(2) to fail.
 */
TEST_F(Read, atime_during_close_eacces)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
	EXPECT_CALL(*m_mock, process(
		ResultOf([&](auto in) {
			uint32_t valid = FATTR_ATIME;
			return (in.header.opcode == FUSE_SETATTR &&
				in.header.nodeid == ino &&
				in.body.setattr.valid == valid);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnErrno(EACCES)));
	expect_flush(ino, 1, ReturnErrno(0));
	expect_release(ino, FuseTest::FH);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Ensure atime will be different than during lookup */
	nap();

	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);

	ASSERT_EQ(0, close(fd));
}

/* A cached atime should be flushed during FUSE_SETATTR */
TEST_F(Read, atime_during_setattr)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct stat sb;
	uint64_t ino = 42;
	const mode_t newmode = 0755;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
	EXPECT_CALL(*m_mock, process(
		ResultOf([&](auto in) {
			uint32_t valid = FATTR_MODE | FATTR_ATIME;
			return (in.header.opcode == FUSE_SETATTR &&
				in.header.nodeid == ino &&
				in.body.setattr.valid == valid &&
				(time_t)in.body.setattr.atime ==
					sb.st_atim.tv_sec &&
				(long)in.body.setattr.atimensec ==
					sb.st_atim.tv_nsec);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;
		out.body.attr.attr.mode = S_IFREG | newmode;
	})));

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Ensure atime will be different than during lookup */
	nap();

	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb));
	ASSERT_EQ(0, fchmod(fd, newmode)) << strerror(errno);

	leak(fd);
}

/* 0-length reads shouldn't cause any confusion */
TEST_F(Read, direct_io_read_nothing)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	uint64_t ino = 42;
	int fd;
	uint64_t offset = 100;
	char buf[80];

	expect_lookup(RELPATH, ino, offset + 1000);
	expect_open(ino, FOPEN_DIRECT_IO, 1);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(0, pread(fd, buf, 0, offset)) << strerror(errno);
	leak(fd);
}

/*
 * With direct_io, reads should not fill the cache.  They should go straight to
 * the daemon
 */
TEST_F(Read, direct_io_pread)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	uint64_t offset = 100;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, offset + bufsize);
	expect_open(ino, FOPEN_DIRECT_IO, 1);
	expect_read(ino, offset, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, offset)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));

	// With FOPEN_DIRECT_IO, the cache should be bypassed.  The server will
	// get a 2nd read request.
	expect_read(ino, offset, bufsize, bufsize, CONTENTS);
	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, offset)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
	leak(fd);
}

/*
 * With direct_io, filesystems are allowed to return less data than is
 * requested.  fuse(4) should return a short read to userland.
 */
TEST_F(Read, direct_io_short_read)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	uint64_t offset = 100;
	ssize_t bufsize = strlen(CONTENTS);
	ssize_t halfbufsize = bufsize / 2;
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, offset + bufsize);
	expect_open(ino, FOPEN_DIRECT_IO, 1);
	expect_read(ino, offset, bufsize, halfbufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(halfbufsize, pread(fd, buf, bufsize, offset))
		<< strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, halfbufsize));
	leak(fd);
}

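/* If the daemon fails a FUSE_READ with an error, read(2) should return it */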
TEST_F(Read, eio)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnErrno(EIO)));

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(-1, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(EIO, errno);
	leak(fd);
}

/*
 * If the server returns a short read when direct io is not in use, that
 * indicates EOF, because of a server-side truncation.  We should invalidate
 * all cached attributes.  We may update the file size.
 */
TEST_F(Read, eof)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	uint64_t offset = 100;
	ssize_t bufsize = strlen(CONTENTS);
	ssize_t partbufsize = 3 * bufsize / 4;
	ssize_t r;
	uint8_t buf[bufsize];
	struct stat sb;

	expect_lookup(RELPATH, ino, offset + bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, offset + bufsize, offset + partbufsize, CONTENTS);
	expect_getattr(ino, offset + partbufsize);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	r = pread(fd, buf, bufsize, offset);
	ASSERT_LE(0, r) << strerror(errno);
	EXPECT_EQ(partbufsize, r) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb));
	EXPECT_EQ((off_t)(offset + partbufsize), sb.st_size);
	leak(fd);
}

/* Like Read.eof, but causes an entire buffer to be invalidated */
TEST_F(Read, eof_of_whole_buffer)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	off_t old_filesize = m_maxbcachebuf * 2 + bufsize;
	uint8_t buf[bufsize];
	struct stat sb;

	expect_lookup(RELPATH, ino, old_filesize);
	expect_open(ino, 0, 1);
	expect_read(ino, 2 * m_maxbcachebuf, bufsize, bufsize, CONTENTS);
	expect_read(ino, m_maxbcachebuf, m_maxbcachebuf, 0, CONTENTS);
	expect_getattr(ino, m_maxbcachebuf);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Cache the third block */
	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, m_maxbcachebuf * 2))
		<< strerror(errno);
	/* Try to read the 2nd block, but it's past EOF */
	ASSERT_EQ(0, pread(fd, buf, bufsize, m_maxbcachebuf))
		<< strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb));
	EXPECT_EQ((off_t)(m_maxbcachebuf), sb.st_size);
	leak(fd);
}

/*
 * With the keep_cache option, the kernel may keep its read cache across
 * multiple open(2)s.
 */
TEST_F(Read, keep_cache)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd0, fd1;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, bufsize, 2);
	expect_open(ino, FOPEN_KEEP_CACHE, 2);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd0 = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd0) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd0, buf, bufsize)) << strerror(errno);

	fd1 = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd1) << strerror(errno);

	/*
	 * This read should be serviced by cache, even though it's on the other
	 * file descriptor
	 */
	ASSERT_EQ(bufsize, read(fd1, buf, bufsize)) << strerror(errno);

	leak(fd0);
	leak(fd1);
}

/*
 * Without the keep_cache option, the kernel should drop its read caches on
 * every open
 */
TEST_F(Read, keep_cache_disabled)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd0, fd1;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, bufsize, 2);
	expect_open(ino, 0, 2);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd0 = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd0) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd0, buf, bufsize)) << strerror(errno);

	fd1 = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd1) << strerror(errno);

	/*
	 * This read should not be serviced by cache, even though it's on the
	 * original file descriptor
	 */
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
	ASSERT_EQ(0, lseek(fd0, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd0, buf, bufsize)) << strerror(errno);

	leak(fd0);
	leak(fd1);
}

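/* Reads of an mmap()ed file should be backed by FUSE_READ requests */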
TEST_F(Read, mmap)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t len;
	size_t bufsize = strlen(CONTENTS);
	void *p;

	len = getpagesize();

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == Read::FH &&
				in.body.read.offset == 0 &&
				in.body.read.size == bufsize);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		out.header.len = sizeof(struct fuse_out_header) + bufsize;
		memmove(out.body.bytes, CONTENTS, bufsize);
	})));

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
	ASSERT_NE(MAP_FAILED, p) << strerror(errno);

	ASSERT_EQ(0, memcmp(p, CONTENTS, bufsize));

	ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
	leak(fd);
}

/*
 * The kernel should not update the cached atime attribute during a read, if
 * MNT_NOATIME is used.
 */
TEST_F(ReadNoatime, atime)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct stat sb1, sb2;
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb1));

	nap();

	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb2));

	/* The kernel should not update atime during read */
	EXPECT_TRUE(timespeccmp(&sb1.st_atim, &sb2.st_atim, ==));
	EXPECT_TRUE(timespeccmp(&sb1.st_ctim, &sb2.st_ctim, ==));
	EXPECT_TRUE(timespeccmp(&sb1.st_mtim, &sb2.st_mtim, ==));

	leak(fd);
}

/*
 * The kernel should not update the cached atime attribute during a cached
 * read, if MNT_NOATIME is used.
 */
TEST_F(ReadNoatime, atime_cached)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct stat sb1, sb2;
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, 0)) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb1));

	nap();

	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, 0)) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb2));

	/* The kernel should not update atime during a cached read */
	EXPECT_TRUE(timespeccmp(&sb1.st_atim, &sb2.st_atim, ==));
	EXPECT_TRUE(timespeccmp(&sb1.st_ctim, &sb2.st_ctim, ==));
	EXPECT_TRUE(timespeccmp(&sb1.st_mtim, &sb2.st_mtim, ==));

	leak(fd);
}

/* When a read of an mmap()ed file fails, the kernel should deliver SIGBUS */
TEST_F(ReadSigbus, mmap_eio)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct sigaction sa;
	uint64_t ino = 42;
	int fd;
	ssize_t len;
	size_t bufsize = strlen(CONTENTS);
	void *p;

	len = getpagesize();

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == Read::FH);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnErrno(EIO)));

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
	ASSERT_NE(MAP_FAILED, p) << strerror(errno);

	/* Accessing the mapped page should raise SIGBUS. */

	bzero(&sa, sizeof(sa));
	sa.sa_handler = SIG_DFL;
	sa.sa_sigaction = handle_sigbus;
	sa.sa_flags = SA_RESETHAND | SA_SIGINFO;
	ASSERT_EQ(0, sigaction(SIGBUS, &sa, NULL)) << strerror(errno);
	if (setjmp(ReadSigbus::s_jmpbuf) == 0) {
		atomic_signal_fence(std::memory_order::memory_order_seq_cst);
		volatile char x __unused = *(volatile char*)p;
		FAIL() << "shouldn't get here";
	}

	ASSERT_EQ(p, ReadSigbus::s_si_addr);
	ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
	leak(fd);
}

/*
 * A read via mmap comes up short, indicating that the file was truncated
 * server-side.
 */
TEST_F(Read, mmap_eof)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t len;
	size_t bufsize = strlen(CONTENTS);
	struct stat sb;
	void *p;

	len = getpagesize();

	expect_lookup(RELPATH, ino, m_maxbcachebuf);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == Read::FH &&
				in.body.read.offset == 0 &&
				in.body.read.size == (uint32_t)m_maxbcachebuf);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		out.header.len = sizeof(struct fuse_out_header) + bufsize;
		memmove(out.body.bytes, CONTENTS, bufsize);
	})));
	expect_getattr(ino, bufsize);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
	ASSERT_NE(MAP_FAILED, p) << strerror(errno);

	/* The file size should be automatically truncated */
	ASSERT_EQ(0, memcmp(p, CONTENTS, bufsize));
	ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno);
	EXPECT_EQ((off_t)bufsize, sb.st_size);

	ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
	leak(fd);
}

/*
 * During VOP_GETPAGES, the FUSE server fails a FUSE_GETATTR operation.  This
 * almost certainly indicates a buggy FUSE server, and our goal should be not
 * to panic.  Instead, generate SIGBUS.
 */
TEST_F(ReadSigbus, mmap_getblksz_fail)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct sigaction sa;
	Sequence seq;
	uint64_t ino = 42;
	int fd;
	ssize_t len;
	size_t bufsize = strlen(CONTENTS);
	mode_t mode = S_IFREG | 0644;
	void *p;

	len = getpagesize();

	FuseTest::expect_lookup(RELPATH, ino, mode, bufsize, 1, 0);
	/* Expect two GETATTR calls that succeed, followed by one that fails. */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_GETATTR &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).Times(2)
	.InSequence(seq)
	.WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;
		out.body.attr.attr.mode = mode;
		out.body.attr.attr.size = bufsize;
		out.body.attr.attr_valid = 0;
	})));
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_GETATTR &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).InSequence(seq)
	.WillRepeatedly(Invoke(ReturnErrno(EIO)));
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ);
		}, Eq(true)),
		_)
	).Times(0);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
	ASSERT_NE(MAP_FAILED, p) << strerror(errno);

	/* Accessing the mapped page should raise SIGBUS. */
	bzero(&sa, sizeof(sa));
	sa.sa_handler = SIG_DFL;
	sa.sa_sigaction = handle_sigbus;
	sa.sa_flags = SA_RESETHAND | SA_SIGINFO;
	ASSERT_EQ(0, sigaction(SIGBUS, &sa, NULL)) << strerror(errno);
	if (setjmp(ReadSigbus::s_jmpbuf) == 0) {
		atomic_signal_fence(std::memory_order::memory_order_seq_cst);
		volatile char x __unused = *(volatile char*)p;
		FAIL() << "shouldn't get here";
	}

	ASSERT_EQ(p, ReadSigbus::s_si_addr);
	ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
	leak(fd);
}

/*
 * Just as when FOPEN_DIRECT_IO is used, reads with O_DIRECT should bypass the
 * cache and go straight to the daemon.
 */
TEST_F(Read, o_direct)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	// Fill the cache
	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));

	// Reads with o_direct should bypass the cache
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
	ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno);
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));

	leak(fd);
}

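/* pread(2) should issue a FUSE_READ at the requested offset */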
TEST_F(Read, pread)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	/*
	 * Set offset to a maxbcachebuf boundary so we'll be sure what offset
	 * to read from.  Without this, the read might start at a lower offset.
	 */
	uint64_t offset = m_maxbcachebuf;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, offset + bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, offset, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, offset)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
	leak(fd);
}

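/* A plain read(2) should be forwarded to the daemon as a FUSE_READ */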
TEST_F(Read, read)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));

	leak(fd);
}

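/* Reads should also work when the daemon speaks protocol version 7.8 */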
TEST_F(Read_7_8, read)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));

	leak(fd);
}

/*
 * If caching is enabled, the kernel should try to read an entire cache block
 * at a time.
 */
TEST_F(Read, cache_block)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS0 = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = 8;
	ssize_t filesize = m_maxbcachebuf * 2;
	char *contents;
	char buf[bufsize];
	const char *contents1 = CONTENTS0 + bufsize;

	contents = new char[filesize]();
	memmove(contents, CONTENTS0, strlen(CONTENTS0));

	expect_lookup(RELPATH, ino, filesize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, m_maxbcachebuf, m_maxbcachebuf,
		contents);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS0, bufsize));

	/* A subsequent read should be serviced by cache */
	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, contents1, bufsize));
	leak(fd);
	delete[] contents;
}

/* Reading with sendfile should work (though it obviously won't be 0-copy) */
TEST_F(Read, sendfile)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	size_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];
	int sp[2];
	off_t sbytes;

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == Read::FH &&
				in.body.read.offset == 0 &&
				in.body.read.size == bufsize);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		out.header.len = sizeof(struct fuse_out_header) + bufsize;
		memmove(out.body.bytes, CONTENTS, bufsize);
	})));

	ASSERT_EQ(0, socketpair(PF_LOCAL, SOCK_STREAM, 0, sp))
		<< strerror(errno);
	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(0, sendfile(fd, sp[1], 0, bufsize, NULL, &sbytes, 0))
		<< strerror(errno);
	ASSERT_EQ(static_cast<ssize_t>(bufsize), read(sp[0], buf, bufsize))
		<< strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));

	close(sp[1]);
	close(sp[0]);
	leak(fd);
}

/* sendfile should fail gracefully if fuse declines the read */
/* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236466 */
TEST_F(Read, sendfile_eio)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	int sp[2];
	off_t sbytes;

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnErrno(EIO)));

	ASSERT_EQ(0, socketpair(PF_LOCAL, SOCK_STREAM, 0, sp))
		<< strerror(errno);
	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_NE(0, sendfile(fd, sp[1], 0, bufsize, NULL, &sbytes, 0));

	close(sp[1]);
	close(sp[0]);
	leak(fd);
}

/*
 * Sequential reads should use readahead.  And if allowed, large reads should
 * be clustered.
 */
TEST_P(ReadAhead, readahead) {
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	uint64_t ino = 42;
	int fd, maxcontig, clustersize;
	ssize_t bufsize = 4 * m_maxbcachebuf;
	ssize_t filesize = bufsize;
	uint64_t len;
	char *rbuf, *contents;
	off_t offs;

	contents = new char[filesize];
	memset(contents, 'X', filesize);
	rbuf = new char[bufsize]();

	expect_lookup(RELPATH, ino, filesize);
	expect_open(ino, 0, 1);
	maxcontig = m_noclusterr ? m_maxbcachebuf :
		m_maxbcachebuf + m_maxreadahead;
	clustersize = MIN((unsigned long )maxcontig, m_maxphys);
	for (offs = 0; offs < bufsize; offs += clustersize) {
		len = std::min((size_t)clustersize, (size_t)(filesize - offs));
		expect_read(ino, offs, len, len, contents + offs);
	}

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Set the internal readahead counter to a "large" value */
	ASSERT_EQ(0, fcntl(fd, F_READAHEAD, 1'000'000'000)) << strerror(errno);

	ASSERT_EQ(bufsize, read(fd, rbuf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, memcmp(rbuf, contents, bufsize));

	leak(fd);
	delete[] rbuf;
	delete[] contents;
}

INSTANTIATE_TEST_SUITE_P(RA, ReadAhead,
	Values(tuple<bool, int>(false, 0),
	       tuple<bool, int>(false, 1),
	       tuple<bool, int>(false, 2),
	       tuple<bool, int>(false, 3),
	       tuple<bool, int>(true, 0),
	       tuple<bool, int>(true, 1),
	       tuple<bool, int>(true, 2)));

/* With read-only mounts, fuse should never update atime during close */
TEST_F(RofsRead, atime_during_close)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
	EXPECT_CALL(*m_mock, process(
		ResultOf([&](auto in) {
			return (in.header.opcode == FUSE_SETATTR);
		}, Eq(true)),
		_)
	).Times(0);
	expect_flush(ino, 1, ReturnErrno(0));
	expect_release(ino, FuseTest::FH);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Ensure atime will be different than during lookup */
	nap();

	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);

	close(fd);
}

/* fuse_init_out.time_gran controls the granularity of timestamps */
TEST_P(TimeGran, atime_during_setattr)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];
	uint64_t ino = 42;
	const mode_t newmode = 0755;
	int fd;

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			uint32_t valid = FATTR_MODE | FATTR_ATIME;
			return (in.header.opcode == FUSE_SETATTR &&
				in.header.nodeid == ino &&
				in.body.setattr.valid == valid &&
				in.body.setattr.atimensec % m_time_gran == 0);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;
		out.body.attr.attr.mode = S_IFREG | newmode;
	})));

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, fchmod(fd, newmode)) << strerror(errno);

	leak(fd);
}

INSTANTIATE_TEST_SUITE_P(TG, TimeGran, Range(0u, 10u));