1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2019 The FreeBSD Foundation
5  *
6  * This software was developed by BFF Storage Systems, LLC under sponsorship
7  * from the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  * $FreeBSD$
31  */
32 
33 extern "C" {
34 #include <sys/param.h>
35 #include <sys/mman.h>
36 #include <sys/socket.h>
37 #include <sys/sysctl.h>
38 #include <sys/uio.h>
39 
40 #include <aio.h>
41 #include <fcntl.h>
42 #include <semaphore.h>
43 #include <setjmp.h>
44 #include <signal.h>
45 #include <unistd.h>
46 }
47 
48 #include "mockfs.hh"
49 #include "utils.hh"
50 
51 using namespace testing;
52 
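/* Tests of the read path using the default mount and cache settings */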
53 class Read: public FuseTest {
54 
55 public:
56 void expect_lookup(const char *relpath, uint64_t ino, uint64_t size)
57 {
58 	FuseTest::expect_lookup(relpath, ino, S_IFREG | 0644, size, 1);
59 }
60 };
61 
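/* Read tests using FUSE protocol version 7.8 */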
62 class Read_7_8: public FuseTest {
63 public:
64 virtual void SetUp() {
65 	m_kernel_minor_version = 8;
66 	FuseTest::SetUp();
67 }
68 
69 void expect_lookup(const char *relpath, uint64_t ino, uint64_t size)
70 {
71 	FuseTest::expect_lookup_7_8(relpath, ino, S_IFREG | 0644, size, 1);
72 }
73 };
74 
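/* Tests that issue reads via POSIX AIO; they require vfs.aio.enable_unsafe */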
75 class AioRead: public Read {
76 public:
77 virtual void SetUp() {
78 	if (!is_unsafe_aio_enabled())
79 		GTEST_SKIP() <<
80 			"vfs.aio.enable_unsafe must be set for this test";
81 	FuseTest::SetUp();
82 }
83 };
84 
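/* Like AioRead, but the file system is mounted with the FUSE_ASYNC_READ flag */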
85 class AsyncRead: public AioRead {
86 	virtual void SetUp() {
87 		m_init_flags = FUSE_ASYNC_READ;
88 		AioRead::SetUp();
89 	}
90 };
91 
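/* Parameterized by noclusterr and by the readahead size, in units of maxbcachebuf */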
92 class ReadAhead: public Read,
93 		 public WithParamInterface<tuple<bool, int>>
94 {
95 	virtual void SetUp() {
96 		int val;
97 		const char *node = "vfs.maxbcachebuf";
98 		size_t size = sizeof(val);
99 		ASSERT_EQ(0, sysctlbyname(node, &val, &size, NULL, 0))
100 			<< strerror(errno);
101 
102 		m_maxreadahead = val * get<1>(GetParam());
103 		m_noclusterr = get<0>(GetParam());
104 		Read::SetUp();
105 	}
106 };
107 
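/* Read tests with atime updates disabled (MNT_NOATIME) */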
108 class ReadNoatime: public Read {
109 	virtual void SetUp() {
110 		m_noatime = true;
111 		Read::SetUp();
112 	}
113 };
114 
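/* Read tests that expect the process to receive SIGBUS */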
115 class ReadSigbus: public Read
116 {
117 public:
118 static jmp_buf s_jmpbuf;
119 static void *s_si_addr;
120 
121 void TearDown() {
122 	struct sigaction sa;
123 
124 	bzero(&sa, sizeof(sa));
125 	sa.sa_handler = SIG_DFL;
126 	sigaction(SIGBUS, &sa, NULL);
127 
128 	FuseTest::TearDown();
129 }
130 
131 };
132 
133 static void
134 handle_sigbus(int signo __unused, siginfo_t *info, void *uap __unused) {
135 	ReadSigbus::s_si_addr = info->si_addr;
136 	longjmp(ReadSigbus::s_jmpbuf, 1);
137 }
138 
139 jmp_buf ReadSigbus::s_jmpbuf;
140 void *ReadSigbus::s_si_addr;
141 
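/* Parameterized by the base-2 logarithm of fuse_init_out.time_gran */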
142 class TimeGran: public Read, public WithParamInterface<unsigned> {
143 public:
144 virtual void SetUp() {
145 	m_time_gran = 1 << GetParam();
146 	Read::SetUp();
147 }
148 };
149 
150 /* AIO reads need to set the header's pid field correctly */
151 /* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236379 */
152 TEST_F(AioRead, aio_read)
153 {
154 	const char FULLPATH[] = "mountpoint/some_file.txt";
155 	const char RELPATH[] = "some_file.txt";
156 	const char *CONTENTS = "abcdefgh";
157 	uint64_t ino = 42;
158 	int fd;
159 	ssize_t bufsize = strlen(CONTENTS);
160 	uint8_t buf[bufsize];
161 	struct aiocb iocb, *piocb;
162 
163 	expect_lookup(RELPATH, ino, bufsize);
164 	expect_open(ino, 0, 1);
165 	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
166 
167 	fd = open(FULLPATH, O_RDONLY);
168 	ASSERT_LE(0, fd) << strerror(errno);
169 
170 	iocb.aio_nbytes = bufsize;
171 	iocb.aio_fildes = fd;
172 	iocb.aio_buf = buf;
173 	iocb.aio_offset = 0;
174 	iocb.aio_sigevent.sigev_notify = SIGEV_NONE;
175 	ASSERT_EQ(0, aio_read(&iocb)) << strerror(errno);
176 	ASSERT_EQ(bufsize, aio_waitcomplete(&piocb, NULL)) << strerror(errno);
177 	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
178 
179 	leak(fd);
180 }
181 
182 /*
183  * Without the FUSE_ASYNC_READ mount option, fuse(4) should ensure that there
184  * is at most one outstanding read operation per file handle
185  */
186 TEST_F(AioRead, async_read_disabled)
187 {
188 	const char FULLPATH[] = "mountpoint/some_file.txt";
189 	const char RELPATH[] = "some_file.txt";
190 	uint64_t ino = 42;
191 	int fd;
192 	ssize_t bufsize = 50;
193 	char buf0[bufsize], buf1[bufsize];
194 	off_t off0 = 0;
195 	off_t off1 = m_maxbcachebuf;
196 	struct aiocb iocb0, iocb1;
197 	volatile sig_atomic_t read_count = 0;
198 
199 	expect_lookup(RELPATH, ino, 131072);
200 	expect_open(ino, 0, 1);
201 	EXPECT_CALL(*m_mock, process(
202 		ResultOf([=](auto in) {
203 			return (in.header.opcode == FUSE_READ &&
204 				in.header.nodeid == ino &&
205 				in.body.read.fh == FH &&
206 				in.body.read.offset == (uint64_t)off0);
207 		}, Eq(true)),
208 		_)
209 	).WillRepeatedly(Invoke([&](auto in __unused, auto &out __unused) {
210 		read_count++;
211 		/* Filesystem is slow to respond */
212 	}));
213 	EXPECT_CALL(*m_mock, process(
214 		ResultOf([=](auto in) {
215 			return (in.header.opcode == FUSE_READ &&
216 				in.header.nodeid == ino &&
217 				in.body.read.fh == FH &&
218 				in.body.read.offset == (uint64_t)off1);
219 		}, Eq(true)),
220 		_)
221 	).WillRepeatedly(Invoke([&](auto in __unused, auto &out __unused) {
222 		read_count++;
223 		/* Filesystem is slow to respond */
224 	}));
225 
226 	fd = open(FULLPATH, O_RDONLY);
227 	ASSERT_LE(0, fd) << strerror(errno);
228 
229 	/*
230 	 * Submit two AIO read requests, and respond to neither.  If the
231 	 * filesystem ever gets the second read request, then we failed to
232 	 * limit outstanding reads.
233 	 */
234 	iocb0.aio_nbytes = bufsize;
235 	iocb0.aio_fildes = fd;
236 	iocb0.aio_buf = buf0;
237 	iocb0.aio_offset = off0;
238 	iocb0.aio_sigevent.sigev_notify = SIGEV_NONE;
239 	ASSERT_EQ(0, aio_read(&iocb0)) << strerror(errno);
240 
241 	iocb1.aio_nbytes = bufsize;
242 	iocb1.aio_fildes = fd;
243 	iocb1.aio_buf = buf1;
244 	iocb1.aio_offset = off1;
245 	iocb1.aio_sigevent.sigev_notify = SIGEV_NONE;
246 	ASSERT_EQ(0, aio_read(&iocb1)) << strerror(errno);
247 
248 	/*
249 	 * Sleep for a while to make sure the kernel has had a chance to issue
250 	 * the second read, even though the first has not yet returned
251 	 */
252 	nap();
253 	EXPECT_EQ(read_count, 1);
254 
255 	m_mock->kill_daemon();
256 	/* Wait for AIO activity to complete, but ignore errors */
257 	(void)aio_waitcomplete(NULL, NULL);
258 
259 	leak(fd);
260 }
261 
262 /*
263  * With the FUSE_ASYNC_READ mount option, fuse(4) may issue multiple
264  * simultaneous read requests on the same file handle.
265  */
266 TEST_F(AsyncRead, async_read)
267 {
268 	const char FULLPATH[] = "mountpoint/some_file.txt";
269 	const char RELPATH[] = "some_file.txt";
270 	uint64_t ino = 42;
271 	int fd;
272 	ssize_t bufsize = 50;
273 	char buf0[bufsize], buf1[bufsize];
274 	off_t off0 = 0;
275 	off_t off1 = m_maxbcachebuf;
276 	off_t fsize = 2 * m_maxbcachebuf;
277 	struct aiocb iocb0, iocb1;
278 	sem_t sem;
279 
280 	ASSERT_EQ(0, sem_init(&sem, 0, 0)) << strerror(errno);
281 
282 	expect_lookup(RELPATH, ino, fsize);
283 	expect_open(ino, 0, 1);
284 	EXPECT_CALL(*m_mock, process(
285 		ResultOf([=](auto in) {
286 			return (in.header.opcode == FUSE_READ &&
287 				in.header.nodeid == ino &&
288 				in.body.read.fh == FH &&
289 				in.body.read.offset == (uint64_t)off0);
290 		}, Eq(true)),
291 		_)
292 	).WillOnce(Invoke([&](auto in __unused, auto &out __unused) {
293 		sem_post(&sem);
294 		/* Filesystem is slow to respond */
295 	}));
296 	EXPECT_CALL(*m_mock, process(
297 		ResultOf([=](auto in) {
298 			return (in.header.opcode == FUSE_READ &&
299 				in.header.nodeid == ino &&
300 				in.body.read.fh == FH &&
301 				in.body.read.offset == (uint64_t)off1);
302 		}, Eq(true)),
303 		_)
304 	).WillOnce(Invoke([&](auto in __unused, auto &out __unused) {
305 		sem_post(&sem);
306 		/* Filesystem is slow to respond */
307 	}));
308 
309 	fd = open(FULLPATH, O_RDONLY);
310 	ASSERT_LE(0, fd) << strerror(errno);
311 
312 	/*
313 	 * Submit two AIO read requests, but respond to neither.  Ensure that
314 	 * we received both.
315 	 */
316 	iocb0.aio_nbytes = bufsize;
317 	iocb0.aio_fildes = fd;
318 	iocb0.aio_buf = buf0;
319 	iocb0.aio_offset = off0;
320 	iocb0.aio_sigevent.sigev_notify = SIGEV_NONE;
321 	ASSERT_EQ(0, aio_read(&iocb0)) << strerror(errno);
322 
323 	iocb1.aio_nbytes = bufsize;
324 	iocb1.aio_fildes = fd;
325 	iocb1.aio_buf = buf1;
326 	iocb1.aio_offset = off1;
327 	iocb1.aio_sigevent.sigev_notify = SIGEV_NONE;
328 	ASSERT_EQ(0, aio_read(&iocb1)) << strerror(errno);
329 
330 	/* Wait until both reads have reached the daemon */
331 	ASSERT_EQ(0, sem_wait(&sem)) << strerror(errno);
332 	ASSERT_EQ(0, sem_wait(&sem)) << strerror(errno);
333 
334 	m_mock->kill_daemon();
335 	/* Wait for AIO activity to complete, but ignore errors */
336 	(void)aio_waitcomplete(NULL, NULL);
337 
338 	leak(fd);
339 }
340 
341 /* The kernel should update the cached atime attribute during a read */
342 TEST_F(Read, atime)
343 {
344 	const char FULLPATH[] = "mountpoint/some_file.txt";
345 	const char RELPATH[] = "some_file.txt";
346 	const char *CONTENTS = "abcdefgh";
347 	struct stat sb1, sb2;
348 	uint64_t ino = 42;
349 	int fd;
350 	ssize_t bufsize = strlen(CONTENTS);
351 	uint8_t buf[bufsize];
352 
353 	expect_lookup(RELPATH, ino, bufsize);
354 	expect_open(ino, 0, 1);
355 	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
356 
357 	fd = open(FULLPATH, O_RDONLY);
358 	ASSERT_LE(0, fd) << strerror(errno);
359 	ASSERT_EQ(0, fstat(fd, &sb1));
360 
361 	/* Ensure atime will be different than it was during lookup */
362 	nap();
363 
364 	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
365 	ASSERT_EQ(0, fstat(fd, &sb2));
366 
367 	/* The kernel should automatically update atime during read */
368 	EXPECT_TRUE(timespeccmp(&sb1.st_atim, &sb2.st_atim, <));
369 	EXPECT_TRUE(timespeccmp(&sb1.st_ctim, &sb2.st_ctim, ==));
370 	EXPECT_TRUE(timespeccmp(&sb1.st_mtim, &sb2.st_mtim, ==));
371 
372 	leak(fd);
373 }
374 
375 /* The kernel should update the cached atime attribute during a cached read */
376 TEST_F(Read, atime_cached)
377 {
378 	const char FULLPATH[] = "mountpoint/some_file.txt";
379 	const char RELPATH[] = "some_file.txt";
380 	const char *CONTENTS = "abcdefgh";
381 	struct stat sb1, sb2;
382 	uint64_t ino = 42;
383 	int fd;
384 	ssize_t bufsize = strlen(CONTENTS);
385 	uint8_t buf[bufsize];
386 
387 	expect_lookup(RELPATH, ino, bufsize);
388 	expect_open(ino, 0, 1);
389 	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
390 
391 	fd = open(FULLPATH, O_RDONLY);
392 	ASSERT_LE(0, fd) << strerror(errno);
393 
394 	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, 0)) << strerror(errno);
395 	ASSERT_EQ(0, fstat(fd, &sb1));
396 
397 	/* Ensure atime will be different than it was during the first read */
398 	nap();
399 
400 	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, 0)) << strerror(errno);
401 	ASSERT_EQ(0, fstat(fd, &sb2));
402 
403 	/* The kernel should automatically update atime during read */
404 	EXPECT_TRUE(timespeccmp(&sb1.st_atim, &sb2.st_atim, <));
405 	EXPECT_TRUE(timespeccmp(&sb1.st_ctim, &sb2.st_ctim, ==));
406 	EXPECT_TRUE(timespeccmp(&sb1.st_mtim, &sb2.st_mtim, ==));
407 
408 	leak(fd);
409 }
410 
411 /* Dirty atime values should be flushed during close */
412 TEST_F(Read, atime_during_close)
413 {
414 	const char FULLPATH[] = "mountpoint/some_file.txt";
415 	const char RELPATH[] = "some_file.txt";
416 	const char *CONTENTS = "abcdefgh";
417 	struct stat sb;
418 	uint64_t ino = 42;
419 	const mode_t newmode = 0755;
420 	int fd;
421 	ssize_t bufsize = strlen(CONTENTS);
422 	uint8_t buf[bufsize];
423 
424 	expect_lookup(RELPATH, ino, bufsize);
425 	expect_open(ino, 0, 1);
426 	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
427 	EXPECT_CALL(*m_mock, process(
428 		ResultOf([&](auto in) {
429 			uint32_t valid = FATTR_ATIME;
430 			return (in.header.opcode == FUSE_SETATTR &&
431 				in.header.nodeid == ino &&
432 				in.body.setattr.valid == valid &&
433 				(time_t)in.body.setattr.atime ==
434 					sb.st_atim.tv_sec &&
435 				(long)in.body.setattr.atimensec ==
436 					sb.st_atim.tv_nsec);
437 		}, Eq(true)),
438 		_)
439 	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
440 		SET_OUT_HEADER_LEN(out, attr);
441 		out.body.attr.attr.ino = ino;
442 		out.body.attr.attr.mode = S_IFREG | newmode;
443 	})));
444 	expect_flush(ino, 1, ReturnErrno(0));
445 	expect_release(ino, FuseTest::FH);
446 
447 	fd = open(FULLPATH, O_RDONLY);
448 	ASSERT_LE(0, fd) << strerror(errno);
449 
450 	/* Ensure atime will be different than during lookup */
451 	nap();
452 
453 	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
454 	ASSERT_EQ(0, fstat(fd, &sb));
455 
456 	close(fd);
457 }
458 
459 /* A cached atime should be flushed during FUSE_SETATTR */
460 TEST_F(Read, atime_during_setattr)
461 {
462 	const char FULLPATH[] = "mountpoint/some_file.txt";
463 	const char RELPATH[] = "some_file.txt";
464 	const char *CONTENTS = "abcdefgh";
465 	struct stat sb;
466 	uint64_t ino = 42;
467 	const mode_t newmode = 0755;
468 	int fd;
469 	ssize_t bufsize = strlen(CONTENTS);
470 	uint8_t buf[bufsize];
471 
472 	expect_lookup(RELPATH, ino, bufsize);
473 	expect_open(ino, 0, 1);
474 	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
475 	EXPECT_CALL(*m_mock, process(
476 		ResultOf([&](auto in) {
477 			uint32_t valid = FATTR_MODE | FATTR_ATIME;
478 			return (in.header.opcode == FUSE_SETATTR &&
479 				in.header.nodeid == ino &&
480 				in.body.setattr.valid == valid &&
481 				(time_t)in.body.setattr.atime ==
482 					sb.st_atim.tv_sec &&
483 				(long)in.body.setattr.atimensec ==
484 					sb.st_atim.tv_nsec);
485 		}, Eq(true)),
486 		_)
487 	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
488 		SET_OUT_HEADER_LEN(out, attr);
489 		out.body.attr.attr.ino = ino;
490 		out.body.attr.attr.mode = S_IFREG | newmode;
491 	})));
492 
493 	fd = open(FULLPATH, O_RDONLY);
494 	ASSERT_LE(0, fd) << strerror(errno);
495 
496 	/* Ensure atime will be different than during lookup */
497 	nap();
498 
499 	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
500 	ASSERT_EQ(0, fstat(fd, &sb));
501 	ASSERT_EQ(0, fchmod(fd, newmode)) << strerror(errno);
502 
503 	leak(fd);
504 }
505 
507 /* 0-length reads shouldn't cause any confusion */
508 TEST_F(Read, direct_io_read_nothing)
509 {
510 	const char FULLPATH[] = "mountpoint/some_file.txt";
511 	const char RELPATH[] = "some_file.txt";
512 	uint64_t ino = 42;
513 	int fd;
514 	uint64_t offset = 100;
515 	char buf[80];
516 
517 	expect_lookup(RELPATH, ino, offset + 1000);
518 	expect_open(ino, FOPEN_DIRECT_IO, 1);
519 
520 	fd = open(FULLPATH, O_RDONLY);
521 	ASSERT_LE(0, fd) << strerror(errno);
522 
523 	ASSERT_EQ(0, pread(fd, buf, 0, offset)) << strerror(errno);
524 	leak(fd);
525 }
526 
527 /*
528  * With direct_io, reads should not fill the cache.  They should go straight to
529  * the daemon
530  */
531 TEST_F(Read, direct_io_pread)
532 {
533 	const char FULLPATH[] = "mountpoint/some_file.txt";
534 	const char RELPATH[] = "some_file.txt";
535 	const char *CONTENTS = "abcdefgh";
536 	uint64_t ino = 42;
537 	int fd;
538 	uint64_t offset = 100;
539 	ssize_t bufsize = strlen(CONTENTS);
540 	uint8_t buf[bufsize];
541 
542 	expect_lookup(RELPATH, ino, offset + bufsize);
543 	expect_open(ino, FOPEN_DIRECT_IO, 1);
544 	expect_read(ino, offset, bufsize, bufsize, CONTENTS);
545 
546 	fd = open(FULLPATH, O_RDONLY);
547 	ASSERT_LE(0, fd) << strerror(errno);
548 
549 	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, offset)) << strerror(errno);
550 	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
551 
552 	// With FOPEN_DIRECT_IO, the cache should be bypassed.  The server will
553 	// get a 2nd read request.
554 	expect_read(ino, offset, bufsize, bufsize, CONTENTS);
555 	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, offset)) << strerror(errno);
556 	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
557 	leak(fd);
558 }
559 
560 /*
561  * With direct_io, filesystems are allowed to return less data than is
562  * requested.  fuse(4) should return a short read to userland.
563  */
564 TEST_F(Read, direct_io_short_read)
565 {
566 	const char FULLPATH[] = "mountpoint/some_file.txt";
567 	const char RELPATH[] = "some_file.txt";
568 	const char *CONTENTS = "abcdefghijklmnop";
569 	uint64_t ino = 42;
570 	int fd;
571 	uint64_t offset = 100;
572 	ssize_t bufsize = strlen(CONTENTS);
573 	ssize_t halfbufsize = bufsize / 2;
574 	uint8_t buf[bufsize];
575 
576 	expect_lookup(RELPATH, ino, offset + bufsize);
577 	expect_open(ino, FOPEN_DIRECT_IO, 1);
578 	expect_read(ino, offset, bufsize, halfbufsize, CONTENTS);
579 
580 	fd = open(FULLPATH, O_RDONLY);
581 	ASSERT_LE(0, fd) << strerror(errno);
582 
583 	ASSERT_EQ(halfbufsize, pread(fd, buf, bufsize, offset))
584 		<< strerror(errno);
585 	ASSERT_EQ(0, memcmp(buf, CONTENTS, halfbufsize));
586 	leak(fd);
587 }
588 
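/* If the daemon fails a FUSE_READ with EIO, read(2) should fail with EIO */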
589 TEST_F(Read, eio)
590 {
591 	const char FULLPATH[] = "mountpoint/some_file.txt";
592 	const char RELPATH[] = "some_file.txt";
593 	const char *CONTENTS = "abcdefgh";
594 	uint64_t ino = 42;
595 	int fd;
596 	ssize_t bufsize = strlen(CONTENTS);
597 	uint8_t buf[bufsize];
598 
599 	expect_lookup(RELPATH, ino, bufsize);
600 	expect_open(ino, 0, 1);
601 	EXPECT_CALL(*m_mock, process(
602 		ResultOf([=](auto in) {
603 			return (in.header.opcode == FUSE_READ);
604 		}, Eq(true)),
605 		_)
606 	).WillOnce(Invoke(ReturnErrno(EIO)));
607 
608 	fd = open(FULLPATH, O_RDONLY);
609 	ASSERT_LE(0, fd) << strerror(errno);
610 
611 	ASSERT_EQ(-1, read(fd, buf, bufsize)) << strerror(errno);
612 	ASSERT_EQ(EIO, errno);
613 	leak(fd);
614 }
615 
616 /*
617  * If the server returns a short read when direct io is not in use, that
618  * indicates EOF, because of a server-side truncation.  We should invalidate
619  * all cached attributes.  We may update the file size.
620  */
621 TEST_F(Read, eof)
622 {
623 	const char FULLPATH[] = "mountpoint/some_file.txt";
624 	const char RELPATH[] = "some_file.txt";
625 	const char *CONTENTS = "abcdefghijklmnop";
626 	uint64_t ino = 42;
627 	int fd;
628 	uint64_t offset = 100;
629 	ssize_t bufsize = strlen(CONTENTS);
630 	ssize_t partbufsize = 3 * bufsize / 4;
631 	ssize_t r;
632 	uint8_t buf[bufsize];
633 	struct stat sb;
634 
635 	expect_lookup(RELPATH, ino, offset + bufsize);
636 	expect_open(ino, 0, 1);
637 	expect_read(ino, 0, offset + bufsize, offset + partbufsize, CONTENTS);
638 	expect_getattr(ino, offset + partbufsize);
639 
640 	fd = open(FULLPATH, O_RDONLY);
641 	ASSERT_LE(0, fd) << strerror(errno);
642 
643 	r = pread(fd, buf, bufsize, offset);
644 	ASSERT_LE(0, r) << strerror(errno);
645 	EXPECT_EQ(partbufsize, r) << strerror(errno);
646 	ASSERT_EQ(0, fstat(fd, &sb));
647 	EXPECT_EQ((off_t)(offset + partbufsize), sb.st_size);
648 	leak(fd);
649 }
650 
651 /* Like Read.eof, but causes an entire buffer to be invalidated */
652 TEST_F(Read, eof_of_whole_buffer)
653 {
654 	const char FULLPATH[] = "mountpoint/some_file.txt";
655 	const char RELPATH[] = "some_file.txt";
656 	const char *CONTENTS = "abcdefghijklmnop";
657 	uint64_t ino = 42;
658 	int fd;
659 	ssize_t bufsize = strlen(CONTENTS);
660 	off_t old_filesize = m_maxbcachebuf * 2 + bufsize;
661 	uint8_t buf[bufsize];
662 	struct stat sb;
663 
664 	expect_lookup(RELPATH, ino, old_filesize);
665 	expect_open(ino, 0, 1);
666 	expect_read(ino, 2 * m_maxbcachebuf, bufsize, bufsize, CONTENTS);
667 	expect_read(ino, m_maxbcachebuf, m_maxbcachebuf, 0, CONTENTS);
668 	expect_getattr(ino, m_maxbcachebuf);
669 
670 	fd = open(FULLPATH, O_RDONLY);
671 	ASSERT_LE(0, fd) << strerror(errno);
672 
673 	/* Cache the third block */
674 	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, m_maxbcachebuf * 2))
675 		<< strerror(errno);
676 	/* Try to read the 2nd block, but it's past EOF */
677 	ASSERT_EQ(0, pread(fd, buf, bufsize, m_maxbcachebuf))
678 		<< strerror(errno);
679 	ASSERT_EQ(0, fstat(fd, &sb));
680 	EXPECT_EQ((off_t)(m_maxbcachebuf), sb.st_size);
681 	leak(fd);
682 }
683 
684 /*
685  * With the keep_cache option, the kernel may keep its read cache across
686  * multiple open(2)s.
687  */
688 TEST_F(Read, keep_cache)
689 {
690 	const char FULLPATH[] = "mountpoint/some_file.txt";
691 	const char RELPATH[] = "some_file.txt";
692 	const char *CONTENTS = "abcdefgh";
693 	uint64_t ino = 42;
694 	int fd0, fd1;
695 	ssize_t bufsize = strlen(CONTENTS);
696 	uint8_t buf[bufsize];
697 
698 	FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, bufsize, 2);
699 	expect_open(ino, FOPEN_KEEP_CACHE, 2);
700 	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
701 
702 	fd0 = open(FULLPATH, O_RDONLY);
703 	ASSERT_LE(0, fd0) << strerror(errno);
704 	ASSERT_EQ(bufsize, read(fd0, buf, bufsize)) << strerror(errno);
705 
706 	fd1 = open(FULLPATH, O_RDWR);
707 	ASSERT_LE(0, fd1) << strerror(errno);
708 
709 	/*
710 	 * This read should be serviced by cache, even though it's on the other
711 	 * file descriptor
712 	 */
713 	ASSERT_EQ(bufsize, read(fd1, buf, bufsize)) << strerror(errno);
714 
715 	leak(fd0);
716 	leak(fd1);
717 }
718 
719 /*
720  * Without the keep_cache option, the kernel should drop its read caches on
721  * every open
722  */
723 TEST_F(Read, keep_cache_disabled)
724 {
725 	const char FULLPATH[] = "mountpoint/some_file.txt";
726 	const char RELPATH[] = "some_file.txt";
727 	const char *CONTENTS = "abcdefgh";
728 	uint64_t ino = 42;
729 	int fd0, fd1;
730 	ssize_t bufsize = strlen(CONTENTS);
731 	uint8_t buf[bufsize];
732 
733 	FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, bufsize, 2);
734 	expect_open(ino, 0, 2);
735 	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
736 
737 	fd0 = open(FULLPATH, O_RDONLY);
738 	ASSERT_LE(0, fd0) << strerror(errno);
739 	ASSERT_EQ(bufsize, read(fd0, buf, bufsize)) << strerror(errno);
740 
741 	fd1 = open(FULLPATH, O_RDWR);
742 	ASSERT_LE(0, fd1) << strerror(errno);
743 
744 	/*
745 	 * This read should not be serviced by cache, even though it's on the
746 	 * original file descriptor
747 	 */
748 	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
749 	ASSERT_EQ(0, lseek(fd0, 0, SEEK_SET)) << strerror(errno);
750 	ASSERT_EQ(bufsize, read(fd0, buf, bufsize)) << strerror(errno);
751 
752 	leak(fd0);
753 	leak(fd1);
754 }
755 
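/* Reads through an mmap()ed region should be satisfied by FUSE_READ */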
756 TEST_F(Read, mmap)
757 {
758 	const char FULLPATH[] = "mountpoint/some_file.txt";
759 	const char RELPATH[] = "some_file.txt";
760 	const char *CONTENTS = "abcdefgh";
761 	uint64_t ino = 42;
762 	int fd;
763 	ssize_t len;
764 	size_t bufsize = strlen(CONTENTS);
765 	void *p;
766 
767 	len = getpagesize();
768 
769 	expect_lookup(RELPATH, ino, bufsize);
770 	expect_open(ino, 0, 1);
771 	EXPECT_CALL(*m_mock, process(
772 		ResultOf([=](auto in) {
773 			return (in.header.opcode == FUSE_READ &&
774 				in.header.nodeid == ino &&
775 				in.body.read.fh == Read::FH &&
776 				in.body.read.offset == 0 &&
777 				in.body.read.size == bufsize);
778 		}, Eq(true)),
779 		_)
780 	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
781 		out.header.len = sizeof(struct fuse_out_header) + bufsize;
782 		memmove(out.body.bytes, CONTENTS, bufsize);
783 	})));
784 
785 	fd = open(FULLPATH, O_RDONLY);
786 	ASSERT_LE(0, fd) << strerror(errno);
787 
788 	p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
789 	ASSERT_NE(MAP_FAILED, p) << strerror(errno);
790 
791 	ASSERT_EQ(0, memcmp(p, CONTENTS, bufsize));
792 
793 	ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
794 	leak(fd);
795 }
796 
797 /*
798  * The kernel should not update the cached atime attribute during a read, if
799  * MNT_NOATIME is used.
800  */
801 TEST_F(ReadNoatime, atime)
802 {
803 	const char FULLPATH[] = "mountpoint/some_file.txt";
804 	const char RELPATH[] = "some_file.txt";
805 	const char *CONTENTS = "abcdefgh";
806 	struct stat sb1, sb2;
807 	uint64_t ino = 42;
808 	int fd;
809 	ssize_t bufsize = strlen(CONTENTS);
810 	uint8_t buf[bufsize];
811 
812 	expect_lookup(RELPATH, ino, bufsize);
813 	expect_open(ino, 0, 1);
814 	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
815 
816 	fd = open(FULLPATH, O_RDONLY);
817 	ASSERT_LE(0, fd) << strerror(errno);
818 	ASSERT_EQ(0, fstat(fd, &sb1));
819 
820 	nap();
821 
822 	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
823 	ASSERT_EQ(0, fstat(fd, &sb2));
824 
825 	/* The kernel should not update atime during read */
826 	EXPECT_TRUE(timespeccmp(&sb1.st_atim, &sb2.st_atim, ==));
827 	EXPECT_TRUE(timespeccmp(&sb1.st_ctim, &sb2.st_ctim, ==));
828 	EXPECT_TRUE(timespeccmp(&sb1.st_mtim, &sb2.st_mtim, ==));
829 
830 	leak(fd);
831 }
832 
833 /*
834  * The kernel should not update the cached atime attribute during a cached
835  * read, if MNT_NOATIME is used.
836  */
837 TEST_F(ReadNoatime, atime_cached)
838 {
839 	const char FULLPATH[] = "mountpoint/some_file.txt";
840 	const char RELPATH[] = "some_file.txt";
841 	const char *CONTENTS = "abcdefgh";
842 	struct stat sb1, sb2;
843 	uint64_t ino = 42;
844 	int fd;
845 	ssize_t bufsize = strlen(CONTENTS);
846 	uint8_t buf[bufsize];
847 
848 	expect_lookup(RELPATH, ino, bufsize);
849 	expect_open(ino, 0, 1);
850 	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
851 
852 	fd = open(FULLPATH, O_RDONLY);
853 	ASSERT_LE(0, fd) << strerror(errno);
854 
855 	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, 0)) << strerror(errno);
856 	ASSERT_EQ(0, fstat(fd, &sb1));
857 
858 	nap();
859 
860 	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, 0)) << strerror(errno);
861 	ASSERT_EQ(0, fstat(fd, &sb2));
862 
863 	/* The kernel should not update atime during read */
864 	EXPECT_TRUE(timespeccmp(&sb1.st_atim, &sb2.st_atim, ==));
865 	EXPECT_TRUE(timespeccmp(&sb1.st_ctim, &sb2.st_ctim, ==));
866 	EXPECT_TRUE(timespeccmp(&sb1.st_mtim, &sb2.st_mtim, ==));
867 
868 	leak(fd);
869 }
870 
871 /* Read of an mmap()ed file fails */
872 TEST_F(ReadSigbus, mmap_eio)
873 {
874 	const char FULLPATH[] = "mountpoint/some_file.txt";
875 	const char RELPATH[] = "some_file.txt";
876 	const char *CONTENTS = "abcdefgh";
877 	struct sigaction sa;
878 	uint64_t ino = 42;
879 	int fd;
880 	ssize_t len;
881 	size_t bufsize = strlen(CONTENTS);
882 	void *p;
883 
884 	len = getpagesize();
885 
886 	expect_lookup(RELPATH, ino, bufsize);
887 	expect_open(ino, 0, 1);
888 	EXPECT_CALL(*m_mock, process(
889 		ResultOf([=](auto in) {
890 			return (in.header.opcode == FUSE_READ &&
891 				in.header.nodeid == ino &&
892 				in.body.read.fh == Read::FH);
893 		}, Eq(true)),
894 		_)
895 	).WillRepeatedly(Invoke(ReturnErrno(EIO)));
896 
897 	fd = open(FULLPATH, O_RDONLY);
898 	ASSERT_LE(0, fd) << strerror(errno);
899 
900 	p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
901 	ASSERT_NE(MAP_FAILED, p) << strerror(errno);
902 
903 	/* Accessing the mapped page should raise SIGBUS.  */
904 
905 	bzero(&sa, sizeof(sa));
906 	sa.sa_handler = SIG_DFL;
907 	sa.sa_sigaction = handle_sigbus;
908 	sa.sa_flags = SA_RESETHAND | SA_SIGINFO;
909 	ASSERT_EQ(0, sigaction(SIGBUS, &sa, NULL)) << strerror(errno);
910 	if (setjmp(ReadSigbus::s_jmpbuf) == 0) {
911 		atomic_signal_fence(std::memory_order::memory_order_seq_cst);
912 		volatile char x __unused = *(volatile char*)p;
913 		FAIL() << "shouldn't get here";
914 	}
915 
916 	ASSERT_EQ(p, ReadSigbus::s_si_addr);
917 	ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
918 	leak(fd);
919 }
920 
921 /*
922  * A read via mmap comes up short, indicating that the file was truncated
923  * server-side.
924  */
925 TEST_F(Read, mmap_eof)
926 {
927 	const char FULLPATH[] = "mountpoint/some_file.txt";
928 	const char RELPATH[] = "some_file.txt";
929 	const char *CONTENTS = "abcdefgh";
930 	uint64_t ino = 42;
931 	int fd;
932 	ssize_t len;
933 	size_t bufsize = strlen(CONTENTS);
934 	struct stat sb;
935 	void *p;
936 
937 	len = getpagesize();
938 
939 	expect_lookup(RELPATH, ino, m_maxbcachebuf);
940 	expect_open(ino, 0, 1);
941 	EXPECT_CALL(*m_mock, process(
942 		ResultOf([=](auto in) {
943 			return (in.header.opcode == FUSE_READ &&
944 				in.header.nodeid == ino &&
945 				in.body.read.fh == Read::FH &&
946 				in.body.read.offset == 0 &&
947 				in.body.read.size == (uint32_t)m_maxbcachebuf);
948 		}, Eq(true)),
949 		_)
950 	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
951 		out.header.len = sizeof(struct fuse_out_header) + bufsize;
952 		memmove(out.body.bytes, CONTENTS, bufsize);
953 	})));
954 	expect_getattr(ino, bufsize);
955 
956 	fd = open(FULLPATH, O_RDONLY);
957 	ASSERT_LE(0, fd) << strerror(errno);
958 
959 	p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
960 	ASSERT_NE(MAP_FAILED, p) << strerror(errno);
961 
962 	/* The file size should be automatically truncated */
963 	ASSERT_EQ(0, memcmp(p, CONTENTS, bufsize));
964 	ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno);
965 	EXPECT_EQ((off_t)bufsize, sb.st_size);
966 
967 	ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
968 	leak(fd);
969 }
970 
971 /*
972  * During VOP_GETPAGES, the FUSE server fails a FUSE_GETATTR operation.  This
973  * almost certainly indicates a buggy FUSE server, and our goal should be not
974  * to panic.  Instead, generate SIGBUS.
975  */
976 TEST_F(ReadSigbus, mmap_getblksz_fail)
977 {
978 	const char FULLPATH[] = "mountpoint/some_file.txt";
979 	const char RELPATH[] = "some_file.txt";
980 	const char *CONTENTS = "abcdefgh";
981 	struct sigaction sa;
982 	Sequence seq;
983 	uint64_t ino = 42;
984 	int fd;
985 	ssize_t len;
986 	size_t bufsize = strlen(CONTENTS);
987 	mode_t mode = S_IFREG | 0644;
988 	void *p;
989 
990 	len = getpagesize();
991 
992 	FuseTest::expect_lookup(RELPATH, ino, mode, bufsize, 1, 0);
993 	/* Expect two GETATTR calls that succeed, followed by one that fails. */
994 	EXPECT_CALL(*m_mock, process(
995 		ResultOf([=](auto in) {
996 			return (in.header.opcode == FUSE_GETATTR &&
997 				in.header.nodeid == ino);
998 		}, Eq(true)),
999 		_)
1000 	).Times(2)
1001 	.InSequence(seq)
1002 	.WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
1003 		SET_OUT_HEADER_LEN(out, attr);
1004 		out.body.attr.attr.ino = ino;
1005 		out.body.attr.attr.mode = mode;
1006 		out.body.attr.attr.size = bufsize;
1007 		out.body.attr.attr_valid = 0;
1008 	})));
1009 	EXPECT_CALL(*m_mock, process(
1010 		ResultOf([=](auto in) {
1011 			return (in.header.opcode == FUSE_GETATTR &&
1012 				in.header.nodeid == ino);
1013 		}, Eq(true)),
1014 		_)
1015 	).InSequence(seq)
1016 	.WillRepeatedly(Invoke(ReturnErrno(EIO)));
1017 	expect_open(ino, 0, 1);
1018 	EXPECT_CALL(*m_mock, process(
1019 		ResultOf([=](auto in) {
1020 			return (in.header.opcode == FUSE_READ);
1021 		}, Eq(true)),
1022 		_)
1023 	).Times(0);
1024 
1025 	fd = open(FULLPATH, O_RDONLY);
1026 	ASSERT_LE(0, fd) << strerror(errno);
1027 
1028 	p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
1029 	ASSERT_NE(MAP_FAILED, p) << strerror(errno);
1030 
1031 	/* Accessing the mapped page should raise SIGBUS.  */
1032 	bzero(&sa, sizeof(sa));
1033 	sa.sa_handler = SIG_DFL;
1034 	sa.sa_sigaction = handle_sigbus;
1035 	sa.sa_flags = SA_RESETHAND | SA_SIGINFO;
1036 	ASSERT_EQ(0, sigaction(SIGBUS, &sa, NULL)) << strerror(errno);
1037 	if (setjmp(ReadSigbus::s_jmpbuf) == 0) {
1038 		atomic_signal_fence(std::memory_order::memory_order_seq_cst);
1039 		volatile char x __unused = *(volatile char*)p;
1040 		FAIL() << "shouldn't get here";
1041 	}
1042 
1043 	ASSERT_EQ(p, ReadSigbus::s_si_addr);
1044 	ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
1045 	leak(fd);
1046 }
1047 
1048 /*
1049  * Just as when FOPEN_DIRECT_IO is used, reads with O_DIRECT should bypass
1050  * the cache and go straight to the daemon
1051  */
1052 TEST_F(Read, o_direct)
1053 {
1054 	const char FULLPATH[] = "mountpoint/some_file.txt";
1055 	const char RELPATH[] = "some_file.txt";
1056 	const char *CONTENTS = "abcdefgh";
1057 	uint64_t ino = 42;
1058 	int fd;
1059 	ssize_t bufsize = strlen(CONTENTS);
1060 	uint8_t buf[bufsize];
1061 
1062 	expect_lookup(RELPATH, ino, bufsize);
1063 	expect_open(ino, 0, 1);
1064 	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
1065 
1066 	fd = open(FULLPATH, O_RDONLY);
1067 	ASSERT_LE(0, fd) << strerror(errno);
1068 
1069 	// Fill the cache
1070 	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
1071 	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
1072 
1073 	// Reads with o_direct should bypass the cache
1074 	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
1075 	ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno);
1076 	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
1077 	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
1078 	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
1079 
1080 	leak(fd);
1081 }
1082 
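/* pread(2) should issue a FUSE_READ at the requested offset */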
1083 TEST_F(Read, pread)
1084 {
1085 	const char FULLPATH[] = "mountpoint/some_file.txt";
1086 	const char RELPATH[] = "some_file.txt";
1087 	const char *CONTENTS = "abcdefgh";
1088 	uint64_t ino = 42;
1089 	int fd;
1090 	/*
1091 	 * Set offset to a maxbcachebuf boundary so we'll be sure what offset
1092 	 * to read from.  Without this, the read might start at a lower offset.
1093 	 */
1094 	uint64_t offset = m_maxbcachebuf;
1095 	ssize_t bufsize = strlen(CONTENTS);
1096 	uint8_t buf[bufsize];
1097 
1098 	expect_lookup(RELPATH, ino, offset + bufsize);
1099 	expect_open(ino, 0, 1);
1100 	expect_read(ino, offset, bufsize, bufsize, CONTENTS);
1101 
1102 	fd = open(FULLPATH, O_RDONLY);
1103 	ASSERT_LE(0, fd) << strerror(errno);
1104 
1105 	ASSERT_EQ(bufsize, pread(fd, buf, bufsize, offset)) << strerror(errno);
1106 	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
1107 	leak(fd);
1108 }
1109 
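/* A plain read(2) should return the file's contents */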
1110 TEST_F(Read, read)
1111 {
1112 	const char FULLPATH[] = "mountpoint/some_file.txt";
1113 	const char RELPATH[] = "some_file.txt";
1114 	const char *CONTENTS = "abcdefgh";
1115 	uint64_t ino = 42;
1116 	int fd;
1117 	ssize_t bufsize = strlen(CONTENTS);
1118 	uint8_t buf[bufsize];
1119 
1120 	expect_lookup(RELPATH, ino, bufsize);
1121 	expect_open(ino, 0, 1);
1122 	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
1123 
1124 	fd = open(FULLPATH, O_RDONLY);
1125 	ASSERT_LE(0, fd) << strerror(errno);
1126 
1127 	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
1128 	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
1129 
1130 	leak(fd);
1131 }
1132 
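/* read(2) should work the same way when the daemon uses protocol 7.8 */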
1133 TEST_F(Read_7_8, read)
1134 {
1135 	const char FULLPATH[] = "mountpoint/some_file.txt";
1136 	const char RELPATH[] = "some_file.txt";
1137 	const char *CONTENTS = "abcdefgh";
1138 	uint64_t ino = 42;
1139 	int fd;
1140 	ssize_t bufsize = strlen(CONTENTS);
1141 	uint8_t buf[bufsize];
1142 
1143 	expect_lookup(RELPATH, ino, bufsize);
1144 	expect_open(ino, 0, 1);
1145 	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
1146 
1147 	fd = open(FULLPATH, O_RDONLY);
1148 	ASSERT_LE(0, fd) << strerror(errno);
1149 
1150 	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
1151 	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
1152 
1153 	leak(fd);
1154 }
1155 
1156 /*
1157  * If caching is enabled, the kernel should try to read an entire cache block
1158  * at a time.
1159  */
1160 TEST_F(Read, cache_block)
1161 {
1162 	const char FULLPATH[] = "mountpoint/some_file.txt";
1163 	const char RELPATH[] = "some_file.txt";
1164 	const char *CONTENTS0 = "abcdefghijklmnop";
1165 	uint64_t ino = 42;
1166 	int fd;
1167 	ssize_t bufsize = 8;
1168 	ssize_t filesize = m_maxbcachebuf * 2;
1169 	char *contents;
1170 	char buf[bufsize];
1171 	const char *contents1 = CONTENTS0 + bufsize;
1172 
1173 	contents = (char*)calloc(1, filesize);
1174 	ASSERT_NE(nullptr, contents);
1175 	memmove(contents, CONTENTS0, strlen(CONTENTS0));
1176 
1177 	expect_lookup(RELPATH, ino, filesize);
1178 	expect_open(ino, 0, 1);
1179 	expect_read(ino, 0, m_maxbcachebuf, m_maxbcachebuf,
1180 		contents);
1181 
1182 	fd = open(FULLPATH, O_RDONLY);
1183 	ASSERT_LE(0, fd) << strerror(errno);
1184 
1185 	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
1186 	ASSERT_EQ(0, memcmp(buf, CONTENTS0, bufsize));
1187 
1188 	/* A subsequent read should be serviced by cache */
1189 	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
1190 	ASSERT_EQ(0, memcmp(buf, contents1, bufsize));
1191 	leak(fd);
1192 	free(contents);
1193 }
1194 
1195 /* Reading with sendfile should work (though it obviously won't be 0-copy) */
1196 TEST_F(Read, sendfile)
1197 {
1198 	const char FULLPATH[] = "mountpoint/some_file.txt";
1199 	const char RELPATH[] = "some_file.txt";
1200 	const char *CONTENTS = "abcdefgh";
1201 	uint64_t ino = 42;
1202 	int fd;
1203 	size_t bufsize = strlen(CONTENTS);
1204 	uint8_t buf[bufsize];
1205 	int sp[2];
1206 	off_t sbytes;
1207 
1208 	expect_lookup(RELPATH, ino, bufsize);
1209 	expect_open(ino, 0, 1);
1210 	EXPECT_CALL(*m_mock, process(
1211 		ResultOf([=](auto in) {
1212 			return (in.header.opcode == FUSE_READ &&
1213 				in.header.nodeid == ino &&
1214 				in.body.read.fh == Read::FH &&
1215 				in.body.read.offset == 0 &&
1216 				in.body.read.size == bufsize);
1217 		}, Eq(true)),
1218 		_)
1219 	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
1220 		out.header.len = sizeof(struct fuse_out_header) + bufsize;
1221 		memmove(out.body.bytes, CONTENTS, bufsize);
1222 	})));
1223 
1224 	ASSERT_EQ(0, socketpair(PF_LOCAL, SOCK_STREAM, 0, sp))
1225 		<< strerror(errno);
1226 	fd = open(FULLPATH, O_RDONLY);
1227 	ASSERT_LE(0, fd) << strerror(errno);
1228 
1229 	ASSERT_EQ(0, sendfile(fd, sp[1], 0, bufsize, NULL, &sbytes, 0))
1230 		<< strerror(errno);
1231 	ASSERT_EQ(static_cast<ssize_t>(bufsize), read(sp[0], buf, bufsize))
1232 		<< strerror(errno);
1233 	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));
1234 
1235 	close(sp[1]);
1236 	close(sp[0]);
1237 	leak(fd);
1238 }
1239 
1240 /* sendfile should fail gracefully if fuse declines the read */
1241 /* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236466 */
1242 TEST_F(Read, sendfile_eio)
1243 {
1244 	const char FULLPATH[] = "mountpoint/some_file.txt";
1245 	const char RELPATH[] = "some_file.txt";
1246 	const char *CONTENTS = "abcdefgh";
1247 	uint64_t ino = 42;
1248 	int fd;
1249 	ssize_t bufsize = strlen(CONTENTS);
1250 	int sp[2];
1251 	off_t sbytes;
1252 
1253 	expect_lookup(RELPATH, ino, bufsize);
1254 	expect_open(ino, 0, 1);
1255 	EXPECT_CALL(*m_mock, process(
1256 		ResultOf([=](auto in) {
1257 			return (in.header.opcode == FUSE_READ);
1258 		}, Eq(true)),
1259 		_)
1260 	).WillOnce(Invoke(ReturnErrno(EIO)));
1261 
1262 	ASSERT_EQ(0, socketpair(PF_LOCAL, SOCK_STREAM, 0, sp))
1263 		<< strerror(errno);
1264 	fd = open(FULLPATH, O_RDONLY);
1265 	ASSERT_LE(0, fd) << strerror(errno);
1266 
1267 	ASSERT_NE(0, sendfile(fd, sp[1], 0, bufsize, NULL, &sbytes, 0));
1268 
1269 	close(sp[1]);
1270 	close(sp[0]);
1271 	leak(fd);
1272 }
1273 
1274 /*
1275  * Sequential reads should use readahead.  And if allowed, large reads should
1276  * be clustered.
1277  */
1278 TEST_P(ReadAhead, readahead) {
1279 	const char FULLPATH[] = "mountpoint/some_file.txt";
1280 	const char RELPATH[] = "some_file.txt";
1281 	uint64_t ino = 42;
1282 	int fd, maxcontig, clustersize;
1283 	ssize_t bufsize = 4 * m_maxbcachebuf;
1284 	ssize_t filesize = bufsize;
1285 	uint64_t len;
1286 	char *rbuf, *contents;
1287 	off_t offs;
1288 
1289 	contents = (char*)malloc(filesize);
1290 	ASSERT_NE(nullptr, contents);
1291 	memset(contents, 'X', filesize);
1292 	rbuf = (char*)calloc(1, bufsize);
	ASSERT_NE(nullptr, rbuf);
1293 
1294 	expect_lookup(RELPATH, ino, filesize);
1295 	expect_open(ino, 0, 1);
1296 	maxcontig = m_noclusterr ? m_maxbcachebuf :
1297 		m_maxbcachebuf + m_maxreadahead;
1298 	clustersize = MIN(maxcontig, m_maxphys);
1299 	for (offs = 0; offs < bufsize; offs += clustersize) {
1300 		len = std::min((size_t)clustersize, (size_t)(filesize - offs));
1301 		expect_read(ino, offs, len, len, contents + offs);
1302 	}
1303 
1304 	fd = open(FULLPATH, O_RDONLY);
1305 	ASSERT_LE(0, fd) << strerror(errno);
1306 
1307 	/* Set the internal readahead counter to a "large" value */
1308 	ASSERT_EQ(0, fcntl(fd, F_READAHEAD, 1'000'000'000)) << strerror(errno);
1309 
1310 	ASSERT_EQ(bufsize, read(fd, rbuf, bufsize)) << strerror(errno);
1311 	ASSERT_EQ(0, memcmp(rbuf, contents, bufsize));
1312 
1313 	leak(fd);
1314 	free(rbuf);
1315 	free(contents);
1316 }
1317 
1318 INSTANTIATE_TEST_CASE_P(RA, ReadAhead,
1319 	Values(tuple<bool, int>(false, 0),
1320 	       tuple<bool, int>(false, 1),
1321 	       tuple<bool, int>(false, 2),
1322 	       tuple<bool, int>(false, 3),
1323 	       tuple<bool, int>(true, 0),
1324 	       tuple<bool, int>(true, 1),
1325 	       tuple<bool, int>(true, 2)));
1326 
1327 /* fuse_init_out.time_gran controls the granularity of timestamps */
1328 TEST_P(TimeGran, atime_during_setattr)
1329 {
1330 	const char FULLPATH[] = "mountpoint/some_file.txt";
1331 	const char RELPATH[] = "some_file.txt";
1332 	const char *CONTENTS = "abcdefgh";
1333 	ssize_t bufsize = strlen(CONTENTS);
1334 	uint8_t buf[bufsize];
1335 	uint64_t ino = 42;
1336 	const mode_t newmode = 0755;
1337 	int fd;
1338 
1339 	expect_lookup(RELPATH, ino, bufsize);
1340 	expect_open(ino, 0, 1);
1341 	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
1342 	EXPECT_CALL(*m_mock, process(
1343 		ResultOf([=](auto in) {
1344 			uint32_t valid = FATTR_MODE | FATTR_ATIME;
1345 			return (in.header.opcode == FUSE_SETATTR &&
1346 				in.header.nodeid == ino &&
1347 				in.body.setattr.valid == valid &&
1348 				in.body.setattr.atimensec % m_time_gran == 0);
1349 		}, Eq(true)),
1350 		_)
1351 	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
1352 		SET_OUT_HEADER_LEN(out, attr);
1353 		out.body.attr.attr.ino = ino;
1354 		out.body.attr.attr.mode = S_IFREG | newmode;
1355 	})));
1356 
1357 	fd = open(FULLPATH, O_RDWR);
1358 	ASSERT_LE(0, fd) << strerror(errno);
1359 
1360 	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
1361 	ASSERT_EQ(0, fchmod(fd, newmode)) << strerror(errno);
1362 
1363 	leak(fd);
1364 }
1365 
1366 INSTANTIATE_TEST_CASE_P(TG, TimeGran, Range(0u, 10u));
1367