xref: /freebsd/tests/sys/fs/fusefs/write.cc (revision 0a36787e4c1fa0cf77dcf83be0867178476e372b)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2019 The FreeBSD Foundation
5  *
6  * This software was developed by BFF Storage Systems, LLC under sponsorship
7  * from the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  * $FreeBSD$
31  */
32 
33 extern "C" {
34 #include <sys/param.h>
35 #include <sys/mman.h>
36 #include <sys/resource.h>
37 #include <sys/stat.h>
38 #include <sys/time.h>
39 #include <sys/uio.h>
40 
41 #include <aio.h>
42 #include <fcntl.h>
43 #include <signal.h>
44 #include <unistd.h>
45 }
46 
47 #include "mockfs.hh"
48 #include "utils.hh"
49 
50 using namespace testing;
51 
class Write: public FuseTest {

public:
/* Set to 1 by sigxfsz_handler when a SIGXFSZ is delivered */
static sig_atomic_t s_sigxfsz;

/* Clear the SIGXFSZ flag before every test */
void SetUp() {
	s_sigxfsz = 0;
	FuseTest::SetUp();
}

/* Restore the default SIGXFSZ disposition (rlimit_fsize installs a handler) */
void TearDown() {
	struct sigaction sa;

	bzero(&sa, sizeof(sa));
	sa.sa_handler = SIG_DFL;
	sigaction(SIGXFSZ, &sa, NULL);

	FuseTest::TearDown();
}

/* Expect a FUSE_LOOKUP for a regular 0644 file of the given size */
void expect_lookup(const char *relpath, uint64_t ino, uint64_t size)
{
	FuseTest::expect_lookup(relpath, ino, S_IFREG | 0644, size, 1);
}

/* Expect any number of FUSE_RELEASE operations on ino, answered by r */
void expect_release(uint64_t ino, ProcessMockerT r)
{
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_RELEASE &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(r));
}

/*
 * Expect a FUSE_WRITE of isize bytes at offset; reply that osize bytes
 * were written.  No write flags are expected.
 */
void expect_write(uint64_t ino, uint64_t offset, uint64_t isize,
	uint64_t osize, const void *contents)
{
	FuseTest::expect_write(ino, offset, isize, osize, 0, 0, contents);
}

/* Expect a write that may or may not come, depending on the cache mode */
void maybe_expect_write(uint64_t ino, uint64_t offset, uint64_t size,
	const void *contents)
{
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			/* Payload begins right after the fuse_write_in header */
			const char *buf = (const char*)in.body.bytes +
				sizeof(struct fuse_write_in);

			return (in.header.opcode == FUSE_WRITE &&
				in.header.nodeid == ino &&
				in.body.write.offset == offset  &&
				in.body.write.size == size &&
				0 == bcmp(buf, contents, size));
		}, Eq(true)),
		_)
	).Times(AtMost(1))
	.WillRepeatedly(Invoke(
		ReturnImmediate([=](auto in __unused, auto& out) {
			SET_OUT_HEADER_LEN(out, write);
			/* Report a full write */
			out.body.write.size = size;
		})
	));
}

};
120 
121 sig_atomic_t Write::s_sigxfsz = 0;
122 
/* Writes using the legacy protocol 7.8 message formats */
class Write_7_8: public FuseTest {

public:
virtual void SetUp() {
	/* Negotiate FUSE protocol 7.8 with the mock daemon */
	m_kernel_minor_version = 8;
	FuseTest::SetUp();
}

/* Expect a protocol-7.8 FUSE_LOOKUP for a regular 0644 file */
void expect_lookup(const char *relpath, uint64_t ino, uint64_t size)
{
	FuseTest::expect_lookup_7_8(relpath, ino, S_IFREG | 0644, size, 1);
}

};
137 
/* Writes submitted via POSIX AIO; requires vfs.aio.enable_unsafe */
class AioWrite: public Write {
virtual void SetUp() {
	if (!is_unsafe_aio_enabled())
		GTEST_SKIP() <<
			"vfs.aio.enable_unsafe must be set for this test";
	/*
	 * NOTE(review): calls FuseTest::SetUp directly, bypassing
	 * Write::SetUp's s_sigxfsz reset.  Harmless today since AioWrite
	 * tests never read s_sigxfsz — confirm if that changes.
	 */
	FuseTest::SetUp();
}
};
146 
147 /* Tests for the writeback cache mode */
148 class WriteBack: public Write {
149 public:
150 virtual void SetUp() {
151 	m_init_flags |= FUSE_WRITEBACK_CACHE;
152 	FuseTest::SetUp();
153 	if (IsSkipped())
154 		return;
155 }
156 
157 void expect_write(uint64_t ino, uint64_t offset, uint64_t isize,
158 	uint64_t osize, const void *contents)
159 {
160 	FuseTest::expect_write(ino, offset, isize, osize, FUSE_WRITE_CACHE, 0,
161 		contents);
162 }
163 };
164 
/* Writeback cacheing combined with the -o async mount option */
class WriteBackAsync: public WriteBack {
public:
virtual void SetUp() {
	m_async = true;
	m_maxwrite = 65536;
	WriteBack::SetUp();
}
};
173 
/* Parameterized on log2 of the timestamp granularity in nanoseconds */
class TimeGran: public WriteBackAsync, public WithParamInterface<unsigned> {
public:
virtual void SetUp() {
	m_time_gran = 1 << GetParam();
	WriteBackAsync::SetUp();
}
};
181 
182 /* Tests for clustered writes with WriteBack cacheing */
class WriteCluster: public WriteBack {
public:
virtual void SetUp() {
	m_async = true;
	m_maxwrite = 1 << 25;	// Anything larger than MAXPHYS will suffice
	WriteBack::SetUp();
	/*
	 * Clustering only helps when multiple cache blocks fit in a single
	 * physical I/O, so skip on kernels with a small MAXPHYS.
	 */
	if (m_maxphys < 2 * DFLTPHYS)
		GTEST_SKIP() << "MAXPHYS must be at least twice DFLTPHYS"
			<< " for this test";
	if (m_maxphys < 2 * m_maxbcachebuf)
		GTEST_SKIP() << "MAXPHYS must be at least twice maxbcachebuf"
			<< " for this test";
}
};
197 
198 /* Tests relating to the server's max_write property */
class WriteMaxWrite: public Write {
public:
virtual void SetUp() {
	/*
	 * For this test, m_maxwrite must be less than both m_maxbcachebuf
	 * and maxphys, so that a single cache-block-sized write must be
	 * split into multiple FUSE_WRITE operations.
	 */
	m_maxwrite = 32768;
	Write::SetUp();
}
};
210 
/* SIGXFSZ handler: records delivery in Write::s_sigxfsz */
void sigxfsz_handler(int __unused sig) {
	Write::s_sigxfsz = 1;
}
214 
215 /* AIO writes need to set the header's pid field correctly */
216 /* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236379 */
TEST_F(AioWrite, DISABLED_aio_write)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	uint64_t offset = 4096;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	struct aiocb iocb, *piocb;

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write(ino, offset, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_WRONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Submit one asynchronous write and wait for its completion */
	iocb.aio_nbytes = bufsize;
	iocb.aio_fildes = fd;
	iocb.aio_buf = __DECONST(void *, CONTENTS);
	iocb.aio_offset = offset;
	iocb.aio_sigevent.sigev_notify = SIGEV_NONE;	/* poll, no signal */
	ASSERT_EQ(0, aio_write(&iocb)) << strerror(errno);
	ASSERT_EQ(bufsize, aio_waitcomplete(&piocb, NULL)) << strerror(errno);
	leak(fd);
}
244 
245 /*
246  * When a file is opened with O_APPEND, we should forward that flag to
247  * FUSE_OPEN (tested by Open.o_append) but still attempt to calculate the
248  * offset internally.  That way we'll work both with filesystems that
249  * understand O_APPEND (and ignore the offset) and filesystems that don't (and
250  * simply use the offset).
251  *
252  * Note that verifying the O_APPEND flag in FUSE_OPEN is done in the
253  * Open.o_append test.
254  */
TEST_F(Write, append)
{
	const ssize_t BUFSIZE = 9;
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char CONTENTS[BUFSIZE] = "abcdefgh";
	uint64_t ino = 42;
	/*
	 * Set offset to a maxbcachebuf boundary so we don't need to RMW when
	 * using writeback caching
	 */
	uint64_t initial_offset = m_maxbcachebuf;
	int fd;

	expect_lookup(RELPATH, ino, initial_offset);
	expect_open(ino, 0, 1);
	/* The kernel should compute the offset (EOF) itself */
	expect_write(ino, initial_offset, BUFSIZE, BUFSIZE, CONTENTS);

	/* Must open O_RDWR or fuse(4) implicitly sets direct_io */
	fd = open(FULLPATH, O_RDWR | O_APPEND);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(BUFSIZE, write(fd, CONTENTS, BUFSIZE)) << strerror(errno);
	leak(fd);
}
280 
281 /* If a file is cached, then appending to the end should not cause a read */
TEST_F(Write, append_to_cached)
{
	const ssize_t BUFSIZE = 9;
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	char *oldcontents, *oldbuf;
	const char CONTENTS[BUFSIZE] = "abcdefgh";
	uint64_t ino = 42;
	/*
	 * Set offset in between maxbcachebuf boundary to test buffer handling
	 */
	uint64_t oldsize = m_maxbcachebuf / 2;
	int fd;

	oldcontents = (char*)calloc(1, oldsize);
	ASSERT_NE(nullptr, oldcontents) << strerror(errno);
	oldbuf = (char*)malloc(oldsize);
	ASSERT_NE(nullptr, oldbuf) << strerror(errno);

	/* Exactly one read is expected; the append must not trigger another */
	expect_lookup(RELPATH, ino, oldsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, oldsize, oldsize, oldcontents);
	maybe_expect_write(ino, oldsize, BUFSIZE, CONTENTS);

	/* Must open O_RDWR or fuse(4) implicitly sets direct_io */
	fd = open(FULLPATH, O_RDWR | O_APPEND);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Read the old data into the cache */
	ASSERT_EQ((ssize_t)oldsize, read(fd, oldbuf, oldsize))
		<< strerror(errno);

	/* Write the new data.  There should be no more read operations */
	ASSERT_EQ(BUFSIZE, write(fd, CONTENTS, BUFSIZE)) << strerror(errno);
	leak(fd);
	free(oldbuf);
	free(oldcontents);
}
320 
/* O_APPEND should work even when the server requests direct_io */
TEST_F(Write, append_direct_io)
{
	const ssize_t BUFSIZE = 9;
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char CONTENTS[BUFSIZE] = "abcdefgh";
	uint64_t ino = 42;
	uint64_t initial_offset = 4096;
	int fd;

	expect_lookup(RELPATH, ino, initial_offset);
	expect_open(ino, FOPEN_DIRECT_IO, 1);
	/* The write should be issued at EOF, bypassing the cache */
	expect_write(ino, initial_offset, BUFSIZE, BUFSIZE, CONTENTS);

	fd = open(FULLPATH, O_WRONLY | O_APPEND);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(BUFSIZE, write(fd, CONTENTS, BUFSIZE)) << strerror(errno);
	leak(fd);
}
341 
342 /* A direct write should evict any overlapping cached data */
TEST_F(Write, direct_io_evicts_cache)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char CONTENTS0[] = "abcdefgh";
	const char CONTENTS1[] = "ijklmnop";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS0) + 1;	/* include the NUL */
	char readbuf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS0);
	expect_write(ino, 0, bufsize, bufsize, CONTENTS1);

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	// Prime cache
	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);

	// Write directly, evicting cache
	ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno);
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(bufsize, write(fd, CONTENTS1, bufsize)) << strerror(errno);

	// Read again.  Cache should be bypassed
	expect_read(ino, 0, bufsize, bufsize, CONTENTS1);
	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
	ASSERT_STREQ(readbuf, CONTENTS1);

	leak(fd);
}
379 
380 /*
381  * If the server doesn't return FOPEN_DIRECT_IO during FUSE_OPEN, then it's not
382  * allowed to return a short write for that file handle.  However, if it does
383  * then we should still do our darndest to handle it by resending the unwritten
384  * portion.
385  */
TEST_F(Write, indirect_io_short_write)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	ssize_t bufsize0 = 11;				/* short first reply */
	ssize_t bufsize1 = strlen(CONTENTS) - bufsize0;	/* remaining tail */
	const char *contents1 = CONTENTS + bufsize0;

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	/* Server writes only bufsize0 bytes; kernel must resend the rest */
	expect_write(ino, 0, bufsize, bufsize0, CONTENTS);
	expect_write(ino, bufsize0, bufsize1, bufsize1, contents1);

	fd = open(FULLPATH, O_WRONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	leak(fd);
}
409 
410 /*
411  * When the direct_io option is used, filesystems are allowed to write less
412  * data than requested.  We should return the short write to userland.
413  */
TEST_F(Write, direct_io_short_write)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	ssize_t halfbufsize = bufsize / 2;

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, FOPEN_DIRECT_IO, 1);
	/* The server only writes half; no retry is expected */
	expect_write(ino, 0, bufsize, halfbufsize, CONTENTS);

	fd = open(FULLPATH, O_WRONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* The short count should be returned straight to userland */
	ASSERT_EQ(halfbufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	leak(fd);
}
434 
435 /*
436  * An insidious edge case: the filesystem returns a short write, and the
437  * difference between what we requested and what it actually wrote crosses an
438  * iov element boundary
439  */
TEST_F(Write, direct_io_short_write_iov)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS0 = "abcdefgh";
	const char *CONTENTS1 = "ijklmnop";
	const char *EXPECTED0 = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	/* size0 ends one byte before the iov[0]/iov[1] boundary */
	ssize_t size0 = strlen(CONTENTS0) - 1;
	ssize_t size1 = strlen(CONTENTS1) + 1;
	ssize_t totalsize = size0 + size1;
	struct iovec iov[2];

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, FOPEN_DIRECT_IO, 1);
	/* Server accepts only size0 bytes of the combined buffer */
	expect_write(ino, 0, totalsize, size0, EXPECTED0);

	fd = open(FULLPATH, O_WRONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	iov[0].iov_base = __DECONST(void*, CONTENTS0);
	iov[0].iov_len = strlen(CONTENTS0);
	iov[1].iov_base = __DECONST(void*, CONTENTS1);
	iov[1].iov_len = strlen(CONTENTS1);
	ASSERT_EQ(size0, writev(fd, iov, 2)) << strerror(errno);
	leak(fd);
}
468 
469 /* fusefs should respect RLIMIT_FSIZE */
TEST_F(Write, rlimit_fsize)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct rlimit rl;
	ssize_t bufsize = strlen(CONTENTS);
	off_t offset = 1'000'000'000;
	uint64_t ino = 42;
	int fd;

	/* No FUSE_WRITE is expected: the kernel rejects the write itself */
	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);

	/* Cap the file size below the write offset */
	rl.rlim_cur = offset;
	rl.rlim_max = 10 * offset;
	ASSERT_EQ(0, setrlimit(RLIMIT_FSIZE, &rl)) << strerror(errno);
	ASSERT_NE(SIG_ERR, signal(SIGXFSZ, sigxfsz_handler)) << strerror(errno);

	fd = open(FULLPATH, O_WRONLY);

	ASSERT_LE(0, fd) << strerror(errno);

	/* The write must fail with EFBIG and deliver SIGXFSZ */
	ASSERT_EQ(-1, pwrite(fd, CONTENTS, bufsize, offset));
	EXPECT_EQ(EFBIG, errno);
	EXPECT_EQ(1, s_sigxfsz);
	leak(fd);
}
498 
499 /*
500  * A short read indicates EOF.  Test that nothing bad happens if we get EOF
501  * during the R of a RMW operation.
502  */
TEST_F(Write, eof_during_rmw)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	const char *INITIAL   = "XXXXXXXXXX";
	uint64_t ino = 42;
	uint64_t offset = 1;
	ssize_t bufsize = strlen(CONTENTS) + 1;
	off_t orig_fsize = 10;
	off_t truncated_fsize = 5;	/* file shrank behind our back */
	int fd;

	FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, orig_fsize, 1);
	expect_open(ino, 0, 1);
	/* The read returns fewer bytes than the cached size: EOF */
	expect_read(ino, 0, orig_fsize, truncated_fsize, INITIAL, O_RDWR);
	maybe_expect_write(ino, offset, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Write into the middle, triggering a read-modify-write */
	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS, bufsize, offset))
		<< strerror(errno);
	leak(fd);
}
528 
529 /*
530  * If the kernel cannot be sure which uid, gid, or pid was responsible for a
531  * write, then it must set the FUSE_WRITE_CACHE bit
532  */
533 /* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236378 */
TEST_F(Write, mmap)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	void *p;
	uint64_t offset = 10;
	size_t len;
	void *zeros, *expected;

	len = getpagesize();

	/* expected = one page of zeros with CONTENTS spliced in at offset */
	zeros = calloc(1, len);
	ASSERT_NE(nullptr, zeros);
	expected = calloc(1, len);
	ASSERT_NE(nullptr, expected);
	memmove((uint8_t*)expected + offset, CONTENTS, bufsize);

	expect_lookup(RELPATH, ino, len);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, len, len, zeros);
	/*
	 * Writes from the pager may or may not be associated with the correct
	 * pid, so they must set FUSE_WRITE_CACHE.
	 */
	FuseTest::expect_write(ino, 0, len, len, FUSE_WRITE_CACHE, 0, expected);
	expect_flush(ino, 1, ReturnErrno(0));
	expect_release(ino, ReturnErrno(0));

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	ASSERT_NE(MAP_FAILED, p) << strerror(errno);

	/* Dirty the mapping; the flush happens at close */
	memmove((uint8_t*)p + offset, CONTENTS, bufsize);

	ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
	close(fd);	// Write mmap'd data on close

	free(expected);
	free(zeros);

	/* NOTE(review): fd is already closed; leak() here looks redundant */
	leak(fd);
}
582 
583 TEST_F(Write, pwrite)
584 {
585 	const char FULLPATH[] = "mountpoint/some_file.txt";
586 	const char RELPATH[] = "some_file.txt";
587 	const char *CONTENTS = "abcdefgh";
588 	uint64_t ino = 42;
589 	uint64_t offset = m_maxbcachebuf;
590 	int fd;
591 	ssize_t bufsize = strlen(CONTENTS);
592 
593 	expect_lookup(RELPATH, ino, 0);
594 	expect_open(ino, 0, 1);
595 	expect_write(ino, offset, bufsize, bufsize, CONTENTS);
596 
597 	fd = open(FULLPATH, O_WRONLY);
598 	ASSERT_LE(0, fd) << strerror(errno);
599 
600 	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS, bufsize, offset))
601 		<< strerror(errno);
602 	leak(fd);
603 }
604 
605 /* Writing a file should update its cached mtime and ctime */
TEST_F(Write, timestamps)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	ssize_t bufsize = strlen(CONTENTS);
	uint64_t ino = 42;
	struct stat sb0, sb1;
	int fd;

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	maybe_expect_write(ino, 0, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb0)) << strerror(errno);
	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);

	/* Sleep past the timestamp granularity so a change is observable */
	nap();

	ASSERT_EQ(0, fstat(fd, &sb1)) << strerror(errno);

	/* atime is untouched; mtime and ctime must advance */
	EXPECT_EQ(sb0.st_atime, sb1.st_atime);
	EXPECT_NE(sb0.st_mtime, sb1.st_mtime);
	EXPECT_NE(sb0.st_ctime, sb1.st_ctime);

	leak(fd);
}
635 
636 TEST_F(Write, write)
637 {
638 	const char FULLPATH[] = "mountpoint/some_file.txt";
639 	const char RELPATH[] = "some_file.txt";
640 	const char *CONTENTS = "abcdefgh";
641 	uint64_t ino = 42;
642 	int fd;
643 	ssize_t bufsize = strlen(CONTENTS);
644 
645 	expect_lookup(RELPATH, ino, 0);
646 	expect_open(ino, 0, 1);
647 	expect_write(ino, 0, bufsize, bufsize, CONTENTS);
648 
649 	fd = open(FULLPATH, O_WRONLY);
650 	ASSERT_LE(0, fd) << strerror(errno);
651 
652 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
653 	leak(fd);
654 }
655 
656 /* fuse(4) should not issue writes of greater size than the daemon requests */
TEST_F(WriteMaxWrite, write)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	int *contents;
	uint64_t ino = 42;
	int fd;
	ssize_t halfbufsize, bufsize;

	halfbufsize = m_mock->m_maxwrite;
	if (halfbufsize >= m_maxbcachebuf || halfbufsize >= m_maxphys)
		GTEST_SKIP() << "Must lower m_maxwrite for this test";
	bufsize = halfbufsize * 2;
	/* Fill the buffer with distinct ints so split boundaries are checked */
	contents = (int*)malloc(bufsize);
	ASSERT_NE(nullptr, contents);
	for (int i = 0; i < (int)bufsize / (int)sizeof(i); i++) {
		contents[i] = i;
	}

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	/* Each half must arrive as its own max_write-sized FUSE_WRITE */
	maybe_expect_write(ino, 0, halfbufsize, contents);
	maybe_expect_write(ino, halfbufsize, halfbufsize,
		&contents[halfbufsize / sizeof(int)]);

	fd = open(FULLPATH, O_WRONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, contents, bufsize)) << strerror(errno);
	leak(fd);

	free(contents);
}
690 
691 TEST_F(Write, write_nothing)
692 {
693 	const char FULLPATH[] = "mountpoint/some_file.txt";
694 	const char RELPATH[] = "some_file.txt";
695 	const char *CONTENTS = "";
696 	uint64_t ino = 42;
697 	int fd;
698 	ssize_t bufsize = 0;
699 
700 	expect_lookup(RELPATH, ino, 0);
701 	expect_open(ino, 0, 1);
702 
703 	fd = open(FULLPATH, O_WRONLY);
704 	ASSERT_LE(0, fd) << strerror(errno);
705 
706 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
707 	leak(fd);
708 }
709 
/* A basic write using the protocol 7.8 message layout */
TEST_F(Write_7_8, write)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write_7_8(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_WRONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	leak(fd);
}
729 
730 /* In writeback mode, dirty data should be written on close */
TEST_F(WriteBackAsync, close)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write(ino, 0, bufsize, bufsize, CONTENTS);
	/* Allow any FUSE_SETATTR the close path may generate */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_SETATTR);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;	// Must match nodeid
	})));
	expect_flush(ino, 1, ReturnErrno(0));
	expect_release(ino, ReturnErrno(0));

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	/* Closing must flush the dirty data, satisfying expect_write */
	close(fd);
}
761 
762 /* In writeback mode, adjacent writes will be clustered together */
TEST_F(WriteCluster, clustering)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	uint64_t ino = 42;
	int i, fd;
	void *wbuf, *wbuf2x;
	ssize_t bufsize = m_maxbcachebuf;
	off_t filesize = 5 * bufsize;

	/* wbuf: one cache block of 'X'; wbuf2x: two blocks, for expectations */
	wbuf = malloc(bufsize);
	ASSERT_NE(nullptr, wbuf) << strerror(errno);
	memset(wbuf, 'X', bufsize);
	wbuf2x = malloc(2 * bufsize);
	ASSERT_NE(nullptr, wbuf2x) << strerror(errno);
	memset(wbuf2x, 'X', 2 * bufsize);

	expect_lookup(RELPATH, ino, filesize);
	expect_open(ino, 0, 1);
	/*
	 * Writes of bufsize-bytes each should be clustered into greater sizes.
	 * The amount of clustering is adaptive, so the first write actually
	 * issued will be 2x bufsize and subsequent writes may be larger
	 */
	expect_write(ino, 0, 2 * bufsize, 2 * bufsize, wbuf2x);
	expect_write(ino, 2 * bufsize, 2 * bufsize, 2 * bufsize, wbuf2x);
	expect_flush(ino, 1, ReturnErrno(0));
	expect_release(ino, ReturnErrno(0));

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Four block-sized writes should produce two clustered FUSE_WRITEs */
	for (i = 0; i < 4; i++) {
		ASSERT_EQ(bufsize, write(fd, wbuf, bufsize))
			<< strerror(errno);
	}
	close(fd);
	free(wbuf2x);
	free(wbuf);
}
803 
804 /*
805  * When clustering writes, an I/O error to any of the cluster's children should
806  * not panic the system on unmount
807  */
/*
 * Regression test for bug 238565
 * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=238565
 */
TEST_F(WriteCluster, cluster_write_err)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	uint64_t ino = 42;
	int i, fd;
	void *wbuf;
	ssize_t bufsize = m_maxbcachebuf;
	off_t filesize = 4 * bufsize;

	wbuf = malloc(bufsize);
	ASSERT_NE(nullptr, wbuf) << strerror(errno);
	memset(wbuf, 'X', bufsize);

	expect_lookup(RELPATH, ino, filesize);
	expect_open(ino, 0, 1);
	/* Fail every FUSE_WRITE with EIO */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_WRITE);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnErrno(EIO)));
	expect_flush(ino, 1, ReturnErrno(0));
	expect_release(ino, ReturnErrno(0));

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	/* The syscalls succeed; the error only surfaces during writeback */
	for (i = 0; i < 3; i++) {
		ASSERT_EQ(bufsize, write(fd, wbuf, bufsize))
			<< strerror(errno);
	}
	/* The test passes if close/unmount do not panic */
	close(fd);
	free(wbuf);
}
847 
848 /*
849  * In writeback mode, writes to an O_WRONLY file could trigger reads from the
850  * server.  The FUSE protocol explicitly allows that.
851  */
TEST_F(WriteBack, rmw)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	const char *INITIAL   = "XXXXXXXXXX";
	uint64_t ino = 42;
	uint64_t offset = 1;	/* unaligned, forcing a read-modify-write */
	off_t fsize = 10;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);

	FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, fsize, 1);
	expect_open(ino, 0, 1);
	/* The RMW read arrives even though the file is open O_WRONLY */
	expect_read(ino, 0, fsize, fsize, INITIAL, O_WRONLY);
	maybe_expect_write(ino, offset, bufsize, CONTENTS);

	fd = open(FULLPATH, O_WRONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS, bufsize, offset))
		<< strerror(errno);
	leak(fd);
}
876 
877 /*
878  * Without direct_io, writes should be committed to cache
879  */
TEST_F(WriteBack, cache)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t readbuf[bufsize];

	/* Note: no expect_read; the read-back must hit the cache */
	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	/*
	 * A subsequent read should be serviced by cache, without querying the
	 * filesystem daemon
	 */
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
	leak(fd);
}
906 
907 /*
908  * With O_DIRECT, writes should be not committed to cache.  Admittedly this is
909  * an odd test, because it would be unusual to use O_DIRECT for writes but not
910  * reads.
911  */
TEST_F(WriteBack, o_direct)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t readbuf[bufsize];

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	/*
	 * Expect a plain write; presumably the second flags argument forbids
	 * FUSE_WRITE_CACHE on the request — confirm against FuseTest.
	 */
	FuseTest::expect_write(ino, 0, bufsize, bufsize, 0, FUSE_WRITE_CACHE,
		CONTENTS);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDWR | O_DIRECT);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	/* A subsequent read must query the daemon because cache is empty */
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
	leak(fd);
}
938 
/* As o_direct, but direct_io is requested by the server instead */
TEST_F(WriteBack, direct_io)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t readbuf[bufsize];

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, FOPEN_DIRECT_IO, 1);
	FuseTest::expect_write(ino, 0, bufsize, bufsize, 0, FUSE_WRITE_CACHE,
		CONTENTS);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	/* A subsequent read must query the daemon because cache is empty */
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
	leak(fd);
}
965 
966 /*
967  * mmap should still be possible even if the server used direct_io.  Mmap will
968  * still use the cache, though.
969  *
970  * Regression test for bug 247276
971  * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=247276
972  */
TEST_F(WriteBack, mmap_direct_io)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	size_t len;
	ssize_t bufsize = strlen(CONTENTS);
	void *p, *zeros;

	len = getpagesize();
	zeros = calloc(1, len);
	ASSERT_NE(nullptr, zeros);

	expect_lookup(RELPATH, ino, len);
	expect_open(ino, FOPEN_DIRECT_IO, 1);
	expect_read(ino, 0, len, len, zeros);
	expect_flush(ino, 1, ReturnErrno(0));
	/* The pager's flush carries FUSE_WRITE_CACHE even with direct_io */
	FuseTest::expect_write(ino, 0, len, len, FUSE_WRITE_CACHE, 0, zeros);
	expect_release(ino, ReturnErrno(0));

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	/* The mapping must work despite direct_io */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	ASSERT_NE(MAP_FAILED, p) << strerror(errno);

	memmove((uint8_t*)p, CONTENTS, bufsize);

	ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
	close(fd);	// Write mmap'd data on close

	free(zeros);
}
1008 
1009 /*
1010  * When mounted with -o async, the writeback cache mode should delay writes
1011  */
TEST_F(WriteBackAsync, delay)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	/* Write should be cached, but FUSE_WRITE shouldn't be sent */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_WRITE);
		}, Eq(true)),
		_)
	).Times(0);

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);

	/* Don't close the file because that would flush the cache */
	leak(fd);
}
1039 
1040 /*
1041  * A direct write should not evict dirty cached data from outside of its own
1042  * byte range.
1043  */
1044 TEST_F(WriteBackAsync, direct_io_ignores_unrelated_cached)
1045 {
1046 	const char FULLPATH[] = "mountpoint/some_file.txt";
1047 	const char RELPATH[] = "some_file.txt";
1048 	const char CONTENTS0[] = "abcdefgh";
1049 	const char CONTENTS1[] = "ijklmnop";
1050 	uint64_t ino = 42;
1051 	int fd;
1052 	ssize_t bufsize = strlen(CONTENTS0) + 1;
1053 	ssize_t fsize = 2 * m_maxbcachebuf;
1054 	char readbuf[bufsize];
1055 	void *zeros;
1056 
1057 	zeros = calloc(1, m_maxbcachebuf);
1058 	ASSERT_NE(nullptr, zeros);
1059 
1060 	expect_lookup(RELPATH, ino, fsize);
1061 	expect_open(ino, 0, 1);
1062 	expect_read(ino, 0, m_maxbcachebuf, m_maxbcachebuf, zeros);
1063 	FuseTest::expect_write(ino, m_maxbcachebuf, bufsize, bufsize, 0, 0,
1064 		CONTENTS1);
1065 
1066 	fd = open(FULLPATH, O_RDWR);
1067 	ASSERT_LE(0, fd) << strerror(errno);
1068 
1069 	// Cache first block with dirty data.  This will entail first reading
1070 	// the existing data.
1071 	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS0, bufsize, 0))
1072 		<< strerror(errno);
1073 
1074 	// Write directly to second block
1075 	ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno);
1076 	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS1, bufsize, m_maxbcachebuf))
1077 		<< strerror(errno);
1078 
1079 	// Read from the first block again.  Should be serviced by cache.
1080 	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
1081 	ASSERT_EQ(bufsize, pread(fd, readbuf, bufsize, 0)) << strerror(errno);
1082 	ASSERT_STREQ(readbuf, CONTENTS0);
1083 
1084 	leak(fd);
1085 	free(zeros);
1086 }
1087 
1088 /*
1089  * If a direct io write partially overlaps one or two blocks of dirty cached
1090  * data, No dirty data should be lost.  Admittedly this is a weird test,
1091  * because it would be unusual to use O_DIRECT and the writeback cache.
1092  */
1093 TEST_F(WriteBackAsync, direct_io_partially_overlaps_cached_block)
1094 {
1095 	const char FULLPATH[] = "mountpoint/some_file.txt";
1096 	const char RELPATH[] = "some_file.txt";
1097 	uint64_t ino = 42;
1098 	int fd;
1099 	off_t bs = m_maxbcachebuf;
1100 	ssize_t fsize = 3 * bs;
1101 	void *readbuf, *zeros, *ones, *zeroones, *onezeros;
1102 
1103 	readbuf = malloc(bs);
1104 	ASSERT_NE(nullptr, readbuf) << strerror(errno);
1105 	zeros = calloc(1, 3 * bs);
1106 	ASSERT_NE(nullptr, zeros);
1107 	ones = calloc(1, 2 * bs);
1108 	ASSERT_NE(nullptr, ones);
1109 	memset(ones, 1, 2 * bs);
1110 	zeroones = calloc(1, bs);
1111 	ASSERT_NE(nullptr, zeroones);
1112 	memset((uint8_t*)zeroones + bs / 2, 1, bs / 2);
1113 	onezeros = calloc(1, bs);
1114 	ASSERT_NE(nullptr, onezeros);
1115 	memset(onezeros, 1, bs / 2);
1116 
1117 	expect_lookup(RELPATH, ino, fsize);
1118 	expect_open(ino, 0, 1);
1119 
1120 	fd = open(FULLPATH, O_RDWR);
1121 	ASSERT_LE(0, fd) << strerror(errno);
1122 
1123 	/* Cache first and third blocks with dirty data.  */
1124 	ASSERT_EQ(3 * bs, pwrite(fd, zeros, 3 * bs, 0)) << strerror(errno);
1125 
1126 	/*
1127 	 * Write directly to all three blocks.  The partially written blocks
1128 	 * will be flushed because they're dirty.
1129 	 */
1130 	FuseTest::expect_write(ino, 0, bs, bs, 0, 0, zeros);
1131 	FuseTest::expect_write(ino, 2 * bs, bs, bs, 0, 0, zeros);
1132 	/* The direct write is split in two because of the m_maxwrite value */
1133 	FuseTest::expect_write(ino,     bs / 2, bs, bs, 0, 0, ones);
1134 	FuseTest::expect_write(ino, 3 * bs / 2, bs, bs, 0, 0, ones);
1135 	ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno);
1136 	ASSERT_EQ(2 * bs, pwrite(fd, ones, 2 * bs, bs / 2)) << strerror(errno);
1137 
1138 	/*
1139 	 * Read from both the valid and invalid portions of the first and third
1140 	 * blocks again.  This will entail FUSE_READ operations because these
1141 	 * blocks were invalidated by the direct write.
1142 	 */
1143 	expect_read(ino, 0, bs, bs, zeroones);
1144 	expect_read(ino, 2 * bs, bs, bs, onezeros);
1145 	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
1146 	ASSERT_EQ(bs / 2, pread(fd, readbuf, bs / 2, 0)) << strerror(errno);
1147 	EXPECT_EQ(0, memcmp(zeros, readbuf, bs / 2));
1148 	ASSERT_EQ(bs / 2, pread(fd, readbuf, bs / 2, 5 * bs / 2))
1149 		<< strerror(errno);
1150 	EXPECT_EQ(0, memcmp(zeros, readbuf, bs / 2));
1151 	ASSERT_EQ(bs / 2, pread(fd, readbuf, bs / 2, bs / 2))
1152 		<< strerror(errno);
1153 	EXPECT_EQ(0, memcmp(ones, readbuf, bs / 2));
1154 	ASSERT_EQ(bs / 2, pread(fd, readbuf, bs / 2, 2 * bs))
1155 		<< strerror(errno);
1156 	EXPECT_EQ(0, memcmp(ones, readbuf, bs / 2));
1157 
1158 	leak(fd);
1159 	free(zeroones);
1160 	free(onezeros);
1161 	free(ones);
1162 	free(zeros);
1163 	free(readbuf);
1164 }
1165 
1166 /*
1167  * In WriteBack mode, writes may be cached beyond what the server thinks is the
1168  * EOF.  In this case, a short read at EOF should _not_ cause fusefs to update
1169  * the file's size.
1170  */
1171 TEST_F(WriteBackAsync, eof)
1172 {
1173 	const char FULLPATH[] = "mountpoint/some_file.txt";
1174 	const char RELPATH[] = "some_file.txt";
1175 	const char *CONTENTS0 = "abcdefgh";
1176 	const char *CONTENTS1 = "ijklmnop";
1177 	uint64_t ino = 42;
1178 	int fd;
1179 	off_t offset = m_maxbcachebuf;
1180 	ssize_t wbufsize = strlen(CONTENTS1);
1181 	off_t old_filesize = (off_t)strlen(CONTENTS0);
1182 	ssize_t rbufsize = 2 * old_filesize;
1183 	char readbuf[rbufsize];
1184 	size_t holesize = rbufsize - old_filesize;
1185 	char hole[holesize];
1186 	struct stat sb;
1187 	ssize_t r;
1188 
1189 	expect_lookup(RELPATH, ino, 0);
1190 	expect_open(ino, 0, 1);
1191 	expect_read(ino, 0, m_maxbcachebuf, old_filesize, CONTENTS0);
1192 
1193 	fd = open(FULLPATH, O_RDWR);
1194 	ASSERT_LE(0, fd) << strerror(errno);
1195 
1196 	/* Write and cache data beyond EOF */
1197 	ASSERT_EQ(wbufsize, pwrite(fd, CONTENTS1, wbufsize, offset))
1198 		<< strerror(errno);
1199 
1200 	/* Read from the old EOF */
1201 	r = pread(fd, readbuf, rbufsize, 0);
1202 	ASSERT_LE(0, r) << strerror(errno);
1203 	EXPECT_EQ(rbufsize, r) << "read should've synthesized a hole";
1204 	EXPECT_EQ(0, memcmp(CONTENTS0, readbuf, old_filesize));
1205 	bzero(hole, holesize);
1206 	EXPECT_EQ(0, memcmp(hole, readbuf + old_filesize, holesize));
1207 
1208 	/* The file's size should still be what was established by pwrite */
1209 	ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno);
1210 	EXPECT_EQ(offset + wbufsize, sb.st_size);
1211 	leak(fd);
1212 }
1213 
1214 /*
1215  * When a file has dirty writes that haven't been flushed, the server's notion
1216  * of its mtime and ctime will be wrong.  The kernel should ignore those if it
1217  * gets them from a FUSE_GETATTR before flushing.
1218  */
1219 TEST_F(WriteBackAsync, timestamps)
1220 {
1221 	const char FULLPATH[] = "mountpoint/some_file.txt";
1222 	const char RELPATH[] = "some_file.txt";
1223 	const char *CONTENTS = "abcdefgh";
1224 	ssize_t bufsize = strlen(CONTENTS);
1225 	uint64_t ino = 42;
1226 	uint64_t attr_valid = 0;
1227 	uint64_t attr_valid_nsec = 0;
1228 	uint64_t server_time = 12345;
1229 	mode_t mode = S_IFREG | 0644;
1230 	int fd;
1231 
1232 	struct stat sb;
1233 
1234 	EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH)
1235 	.WillRepeatedly(Invoke(
1236 		ReturnImmediate([=](auto in __unused, auto& out) {
1237 		SET_OUT_HEADER_LEN(out, entry);
1238 		out.body.entry.attr.mode = mode;
1239 		out.body.entry.nodeid = ino;
1240 		out.body.entry.attr.nlink = 1;
1241 		out.body.entry.attr_valid = attr_valid;
1242 		out.body.entry.attr_valid_nsec = attr_valid_nsec;
1243 	})));
1244 	expect_open(ino, 0, 1);
1245 	EXPECT_CALL(*m_mock, process(
1246 		ResultOf([=](auto in) {
1247 			return (in.header.opcode == FUSE_GETATTR &&
1248 				in.header.nodeid == ino);
1249 		}, Eq(true)),
1250 		_)
1251 	).WillRepeatedly(Invoke(
1252 	ReturnImmediate([=](auto i __unused, auto& out) {
1253 		SET_OUT_HEADER_LEN(out, attr);
1254 		out.body.attr.attr.ino = ino;
1255 		out.body.attr.attr.mode = mode;
1256 		out.body.attr.attr_valid = attr_valid;
1257 		out.body.attr.attr_valid_nsec = attr_valid_nsec;
1258 		out.body.attr.attr.atime = server_time;
1259 		out.body.attr.attr.mtime = server_time;
1260 		out.body.attr.attr.ctime = server_time;
1261 	})));
1262 
1263 	fd = open(FULLPATH, O_RDWR);
1264 	ASSERT_LE(0, fd) << strerror(errno);
1265 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1266 
1267 	ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno);
1268 	EXPECT_EQ((time_t)server_time, sb.st_atime);
1269 	EXPECT_NE((time_t)server_time, sb.st_mtime);
1270 	EXPECT_NE((time_t)server_time, sb.st_ctime);
1271 
1272 	leak(fd);
1273 }
1274 
1275 /* Any dirty timestamp fields should be flushed during a SETATTR */
1276 TEST_F(WriteBackAsync, timestamps_during_setattr)
1277 {
1278 	const char FULLPATH[] = "mountpoint/some_file.txt";
1279 	const char RELPATH[] = "some_file.txt";
1280 	const char *CONTENTS = "abcdefgh";
1281 	ssize_t bufsize = strlen(CONTENTS);
1282 	uint64_t ino = 42;
1283 	const mode_t newmode = 0755;
1284 	int fd;
1285 
1286 	expect_lookup(RELPATH, ino, 0);
1287 	expect_open(ino, 0, 1);
1288 	EXPECT_CALL(*m_mock, process(
1289 		ResultOf([=](auto in) {
1290 			uint32_t valid = FATTR_MODE | FATTR_MTIME | FATTR_CTIME;
1291 			return (in.header.opcode == FUSE_SETATTR &&
1292 				in.header.nodeid == ino &&
1293 				in.body.setattr.valid == valid);
1294 		}, Eq(true)),
1295 		_)
1296 	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
1297 		SET_OUT_HEADER_LEN(out, attr);
1298 		out.body.attr.attr.ino = ino;
1299 		out.body.attr.attr.mode = S_IFREG | newmode;
1300 	})));
1301 
1302 	fd = open(FULLPATH, O_RDWR);
1303 	ASSERT_LE(0, fd) << strerror(errno);
1304 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1305 	ASSERT_EQ(0, fchmod(fd, newmode)) << strerror(errno);
1306 
1307 	leak(fd);
1308 }
1309 
1310 /* fuse_init_out.time_gran controls the granularity of timestamps */
1311 TEST_P(TimeGran, timestamps_during_setattr)
1312 {
1313 	const char FULLPATH[] = "mountpoint/some_file.txt";
1314 	const char RELPATH[] = "some_file.txt";
1315 	const char *CONTENTS = "abcdefgh";
1316 	ssize_t bufsize = strlen(CONTENTS);
1317 	uint64_t ino = 42;
1318 	const mode_t newmode = 0755;
1319 	int fd;
1320 
1321 	expect_lookup(RELPATH, ino, 0);
1322 	expect_open(ino, 0, 1);
1323 	EXPECT_CALL(*m_mock, process(
1324 		ResultOf([=](auto in) {
1325 			uint32_t valid = FATTR_MODE | FATTR_MTIME | FATTR_CTIME;
1326 			return (in.header.opcode == FUSE_SETATTR &&
1327 				in.header.nodeid == ino &&
1328 				in.body.setattr.valid == valid &&
1329 				in.body.setattr.mtimensec % m_time_gran == 0 &&
1330 				in.body.setattr.ctimensec % m_time_gran == 0);
1331 		}, Eq(true)),
1332 		_)
1333 	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
1334 		SET_OUT_HEADER_LEN(out, attr);
1335 		out.body.attr.attr.ino = ino;
1336 		out.body.attr.attr.mode = S_IFREG | newmode;
1337 	})));
1338 
1339 	fd = open(FULLPATH, O_RDWR);
1340 	ASSERT_LE(0, fd) << strerror(errno);
1341 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1342 	ASSERT_EQ(0, fchmod(fd, newmode)) << strerror(errno);
1343 
1344 	leak(fd);
1345 }
1346 
/*
 * Instantiate TimeGran over parameter values 0 through 9 (presumably the
 * fixture maps parameter i to a time_gran of 10^i nanoseconds — confirm
 * against the TimeGran fixture definition).
 */
INSTANTIATE_TEST_CASE_P(RA, TimeGran, Range(0u, 10u));
1348 
1349 /*
1350  * Without direct_io, writes should be committed to cache
1351  */
1352 TEST_F(Write, writethrough)
1353 {
1354 	const char FULLPATH[] = "mountpoint/some_file.txt";
1355 	const char RELPATH[] = "some_file.txt";
1356 	const char *CONTENTS = "abcdefgh";
1357 	uint64_t ino = 42;
1358 	int fd;
1359 	ssize_t bufsize = strlen(CONTENTS);
1360 	uint8_t readbuf[bufsize];
1361 
1362 	expect_lookup(RELPATH, ino, 0);
1363 	expect_open(ino, 0, 1);
1364 	expect_write(ino, 0, bufsize, bufsize, CONTENTS);
1365 
1366 	fd = open(FULLPATH, O_RDWR);
1367 	ASSERT_LE(0, fd) << strerror(errno);
1368 
1369 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1370 	/*
1371 	 * A subsequent read should be serviced by cache, without querying the
1372 	 * filesystem daemon
1373 	 */
1374 	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
1375 	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
1376 	leak(fd);
1377 }
1378 
1379 /* Writes that extend a file should update the cached file size */
1380 TEST_F(Write, update_file_size)
1381 {
1382 	const char FULLPATH[] = "mountpoint/some_file.txt";
1383 	const char RELPATH[] = "some_file.txt";
1384 	const char *CONTENTS = "abcdefgh";
1385 	struct stat sb;
1386 	uint64_t ino = 42;
1387 	int fd;
1388 	ssize_t bufsize = strlen(CONTENTS);
1389 
1390 	expect_lookup(RELPATH, ino, 0);
1391 	expect_open(ino, 0, 1);
1392 	expect_write(ino, 0, bufsize, bufsize, CONTENTS);
1393 
1394 	fd = open(FULLPATH, O_RDWR);
1395 	ASSERT_LE(0, fd) << strerror(errno);
1396 
1397 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1398 	/* Get cached attributes */
1399 	ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno);
1400 	ASSERT_EQ(bufsize, sb.st_size);
1401 	leak(fd);
1402 }
1403