xref: /freebsd/tests/sys/fs/fusefs/write.cc (revision bbce101753b9f68edd34180cb617fff9327a9e0b)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2019 The FreeBSD Foundation
5  *
6  * This software was developed by BFF Storage Systems, LLC under sponsorship
7  * from the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 extern "C" {
32 #include <sys/param.h>
33 #include <sys/mman.h>
34 #include <sys/resource.h>
35 #include <sys/stat.h>
36 #include <sys/sysctl.h>
37 #include <sys/time.h>
38 #include <sys/uio.h>
39 
40 #include <aio.h>
41 #include <fcntl.h>
42 #include <signal.h>
43 #include <unistd.h>
44 }
45 
46 #include "mockfs.hh"
47 #include "utils.hh"
48 
49 using namespace testing;
50 
51 class Write: public FuseTest {
52 
53 public:
54 static sig_atomic_t s_sigxfsz;
55 
56 void SetUp() {
57 	s_sigxfsz = 0;
58 	FuseTest::SetUp();
59 }
60 
61 void TearDown() {
62 	struct sigaction sa;
63 
64 	bzero(&sa, sizeof(sa));
65 	sa.sa_handler = SIG_DFL;
66 	sigaction(SIGXFSZ, &sa, NULL);
67 
68 	FuseTest::TearDown();
69 }
70 
71 void expect_lookup(const char *relpath, uint64_t ino, uint64_t size)
72 {
73 	FuseTest::expect_lookup(relpath, ino, S_IFREG | 0644, size, 1);
74 }
75 
76 void expect_release(uint64_t ino, ProcessMockerT r)
77 {
78 	EXPECT_CALL(*m_mock, process(
79 		ResultOf([=](auto in) {
80 			return (in.header.opcode == FUSE_RELEASE &&
81 				in.header.nodeid == ino);
82 		}, Eq(true)),
83 		_)
84 	).WillRepeatedly(Invoke(r));
85 }
86 
87 void expect_write(uint64_t ino, uint64_t offset, uint64_t isize,
88 	uint64_t osize, const void *contents)
89 {
90 	FuseTest::expect_write(ino, offset, isize, osize, 0, 0, contents);
91 }
92 
93 /* Expect a write that may or may not come, depending on the cache mode */
94 void maybe_expect_write(uint64_t ino, uint64_t offset, uint64_t size,
95 	const void *contents)
96 {
97 	EXPECT_CALL(*m_mock, process(
98 		ResultOf([=](auto in) {
99 			const char *buf = (const char*)in.body.bytes +
100 				sizeof(struct fuse_write_in);
101 
102 			return (in.header.opcode == FUSE_WRITE &&
103 				in.header.nodeid == ino &&
104 				in.body.write.offset == offset  &&
105 				in.body.write.size == size &&
106 				0 == bcmp(buf, contents, size));
107 		}, Eq(true)),
108 		_)
109 	).Times(AtMost(1))
110 	.WillRepeatedly(Invoke(
111 		ReturnImmediate([=](auto in __unused, auto& out) {
112 			SET_OUT_HEADER_LEN(out, write);
113 			out.body.write.size = size;
114 		})
115 	));
116 }
117 
118 };
119 
120 sig_atomic_t Write::s_sigxfsz = 0;
121 
class Write_7_8: public FuseTest {

public:
virtual void SetUp() {
	/* Speak protocol 7.8, which uses a smaller fuse_write_in structure */
	m_kernel_minor_version = 8;
	FuseTest::SetUp();
}

/* Expect a protocol-7.8 FUSE_LOOKUP for a regular 0644 file */
void expect_lookup(const char *relpath, uint64_t ino, uint64_t size)
{
	FuseTest::expect_lookup_7_8(relpath, ino, S_IFREG | 0644, size, 1);
}

};
136 
class AioWrite: public Write {
virtual void SetUp() {
	/* AIO on fusefs requires the unsafe-AIO sysctl knob to be on */
	const char *sysctlname = "vfs.aio.enable_unsafe";
	int enabled = 0;
	size_t width = sizeof(enabled);

	FuseTest::SetUp();

	ASSERT_EQ(0, sysctlbyname(sysctlname, &enabled, &width, NULL, 0))
		<< strerror(errno);
	if (enabled == 0)
		GTEST_SKIP() <<
			"vfs.aio.enable_unsafe must be set for this test";
}
};
152 
/* Tests for the writeback cache mode */
class WriteBack: public Write {
public:
virtual void SetUp() {
	/* Ask the kernel to cache writes and flush them lazily */
	m_init_flags |= FUSE_WRITEBACK_CACHE;
	FuseTest::SetUp();
	if (IsSkipped())
		return;
}

/* In writeback mode cached writes carry the FUSE_WRITE_CACHE flag */
void expect_write(uint64_t ino, uint64_t offset, uint64_t isize,
	uint64_t osize, const void *contents)
{
	FuseTest::expect_write(ino, offset, isize, osize, FUSE_WRITE_CACHE, 0,
		contents);
}
};
170 
class WriteBackAsync: public WriteBack {
public:
virtual void SetUp() {
	/* Mount -o async, so dirty buffers need not be flushed promptly */
	m_async = true;
	WriteBack::SetUp();
}
};
178 
class TimeGran: public WriteBackAsync, public WithParamInterface<unsigned> {
public:
virtual void SetUp() {
	/* Timestamp granularity is 2^param nanoseconds */
	m_time_gran = 1 << GetParam();
	WriteBackAsync::SetUp();
}
};
186 
/* Tests for clustered writes with WriteBack cacheing */
class WriteCluster: public WriteBack {
public:
virtual void SetUp() {
	m_async = true;
	/* Let the daemon accept writes as large as the disk driver allows */
	m_maxwrite = m_maxphys;
	WriteBack::SetUp();
	if (m_maxphys < 2 * DFLTPHYS)
		GTEST_SKIP() << "MAXPHYS must be at least twice DFLTPHYS"
			<< " for this test";
	if (m_maxphys < 2 * m_maxbcachebuf)
		GTEST_SKIP() << "MAXPHYS must be at least twice maxbcachebuf"
			<< " for this test";
}
};
202 
/* Record receipt of SIGXFSZ, raised when a write exceeds RLIMIT_FSIZE */
void sigxfsz_handler(int __unused sig) {
	Write::s_sigxfsz = 1;
}
206 
207 /* AIO writes need to set the header's pid field correctly */
208 /* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236379 */
209 TEST_F(AioWrite, DISABLED_aio_write)
210 {
211 	const char FULLPATH[] = "mountpoint/some_file.txt";
212 	const char RELPATH[] = "some_file.txt";
213 	const char *CONTENTS = "abcdefgh";
214 	uint64_t ino = 42;
215 	uint64_t offset = 4096;
216 	int fd;
217 	ssize_t bufsize = strlen(CONTENTS);
218 	struct aiocb iocb, *piocb;
219 
220 	expect_lookup(RELPATH, ino, 0);
221 	expect_open(ino, 0, 1);
222 	expect_write(ino, offset, bufsize, bufsize, CONTENTS);
223 
224 	fd = open(FULLPATH, O_WRONLY);
225 	EXPECT_LE(0, fd) << strerror(errno);
226 
227 	iocb.aio_nbytes = bufsize;
228 	iocb.aio_fildes = fd;
229 	iocb.aio_buf = __DECONST(void *, CONTENTS);
230 	iocb.aio_offset = offset;
231 	iocb.aio_sigevent.sigev_notify = SIGEV_NONE;
232 	ASSERT_EQ(0, aio_write(&iocb)) << strerror(errno);
233 	ASSERT_EQ(bufsize, aio_waitcomplete(&piocb, NULL)) << strerror(errno);
234 	leak(fd);
235 }
236 
237 /*
238  * When a file is opened with O_APPEND, we should forward that flag to
239  * FUSE_OPEN (tested by Open.o_append) but still attempt to calculate the
240  * offset internally.  That way we'll work both with filesystems that
241  * understand O_APPEND (and ignore the offset) and filesystems that don't (and
242  * simply use the offset).
243  *
244  * Note that verifying the O_APPEND flag in FUSE_OPEN is done in the
245  * Open.o_append test.
246  */
247 TEST_F(Write, append)
248 {
249 	const ssize_t BUFSIZE = 9;
250 	const char FULLPATH[] = "mountpoint/some_file.txt";
251 	const char RELPATH[] = "some_file.txt";
252 	const char CONTENTS[BUFSIZE] = "abcdefgh";
253 	uint64_t ino = 42;
254 	/*
255 	 * Set offset to a maxbcachebuf boundary so we don't need to RMW when
256 	 * using writeback caching
257 	 */
258 	uint64_t initial_offset = m_maxbcachebuf;
259 	int fd;
260 
261 	expect_lookup(RELPATH, ino, initial_offset);
262 	expect_open(ino, 0, 1);
263 	expect_write(ino, initial_offset, BUFSIZE, BUFSIZE, CONTENTS);
264 
265 	/* Must open O_RDWR or fuse(4) implicitly sets direct_io */
266 	fd = open(FULLPATH, O_RDWR | O_APPEND);
267 	EXPECT_LE(0, fd) << strerror(errno);
268 
269 	ASSERT_EQ(BUFSIZE, write(fd, CONTENTS, BUFSIZE)) << strerror(errno);
270 	leak(fd);
271 }
272 
273 /* If a file is cached, then appending to the end should not cause a read */
274 TEST_F(Write, append_to_cached)
275 {
276 	const ssize_t BUFSIZE = 9;
277 	const char FULLPATH[] = "mountpoint/some_file.txt";
278 	const char RELPATH[] = "some_file.txt";
279 	char *oldcontents, *oldbuf;
280 	const char CONTENTS[BUFSIZE] = "abcdefgh";
281 	uint64_t ino = 42;
282 	/*
283 	 * Set offset in between maxbcachebuf boundary to test buffer handling
284 	 */
285 	uint64_t oldsize = m_maxbcachebuf / 2;
286 	int fd;
287 
288 	oldcontents = (char*)calloc(1, oldsize);
289 	ASSERT_NE(nullptr, oldcontents) << strerror(errno);
290 	oldbuf = (char*)malloc(oldsize);
291 	ASSERT_NE(nullptr, oldbuf) << strerror(errno);
292 
293 	expect_lookup(RELPATH, ino, oldsize);
294 	expect_open(ino, 0, 1);
295 	expect_read(ino, 0, oldsize, oldsize, oldcontents);
296 	maybe_expect_write(ino, oldsize, BUFSIZE, CONTENTS);
297 
298 	/* Must open O_RDWR or fuse(4) implicitly sets direct_io */
299 	fd = open(FULLPATH, O_RDWR | O_APPEND);
300 	EXPECT_LE(0, fd) << strerror(errno);
301 
302 	/* Read the old data into the cache */
303 	ASSERT_EQ((ssize_t)oldsize, read(fd, oldbuf, oldsize))
304 		<< strerror(errno);
305 
306 	/* Write the new data.  There should be no more read operations */
307 	ASSERT_EQ(BUFSIZE, write(fd, CONTENTS, BUFSIZE)) << strerror(errno);
308 	leak(fd);
309 }
310 
/* O_APPEND should still work when the server requests direct_io */
TEST_F(Write, append_direct_io)
{
	const ssize_t BUFSIZE = 9;
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char CONTENTS[BUFSIZE] = "abcdefgh";
	uint64_t ino = 42;
	uint64_t initial_offset = 4096;
	int fd;

	expect_lookup(RELPATH, ino, initial_offset);
	expect_open(ino, FOPEN_DIRECT_IO, 1);
	/* The write must land at the old EOF */
	expect_write(ino, initial_offset, BUFSIZE, BUFSIZE, CONTENTS);

	fd = open(FULLPATH, O_WRONLY | O_APPEND);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(BUFSIZE, write(fd, CONTENTS, BUFSIZE)) << strerror(errno);
	leak(fd);
}
331 
/* A direct write should evict any overlapping cached data */
TEST_F(Write, direct_io_evicts_cache)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char CONTENTS0[] = "abcdefgh";
	const char CONTENTS1[] = "ijklmnop";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS0) + 1;
	char readbuf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS0);
	expect_write(ino, 0, bufsize, bufsize, CONTENTS1);

	fd = open(FULLPATH, O_RDWR);
	EXPECT_LE(0, fd) << strerror(errno);

	// Prime cache
	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);

	// Write directly, evicting cache
	ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno);
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(bufsize, write(fd, CONTENTS1, bufsize)) << strerror(errno);

	// Read again.  Cache should be bypassed
	expect_read(ino, 0, bufsize, bufsize, CONTENTS1);
	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
	// If the cache had not been evicted, stale CONTENTS0 would appear here
	ASSERT_STREQ(readbuf, CONTENTS1);

	leak(fd);
}
369 
370 /*
371  * If the server doesn't return FOPEN_DIRECT_IO during FUSE_OPEN, then it's not
372  * allowed to return a short write for that file handle.  However, if it does
373  * then we should still do our darndest to handle it by resending the unwritten
374  * portion.
375  */
376 TEST_F(Write, indirect_io_short_write)
377 {
378 	const char FULLPATH[] = "mountpoint/some_file.txt";
379 	const char RELPATH[] = "some_file.txt";
380 	const char *CONTENTS = "abcdefghijklmnop";
381 	uint64_t ino = 42;
382 	int fd;
383 	ssize_t bufsize = strlen(CONTENTS);
384 	ssize_t bufsize0 = 11;
385 	ssize_t bufsize1 = strlen(CONTENTS) - bufsize0;
386 	const char *contents1 = CONTENTS + bufsize0;
387 
388 	expect_lookup(RELPATH, ino, 0);
389 	expect_open(ino, 0, 1);
390 	expect_write(ino, 0, bufsize, bufsize0, CONTENTS);
391 	expect_write(ino, bufsize0, bufsize1, bufsize1, contents1);
392 
393 	fd = open(FULLPATH, O_WRONLY);
394 	EXPECT_LE(0, fd) << strerror(errno);
395 
396 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
397 	leak(fd);
398 }
399 
400 /*
401  * When the direct_io option is used, filesystems are allowed to write less
402  * data than requested.  We should return the short write to userland.
403  */
404 TEST_F(Write, direct_io_short_write)
405 {
406 	const char FULLPATH[] = "mountpoint/some_file.txt";
407 	const char RELPATH[] = "some_file.txt";
408 	const char *CONTENTS = "abcdefghijklmnop";
409 	uint64_t ino = 42;
410 	int fd;
411 	ssize_t bufsize = strlen(CONTENTS);
412 	ssize_t halfbufsize = bufsize / 2;
413 
414 	expect_lookup(RELPATH, ino, 0);
415 	expect_open(ino, FOPEN_DIRECT_IO, 1);
416 	expect_write(ino, 0, bufsize, halfbufsize, CONTENTS);
417 
418 	fd = open(FULLPATH, O_WRONLY);
419 	EXPECT_LE(0, fd) << strerror(errno);
420 
421 	ASSERT_EQ(halfbufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
422 	leak(fd);
423 }
424 
425 /*
426  * An insidious edge case: the filesystem returns a short write, and the
427  * difference between what we requested and what it actually wrote crosses an
428  * iov element boundary
429  */
430 TEST_F(Write, direct_io_short_write_iov)
431 {
432 	const char FULLPATH[] = "mountpoint/some_file.txt";
433 	const char RELPATH[] = "some_file.txt";
434 	const char *CONTENTS0 = "abcdefgh";
435 	const char *CONTENTS1 = "ijklmnop";
436 	const char *EXPECTED0 = "abcdefghijklmnop";
437 	uint64_t ino = 42;
438 	int fd;
439 	ssize_t size0 = strlen(CONTENTS0) - 1;
440 	ssize_t size1 = strlen(CONTENTS1) + 1;
441 	ssize_t totalsize = size0 + size1;
442 	struct iovec iov[2];
443 
444 	expect_lookup(RELPATH, ino, 0);
445 	expect_open(ino, FOPEN_DIRECT_IO, 1);
446 	expect_write(ino, 0, totalsize, size0, EXPECTED0);
447 
448 	fd = open(FULLPATH, O_WRONLY);
449 	EXPECT_LE(0, fd) << strerror(errno);
450 
451 	iov[0].iov_base = __DECONST(void*, CONTENTS0);
452 	iov[0].iov_len = strlen(CONTENTS0);
453 	iov[1].iov_base = __DECONST(void*, CONTENTS1);
454 	iov[1].iov_len = strlen(CONTENTS1);
455 	ASSERT_EQ(size0, writev(fd, iov, 2)) << strerror(errno);
456 	leak(fd);
457 }
458 
/* fusefs should respect RLIMIT_FSIZE */
TEST_F(Write, rlimit_fsize)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct rlimit rl;
	ssize_t bufsize = strlen(CONTENTS);
	off_t offset = 1'000'000'000;
	uint64_t ino = 42;
	int fd;

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);

	/* Forbid file sizes larger than offset; catch the resulting signal */
	rl.rlim_cur = offset;
	rl.rlim_max = 10 * offset;
	ASSERT_EQ(0, setrlimit(RLIMIT_FSIZE, &rl)) << strerror(errno);
	ASSERT_NE(SIG_ERR, signal(SIGXFSZ, sigxfsz_handler)) << strerror(errno);

	fd = open(FULLPATH, O_WRONLY);

	EXPECT_LE(0, fd) << strerror(errno);

	/* A write past the limit must fail with EFBIG and raise SIGXFSZ */
	ASSERT_EQ(-1, pwrite(fd, CONTENTS, bufsize, offset));
	EXPECT_EQ(EFBIG, errno);
	EXPECT_EQ(1, s_sigxfsz);
	leak(fd);
}
488 
489 /*
490  * A short read indicates EOF.  Test that nothing bad happens if we get EOF
491  * during the R of a RMW operation.
492  */
493 TEST_F(Write, eof_during_rmw)
494 {
495 	const char FULLPATH[] = "mountpoint/some_file.txt";
496 	const char RELPATH[] = "some_file.txt";
497 	const char *CONTENTS = "abcdefgh";
498 	const char *INITIAL   = "XXXXXXXXXX";
499 	uint64_t ino = 42;
500 	uint64_t offset = 1;
501 	ssize_t bufsize = strlen(CONTENTS);
502 	off_t orig_fsize = 10;
503 	off_t truncated_fsize = 5;
504 	off_t final_fsize = bufsize;
505 	int fd;
506 
507 	FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, orig_fsize, 1);
508 	expect_open(ino, 0, 1);
509 	expect_read(ino, 0, orig_fsize, truncated_fsize, INITIAL, O_RDWR);
510 	expect_getattr(ino, truncated_fsize);
511 	expect_read(ino, 0, final_fsize, final_fsize, INITIAL, O_RDWR);
512 	maybe_expect_write(ino, offset, bufsize, CONTENTS);
513 
514 	fd = open(FULLPATH, O_RDWR);
515 	EXPECT_LE(0, fd) << strerror(errno);
516 
517 	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS, bufsize, offset))
518 		<< strerror(errno);
519 	leak(fd);
520 }
521 
522 /*
523  * If the kernel cannot be sure which uid, gid, or pid was responsible for a
524  * write, then it must set the FUSE_WRITE_CACHE bit
525  */
526 /* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236378 */
527 TEST_F(Write, mmap)
528 {
529 	const char FULLPATH[] = "mountpoint/some_file.txt";
530 	const char RELPATH[] = "some_file.txt";
531 	const char *CONTENTS = "abcdefgh";
532 	uint64_t ino = 42;
533 	int fd;
534 	ssize_t bufsize = strlen(CONTENTS);
535 	void *p;
536 	uint64_t offset = 10;
537 	size_t len;
538 	void *zeros, *expected;
539 
540 	len = getpagesize();
541 
542 	zeros = calloc(1, len);
543 	ASSERT_NE(nullptr, zeros);
544 	expected = calloc(1, len);
545 	ASSERT_NE(nullptr, expected);
546 	memmove((uint8_t*)expected + offset, CONTENTS, bufsize);
547 
548 	expect_lookup(RELPATH, ino, len);
549 	expect_open(ino, 0, 1);
550 	expect_read(ino, 0, len, len, zeros);
551 	/*
552 	 * Writes from the pager may or may not be associated with the correct
553 	 * pid, so they must set FUSE_WRITE_CACHE.
554 	 */
555 	FuseTest::expect_write(ino, 0, len, len, FUSE_WRITE_CACHE, 0, expected);
556 	expect_flush(ino, 1, ReturnErrno(0));
557 	expect_release(ino, ReturnErrno(0));
558 
559 	fd = open(FULLPATH, O_RDWR);
560 	EXPECT_LE(0, fd) << strerror(errno);
561 
562 	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
563 	ASSERT_NE(MAP_FAILED, p) << strerror(errno);
564 
565 	memmove((uint8_t*)p + offset, CONTENTS, bufsize);
566 
567 	ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
568 	close(fd);	// Write mmap'd data on close
569 
570 	free(expected);
571 	free(zeros);
572 }
573 
574 TEST_F(Write, pwrite)
575 {
576 	const char FULLPATH[] = "mountpoint/some_file.txt";
577 	const char RELPATH[] = "some_file.txt";
578 	const char *CONTENTS = "abcdefgh";
579 	uint64_t ino = 42;
580 	uint64_t offset = m_maxbcachebuf;
581 	int fd;
582 	ssize_t bufsize = strlen(CONTENTS);
583 
584 	expect_lookup(RELPATH, ino, 0);
585 	expect_open(ino, 0, 1);
586 	expect_write(ino, offset, bufsize, bufsize, CONTENTS);
587 
588 	fd = open(FULLPATH, O_WRONLY);
589 	EXPECT_LE(0, fd) << strerror(errno);
590 
591 	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS, bufsize, offset))
592 		<< strerror(errno);
593 	leak(fd);
594 }
595 
596 /* Writing a file should update its cached mtime and ctime */
597 TEST_F(Write, timestamps)
598 {
599 	const char FULLPATH[] = "mountpoint/some_file.txt";
600 	const char RELPATH[] = "some_file.txt";
601 	const char *CONTENTS = "abcdefgh";
602 	ssize_t bufsize = strlen(CONTENTS);
603 	uint64_t ino = 42;
604 	struct stat sb0, sb1;
605 	int fd;
606 
607 	expect_lookup(RELPATH, ino, 0);
608 	expect_open(ino, 0, 1);
609 	maybe_expect_write(ino, 0, bufsize, CONTENTS);
610 
611 	fd = open(FULLPATH, O_RDWR);
612 	EXPECT_LE(0, fd) << strerror(errno);
613 	ASSERT_EQ(0, fstat(fd, &sb0)) << strerror(errno);
614 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
615 
616 	nap();
617 
618 	ASSERT_EQ(0, fstat(fd, &sb1)) << strerror(errno);
619 
620 	EXPECT_EQ(sb0.st_atime, sb1.st_atime);
621 	EXPECT_NE(sb0.st_mtime, sb1.st_mtime);
622 	EXPECT_NE(sb0.st_ctime, sb1.st_ctime);
623 }
624 
625 TEST_F(Write, write)
626 {
627 	const char FULLPATH[] = "mountpoint/some_file.txt";
628 	const char RELPATH[] = "some_file.txt";
629 	const char *CONTENTS = "abcdefgh";
630 	uint64_t ino = 42;
631 	int fd;
632 	ssize_t bufsize = strlen(CONTENTS);
633 
634 	expect_lookup(RELPATH, ino, 0);
635 	expect_open(ino, 0, 1);
636 	expect_write(ino, 0, bufsize, bufsize, CONTENTS);
637 
638 	fd = open(FULLPATH, O_WRONLY);
639 	EXPECT_LE(0, fd) << strerror(errno);
640 
641 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
642 	leak(fd);
643 }
644 
/* fuse(4) should not issue writes of greater size than the daemon requests */
TEST_F(Write, write_large)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	int *contents;
	uint64_t ino = 42;
	int fd;
	ssize_t halfbufsize, bufsize;

	/* Write twice the daemon's maximum write size in one syscall */
	halfbufsize = m_mock->m_maxwrite;
	bufsize = halfbufsize * 2;
	contents = (int*)malloc(bufsize);
	ASSERT_NE(nullptr, contents);
	/* Fill with distinct values so misdirected data would be detected */
	for (int i = 0; i < (int)bufsize / (int)sizeof(i); i++) {
		contents[i] = i;
	}

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	/* The kernel must split the write into two maxwrite-sized requests */
	maybe_expect_write(ino, 0, halfbufsize, contents);
	maybe_expect_write(ino, halfbufsize, halfbufsize,
		&contents[halfbufsize / sizeof(int)]);

	fd = open(FULLPATH, O_WRONLY);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, contents, bufsize)) << strerror(errno);
	leak(fd);

	free(contents);
}
677 
678 TEST_F(Write, write_nothing)
679 {
680 	const char FULLPATH[] = "mountpoint/some_file.txt";
681 	const char RELPATH[] = "some_file.txt";
682 	const char *CONTENTS = "";
683 	uint64_t ino = 42;
684 	int fd;
685 	ssize_t bufsize = 0;
686 
687 	expect_lookup(RELPATH, ino, 0);
688 	expect_open(ino, 0, 1);
689 
690 	fd = open(FULLPATH, O_WRONLY);
691 	EXPECT_LE(0, fd) << strerror(errno);
692 
693 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
694 	leak(fd);
695 }
696 
/* A simple write using protocol 7.8's smaller fuse_write_in structure */
TEST_F(Write_7_8, write)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write_7_8(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_WRONLY);
	EXPECT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	leak(fd);
}
716 
/* In writeback mode, dirty data should be written on close */
TEST_F(WriteBackAsync, close)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write(ino, 0, bufsize, bufsize, CONTENTS);
	/* Close may also update timestamps; accept any FUSE_SETATTR */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_SETATTR);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;	// Must match nodeid
	})));
	expect_flush(ino, 1, ReturnErrno(0));
	expect_release(ino, ReturnErrno(0));

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	/* Closing the file must flush the cached write */
	close(fd);
}
748 
749 /* In writeback mode, adjacent writes will be clustered together */
750 TEST_F(WriteCluster, clustering)
751 {
752 	const char FULLPATH[] = "mountpoint/some_file.txt";
753 	const char RELPATH[] = "some_file.txt";
754 	uint64_t ino = 42;
755 	int i, fd;
756 	void *wbuf, *wbuf2x;
757 	ssize_t bufsize = m_maxbcachebuf;
758 	off_t filesize = 5 * bufsize;
759 
760 	wbuf = malloc(bufsize);
761 	ASSERT_NE(nullptr, wbuf) << strerror(errno);
762 	memset(wbuf, 'X', bufsize);
763 	wbuf2x = malloc(2 * bufsize);
764 	ASSERT_NE(nullptr, wbuf2x) << strerror(errno);
765 	memset(wbuf2x, 'X', 2 * bufsize);
766 
767 	expect_lookup(RELPATH, ino, filesize);
768 	expect_open(ino, 0, 1);
769 	/*
770 	 * Writes of bufsize-bytes each should be clustered into greater sizes.
771 	 * The amount of clustering is adaptive, so the first write actually
772 	 * issued will be 2x bufsize and subsequent writes may be larger
773 	 */
774 	expect_write(ino, 0, 2 * bufsize, 2 * bufsize, wbuf2x);
775 	expect_write(ino, 2 * bufsize, 2 * bufsize, 2 * bufsize, wbuf2x);
776 	expect_flush(ino, 1, ReturnErrno(0));
777 	expect_release(ino, ReturnErrno(0));
778 
779 	fd = open(FULLPATH, O_RDWR);
780 	ASSERT_LE(0, fd) << strerror(errno);
781 
782 	for (i = 0; i < 4; i++) {
783 		ASSERT_EQ(bufsize, write(fd, wbuf, bufsize))
784 			<< strerror(errno);
785 	}
786 	close(fd);
787 }
788 
789 /*
790  * When clustering writes, an I/O error to any of the cluster's children should
791  * not panic the system on unmount
792  */
793 /*
794  * Disabled because it panics.
795  * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=238565
796  */
797 TEST_F(WriteCluster, DISABLED_cluster_write_err)
798 {
799 	const char FULLPATH[] = "mountpoint/some_file.txt";
800 	const char RELPATH[] = "some_file.txt";
801 	uint64_t ino = 42;
802 	int i, fd;
803 	void *wbuf;
804 	ssize_t bufsize = m_maxbcachebuf;
805 	off_t filesize = 4 * bufsize;
806 
807 	wbuf = malloc(bufsize);
808 	ASSERT_NE(nullptr, wbuf) << strerror(errno);
809 	memset(wbuf, 'X', bufsize);
810 
811 	expect_lookup(RELPATH, ino, filesize);
812 	expect_open(ino, 0, 1);
813 	EXPECT_CALL(*m_mock, process(
814 		ResultOf([=](auto in) {
815 			return (in.header.opcode == FUSE_WRITE);
816 		}, Eq(true)),
817 		_)
818 	).WillRepeatedly(Invoke(ReturnErrno(EIO)));
819 	expect_flush(ino, 1, ReturnErrno(0));
820 	expect_release(ino, ReturnErrno(0));
821 
822 	fd = open(FULLPATH, O_RDWR);
823 	ASSERT_LE(0, fd) << strerror(errno);
824 
825 	for (i = 0; i < 3; i++) {
826 		ASSERT_EQ(bufsize, write(fd, wbuf, bufsize))
827 			<< strerror(errno);
828 	}
829 	close(fd);
830 }
831 
832 /*
833  * In writeback mode, writes to an O_WRONLY file could trigger reads from the
834  * server.  The FUSE protocol explicitly allows that.
835  */
836 TEST_F(WriteBack, rmw)
837 {
838 	const char FULLPATH[] = "mountpoint/some_file.txt";
839 	const char RELPATH[] = "some_file.txt";
840 	const char *CONTENTS = "abcdefgh";
841 	const char *INITIAL   = "XXXXXXXXXX";
842 	uint64_t ino = 42;
843 	uint64_t offset = 1;
844 	off_t fsize = 10;
845 	int fd;
846 	ssize_t bufsize = strlen(CONTENTS);
847 
848 	FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, fsize, 1);
849 	expect_open(ino, 0, 1);
850 	expect_read(ino, 0, fsize, fsize, INITIAL, O_WRONLY);
851 	maybe_expect_write(ino, offset, bufsize, CONTENTS);
852 
853 	fd = open(FULLPATH, O_WRONLY);
854 	EXPECT_LE(0, fd) << strerror(errno);
855 
856 	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS, bufsize, offset))
857 		<< strerror(errno);
858 	leak(fd);
859 }
860 
861 /*
862  * Without direct_io, writes should be committed to cache
863  */
864 TEST_F(WriteBack, cache)
865 {
866 	const char FULLPATH[] = "mountpoint/some_file.txt";
867 	const char RELPATH[] = "some_file.txt";
868 	const char *CONTENTS = "abcdefgh";
869 	uint64_t ino = 42;
870 	int fd;
871 	ssize_t bufsize = strlen(CONTENTS);
872 	char readbuf[bufsize];
873 
874 	expect_lookup(RELPATH, ino, 0);
875 	expect_open(ino, 0, 1);
876 	expect_write(ino, 0, bufsize, bufsize, CONTENTS);
877 
878 	fd = open(FULLPATH, O_RDWR);
879 	EXPECT_LE(0, fd) << strerror(errno);
880 
881 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
882 	/*
883 	 * A subsequent read should be serviced by cache, without querying the
884 	 * filesystem daemon
885 	 */
886 	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
887 	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
888 	leak(fd);
889 }
890 
891 /*
892  * With O_DIRECT, writes should be not committed to cache.  Admittedly this is
893  * an odd test, because it would be unusual to use O_DIRECT for writes but not
894  * reads.
895  */
896 TEST_F(WriteBack, o_direct)
897 {
898 	const char FULLPATH[] = "mountpoint/some_file.txt";
899 	const char RELPATH[] = "some_file.txt";
900 	const char *CONTENTS = "abcdefgh";
901 	uint64_t ino = 42;
902 	int fd;
903 	ssize_t bufsize = strlen(CONTENTS);
904 	char readbuf[bufsize];
905 
906 	expect_lookup(RELPATH, ino, 0);
907 	expect_open(ino, 0, 1);
908 	FuseTest::expect_write(ino, 0, bufsize, bufsize, 0, FUSE_WRITE_CACHE,
909 		CONTENTS);
910 	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
911 
912 	fd = open(FULLPATH, O_RDWR | O_DIRECT);
913 	EXPECT_LE(0, fd) << strerror(errno);
914 
915 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
916 	/* A subsequent read must query the daemon because cache is empty */
917 	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
918 	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
919 	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
920 	leak(fd);
921 }
922 
923 /*
924  * When mounted with -o async, the writeback cache mode should delay writes
925  */
926 TEST_F(WriteBackAsync, delay)
927 {
928 	const char FULLPATH[] = "mountpoint/some_file.txt";
929 	const char RELPATH[] = "some_file.txt";
930 	const char *CONTENTS = "abcdefgh";
931 	uint64_t ino = 42;
932 	int fd;
933 	ssize_t bufsize = strlen(CONTENTS);
934 
935 	expect_lookup(RELPATH, ino, 0);
936 	expect_open(ino, 0, 1);
937 	/* Write should be cached, but FUSE_WRITE shouldn't be sent */
938 	EXPECT_CALL(*m_mock, process(
939 		ResultOf([=](auto in) {
940 			return (in.header.opcode == FUSE_WRITE);
941 		}, Eq(true)),
942 		_)
943 	).Times(0);
944 
945 	fd = open(FULLPATH, O_RDWR);
946 	EXPECT_LE(0, fd) << strerror(errno);
947 
948 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
949 
950 	/* Don't close the file because that would flush the cache */
951 }
952 
953 /*
954  * A direct write should not evict dirty cached data from outside of its own
955  * byte range.
956  */
957 TEST_F(WriteBackAsync, direct_io_ignores_unrelated_cached)
958 {
959 	const char FULLPATH[] = "mountpoint/some_file.txt";
960 	const char RELPATH[] = "some_file.txt";
961 	const char CONTENTS0[] = "abcdefgh";
962 	const char CONTENTS1[] = "ijklmnop";
963 	uint64_t ino = 42;
964 	int fd;
965 	ssize_t bufsize = strlen(CONTENTS0) + 1;
966 	ssize_t fsize = 2 * m_maxbcachebuf;
967 	char readbuf[bufsize];
968 	void *zeros;
969 
970 	zeros = calloc(1, m_maxbcachebuf);
971 	ASSERT_NE(nullptr, zeros);
972 
973 	expect_lookup(RELPATH, ino, fsize);
974 	expect_open(ino, 0, 1);
975 	expect_read(ino, 0, m_maxbcachebuf, m_maxbcachebuf, zeros);
976 	FuseTest::expect_write(ino, m_maxbcachebuf, bufsize, bufsize, 0, 0,
977 		CONTENTS1);
978 
979 	fd = open(FULLPATH, O_RDWR);
980 	EXPECT_LE(0, fd) << strerror(errno);
981 
982 	// Cache first block with dirty data.  This will entail first reading
983 	// the existing data.
984 	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS0, bufsize, 0))
985 		<< strerror(errno);
986 
987 	// Write directly to second block
988 	ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno);
989 	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS1, bufsize, m_maxbcachebuf))
990 		<< strerror(errno);
991 
992 	// Read from the first block again.  Should be serviced by cache.
993 	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
994 	ASSERT_EQ(bufsize, pread(fd, readbuf, bufsize, 0)) << strerror(errno);
995 	ASSERT_STREQ(readbuf, CONTENTS0);
996 
997 	leak(fd);
998 	free(zeros);
999 }
1000 
1001 /*
1002  * If a direct io write partially overlaps one or two blocks of dirty cached
1003  * data, No dirty data should be lost.  Admittedly this is a weird test,
1004  * because it would be unusual to use O_DIRECT and the writeback cache.
1005  */
1006 TEST_F(WriteBackAsync, direct_io_partially_overlaps_cached_block)
1007 {
1008 	const char FULLPATH[] = "mountpoint/some_file.txt";
1009 	const char RELPATH[] = "some_file.txt";
1010 	uint64_t ino = 42;
1011 	int fd;
1012 	off_t bs = m_maxbcachebuf;
1013 	ssize_t fsize = 3 * bs;
1014 	void *readbuf, *zeros, *ones, *zeroones, *onezeros;
1015 
1016 	readbuf = malloc(bs);
1017 	ASSERT_NE(nullptr, readbuf) << strerror(errno);
1018 	zeros = calloc(1, 3 * bs);
1019 	ASSERT_NE(nullptr, zeros);
1020 	ones = calloc(1, 2 * bs);
1021 	ASSERT_NE(nullptr, ones);
1022 	memset(ones, 1, 2 * bs);
1023 	zeroones = calloc(1, bs);
1024 	ASSERT_NE(nullptr, zeroones);
1025 	memset((uint8_t*)zeroones + bs / 2, 1, bs / 2);
1026 	onezeros = calloc(1, bs);
1027 	ASSERT_NE(nullptr, onezeros);
1028 	memset(onezeros, 1, bs / 2);
1029 
1030 	expect_lookup(RELPATH, ino, fsize);
1031 	expect_open(ino, 0, 1);
1032 
1033 	fd = open(FULLPATH, O_RDWR);
1034 	EXPECT_LE(0, fd) << strerror(errno);
1035 
1036 	/* Cache first and third blocks with dirty data.  */
1037 	ASSERT_EQ(3 * bs, pwrite(fd, zeros, 3 * bs, 0)) << strerror(errno);
1038 
1039 	/*
1040 	 * Write directly to all three blocks.  The partially written blocks
1041 	 * will be flushed because they're dirty.
1042 	 */
1043 	FuseTest::expect_write(ino, 0, bs, bs, 0, 0, zeros);
1044 	FuseTest::expect_write(ino, 2 * bs, bs, bs, 0, 0, zeros);
1045 	/* The direct write is split in two because of the m_maxwrite value */
1046 	FuseTest::expect_write(ino,     bs / 2, bs, bs, 0, 0, ones);
1047 	FuseTest::expect_write(ino, 3 * bs / 2, bs, bs, 0, 0, ones);
1048 	ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno);
1049 	ASSERT_EQ(2 * bs, pwrite(fd, ones, 2 * bs, bs / 2)) << strerror(errno);
1050 
1051 	/*
1052 	 * Read from both the valid and invalid portions of the first and third
1053 	 * blocks again.  This will entail FUSE_READ operations because these
1054 	 * blocks were invalidated by the direct write.
1055 	 */
1056 	expect_read(ino, 0, bs, bs, zeroones);
1057 	expect_read(ino, 2 * bs, bs, bs, onezeros);
1058 	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
1059 	ASSERT_EQ(bs / 2, pread(fd, readbuf, bs / 2, 0)) << strerror(errno);
1060 	EXPECT_EQ(0, memcmp(zeros, readbuf, bs / 2));
1061 	ASSERT_EQ(bs / 2, pread(fd, readbuf, bs / 2, 5 * bs / 2))
1062 		<< strerror(errno);
1063 	EXPECT_EQ(0, memcmp(zeros, readbuf, bs / 2));
1064 	ASSERT_EQ(bs / 2, pread(fd, readbuf, bs / 2, bs / 2))
1065 		<< strerror(errno);
1066 	EXPECT_EQ(0, memcmp(ones, readbuf, bs / 2));
1067 	ASSERT_EQ(bs / 2, pread(fd, readbuf, bs / 2, 2 * bs))
1068 		<< strerror(errno);
1069 	EXPECT_EQ(0, memcmp(ones, readbuf, bs / 2));
1070 
1071 	leak(fd);
1072 	free(zeroones);
1073 	free(onezeros);
1074 	free(ones);
1075 	free(zeros);
1076 	free(readbuf);
1077 }
1078 
1079 /*
1080  * In WriteBack mode, writes may be cached beyond what the server thinks is the
1081  * EOF.  In this case, a short read at EOF should _not_ cause fusefs to update
1082  * the file's size.
1083  */
1084 TEST_F(WriteBackAsync, eof)
1085 {
1086 	const char FULLPATH[] = "mountpoint/some_file.txt";
1087 	const char RELPATH[] = "some_file.txt";
1088 	const char *CONTENTS0 = "abcdefgh";
1089 	const char *CONTENTS1 = "ijklmnop";
1090 	uint64_t ino = 42;
1091 	int fd;
1092 	off_t offset = m_maxbcachebuf;
1093 	ssize_t wbufsize = strlen(CONTENTS1);
1094 	off_t old_filesize = (off_t)strlen(CONTENTS0);
1095 	ssize_t rbufsize = 2 * old_filesize;
1096 	char readbuf[rbufsize];
1097 	size_t holesize = rbufsize - old_filesize;
1098 	char hole[holesize];
1099 	struct stat sb;
1100 	ssize_t r;
1101 
1102 	expect_lookup(RELPATH, ino, 0);
1103 	expect_open(ino, 0, 1);
1104 	expect_read(ino, 0, m_maxbcachebuf, old_filesize, CONTENTS0);
1105 
1106 	fd = open(FULLPATH, O_RDWR);
1107 	EXPECT_LE(0, fd) << strerror(errno);
1108 
1109 	/* Write and cache data beyond EOF */
1110 	ASSERT_EQ(wbufsize, pwrite(fd, CONTENTS1, wbufsize, offset))
1111 		<< strerror(errno);
1112 
1113 	/* Read from the old EOF */
1114 	r = pread(fd, readbuf, rbufsize, 0);
1115 	ASSERT_LE(0, r) << strerror(errno);
1116 	EXPECT_EQ(rbufsize, r) << "read should've synthesized a hole";
1117 	EXPECT_EQ(0, memcmp(CONTENTS0, readbuf, old_filesize));
1118 	bzero(hole, holesize);
1119 	EXPECT_EQ(0, memcmp(hole, readbuf + old_filesize, holesize));
1120 
1121 	/* The file's size should still be what was established by pwrite */
1122 	ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno);
1123 	EXPECT_EQ(offset + wbufsize, sb.st_size);
1124 	leak(fd);
1125 }
1126 
1127 /*
1128  * When a file has dirty writes that haven't been flushed, the server's notion
1129  * of its mtime and ctime will be wrong.  The kernel should ignore those if it
1130  * gets them from a FUSE_GETATTR before flushing.
1131  */
1132 TEST_F(WriteBackAsync, timestamps)
1133 {
1134 	const char FULLPATH[] = "mountpoint/some_file.txt";
1135 	const char RELPATH[] = "some_file.txt";
1136 	const char *CONTENTS = "abcdefgh";
1137 	ssize_t bufsize = strlen(CONTENTS);
1138 	uint64_t ino = 42;
1139 	uint64_t attr_valid = 0;
1140 	uint64_t attr_valid_nsec = 0;
1141 	uint64_t server_time = 12345;
1142 	mode_t mode = S_IFREG | 0644;
1143 	int fd;
1144 
1145 	struct stat sb;
1146 
1147 	EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH)
1148 	.WillRepeatedly(Invoke(
1149 		ReturnImmediate([=](auto in __unused, auto& out) {
1150 		SET_OUT_HEADER_LEN(out, entry);
1151 		out.body.entry.attr.mode = mode;
1152 		out.body.entry.nodeid = ino;
1153 		out.body.entry.attr.nlink = 1;
1154 		out.body.entry.attr_valid = attr_valid;
1155 		out.body.entry.attr_valid_nsec = attr_valid_nsec;
1156 	})));
1157 	expect_open(ino, 0, 1);
1158 	EXPECT_CALL(*m_mock, process(
1159 		ResultOf([=](auto in) {
1160 			return (in.header.opcode == FUSE_GETATTR &&
1161 				in.header.nodeid == ino);
1162 		}, Eq(true)),
1163 		_)
1164 	).WillRepeatedly(Invoke(
1165 	ReturnImmediate([=](auto i __unused, auto& out) {
1166 		SET_OUT_HEADER_LEN(out, attr);
1167 		out.body.attr.attr.ino = ino;
1168 		out.body.attr.attr.mode = mode;
1169 		out.body.attr.attr_valid = attr_valid;
1170 		out.body.attr.attr_valid_nsec = attr_valid_nsec;
1171 		out.body.attr.attr.atime = server_time;
1172 		out.body.attr.attr.mtime = server_time;
1173 		out.body.attr.attr.ctime = server_time;
1174 	})));
1175 
1176 	fd = open(FULLPATH, O_RDWR);
1177 	EXPECT_LE(0, fd) << strerror(errno);
1178 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1179 
1180 	ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno);
1181 	EXPECT_EQ((time_t)server_time, sb.st_atime);
1182 	EXPECT_NE((time_t)server_time, sb.st_mtime);
1183 	EXPECT_NE((time_t)server_time, sb.st_ctime);
1184 }
1185 
1186 /* Any dirty timestamp fields should be flushed during a SETATTR */
1187 TEST_F(WriteBackAsync, timestamps_during_setattr)
1188 {
1189 	const char FULLPATH[] = "mountpoint/some_file.txt";
1190 	const char RELPATH[] = "some_file.txt";
1191 	const char *CONTENTS = "abcdefgh";
1192 	ssize_t bufsize = strlen(CONTENTS);
1193 	uint64_t ino = 42;
1194 	const mode_t newmode = 0755;
1195 	int fd;
1196 
1197 	expect_lookup(RELPATH, ino, 0);
1198 	expect_open(ino, 0, 1);
1199 	EXPECT_CALL(*m_mock, process(
1200 		ResultOf([=](auto in) {
1201 			uint32_t valid = FATTR_MODE | FATTR_MTIME | FATTR_CTIME;
1202 			return (in.header.opcode == FUSE_SETATTR &&
1203 				in.header.nodeid == ino &&
1204 				in.body.setattr.valid == valid);
1205 		}, Eq(true)),
1206 		_)
1207 	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
1208 		SET_OUT_HEADER_LEN(out, attr);
1209 		out.body.attr.attr.ino = ino;
1210 		out.body.attr.attr.mode = S_IFREG | newmode;
1211 	})));
1212 
1213 	fd = open(FULLPATH, O_RDWR);
1214 	EXPECT_LE(0, fd) << strerror(errno);
1215 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1216 	ASSERT_EQ(0, fchmod(fd, newmode)) << strerror(errno);
1217 }
1218 
1219 /* fuse_init_out.time_gran controls the granularity of timestamps */
1220 TEST_P(TimeGran, timestamps_during_setattr)
1221 {
1222 	const char FULLPATH[] = "mountpoint/some_file.txt";
1223 	const char RELPATH[] = "some_file.txt";
1224 	const char *CONTENTS = "abcdefgh";
1225 	ssize_t bufsize = strlen(CONTENTS);
1226 	uint64_t ino = 42;
1227 	const mode_t newmode = 0755;
1228 	int fd;
1229 
1230 	expect_lookup(RELPATH, ino, 0);
1231 	expect_open(ino, 0, 1);
1232 	EXPECT_CALL(*m_mock, process(
1233 		ResultOf([=](auto in) {
1234 			uint32_t valid = FATTR_MODE | FATTR_MTIME | FATTR_CTIME;
1235 			return (in.header.opcode == FUSE_SETATTR &&
1236 				in.header.nodeid == ino &&
1237 				in.body.setattr.valid == valid &&
1238 				in.body.setattr.mtimensec % m_time_gran == 0 &&
1239 				in.body.setattr.ctimensec % m_time_gran == 0);
1240 		}, Eq(true)),
1241 		_)
1242 	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
1243 		SET_OUT_HEADER_LEN(out, attr);
1244 		out.body.attr.attr.ino = ino;
1245 		out.body.attr.attr.mode = S_IFREG | newmode;
1246 	})));
1247 
1248 	fd = open(FULLPATH, O_RDWR);
1249 	EXPECT_LE(0, fd) << strerror(errno);
1250 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1251 	ASSERT_EQ(0, fchmod(fd, newmode)) << strerror(errno);
1252 }
1253 
1254 INSTANTIATE_TEST_CASE_P(RA, TimeGran, Range(0u, 10u));
1255 
1256 /*
1257  * Without direct_io, writes should be committed to cache
1258  */
1259 TEST_F(Write, writethrough)
1260 {
1261 	const char FULLPATH[] = "mountpoint/some_file.txt";
1262 	const char RELPATH[] = "some_file.txt";
1263 	const char *CONTENTS = "abcdefgh";
1264 	uint64_t ino = 42;
1265 	int fd;
1266 	ssize_t bufsize = strlen(CONTENTS);
1267 	char readbuf[bufsize];
1268 
1269 	expect_lookup(RELPATH, ino, 0);
1270 	expect_open(ino, 0, 1);
1271 	expect_write(ino, 0, bufsize, bufsize, CONTENTS);
1272 
1273 	fd = open(FULLPATH, O_RDWR);
1274 	EXPECT_LE(0, fd) << strerror(errno);
1275 
1276 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1277 	/*
1278 	 * A subsequent read should be serviced by cache, without querying the
1279 	 * filesystem daemon
1280 	 */
1281 	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
1282 	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
1283 	leak(fd);
1284 }
1285 
1286 /* Writes that extend a file should update the cached file size */
1287 TEST_F(Write, update_file_size)
1288 {
1289 	const char FULLPATH[] = "mountpoint/some_file.txt";
1290 	const char RELPATH[] = "some_file.txt";
1291 	const char *CONTENTS = "abcdefgh";
1292 	struct stat sb;
1293 	uint64_t ino = 42;
1294 	int fd;
1295 	ssize_t bufsize = strlen(CONTENTS);
1296 
1297 	expect_lookup(RELPATH, ino, 0);
1298 	expect_open(ino, 0, 1);
1299 	expect_write(ino, 0, bufsize, bufsize, CONTENTS);
1300 
1301 	fd = open(FULLPATH, O_RDWR);
1302 	EXPECT_LE(0, fd) << strerror(errno);
1303 
1304 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
1305 	/* Get cached attributes */
1306 	ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno);
1307 	ASSERT_EQ(bufsize, sb.st_size);
1308 	leak(fd);
1309 }
1310