/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 The FreeBSD Foundation
 *
 * This software was developed by BFF Storage Systems, LLC under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

extern "C" {
#include <sys/param.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/uio.h>

#include <aio.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>
}

#include "mockfs.hh"
#include "utils.hh"

using namespace testing;

class Write: public FuseTest {

public:
void SetUp() {
	FuseTest::SetUp();
}

void TearDown() {
	struct sigaction sa;

	bzero(&sa, sizeof(sa));
	sa.sa_handler = SIG_DFL;
	sigaction(SIGXFSZ, &sa, NULL);

	FuseTest::TearDown();
}

void expect_lookup(const char *relpath, uint64_t ino, uint64_t size)
{
	FuseTest::expect_lookup(relpath, ino, S_IFREG | 0644, size, 1);
}

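/*
 * Expect zero or more FUSE_RELEASE operations for the given inode, and handle
 * each one with the supplied ProcessMocker.
 */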
void expect_release(uint64_t ino, ProcessMockerT r)
{
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_RELEASE &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(r));
}

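/* Expect a single FUSE_WRITE, without checking the write flags */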
void expect_write(uint64_t ino, uint64_t offset, uint64_t isize,
	uint64_t osize, const void *contents)
{
	FuseTest::expect_write(ino, offset, isize, osize, 0, 0, contents);
}

/* Expect a write that may or may not come, depending on the cache mode */
void maybe_expect_write(uint64_t ino, uint64_t offset, uint64_t size,
	const void *contents)
{
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			const char *buf = (const char*)in.body.bytes +
				sizeof(struct fuse_write_in);

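			/*
			 * The write must fit in the mock's message buffer;
			 * otherwise the bcmp below would read past its end.
			 */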
			assert(size <= sizeof(in.body.bytes) -
				sizeof(struct fuse_write_in));
			return (in.header.opcode == FUSE_WRITE &&
				in.header.nodeid == ino &&
				in.body.write.offset == offset  &&
				in.body.write.size == size &&
				0 == bcmp(buf, contents, size));
		}, Eq(true)),
		_)
	).Times(AtMost(1))
	.WillRepeatedly(Invoke(
		ReturnImmediate([=](auto in __unused, auto& out) {
			SET_OUT_HEADER_LEN(out, write);
			out.body.write.size = size;
		})
	));
}

};

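/* Tests that use FUSE protocol 7.8, which has an older, smaller fuse_write_in */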
class Write_7_8: public FuseTest {

public:
virtual void SetUp() {
	m_kernel_minor_version = 8;
	FuseTest::SetUp();
}

void expect_lookup(const char *relpath, uint64_t ino, uint64_t size)
{
	FuseTest::expect_lookup_7_8(relpath, ino, S_IFREG | 0644, size, 1);
}

};

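/*
 * Tests that use the POSIX AIO interface.  They require the
 * vfs.aio.enable_unsafe sysctl to be set.
 */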
class AioWrite: public Write {
virtual void SetUp() {
	if (!is_unsafe_aio_enabled())
		GTEST_SKIP() <<
			"vfs.aio.enable_unsafe must be set for this test";
	FuseTest::SetUp();
}
};

/* Tests for the writeback cache mode */
class WriteBack: public Write {
public:
virtual void SetUp() {
	m_init_flags |= FUSE_WRITEBACK_CACHE;
	FuseTest::SetUp();
	if (IsSkipped())
		return;
}

void expect_write(uint64_t ino, uint64_t offset, uint64_t isize,
	uint64_t osize, const void *contents)
{
	FuseTest::expect_write(ino, offset, isize, osize, FUSE_WRITE_CACHE, 0,
		contents);
}
};

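/* Writeback cache tests that also use the async mount option */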
class WriteBackAsync: public WriteBack {
public:
virtual void SetUp() {
	m_async = true;
	m_maxwrite = 65536;
	WriteBack::SetUp();
}
};

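/*
 * Tests parameterized on log2 of the fuse_init_out.time_gran value, in
 * nanoseconds.
 */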
class TimeGran: public WriteBackAsync, public WithParamInterface<unsigned> {
public:
virtual void SetUp() {
	m_time_gran = 1 << GetParam();
	WriteBackAsync::SetUp();
}
};

/* Tests for clustered writes with WriteBack caching */
class WriteCluster: public WriteBack {
public:
virtual void SetUp() {
	m_async = true;
	m_maxwrite = 1 << 25;	// Anything larger than MAXPHYS will suffice
	WriteBack::SetUp();
	if (m_maxphys < 2 * DFLTPHYS)
		GTEST_SKIP() << "MAXPHYS must be at least twice DFLTPHYS"
			<< " for this test";
	if (m_maxphys < 2 * m_maxbcachebuf)
		GTEST_SKIP() << "MAXPHYS must be at least twice maxbcachebuf"
			<< " for this test";
}
};

/* Tests relating to the server's max_write property */
class WriteMaxWrite: public Write {
public:
virtual void SetUp() {
	/*
	 * For this test, m_maxwrite must be less than both m_maxbcachebuf and
	 * m_maxphys.
	 */
	m_maxwrite = 32768;
	Write::SetUp();
}
};

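/* Parameterized on the number of FUSE_GETATTR operations to expect */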
class WriteEofDuringVnopStrategy: public Write, public WithParamInterface<int>
{};

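/* Parameterized on the open(2) flags to use: either 0 or O_DIRECT */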
class WriteRlimitFsize: public Write, public WithParamInterface<int> {
public:
static sig_atomic_t s_sigxfsz;
struct rlimit	m_initial_limit;

void SetUp() {
	s_sigxfsz = 0;
	getrlimit(RLIMIT_FSIZE, &m_initial_limit);
	FuseTest::SetUp();
}

void TearDown() {
	setrlimit(RLIMIT_FSIZE, &m_initial_limit);

	FuseTest::TearDown();
}
};

sig_atomic_t WriteRlimitFsize::s_sigxfsz = 0;

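/* Record receipt of SIGXFSZ so that tests can assert on it */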
void sigxfsz_handler(int __unused sig) {
	WriteRlimitFsize::s_sigxfsz = 1;
}

/* AIO writes need to set the header's pid field correctly */
/* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236379 */
TEST_F(AioWrite, DISABLED_aio_write)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	uint64_t offset = 4096;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	struct aiocb iocb, *piocb;

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write(ino, offset, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_WRONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	iocb.aio_nbytes = bufsize;
	iocb.aio_fildes = fd;
	iocb.aio_buf = __DECONST(void *, CONTENTS);
	iocb.aio_offset = offset;
	iocb.aio_sigevent.sigev_notify = SIGEV_NONE;
	ASSERT_EQ(0, aio_write(&iocb)) << strerror(errno);
	ASSERT_EQ(bufsize, aio_waitcomplete(&piocb, NULL)) << strerror(errno);
	leak(fd);
}

/*
 * When a file is opened with O_APPEND, we should forward that flag to
 * FUSE_OPEN (tested by Open.o_append) but still attempt to calculate the
 * offset internally.  That way we'll work both with filesystems that
 * understand O_APPEND (and ignore the offset) and filesystems that don't (and
 * simply use the offset).
 *
 * Note that verifying the O_APPEND flag in FUSE_OPEN is done in the
 * Open.o_append test.
 */
TEST_F(Write, append)
{
	const ssize_t BUFSIZE = 9;
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char CONTENTS[BUFSIZE] = "abcdefgh";
	uint64_t ino = 42;
	/*
	 * Set offset to a maxbcachebuf boundary so we don't need to RMW when
	 * using writeback caching
	 */
	uint64_t initial_offset = m_maxbcachebuf;
	int fd;

	expect_lookup(RELPATH, ino, initial_offset);
	expect_open(ino, 0, 1);
	expect_write(ino, initial_offset, BUFSIZE, BUFSIZE, CONTENTS);

	/* Must open O_RDWR or fuse(4) implicitly sets direct_io */
	fd = open(FULLPATH, O_RDWR | O_APPEND);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(BUFSIZE, write(fd, CONTENTS, BUFSIZE)) << strerror(errno);
	leak(fd);
}

/* If a file is cached, then appending to the end should not cause a read */
TEST_F(Write, append_to_cached)
{
	const ssize_t BUFSIZE = 9;
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	char *oldcontents, *oldbuf;
	const char CONTENTS[BUFSIZE] = "abcdefgh";
	uint64_t ino = 42;
	/*
	 * Set the old file size to the middle of a maxbcachebuf-sized block,
	 * to test the buffer handling
	 */
	uint64_t oldsize = m_maxbcachebuf / 2;
	int fd;

	oldcontents = (char*)calloc(1, oldsize);
	ASSERT_NE(nullptr, oldcontents) << strerror(errno);
	oldbuf = (char*)malloc(oldsize);
	ASSERT_NE(nullptr, oldbuf) << strerror(errno);

	expect_lookup(RELPATH, ino, oldsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, oldsize, oldsize, oldcontents);
	maybe_expect_write(ino, oldsize, BUFSIZE, CONTENTS);

	/* Must open O_RDWR or fuse(4) implicitly sets direct_io */
	fd = open(FULLPATH, O_RDWR | O_APPEND);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Read the old data into the cache */
	ASSERT_EQ((ssize_t)oldsize, read(fd, oldbuf, oldsize))
		<< strerror(errno);

	/* Write the new data.  There should be no more read operations */
	ASSERT_EQ(BUFSIZE, write(fd, CONTENTS, BUFSIZE)) << strerror(errno);
	leak(fd);
	free(oldbuf);
	free(oldcontents);
}

TEST_F(Write, append_direct_io)
{
	const ssize_t BUFSIZE = 9;
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char CONTENTS[BUFSIZE] = "abcdefgh";
	uint64_t ino = 42;
	uint64_t initial_offset = 4096;
	int fd;

	expect_lookup(RELPATH, ino, initial_offset);
	expect_open(ino, FOPEN_DIRECT_IO, 1);
	expect_write(ino, initial_offset, BUFSIZE, BUFSIZE, CONTENTS);

	fd = open(FULLPATH, O_WRONLY | O_APPEND);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(BUFSIZE, write(fd, CONTENTS, BUFSIZE)) << strerror(errno);
	leak(fd);
}

/* A direct write should evict any overlapping cached data */
TEST_F(Write, direct_io_evicts_cache)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char CONTENTS0[] = "abcdefgh";
	const char CONTENTS1[] = "ijklmnop";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS0) + 1;
	char readbuf[bufsize];

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS0);
	expect_write(ino, 0, bufsize, bufsize, CONTENTS1);

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	// Prime cache
	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);

	// Write directly, evicting cache
	ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno);
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(bufsize, write(fd, CONTENTS1, bufsize)) << strerror(errno);

	// Read again.  Cache should be bypassed
	expect_read(ino, 0, bufsize, bufsize, CONTENTS1);
	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
	ASSERT_STREQ(readbuf, CONTENTS1);

	leak(fd);
}

/*
 * If the server doesn't return FOPEN_DIRECT_IO during FUSE_OPEN, then it's not
 * allowed to return a short write for that file handle.  However, if it does
 * then we should still do our darndest to handle it by resending the unwritten
 * portion.
 */
TEST_F(Write, indirect_io_short_write)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	ssize_t bufsize0 = 11;
	ssize_t bufsize1 = strlen(CONTENTS) - bufsize0;
	const char *contents1 = CONTENTS + bufsize0;

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write(ino, 0, bufsize, bufsize0, CONTENTS);
	expect_write(ino, bufsize0, bufsize1, bufsize1, contents1);

	fd = open(FULLPATH, O_WRONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	leak(fd);
}

/* It is an error if the daemon claims to have written more data than we sent */
TEST_F(Write, indirect_io_long_write)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	ssize_t bufsize_out = 100;
	off_t some_other_size = 25;
	struct stat sb;

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write(ino, 0, bufsize, bufsize_out, CONTENTS);
	expect_getattr(ino, some_other_size);

	fd = open(FULLPATH, O_WRONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(-1, write(fd, CONTENTS, bufsize)) << strerror(errno);
	ASSERT_EQ(EINVAL, errno);

	/*
	 * Following such an error, we should requery the server for the file's
	 * size.
	 */
	fstat(fd, &sb);
	ASSERT_EQ(sb.st_size, some_other_size);

	leak(fd);
}

/*
 * Don't crash if the server returns a write that can't be represented as a
 * signed 32 bit number.  Regression test for
 * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=263263
 */
TEST_F(Write, indirect_io_very_long_write)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	ssize_t bufsize_out = 3 << 30;

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write(ino, 0, bufsize, bufsize_out, CONTENTS);

	fd = open(FULLPATH, O_WRONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(-1, write(fd, CONTENTS, bufsize)) << strerror(errno);
	ASSERT_EQ(EINVAL, errno);
	leak(fd);
}

/*
 * When the direct_io option is used, filesystems are allowed to write less
 * data than requested.  We should return the short write to userland.
 */
TEST_F(Write, direct_io_short_write)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	ssize_t halfbufsize = bufsize / 2;

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, FOPEN_DIRECT_IO, 1);
	expect_write(ino, 0, bufsize, halfbufsize, CONTENTS);

	fd = open(FULLPATH, O_WRONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(halfbufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	leak(fd);
}

/*
 * An insidious edge case: the filesystem returns a short write, and the
 * difference between what we requested and what it actually wrote crosses an
 * iov element boundary
 */
TEST_F(Write, direct_io_short_write_iov)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS0 = "abcdefgh";
	const char *CONTENTS1 = "ijklmnop";
	const char *EXPECTED0 = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	ssize_t size0 = strlen(CONTENTS0) - 1;
	ssize_t size1 = strlen(CONTENTS1) + 1;
	ssize_t totalsize = size0 + size1;
	struct iovec iov[2];

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, FOPEN_DIRECT_IO, 1);
	expect_write(ino, 0, totalsize, size0, EXPECTED0);

	fd = open(FULLPATH, O_WRONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	iov[0].iov_base = __DECONST(void*, CONTENTS0);
	iov[0].iov_len = strlen(CONTENTS0);
	iov[1].iov_base = __DECONST(void*, CONTENTS1);
	iov[1].iov_len = strlen(CONTENTS1);
	ASSERT_EQ(size0, writev(fd, iov, 2)) << strerror(errno);
	leak(fd);
}

/* fusefs should respect RLIMIT_FSIZE */
TEST_P(WriteRlimitFsize, rlimit_fsize)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct rlimit rl;
	ssize_t bufsize = strlen(CONTENTS);
	off_t offset = 1'000'000'000;
	uint64_t ino = 42;
	int fd, oflag;

	oflag = GetParam();

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);

	rl.rlim_cur = offset;
	rl.rlim_max = m_initial_limit.rlim_max;
	ASSERT_EQ(0, setrlimit(RLIMIT_FSIZE, &rl)) << strerror(errno);
	ASSERT_NE(SIG_ERR, signal(SIGXFSZ, sigxfsz_handler)) << strerror(errno);

	fd = open(FULLPATH, O_WRONLY | oflag);

	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(-1, pwrite(fd, CONTENTS, bufsize, offset));
	EXPECT_EQ(EFBIG, errno);
	EXPECT_EQ(1, s_sigxfsz);
	leak(fd);
}

/*
 * When crossing the RLIMIT_FSIZE boundary, writes should be truncated, not
 * aborted.
 * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=164793
 */
TEST_P(WriteRlimitFsize, rlimit_fsize_truncate)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefghijklmnopqrstuvwxyz";
	struct rlimit rl;
	ssize_t bufsize = strlen(CONTENTS);
	uint64_t ino = 42;
	off_t offset = 1 << 30;
	off_t limit = offset + strlen(CONTENTS) / 2;
	int fd, oflag;

	oflag = GetParam();

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write(ino, offset, bufsize / 2, bufsize / 2, CONTENTS);

	rl.rlim_cur = limit;
	rl.rlim_max = m_initial_limit.rlim_max;
	ASSERT_EQ(0, setrlimit(RLIMIT_FSIZE, &rl)) << strerror(errno);
	ASSERT_NE(SIG_ERR, signal(SIGXFSZ, sigxfsz_handler)) << strerror(errno);

	fd = open(FULLPATH, O_WRONLY | oflag);

	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize / 2, pwrite(fd, CONTENTS, bufsize, offset))
		<< strerror(errno);
	leak(fd);
}

INSTANTIATE_TEST_CASE_P(W, WriteRlimitFsize,
	Values(0, O_DIRECT)
);

/*
 * A short read indicates EOF.  Test that nothing bad happens if we get EOF
 * during the R of a RMW operation.
 */
TEST_F(Write, eof_during_rmw)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	const char *INITIAL   = "XXXXXXXXXX";
	uint64_t ino = 42;
	uint64_t offset = 1;
	ssize_t bufsize = strlen(CONTENTS) + 1;
	off_t orig_fsize = 10;
	off_t truncated_fsize = 5;
	int fd;

	FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, orig_fsize, 1);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, orig_fsize, truncated_fsize, INITIAL, O_RDWR);
	maybe_expect_write(ino, offset, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS, bufsize, offset))
		<< strerror(errno);
	leak(fd);
}

/*
 * VOP_STRATEGY should not query the server for the file's size, even if its
 * cached attributes have expired.
 * Regression test for https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=256937
 */
TEST_P(WriteEofDuringVnopStrategy, eof_during_vop_strategy)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	Sequence seq;
	const off_t filesize = 2 * m_maxbcachebuf;
	void *contents;
	uint64_t ino = 42;
	uint64_t attr_valid = 0;
	uint64_t attr_valid_nsec = 0;
	mode_t mode = S_IFREG | 0644;
	int fd;
	int ngetattrs;

	ngetattrs = GetParam();
	contents = calloc(1, filesize);

	EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH)
	.WillRepeatedly(Invoke(
		ReturnImmediate([=](auto in __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, entry);
		out.body.entry.attr.mode = mode;
		out.body.entry.nodeid = ino;
		out.body.entry.attr.nlink = 1;
		out.body.entry.attr.size = filesize;
		out.body.entry.attr_valid = attr_valid;
		out.body.entry.attr_valid_nsec = attr_valid_nsec;
	})));
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_GETATTR &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).Times(Between(ngetattrs - 1, ngetattrs))
	.InSequence(seq)
	.WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;
		out.body.attr.attr.mode = mode;
		out.body.attr.attr_valid = attr_valid;
		out.body.attr.attr_valid_nsec = attr_valid_nsec;
		out.body.attr.attr.size = filesize;
	})));
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_GETATTR &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).InSequence(seq)
	.WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;
		out.body.attr.attr.mode = mode;
		out.body.attr.attr_valid = attr_valid;
		out.body.attr.attr_valid_nsec = attr_valid_nsec;
		out.body.attr.attr.size = filesize / 2;
	})));
	expect_write(ino, 0, filesize / 2, filesize / 2, contents);

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);
	ASSERT_EQ(filesize / 2, write(fd, contents, filesize / 2))
		<< strerror(errno);

}

INSTANTIATE_TEST_CASE_P(W, WriteEofDuringVnopStrategy,
	Values(1, 2, 3)
);

/*
 * If the kernel cannot be sure which uid, gid, or pid was responsible for a
 * write, then it must set the FUSE_WRITE_CACHE bit
 */
/* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236378 */
TEST_F(Write, mmap)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	void *p;
	uint64_t offset = 10;
	size_t len;
	void *zeros, *expected;

	len = getpagesize();

	zeros = calloc(1, len);
	ASSERT_NE(nullptr, zeros);
	expected = calloc(1, len);
	ASSERT_NE(nullptr, expected);
	memmove((uint8_t*)expected + offset, CONTENTS, bufsize);

	expect_lookup(RELPATH, ino, len);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, len, len, zeros);
	/*
	 * Writes from the pager may or may not be associated with the correct
	 * pid, so they must set FUSE_WRITE_CACHE.
	 */
	FuseTest::expect_write(ino, 0, len, len, FUSE_WRITE_CACHE, 0, expected);
	expect_flush(ino, 1, ReturnErrno(0));
	expect_release(ino, ReturnErrno(0));

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	ASSERT_NE(MAP_FAILED, p) << strerror(errno);

	memmove((uint8_t*)p + offset, CONTENTS, bufsize);

	ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
	close(fd);	// Write mmap'd data on close

	free(expected);
	free(zeros);

	leak(fd);
}

TEST_F(Write, pwrite)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	uint64_t offset = m_maxbcachebuf;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write(ino, offset, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_WRONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS, bufsize, offset))
		<< strerror(errno);
	leak(fd);
}

/* Writing a file should update its cached mtime and ctime */
TEST_F(Write, timestamps)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	ssize_t bufsize = strlen(CONTENTS);
	uint64_t ino = 42;
	struct stat sb0, sb1;
	int fd;

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	maybe_expect_write(ino, 0, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);
	ASSERT_EQ(0, fstat(fd, &sb0)) << strerror(errno);
	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);

	nap();

	ASSERT_EQ(0, fstat(fd, &sb1)) << strerror(errno);

	EXPECT_EQ(sb0.st_atime, sb1.st_atime);
	EXPECT_NE(sb0.st_mtime, sb1.st_mtime);
	EXPECT_NE(sb0.st_ctime, sb1.st_ctime);

	leak(fd);
}

TEST_F(Write, write)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_WRONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	leak(fd);
}

/* fuse(4) should not issue writes of greater size than the daemon requests */
TEST_F(WriteMaxWrite, write)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	int *contents;
	uint64_t ino = 42;
	int fd;
	ssize_t halfbufsize, bufsize;

	halfbufsize = m_mock->m_maxwrite;
	if (halfbufsize >= m_maxbcachebuf || halfbufsize >= m_maxphys)
		GTEST_SKIP() << "Must lower m_maxwrite for this test";
	bufsize = halfbufsize * 2;
	contents = (int*)malloc(bufsize);
	ASSERT_NE(nullptr, contents);
	for (int i = 0; i < (int)bufsize / (int)sizeof(i); i++) {
		contents[i] = i;
	}

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	maybe_expect_write(ino, 0, halfbufsize, contents);
	maybe_expect_write(ino, halfbufsize, halfbufsize,
		&contents[halfbufsize / sizeof(int)]);

	fd = open(FULLPATH, O_WRONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, contents, bufsize)) << strerror(errno);
	leak(fd);

	free(contents);
}

TEST_F(Write, write_nothing)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = 0;

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);

	fd = open(FULLPATH, O_WRONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	leak(fd);
}

TEST_F(Write_7_8, write)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write_7_8(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_WRONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	leak(fd);
}

/* In writeback mode, dirty data should be written on close */
TEST_F(WriteBackAsync, close)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write(ino, 0, bufsize, bufsize, CONTENTS);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_SETATTR);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;	// Must match nodeid
	})));
	expect_flush(ino, 1, ReturnErrno(0));
	expect_release(ino, ReturnErrno(0));

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	close(fd);
}

/* In writeback mode, adjacent writes will be clustered together */
TEST_F(WriteCluster, clustering)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	uint64_t ino = 42;
	int i, fd;
	void *wbuf, *wbuf2x;
	ssize_t bufsize = m_maxbcachebuf;
	off_t filesize = 5 * bufsize;

	wbuf = malloc(bufsize);
	ASSERT_NE(nullptr, wbuf) << strerror(errno);
	memset(wbuf, 'X', bufsize);
	wbuf2x = malloc(2 * bufsize);
	ASSERT_NE(nullptr, wbuf2x) << strerror(errno);
	memset(wbuf2x, 'X', 2 * bufsize);

	expect_lookup(RELPATH, ino, filesize);
	expect_open(ino, 0, 1);
	/*
	 * Writes of bufsize-bytes each should be clustered into greater sizes.
	 * The amount of clustering is adaptive, so the first write actually
	 * issued will be 2x bufsize and subsequent writes may be larger
	 */
	expect_write(ino, 0, 2 * bufsize, 2 * bufsize, wbuf2x);
	expect_write(ino, 2 * bufsize, 2 * bufsize, 2 * bufsize, wbuf2x);
	expect_flush(ino, 1, ReturnErrno(0));
	expect_release(ino, ReturnErrno(0));

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	for (i = 0; i < 4; i++) {
		ASSERT_EQ(bufsize, write(fd, wbuf, bufsize))
			<< strerror(errno);
	}
	close(fd);
	free(wbuf2x);
	free(wbuf);
}

/*
 * When clustering writes, an I/O error to any of the cluster's children should
 * not panic the system on unmount
 */
/*
 * Regression test for bug 238565
 * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=238565
 */
TEST_F(WriteCluster, cluster_write_err)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	uint64_t ino = 42;
	int i, fd;
	void *wbuf;
	ssize_t bufsize = m_maxbcachebuf;
	off_t filesize = 4 * bufsize;

	wbuf = malloc(bufsize);
	ASSERT_NE(nullptr, wbuf) << strerror(errno);
	memset(wbuf, 'X', bufsize);

	expect_lookup(RELPATH, ino, filesize);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_WRITE);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnErrno(EIO)));
	expect_flush(ino, 1, ReturnErrno(0));
	expect_release(ino, ReturnErrno(0));

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	for (i = 0; i < 3; i++) {
		ASSERT_EQ(bufsize, write(fd, wbuf, bufsize))
			<< strerror(errno);
	}
	close(fd);
	free(wbuf);
}

/*
 * In writeback mode, writes to an O_WRONLY file could trigger reads from the
 * server.  The FUSE protocol explicitly allows that.
 */
TEST_F(WriteBack, rmw)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	const char *INITIAL   = "XXXXXXXXXX";
	uint64_t ino = 42;
	uint64_t offset = 1;
	off_t fsize = 10;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);

	FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, fsize, 1);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, fsize, fsize, INITIAL, O_WRONLY);
	maybe_expect_write(ino, offset, bufsize, CONTENTS);

	fd = open(FULLPATH, O_WRONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS, bufsize, offset))
		<< strerror(errno);
	leak(fd);
}

/*
 * Without direct_io, writes should be committed to cache
 */
TEST_F(WriteBack, cache)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t readbuf[bufsize];

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	/*
	 * A subsequent read should be serviced by cache, without querying the
	 * filesystem daemon
	 */
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
	leak(fd);
}

/*
 * With O_DIRECT, writes should not be committed to cache.  Admittedly this is
 * an odd test, because it would be unusual to use O_DIRECT for writes but not
 * reads.
 */
TEST_F(WriteBack, o_direct)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t readbuf[bufsize];

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	FuseTest::expect_write(ino, 0, bufsize, bufsize, 0, FUSE_WRITE_CACHE,
		CONTENTS);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDWR | O_DIRECT);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	/* A subsequent read must query the daemon because cache is empty */
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
	leak(fd);
}

TEST_F(WriteBack, direct_io)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t readbuf[bufsize];

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, FOPEN_DIRECT_IO, 1);
	FuseTest::expect_write(ino, 0, bufsize, bufsize, 0, FUSE_WRITE_CACHE,
		CONTENTS);
	expect_read(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	/* A subsequent read must query the daemon because cache is empty */
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
	leak(fd);
}

/*
 * mmap should still be possible even if the server used direct_io.  Mmap will
 * still use the cache, though.
 *
 * Regression test for bug 247276
 * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=247276
 */
TEST_F(WriteBack, mmap_direct_io)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	size_t len;
	ssize_t bufsize = strlen(CONTENTS);
	void *p, *zeros;

	len = getpagesize();
	zeros = calloc(1, len);
	ASSERT_NE(nullptr, zeros);

	expect_lookup(RELPATH, ino, len);
	expect_open(ino, FOPEN_DIRECT_IO, 1);
	expect_read(ino, 0, len, len, zeros);
	expect_flush(ino, 1, ReturnErrno(0));
	FuseTest::expect_write(ino, 0, len, len, FUSE_WRITE_CACHE, 0, zeros);
	expect_release(ino, ReturnErrno(0));

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	ASSERT_NE(MAP_FAILED, p) << strerror(errno);

	memmove((uint8_t*)p, CONTENTS, bufsize);

	ASSERT_EQ(0, munmap(p, len)) << strerror(errno);
	close(fd);	// Write mmap'd data on close

	free(zeros);
}

/*
 * When mounted with -o async, the writeback cache mode should delay writes
 */
TEST_F(WriteBackAsync, delay)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	/* Write should be cached, but FUSE_WRITE shouldn't be sent */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_WRITE);
		}, Eq(true)),
		_)
	).Times(0);

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);

	/* Don't close the file because that would flush the cache */
	leak(fd);
}

/*
 * A direct write should not evict dirty cached data from outside of its own
 * byte range.
 */
TEST_F(WriteBackAsync, direct_io_ignores_unrelated_cached)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char CONTENTS0[] = "abcdefgh";
	const char CONTENTS1[] = "ijklmnop";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS0) + 1;
	ssize_t fsize = 2 * m_maxbcachebuf;
	char readbuf[bufsize];
	void *zeros;

	zeros = calloc(1, m_maxbcachebuf);
	ASSERT_NE(nullptr, zeros);

	expect_lookup(RELPATH, ino, fsize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, m_maxbcachebuf, m_maxbcachebuf, zeros);
	FuseTest::expect_write(ino, m_maxbcachebuf, bufsize, bufsize, 0, 0,
		CONTENTS1);

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	// Cache first block with dirty data.  This will entail first reading
	// the existing data.
	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS0, bufsize, 0))
		<< strerror(errno);

	// Write directly to second block
	ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno);
	ASSERT_EQ(bufsize, pwrite(fd, CONTENTS1, bufsize, m_maxbcachebuf))
		<< strerror(errno);

	// Read from the first block again.  Should be serviced by cache.
	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
	ASSERT_EQ(bufsize, pread(fd, readbuf, bufsize, 0)) << strerror(errno);
	ASSERT_STREQ(readbuf, CONTENTS0);

	leak(fd);
	free(zeros);
}

/*
 * If a direct io write partially overlaps one or two blocks of dirty cached
 * data, no dirty data should be lost.  Admittedly this is a weird test,
 * because it would be unusual to use O_DIRECT and the writeback cache.
 */
TEST_F(WriteBackAsync, direct_io_partially_overlaps_cached_block)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	uint64_t ino = 42;
	int fd;
	off_t bs = m_maxbcachebuf;
	ssize_t fsize = 3 * bs;
	void *readbuf, *zeros, *ones, *zeroones, *onezeros;

	readbuf = malloc(bs);
	ASSERT_NE(nullptr, readbuf) << strerror(errno);
	zeros = calloc(1, 3 * bs);
	ASSERT_NE(nullptr, zeros);
	ones = calloc(1, 2 * bs);
	ASSERT_NE(nullptr, ones);
	memset(ones, 1, 2 * bs);
	zeroones = calloc(1, bs);
	ASSERT_NE(nullptr, zeroones);
	memset((uint8_t*)zeroones + bs / 2, 1, bs / 2);
	onezeros = calloc(1, bs);
	ASSERT_NE(nullptr, onezeros);
	memset(onezeros, 1, bs / 2);

	expect_lookup(RELPATH, ino, fsize);
	expect_open(ino, 0, 1);

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Cache first and third blocks with dirty data.  */
	ASSERT_EQ(3 * bs, pwrite(fd, zeros, 3 * bs, 0)) << strerror(errno);

	/*
	 * Write directly to all three blocks.  The partially written blocks
	 * will be flushed because they're dirty.
	 */
	FuseTest::expect_write(ino, 0, bs, bs, 0, 0, zeros);
	FuseTest::expect_write(ino, 2 * bs, bs, bs, 0, 0, zeros);
	/* The direct write is split in two because of the m_maxwrite value */
	FuseTest::expect_write(ino,     bs / 2, bs, bs, 0, 0, ones);
	FuseTest::expect_write(ino, 3 * bs / 2, bs, bs, 0, 0, ones);
	ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno);
	ASSERT_EQ(2 * bs, pwrite(fd, ones, 2 * bs, bs / 2)) << strerror(errno);

	/*
	 * Read from both the valid and invalid portions of the first and third
	 * blocks again.  This will entail FUSE_READ operations because these
	 * blocks were invalidated by the direct write.
	 */
	expect_read(ino, 0, bs, bs, zeroones);
	expect_read(ino, 2 * bs, bs, bs, onezeros);
	ASSERT_EQ(0, fcntl(fd, F_SETFL, 0)) << strerror(errno);
	ASSERT_EQ(bs / 2, pread(fd, readbuf, bs / 2, 0)) << strerror(errno);
	EXPECT_EQ(0, memcmp(zeros, readbuf, bs / 2));
	ASSERT_EQ(bs / 2, pread(fd, readbuf, bs / 2, 5 * bs / 2))
		<< strerror(errno);
	EXPECT_EQ(0, memcmp(zeros, readbuf, bs / 2));
	ASSERT_EQ(bs / 2, pread(fd, readbuf, bs / 2, bs / 2))
		<< strerror(errno);
	EXPECT_EQ(0, memcmp(ones, readbuf, bs / 2));
	ASSERT_EQ(bs / 2, pread(fd, readbuf, bs / 2, 2 * bs))
		<< strerror(errno);
	EXPECT_EQ(0, memcmp(ones, readbuf, bs / 2));

	leak(fd);
	free(zeroones);
	free(onezeros);
	free(ones);
	free(zeros);
	free(readbuf);
}

/*
 * In WriteBack mode, writes may be cached beyond what the server thinks is the
 * EOF.  In this case, a short read at EOF should _not_ cause fusefs to update
 * the file's size.
 */
TEST_F(WriteBackAsync, eof)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS0 = "abcdefgh";
	const char *CONTENTS1 = "ijklmnop";
	uint64_t ino = 42;
	int fd;
	off_t offset = m_maxbcachebuf;
	ssize_t wbufsize = strlen(CONTENTS1);
	off_t old_filesize = (off_t)strlen(CONTENTS0);
	ssize_t rbufsize = 2 * old_filesize;
	char readbuf[rbufsize];
	size_t holesize = rbufsize - old_filesize;
	char hole[holesize];
	struct stat sb;
	ssize_t r;

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, m_maxbcachebuf, old_filesize, CONTENTS0);

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Write and cache data beyond EOF */
	ASSERT_EQ(wbufsize, pwrite(fd, CONTENTS1, wbufsize, offset))
		<< strerror(errno);

	/* Read from the old EOF */
	r = pread(fd, readbuf, rbufsize, 0);
	ASSERT_LE(0, r) << strerror(errno);
	EXPECT_EQ(rbufsize, r) << "read should've synthesized a hole";
	EXPECT_EQ(0, memcmp(CONTENTS0, readbuf, old_filesize));
	bzero(hole, holesize);
	EXPECT_EQ(0, memcmp(hole, readbuf + old_filesize, holesize));

	/* The file's size should still be what was established by pwrite */
	ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno);
	EXPECT_EQ(offset + wbufsize, sb.st_size);
	leak(fd);
}

/*
 * When a file has dirty writes that haven't been flushed, the server's notion
 * of its mtime and ctime will be wrong.  The kernel should ignore those if it
 * gets them from a FUSE_GETATTR before flushing.
 */
TEST_F(WriteBackAsync, timestamps)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	ssize_t bufsize = strlen(CONTENTS);
	uint64_t ino = 42;
	uint64_t attr_valid = 0;
	uint64_t attr_valid_nsec = 0;
	uint64_t server_time = 12345;
	mode_t mode = S_IFREG | 0644;
	int fd;

	struct stat sb;

	EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH)
	.WillRepeatedly(Invoke(
		ReturnImmediate([=](auto in __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, entry);
		out.body.entry.attr.mode = mode;
		out.body.entry.nodeid = ino;
		out.body.entry.attr.nlink = 1;
		out.body.entry.attr_valid = attr_valid;
		out.body.entry.attr_valid_nsec = attr_valid_nsec;
	})));
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_GETATTR &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(
	ReturnImmediate([=](auto i __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;
		out.body.attr.attr.mode = mode;
		out.body.attr.attr_valid = attr_valid;
		out.body.attr.attr_valid_nsec = attr_valid_nsec;
		out.body.attr.attr.atime = server_time;
		out.body.attr.attr.mtime = server_time;
		out.body.attr.attr.ctime = server_time;
	})));

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);
	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);

	ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno);
	EXPECT_EQ((time_t)server_time, sb.st_atime);
	EXPECT_NE((time_t)server_time, sb.st_mtime);
	EXPECT_NE((time_t)server_time, sb.st_ctime);

	leak(fd);
}

/* Any dirty timestamp fields should be flushed during a SETATTR */
TEST_F(WriteBackAsync, timestamps_during_setattr)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	ssize_t bufsize = strlen(CONTENTS);
	uint64_t ino = 42;
	const mode_t newmode = 0755;
	int fd;

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			uint32_t valid = FATTR_MODE | FATTR_MTIME | FATTR_CTIME;
			return (in.header.opcode == FUSE_SETATTR &&
				in.header.nodeid == ino &&
				in.body.setattr.valid == valid);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;
		out.body.attr.attr.mode = S_IFREG | newmode;
	})));

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);
	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	ASSERT_EQ(0, fchmod(fd, newmode)) << strerror(errno);

	leak(fd);
}

/* fuse_init_out.time_gran controls the granularity of timestamps */
TEST_P(TimeGran, timestamps_during_setattr)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	ssize_t bufsize = strlen(CONTENTS);
	uint64_t ino = 42;
	const mode_t newmode = 0755;
	int fd;

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			uint32_t valid = FATTR_MODE | FATTR_MTIME | FATTR_CTIME;
			return (in.header.opcode == FUSE_SETATTR &&
				in.header.nodeid == ino &&
				in.body.setattr.valid == valid &&
				in.body.setattr.mtimensec % m_time_gran == 0 &&
				in.body.setattr.ctimensec % m_time_gran == 0);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;
		out.body.attr.attr.mode = S_IFREG | newmode;
	})));

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);
	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	ASSERT_EQ(0, fchmod(fd, newmode)) << strerror(errno);

	leak(fd);
}

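/* Covers time_gran values from 1ns (1 << 0) through 512ns (1 << 9) */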
INSTANTIATE_TEST_CASE_P(RA, TimeGran, Range(0u, 10u));

/*
 * Without direct_io, writes should be committed to cache
 */
TEST_F(Write, writethrough)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	uint8_t readbuf[bufsize];

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	/*
	 * A subsequent read should be serviced by cache, without querying the
	 * filesystem daemon
	 */
	ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno);
	ASSERT_EQ(bufsize, read(fd, readbuf, bufsize)) << strerror(errno);
	leak(fd);
}

/* Writes that extend a file should update the cached file size */
TEST_F(Write, update_file_size)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	struct stat sb;
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	expect_write(ino, 0, bufsize, bufsize, CONTENTS);

	fd = open(FULLPATH, O_RDWR);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	/* Get cached attributes */
	ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno);
	ASSERT_EQ(bufsize, sb.st_size);
	leak(fd);
}
1605