xref: /freebsd/tests/sys/fs/fusefs/io.cc (revision 5f4c09dd85bff675e0ca63c55ea3c517e0fddfcc)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2019 The FreeBSD Foundation
5  *
6  * This software was developed by BFF Storage Systems, LLC under sponsorship
7  * from the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 extern "C" {
32 #include <sys/types.h>
33 #include <sys/mman.h>
34 #include <sys/sysctl.h>
35 
36 #include <fcntl.h>
37 #include <stdlib.h>
38 #include <unistd.h>
39 }
40 
41 #include "mockfs.hh"
42 #include "utils.hh"
43 
44 /*
45  * For testing I/O like fsx does, but deterministically and without a real
46  * underlying file system
47  */
48 
49 using namespace testing;
50 
/* Path of the test file as seen through the mounted file system */
const char FULLPATH[] = "mountpoint/some_file.txt";
/* Same file, relative to the mount point (used for lookups) */
const char RELPATH[] = "some_file.txt";
/* Inode number the mock file system assigns to the test file */
const uint64_t ino = 42;
54 
55 static void compare(const void *tbuf, const void *controlbuf, off_t baseofs,
56 	ssize_t size)
57 {
58 	int i;
59 
60 	for (i = 0; i < size; i++) {
61 		if (((const char*)tbuf)[i] != ((const char*)controlbuf)[i]) {
62 			off_t ofs = baseofs + i;
63 			FAIL() << "miscompare at offset "
64 			       << std::hex
65 			       << std::showbase
66 			       << ofs
67 			       << ".  expected = "
68 			       << std::setw(2)
69 			       << (unsigned)((const uint8_t*)controlbuf)[i]
70 			       << " got = "
71 			       << (unsigned)((const uint8_t*)tbuf)[i];
72 		}
73 	}
74 }
75 
76 typedef tuple<bool, uint32_t, cache_mode> IoParam;
77 
/*
 * Fixture for fsx-style I/O tests.  The mock FUSE daemon stores file data
 * in a real "backing_file", and every test operation is mirrored to a
 * "control" file on the host file system so the results can be compared.
 *
 * Parameterized by (async read enabled, max_write size, cache mode).
 */
class Io: public FuseTest, public WithParamInterface<IoParam> {
public:
/*
 * m_backing_fd: backing store serviced by the mock FUSE handlers
 * m_control_fd: control copy on the host fs, written in lockstep
 * m_test_fd:    the test file, opened through the FUSE mount
 */
int m_backing_fd, m_control_fd, m_test_fd;
off_t m_filesize;	/* expected current size of the test file */
bool m_direct_io;	/* open the test file with FOPEN_DIRECT_IO? */

Io(): m_backing_fd(-1), m_control_fd(-1), m_test_fd(-1), m_filesize(0),
	m_direct_io(false) {};

/*
 * Create the backing and control files, translate the test parameters
 * into mock-fs settings, install FUSE request handlers that service I/O
 * from the backing file, and finally open the test file through the
 * mount.
 */
void SetUp()
{
	m_backing_fd = open("backing_file", O_RDWR | O_CREAT | O_TRUNC, 0644);
	if (m_backing_fd < 0)
		FAIL() << strerror(errno);
	m_control_fd = open("control", O_RDWR | O_CREAT | O_TRUNC, 0644);
	if (m_control_fd < 0)
		FAIL() << strerror(errno);
	srandom(22'9'1982);	// Seed with my birthday

	if (get<0>(GetParam()))
		m_init_flags |= FUSE_ASYNC_READ;
	m_maxwrite = get<1>(GetParam());
	/* Deliberate fallthroughs: the cache-mode settings are cumulative */
	switch (get<2>(GetParam())) {
		case Uncached:
			m_direct_io = true;
			break;
		case WritebackAsync:
			m_async = true;
			/* FALLTHROUGH */
		case Writeback:
			m_init_flags |= FUSE_WRITEBACK_CACHE;
			/* FALLTHROUGH */
		case Writethrough:
			break;
		default:
			FAIL() << "Unknown cache mode";
	}
	m_noatime = true;	// To prevent SETATTR for atime on close

	FuseTest::SetUp();
	if (IsSkipped())
		return;

	if (verbosity > 0) {
		printf("Test Parameters: init_flags=%#x maxwrite=%#x "
		    "%sasync cache=%s\n",
		    m_init_flags, m_maxwrite, m_async? "" : "no",
		    cache_mode_to_s(get<2>(GetParam())));
	}

	expect_lookup(RELPATH, ino, S_IFREG | 0644, 0, 1);
	expect_open(ino, m_direct_io ? FOPEN_DIRECT_IO : 0, 1);
	/* FUSE_WRITE: copy the request's payload into the backing file */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_WRITE &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnImmediate([=](auto in, auto& out) {
		const char *buf = (const char*)in.body.bytes +
			sizeof(struct fuse_write_in);
		ssize_t isize = in.body.write.size;
		off_t iofs = in.body.write.offset;

		assert((size_t)isize <= sizeof(in.body.bytes) -
			sizeof(struct fuse_write_in));
		ASSERT_EQ(isize, pwrite(m_backing_fd, buf, isize, iofs))
			<< strerror(errno);
		SET_OUT_HEADER_LEN(out, write);
		out.body.write.size = isize;
	})));
	/*
	 * FUSE_READ: satisfy the read from the backing file.  A short
	 * pread (at or past the backing file's EOF) produces a short or
	 * empty reply.
	 * NOTE(review): the request is accessed via in.body.write rather
	 * than a read-specific view; presumably the offset/size fields
	 * coincide in the union — confirm against mockfs.hh.
	 */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnImmediate([=](auto in, auto& out) {
		ssize_t isize = in.body.write.size;
		off_t iofs = in.body.write.offset;
		void *buf = out.body.bytes;
		ssize_t osize;

		assert((size_t)isize <= sizeof(out.body.bytes));
		osize = pread(m_backing_fd, buf, isize, iofs);
		ASSERT_LE(0, osize) << strerror(errno);
		out.header.len = sizeof(struct fuse_out_header) + osize;
	})));
	/* FUSE_SETATTR with FATTR_SIZE: truncate the backing file to match */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_SETATTR &&
				in.header.nodeid == ino &&
				(in.body.setattr.valid & FATTR_SIZE));

		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnImmediate([=](auto in, auto& out) {
		ASSERT_EQ(0, ftruncate(m_backing_fd, in.body.setattr.size))
			<< strerror(errno);
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;
		out.body.attr.attr.mode = S_IFREG | 0755;
		out.body.attr.attr.size = in.body.setattr.size;
		/* Cache attributes forever so no extra GETATTRs are sent */
		out.body.attr.attr_valid = UINT64_MAX;
	})));
	/* Any test that close()s will send FUSE_FLUSH and FUSE_RELEASE */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_FLUSH &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnErrno(0)));
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_RELEASE &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnErrno(0)));

	m_test_fd = open(FULLPATH, O_RDWR );
	EXPECT_LE(0, m_test_fd) << strerror(errno);
}

/*
 * Close all three fds, then tear down the mount.  leak() — see
 * utils.hh; presumably marks m_test_fd as intentionally unchecked by
 * the framework's fd-leak detection — TODO confirm.
 */
void TearDown()
{
	if (m_test_fd >= 0)
		close(m_test_fd);
	if (m_backing_fd >= 0)
		close(m_backing_fd);
	if (m_control_fd >= 0)
		close(m_control_fd);
	FuseTest::TearDown();
	leak(m_test_fd);
}

/*
 * Close and reopen both the test and control fds.
 * NOTE(review): m_test_fd is reopened as "backing_file", not FULLPATH,
 * so subsequent I/O bypasses the FUSE mount — confirm intentional.
 */
void do_closeopen()
{
	ASSERT_EQ(0, close(m_test_fd)) << strerror(errno);
	m_test_fd = open("backing_file", O_RDWR);
	ASSERT_LE(0, m_test_fd) << strerror(errno);

	ASSERT_EQ(0, close(m_control_fd)) << strerror(errno);
	m_control_fd = open("control", O_RDWR);
	ASSERT_LE(0, m_control_fd) << strerror(errno);
}

/* Truncate both the test and control files and record the new size */
void do_ftruncate(off_t offs)
{
	ASSERT_EQ(0, ftruncate(m_test_fd, offs)) << strerror(errno);
	ASSERT_EQ(0, ftruncate(m_control_fd, offs)) << strerror(errno);
	m_filesize = offs;
}

/*
 * Read [offs, offs + size) from the test file via mmap and compare it
 * with the same range pread() from the control file.  The mapping is
 * page-aligned; pg_offset locates the target range within it.
 */
void do_mapread(ssize_t size, off_t offs)
{
	void *control_buf, *p;
	off_t pg_offset, page_mask;
	size_t map_size;

	page_mask = getpagesize() - 1;
	pg_offset = offs & page_mask;
	map_size = pg_offset + size;

	p = mmap(NULL, map_size, PROT_READ, MAP_FILE | MAP_SHARED, m_test_fd,
	    offs - pg_offset);
	ASSERT_NE(p, MAP_FAILED) << strerror(errno);

	control_buf = malloc(size);
	ASSERT_NE(nullptr, control_buf) << strerror(errno);

	ASSERT_EQ(size, pread(m_control_fd, control_buf, size, offs))
		<< strerror(errno);

	compare((void*)((char*)p + pg_offset), control_buf, offs, size);

	ASSERT_EQ(0, munmap(p, map_size)) << strerror(errno);
	free(control_buf);
}

/*
 * pread [offs, offs + size) from both the test and control files and
 * compare the results.  Short reads are test failures.
 */
void do_read(ssize_t size, off_t offs)
{
	void *test_buf, *control_buf;
	ssize_t r;

	test_buf = malloc(size);
	ASSERT_NE(nullptr, test_buf) << strerror(errno);
	control_buf = malloc(size);
	ASSERT_NE(nullptr, control_buf) << strerror(errno);

	errno = 0;
	r = pread(m_test_fd, test_buf, size, offs);
	ASSERT_NE(-1, r) << strerror(errno);
	ASSERT_EQ(size, r) << "unexpected short read";
	r = pread(m_control_fd, control_buf, size, offs);
	ASSERT_NE(-1, r) << strerror(errno);
	ASSERT_EQ(size, r) << "unexpected short read";

	compare(test_buf, control_buf, offs, size);

	free(control_buf);
	free(test_buf);
}

/*
 * Write size random bytes at offs to the test file through a shared
 * mapping, and mirror the same bytes to the control file with pwrite.
 * Extends the file first if needed, since mmap cannot grow a vnode.
 */
void do_mapwrite(ssize_t size, off_t offs)
{
	char *buf;
	void *p;
	off_t pg_offset, page_mask;
	size_t map_size;
	long i;

	page_mask = getpagesize() - 1;
	pg_offset = offs & page_mask;
	map_size = pg_offset + size;

	buf = (char*)malloc(size);
	ASSERT_NE(nullptr, buf) << strerror(errno);
	for (i=0; i < size; i++)
		buf[i] = random();

	if (offs + size > m_filesize) {
		/*
		 * Must manually extend.  vm_mmap_vnode will not implicitly
		 * extend a vnode
		 */
		do_ftruncate(offs + size);
	}

	p = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
	    MAP_FILE | MAP_SHARED, m_test_fd, offs - pg_offset);
	ASSERT_NE(p, MAP_FAILED) << strerror(errno);

	bcopy(buf, (char*)p + pg_offset, size);
	ASSERT_EQ(size, pwrite(m_control_fd, buf, size, offs))
		<< strerror(errno);

	free(buf);
	ASSERT_EQ(0, munmap(p, map_size)) << strerror(errno);
}

/*
 * pwrite size random bytes at offs to both the test and control files,
 * and keep m_filesize in sync with the possibly-extended file.
 */
void do_write(ssize_t size, off_t offs)
{
	char *buf;
	long i;

	buf = (char*)malloc(size);
	ASSERT_NE(nullptr, buf) << strerror(errno);
	for (i=0; i < size; i++)
		buf[i] = random();

	ASSERT_EQ(size, pwrite(m_test_fd, buf, size, offs ))
		<< strerror(errno);
	ASSERT_EQ(size, pwrite(m_control_fd, buf, size, offs))
		<< strerror(errno);
	m_filesize = std::max(m_filesize, offs + size);

	free(buf);
}

};
340 
341 class IoCacheable: public Io {
342 public:
343 virtual void SetUp() {
344 	Io::SetUp();
345 }
346 };
347 
348 /*
349  * Extend a file with dirty data in the last page of the last block.
350  *
351  * fsx -WR -P /tmp -S8 -N3 fsx.bin
352  */
353 TEST_P(Io, extend_from_dirty_page)
354 {
355 	off_t wofs = 0x21a0;
356 	ssize_t wsize = 0xf0a8;
357 	off_t rofs = 0xb284;
358 	ssize_t rsize = 0x9b22;
359 	off_t truncsize = 0x28702;
360 
361 	do_write(wsize, wofs);
362 	do_ftruncate(truncsize);
363 	do_read(rsize, rofs);
364 }
365 
366 /*
367  * mapwrite into a newly extended part of a file.
368  *
369  * fsx -c 100 -i 100 -l 524288 -o 131072 -N5 -P /tmp -S19 fsx.bin
370  */
371 TEST_P(IoCacheable, extend_by_mapwrite)
372 {
373 	do_mapwrite(0x849e, 0x29a3a);	/* [0x29a3a, 0x31ed7] */
374 	do_mapwrite(0x3994, 0x3c7d8);	/* [0x3c7d8, 0x4016b] */
375 	do_read(0xf556, 0x30c16);	/* [0x30c16, 0x4016b] */
376 }
377 
378 /*
379  * When writing the last page of a file, it must be written synchronously.
380  * Otherwise the cached page can become invalid by a subsequent extend
381  * operation.
382  *
383  * fsx -WR -P /tmp -S642 -N3 fsx.bin
384  */
385 TEST_P(Io, last_page)
386 {
387 	do_write(0xcc77, 0x1134f);	/* [0x1134f, 0x1dfc5] */
388 	do_write(0xdfa7, 0x2096a);	/* [0x2096a, 0x2e910] */
389 	do_read(0xb5b7, 0x1a3aa);	/* [0x1a3aa, 0x25960] */
390 }
391 
392 /*
393  * Read a hole using mmap
394  *
395  * fsx -c 100 -i 100 -l 524288 -o 131072 -N11 -P /tmp  -S14 fsx.bin
396  */
397 TEST_P(IoCacheable, mapread_hole)
398 {
399 	do_write(0x123b7, 0xf205);	/* [0xf205, 0x215bb] */
400 	do_mapread(0xeeea, 0x2f4c);	/* [0x2f4c, 0x11e35] */
401 }
402 
403 /*
404  * Read a hole from a block that contains some cached data.
405  *
406  * fsx -WR -P /tmp -S55  fsx.bin
407  */
408 TEST_P(Io, read_hole_from_cached_block)
409 {
410 	off_t wofs = 0x160c5;
411 	ssize_t wsize = 0xa996;
412 	off_t rofs = 0x472e;
413 	ssize_t rsize = 0xd8d5;
414 
415 	do_write(wsize, wofs);
416 	do_read(rsize, rofs);
417 }
418 
419 /*
420  * Truncating a file into a dirty buffer should not causing anything untoward
421  * to happen when that buffer is eventually flushed.
422  *
423  * fsx -WR -P /tmp -S839 -d -N6 fsx.bin
424  */
425 TEST_P(Io, truncate_into_dirty_buffer)
426 {
427 	off_t wofs0 = 0x3bad7;
428 	ssize_t wsize0 = 0x4529;
429 	off_t wofs1 = 0xc30d;
430 	ssize_t wsize1 = 0x5f77;
431 	off_t truncsize0 = 0x10916;
432 	off_t rofs = 0xdf17;
433 	ssize_t rsize = 0x29ff;
434 	off_t truncsize1 = 0x152b4;
435 
436 	do_write(wsize0, wofs0);
437 	do_write(wsize1, wofs1);
438 	do_ftruncate(truncsize0);
439 	do_read(rsize, rofs);
440 	do_ftruncate(truncsize1);
441 	close(m_test_fd);
442 }
443 
444 /*
445  * Truncating a file into a dirty buffer should not causing anything untoward
446  * to happen when that buffer is eventually flushed, even when the buffer's
447  * dirty_off is > 0.
448  *
449  * Based on this command with a few steps removed:
450  * fsx -WR -P /tmp -S677 -d -N8 fsx.bin
451  */
452 TEST_P(Io, truncate_into_dirty_buffer2)
453 {
454 	off_t truncsize0 = 0x344f3;
455 	off_t wofs = 0x2790c;
456 	ssize_t wsize = 0xd86a;
457 	off_t truncsize1 = 0x2de38;
458 	off_t rofs2 = 0x1fd7a;
459 	ssize_t rsize2 = 0xc594;
460 	off_t truncsize2 = 0x31e71;
461 
462 	/* Sets the file size to something larger than the next write */
463 	do_ftruncate(truncsize0);
464 	/*
465 	 * Creates a dirty buffer.  The part in lbn 2 doesn't flush
466 	 * synchronously.
467 	 */
468 	do_write(wsize, wofs);
469 	/* Truncates part of the dirty buffer created in step 2 */
470 	do_ftruncate(truncsize1);
471 	/* XXX ?I don't know why this is necessary? */
472 	do_read(rsize2, rofs2);
473 	/* Truncates the dirty buffer */
474 	do_ftruncate(truncsize2);
475 	close(m_test_fd);
476 }
477 
478 /*
479  * Regression test for a bug introduced in r348931
480  *
481  * Sequence of operations:
482  * 1) The first write reads lbn so it can modify it
483  * 2) The first write flushes lbn 3 immediately because it's the end of file
484  * 3) The first write then flushes lbn 4 because it's the end of the file
485  * 4) The second write modifies the cached versions of lbn 3 and 4
486  * 5) The third write's getblkx invalidates lbn 4's B_CACHE because it's
487  *    extending the buffer.  Then it flushes lbn 4 because B_DELWRI was set but
488  *    B_CACHE was clear.
489  * 6) fuse_write_biobackend erroneously called vfs_bio_clrbuf, putting the
490  *    buffer into a weird write-only state.  All read operations would return
491  *    0.  Writes were apparently still processed, because the buffer's contents
492  *    were correct when examined in a core dump.
493  * 7) The third write reads lbn 4 because cache is clear
494  * 9) uiomove dutifully copies new data into the buffer
495  * 10) The buffer's dirty is flushed to lbn 4
496  * 11) The read returns all zeros because of step 6.
497  *
498  * Based on:
499  * fsx -WR -l 524388 -o 131072 -P /tmp -S6456 -q  fsx.bin
500  */
501 TEST_P(Io, resize_a_valid_buffer_while_extending)
502 {
503 	do_write(0x14530, 0x36ee6);	/* [0x36ee6, 0x4b415] */
504 	do_write(0x1507c, 0x33256);	/* [0x33256, 0x482d1] */
505 	do_write(0x175c, 0x4c03d);	/* [0x4c03d, 0x4d798] */
506 	do_read(0xe277, 0x3599c);	/* [0x3599c, 0x43c12] */
507 	close(m_test_fd);
508 }
509 
/*
 * Run every Io test under each combination of FUSE_ASYNC_READ on/off,
 * three max_write sizes, and all four cache modes.
 */
INSTANTIATE_TEST_SUITE_P(Io, Io,
	Combine(Bool(),					/* async read */
		Values(0x1000, 0x10000, 0x20000),	/* m_maxwrite */
		Values(Uncached, Writethrough, Writeback, WritebackAsync)
	)
);

/*
 * The IoCacheable tests use mmap, so the Uncached (direct I/O) cache
 * mode is excluded from their parameter space.
 */
INSTANTIATE_TEST_SUITE_P(Io, IoCacheable,
	Combine(Bool(),					/* async read */
		Values(0x1000, 0x10000, 0x20000),	/* m_maxwrite */
		Values(Writethrough, Writeback, WritebackAsync)
	)
);
523